Chromium Code Reviews

Side by Side Diff: src/ppc/lithium-codegen-ppc.cc

Issue 571173003: PowerPC specific sub-directories (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Updated ppc sub-dirs to current V8 code levels Created 6 years, 2 months ago
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 //
3 // Copyright IBM Corp. 2012, 2013. All rights reserved.
4 //
2 // Use of this source code is governed by a BSD-style license that can be 5 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 6 // found in the LICENSE file.
4 7
5 #include "src/v8.h" 8 #include "src/v8.h"
6 9
7 #include "src/arm/lithium-codegen-arm.h"
8 #include "src/arm/lithium-gap-resolver-arm.h"
9 #include "src/base/bits.h" 10 #include "src/base/bits.h"
10 #include "src/code-factory.h" 11 #include "src/code-factory.h"
11 #include "src/code-stubs.h" 12 #include "src/code-stubs.h"
12 #include "src/hydrogen-osr.h" 13 #include "src/hydrogen-osr.h"
14 #include "src/ic/ic.h"
13 #include "src/ic/stub-cache.h" 15 #include "src/ic/stub-cache.h"
16 #include "src/ppc/lithium-codegen-ppc.h"
17 #include "src/ppc/lithium-gap-resolver-ppc.h"
14 18
15 namespace v8 { 19 namespace v8 {
16 namespace internal { 20 namespace internal {
17 21
18 22
19 class SafepointGenerator FINAL : public CallWrapper { 23 class SafepointGenerator FINAL : public CallWrapper {
20 public: 24 public:
21 SafepointGenerator(LCodeGen* codegen, 25 SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
22 LPointerMap* pointers,
23 Safepoint::DeoptMode mode) 26 Safepoint::DeoptMode mode)
24 : codegen_(codegen), 27 : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
25 pointers_(pointers),
26 deopt_mode_(mode) { }
27 virtual ~SafepointGenerator() {} 28 virtual ~SafepointGenerator() {}
28 29
29 virtual void BeforeCall(int call_size) const OVERRIDE {} 30 virtual void BeforeCall(int call_size) const OVERRIDE {}
30 31
31 virtual void AfterCall() const OVERRIDE { 32 virtual void AfterCall() const OVERRIDE {
32 codegen_->RecordSafepoint(pointers_, deopt_mode_); 33 codegen_->RecordSafepoint(pointers_, deopt_mode_);
33 } 34 }
34 35
35 private: 36 private:
36 LCodeGen* codegen_; 37 LCodeGen* codegen_;
37 LPointerMap* pointers_; 38 LPointerMap* pointers_;
38 Safepoint::DeoptMode deopt_mode_; 39 Safepoint::DeoptMode deopt_mode_;
39 }; 40 };
40 41
41 42
42 #define __ masm()-> 43 #define __ masm()->
43 44
44 bool LCodeGen::GenerateCode() { 45 bool LCodeGen::GenerateCode() {
45 LPhase phase("Z_Code generation", chunk()); 46 LPhase phase("Z_Code generation", chunk());
46 DCHECK(is_unused()); 47 DCHECK(is_unused());
47 status_ = GENERATING; 48 status_ = GENERATING;
48 49
49 // Open a frame scope to indicate that there is a frame on the stack. The 50 // Open a frame scope to indicate that there is a frame on the stack. The
50 // NONE indicates that the scope shouldn't actually generate code to set up 51 // NONE indicates that the scope shouldn't actually generate code to set up
51 // the frame (that is done in GeneratePrologue). 52 // the frame (that is done in GeneratePrologue).
52 FrameScope frame_scope(masm_, StackFrame::NONE); 53 FrameScope frame_scope(masm_, StackFrame::NONE);
53 54
54 return GeneratePrologue() && 55 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
55 GenerateBody() && 56 GenerateJumpTable() && GenerateSafepointTable();
56 GenerateDeferredCode() &&
57 GenerateDeoptJumpTable() &&
58 GenerateSafepointTable();
59 } 57 }
60 58
61 59
62 void LCodeGen::FinishCode(Handle<Code> code) { 60 void LCodeGen::FinishCode(Handle<Code> code) {
63 DCHECK(is_done()); 61 DCHECK(is_done());
64 code->set_stack_slots(GetStackSlotCount()); 62 code->set_stack_slots(GetStackSlotCount());
65 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); 63 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
66 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); 64 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
67 PopulateDeoptimizationData(code); 65 PopulateDeoptimizationData(code);
68 } 66 }
69 67
70 68
71 void LCodeGen::SaveCallerDoubles() { 69 void LCodeGen::SaveCallerDoubles() {
72 DCHECK(info()->saves_caller_doubles()); 70 DCHECK(info()->saves_caller_doubles());
73 DCHECK(NeedsEagerFrame()); 71 DCHECK(NeedsEagerFrame());
74 Comment(";;; Save clobbered callee double registers"); 72 Comment(";;; Save clobbered callee double registers");
75 int count = 0; 73 int count = 0;
76 BitVector* doubles = chunk()->allocated_double_registers(); 74 BitVector* doubles = chunk()->allocated_double_registers();
77 BitVector::Iterator save_iterator(doubles); 75 BitVector::Iterator save_iterator(doubles);
78 while (!save_iterator.Done()) { 76 while (!save_iterator.Done()) {
79 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), 77 __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
80 MemOperand(sp, count * kDoubleSize)); 78 MemOperand(sp, count * kDoubleSize));
81 save_iterator.Advance(); 79 save_iterator.Advance();
82 count++; 80 count++;
83 } 81 }
84 } 82 }
85 83
86 84
87 void LCodeGen::RestoreCallerDoubles() { 85 void LCodeGen::RestoreCallerDoubles() {
88 DCHECK(info()->saves_caller_doubles()); 86 DCHECK(info()->saves_caller_doubles());
89 DCHECK(NeedsEagerFrame()); 87 DCHECK(NeedsEagerFrame());
90 Comment(";;; Restore clobbered callee double registers"); 88 Comment(";;; Restore clobbered callee double registers");
91 BitVector* doubles = chunk()->allocated_double_registers(); 89 BitVector* doubles = chunk()->allocated_double_registers();
92 BitVector::Iterator save_iterator(doubles); 90 BitVector::Iterator save_iterator(doubles);
93 int count = 0; 91 int count = 0;
94 while (!save_iterator.Done()) { 92 while (!save_iterator.Done()) {
95 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), 93 __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
96 MemOperand(sp, count * kDoubleSize)); 94 MemOperand(sp, count * kDoubleSize));
97 save_iterator.Advance(); 95 save_iterator.Advance();
98 count++; 96 count++;
99 } 97 }
100 } 98 }
101 99
102 100
103 bool LCodeGen::GeneratePrologue() { 101 bool LCodeGen::GeneratePrologue() {
104 DCHECK(is_generating()); 102 DCHECK(is_generating());
105 103
106 if (info()->IsOptimizing()) { 104 if (info()->IsOptimizing()) {
107 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 105 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
108 106
109 #ifdef DEBUG 107 #ifdef DEBUG
110 if (strlen(FLAG_stop_at) > 0 && 108 if (strlen(FLAG_stop_at) > 0 &&
111 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { 109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
112 __ stop("stop_at"); 110 __ stop("stop_at");
113 } 111 }
114 #endif 112 #endif
115 113
116 // r1: Callee's JS function. 114 // r4: Callee's JS function.
117 // cp: Callee's context. 115 // cp: Callee's context.
118 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool) 116 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
119 // fp: Caller's frame pointer. 117 // fp: Caller's frame pointer.
120 // lr: Caller's pc. 118 // lr: Caller's pc.
121 119
122 // Sloppy mode functions and builtins need to replace the receiver with the 120 // Sloppy mode functions and builtins need to replace the receiver with the
123 // global proxy when called as functions (without an explicit receiver 121 // global proxy when called as functions (without an explicit receiver
124 // object). 122 // object).
125 if (info_->this_has_uses() && 123 if (info_->this_has_uses() && info_->strict_mode() == SLOPPY &&
126 info_->strict_mode() == SLOPPY &&
127 !info_->is_native()) { 124 !info_->is_native()) {
128 Label ok; 125 Label ok;
129 int receiver_offset = info_->scope()->num_parameters() * kPointerSize; 126 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
130 __ ldr(r2, MemOperand(sp, receiver_offset)); 127 __ LoadP(r5, MemOperand(sp, receiver_offset));
131 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); 128 __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
132 __ b(ne, &ok); 129 __ bne(&ok);
133 130
134 __ ldr(r2, GlobalObjectOperand()); 131 __ LoadP(r5, GlobalObjectOperand());
135 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset)); 132 __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
136 133
137 __ str(r2, MemOperand(sp, receiver_offset)); 134 __ StoreP(r5, MemOperand(sp, receiver_offset));
138 135
139 __ bind(&ok); 136 __ bind(&ok);
140 } 137 }
141 } 138 }
142 139
143 info()->set_prologue_offset(masm_->pc_offset()); 140 info()->set_prologue_offset(masm_->pc_offset());
144 if (NeedsEagerFrame()) { 141 if (NeedsEagerFrame()) {
145 if (info()->IsStub()) { 142 if (info()->IsStub()) {
146 __ StubPrologue(); 143 __ StubPrologue();
147 } else { 144 } else {
148 __ Prologue(info()->IsCodePreAgingActive()); 145 __ Prologue(info()->IsCodePreAgingActive());
149 } 146 }
150 frame_is_built_ = true; 147 frame_is_built_ = true;
151 info_->AddNoFrameRange(0, masm_->pc_offset()); 148 info_->AddNoFrameRange(0, masm_->pc_offset());
152 } 149 }
153 150
154 // Reserve space for the stack slots needed by the code. 151 // Reserve space for the stack slots needed by the code.
155 int slots = GetStackSlotCount(); 152 int slots = GetStackSlotCount();
156 if (slots > 0) { 153 if (slots > 0) {
154 __ subi(sp, sp, Operand(slots * kPointerSize));
157 if (FLAG_debug_code) { 155 if (FLAG_debug_code) {
158 __ sub(sp, sp, Operand(slots * kPointerSize)); 156 __ Push(r3, r4);
159 __ push(r0); 157 __ li(r0, Operand(slots));
160 __ push(r1); 158 __ mtctr(r0);
161 __ add(r0, sp, Operand(slots * kPointerSize)); 159 __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
162 __ mov(r1, Operand(kSlotsZapValue)); 160 __ mov(r4, Operand(kSlotsZapValue));
163 Label loop; 161 Label loop;
164 __ bind(&loop); 162 __ bind(&loop);
165 __ sub(r0, r0, Operand(kPointerSize)); 163 __ StorePU(r4, MemOperand(r3, -kPointerSize));
166 __ str(r1, MemOperand(r0, 2 * kPointerSize)); 164 __ bdnz(&loop);
167 __ cmp(r0, sp); 165 __ Pop(r3, r4);
168 __ b(ne, &loop);
169 __ pop(r1);
170 __ pop(r0);
171 } else {
172 __ sub(sp, sp, Operand(slots * kPointerSize));
173 } 166 }
174 } 167 }
175 168
176 if (info()->saves_caller_doubles()) { 169 if (info()->saves_caller_doubles()) {
177 SaveCallerDoubles(); 170 SaveCallerDoubles();
178 } 171 }
179 172
180 // Possibly allocate a local context. 173 // Possibly allocate a local context.
181 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 174 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
182 if (heap_slots > 0) { 175 if (heap_slots > 0) {
183 Comment(";;; Allocate local context"); 176 Comment(";;; Allocate local context");
184 bool need_write_barrier = true; 177 bool need_write_barrier = true;
185 // Argument to NewContext is the function, which is in r1. 178 // Argument to NewContext is the function, which is in r4.
186 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 179 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
187 FastNewContextStub stub(isolate(), heap_slots); 180 FastNewContextStub stub(isolate(), heap_slots);
188 __ CallStub(&stub); 181 __ CallStub(&stub);
189 // Result of FastNewContextStub is always in new space. 182 // Result of FastNewContextStub is always in new space.
190 need_write_barrier = false; 183 need_write_barrier = false;
191 } else { 184 } else {
192 __ push(r1); 185 __ push(r4);
193 __ CallRuntime(Runtime::kNewFunctionContext, 1); 186 __ CallRuntime(Runtime::kNewFunctionContext, 1);
194 } 187 }
195 RecordSafepoint(Safepoint::kNoLazyDeopt); 188 RecordSafepoint(Safepoint::kNoLazyDeopt);
196 // Context is returned in both r0 and cp. It replaces the context 189 // Context is returned in both r3 and cp. It replaces the context
197 // passed to us. It's saved in the stack and kept live in cp. 190 // passed to us. It's saved in the stack and kept live in cp.
198 __ mov(cp, r0); 191 __ mr(cp, r3);
199 __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset)); 192 __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
200 // Copy any necessary parameters into the context. 193 // Copy any necessary parameters into the context.
201 int num_parameters = scope()->num_parameters(); 194 int num_parameters = scope()->num_parameters();
202 for (int i = 0; i < num_parameters; i++) { 195 for (int i = 0; i < num_parameters; i++) {
203 Variable* var = scope()->parameter(i); 196 Variable* var = scope()->parameter(i);
204 if (var->IsContextSlot()) { 197 if (var->IsContextSlot()) {
205 int parameter_offset = StandardFrameConstants::kCallerSPOffset + 198 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
206 (num_parameters - 1 - i) * kPointerSize; 199 (num_parameters - 1 - i) * kPointerSize;
207 // Load parameter from stack. 200 // Load parameter from stack.
208 __ ldr(r0, MemOperand(fp, parameter_offset)); 201 __ LoadP(r3, MemOperand(fp, parameter_offset));
209 // Store it in the context. 202 // Store it in the context.
210 MemOperand target = ContextOperand(cp, var->index()); 203 MemOperand target = ContextOperand(cp, var->index());
211 __ str(r0, target); 204 __ StoreP(r3, target, r0);
212 // Update the write barrier. This clobbers r3 and r0. 205 // Update the write barrier. This clobbers r6 and r3.
213 if (need_write_barrier) { 206 if (need_write_barrier) {
214 __ RecordWriteContextSlot( 207 __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
215 cp, 208 GetLinkRegisterState(), kSaveFPRegs);
216 target.offset(),
217 r0,
218 r3,
219 GetLinkRegisterState(),
220 kSaveFPRegs);
221 } else if (FLAG_debug_code) { 209 } else if (FLAG_debug_code) {
222 Label done; 210 Label done;
223 __ JumpIfInNewSpace(cp, r0, &done); 211 __ JumpIfInNewSpace(cp, r3, &done);
224 __ Abort(kExpectedNewSpaceObject); 212 __ Abort(kExpectedNewSpaceObject);
225 __ bind(&done); 213 __ bind(&done);
226 } 214 }
227 } 215 }
228 } 216 }
229 Comment(";;; End allocate local context"); 217 Comment(";;; End allocate local context");
230 } 218 }
231 219
232 // Trace the call. 220 // Trace the call.
233 if (FLAG_trace && info()->IsOptimizing()) { 221 if (FLAG_trace && info()->IsOptimizing()) {
234 // We have not executed any compiled code yet, so cp still holds the 222 // We have not executed any compiled code yet, so cp still holds the
235 // incoming context. 223 // incoming context.
236 __ CallRuntime(Runtime::kTraceEnter, 0); 224 __ CallRuntime(Runtime::kTraceEnter, 0);
237 } 225 }
238 return !is_aborted(); 226 return !is_aborted();
239 } 227 }
240 228
241 229
242 void LCodeGen::GenerateOsrPrologue() { 230 void LCodeGen::GenerateOsrPrologue() {
243 // Generate the OSR entry prologue at the first unknown OSR value, or if there 231 // Generate the OSR entry prologue at the first unknown OSR value, or if there
244 // are none, at the OSR entrypoint instruction. 232 // are none, at the OSR entrypoint instruction.
245 if (osr_pc_offset_ >= 0) return; 233 if (osr_pc_offset_ >= 0) return;
246 234
247 osr_pc_offset_ = masm()->pc_offset(); 235 osr_pc_offset_ = masm()->pc_offset();
248 236
249 // Adjust the frame size, subsuming the unoptimized frame into the 237 // Adjust the frame size, subsuming the unoptimized frame into the
250 // optimized frame. 238 // optimized frame.
251 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); 239 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
252 DCHECK(slots >= 0); 240 DCHECK(slots >= 0);
253 __ sub(sp, sp, Operand(slots * kPointerSize)); 241 __ subi(sp, sp, Operand(slots * kPointerSize));
254 } 242 }
255 243
256 244
257 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { 245 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
258 if (instr->IsCall()) { 246 if (instr->IsCall()) {
259 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 247 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
260 } 248 }
261 if (!instr->IsLazyBailout() && !instr->IsGap()) { 249 if (!instr->IsLazyBailout() && !instr->IsGap()) {
262 safepoints_.BumpLastLazySafepointIndex(); 250 safepoints_.BumpLastLazySafepointIndex();
263 } 251 }
264 } 252 }
265 253
266 254
267 bool LCodeGen::GenerateDeferredCode() { 255 bool LCodeGen::GenerateDeferredCode() {
268 DCHECK(is_generating()); 256 DCHECK(is_generating());
269 if (deferred_.length() > 0) { 257 if (deferred_.length() > 0) {
270 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 258 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
271 LDeferredCode* code = deferred_[i]; 259 LDeferredCode* code = deferred_[i];
272 260
273 HValue* value = 261 HValue* value =
274 instructions_->at(code->instruction_index())->hydrogen_value(); 262 instructions_->at(code->instruction_index())->hydrogen_value();
275 RecordAndWritePosition( 263 RecordAndWritePosition(
276 chunk()->graph()->SourcePositionToScriptPosition(value->position())); 264 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
277 265
278 Comment(";;; <@%d,#%d> " 266 Comment(
279 "-------------------- Deferred %s --------------------", 267 ";;; <@%d,#%d> "
280 code->instruction_index(), 268 "-------------------- Deferred %s --------------------",
281 code->instr()->hydrogen_value()->id(), 269 code->instruction_index(), code->instr()->hydrogen_value()->id(),
282 code->instr()->Mnemonic()); 270 code->instr()->Mnemonic());
283 __ bind(code->entry()); 271 __ bind(code->entry());
284 if (NeedsDeferredFrame()) { 272 if (NeedsDeferredFrame()) {
285 Comment(";;; Build frame"); 273 Comment(";;; Build frame");
286 DCHECK(!frame_is_built_); 274 DCHECK(!frame_is_built_);
287 DCHECK(info()->IsStub()); 275 DCHECK(info()->IsStub());
288 frame_is_built_ = true; 276 frame_is_built_ = true;
289 __ PushFixedFrame(); 277 __ PushFixedFrame();
290 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); 278 __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
291 __ push(scratch0()); 279 __ push(scratch0());
292 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 280 __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
293 Comment(";;; Deferred code"); 281 Comment(";;; Deferred code");
294 } 282 }
295 code->Generate(); 283 code->Generate();
296 if (NeedsDeferredFrame()) { 284 if (NeedsDeferredFrame()) {
297 Comment(";;; Destroy frame"); 285 Comment(";;; Destroy frame");
298 DCHECK(frame_is_built_); 286 DCHECK(frame_is_built_);
299 __ pop(ip); 287 __ pop(ip);
300 __ PopFixedFrame(); 288 __ PopFixedFrame();
301 frame_is_built_ = false; 289 frame_is_built_ = false;
302 } 290 }
303 __ jmp(code->exit()); 291 __ b(code->exit());
304 } 292 }
305 } 293 }
306 294
307 // Force constant pool emission at the end of the deferred code to make
308 // sure that no constant pools are emitted after.
309 masm()->CheckConstPool(true, false);
310
311 return !is_aborted(); 295 return !is_aborted();
312 } 296 }
313 297
314 298
315 bool LCodeGen::GenerateDeoptJumpTable() { 299 bool LCodeGen::GenerateJumpTable() {
316 // Check that the jump table is accessible from everywhere in the function 300 // Check that the jump table is accessible from everywhere in the function
317 // code, i.e. that offsets to the table can be encoded in the 24bit signed 301 // code, i.e. that offsets to the table can be encoded in the 24bit signed
318 // immediate of a branch instruction. 302 // immediate of a branch instruction.
319 // To simplify we consider the code size from the first instruction to the 303 // To simplify we consider the code size from the first instruction to the
320 // end of the jump table. We also don't consider the pc load delta. 304 // end of the jump table. We also don't consider the pc load delta.
321 // Each entry in the jump table generates one instruction and inlines one 305 // Each entry in the jump table generates one instruction and inlines one
322 // 32bit data after it. 306 // 32bit data after it.
323 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + 307 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
324 deopt_jump_table_.length() * 7)) { 308 jump_table_.length() * 7)) {
325 Abort(kGeneratedCodeIsTooLarge); 309 Abort(kGeneratedCodeIsTooLarge);
326 } 310 }
327 311
328 if (deopt_jump_table_.length() > 0) { 312 if (jump_table_.length() > 0) {
329 Label needs_frame, call_deopt_entry; 313 Label needs_frame, call_deopt_entry;
330 314
331 Comment(";;; -------------------- Jump table --------------------"); 315 Comment(";;; -------------------- Jump table --------------------");
332 Address base = deopt_jump_table_[0].address; 316 Address base = jump_table_[0].address;
333 317
334 Register entry_offset = scratch0(); 318 Register entry_offset = scratch0();
335 319
336 int length = deopt_jump_table_.length(); 320 int length = jump_table_.length();
337 for (int i = 0; i < length; i++) { 321 for (int i = 0; i < length; i++) {
338 __ bind(&deopt_jump_table_[i].label); 322 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
323 __ bind(&table_entry->label);
339 324
340 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; 325 DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
341 DCHECK(type == deopt_jump_table_[0].bailout_type); 326 Address entry = table_entry->address;
342 Address entry = deopt_jump_table_[i].address; 327 DeoptComment(table_entry->reason);
343 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
344 DCHECK(id != Deoptimizer::kNotDeoptimizationEntry);
345 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
346 328
347 // Second-level deopt table entries are contiguous and small, so instead 329 // Second-level deopt table entries are contiguous and small, so instead
348 // of loading the full, absolute address of each one, load an immediate 330 // of loading the full, absolute address of each one, load an immediate
349 // offset which will be added to the base address later. 331 // offset which will be added to the base address later.
350 __ mov(entry_offset, Operand(entry - base)); 332 __ mov(entry_offset, Operand(entry - base));
351 333
352 if (deopt_jump_table_[i].needs_frame) { 334 if (table_entry->needs_frame) {
353 DCHECK(!info()->saves_caller_doubles()); 335 DCHECK(!info()->saves_caller_doubles());
354 if (needs_frame.is_bound()) { 336 if (needs_frame.is_bound()) {
355 __ b(&needs_frame); 337 __ b(&needs_frame);
356 } else { 338 } else {
357 __ bind(&needs_frame); 339 __ bind(&needs_frame);
358 Comment(";;; call deopt with frame"); 340 Comment(";;; call deopt with frame");
359 __ PushFixedFrame(); 341 __ PushFixedFrame();
360 // This variant of deopt can only be used with stubs. Since we don't 342 // This variant of deopt can only be used with stubs. Since we don't
361 // have a function pointer to install in the stack frame that we're 343 // have a function pointer to install in the stack frame that we're
362 // building, install a special marker there instead. 344 // building, install a special marker there instead.
363 DCHECK(info()->IsStub()); 345 DCHECK(info()->IsStub());
364 __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB))); 346 __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::STUB));
365 __ push(ip); 347 __ push(r0);
366 __ add(fp, sp, 348 __ addi(fp, sp,
367 Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 349 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
368 __ bind(&call_deopt_entry); 350 __ bind(&call_deopt_entry);
369 // Add the base address to the offset previously loaded in 351 // Add the base address to the offset previously loaded in
370 // entry_offset. 352 // entry_offset.
371 __ add(entry_offset, entry_offset, 353 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
372 Operand(ExternalReference::ForDeoptEntry(base))); 354 __ add(ip, entry_offset, ip);
373 __ blx(entry_offset); 355 __ Call(ip);
374 } 356 }
375
376 masm()->CheckConstPool(false, false);
377 } else { 357 } else {
378 // The last entry can fall through into `call_deopt_entry`, avoiding a 358 // The last entry can fall through into `call_deopt_entry`, avoiding a
379 // branch. 359 // branch.
380 bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound(); 360 bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
381 361
382 if (need_branch) __ b(&call_deopt_entry); 362 if (need_branch) __ b(&call_deopt_entry);
383
384 masm()->CheckConstPool(false, !need_branch);
385 } 363 }
386 } 364 }
387 365
388 if (!call_deopt_entry.is_bound()) { 366 if (!call_deopt_entry.is_bound()) {
389 Comment(";;; call deopt"); 367 Comment(";;; call deopt");
390 __ bind(&call_deopt_entry); 368 __ bind(&call_deopt_entry);
391 369
392 if (info()->saves_caller_doubles()) { 370 if (info()->saves_caller_doubles()) {
393 DCHECK(info()->IsStub()); 371 DCHECK(info()->IsStub());
394 RestoreCallerDoubles(); 372 RestoreCallerDoubles();
395 } 373 }
396 374
397 // Add the base address to the offset previously loaded in entry_offset. 375 // Add the base address to the offset previously loaded in entry_offset.
398 __ add(entry_offset, entry_offset, 376 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
399 Operand(ExternalReference::ForDeoptEntry(base))); 377 __ add(ip, entry_offset, ip);
400 __ blx(entry_offset); 378 __ Call(ip);
401 } 379 }
402 } 380 }
403 381
404 // Force constant pool emission at the end of the deopt jump table to make
405 // sure that no constant pools are emitted after.
406 masm()->CheckConstPool(true, false);
407
408 // The deoptimization jump table is the last part of the instruction 382 // The deoptimization jump table is the last part of the instruction
409 // sequence. Mark the generated code as done unless we bailed out. 383 // sequence. Mark the generated code as done unless we bailed out.
410 if (!is_aborted()) status_ = DONE; 384 if (!is_aborted()) status_ = DONE;
411 return !is_aborted(); 385 return !is_aborted();
412 } 386 }
413 387
414 388
415 bool LCodeGen::GenerateSafepointTable() { 389 bool LCodeGen::GenerateSafepointTable() {
416 DCHECK(is_done()); 390 DCHECK(is_done());
417 safepoints_.Emit(masm(), GetStackSlotCount()); 391 safepoints_.Emit(masm(), GetStackSlotCount());
418 return !is_aborted(); 392 return !is_aborted();
419 } 393 }
420 394
421 395
422 Register LCodeGen::ToRegister(int index) const { 396 Register LCodeGen::ToRegister(int index) const {
423 return Register::FromAllocationIndex(index); 397 return Register::FromAllocationIndex(index);
424 } 398 }
425 399
426 400
427 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { 401 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
428 return DwVfpRegister::FromAllocationIndex(index); 402 return DoubleRegister::FromAllocationIndex(index);
429 } 403 }
430 404
431 405
432 Register LCodeGen::ToRegister(LOperand* op) const { 406 Register LCodeGen::ToRegister(LOperand* op) const {
433 DCHECK(op->IsRegister()); 407 DCHECK(op->IsRegister());
434 return ToRegister(op->index()); 408 return ToRegister(op->index());
435 } 409 }
436 410
437 411
438 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { 412 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
439 if (op->IsRegister()) { 413 if (op->IsRegister()) {
440 return ToRegister(op->index()); 414 return ToRegister(op->index());
441 } else if (op->IsConstantOperand()) { 415 } else if (op->IsConstantOperand()) {
442 LConstantOperand* const_op = LConstantOperand::cast(op); 416 LConstantOperand* const_op = LConstantOperand::cast(op);
443 HConstant* constant = chunk_->LookupConstant(const_op); 417 HConstant* constant = chunk_->LookupConstant(const_op);
444 Handle<Object> literal = constant->handle(isolate()); 418 Handle<Object> literal = constant->handle(isolate());
445 Representation r = chunk_->LookupLiteralRepresentation(const_op); 419 Representation r = chunk_->LookupLiteralRepresentation(const_op);
446 if (r.IsInteger32()) { 420 if (r.IsInteger32()) {
447 DCHECK(literal->IsNumber()); 421 DCHECK(literal->IsNumber());
448 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); 422 __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
449 } else if (r.IsDouble()) { 423 } else if (r.IsDouble()) {
450 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); 424 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
451 } else { 425 } else {
452 DCHECK(r.IsSmiOrTagged()); 426 DCHECK(r.IsSmiOrTagged());
453 __ Move(scratch, literal); 427 __ Move(scratch, literal);
454 } 428 }
455 return scratch; 429 return scratch;
456 } else if (op->IsStackSlot()) { 430 } else if (op->IsStackSlot()) {
457 __ ldr(scratch, ToMemOperand(op)); 431 __ LoadP(scratch, ToMemOperand(op));
458 return scratch; 432 return scratch;
459 } 433 }
460 UNREACHABLE(); 434 UNREACHABLE();
461 return scratch; 435 return scratch;
462 } 436 }
463 437
464 438
465 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 439 void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
440 Register dst) {
441 DCHECK(IsInteger32(const_op));
442 HConstant* constant = chunk_->LookupConstant(const_op);
443 int32_t value = constant->Integer32Value();
444 if (IsSmi(const_op)) {
445 __ LoadSmiLiteral(dst, Smi::FromInt(value));
446 } else {
447 __ LoadIntLiteral(dst, value);
448 }
449 }
450
451
452 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
466 DCHECK(op->IsDoubleRegister()); 453 DCHECK(op->IsDoubleRegister());
467 return ToDoubleRegister(op->index()); 454 return ToDoubleRegister(op->index());
468 } 455 }
469 456
470 457
471 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
472 SwVfpRegister flt_scratch,
473 DwVfpRegister dbl_scratch) {
474 if (op->IsDoubleRegister()) {
475 return ToDoubleRegister(op->index());
476 } else if (op->IsConstantOperand()) {
477 LConstantOperand* const_op = LConstantOperand::cast(op);
478 HConstant* constant = chunk_->LookupConstant(const_op);
479 Handle<Object> literal = constant->handle(isolate());
480 Representation r = chunk_->LookupLiteralRepresentation(const_op);
481 if (r.IsInteger32()) {
482 DCHECK(literal->IsNumber());
483 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
484 __ vmov(flt_scratch, ip);
485 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
486 return dbl_scratch;
487 } else if (r.IsDouble()) {
488 Abort(kUnsupportedDoubleImmediate);
489 } else if (r.IsTagged()) {
490 Abort(kUnsupportedTaggedImmediate);
491 }
492 } else if (op->IsStackSlot()) {
493 // TODO(regis): Why is vldr not taking a MemOperand?
494 // __ vldr(dbl_scratch, ToMemOperand(op));
495 MemOperand mem_op = ToMemOperand(op);
496 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
497 return dbl_scratch;
498 }
499 UNREACHABLE();
500 return dbl_scratch;
501 }
502
503
504 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { 458 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
505 HConstant* constant = chunk_->LookupConstant(op); 459 HConstant* constant = chunk_->LookupConstant(op);
506 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); 460 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
507 return constant->handle(isolate()); 461 return constant->handle(isolate());
508 } 462 }
509 463
510 464
511 bool LCodeGen::IsInteger32(LConstantOperand* op) const { 465 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
512 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); 466 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
513 } 467 }
514 468
515 469
516 bool LCodeGen::IsSmi(LConstantOperand* op) const { 470 bool LCodeGen::IsSmi(LConstantOperand* op) const {
517 return chunk_->LookupLiteralRepresentation(op).IsSmi(); 471 return chunk_->LookupLiteralRepresentation(op).IsSmi();
518 } 472 }
519 473
520 474
521 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { 475 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
522 return ToRepresentation(op, Representation::Integer32()); 476 return ToRepresentation(op, Representation::Integer32());
523 } 477 }
524 478
525 479
526 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, 480 intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
527 const Representation& r) const { 481 const Representation& r) const {
528 HConstant* constant = chunk_->LookupConstant(op); 482 HConstant* constant = chunk_->LookupConstant(op);
529 int32_t value = constant->Integer32Value(); 483 int32_t value = constant->Integer32Value();
530 if (r.IsInteger32()) return value; 484 if (r.IsInteger32()) return value;
531 DCHECK(r.IsSmiOrTagged()); 485 DCHECK(r.IsSmiOrTagged());
532 return reinterpret_cast<int32_t>(Smi::FromInt(value)); 486 return reinterpret_cast<intptr_t>(Smi::FromInt(value));
533 } 487 }
534 488
535 489
536 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { 490 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
537 HConstant* constant = chunk_->LookupConstant(op); 491 HConstant* constant = chunk_->LookupConstant(op);
538 return Smi::FromInt(constant->Integer32Value()); 492 return Smi::FromInt(constant->Integer32Value());
539 } 493 }
540 494
541 495
542 double LCodeGen::ToDouble(LConstantOperand* op) const { 496 double LCodeGen::ToDouble(LConstantOperand* op) const {
(...skipping 51 matching lines...)
594 } 548 }
595 549
596 550
597 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { 551 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
598 DCHECK(op->IsDoubleStackSlot()); 552 DCHECK(op->IsDoubleStackSlot());
599 if (NeedsEagerFrame()) { 553 if (NeedsEagerFrame()) {
600 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); 554 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
601 } else { 555 } else {
602 // Retrieve parameter without eager stack-frame relative to the 556 // Retrieve parameter without eager stack-frame relative to the
603 // stack-pointer. 557 // stack-pointer.
604 return MemOperand( 558 return MemOperand(sp,
605 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); 559 ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
606 } 560 }
607 } 561 }
608 562
609 563
610 void LCodeGen::WriteTranslation(LEnvironment* environment, 564 void LCodeGen::WriteTranslation(LEnvironment* environment,
611 Translation* translation) { 565 Translation* translation) {
612 if (environment == NULL) return; 566 if (environment == NULL) return;
613 567
614 // The translation includes one command per value in the environment. 568 // The translation includes one command per value in the environment.
615 int translation_size = environment->translation_size(); 569 int translation_size = environment->translation_size();
616 // The output frame height does not include the parameters. 570 // The output frame height does not include the parameters.
617 int height = translation_size - environment->parameter_count(); 571 int height = translation_size - environment->parameter_count();
618 572
619 WriteTranslation(environment->outer(), translation); 573 WriteTranslation(environment->outer(), translation);
620 bool has_closure_id = !info()->closure().is_null() && 574 bool has_closure_id =
575 !info()->closure().is_null() &&
621 !info()->closure().is_identical_to(environment->closure()); 576 !info()->closure().is_identical_to(environment->closure());
622 int closure_id = has_closure_id 577 int closure_id = has_closure_id
623 ? DefineDeoptimizationLiteral(environment->closure()) 578 ? DefineDeoptimizationLiteral(environment->closure())
624 : Translation::kSelfLiteralId; 579 : Translation::kSelfLiteralId;
625 580
626 switch (environment->frame_type()) { 581 switch (environment->frame_type()) {
627 case JS_FUNCTION: 582 case JS_FUNCTION:
628 translation->BeginJSFrame(environment->ast_id(), closure_id, height); 583 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
629 break; 584 break;
630 case JS_CONSTRUCT: 585 case JS_CONSTRUCT:
631 translation->BeginConstructStubFrame(closure_id, translation_size); 586 translation->BeginConstructStubFrame(closure_id, translation_size);
632 break; 587 break;
633 case JS_GETTER: 588 case JS_GETTER:
634 DCHECK(translation_size == 1); 589 DCHECK(translation_size == 1);
(...skipping 10 matching lines...)
645 break; 600 break;
646 case ARGUMENTS_ADAPTOR: 601 case ARGUMENTS_ADAPTOR:
647 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); 602 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
648 break; 603 break;
649 } 604 }
650 605
651 int object_index = 0; 606 int object_index = 0;
652 int dematerialized_index = 0; 607 int dematerialized_index = 0;
653 for (int i = 0; i < translation_size; ++i) { 608 for (int i = 0; i < translation_size; ++i) {
654 LOperand* value = environment->values()->at(i); 609 LOperand* value = environment->values()->at(i);
655 AddToTranslation(environment, 610 AddToTranslation(
656 translation, 611 environment, translation, value, environment->HasTaggedValueAt(i),
657 value, 612 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
658 environment->HasTaggedValueAt(i),
659 environment->HasUint32ValueAt(i),
660 &object_index,
661 &dematerialized_index);
662 } 613 }
663 } 614 }
664 615
665 616
666 void LCodeGen::AddToTranslation(LEnvironment* environment, 617 void LCodeGen::AddToTranslation(LEnvironment* environment,
667 Translation* translation, 618 Translation* translation, LOperand* op,
668 LOperand* op, 619 bool is_tagged, bool is_uint32,
669 bool is_tagged,
670 bool is_uint32,
671 int* object_index_pointer, 620 int* object_index_pointer,
672 int* dematerialized_index_pointer) { 621 int* dematerialized_index_pointer) {
673 if (op == LEnvironment::materialization_marker()) { 622 if (op == LEnvironment::materialization_marker()) {
674 int object_index = (*object_index_pointer)++; 623 int object_index = (*object_index_pointer)++;
675 if (environment->ObjectIsDuplicateAt(object_index)) { 624 if (environment->ObjectIsDuplicateAt(object_index)) {
676 int dupe_of = environment->ObjectDuplicateOfAt(object_index); 625 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
677 translation->DuplicateObject(dupe_of); 626 translation->DuplicateObject(dupe_of);
678 return; 627 return;
679 } 628 }
680 int object_length = environment->ObjectLengthAt(object_index); 629 int object_length = environment->ObjectLengthAt(object_index);
681 if (environment->ObjectIsArgumentsAt(object_index)) { 630 if (environment->ObjectIsArgumentsAt(object_index)) {
682 translation->BeginArgumentsObject(object_length); 631 translation->BeginArgumentsObject(object_length);
683 } else { 632 } else {
684 translation->BeginCapturedObject(object_length); 633 translation->BeginCapturedObject(object_length);
685 } 634 }
686 int dematerialized_index = *dematerialized_index_pointer; 635 int dematerialized_index = *dematerialized_index_pointer;
687 int env_offset = environment->translation_size() + dematerialized_index; 636 int env_offset = environment->translation_size() + dematerialized_index;
688 *dematerialized_index_pointer += object_length; 637 *dematerialized_index_pointer += object_length;
689 for (int i = 0; i < object_length; ++i) { 638 for (int i = 0; i < object_length; ++i) {
690 LOperand* value = environment->values()->at(env_offset + i); 639 LOperand* value = environment->values()->at(env_offset + i);
691 AddToTranslation(environment, 640 AddToTranslation(environment, translation, value,
692 translation,
693 value,
694 environment->HasTaggedValueAt(env_offset + i), 641 environment->HasTaggedValueAt(env_offset + i),
695 environment->HasUint32ValueAt(env_offset + i), 642 environment->HasUint32ValueAt(env_offset + i),
696 object_index_pointer, 643 object_index_pointer, dematerialized_index_pointer);
697 dematerialized_index_pointer);
698 } 644 }
699 return; 645 return;
700 } 646 }
701 647
702 if (op->IsStackSlot()) { 648 if (op->IsStackSlot()) {
703 if (is_tagged) { 649 if (is_tagged) {
704 translation->StoreStackSlot(op->index()); 650 translation->StoreStackSlot(op->index());
705 } else if (is_uint32) { 651 } else if (is_uint32) {
706 translation->StoreUint32StackSlot(op->index()); 652 translation->StoreUint32StackSlot(op->index());
707 } else { 653 } else {
(...skipping 16 matching lines...)
724 } else if (op->IsConstantOperand()) { 670 } else if (op->IsConstantOperand()) {
725 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); 671 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
726 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); 672 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
727 translation->StoreLiteral(src_index); 673 translation->StoreLiteral(src_index);
728 } else { 674 } else {
729 UNREACHABLE(); 675 UNREACHABLE();
730 } 676 }
731 } 677 }
732 678
733 679
734 int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) { 680 void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
735 int size = masm()->CallSize(code, mode); 681 LInstruction* instr) {
736 if (code->kind() == Code::BINARY_OP_IC || 682 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
737 code->kind() == Code::COMPARE_IC) {
738 size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
739 }
740 return size;
741 } 683 }
742 684
743 685
744 void LCodeGen::CallCode(Handle<Code> code, 686 void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
745 RelocInfo::Mode mode,
746 LInstruction* instr,
747 TargetAddressStorageMode storage_mode) {
748 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
749 }
750
751
752 void LCodeGen::CallCodeGeneric(Handle<Code> code,
753 RelocInfo::Mode mode,
754 LInstruction* instr, 687 LInstruction* instr,
755 SafepointMode safepoint_mode, 688 SafepointMode safepoint_mode) {
756 TargetAddressStorageMode storage_mode) {
757 DCHECK(instr != NULL); 689 DCHECK(instr != NULL);
758 // Block literal pool emission to ensure nop indicating no inlined smi code 690 __ Call(code, mode);
759 // is in the correct position.
760 Assembler::BlockConstPoolScope block_const_pool(masm());
761 __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
762 RecordSafepointWithLazyDeopt(instr, safepoint_mode); 691 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
763 692
764 // Signal that we don't inline smi code before these stubs in the 693 // Signal that we don't inline smi code before these stubs in the
765 // optimizing code generator. 694 // optimizing code generator.
766 if (code->kind() == Code::BINARY_OP_IC || 695 if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
767 code->kind() == Code::COMPARE_IC) {
768 __ nop(); 696 __ nop();
769 } 697 }
770 } 698 }
771 699
772 700
773 void LCodeGen::CallRuntime(const Runtime::Function* function, 701 void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
774 int num_arguments, 702 LInstruction* instr, SaveFPRegsMode save_doubles) {
775 LInstruction* instr,
776 SaveFPRegsMode save_doubles) {
777 DCHECK(instr != NULL); 703 DCHECK(instr != NULL);
778 704
779 __ CallRuntime(function, num_arguments, save_doubles); 705 __ CallRuntime(function, num_arguments, save_doubles);
780 706
781 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 707 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
782 } 708 }
783 709
784 710
785 void LCodeGen::LoadContextFromDeferred(LOperand* context) { 711 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
786 if (context->IsRegister()) { 712 if (context->IsRegister()) {
787 __ Move(cp, ToRegister(context)); 713 __ Move(cp, ToRegister(context));
788 } else if (context->IsStackSlot()) { 714 } else if (context->IsStackSlot()) {
789 __ ldr(cp, ToMemOperand(context)); 715 __ LoadP(cp, ToMemOperand(context));
790 } else if (context->IsConstantOperand()) { 716 } else if (context->IsConstantOperand()) {
791 HConstant* constant = 717 HConstant* constant =
792 chunk_->LookupConstant(LConstantOperand::cast(context)); 718 chunk_->LookupConstant(LConstantOperand::cast(context));
793 __ Move(cp, Handle<Object>::cast(constant->handle(isolate()))); 719 __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
794 } else { 720 } else {
795 UNREACHABLE(); 721 UNREACHABLE();
796 } 722 }
797 } 723 }
798 724
799 725
800 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, 726 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
801 int argc, 727 LInstruction* instr, LOperand* context) {
802 LInstruction* instr,
803 LOperand* context) {
804 LoadContextFromDeferred(context); 728 LoadContextFromDeferred(context);
805 __ CallRuntimeSaveDoubles(id); 729 __ CallRuntimeSaveDoubles(id);
806 RecordSafepointWithRegisters( 730 RecordSafepointWithRegisters(instr->pointer_map(), argc,
807 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); 731 Safepoint::kNoLazyDeopt);
808 } 732 }
809 733
810 734
811 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, 735 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
812 Safepoint::DeoptMode mode) { 736 Safepoint::DeoptMode mode) {
813 environment->set_has_been_used(); 737 environment->set_has_been_used();
814 if (!environment->HasBeenRegistered()) { 738 if (!environment->HasBeenRegistered()) {
815 // Physical stack frame layout: 739 // Physical stack frame layout:
816 // -x ............. -4 0 ..................................... y 740 // -x ............. -4 0 ..................................... y
817 // [incoming arguments] [spill slots] [pushed outgoing arguments] 741 // [incoming arguments] [spill slots] [pushed outgoing arguments]
(...skipping 12 matching lines...)
830 for (LEnvironment* e = environment; e != NULL; e = e->outer()) { 754 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
831 ++frame_count; 755 ++frame_count;
832 if (e->frame_type() == JS_FUNCTION) { 756 if (e->frame_type() == JS_FUNCTION) {
833 ++jsframe_count; 757 ++jsframe_count;
834 } 758 }
835 } 759 }
836 Translation translation(&translations_, frame_count, jsframe_count, zone()); 760 Translation translation(&translations_, frame_count, jsframe_count, zone());
837 WriteTranslation(environment, &translation); 761 WriteTranslation(environment, &translation);
838 int deoptimization_index = deoptimizations_.length(); 762 int deoptimization_index = deoptimizations_.length();
839 int pc_offset = masm()->pc_offset(); 763 int pc_offset = masm()->pc_offset();
840 environment->Register(deoptimization_index, 764 environment->Register(deoptimization_index, translation.index(),
841 translation.index(),
842 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 765 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
843 deoptimizations_.Add(environment, zone()); 766 deoptimizations_.Add(environment, zone());
844 } 767 }
845 } 768 }
846 769
847 770
848 void LCodeGen::DeoptimizeIf(Condition condition, 771 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
849 LEnvironment* environment, 772 const char* detail,
850 Deoptimizer::BailoutType bailout_type) { 773 Deoptimizer::BailoutType bailout_type,
774 CRegister cr) {
775 LEnvironment* environment = instr->environment();
851 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 776 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
852 DCHECK(environment->HasBeenRegistered()); 777 DCHECK(environment->HasBeenRegistered());
853 int id = environment->deoptimization_index(); 778 int id = environment->deoptimization_index();
854 DCHECK(info()->IsOptimizing() || info()->IsStub()); 779 DCHECK(info()->IsOptimizing() || info()->IsStub());
855 Address entry = 780 Address entry =
856 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 781 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
857 if (entry == NULL) { 782 if (entry == NULL) {
858 Abort(kBailoutWasNotPrepared); 783 Abort(kBailoutWasNotPrepared);
859 return; 784 return;
860 } 785 }
861 786
862 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { 787 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
788 CRegister alt_cr = cr6;
863 Register scratch = scratch0(); 789 Register scratch = scratch0();
864 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); 790 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
791 Label no_deopt;
792 DCHECK(!alt_cr.is(cr));
793 __ Push(r4, scratch);
794 __ mov(scratch, Operand(count));
795 __ lwz(r4, MemOperand(scratch));
796 __ subi(r4, r4, Operand(1));
797 __ cmpi(r4, Operand::Zero(), alt_cr);
798 __ bne(&no_deopt, alt_cr);
799 __ li(r4, Operand(FLAG_deopt_every_n_times));
800 __ stw(r4, MemOperand(scratch));
801 __ Pop(r4, scratch);
865 802
866 // Store the condition on the stack if necessary 803 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
867 if (condition != al) { 804 __ bind(&no_deopt);
868 __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition)); 805 __ stw(r4, MemOperand(scratch));
869 __ mov(scratch, Operand(1), LeaveCC, condition); 806 __ Pop(r4, scratch);
870 __ push(scratch);
871 }
872
873 __ push(r1);
874 __ mov(scratch, Operand(count));
875 __ ldr(r1, MemOperand(scratch));
876 __ sub(r1, r1, Operand(1), SetCC);
877 __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
878 __ str(r1, MemOperand(scratch));
879 __ pop(r1);
880
881 if (condition != al) {
882 // Clean up the stack before the deoptimizer call
883 __ pop(scratch);
884 }
885
886 __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
887
888 // 'Restore' the condition in a slightly hacky way. (It would be better
889 // to use 'msr' and 'mrs' instructions here, but they are not supported by
890 // our ARM simulator).
891 if (condition != al) {
892 condition = ne;
893 __ cmp(scratch, Operand::Zero());
894 }
895 } 807 }
896 808
897 if (info()->ShouldTrapOnDeopt()) { 809 if (info()->ShouldTrapOnDeopt()) {
898 __ stop("trap_on_deopt", condition); 810 __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
899 } 811 }
900 812
813 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
814 instr->Mnemonic(), detail);
901 DCHECK(info()->IsStub() || frame_is_built_); 815 DCHECK(info()->IsStub() || frame_is_built_);
902 // Go through jump table if we need to handle condition, build frame, or 816 // Go through jump table if we need to handle condition, build frame, or
903 // restore caller doubles. 817 // restore caller doubles.
904 if (condition == al && frame_is_built_ && 818 if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
905 !info()->saves_caller_doubles()) { 819 DeoptComment(reason);
906 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 820 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
907 } else { 821 } else {
822 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
823 !frame_is_built_);
908 // We often have several deopts to the same entry, reuse the last 824 // We often have several deopts to the same entry, reuse the last
909 // jump entry if this is the case. 825 // jump entry if this is the case.
910 if (deopt_jump_table_.is_empty() || 826 if (jump_table_.is_empty() ||
911 (deopt_jump_table_.last().address != entry) || 827 !table_entry.IsEquivalentTo(jump_table_.last())) {
912 (deopt_jump_table_.last().bailout_type != bailout_type) || 828 jump_table_.Add(table_entry, zone());
913 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
914 Deoptimizer::JumpTableEntry table_entry(entry,
915 bailout_type,
916 !frame_is_built_);
917 deopt_jump_table_.Add(table_entry, zone());
918 } 829 }
919 __ b(condition, &deopt_jump_table_.last().label); 830 __ b(cond, &jump_table_.last().label, cr);
920 } 831 }
921 } 832 }
922 833
923 834
924 void LCodeGen::DeoptimizeIf(Condition condition, 835 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
925 LEnvironment* environment) { 836 CRegister cr, const char* detail) {
926 Deoptimizer::BailoutType bailout_type = info()->IsStub() 837 Deoptimizer::BailoutType bailout_type =
927 ? Deoptimizer::LAZY 838 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
928 : Deoptimizer::EAGER; 839 DeoptimizeIf(condition, instr, detail, bailout_type, cr);
929 DeoptimizeIf(condition, environment, bailout_type);
930 } 840 }
931 841
932 842
933 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 843 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
934 int length = deoptimizations_.length(); 844 int length = deoptimizations_.length();
935 if (length == 0) return; 845 if (length == 0) return;
936 Handle<DeoptimizationInputData> data = 846 Handle<DeoptimizationInputData> data =
937 DeoptimizationInputData::New(isolate(), length, TENURED); 847 DeoptimizationInputData::New(isolate(), length, TENURED);
938 848
939 Handle<ByteArray> translations = 849 Handle<ByteArray> translations =
940 translations_.CreateByteArray(isolate()->factory()); 850 translations_.CreateByteArray(isolate()->factory());
941 data->SetTranslationByteArray(*translations); 851 data->SetTranslationByteArray(*translations);
942 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); 852 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
943 data->SetOptimizationId(Smi::FromInt(info_->optimization_id())); 853 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
944 if (info_->IsOptimizing()) { 854 if (info_->IsOptimizing()) {
945 // Reference to shared function info does not change between phases. 855 // Reference to shared function info does not change between phases.
946 AllowDeferredHandleDereference allow_handle_dereference; 856 AllowDeferredHandleDereference allow_handle_dereference;
947 data->SetSharedFunctionInfo(*info_->shared_info()); 857 data->SetSharedFunctionInfo(*info_->shared_info());
948 } else { 858 } else {
949 data->SetSharedFunctionInfo(Smi::FromInt(0)); 859 data->SetSharedFunctionInfo(Smi::FromInt(0));
950 } 860 }
951 861
952 Handle<FixedArray> literals = 862 Handle<FixedArray> literals =
953 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); 863 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
954 { AllowDeferredHandleDereference copy_handles; 864 {
865 AllowDeferredHandleDereference copy_handles;
955 for (int i = 0; i < deoptimization_literals_.length(); i++) { 866 for (int i = 0; i < deoptimization_literals_.length(); i++) {
956 literals->set(i, *deoptimization_literals_[i]); 867 literals->set(i, *deoptimization_literals_[i]);
957 } 868 }
958 data->SetLiteralArray(*literals); 869 data->SetLiteralArray(*literals);
959 } 870 }
960 871
961 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); 872 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
962 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); 873 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
963 874
964 // Populate the deoptimization entries. 875 // Populate the deoptimization entries.
(...skipping 18 matching lines...)
983 return result; 894 return result;
984 } 895 }
985 896
986 897
987 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { 898 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
988 DCHECK(deoptimization_literals_.length() == 0); 899 DCHECK(deoptimization_literals_.length() == 0);
989 900
990 const ZoneList<Handle<JSFunction> >* inlined_closures = 901 const ZoneList<Handle<JSFunction> >* inlined_closures =
991 chunk()->inlined_closures(); 902 chunk()->inlined_closures();
992 903
993 for (int i = 0, length = inlined_closures->length(); 904 for (int i = 0, length = inlined_closures->length(); i < length; i++) {
994 i < length;
995 i++) {
996 DefineDeoptimizationLiteral(inlined_closures->at(i)); 905 DefineDeoptimizationLiteral(inlined_closures->at(i));
997 } 906 }
998 907
999 inlined_function_count_ = deoptimization_literals_.length(); 908 inlined_function_count_ = deoptimization_literals_.length();
1000 } 909 }
1001 910
1002 911
1003 void LCodeGen::RecordSafepointWithLazyDeopt( 912 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
1004 LInstruction* instr, SafepointMode safepoint_mode) { 913 SafepointMode safepoint_mode) {
1005 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { 914 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
1006 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); 915 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
1007 } else { 916 } else {
1008 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 917 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
1009 RecordSafepointWithRegisters( 918 RecordSafepointWithRegisters(instr->pointer_map(), 0,
1010 instr->pointer_map(), 0, Safepoint::kLazyDeopt); 919 Safepoint::kLazyDeopt);
1011 } 920 }
1012 } 921 }
1013 922
1014 923
1015 void LCodeGen::RecordSafepoint( 924 void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
1016 LPointerMap* pointers, 925 int arguments, Safepoint::DeoptMode deopt_mode) {
1017 Safepoint::Kind kind,
1018 int arguments,
1019 Safepoint::DeoptMode deopt_mode) {
1020 DCHECK(expected_safepoint_kind_ == kind); 926 DCHECK(expected_safepoint_kind_ == kind);
1021 927
1022 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); 928 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1023 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), 929 Safepoint safepoint =
1024 kind, arguments, deopt_mode); 930 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
1025 for (int i = 0; i < operands->length(); i++) { 931 for (int i = 0; i < operands->length(); i++) {
1026 LOperand* pointer = operands->at(i); 932 LOperand* pointer = operands->at(i);
1027 if (pointer->IsStackSlot()) { 933 if (pointer->IsStackSlot()) {
1028 safepoint.DefinePointerSlot(pointer->index(), zone()); 934 safepoint.DefinePointerSlot(pointer->index(), zone());
1029 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { 935 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1030 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); 936 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1031 } 937 }
1032 } 938 }
1033 if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) { 939 #if V8_OOL_CONSTANT_POOL
1034 // Register pp always contains a pointer to the constant pool. 940 if (kind & Safepoint::kWithRegisters) {
1035 safepoint.DefinePointerRegister(pp, zone()); 941 // kConstantPoolRegister always contains a pointer to the constant pool.
942 safepoint.DefinePointerRegister(kConstantPoolRegister, zone());
1036 } 943 }
944 #endif
1037 } 945 }
1038 946
1039 947
1040 void LCodeGen::RecordSafepoint(LPointerMap* pointers, 948 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1041 Safepoint::DeoptMode deopt_mode) { 949 Safepoint::DeoptMode deopt_mode) {
1042 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); 950 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
1043 } 951 }
1044 952
1045 953
1046 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { 954 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
1047 LPointerMap empty_pointers(zone()); 955 LPointerMap empty_pointers(zone());
1048 RecordSafepoint(&empty_pointers, deopt_mode); 956 RecordSafepoint(&empty_pointers, deopt_mode);
1049 } 957 }
1050 958
1051 959
1052 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, 960 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1053 int arguments, 961 int arguments,
1054 Safepoint::DeoptMode deopt_mode) { 962 Safepoint::DeoptMode deopt_mode) {
1055 RecordSafepoint( 963 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
1056 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
1057 } 964 }
1058 965
1059 966
1060 void LCodeGen::RecordAndWritePosition(int position) { 967 void LCodeGen::RecordAndWritePosition(int position) {
1061 if (position == RelocInfo::kNoPosition) return; 968 if (position == RelocInfo::kNoPosition) return;
1062 masm()->positions_recorder()->RecordPosition(position); 969 masm()->positions_recorder()->RecordPosition(position);
1063 masm()->positions_recorder()->WriteRecordedPositions(); 970 masm()->positions_recorder()->WriteRecordedPositions();
1064 } 971 }
1065 972
1066 973
1067 static const char* LabelType(LLabel* label) { 974 static const char* LabelType(LLabel* label) {
1068 if (label->is_loop_header()) return " (loop header)"; 975 if (label->is_loop_header()) return " (loop header)";
1069 if (label->is_osr_entry()) return " (OSR entry)"; 976 if (label->is_osr_entry()) return " (OSR entry)";
1070 return ""; 977 return "";
1071 } 978 }
1072 979
1073 980
1074 void LCodeGen::DoLabel(LLabel* label) { 981 void LCodeGen::DoLabel(LLabel* label) {
1075 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", 982 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1076 current_instruction_, 983 current_instruction_, label->hydrogen_value()->id(),
1077 label->hydrogen_value()->id(), 984 label->block_id(), LabelType(label));
1078 label->block_id(),
1079 LabelType(label));
1080 __ bind(label->label()); 985 __ bind(label->label());
1081 current_block_ = label->block_id(); 986 current_block_ = label->block_id();
1082 DoGap(label); 987 DoGap(label);
1083 } 988 }
1084 989
1085 990
1086 void LCodeGen::DoParallelMove(LParallelMove* move) { 991 void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
1087 resolver_.Resolve(move);
1088 }
1089 992
1090 993
1091 void LCodeGen::DoGap(LGap* gap) { 994 void LCodeGen::DoGap(LGap* gap) {
1092 for (int i = LGap::FIRST_INNER_POSITION; 995 for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
1093 i <= LGap::LAST_INNER_POSITION;
1094 i++) { 996 i++) {
1095 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); 997 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1096 LParallelMove* move = gap->GetParallelMove(inner_pos); 998 LParallelMove* move = gap->GetParallelMove(inner_pos);
1097 if (move != NULL) DoParallelMove(move); 999 if (move != NULL) DoParallelMove(move);
1098 } 1000 }
1099 } 1001 }
1100 1002
1101 1003
1102 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { 1004 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
1103 DoGap(instr);
1104 }
1105 1005
1106 1006
1107 void LCodeGen::DoParameter(LParameter* instr) { 1007 void LCodeGen::DoParameter(LParameter* instr) {
1108 // Nothing to do. 1008 // Nothing to do.
1109 } 1009 }
1110 1010
1111 1011
1112 void LCodeGen::DoCallStub(LCallStub* instr) { 1012 void LCodeGen::DoCallStub(LCallStub* instr) {
1113 DCHECK(ToRegister(instr->context()).is(cp)); 1013 DCHECK(ToRegister(instr->context()).is(cp));
1114 DCHECK(ToRegister(instr->result()).is(r0)); 1014 DCHECK(ToRegister(instr->result()).is(r3));
1115 switch (instr->hydrogen()->major_key()) { 1015 switch (instr->hydrogen()->major_key()) {
1116 case CodeStub::RegExpExec: { 1016 case CodeStub::RegExpExec: {
1117 RegExpExecStub stub(isolate()); 1017 RegExpExecStub stub(isolate());
1118 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1018 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1119 break; 1019 break;
1120 } 1020 }
1121 case CodeStub::SubString: { 1021 case CodeStub::SubString: {
1122 SubStringStub stub(isolate()); 1022 SubStringStub stub(isolate());
1123 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1023 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1124 break; 1024 break;
(...skipping 19 matching lines...)
1144 int32_t divisor = instr->divisor(); 1044 int32_t divisor = instr->divisor();
1145 DCHECK(dividend.is(ToRegister(instr->result()))); 1045 DCHECK(dividend.is(ToRegister(instr->result())));
1146 1046
1147 // Theoretically, a variation of the branch-free code for integer division by 1047 // Theoretically, a variation of the branch-free code for integer division by
1148 // a power of 2 (calculating the remainder via an additional multiplication 1048 // a power of 2 (calculating the remainder via an additional multiplication
1149 // (which gets simplified to an 'and') and subtraction) should be faster, and 1049 // (which gets simplified to an 'and') and subtraction) should be faster, and
1150 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to 1050 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1151 // indicate that positive dividends are heavily favored, so the branching 1051 // indicate that positive dividends are heavily favored, so the branching
1152 // version performs better. 1052 // version performs better.
1153 HMod* hmod = instr->hydrogen(); 1053 HMod* hmod = instr->hydrogen();
1154 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1054 int32_t shift = WhichPowerOf2Abs(divisor);
1155 Label dividend_is_not_negative, done; 1055 Label dividend_is_not_negative, done;
1156 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 1056 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1157 __ cmp(dividend, Operand::Zero()); 1057 __ cmpwi(dividend, Operand::Zero());
1158 __ b(pl, &dividend_is_not_negative); 1058 __ bge(&dividend_is_not_negative);
1159 // Note that this is correct even for kMinInt operands. 1059 if (shift) {
1160 __ rsb(dividend, dividend, Operand::Zero()); 1060 // Note that this is correct even for kMinInt operands.
1161 __ and_(dividend, dividend, Operand(mask)); 1061 __ neg(dividend, dividend);
1162 __ rsb(dividend, dividend, Operand::Zero(), SetCC); 1062 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
1163 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1063 __ neg(dividend, dividend, LeaveOE, SetRC);
1164 DeoptimizeIf(eq, instr->environment()); 1064 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1065 DeoptimizeIf(eq, instr, cr0);
1066 }
1067 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1068 __ li(dividend, Operand::Zero());
1069 } else {
1070 DeoptimizeIf(al, instr);
1165 } 1071 }
1166 __ b(&done); 1072 __ b(&done);
1167 } 1073 }
1168 1074
1169 __ bind(&dividend_is_not_negative); 1075 __ bind(&dividend_is_not_negative);
1170 __ and_(dividend, dividend, Operand(mask)); 1076 if (shift) {
1077 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
1078 } else {
1079 __ li(dividend, Operand::Zero());
1080 }
1171 __ bind(&done); 1081 __ bind(&done);
1172 } 1082 }
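For reference, a minimal C++ sketch of the branching power-of-two modulus generated above (illustrative only, not part of this patch; the helper name is hypothetical):

// Assumes |divisor| == 2^shift with 0 <= shift < 32; the -0 deopt case is
// the caller's problem, as in DoModByPowerOf2I above.
int32_t ModByPowerOf2(int32_t dividend, int shift) {
  uint32_t mask = (1u << shift) - 1;  // the ExtractBitRange mask
  if (dividend < 0) {
    // neg / ExtractBitRange / neg: correct even for kMinInt.
    uint32_t magnitude = 0u - static_cast<uint32_t>(dividend);
    return -static_cast<int32_t>(magnitude & mask);
  }
  return static_cast<int32_t>(static_cast<uint32_t>(dividend) & mask);
}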
1173 1083
1174 1084
1175 void LCodeGen::DoModByConstI(LModByConstI* instr) { 1085 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1176 Register dividend = ToRegister(instr->dividend()); 1086 Register dividend = ToRegister(instr->dividend());
1177 int32_t divisor = instr->divisor(); 1087 int32_t divisor = instr->divisor();
1178 Register result = ToRegister(instr->result()); 1088 Register result = ToRegister(instr->result());
1179 DCHECK(!dividend.is(result)); 1089 DCHECK(!dividend.is(result));
1180 1090
1181 if (divisor == 0) { 1091 if (divisor == 0) {
1182 DeoptimizeIf(al, instr->environment()); 1092 DeoptimizeIf(al, instr);
1183 return; 1093 return;
1184 } 1094 }
1185 1095
1186 __ TruncatingDiv(result, dividend, Abs(divisor)); 1096 __ TruncatingDiv(result, dividend, Abs(divisor));
1187 __ mov(ip, Operand(Abs(divisor))); 1097 __ mov(ip, Operand(Abs(divisor)));
1188 __ smull(result, ip, result, ip); 1098 __ mullw(result, result, ip);
1189 __ sub(result, dividend, result, SetCC); 1099 __ sub(result, dividend, result, LeaveOE, SetRC);
1190 1100
1191 // Check for negative zero. 1101 // Check for negative zero.
1192 HMod* hmod = instr->hydrogen(); 1102 HMod* hmod = instr->hydrogen();
1193 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1103 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1194 Label remainder_not_zero; 1104 Label remainder_not_zero;
1195 __ b(ne, &remainder_not_zero); 1105 __ bne(&remainder_not_zero, cr0);
1196 __ cmp(dividend, Operand::Zero()); 1106 __ cmpwi(dividend, Operand::Zero());
1197 DeoptimizeIf(lt, instr->environment()); 1107 DeoptimizeIf(lt, instr);
1198 __ bind(&remainder_not_zero); 1108 __ bind(&remainder_not_zero);
1199 } 1109 }
1200 } 1110 }
1201 1111
1202 1112
1203 void LCodeGen::DoModI(LModI* instr) { 1113 void LCodeGen::DoModI(LModI* instr) {
1204 HMod* hmod = instr->hydrogen(); 1114 HMod* hmod = instr->hydrogen();
1205 if (CpuFeatures::IsSupported(SUDIV)) { 1115 Register left_reg = ToRegister(instr->left());
1206 CpuFeatureScope scope(masm(), SUDIV); 1116 Register right_reg = ToRegister(instr->right());
1117 Register result_reg = ToRegister(instr->result());
1118 Register scratch = scratch0();
1119 Label done;
1207 1120
1208 Register left_reg = ToRegister(instr->left()); 1121 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1209 Register right_reg = ToRegister(instr->right()); 1122 __ li(r0, Operand::Zero()); // clear xer
1210 Register result_reg = ToRegister(instr->result()); 1123 __ mtxer(r0);
1124 }
1211 1125
1212 Label done; 1126 __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
1213 // Check for x % 0, sdiv might signal an exception. We have to deopt in this 1127
1214 // case because we can't return a NaN. 1128 // Check for x % 0.
1215 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { 1129 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1216 __ cmp(right_reg, Operand::Zero()); 1130 __ cmpwi(right_reg, Operand::Zero());
1217 DeoptimizeIf(eq, instr->environment()); 1131 DeoptimizeIf(eq, instr);
1132 }
1133
1134 // Check for kMinInt % -1, divw will return undefined, which is not what we
1135 // want. We have to deopt if we care about -0, because we can't return that.
1136 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1137 Label no_overflow_possible;
1138 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1139 DeoptimizeIf(overflow, instr, cr0);
1140 } else {
1141 __ bnooverflow(&no_overflow_possible, cr0);
1142 __ li(result_reg, Operand::Zero());
1143 __ b(&done);
1218 } 1144 }
1145 __ bind(&no_overflow_possible);
1146 }
1219 1147
1220 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we 1148 __ mullw(scratch, right_reg, scratch);
1221 // want. We have to deopt if we care about -0, because we can't return that. 1149 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
1222 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1223 Label no_overflow_possible;
1224 __ cmp(left_reg, Operand(kMinInt));
1225 __ b(ne, &no_overflow_possible);
1226 __ cmp(right_reg, Operand(-1));
1227 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1228 DeoptimizeIf(eq, instr->environment());
1229 } else {
1230 __ b(ne, &no_overflow_possible);
1231 __ mov(result_reg, Operand::Zero());
1232 __ jmp(&done);
1233 }
1234 __ bind(&no_overflow_possible);
1235 }
1236 1150
1237 // For 'r3 = r1 % r2' we can have the following ARM code: 1151 // If we care about -0, test if the dividend is <0 and the result is 0.
1238 // sdiv r3, r1, r2 1152 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1239 // mls r3, r3, r2, r1 1153 __ bne(&done, cr0);
1154 __ cmpwi(left_reg, Operand::Zero());
1155 DeoptimizeIf(lt, instr);
1156 }
1240 1157
1241 __ sdiv(result_reg, left_reg, right_reg); 1158 __ bind(&done);
1242 __ Mls(result_reg, result_reg, right_reg, left_reg);
1243
1244 // If we care about -0, test if the dividend is <0 and the result is 0.
1245 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1246 __ cmp(result_reg, Operand::Zero());
1247 __ b(ne, &done);
1248 __ cmp(left_reg, Operand::Zero());
1249 DeoptimizeIf(lt, instr->environment());
1250 }
1251 __ bind(&done);
1252
1253 } else {
1254 // General case, without any SDIV support.
1255 Register left_reg = ToRegister(instr->left());
1256 Register right_reg = ToRegister(instr->right());
1257 Register result_reg = ToRegister(instr->result());
1258 Register scratch = scratch0();
1259 DCHECK(!scratch.is(left_reg));
1260 DCHECK(!scratch.is(right_reg));
1261 DCHECK(!scratch.is(result_reg));
1262 DwVfpRegister dividend = ToDoubleRegister(instr->temp());
1263 DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
1264 DCHECK(!divisor.is(dividend));
1265 LowDwVfpRegister quotient = double_scratch0();
1266 DCHECK(!quotient.is(dividend));
1267 DCHECK(!quotient.is(divisor));
1268
1269 Label done;
1270 // Check for x % 0, we have to deopt in this case because we can't return a
1271 // NaN.
1272 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1273 __ cmp(right_reg, Operand::Zero());
1274 DeoptimizeIf(eq, instr->environment());
1275 }
1276
1277 __ Move(result_reg, left_reg);
1278 // Load the arguments in VFP registers. The divisor value is preloaded
1279 // before. Be careful that 'right_reg' is only live on entry.
1280 // TODO(svenpanne) The last comments seems to be wrong nowadays.
1281 __ vmov(double_scratch0().low(), left_reg);
1282 __ vcvt_f64_s32(dividend, double_scratch0().low());
1283 __ vmov(double_scratch0().low(), right_reg);
1284 __ vcvt_f64_s32(divisor, double_scratch0().low());
1285
1286 // We do not care about the sign of the divisor. Note that we still handle
1287 // the kMinInt % -1 case correctly, though.
1288 __ vabs(divisor, divisor);
1289 // Compute the quotient and round it to a 32bit integer.
1290 __ vdiv(quotient, dividend, divisor);
1291 __ vcvt_s32_f64(quotient.low(), quotient);
1292 __ vcvt_f64_s32(quotient, quotient.low());
1293
1294 // Compute the remainder in result.
1295 __ vmul(double_scratch0(), divisor, quotient);
1296 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
1297 __ vmov(scratch, double_scratch0().low());
1298 __ sub(result_reg, left_reg, scratch, SetCC);
1299
1300 // If we care about -0, test if the dividend is <0 and the result is 0.
1301 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1302 __ b(ne, &done);
1303 __ cmp(left_reg, Operand::Zero());
1304 DeoptimizeIf(mi, instr->environment());
1305 }
1306 __ bind(&done);
1307 }
1308 } 1159 }
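In C++ terms, the divw/mullw/sub sequence above reconstructs the remainder from the quotient, since PPC has no remainder instruction; a hedged sketch with the deopt paths (x % 0, kMinInt % -1, -0) elided:

// Hypothetical helper mirroring DoModI's arithmetic; assumes right != 0 and
// that kMinInt % -1 was filtered out by the overflow check above.
int32_t ModI(int32_t left, int32_t right) {
  int32_t quotient = left / right;     // divw
  int32_t product = quotient * right;  // mullw
  return left - product;               // sub
}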
1309 1160
1310 1161
1311 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 1162 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1312 Register dividend = ToRegister(instr->dividend()); 1163 Register dividend = ToRegister(instr->dividend());
1313 int32_t divisor = instr->divisor(); 1164 int32_t divisor = instr->divisor();
1314 Register result = ToRegister(instr->result()); 1165 Register result = ToRegister(instr->result());
1315 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 1166 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1316 DCHECK(!result.is(dividend)); 1167 DCHECK(!result.is(dividend));
1317 1168
1318 // Check for (0 / -x) that will produce negative zero. 1169 // Check for (0 / -x) that will produce negative zero.
1319 HDiv* hdiv = instr->hydrogen(); 1170 HDiv* hdiv = instr->hydrogen();
1320 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1171 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1321 __ cmp(dividend, Operand::Zero()); 1172 __ cmpwi(dividend, Operand::Zero());
1322 DeoptimizeIf(eq, instr->environment()); 1173 DeoptimizeIf(eq, instr);
1323 } 1174 }
1324 // Check for (kMinInt / -1). 1175 // Check for (kMinInt / -1).
1325 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 1176 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1326 __ cmp(dividend, Operand(kMinInt)); 1177 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1327 DeoptimizeIf(eq, instr->environment()); 1178 __ cmpw(dividend, r0);
1179 DeoptimizeIf(eq, instr);
1328 } 1180 }
1181
1182 int32_t shift = WhichPowerOf2Abs(divisor);
1183
1329 // Deoptimize if remainder will not be 0. 1184 // Deoptimize if remainder will not be 0.
1330 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 1185 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
1331 divisor != 1 && divisor != -1) { 1186 __ TestBitRange(dividend, shift - 1, 0, r0);
1332 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1187 DeoptimizeIf(ne, instr, cr0);
1333 __ tst(dividend, Operand(mask));
1334 DeoptimizeIf(ne, instr->environment());
1335 } 1188 }
1336 1189
1337 if (divisor == -1) { // Nice shortcut, not needed for correctness. 1190 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1338 __ rsb(result, dividend, Operand(0)); 1191 __ neg(result, dividend);
1339 return; 1192 return;
1340 } 1193 }
1341 int32_t shift = WhichPowerOf2Abs(divisor);
1342 if (shift == 0) { 1194 if (shift == 0) {
1343 __ mov(result, dividend); 1195 __ mr(result, dividend);
1344 } else if (shift == 1) {
1345 __ add(result, dividend, Operand(dividend, LSR, 31));
1346 } else { 1196 } else {
1347 __ mov(result, Operand(dividend, ASR, 31)); 1197 if (shift == 1) {
1348 __ add(result, dividend, Operand(result, LSR, 32 - shift)); 1198 __ srwi(result, dividend, Operand(31));
1199 } else {
1200 __ srawi(result, dividend, 31);
1201 __ srwi(result, result, Operand(32 - shift));
1202 }
1203 __ add(result, dividend, result);
1204 __ srawi(result, result, shift);
1349 } 1205 }
1350 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); 1206 if (divisor < 0) __ neg(result, result);
1351 if (divisor < 0) __ rsb(result, result, Operand(0));
1352 } 1207 }
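The bias computed above from the sign word makes the arithmetic shift round toward zero rather than toward minus infinity; a hedged C++ sketch, assuming >> of a negative int32_t is an arithmetic shift and kMinInt / -1 was already deoptimized (helper name is hypothetical):

// Mirrors DoDivByPowerOf2I for |divisor| == 2^shift.
int32_t DivByPowerOf2(int32_t dividend, int shift, bool divisor_is_negative) {
  uint32_t sign_word = static_cast<uint32_t>(dividend >> 31);  // 0 or ~0
  int32_t bias =
      (shift == 0) ? 0 : static_cast<int32_t>(sign_word >> (32 - shift));
  int32_t result = (dividend + bias) >> shift;  // now rounds toward zero
  return divisor_is_negative ? -result : result;
}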
1353 1208
1354 1209
1355 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 1210 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1356 Register dividend = ToRegister(instr->dividend()); 1211 Register dividend = ToRegister(instr->dividend());
1357 int32_t divisor = instr->divisor(); 1212 int32_t divisor = instr->divisor();
1358 Register result = ToRegister(instr->result()); 1213 Register result = ToRegister(instr->result());
1359 DCHECK(!dividend.is(result)); 1214 DCHECK(!dividend.is(result));
1360 1215
1361 if (divisor == 0) { 1216 if (divisor == 0) {
1362 DeoptimizeIf(al, instr->environment()); 1217 DeoptimizeIf(al, instr);
1363 return; 1218 return;
1364 } 1219 }
1365 1220
1366 // Check for (0 / -x) that will produce negative zero. 1221 // Check for (0 / -x) that will produce negative zero.
1367 HDiv* hdiv = instr->hydrogen(); 1222 HDiv* hdiv = instr->hydrogen();
1368 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1223 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1369 __ cmp(dividend, Operand::Zero()); 1224 __ cmpwi(dividend, Operand::Zero());
1370 DeoptimizeIf(eq, instr->environment()); 1225 DeoptimizeIf(eq, instr);
1371 } 1226 }
1372 1227
1373 __ TruncatingDiv(result, dividend, Abs(divisor)); 1228 __ TruncatingDiv(result, dividend, Abs(divisor));
1374 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1229 if (divisor < 0) __ neg(result, result);
1375 1230
1376 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1231 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1232 Register scratch = scratch0();
1377 __ mov(ip, Operand(divisor)); 1233 __ mov(ip, Operand(divisor));
1378 __ smull(scratch0(), ip, result, ip); 1234 __ mullw(scratch, result, ip);
1379 __ sub(scratch0(), scratch0(), dividend, SetCC); 1235 __ cmpw(scratch, dividend);
1380 DeoptimizeIf(ne, instr->environment()); 1236 DeoptimizeIf(ne, instr);
1381 } 1237 }
1382 } 1238 }
1383 1239
1384 1240
1385 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 1241 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1386 void LCodeGen::DoDivI(LDivI* instr) { 1242 void LCodeGen::DoDivI(LDivI* instr) {
1387 HBinaryOperation* hdiv = instr->hydrogen(); 1243 HBinaryOperation* hdiv = instr->hydrogen();
1388 Register dividend = ToRegister(instr->dividend()); 1244 const Register dividend = ToRegister(instr->dividend());
1389 Register divisor = ToRegister(instr->divisor()); 1245 const Register divisor = ToRegister(instr->divisor());
1390 Register result = ToRegister(instr->result()); 1246 Register result = ToRegister(instr->result());
1391 1247
1248 DCHECK(!dividend.is(result));
1249 DCHECK(!divisor.is(result));
1250
1251 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1252 __ li(r0, Operand::Zero()); // clear xer
1253 __ mtxer(r0);
1254 }
1255
1256 __ divw(result, dividend, divisor, SetOE, SetRC);
1257
1392 // Check for x / 0. 1258 // Check for x / 0.
1393 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1259 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1394 __ cmp(divisor, Operand::Zero()); 1260 __ cmpwi(divisor, Operand::Zero());
1395 DeoptimizeIf(eq, instr->environment()); 1261 DeoptimizeIf(eq, instr);
1396 } 1262 }
1397 1263
1398 // Check for (0 / -x) that will produce negative zero. 1264 // Check for (0 / -x) that will produce negative zero.
1399 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1265 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1400 Label positive; 1266 Label dividend_not_zero;
1401 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { 1267 __ cmpwi(dividend, Operand::Zero());
1402 // Do the test only if it hadn't be done above. 1268 __ bne(&dividend_not_zero);
1403 __ cmp(divisor, Operand::Zero()); 1269 __ cmpwi(divisor, Operand::Zero());
1404 } 1270 DeoptimizeIf(lt, instr);
1405 __ b(pl, &positive); 1271 __ bind(&dividend_not_zero);
1406 __ cmp(dividend, Operand::Zero());
1407 DeoptimizeIf(eq, instr->environment());
1408 __ bind(&positive);
1409 } 1272 }
1410 1273
1411 // Check for (kMinInt / -1). 1274 // Check for (kMinInt / -1).
1412 if (hdiv->CheckFlag(HValue::kCanOverflow) && 1275 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1413 (!CpuFeatures::IsSupported(SUDIV) || 1276 Label no_overflow_possible;
1414 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { 1277 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1415 // We don't need to check for overflow when truncating with sdiv 1278 DeoptimizeIf(overflow, instr, cr0);
1416 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. 1279 } else {
1417 __ cmp(dividend, Operand(kMinInt)); 1280 // When truncating, we want kMinInt / -1 = kMinInt.
1418 __ cmp(divisor, Operand(-1), eq); 1281 __ bnooverflow(&no_overflow_possible, cr0);
1419 DeoptimizeIf(eq, instr->environment()); 1282 __ mr(result, dividend);
1283 }
1284 __ bind(&no_overflow_possible);
1420 } 1285 }
1421 1286
1422 if (CpuFeatures::IsSupported(SUDIV)) { 1287 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1423 CpuFeatureScope scope(masm(), SUDIV); 1288 // Deoptimize if remainder is not 0.
1424 __ sdiv(result, dividend, divisor); 1289 Register scratch = scratch0();
1425 } else { 1290 __ mullw(scratch, divisor, result);
1426 DoubleRegister vleft = ToDoubleRegister(instr->temp()); 1291 __ cmpw(dividend, scratch);
1427 DoubleRegister vright = double_scratch0(); 1292 DeoptimizeIf(ne, instr);
1428 __ vmov(double_scratch0().low(), dividend);
1429 __ vcvt_f64_s32(vleft, double_scratch0().low());
1430 __ vmov(double_scratch0().low(), divisor);
1431 __ vcvt_f64_s32(vright, double_scratch0().low());
1432 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1433 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1434 __ vmov(result, double_scratch0().low());
1435 }
1436
1437 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1438 // Compute remainder and deopt if it's not zero.
1439 Register remainder = scratch0();
1440 __ Mls(remainder, result, divisor, dividend);
1441 __ cmp(remainder, Operand::Zero());
1442 DeoptimizeIf(ne, instr->environment());
1443 } 1293 }
1444 } 1294 }
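The truncating shortcut above keeps kMinInt / -1 well defined by reusing the dividend when divw overflows; a small illustrative sketch (hypothetical helper; divisor != 0 is guaranteed by the deopt above):

int32_t TruncatingDivI(int32_t dividend, int32_t divisor) {
  // When all uses truncate to int32, kMinInt / -1 wraps back to kMinInt
  // instead of deoptimizing ("__ mr(result, dividend)" above).
  if (dividend == kMinInt && divisor == -1) return dividend;
  return dividend / divisor;  // divw
}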
1445 1295
1446 1296
1447 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1448 DwVfpRegister addend = ToDoubleRegister(instr->addend());
1449 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1450 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1451
1452 // This is computed in-place.
1453 DCHECK(addend.is(ToDoubleRegister(instr->result())));
1454
1455 __ vmla(addend, multiplier, multiplicand);
1456 }
1457
1458
1459 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1460 DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1461 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1462 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1463
1464 // This is computed in-place.
1465 DCHECK(minuend.is(ToDoubleRegister(instr->result())));
1466
1467 __ vmls(minuend, multiplier, multiplicand);
1468 }
1469
1470
1471 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { 1297 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1298 HBinaryOperation* hdiv = instr->hydrogen();
1472 Register dividend = ToRegister(instr->dividend()); 1299 Register dividend = ToRegister(instr->dividend());
1473 Register result = ToRegister(instr->result()); 1300 Register result = ToRegister(instr->result());
1474 int32_t divisor = instr->divisor(); 1301 int32_t divisor = instr->divisor();
1475 1302
1476 // If the divisor is 1, return the dividend.
1477 if (divisor == 1) {
1478 __ Move(result, dividend);
1479 return;
1480 }
1481
1482 // If the divisor is positive, things are easy: There can be no deopts and we 1303 // If the divisor is positive, things are easy: There can be no deopts and we
1483 // can simply do an arithmetic right shift. 1304 // can simply do an arithmetic right shift.
1484 int32_t shift = WhichPowerOf2Abs(divisor); 1305 int32_t shift = WhichPowerOf2Abs(divisor);
1485 if (divisor > 1) { 1306 if (divisor > 0) {
1486 __ mov(result, Operand(dividend, ASR, shift)); 1307 if (shift || !result.is(dividend)) {
1308 __ srawi(result, dividend, shift);
1309 }
1487 return; 1310 return;
1488 } 1311 }
1489 1312
1490 // If the divisor is negative, we have to negate and handle edge cases. 1313 // If the divisor is negative, we have to negate and handle edge cases.
1491 __ rsb(result, dividend, Operand::Zero(), SetCC); 1314 OEBit oe = LeaveOE;
1492 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1315 #if V8_TARGET_ARCH_PPC64
1493 DeoptimizeIf(eq, instr->environment()); 1316 if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
1317 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1318 __ cmpw(dividend, r0);
1319 DeoptimizeIf(eq, instr);
1320 }
1321 #else
1322 if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
1323 __ li(r0, Operand::Zero()); // clear xer
1324 __ mtxer(r0);
1325 oe = SetOE;
1326 }
1327 #endif
1328
1329 __ neg(result, dividend, oe, SetRC);
1330 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1331 DeoptimizeIf(eq, instr, cr0);
1332 }
1333
1334 // If the negation could not overflow, simply shifting is OK.
1335 #if !V8_TARGET_ARCH_PPC64
1336 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1337 #endif
1338 if (shift) {
1339 __ ShiftRightArithImm(result, result, shift);
1340 }
1341 return;
1342 #if !V8_TARGET_ARCH_PPC64
1494 } 1343 }
1495 1344
1496 // Dividing by -1 is basically negation, unless we overflow. 1345 // Dividing by -1 is basically negation, unless we overflow.
1497 if (divisor == -1) { 1346 if (divisor == -1) {
1498 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1347 DeoptimizeIf(overflow, instr, cr0);
1499 DeoptimizeIf(vs, instr->environment());
1500 }
1501 return; 1348 return;
1502 } 1349 }
1503 1350
1504 // If the negation could not overflow, simply shifting is OK. 1351 Label overflow, done;
1505 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1352 __ boverflow(&overflow, cr0);
1506 __ mov(result, Operand(result, ASR, shift)); 1353 __ srawi(result, result, shift);
1507 return; 1354 __ b(&done);
1508 } 1355 __ bind(&overflow);
1509 1356 __ mov(result, Operand(kMinInt / divisor));
1510 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); 1357 __ bind(&done);
1511 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); 1358 #endif
1512 } 1359 }
1513 1360
1514 1361
1515 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 1362 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1516 Register dividend = ToRegister(instr->dividend()); 1363 Register dividend = ToRegister(instr->dividend());
1517 int32_t divisor = instr->divisor(); 1364 int32_t divisor = instr->divisor();
1518 Register result = ToRegister(instr->result()); 1365 Register result = ToRegister(instr->result());
1519 DCHECK(!dividend.is(result)); 1366 DCHECK(!dividend.is(result));
1520 1367
1521 if (divisor == 0) { 1368 if (divisor == 0) {
1522 DeoptimizeIf(al, instr->environment()); 1369 DeoptimizeIf(al, instr);
1523 return; 1370 return;
1524 } 1371 }
1525 1372
1526 // Check for (0 / -x) that will produce negative zero. 1373 // Check for (0 / -x) that will produce negative zero.
1527 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1374 HMathFloorOfDiv* hdiv = instr->hydrogen();
1528 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1375 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1529 __ cmp(dividend, Operand::Zero()); 1376 __ cmpwi(dividend, Operand::Zero());
1530 DeoptimizeIf(eq, instr->environment()); 1377 DeoptimizeIf(eq, instr);
1531 } 1378 }
1532 1379
1533 // Easy case: We need no dynamic check for the dividend and the flooring 1380 // Easy case: We need no dynamic check for the dividend and the flooring
1534 // division is the same as the truncating division. 1381 // division is the same as the truncating division.
1535 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1382 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1536 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1383 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1537 __ TruncatingDiv(result, dividend, Abs(divisor)); 1384 __ TruncatingDiv(result, dividend, Abs(divisor));
1538 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1385 if (divisor < 0) __ neg(result, result);
1539 return; 1386 return;
1540 } 1387 }
1541 1388
1542 // In the general case we may need to adjust before and after the truncating 1389 // In the general case we may need to adjust before and after the truncating
1543 // division to get a flooring division. 1390 // division to get a flooring division.
1544 Register temp = ToRegister(instr->temp()); 1391 Register temp = ToRegister(instr->temp());
1545 DCHECK(!temp.is(dividend) && !temp.is(result)); 1392 DCHECK(!temp.is(dividend) && !temp.is(result));
1546 Label needs_adjustment, done; 1393 Label needs_adjustment, done;
1547 __ cmp(dividend, Operand::Zero()); 1394 __ cmpwi(dividend, Operand::Zero());
1548 __ b(divisor > 0 ? lt : gt, &needs_adjustment); 1395 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1549 __ TruncatingDiv(result, dividend, Abs(divisor)); 1396 __ TruncatingDiv(result, dividend, Abs(divisor));
1550 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1397 if (divisor < 0) __ neg(result, result);
1551 __ jmp(&done); 1398 __ b(&done);
1552 __ bind(&needs_adjustment); 1399 __ bind(&needs_adjustment);
1553 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); 1400 __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1554 __ TruncatingDiv(result, temp, Abs(divisor)); 1401 __ TruncatingDiv(result, temp, Abs(divisor));
1555 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1402 if (divisor < 0) __ neg(result, result);
1556 __ sub(result, result, Operand(1)); 1403 __ subi(result, result, Operand(1));
1557 __ bind(&done); 1404 __ bind(&done);
1558 } 1405 }
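The adjust-before-truncating step above is what turns a truncating division into a flooring one; a hedged sketch with a worked example (divisor != 0 and overflow are deoptimized above): -7 / 2 becomes trunc((-7 + 1) / 2) - 1 = -4 = floor(-3.5).

// Hypothetical helper mirroring the general case of DoFlooringDivByConstI.
int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
  bool needs_adjustment = (divisor > 0) ? (dividend < 0) : (dividend > 0);
  if (!needs_adjustment) return dividend / divisor;  // truncation == floor
  return (dividend + (divisor > 0 ? 1 : -1)) / divisor - 1;
}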
1559 1406
1560 1407
1561 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 1408 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1562 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { 1409 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1563 HBinaryOperation* hdiv = instr->hydrogen(); 1410 HBinaryOperation* hdiv = instr->hydrogen();
1564 Register left = ToRegister(instr->dividend()); 1411 const Register dividend = ToRegister(instr->dividend());
1565 Register right = ToRegister(instr->divisor()); 1412 const Register divisor = ToRegister(instr->divisor());
1566 Register result = ToRegister(instr->result()); 1413 Register result = ToRegister(instr->result());
1567 1414
1415 DCHECK(!dividend.is(result));
1416 DCHECK(!divisor.is(result));
1417
1418 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1419 __ li(r0, Operand::Zero()); // clear xer
1420 __ mtxer(r0);
1421 }
1422
1423 __ divw(result, dividend, divisor, SetOE, SetRC);
1424
1568 // Check for x / 0. 1425 // Check for x / 0.
1569 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1426 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1570 __ cmp(right, Operand::Zero()); 1427 __ cmpwi(divisor, Operand::Zero());
1571 DeoptimizeIf(eq, instr->environment()); 1428 DeoptimizeIf(eq, instr);
1572 } 1429 }
1573 1430
1574 // Check for (0 / -x) that will produce negative zero. 1431 // Check for (0 / -x) that will produce negative zero.
1575 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1432 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1576 Label positive; 1433 Label dividend_not_zero;
1577 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { 1434 __ cmpwi(dividend, Operand::Zero());
1578 // Do the test only if it hadn't be done above. 1435 __ bne(&dividend_not_zero);
1579 __ cmp(right, Operand::Zero()); 1436 __ cmpwi(divisor, Operand::Zero());
1580 } 1437 DeoptimizeIf(lt, instr);
1581 __ b(pl, &positive); 1438 __ bind(&dividend_not_zero);
1582 __ cmp(left, Operand::Zero());
1583 DeoptimizeIf(eq, instr->environment());
1584 __ bind(&positive);
1585 } 1439 }
1586 1440
1587 // Check for (kMinInt / -1). 1441 // Check for (kMinInt / -1).
1588 if (hdiv->CheckFlag(HValue::kCanOverflow) && 1442 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1589 (!CpuFeatures::IsSupported(SUDIV) || 1443 Label no_overflow_possible;
1590 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { 1444 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1591 // We don't need to check for overflow when truncating with sdiv 1445 DeoptimizeIf(overflow, instr, cr0);
1592 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. 1446 } else {
1593 __ cmp(left, Operand(kMinInt)); 1447 // When truncating, we want kMinInt / -1 = kMinInt.
1594 __ cmp(right, Operand(-1), eq); 1448 __ bnooverflow(&no_overflow_possible, cr0);
1595 DeoptimizeIf(eq, instr->environment()); 1449 __ mr(result, dividend);
1596 } 1450 }
1597 1451 __ bind(&no_overflow_possible);
1598 if (CpuFeatures::IsSupported(SUDIV)) {
1599 CpuFeatureScope scope(masm(), SUDIV);
1600 __ sdiv(result, left, right);
1601 } else {
1602 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1603 DoubleRegister vright = double_scratch0();
1604 __ vmov(double_scratch0().low(), left);
1605 __ vcvt_f64_s32(vleft, double_scratch0().low());
1606 __ vmov(double_scratch0().low(), right);
1607 __ vcvt_f64_s32(vright, double_scratch0().low());
1608 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1609 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1610 __ vmov(result, double_scratch0().low());
1611 } 1452 }
1612 1453
1613 Label done; 1454 Label done;
1614 Register remainder = scratch0(); 1455 Register scratch = scratch0();
1615 __ Mls(remainder, result, right, left); 1456 // If both operands have the same sign then we are done.
1616 __ cmp(remainder, Operand::Zero()); 1457 #if V8_TARGET_ARCH_PPC64
1617 __ b(eq, &done); 1458 __ xor_(scratch, dividend, divisor);
1618 __ eor(remainder, remainder, Operand(right)); 1459 __ cmpwi(scratch, Operand::Zero());
1619 __ add(result, result, Operand(remainder, ASR, 31)); 1460 __ bge(&done);
1461 #else
1462 __ xor_(scratch, dividend, divisor, SetRC);
1463 __ bge(&done, cr0);
1464 #endif
1465
1466 // If there is no remainder then we are done.
1467 __ mullw(scratch, divisor, result);
1468 __ cmpw(dividend, scratch);
1469 __ beq(&done);
1470
1471 // We performed a truncating division. Correct the result.
1472 __ subi(result, result, Operand(1));
1620 __ bind(&done); 1473 __ bind(&done);
1621 } 1474 }
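DoFlooringDivI takes the complementary route: truncate first, then subtract one when the operand signs differ and the division was inexact. A minimal sketch (illustrative only; x / 0 and kMinInt / -1 deoptimize above):

int32_t FlooringDivI(int32_t dividend, int32_t divisor) {
  int32_t result = dividend / divisor;             // divw (truncating)
  bool signs_differ = ((dividend ^ divisor) < 0);  // the xor_ sign test above
  bool inexact = (divisor * result != dividend);   // the mullw + cmpw above
  if (signs_differ && inexact) result -= 1;        // the subi correction
  return result;
}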
1622 1475
1623 1476
1477 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1478 DoubleRegister addend = ToDoubleRegister(instr->addend());
1479 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1480 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1481 DoubleRegister result = ToDoubleRegister(instr->result());
1482
1483 __ fmadd(result, multiplier, multiplicand, addend);
1484 }
1485
1486
1487 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1488 DoubleRegister minuend = ToDoubleRegister(instr->minuend());
1489 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1490 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1491 DoubleRegister result = ToDoubleRegister(instr->result());
1492
1493 __ fmsub(result, multiplier, multiplicand, minuend);
1494 }
1495
1496
1624 void LCodeGen::DoMulI(LMulI* instr) { 1497 void LCodeGen::DoMulI(LMulI* instr) {
1498 Register scratch = scratch0();
1625 Register result = ToRegister(instr->result()); 1499 Register result = ToRegister(instr->result());
1626 // Note that result may alias left. 1500 // Note that result may alias left.
1627 Register left = ToRegister(instr->left()); 1501 Register left = ToRegister(instr->left());
1628 LOperand* right_op = instr->right(); 1502 LOperand* right_op = instr->right();
1629 1503
1630 bool bailout_on_minus_zero = 1504 bool bailout_on_minus_zero =
1631 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1505 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1632 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1506 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1633 1507
1634 if (right_op->IsConstantOperand()) { 1508 if (right_op->IsConstantOperand()) {
1635 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); 1509 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1636 1510
1637 if (bailout_on_minus_zero && (constant < 0)) { 1511 if (bailout_on_minus_zero && (constant < 0)) {
1638 // The case of a null constant will be handled separately. 1512 // The case of a null constant will be handled separately.
1639 // If constant is negative and left is null, the result should be -0. 1513 // If constant is negative and left is null, the result should be -0.
1640 __ cmp(left, Operand::Zero()); 1514 __ cmpi(left, Operand::Zero());
1641 DeoptimizeIf(eq, instr->environment()); 1515 DeoptimizeIf(eq, instr);
1642 } 1516 }
1643 1517
1644 switch (constant) { 1518 switch (constant) {
1645 case -1: 1519 case -1:
1646 if (overflow) { 1520 if (can_overflow) {
1647 __ rsb(result, left, Operand::Zero(), SetCC); 1521 #if V8_TARGET_ARCH_PPC64
1648 DeoptimizeIf(vs, instr->environment()); 1522 if (instr->hydrogen()->representation().IsSmi()) {
1523 #endif
1524 __ li(r0, Operand::Zero()); // clear xer
1525 __ mtxer(r0);
1526 __ neg(result, left, SetOE, SetRC);
1527 DeoptimizeIf(overflow, instr, cr0);
1528 #if V8_TARGET_ARCH_PPC64
1529 } else {
1530 __ neg(result, left);
1531 __ TestIfInt32(result, scratch, r0);
1532 DeoptimizeIf(ne, instr);
1533 }
1534 #endif
1649 } else { 1535 } else {
1650 __ rsb(result, left, Operand::Zero()); 1536 __ neg(result, left);
1651 } 1537 }
1652 break; 1538 break;
1653 case 0: 1539 case 0:
1654 if (bailout_on_minus_zero) { 1540 if (bailout_on_minus_zero) {
1655 // If left is strictly negative and the constant is null, the 1541 // If left is strictly negative and the constant is null, the
1656 // result is -0. Deoptimize if required, otherwise return 0. 1542 // result is -0. Deoptimize if required, otherwise return 0.
1657 __ cmp(left, Operand::Zero()); 1543 #if V8_TARGET_ARCH_PPC64
1658 DeoptimizeIf(mi, instr->environment()); 1544 if (instr->hydrogen()->representation().IsSmi()) {
1545 #endif
1546 __ cmpi(left, Operand::Zero());
1547 #if V8_TARGET_ARCH_PPC64
1548 } else {
1549 __ cmpwi(left, Operand::Zero());
1550 }
1551 #endif
1552 DeoptimizeIf(lt, instr);
1659 } 1553 }
1660 __ mov(result, Operand::Zero()); 1554 __ li(result, Operand::Zero());
1661 break; 1555 break;
1662 case 1: 1556 case 1:
1663 __ Move(result, left); 1557 __ Move(result, left);
1664 break; 1558 break;
1665 default: 1559 default:
1666 // Multiplying by powers of two and powers of two plus or minus 1560 // Multiplying by powers of two and powers of two plus or minus
1667 // one can be done faster with shifted operands. 1561 // one can be done faster with shifted operands.
1668 // For other constants we emit standard code. 1562 // For other constants we emit standard code.
1669 int32_t mask = constant >> 31; 1563 int32_t mask = constant >> 31;
1670 uint32_t constant_abs = (constant + mask) ^ mask; 1564 uint32_t constant_abs = (constant + mask) ^ mask;
1671 1565
1672 if (base::bits::IsPowerOfTwo32(constant_abs)) { 1566 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1673 int32_t shift = WhichPowerOf2(constant_abs); 1567 int32_t shift = WhichPowerOf2(constant_abs);
1674 __ mov(result, Operand(left, LSL, shift)); 1568 __ ShiftLeftImm(result, left, Operand(shift));
1675 // Correct the sign of the result is the constant is negative. 1569 // Correct the sign of the result if the constant is negative.
1676 if (constant < 0) __ rsb(result, result, Operand::Zero()); 1570 if (constant < 0) __ neg(result, result);
1677 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { 1571 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1678 int32_t shift = WhichPowerOf2(constant_abs - 1); 1572 int32_t shift = WhichPowerOf2(constant_abs - 1);
1679 __ add(result, left, Operand(left, LSL, shift)); 1573 __ ShiftLeftImm(scratch, left, Operand(shift));
1680 // Correct the sign of the result is the constant is negative. 1574 __ add(result, scratch, left);
1681 if (constant < 0) __ rsb(result, result, Operand::Zero()); 1575 // Correct the sign of the result if the constant is negative.
1576 if (constant < 0) __ neg(result, result);
1682 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { 1577 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1683 int32_t shift = WhichPowerOf2(constant_abs + 1); 1578 int32_t shift = WhichPowerOf2(constant_abs + 1);
1684 __ rsb(result, left, Operand(left, LSL, shift)); 1579 __ ShiftLeftImm(scratch, left, Operand(shift));
1685 // Correct the sign of the result is the constant is negative. 1580 __ sub(result, scratch, left);
1686 if (constant < 0) __ rsb(result, result, Operand::Zero()); 1581 // Correct the sign of the result if the constant is negative.
1582 if (constant < 0) __ neg(result, result);
1687 } else { 1583 } else {
1688 // Generate standard code. 1584 // Generate standard code.
1689 __ mov(ip, Operand(constant)); 1585 __ mov(ip, Operand(constant));
1690 __ mul(result, left, ip); 1586 __ Mul(result, left, ip);
1691 } 1587 }
1692 } 1588 }
1693 1589
1694 } else { 1590 } else {
1695 DCHECK(right_op->IsRegister()); 1591 DCHECK(right_op->IsRegister());
1696 Register right = ToRegister(right_op); 1592 Register right = ToRegister(right_op);
1697 1593
1698 if (overflow) { 1594 if (can_overflow) {
1699 Register scratch = scratch0(); 1595 #if V8_TARGET_ARCH_PPC64
1596 // result = left * right.
1597 if (instr->hydrogen()->representation().IsSmi()) {
1598 __ SmiUntag(result, left);
1599 __ SmiUntag(scratch, right);
1600 __ Mul(result, result, scratch);
1601 } else {
1602 __ Mul(result, left, right);
1603 }
1604 __ TestIfInt32(result, scratch, r0);
1605 DeoptimizeIf(ne, instr);
1606 if (instr->hydrogen()->representation().IsSmi()) {
1607 __ SmiTag(result);
1608 }
1609 #else
1700 // scratch:result = left * right. 1610 // scratch:result = left * right.
1701 if (instr->hydrogen()->representation().IsSmi()) { 1611 if (instr->hydrogen()->representation().IsSmi()) {
1702 __ SmiUntag(result, left); 1612 __ SmiUntag(result, left);
1703 __ smull(result, scratch, result, right); 1613 __ mulhw(scratch, result, right);
1614 __ mullw(result, result, right);
1704 } else { 1615 } else {
1705 __ smull(result, scratch, left, right); 1616 __ mulhw(scratch, left, right);
1617 __ mullw(result, left, right);
1706 } 1618 }
1707 __ cmp(scratch, Operand(result, ASR, 31)); 1619 __ TestIfInt32(scratch, result, r0);
1707 1708 DeoptimizeIf(ne, instr->environment()); 1620 DeoptimizeIf(ne, instr);
1621 #endif
1709 } else { 1622 } else {
1710 if (instr->hydrogen()->representation().IsSmi()) { 1623 if (instr->hydrogen()->representation().IsSmi()) {
1711 __ SmiUntag(result, left); 1624 __ SmiUntag(result, left);
1712 __ mul(result, result, right); 1625 __ Mul(result, result, right);
1713 } else { 1626 } else {
1714 __ mul(result, left, right); 1627 __ Mul(result, left, right);
1715 } 1628 }
1716 } 1629 }
1717 1630
1718 if (bailout_on_minus_zero) { 1631 if (bailout_on_minus_zero) {
1719 Label done; 1632 Label done;
1720 __ teq(left, Operand(right)); 1633 #if V8_TARGET_ARCH_PPC64
1721 __ b(pl, &done); 1634 if (instr->hydrogen()->representation().IsSmi()) {
1635 #endif
1636 __ xor_(r0, left, right, SetRC);
1637 __ bge(&done, cr0);
1638 #if V8_TARGET_ARCH_PPC64
1639 } else {
1640 __ xor_(r0, left, right);
1641 __ cmpwi(r0, Operand::Zero());
1642 __ bge(&done);
1643 }
1644 #endif
1722 // Bail out if the result is minus zero. 1645 // Bail out if the result is minus zero.
1723 __ cmp(result, Operand::Zero()); 1646 __ cmpi(result, Operand::Zero());
1724 DeoptimizeIf(eq, instr->environment()); 1647 DeoptimizeIf(eq, instr);
1725 __ bind(&done); 1648 __ bind(&done);
1726 } 1649 }
1727 } 1650 }
1728 } 1651 }
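The constant-multiplier strength reduction above turns |constant| of the form 2^k, 2^k + 1 or 2^k - 1 into a shift plus at most one add/sub and a final negation; a hedged sketch, assuming k (WhichPowerOf2 in the real code) is known and the hypothetical delta parameter encodes which of the three forms applies:

// delta: 0 for 2^k, +1 for 2^k + 1, -1 for 2^k - 1.
int32_t MulByNearPowerOf2(int32_t left, int k, int delta,
                          bool constant_is_negative) {
  int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(left) << k);
  int32_t result = shifted + delta * left;         // the add/sub of left
  return constant_is_negative ? -result : result;  // the final neg
}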
1729 1652
1730 1653
1731 void LCodeGen::DoBitI(LBitI* instr) { 1654 void LCodeGen::DoBitI(LBitI* instr) {
1732 LOperand* left_op = instr->left(); 1655 LOperand* left_op = instr->left();
1733 LOperand* right_op = instr->right(); 1656 LOperand* right_op = instr->right();
1734 DCHECK(left_op->IsRegister()); 1657 DCHECK(left_op->IsRegister());
1735 Register left = ToRegister(left_op); 1658 Register left = ToRegister(left_op);
1736 Register result = ToRegister(instr->result()); 1659 Register result = ToRegister(instr->result());
1737 Operand right(no_reg); 1660 Operand right(no_reg);
1738 1661
1739 if (right_op->IsStackSlot()) { 1662 if (right_op->IsStackSlot()) {
1740 right = Operand(EmitLoadRegister(right_op, ip)); 1663 right = Operand(EmitLoadRegister(right_op, ip));
1741 } else { 1664 } else {
1742 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); 1665 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1743 right = ToOperand(right_op); 1666 right = ToOperand(right_op);
1667
1668 if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
1669 switch (instr->op()) {
1670 case Token::BIT_AND:
1671 __ andi(result, left, right);
1672 break;
1673 case Token::BIT_OR:
1674 __ ori(result, left, right);
1675 break;
1676 case Token::BIT_XOR:
1677 __ xori(result, left, right);
1678 break;
1679 default:
1680 UNREACHABLE();
1681 break;
1682 }
1683 return;
1684 }
1744 } 1685 }
1745 1686
1746 switch (instr->op()) { 1687 switch (instr->op()) {
1747 case Token::BIT_AND: 1688 case Token::BIT_AND:
1748 __ and_(result, left, right); 1689 __ And(result, left, right);
1749 break; 1690 break;
1750 case Token::BIT_OR: 1691 case Token::BIT_OR:
1751 __ orr(result, left, right); 1692 __ Or(result, left, right);
1752 break; 1693 break;
1753 case Token::BIT_XOR: 1694 case Token::BIT_XOR:
1754 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { 1695 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1755 __ mvn(result, Operand(left)); 1696 __ notx(result, left);
1756 } else { 1697 } else {
1757 __ eor(result, left, right); 1698 __ Xor(result, left, right);
1758 } 1699 }
1759 break; 1700 break;
1760 default: 1701 default:
1761 UNREACHABLE(); 1702 UNREACHABLE();
1762 break; 1703 break;
1763 } 1704 }
1764 } 1705 }
1765 1706
1766 1707
1767 void LCodeGen::DoShiftI(LShiftI* instr) { 1708 void LCodeGen::DoShiftI(LShiftI* instr) {
1768 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so 1709 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1769 // result may alias either of them. 1710 // result may alias either of them.
1770 LOperand* right_op = instr->right(); 1711 LOperand* right_op = instr->right();
1771 Register left = ToRegister(instr->left()); 1712 Register left = ToRegister(instr->left());
1772 Register result = ToRegister(instr->result()); 1713 Register result = ToRegister(instr->result());
1773 Register scratch = scratch0(); 1714 Register scratch = scratch0();
1774 if (right_op->IsRegister()) { 1715 if (right_op->IsRegister()) {
1775 // Mask the right_op operand. 1716 // Mask the right_op operand.
1776 __ and_(scratch, ToRegister(right_op), Operand(0x1F)); 1717 __ andi(scratch, ToRegister(right_op), Operand(0x1F));
1777 switch (instr->op()) { 1718 switch (instr->op()) {
1778 case Token::ROR: 1719 case Token::ROR:
1779 __ mov(result, Operand(left, ROR, scratch)); 1720 // rotate_right(a, b) == rotate_left(a, 32 - b)
1721 __ subfic(scratch, scratch, Operand(32));
1722 __ rotlw(result, left, scratch);
1780 break; 1723 break;
1781 case Token::SAR: 1724 case Token::SAR:
1782 __ mov(result, Operand(left, ASR, scratch)); 1725 __ sraw(result, left, scratch);
1783 break; 1726 break;
1784 case Token::SHR: 1727 case Token::SHR:
1785 if (instr->can_deopt()) { 1728 if (instr->can_deopt()) {
1786 __ mov(result, Operand(left, LSR, scratch), SetCC); 1729 __ srw(result, left, scratch, SetRC);
1787 DeoptimizeIf(mi, instr->environment()); 1730 #if V8_TARGET_ARCH_PPC64
1731 __ extsw(result, result, SetRC);
1732 #endif
1733 DeoptimizeIf(lt, instr, cr0);
1788 } else { 1734 } else {
1789 __ mov(result, Operand(left, LSR, scratch)); 1735 __ srw(result, left, scratch);
1790 } 1736 }
1791 break; 1737 break;
1792 case Token::SHL: 1738 case Token::SHL:
1793 __ mov(result, Operand(left, LSL, scratch)); 1739 __ slw(result, left, scratch);
1740 #if V8_TARGET_ARCH_PPC64
1741 __ extsw(result, result);
1742 #endif
1794 break; 1743 break;
1795 default: 1744 default:
1796 UNREACHABLE(); 1745 UNREACHABLE();
1797 break; 1746 break;
1798 } 1747 }
1799 } else { 1748 } else {
1800 // Mask the right_op operand. 1749 // Mask the right_op operand.
1801 int value = ToInteger32(LConstantOperand::cast(right_op)); 1750 int value = ToInteger32(LConstantOperand::cast(right_op));
1802 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); 1751 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1803 switch (instr->op()) { 1752 switch (instr->op()) {
1804 case Token::ROR: 1753 case Token::ROR:
1805 if (shift_count != 0) { 1754 if (shift_count != 0) {
1806 __ mov(result, Operand(left, ROR, shift_count)); 1755 __ rotrwi(result, left, shift_count);
1807 } else { 1756 } else {
1808 __ Move(result, left); 1757 __ Move(result, left);
1809 } 1758 }
1810 break; 1759 break;
1811 case Token::SAR: 1760 case Token::SAR:
1812 if (shift_count != 0) { 1761 if (shift_count != 0) {
1813 __ mov(result, Operand(left, ASR, shift_count)); 1762 __ srawi(result, left, shift_count);
1814 } else { 1763 } else {
1815 __ Move(result, left); 1764 __ Move(result, left);
1816 } 1765 }
1817 break; 1766 break;
1818 case Token::SHR: 1767 case Token::SHR:
1819 if (shift_count != 0) { 1768 if (shift_count != 0) {
1820 __ mov(result, Operand(left, LSR, shift_count)); 1769 __ srwi(result, left, Operand(shift_count));
1821 } else { 1770 } else {
1822 if (instr->can_deopt()) { 1771 if (instr->can_deopt()) {
1823 __ tst(left, Operand(0x80000000)); 1772 __ cmpwi(left, Operand::Zero());
1824 DeoptimizeIf(ne, instr->environment()); 1773 DeoptimizeIf(lt, instr);
1825 } 1774 }
1826 __ Move(result, left); 1775 __ Move(result, left);
1827 } 1776 }
1828 break; 1777 break;
1829 case Token::SHL: 1778 case Token::SHL:
1830 if (shift_count != 0) { 1779 if (shift_count != 0) {
1780 #if V8_TARGET_ARCH_PPC64
1781 if (instr->hydrogen_value()->representation().IsSmi()) {
1782 __ sldi(result, left, Operand(shift_count));
1783 #else
1831 if (instr->hydrogen_value()->representation().IsSmi() && 1784 if (instr->hydrogen_value()->representation().IsSmi() &&
1832 instr->can_deopt()) { 1785 instr->can_deopt()) {
1833 if (shift_count != 1) { 1786 if (shift_count != 1) {
1834 __ mov(result, Operand(left, LSL, shift_count - 1)); 1787 __ slwi(result, left, Operand(shift_count - 1));
1835 __ SmiTag(result, result, SetCC); 1788 __ SmiTagCheckOverflow(result, result, scratch);
1836 } else { 1789 } else {
1837 __ SmiTag(result, left, SetCC); 1790 __ SmiTagCheckOverflow(result, left, scratch);
1838 } 1791 }
1839 DeoptimizeIf(vs, instr->environment()); 1792 DeoptimizeIf(lt, instr, cr0);
1793 #endif
1840 } else { 1794 } else {
1841 __ mov(result, Operand(left, LSL, shift_count)); 1795 __ slwi(result, left, Operand(shift_count));
1796 #if V8_TARGET_ARCH_PPC64
1797 __ extsw(result, result);
1798 #endif
1842 } 1799 }
1843 } else { 1800 } else {
1844 __ Move(result, left); 1801 __ Move(result, left);
1845 } 1802 }
1846 break; 1803 break;
1847 default: 1804 default:
1848 UNREACHABLE(); 1805 UNREACHABLE();
1849 break; 1806 break;
1850 } 1807 }
1851 } 1808 }
1852 } 1809 }
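The ROR case above relies on PPC having only a rotate-left instruction, so rotate_right(a, b) is emitted as rotlw(a, 32 - b); a small sketch of the identity (hypothetical helper):

uint32_t RotateRight32(uint32_t a, uint32_t b) {
  b &= 0x1F;                          // same masking as the andi above
  if (b == 0) return a;               // rotlw by 32 rotates by 0 in hardware
  return (a << (32 - b)) | (a >> b);  // == rotate_left(a, 32 - b)
}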
1853 1810
1854 1811
1855 void LCodeGen::DoSubI(LSubI* instr) { 1812 void LCodeGen::DoSubI(LSubI* instr) {
1856 LOperand* left = instr->left();
1857 LOperand* right = instr->right(); 1813 LOperand* right = instr->right();
1858 LOperand* result = instr->result(); 1814 Register left = ToRegister(instr->left());
1815 Register result = ToRegister(instr->result());
1859 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1816 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1860 SBit set_cond = can_overflow ? SetCC : LeaveCC; 1817 if (!can_overflow && right->IsConstantOperand()) {
1818 Operand right_operand = ToOperand(right);
1819 __ Add(result, left, -right_operand.immediate(), r0);
1820 } else {
1821 Register right_reg = EmitLoadRegister(right, ip);
1861 1822
1862 if (right->IsStackSlot()) { 1823 if (!can_overflow) {
1863 Register right_reg = EmitLoadRegister(right, ip); 1824 __ sub(result, left, right_reg);
1864 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1825 } else {
1865 } else { 1826 __ SubAndCheckForOverflow(result, left, right_reg, scratch0(), r0);
1866 DCHECK(right->IsRegister() || right->IsConstantOperand()); 1827 // Deoptimize on overflow
1867 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1828 #if V8_TARGET_ARCH_PPC64
1829 if (!instr->hydrogen()->representation().IsSmi()) {
1830 __ extsw(scratch0(), scratch0(), SetRC);
1831 }
1832 #endif
1833 DeoptimizeIf(lt, instr, cr0);
1834 }
1868 } 1835 }
1869 1836
1870 if (can_overflow) { 1837 #if V8_TARGET_ARCH_PPC64
1871 DeoptimizeIf(vs, instr->environment()); 1838 if (!instr->hydrogen()->representation().IsSmi()) {
1839 __ extsw(result, result);
1872 } 1840 }
1841 #endif
1873 } 1842 }
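One way to read the PPC64-only extsw steps in DoSubI (and the same pattern in DoAddI below): on a 64-bit target, a 32-bit signed result has overflowed exactly when it no longer equals its own 32-bit sign extension. A hedged C++ sketch of that check; it shows the idea only, not the exact SubAndCheckForOverflow sequence, and the helper name is illustrative.

    #include <cassert>
    #include <cstdint>

    // Sketch: perform the subtraction in 64 bits, then test whether the value
    // still fits in 32 bits; if it does not, the 32-bit operation overflowed
    // and the generated code above would deoptimize.
    bool Sub32Overflows(int32_t a, int32_t b) {
      int64_t wide = static_cast<int64_t>(a) - static_cast<int64_t>(b);
      return wide != static_cast<int32_t>(wide);
    }

    int main() {
      assert(Sub32Overflows(INT32_MIN, 1));  // -2^31 - 1 does not fit in 32 bits
      assert(!Sub32Overflows(0, 1));
      return 0;
    }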
1874 1843
1875 1844
1876 void LCodeGen::DoRSubI(LRSubI* instr) { 1845 void LCodeGen::DoRSubI(LRSubI* instr) {
1877 LOperand* left = instr->left(); 1846 LOperand* left = instr->left();
1878 LOperand* right = instr->right(); 1847 LOperand* right = instr->right();
1879 LOperand* result = instr->result(); 1848 LOperand* result = instr->result();
1880 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1881 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1882 1849
1883 if (right->IsStackSlot()) { 1850 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1884 Register right_reg = EmitLoadRegister(right, ip); 1851 right->IsConstantOperand());
1885 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1852
1853 Operand right_operand = ToOperand(right);
1854 if (is_int16(right_operand.immediate())) {
1855 __ subfic(ToRegister(result), ToRegister(left), right_operand);
1886 } else { 1856 } else {
1887 DCHECK(right->IsRegister() || right->IsConstantOperand()); 1857 __ mov(r0, right_operand);
1888 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1858 __ sub(ToRegister(result), r0, ToRegister(left));
1889 }
1890
1891 if (can_overflow) {
1892 DeoptimizeIf(vs, instr->environment());
1893 } 1859 }
1894 } 1860 }
1895 1861
1896 1862
1897 void LCodeGen::DoConstantI(LConstantI* instr) { 1863 void LCodeGen::DoConstantI(LConstantI* instr) {
1898 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1864 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1899 } 1865 }
1900 1866
1901 1867
1902 void LCodeGen::DoConstantS(LConstantS* instr) { 1868 void LCodeGen::DoConstantS(LConstantS* instr) {
1903 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1869 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
1904 } 1870 }
1905 1871
1906 1872
1873 // TODO(penguin): put the constant into the constant pool instead
1874 // of storing the double on the stack
1907 void LCodeGen::DoConstantD(LConstantD* instr) { 1875 void LCodeGen::DoConstantD(LConstantD* instr) {
1908 DCHECK(instr->result()->IsDoubleRegister()); 1876 DCHECK(instr->result()->IsDoubleRegister());
1909 DwVfpRegister result = ToDoubleRegister(instr->result()); 1877 DoubleRegister result = ToDoubleRegister(instr->result());
1910 double v = instr->value(); 1878 double v = instr->value();
1911 __ Vmov(result, v, scratch0()); 1879 __ LoadDoubleLiteral(result, v, scratch0());
1912 } 1880 }
1913 1881
1914 1882
1915 void LCodeGen::DoConstantE(LConstantE* instr) { 1883 void LCodeGen::DoConstantE(LConstantE* instr) {
1916 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1884 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1917 } 1885 }
1918 1886
1919 1887
1920 void LCodeGen::DoConstantT(LConstantT* instr) { 1888 void LCodeGen::DoConstantT(LConstantT* instr) {
1921 Handle<Object> object = instr->value(isolate()); 1889 Handle<Object> object = instr->value(isolate());
1922 AllowDeferredHandleDereference smi_check; 1890 AllowDeferredHandleDereference smi_check;
1923 __ Move(ToRegister(instr->result()), object); 1891 __ Move(ToRegister(instr->result()), object);
1924 } 1892 }
1925 1893
1926 1894
1927 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { 1895 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1928 Register result = ToRegister(instr->result()); 1896 Register result = ToRegister(instr->result());
1929 Register map = ToRegister(instr->value()); 1897 Register map = ToRegister(instr->value());
1930 __ EnumLength(result, map); 1898 __ EnumLength(result, map);
1931 } 1899 }
1932 1900
1933 1901
1934 void LCodeGen::DoDateField(LDateField* instr) { 1902 void LCodeGen::DoDateField(LDateField* instr) {
1935 Register object = ToRegister(instr->date()); 1903 Register object = ToRegister(instr->date());
1936 Register result = ToRegister(instr->result()); 1904 Register result = ToRegister(instr->result());
1937 Register scratch = ToRegister(instr->temp()); 1905 Register scratch = ToRegister(instr->temp());
1938 Smi* index = instr->index(); 1906 Smi* index = instr->index();
1939 Label runtime, done; 1907 Label runtime, done;
1940 DCHECK(object.is(result)); 1908 DCHECK(object.is(result));
1941 DCHECK(object.is(r0)); 1909 DCHECK(object.is(r3));
1942 DCHECK(!scratch.is(scratch0())); 1910 DCHECK(!scratch.is(scratch0()));
1943 DCHECK(!scratch.is(object)); 1911 DCHECK(!scratch.is(object));
1944 1912
1945 __ SmiTst(object); 1913 __ TestIfSmi(object, r0);
1946 DeoptimizeIf(eq, instr->environment()); 1914 DeoptimizeIf(eq, instr, cr0);
1947 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); 1915 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1948 DeoptimizeIf(ne, instr->environment()); 1916 DeoptimizeIf(ne, instr);
1949 1917
1950 if (index->value() == 0) { 1918 if (index->value() == 0) {
1951 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); 1919 __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
1952 } else { 1920 } else {
1953 if (index->value() < JSDate::kFirstUncachedField) { 1921 if (index->value() < JSDate::kFirstUncachedField) {
1954 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 1922 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1955 __ mov(scratch, Operand(stamp)); 1923 __ mov(scratch, Operand(stamp));
1956 __ ldr(scratch, MemOperand(scratch)); 1924 __ LoadP(scratch, MemOperand(scratch));
1957 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); 1925 __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1958 __ cmp(scratch, scratch0()); 1926 __ cmp(scratch, scratch0());
1959 __ b(ne, &runtime); 1927 __ bne(&runtime);
1960 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset + 1928 __ LoadP(result,
1961 kPointerSize * index->value())); 1929 FieldMemOperand(object, JSDate::kValueOffset +
1962 __ jmp(&done); 1930 kPointerSize * index->value()));
1931 __ b(&done);
1963 } 1932 }
1964 __ bind(&runtime); 1933 __ bind(&runtime);
1965 __ PrepareCallCFunction(2, scratch); 1934 __ PrepareCallCFunction(2, scratch);
1966 __ mov(r1, Operand(index)); 1935 __ LoadSmiLiteral(r4, index);
1967 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); 1936 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1968 __ bind(&done); 1937 __ bind(&done);
1969 } 1938 }
1970 } 1939 }
1971 1940
1972 1941
1973 MemOperand LCodeGen::BuildSeqStringOperand(Register string, 1942 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
1974 LOperand* index,
1975 String::Encoding encoding) { 1943 String::Encoding encoding) {
1976 if (index->IsConstantOperand()) { 1944 if (index->IsConstantOperand()) {
1977 int offset = ToInteger32(LConstantOperand::cast(index)); 1945 int offset = ToInteger32(LConstantOperand::cast(index));
1978 if (encoding == String::TWO_BYTE_ENCODING) { 1946 if (encoding == String::TWO_BYTE_ENCODING) {
1979 offset *= kUC16Size; 1947 offset *= kUC16Size;
1980 } 1948 }
1981 STATIC_ASSERT(kCharSize == 1); 1949 STATIC_ASSERT(kCharSize == 1);
1982 return FieldMemOperand(string, SeqString::kHeaderSize + offset); 1950 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1983 } 1951 }
1984 Register scratch = scratch0(); 1952 Register scratch = scratch0();
1985 DCHECK(!scratch.is(string)); 1953 DCHECK(!scratch.is(string));
1986 DCHECK(!scratch.is(ToRegister(index))); 1954 DCHECK(!scratch.is(ToRegister(index)));
1987 if (encoding == String::ONE_BYTE_ENCODING) { 1955 if (encoding == String::ONE_BYTE_ENCODING) {
1988 __ add(scratch, string, Operand(ToRegister(index))); 1956 __ add(scratch, string, ToRegister(index));
1989 } else { 1957 } else {
1990 STATIC_ASSERT(kUC16Size == 2); 1958 STATIC_ASSERT(kUC16Size == 2);
1991 __ add(scratch, string, Operand(ToRegister(index), LSL, 1)); 1959 __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
1960 __ add(scratch, string, scratch);
1992 } 1961 }
1993 return FieldMemOperand(scratch, SeqString::kHeaderSize); 1962 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1994 } 1963 }
1995 1964
1996 1965
1997 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { 1966 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1998 String::Encoding encoding = instr->hydrogen()->encoding(); 1967 String::Encoding encoding = instr->hydrogen()->encoding();
1999 Register string = ToRegister(instr->string()); 1968 Register string = ToRegister(instr->string());
2000 Register result = ToRegister(instr->result()); 1969 Register result = ToRegister(instr->result());
2001 1970
2002 if (FLAG_debug_code) { 1971 if (FLAG_debug_code) {
2003 Register scratch = scratch0(); 1972 Register scratch = scratch0();
2004 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); 1973 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
2005 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 1974 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2006 1975
2007 __ and_(scratch, scratch, 1976 __ andi(scratch, scratch,
2008 Operand(kStringRepresentationMask | kStringEncodingMask)); 1977 Operand(kStringRepresentationMask | kStringEncodingMask));
2009 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1978 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2010 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1979 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2011 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING 1980 __ cmpi(scratch,
2012 ? one_byte_seq_type : two_byte_seq_type)); 1981 Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
1982 : two_byte_seq_type));
2013 __ Check(eq, kUnexpectedStringType); 1983 __ Check(eq, kUnexpectedStringType);
2014 } 1984 }
2015 1985
2016 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1986 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2017 if (encoding == String::ONE_BYTE_ENCODING) { 1987 if (encoding == String::ONE_BYTE_ENCODING) {
2018 __ ldrb(result, operand); 1988 __ lbz(result, operand);
2019 } else { 1989 } else {
2020 __ ldrh(result, operand); 1990 __ lhz(result, operand);
2021 } 1991 }
2022 } 1992 }
2023 1993
2024 1994
2025 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 1995 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2026 String::Encoding encoding = instr->hydrogen()->encoding(); 1996 String::Encoding encoding = instr->hydrogen()->encoding();
2027 Register string = ToRegister(instr->string()); 1997 Register string = ToRegister(instr->string());
2028 Register value = ToRegister(instr->value()); 1998 Register value = ToRegister(instr->value());
2029 1999
2030 if (FLAG_debug_code) { 2000 if (FLAG_debug_code) {
2031 Register index = ToRegister(instr->index()); 2001 Register index = ToRegister(instr->index());
2032 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 2002 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2033 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 2003 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2034 int encoding_mask = 2004 int encoding_mask =
2035 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 2005 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2036 ? one_byte_seq_type : two_byte_seq_type; 2006 ? one_byte_seq_type
2007 : two_byte_seq_type;
2037 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); 2008 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2038 } 2009 }
2039 2010
2040 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 2011 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2041 if (encoding == String::ONE_BYTE_ENCODING) { 2012 if (encoding == String::ONE_BYTE_ENCODING) {
2042 __ strb(value, operand); 2013 __ stb(value, operand);
2043 } else { 2014 } else {
2044 __ strh(value, operand); 2015 __ sth(value, operand);
2045 } 2016 }
2046 } 2017 }
2047 2018
2048 2019
2049 void LCodeGen::DoAddI(LAddI* instr) { 2020 void LCodeGen::DoAddI(LAddI* instr) {
2050 LOperand* left = instr->left();
2051 LOperand* right = instr->right(); 2021 LOperand* right = instr->right();
2052 LOperand* result = instr->result(); 2022 Register left = ToRegister(instr->left());
2023 Register result = ToRegister(instr->result());
2053 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 2024 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2054 SBit set_cond = can_overflow ? SetCC : LeaveCC; 2025 #if V8_TARGET_ARCH_PPC64
2026 bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
2027 instr->hydrogen()->representation().IsExternal());
2028 #endif
2055 2029
2056 if (right->IsStackSlot()) { 2030 if (!can_overflow && right->IsConstantOperand()) {
2031 Operand right_operand = ToOperand(right);
2032 __ Add(result, left, right_operand.immediate(), r0);
2033 } else {
2057 Register right_reg = EmitLoadRegister(right, ip); 2034 Register right_reg = EmitLoadRegister(right, ip);
2058 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 2035
2059 } else { 2036 if (!can_overflow) {
2060 DCHECK(right->IsRegister() || right->IsConstantOperand()); 2037 __ add(result, left, right_reg);
2061 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 2038 } else { // can_overflow.
2039 __ AddAndCheckForOverflow(result, left, right_reg, scratch0(), r0);
2040 #if V8_TARGET_ARCH_PPC64
2041 if (isInteger) {
2042 __ extsw(scratch0(), scratch0(), SetRC);
2043 }
2044 #endif
2045 // Deoptimize on overflow
2046 DeoptimizeIf(lt, instr, cr0);
2047 }
2062 } 2048 }
2063 2049
2064 if (can_overflow) { 2050 #if V8_TARGET_ARCH_PPC64
2065 DeoptimizeIf(vs, instr->environment()); 2051 if (isInteger) {
2052 __ extsw(result, result);
2066 } 2053 }
2054 #endif
2067 } 2055 }
2068 2056
2069 2057
2070 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 2058 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2071 LOperand* left = instr->left(); 2059 LOperand* left = instr->left();
2072 LOperand* right = instr->right(); 2060 LOperand* right = instr->right();
2073 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 2061 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2062 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
2074 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 2063 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2075 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
2076 Register left_reg = ToRegister(left); 2064 Register left_reg = ToRegister(left);
2077 Operand right_op = (right->IsRegister() || right->IsConstantOperand()) 2065 Register right_reg = EmitLoadRegister(right, ip);
2078 ? ToOperand(right)
2079 : Operand(EmitLoadRegister(right, ip));
2080 Register result_reg = ToRegister(instr->result()); 2066 Register result_reg = ToRegister(instr->result());
2081 __ cmp(left_reg, right_op); 2067 Label return_left, done;
2082 __ Move(result_reg, left_reg, condition); 2068 #if V8_TARGET_ARCH_PPC64
2083 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); 2069 if (instr->hydrogen_value()->representation().IsSmi()) {
2070 #endif
2071 __ cmp(left_reg, right_reg);
2072 #if V8_TARGET_ARCH_PPC64
2073 } else {
2074 __ cmpw(left_reg, right_reg);
2075 }
2076 #endif
2077 __ b(cond, &return_left);
2078 __ Move(result_reg, right_reg);
2079 __ b(&done);
2080 __ bind(&return_left);
2081 __ Move(result_reg, left_reg);
2082 __ bind(&done);
2084 } else { 2083 } else {
2085 DCHECK(instr->hydrogen()->representation().IsDouble()); 2084 DCHECK(instr->hydrogen()->representation().IsDouble());
2086 DwVfpRegister left_reg = ToDoubleRegister(left); 2085 DoubleRegister left_reg = ToDoubleRegister(left);
2087 DwVfpRegister right_reg = ToDoubleRegister(right); 2086 DoubleRegister right_reg = ToDoubleRegister(right);
2088 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); 2087 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2089 Label result_is_nan, return_left, return_right, check_zero, done; 2088 Label check_nan_left, check_zero, return_left, return_right, done;
2090 __ VFPCompareAndSetFlags(left_reg, right_reg); 2089 __ fcmpu(left_reg, right_reg);
2090 __ bunordered(&check_nan_left);
2091 __ beq(&check_zero);
2092 __ b(cond, &return_left);
2093 __ b(&return_right);
2094
2095 __ bind(&check_zero);
2096 __ fcmpu(left_reg, kDoubleRegZero);
2097 __ bne(&return_left); // left == right != 0.
2098
2099 // At this point, both left and right are either 0 or -0.
2100 // N.B. The following works because +0 + -0 == +0
2091 if (operation == HMathMinMax::kMathMin) { 2101 if (operation == HMathMinMax::kMathMin) {
2092 __ b(mi, &return_left); 2102 // For min we want logical-or of sign bit: -(-L + -R)
2093 __ b(gt, &return_right); 2103 __ fneg(left_reg, left_reg);
2104 __ fsub(result_reg, left_reg, right_reg);
2105 __ fneg(result_reg, result_reg);
2094 } else { 2106 } else {
2095 __ b(mi, &return_right); 2107 // For max we want logical-and of sign bit: (L + R)
2096 __ b(gt, &return_left); 2108 __ fadd(result_reg, left_reg, right_reg);
2097 }
2098 __ b(vs, &result_is_nan);
2099 // Left equals right => check for -0.
2100 __ VFPCompareAndSetFlags(left_reg, 0.0);
2101 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2102 __ b(ne, &done); // left == right != 0.
2103 } else {
2104 __ b(ne, &return_left); // left == right != 0.
2105 }
2106 // At this point, both left and right are either 0 or -0.
2107 if (operation == HMathMinMax::kMathMin) {
2108 // We could use a single 'vorr' instruction here if we had NEON support.
2109 __ vneg(left_reg, left_reg);
2110 __ vsub(result_reg, left_reg, right_reg);
2111 __ vneg(result_reg, result_reg);
2112 } else {
2113 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2114 // the decision for vadd is easy because vand is a NEON instruction.
2115 __ vadd(result_reg, left_reg, right_reg);
2116 } 2109 }
2117 __ b(&done); 2110 __ b(&done);
2118 2111
2119 __ bind(&result_is_nan); 2112 __ bind(&check_nan_left);
2120 __ vadd(result_reg, left_reg, right_reg); 2113 __ fcmpu(left_reg, left_reg);
2114 __ bunordered(&return_left); // left == NaN.
2115
2116 __ bind(&return_right);
2117 if (!right_reg.is(result_reg)) {
2118 __ fmr(result_reg, right_reg);
2119 }
2121 __ b(&done); 2120 __ b(&done);
2122 2121
2123 __ bind(&return_right); 2122 __ bind(&return_left);
2124 __ Move(result_reg, right_reg);
2125 if (!left_reg.is(result_reg)) { 2123 if (!left_reg.is(result_reg)) {
2126 __ b(&done); 2124 __ fmr(result_reg, left_reg);
2127 } 2125 }
2128
2129 __ bind(&return_left);
2130 __ Move(result_reg, left_reg);
2131
2132 __ bind(&done); 2126 __ bind(&done);
2133 } 2127 }
2134 } 2128 }
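The +0/-0 handling in DoMathMinMax leans on IEEE-754 rules for sums of zeros: adding the operands keeps a -0 only when both are -0 (a sign-bit AND, used for max), while negating both, adding, and negating back keeps a -0 whenever either input is -0 (a sign-bit OR, used for min). A small self-contained C++ check of that identity, valid only when both operands are already +0 or -0 as at that point in the code; the function names are illustrative.

    #include <cassert>
    #include <cmath>

    // Sketch: with both inputs restricted to +0/-0, min is -(-l + -r) and
    // max is l + r, mirroring the fneg/fsub/fadd sequence emitted above.
    double ZeroMin(double l, double r) { return -(-l + -r); }
    double ZeroMax(double l, double r) { return l + r; }

    int main() {
      assert(std::signbit(ZeroMin(0.0, -0.0)));   // min(+0, -0) is -0
      assert(std::signbit(ZeroMin(-0.0, -0.0)));  // min(-0, -0) is -0
      assert(!std::signbit(ZeroMax(0.0, -0.0)));  // max(+0, -0) is +0
      assert(std::signbit(ZeroMax(-0.0, -0.0)));  // max(-0, -0) is -0
      return 0;
    }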
2135 2129
2136 2130
2137 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 2131 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2138 DwVfpRegister left = ToDoubleRegister(instr->left()); 2132 DoubleRegister left = ToDoubleRegister(instr->left());
2139 DwVfpRegister right = ToDoubleRegister(instr->right()); 2133 DoubleRegister right = ToDoubleRegister(instr->right());
2140 DwVfpRegister result = ToDoubleRegister(instr->result()); 2134 DoubleRegister result = ToDoubleRegister(instr->result());
2141 switch (instr->op()) { 2135 switch (instr->op()) {
2142 case Token::ADD: 2136 case Token::ADD:
2143 __ vadd(result, left, right); 2137 __ fadd(result, left, right);
2144 break; 2138 break;
2145 case Token::SUB: 2139 case Token::SUB:
2146 __ vsub(result, left, right); 2140 __ fsub(result, left, right);
2147 break; 2141 break;
2148 case Token::MUL: 2142 case Token::MUL:
2149 __ vmul(result, left, right); 2143 __ fmul(result, left, right);
2150 break; 2144 break;
2151 case Token::DIV: 2145 case Token::DIV:
2152 __ vdiv(result, left, right); 2146 __ fdiv(result, left, right);
2153 break; 2147 break;
2154 case Token::MOD: { 2148 case Token::MOD: {
2155 __ PrepareCallCFunction(0, 2, scratch0()); 2149 __ PrepareCallCFunction(0, 2, scratch0());
2156 __ MovToFloatParameters(left, right); 2150 __ MovToFloatParameters(left, right);
2157 __ CallCFunction( 2151 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
2158 ExternalReference::mod_two_doubles_operation(isolate()), 2152 0, 2);
2159 0, 2);
2160 // Move the result into the double result register. 2153 // Move the result into the double result register.
2161 __ MovFromFloatResult(result); 2154 __ MovFromFloatResult(result);
2162 break; 2155 break;
2163 } 2156 }
2164 default: 2157 default:
2165 UNREACHABLE(); 2158 UNREACHABLE();
2166 break; 2159 break;
2167 } 2160 }
2168 } 2161 }
2169 2162
2170 2163
2171 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2164 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2172 DCHECK(ToRegister(instr->context()).is(cp)); 2165 DCHECK(ToRegister(instr->context()).is(cp));
2173 DCHECK(ToRegister(instr->left()).is(r1)); 2166 DCHECK(ToRegister(instr->left()).is(r4));
2174 DCHECK(ToRegister(instr->right()).is(r0)); 2167 DCHECK(ToRegister(instr->right()).is(r3));
2175 DCHECK(ToRegister(instr->result()).is(r0)); 2168 DCHECK(ToRegister(instr->result()).is(r3));
2176 2169
2177 Handle<Code> code = 2170 Handle<Code> code =
2178 CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code(); 2171 CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2179 // Block literal pool emission to ensure nop indicating no inlined smi code
2180 // is in the correct position.
2181 Assembler::BlockConstPoolScope block_const_pool(masm());
2182 CallCode(code, RelocInfo::CODE_TARGET, instr); 2172 CallCode(code, RelocInfo::CODE_TARGET, instr);
2183 } 2173 }
2184 2174
2185 2175
2186 template<class InstrType> 2176 template <class InstrType>
2187 void LCodeGen::EmitBranch(InstrType instr, Condition condition) { 2177 void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
2188 int left_block = instr->TrueDestination(chunk_); 2178 int left_block = instr->TrueDestination(chunk_);
2189 int right_block = instr->FalseDestination(chunk_); 2179 int right_block = instr->FalseDestination(chunk_);
2190 2180
2191 int next_block = GetNextEmittedBlock(); 2181 int next_block = GetNextEmittedBlock();
2192 2182
2193 if (right_block == left_block || condition == al) { 2183 if (right_block == left_block || cond == al) {
2194 EmitGoto(left_block); 2184 EmitGoto(left_block);
2195 } else if (left_block == next_block) { 2185 } else if (left_block == next_block) {
2196 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block)); 2186 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
2197 } else if (right_block == next_block) { 2187 } else if (right_block == next_block) {
2198 __ b(condition, chunk_->GetAssemblyLabel(left_block)); 2188 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2199 } else { 2189 } else {
2200 __ b(condition, chunk_->GetAssemblyLabel(left_block)); 2190 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2201 __ b(chunk_->GetAssemblyLabel(right_block)); 2191 __ b(chunk_->GetAssemblyLabel(right_block));
2202 } 2192 }
2203 } 2193 }
2204 2194
2205 2195
2206 template<class InstrType> 2196 template <class InstrType>
2207 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) { 2197 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
2208 int false_block = instr->FalseDestination(chunk_); 2198 int false_block = instr->FalseDestination(chunk_);
2209 __ b(condition, chunk_->GetAssemblyLabel(false_block)); 2199 __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
2210 } 2200 }
2211 2201
2212 2202
2213 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { 2203 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
2214 __ stop("LBreak");
2215 }
2216 2204
2217 2205
2218 void LCodeGen::DoBranch(LBranch* instr) { 2206 void LCodeGen::DoBranch(LBranch* instr) {
2219 Representation r = instr->hydrogen()->value()->representation(); 2207 Representation r = instr->hydrogen()->value()->representation();
2220 if (r.IsInteger32() || r.IsSmi()) { 2208 DoubleRegister dbl_scratch = double_scratch0();
2209 const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
2210 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
2211
2212 if (r.IsInteger32()) {
2221 DCHECK(!info()->IsStub()); 2213 DCHECK(!info()->IsStub());
2222 Register reg = ToRegister(instr->value()); 2214 Register reg = ToRegister(instr->value());
2223 __ cmp(reg, Operand::Zero()); 2215 __ cmpwi(reg, Operand::Zero());
2216 EmitBranch(instr, ne);
2217 } else if (r.IsSmi()) {
2218 DCHECK(!info()->IsStub());
2219 Register reg = ToRegister(instr->value());
2220 __ cmpi(reg, Operand::Zero());
2224 EmitBranch(instr, ne); 2221 EmitBranch(instr, ne);
2225 } else if (r.IsDouble()) { 2222 } else if (r.IsDouble()) {
2226 DCHECK(!info()->IsStub()); 2223 DCHECK(!info()->IsStub());
2227 DwVfpRegister reg = ToDoubleRegister(instr->value()); 2224 DoubleRegister reg = ToDoubleRegister(instr->value());
2228 // Test the double value. Zero and NaN are false. 2225 // Test the double value. Zero and NaN are false.
2229 __ VFPCompareAndSetFlags(reg, 0.0); 2226 __ fcmpu(reg, kDoubleRegZero, cr7);
2230 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false) 2227 __ mfcr(r0);
2231 EmitBranch(instr, ne); 2228 __ andi(r0, r0, Operand(crZOrNaNBits));
2229 EmitBranch(instr, eq, cr0);
2232 } else { 2230 } else {
2233 DCHECK(r.IsTagged()); 2231 DCHECK(r.IsTagged());
2234 Register reg = ToRegister(instr->value()); 2232 Register reg = ToRegister(instr->value());
2235 HType type = instr->hydrogen()->value()->type(); 2233 HType type = instr->hydrogen()->value()->type();
2236 if (type.IsBoolean()) { 2234 if (type.IsBoolean()) {
2237 DCHECK(!info()->IsStub()); 2235 DCHECK(!info()->IsStub());
2238 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2236 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2239 EmitBranch(instr, eq); 2237 EmitBranch(instr, eq);
2240 } else if (type.IsSmi()) { 2238 } else if (type.IsSmi()) {
2241 DCHECK(!info()->IsStub()); 2239 DCHECK(!info()->IsStub());
2242 __ cmp(reg, Operand::Zero()); 2240 __ cmpi(reg, Operand::Zero());
2243 EmitBranch(instr, ne); 2241 EmitBranch(instr, ne);
2244 } else if (type.IsJSArray()) { 2242 } else if (type.IsJSArray()) {
2245 DCHECK(!info()->IsStub()); 2243 DCHECK(!info()->IsStub());
2246 EmitBranch(instr, al); 2244 EmitBranch(instr, al);
2247 } else if (type.IsHeapNumber()) { 2245 } else if (type.IsHeapNumber()) {
2248 DCHECK(!info()->IsStub()); 2246 DCHECK(!info()->IsStub());
2249 DwVfpRegister dbl_scratch = double_scratch0(); 2247 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2250 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2251 // Test the double value. Zero and NaN are false. 2248 // Test the double value. Zero and NaN are false.
2252 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); 2249 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2253 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN) 2250 __ mfcr(r0);
2254 EmitBranch(instr, ne); 2251 __ andi(r0, r0, Operand(crZOrNaNBits));
2252 EmitBranch(instr, eq, cr0);
2255 } else if (type.IsString()) { 2253 } else if (type.IsString()) {
2256 DCHECK(!info()->IsStub()); 2254 DCHECK(!info()->IsStub());
2257 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); 2255 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2258 __ cmp(ip, Operand::Zero()); 2256 __ cmpi(ip, Operand::Zero());
2259 EmitBranch(instr, ne); 2257 EmitBranch(instr, ne);
2260 } else { 2258 } else {
2261 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); 2259 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2262 // Avoid deopts in the case where we've never executed this path before. 2260 // Avoid deopts in the case where we've never executed this path before.
2263 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); 2261 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2264 2262
2265 if (expected.Contains(ToBooleanStub::UNDEFINED)) { 2263 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2266 // undefined -> false. 2264 // undefined -> false.
2267 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); 2265 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2268 __ b(eq, instr->FalseLabel(chunk_)); 2266 __ beq(instr->FalseLabel(chunk_));
2269 } 2267 }
2270 if (expected.Contains(ToBooleanStub::BOOLEAN)) { 2268 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2271 // Boolean -> its value. 2269 // Boolean -> its value.
2272 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2270 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2273 __ b(eq, instr->TrueLabel(chunk_)); 2271 __ beq(instr->TrueLabel(chunk_));
2274 __ CompareRoot(reg, Heap::kFalseValueRootIndex); 2272 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2275 __ b(eq, instr->FalseLabel(chunk_)); 2273 __ beq(instr->FalseLabel(chunk_));
2276 } 2274 }
2277 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { 2275 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2278 // 'null' -> false. 2276 // 'null' -> false.
2279 __ CompareRoot(reg, Heap::kNullValueRootIndex); 2277 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2280 __ b(eq, instr->FalseLabel(chunk_)); 2278 __ beq(instr->FalseLabel(chunk_));
2281 } 2279 }
2282 2280
2283 if (expected.Contains(ToBooleanStub::SMI)) { 2281 if (expected.Contains(ToBooleanStub::SMI)) {
2284 // Smis: 0 -> false, all other -> true. 2282 // Smis: 0 -> false, all other -> true.
2285 __ cmp(reg, Operand::Zero()); 2283 __ cmpi(reg, Operand::Zero());
2286 __ b(eq, instr->FalseLabel(chunk_)); 2284 __ beq(instr->FalseLabel(chunk_));
2287 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2285 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2288 } else if (expected.NeedsMap()) { 2286 } else if (expected.NeedsMap()) {
2289 // If we need a map later and have a Smi -> deopt. 2287 // If we need a map later and have a Smi -> deopt.
2290 __ SmiTst(reg); 2288 __ TestIfSmi(reg, r0);
2291 DeoptimizeIf(eq, instr->environment()); 2289 DeoptimizeIf(eq, instr, cr0);
2292 } 2290 }
2293 2291
2294 const Register map = scratch0(); 2292 const Register map = scratch0();
2295 if (expected.NeedsMap()) { 2293 if (expected.NeedsMap()) {
2296 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); 2294 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2297 2295
2298 if (expected.CanBeUndetectable()) { 2296 if (expected.CanBeUndetectable()) {
2299 // Undetectable -> false. 2297 // Undetectable -> false.
2300 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); 2298 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2301 __ tst(ip, Operand(1 << Map::kIsUndetectable)); 2299 __ TestBit(ip, Map::kIsUndetectable, r0);
2302 __ b(ne, instr->FalseLabel(chunk_)); 2300 __ bne(instr->FalseLabel(chunk_), cr0);
2303 } 2301 }
2304 } 2302 }
2305 2303
2306 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { 2304 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2307 // spec object -> true. 2305 // spec object -> true.
2308 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); 2306 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2309 __ b(ge, instr->TrueLabel(chunk_)); 2307 __ bge(instr->TrueLabel(chunk_));
2310 } 2308 }
2311 2309
2312 if (expected.Contains(ToBooleanStub::STRING)) { 2310 if (expected.Contains(ToBooleanStub::STRING)) {
2313 // String value -> false iff empty. 2311 // String value -> false iff empty.
2314 Label not_string; 2312 Label not_string;
2315 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); 2313 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2316 __ b(ge, &not_string); 2314 __ bge(&not_string);
2317 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); 2315 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2318 __ cmp(ip, Operand::Zero()); 2316 __ cmpi(ip, Operand::Zero());
2319 __ b(ne, instr->TrueLabel(chunk_)); 2317 __ bne(instr->TrueLabel(chunk_));
2320 __ b(instr->FalseLabel(chunk_)); 2318 __ b(instr->FalseLabel(chunk_));
2321 __ bind(&not_string); 2319 __ bind(&not_string);
2322 } 2320 }
2323 2321
2324 if (expected.Contains(ToBooleanStub::SYMBOL)) { 2322 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2325 // Symbol value -> true. 2323 // Symbol value -> true.
2326 __ CompareInstanceType(map, ip, SYMBOL_TYPE); 2324 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2327 __ b(eq, instr->TrueLabel(chunk_)); 2325 __ beq(instr->TrueLabel(chunk_));
2328 } 2326 }
2329 2327
2330 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { 2328 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2331 // heap number -> false iff +0, -0, or NaN. 2329 // heap number -> false iff +0, -0, or NaN.
2332 DwVfpRegister dbl_scratch = double_scratch0();
2333 Label not_heap_number; 2330 Label not_heap_number;
2334 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); 2331 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2335 __ b(ne, &not_heap_number); 2332 __ bne(&not_heap_number);
2336 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2333 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2337 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); 2334 // Test the double value. Zero and NaN are false.
2338 __ cmp(r0, r0, vs); // NaN -> false. 2335 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2339 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. 2336 __ mfcr(r0);
2337 __ andi(r0, r0, Operand(crZOrNaNBits));
2338 __ bne(instr->FalseLabel(chunk_), cr0);
2340 __ b(instr->TrueLabel(chunk_)); 2339 __ b(instr->TrueLabel(chunk_));
2341 __ bind(&not_heap_number); 2340 __ bind(&not_heap_number);
2342 } 2341 }
2343 2342
2344 if (!expected.IsGeneric()) { 2343 if (!expected.IsGeneric()) {
2345 // We've seen something for the first time -> deopt. 2344 // We've seen something for the first time -> deopt.
2346 // This can only happen if we are not generic already. 2345 // This can only happen if we are not generic already.
2347 DeoptimizeIf(al, instr->environment()); 2346 DeoptimizeIf(al, instr);
2348 } 2347 }
2349 } 2348 }
2350 } 2349 }
2351 } 2350 }
2352 2351
2353 2352
2354 void LCodeGen::EmitGoto(int block) { 2353 void LCodeGen::EmitGoto(int block) {
2355 if (!IsNextEmittedBlock(block)) { 2354 if (!IsNextEmittedBlock(block)) {
2356 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); 2355 __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2357 } 2356 }
2358 } 2357 }
2359 2358
2360 2359
2361 void LCodeGen::DoGoto(LGoto* instr) { 2360 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2362 EmitGoto(instr->block_id());
2363 }
2364 2361
2365 2362
2366 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { 2363 Condition LCodeGen::TokenToCondition(Token::Value op) {
2367 Condition cond = kNoCondition; 2364 Condition cond = kNoCondition;
2368 switch (op) { 2365 switch (op) {
2369 case Token::EQ: 2366 case Token::EQ:
2370 case Token::EQ_STRICT: 2367 case Token::EQ_STRICT:
2371 cond = eq; 2368 cond = eq;
2372 break; 2369 break;
2373 case Token::NE: 2370 case Token::NE:
2374 case Token::NE_STRICT: 2371 case Token::NE_STRICT:
2375 cond = ne; 2372 cond = ne;
2376 break; 2373 break;
2377 case Token::LT: 2374 case Token::LT:
2378 cond = is_unsigned ? lo : lt; 2375 cond = lt;
2379 break; 2376 break;
2380 case Token::GT: 2377 case Token::GT:
2381 cond = is_unsigned ? hi : gt; 2378 cond = gt;
2382 break; 2379 break;
2383 case Token::LTE: 2380 case Token::LTE:
2384 cond = is_unsigned ? ls : le; 2381 cond = le;
2385 break; 2382 break;
2386 case Token::GTE: 2383 case Token::GTE:
2387 cond = is_unsigned ? hs : ge; 2384 cond = ge;
2388 break; 2385 break;
2389 case Token::IN: 2386 case Token::IN:
2390 case Token::INSTANCEOF: 2387 case Token::INSTANCEOF:
2391 default: 2388 default:
2392 UNREACHABLE(); 2389 UNREACHABLE();
2393 } 2390 }
2394 return cond; 2391 return cond;
2395 } 2392 }
2396 2393
2397 2394
2398 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { 2395 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2399 LOperand* left = instr->left(); 2396 LOperand* left = instr->left();
2400 LOperand* right = instr->right(); 2397 LOperand* right = instr->right();
2401 bool is_unsigned = 2398 bool is_unsigned =
2402 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || 2399 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2403 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); 2400 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2404 Condition cond = TokenToCondition(instr->op(), is_unsigned); 2401 Condition cond = TokenToCondition(instr->op());
2405 2402
2406 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2403 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2407 // We can statically evaluate the comparison. 2404 // We can statically evaluate the comparison.
2408 double left_val = ToDouble(LConstantOperand::cast(left)); 2405 double left_val = ToDouble(LConstantOperand::cast(left));
2409 double right_val = ToDouble(LConstantOperand::cast(right)); 2406 double right_val = ToDouble(LConstantOperand::cast(right));
2410 int next_block = EvalComparison(instr->op(), left_val, right_val) ? 2407 int next_block = EvalComparison(instr->op(), left_val, right_val)
2411 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); 2408 ? instr->TrueDestination(chunk_)
2409 : instr->FalseDestination(chunk_);
2412 EmitGoto(next_block); 2410 EmitGoto(next_block);
2413 } else { 2411 } else {
2414 if (instr->is_double()) { 2412 if (instr->is_double()) {
2415 // Compare left and right operands as doubles and load the 2413 // Compare left and right operands as doubles and load the
2416 // resulting flags into the normal status register. 2414 // resulting flags into the normal status register.
2417 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); 2415 __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
2418 // If a NaN is involved, i.e. the result is unordered (V set), 2416 // If a NaN is involved, i.e. the result is unordered,
2419 // jump to false block label. 2417 // jump to false block label.
2420 __ b(vs, instr->FalseLabel(chunk_)); 2418 __ bunordered(instr->FalseLabel(chunk_));
2421 } else { 2419 } else {
2422 if (right->IsConstantOperand()) { 2420 if (right->IsConstantOperand()) {
2423 int32_t value = ToInteger32(LConstantOperand::cast(right)); 2421 int32_t value = ToInteger32(LConstantOperand::cast(right));
2424 if (instr->hydrogen_value()->representation().IsSmi()) { 2422 if (instr->hydrogen_value()->representation().IsSmi()) {
2425 __ cmp(ToRegister(left), Operand(Smi::FromInt(value))); 2423 if (is_unsigned) {
2424 __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2425 } else {
2426 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2427 }
2426 } else { 2428 } else {
2427 __ cmp(ToRegister(left), Operand(value)); 2429 if (is_unsigned) {
2430 __ Cmplwi(ToRegister(left), Operand(value), r0);
2431 } else {
2432 __ Cmpwi(ToRegister(left), Operand(value), r0);
2433 }
2428 } 2434 }
2429 } else if (left->IsConstantOperand()) { 2435 } else if (left->IsConstantOperand()) {
2430 int32_t value = ToInteger32(LConstantOperand::cast(left)); 2436 int32_t value = ToInteger32(LConstantOperand::cast(left));
2431 if (instr->hydrogen_value()->representation().IsSmi()) { 2437 if (instr->hydrogen_value()->representation().IsSmi()) {
2432 __ cmp(ToRegister(right), Operand(Smi::FromInt(value))); 2438 if (is_unsigned) {
2439 __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2440 } else {
2441 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2442 }
2433 } else { 2443 } else {
2434 __ cmp(ToRegister(right), Operand(value)); 2444 if (is_unsigned) {
2445 __ Cmplwi(ToRegister(right), Operand(value), r0);
2446 } else {
2447 __ Cmpwi(ToRegister(right), Operand(value), r0);
2448 }
2435 } 2449 }
2436 // We commuted the operands, so commute the condition. 2450 // We commuted the operands, so commute the condition.
2437 cond = CommuteCondition(cond); 2451 cond = CommuteCondition(cond);
2452 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2453 if (is_unsigned) {
2454 __ cmpl(ToRegister(left), ToRegister(right));
2455 } else {
2456 __ cmp(ToRegister(left), ToRegister(right));
2457 }
2438 } else { 2458 } else {
2439 __ cmp(ToRegister(left), ToRegister(right)); 2459 if (is_unsigned) {
2460 __ cmplw(ToRegister(left), ToRegister(right));
2461 } else {
2462 __ cmpw(ToRegister(left), ToRegister(right));
2463 }
2440 } 2464 }
2441 } 2465 }
2442 EmitBranch(instr, cond); 2466 EmitBranch(instr, cond);
2443 } 2467 }
2444 } 2468 }
2445 2469
2446 2470
2447 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 2471 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2448 Register left = ToRegister(instr->left()); 2472 Register left = ToRegister(instr->left());
2449 Register right = ToRegister(instr->right()); 2473 Register right = ToRegister(instr->right());
2450 2474
2451 __ cmp(left, Operand(right)); 2475 __ cmp(left, right);
2452 EmitBranch(instr, eq); 2476 EmitBranch(instr, eq);
2453 } 2477 }
2454 2478
2455 2479
2456 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { 2480 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2457 if (instr->hydrogen()->representation().IsTagged()) { 2481 if (instr->hydrogen()->representation().IsTagged()) {
2458 Register input_reg = ToRegister(instr->object()); 2482 Register input_reg = ToRegister(instr->object());
2459 __ mov(ip, Operand(factory()->the_hole_value())); 2483 __ mov(ip, Operand(factory()->the_hole_value()));
2460 __ cmp(input_reg, ip); 2484 __ cmp(input_reg, ip);
2461 EmitBranch(instr, eq); 2485 EmitBranch(instr, eq);
2462 return; 2486 return;
2463 } 2487 }
2464 2488
2465 DwVfpRegister input_reg = ToDoubleRegister(instr->object()); 2489 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2466 __ VFPCompareAndSetFlags(input_reg, input_reg); 2490 __ fcmpu(input_reg, input_reg);
2467 EmitFalseBranch(instr, vc); 2491 EmitFalseBranch(instr, ordered);
2468 2492
2469 Register scratch = scratch0(); 2493 Register scratch = scratch0();
2470 __ VmovHigh(scratch, input_reg); 2494 __ MovDoubleHighToInt(scratch, input_reg);
2471 __ cmp(scratch, Operand(kHoleNanUpper32)); 2495 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
2472 EmitBranch(instr, eq); 2496 EmitBranch(instr, eq);
2473 } 2497 }
2474 2498
2475 2499
2476 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { 2500 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2477 Representation rep = instr->hydrogen()->value()->representation(); 2501 Representation rep = instr->hydrogen()->value()->representation();
2478 DCHECK(!rep.IsInteger32()); 2502 DCHECK(!rep.IsInteger32());
2479 Register scratch = ToRegister(instr->temp()); 2503 Register scratch = ToRegister(instr->temp());
2480 2504
2481 if (rep.IsDouble()) { 2505 if (rep.IsDouble()) {
2482 DwVfpRegister value = ToDoubleRegister(instr->value()); 2506 DoubleRegister value = ToDoubleRegister(instr->value());
2483 __ VFPCompareAndSetFlags(value, 0.0); 2507 __ fcmpu(value, kDoubleRegZero);
2484 EmitFalseBranch(instr, ne); 2508 EmitFalseBranch(instr, ne);
2485 __ VmovHigh(scratch, value); 2509 #if V8_TARGET_ARCH_PPC64
2486 __ cmp(scratch, Operand(0x80000000)); 2510 __ MovDoubleToInt64(scratch, value);
2511 #else
2512 __ MovDoubleHighToInt(scratch, value);
2513 #endif
2514 __ cmpi(scratch, Operand::Zero());
2515 EmitBranch(instr, lt);
2487 } else { 2516 } else {
2488 Register value = ToRegister(instr->value()); 2517 Register value = ToRegister(instr->value());
2489 __ CheckMap(value, 2518 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2490 scratch, 2519 instr->FalseLabel(chunk()), DO_SMI_CHECK);
2491 Heap::kHeapNumberMapRootIndex, 2520 #if V8_TARGET_ARCH_PPC64
2492 instr->FalseLabel(chunk()), 2521 __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2493 DO_SMI_CHECK); 2522 __ li(ip, Operand(1));
2494 __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset)); 2523 __ rotrdi(ip, ip, 1); // ip = 0x80000000_00000000
2495 __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset)); 2524 __ cmp(scratch, ip);
2496 __ cmp(scratch, Operand(0x80000000)); 2525 #else
2497 __ cmp(ip, Operand(0x00000000), eq); 2526 __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2527 __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2528 Label skip;
2529 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
2530 __ cmp(scratch, r0);
2531 __ bne(&skip);
2532 __ cmpi(ip, Operand::Zero());
2533 __ bind(&skip);
2534 #endif
2535 EmitBranch(instr, eq);
2498 } 2536 }
2499 EmitBranch(instr, eq);
2500 } 2537 }
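For reference, the minus-zero test emitted above reduces to recognizing the one double whose bit pattern has only the sign bit set: the full doubleword equals 0x8000000000000000 on PPC64, or the exponent word is 0x80000000 with a zero mantissa word on 32-bit. A hedged standalone C++ sketch follows; the TARGET_IS_64BIT macro and the helper name are illustrative stand-ins, not V8 definitions.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Sketch: -0.0 is the only double whose raw bits are exactly the sign bit.
    bool IsMinusZero(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));  // reinterpret the raw IEEE-754 bits
    #if defined(TARGET_IS_64BIT)             // stand-in for V8_TARGET_ARCH_PPC64
      return bits == (uint64_t{1} << 63);    // compare the whole doubleword
    #else
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sign/exponent word
      uint32_t lo = static_cast<uint32_t>(bits);        // mantissa word
      return hi == 0x80000000u && lo == 0u;
    #endif
    }

    int main() {
      assert(IsMinusZero(-0.0));
      assert(!IsMinusZero(0.0));
      assert(!IsMinusZero(-1.0));
      return 0;
    }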
2501 2538
2502 2539
2503 Condition LCodeGen::EmitIsObject(Register input, 2540 Condition LCodeGen::EmitIsObject(Register input, Register temp1,
2504 Register temp1, 2541 Label* is_not_object, Label* is_object) {
2505 Label* is_not_object,
2506 Label* is_object) {
2507 Register temp2 = scratch0(); 2542 Register temp2 = scratch0();
2508 __ JumpIfSmi(input, is_not_object); 2543 __ JumpIfSmi(input, is_not_object);
2509 2544
2510 __ LoadRoot(temp2, Heap::kNullValueRootIndex); 2545 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2511 __ cmp(input, temp2); 2546 __ cmp(input, temp2);
2512 __ b(eq, is_object); 2547 __ beq(is_object);
2513 2548
2514 // Load map. 2549 // Load map.
2515 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); 2550 __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2516 // Undetectable objects behave like undefined. 2551 // Undetectable objects behave like undefined.
2517 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); 2552 __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2518 __ tst(temp2, Operand(1 << Map::kIsUndetectable)); 2553 __ TestBit(temp2, Map::kIsUndetectable, r0);
2519 __ b(ne, is_not_object); 2554 __ bne(is_not_object, cr0);
2520 2555
2521 // Load instance type and check that it is in object type range. 2556 // Load instance type and check that it is in object type range.
2522 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); 2557 __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2523 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2558 __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2524 __ b(lt, is_not_object); 2559 __ blt(is_not_object);
2525 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2560 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2526 return le; 2561 return le;
2527 } 2562 }
2528 2563
2529 2564
2530 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { 2565 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2531 Register reg = ToRegister(instr->value()); 2566 Register reg = ToRegister(instr->value());
2532 Register temp1 = ToRegister(instr->temp()); 2567 Register temp1 = ToRegister(instr->temp());
2533 2568
2534 Condition true_cond = 2569 Condition true_cond = EmitIsObject(reg, temp1, instr->FalseLabel(chunk_),
2535 EmitIsObject(reg, temp1, 2570 instr->TrueLabel(chunk_));
2536 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2537 2571
2538 EmitBranch(instr, true_cond); 2572 EmitBranch(instr, true_cond);
2539 } 2573 }
2540 2574
2541 2575
2542 Condition LCodeGen::EmitIsString(Register input, 2576 Condition LCodeGen::EmitIsString(Register input, Register temp1,
2543 Register temp1,
2544 Label* is_not_string, 2577 Label* is_not_string,
2545 SmiCheck check_needed = INLINE_SMI_CHECK) { 2578 SmiCheck check_needed = INLINE_SMI_CHECK) {
2546 if (check_needed == INLINE_SMI_CHECK) { 2579 if (check_needed == INLINE_SMI_CHECK) {
2547 __ JumpIfSmi(input, is_not_string); 2580 __ JumpIfSmi(input, is_not_string);
2548 } 2581 }
2549 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); 2582 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2550 2583
2551 return lt; 2584 return lt;
2552 } 2585 }
2553 2586
2554 2587
2555 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { 2588 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2556 Register reg = ToRegister(instr->value()); 2589 Register reg = ToRegister(instr->value());
2557 Register temp1 = ToRegister(instr->temp()); 2590 Register temp1 = ToRegister(instr->temp());
2558 2591
2559 SmiCheck check_needed = 2592 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2560 instr->hydrogen()->value()->type().IsHeapObject() 2593 ? OMIT_SMI_CHECK
2561 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2594 : INLINE_SMI_CHECK;
2562 Condition true_cond = 2595 Condition true_cond =
2563 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); 2596 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2564 2597
2565 EmitBranch(instr, true_cond); 2598 EmitBranch(instr, true_cond);
2566 } 2599 }
2567 2600
2568 2601
2569 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { 2602 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2570 Register input_reg = EmitLoadRegister(instr->value(), ip); 2603 Register input_reg = EmitLoadRegister(instr->value(), ip);
2571 __ SmiTst(input_reg); 2604 __ TestIfSmi(input_reg, r0);
2572 EmitBranch(instr, eq); 2605 EmitBranch(instr, eq, cr0);
2573 } 2606 }
2574 2607
2575 2608
2576 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 2609 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2577 Register input = ToRegister(instr->value()); 2610 Register input = ToRegister(instr->value());
2578 Register temp = ToRegister(instr->temp()); 2611 Register temp = ToRegister(instr->temp());
2579 2612
2580 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2613 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2581 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2614 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2582 } 2615 }
2583 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2616 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2584 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); 2617 __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2585 __ tst(temp, Operand(1 << Map::kIsUndetectable)); 2618 __ TestBit(temp, Map::kIsUndetectable, r0);
2586 EmitBranch(instr, ne); 2619 EmitBranch(instr, ne, cr0);
2587 } 2620 }
2588 2621
2589 2622
2590 static Condition ComputeCompareCondition(Token::Value op) { 2623 static Condition ComputeCompareCondition(Token::Value op) {
2591 switch (op) { 2624 switch (op) {
2592 case Token::EQ_STRICT: 2625 case Token::EQ_STRICT:
2593 case Token::EQ: 2626 case Token::EQ:
2594 return eq; 2627 return eq;
2595 case Token::LT: 2628 case Token::LT:
2596 return lt; 2629 return lt;
2597 case Token::GT: 2630 case Token::GT:
2598 return gt; 2631 return gt;
2599 case Token::LTE: 2632 case Token::LTE:
2600 return le; 2633 return le;
2601 case Token::GTE: 2634 case Token::GTE:
2602 return ge; 2635 return ge;
2603 default: 2636 default:
2604 UNREACHABLE(); 2637 UNREACHABLE();
2605 return kNoCondition; 2638 return kNoCondition;
2606 } 2639 }
2607 } 2640 }
2608 2641
2609 2642
2610 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 2643 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2611 DCHECK(ToRegister(instr->context()).is(cp)); 2644 DCHECK(ToRegister(instr->context()).is(cp));
2612 Token::Value op = instr->op(); 2645 Token::Value op = instr->op();
2613 2646
2614 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); 2647 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2615 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2648 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2616 // This instruction also signals no smi code inlined. 2649 // This instruction also signals no smi code inlined.
2617 __ cmp(r0, Operand::Zero()); 2650 __ cmpi(r3, Operand::Zero());
2618 2651
2619 Condition condition = ComputeCompareCondition(op); 2652 Condition condition = ComputeCompareCondition(op);
2620 2653
2621 EmitBranch(instr, condition); 2654 EmitBranch(instr, condition);
2622 } 2655 }
2623 2656
2624 2657
2625 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2658 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2626 InstanceType from = instr->from(); 2659 InstanceType from = instr->from();
2627 InstanceType to = instr->to(); 2660 InstanceType to = instr->to();
2628 if (from == FIRST_TYPE) return to; 2661 if (from == FIRST_TYPE) return to;
2629 DCHECK(from == to || to == LAST_TYPE); 2662 DCHECK(from == to || to == LAST_TYPE);
2630 return from; 2663 return from;
2631 } 2664 }
2632 2665
2633 2666
2634 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2667 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2635 InstanceType from = instr->from(); 2668 InstanceType from = instr->from();
2636 InstanceType to = instr->to(); 2669 InstanceType to = instr->to();
2637 if (from == to) return eq; 2670 if (from == to) return eq;
2638 if (to == LAST_TYPE) return hs; 2671 if (to == LAST_TYPE) return ge;
2639 if (from == FIRST_TYPE) return ls; 2672 if (from == FIRST_TYPE) return le;
2640 UNREACHABLE(); 2673 UNREACHABLE();
2641 return eq; 2674 return eq;
2642 } 2675 }
2643 2676
2644 2677
2645 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { 2678 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2646 Register scratch = scratch0(); 2679 Register scratch = scratch0();
2647 Register input = ToRegister(instr->value()); 2680 Register input = ToRegister(instr->value());
2648 2681
2649 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2682 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2650 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2683 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2651 } 2684 }
2652 2685
2653 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); 2686 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2654 EmitBranch(instr, BranchCondition(instr->hydrogen())); 2687 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2655 } 2688 }
2656 2689
2657 2690
2658 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2691 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2659 Register input = ToRegister(instr->value()); 2692 Register input = ToRegister(instr->value());
2660 Register result = ToRegister(instr->result()); 2693 Register result = ToRegister(instr->result());
2661 2694
2662 __ AssertString(input); 2695 __ AssertString(input);
2663 2696
2664 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset)); 2697 __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
2665 __ IndexFromHash(result, result); 2698 __ IndexFromHash(result, result);
2666 } 2699 }
2667 2700
2668 2701
2669 void LCodeGen::DoHasCachedArrayIndexAndBranch( 2702 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2670 LHasCachedArrayIndexAndBranch* instr) { 2703 LHasCachedArrayIndexAndBranch* instr) {
2671 Register input = ToRegister(instr->value()); 2704 Register input = ToRegister(instr->value());
2672 Register scratch = scratch0(); 2705 Register scratch = scratch0();
2673 2706
2674 __ ldr(scratch, 2707 __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
2675 FieldMemOperand(input, String::kHashFieldOffset)); 2708 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
2676 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); 2709 __ and_(r0, scratch, r0, SetRC);
2677 EmitBranch(instr, eq); 2710 EmitBranch(instr, eq, cr0);
2678 } 2711 }
2679 2712
2680 2713
2681 // Branches to a label or falls through with the answer in flags. Trashes 2714 // Branches to a label or falls through with the answer in flags. Trashes
2682 // the temp registers, but not the input. 2715 // the temp registers, but not the input.
2683 void LCodeGen::EmitClassOfTest(Label* is_true, 2716 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
2684 Label* is_false, 2717 Handle<String> class_name, Register input,
2685 Handle<String>class_name, 2718 Register temp, Register temp2) {
2686 Register input,
2687 Register temp,
2688 Register temp2) {
2689 DCHECK(!input.is(temp)); 2719 DCHECK(!input.is(temp));
2690 DCHECK(!input.is(temp2)); 2720 DCHECK(!input.is(temp2));
2691 DCHECK(!temp.is(temp2)); 2721 DCHECK(!temp.is(temp2));
2692 2722
2693 __ JumpIfSmi(input, is_false); 2723 __ JumpIfSmi(input, is_false);
2694 2724
2695 if (String::Equals(isolate()->factory()->Function_string(), class_name)) { 2725 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2696 // Assuming the following assertions, we can use the same compares to test 2726 // Assuming the following assertions, we can use the same compares to test
2697 // for both being a function type and being in the object type range. 2727 // for both being a function type and being in the object type range.
2698 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 2728 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2699 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == 2729 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2700 FIRST_SPEC_OBJECT_TYPE + 1); 2730 FIRST_SPEC_OBJECT_TYPE + 1);
2701 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == 2731 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2702 LAST_SPEC_OBJECT_TYPE - 1); 2732 LAST_SPEC_OBJECT_TYPE - 1);
2703 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); 2733 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2704 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); 2734 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2705 __ b(lt, is_false); 2735 __ blt(is_false);
2706 __ b(eq, is_true); 2736 __ beq(is_true);
2707 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); 2737 __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2708 __ b(eq, is_true); 2738 __ beq(is_true);
2709 } else { 2739 } else {
2710 // Faster code path to avoid two compares: subtract lower bound from the 2740 // Faster code path to avoid two compares: subtract lower bound from the
2711 // actual type and do a signed compare with the width of the type range. 2741 // actual type and do a signed compare with the width of the type range.
2712 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2742 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2713 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); 2743 __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2714 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2744 __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2715 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - 2745 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2716 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2746 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2717 __ b(gt, is_false); 2747 __ bgt(is_false);
2718 } 2748 }
2719 2749
2720 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. 2750 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2721 // Check if the constructor in the map is a function. 2751 // Check if the constructor in the map is a function.
2722 __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); 2752 __ LoadP(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2723 2753
2724 // Objects with a non-function constructor have class 'Object'. 2754 // Objects with a non-function constructor have class 'Object'.
2725 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); 2755 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2726 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) { 2756 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
2727 __ b(ne, is_true); 2757 __ bne(is_true);
2728 } else { 2758 } else {
2729 __ b(ne, is_false); 2759 __ bne(is_false);
2730 } 2760 }
2731 2761
2732 // temp now contains the constructor function. Grab the 2762 // temp now contains the constructor function. Grab the
2733 // instance class name from there. 2763 // instance class name from there.
2734 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 2764 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2735 __ ldr(temp, FieldMemOperand(temp, 2765 __ LoadP(temp,
2736 SharedFunctionInfo::kInstanceClassNameOffset)); 2766 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
2737 // The class name we are testing against is internalized since it's a literal. 2767 // The class name we are testing against is internalized since it's a literal.
2738 // The name in the constructor is internalized because of the way the context 2768 // The name in the constructor is internalized because of the way the context
2739 // is booted. This routine isn't expected to work for random API-created 2769 // is booted. This routine isn't expected to work for random API-created
2740 // classes and it doesn't have to because you can't access it with natives 2770 // classes and it doesn't have to because you can't access it with natives
2741 // syntax. Since both sides are internalized it is sufficient to use an 2771 // syntax. Since both sides are internalized it is sufficient to use an
2742 // identity comparison. 2772 // identity comparison.
2743 __ cmp(temp, Operand(class_name)); 2773 __ Cmpi(temp, Operand(class_name), r0);
2744 // End with the answer in flags. 2774 // End with the answer in flags.
2745 } 2775 }
2746 2776
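The non-Function path above uses the standard one-comparison range test: subtract the lower bound once, then a single signed compare against the width of the range decides whether the type lies past the upper bound. A minimal stand-alone C++ sketch of that idiom (illustrative only; the bounds are plain parameters here, not the real V8 instance-type constants):

#include <cstdint>

// Mirrors the subi / cmpi / bgt sequence emitted above: one subtraction plus
// one comparison against (last - first) replaces two separate bound checks.
static inline bool PastUpperBound(int32_t instance_type, int32_t first,
                                  int32_t last) {
  return (instance_type - first) > (last - first);
}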
2747 2777
2748 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 2778 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2749 Register input = ToRegister(instr->value()); 2779 Register input = ToRegister(instr->value());
2750 Register temp = scratch0(); 2780 Register temp = scratch0();
2751 Register temp2 = ToRegister(instr->temp()); 2781 Register temp2 = ToRegister(instr->temp());
2752 Handle<String> class_name = instr->hydrogen()->class_name(); 2782 Handle<String> class_name = instr->hydrogen()->class_name();
2753 2783
2754 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), 2784 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2755 class_name, input, temp, temp2); 2785 class_name, input, temp, temp2);
2756 2786
2757 EmitBranch(instr, eq); 2787 EmitBranch(instr, eq);
2758 } 2788 }
2759 2789
2760 2790
2761 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2791 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2762 Register reg = ToRegister(instr->value()); 2792 Register reg = ToRegister(instr->value());
2763 Register temp = ToRegister(instr->temp()); 2793 Register temp = ToRegister(instr->temp());
2764 2794
2765 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); 2795 __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2766 __ cmp(temp, Operand(instr->map())); 2796 __ Cmpi(temp, Operand(instr->map()), r0);
2767 EmitBranch(instr, eq); 2797 EmitBranch(instr, eq);
2768 } 2798 }
2769 2799
2770 2800
2771 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 2801 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2772 DCHECK(ToRegister(instr->context()).is(cp)); 2802 DCHECK(ToRegister(instr->context()).is(cp));
2773 DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0. 2803 DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
2774 DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1. 2804 DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
2775 2805
2776 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); 2806 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2777 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2807 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2778 2808
2779 __ cmp(r0, Operand::Zero()); 2809 Label equal, done;
2780 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); 2810 __ cmpi(r3, Operand::Zero());
2781 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); 2811 __ beq(&equal);
2812 __ mov(r3, Operand(factory()->false_value()));
2813 __ b(&done);
2814
2815 __ bind(&equal);
2816 __ mov(r3, Operand(factory()->true_value()));
2817 __ bind(&done);
2782 } 2818 }
2783 2819
2784 2820
2785 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { 2821 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2786 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode { 2822 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2787 public: 2823 public:
2788 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, 2824 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2789 LInstanceOfKnownGlobal* instr) 2825 LInstanceOfKnownGlobal* instr)
2790 : LDeferredCode(codegen), instr_(instr) { } 2826 : LDeferredCode(codegen), instr_(instr) {}
2791 virtual void Generate() OVERRIDE { 2827 virtual void Generate() OVERRIDE {
2792 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_, 2828 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2793 &load_bool_);
2794 } 2829 }
2795 virtual LInstruction* instr() OVERRIDE { return instr_; } 2830 virtual LInstruction* instr() OVERRIDE { return instr_; }
2796 Label* map_check() { return &map_check_; } 2831 Label* map_check() { return &map_check_; }
2797 Label* load_bool() { return &load_bool_; }
2798 2832
2799 private: 2833 private:
2800 LInstanceOfKnownGlobal* instr_; 2834 LInstanceOfKnownGlobal* instr_;
2801 Label map_check_; 2835 Label map_check_;
2802 Label load_bool_;
2803 }; 2836 };
2804 2837
2805 DeferredInstanceOfKnownGlobal* deferred; 2838 DeferredInstanceOfKnownGlobal* deferred;
2806 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); 2839 deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
2807 2840
2808 Label done, false_result; 2841 Label done, false_result;
2809 Register object = ToRegister(instr->value()); 2842 Register object = ToRegister(instr->value());
2810 Register temp = ToRegister(instr->temp()); 2843 Register temp = ToRegister(instr->temp());
2811 Register result = ToRegister(instr->result()); 2844 Register result = ToRegister(instr->result());
2812 2845
2813 // A Smi is not an instance of anything. 2846 // A Smi is not an instance of anything.
2814 __ JumpIfSmi(object, &false_result); 2847 __ JumpIfSmi(object, &false_result);
2815 2848
2816 // This is the inlined call site instanceof cache. The two occurrences of the 2849 // This is the inlined call site instanceof cache. The two occurrences of the
2817 // hole value will be patched to the last map/result pair generated by the 2850 // hole value will be patched to the last map/result pair generated by the
2818 // instanceof stub. 2851 // instanceof stub.
2819 Label cache_miss; 2852 Label cache_miss;
2820 Register map = temp; 2853 Register map = temp;
2821 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); 2854 __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
2822 { 2855 {
2823 // Block constant pool emission to ensure the positions of instructions are 2856 // Block constant pool emission to ensure the positions of instructions are
2824 // as expected by the patcher. See InstanceofStub::Generate(). 2857 // as expected by the patcher. See InstanceofStub::Generate().
2825 Assembler::BlockConstPoolScope block_const_pool(masm()); 2858 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2826 __ bind(deferred->map_check()); // Label for calculating code patching. 2859 __ bind(deferred->map_check()); // Label for calculating code patching.
2827 // We use Factory::the_hole_value() on purpose instead of loading from the 2860 // We use Factory::the_hole_value() on purpose instead of loading from the
2828 // root array to force relocation to be able to later patch with 2861 // root array to force relocation to be able to later patch with
2829 // the cached map. 2862 // the cached map.
2830 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); 2863 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2831 __ mov(ip, Operand(Handle<Object>(cell))); 2864 __ mov(ip, Operand(Handle<Object>(cell)));
2832 __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset)); 2865 __ LoadP(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
2833 __ cmp(map, Operand(ip)); 2866 __ cmp(map, ip);
2834 __ b(ne, &cache_miss); 2867 __ bne(&cache_miss);
2835 __ bind(deferred->load_bool()); // Label for calculating code patching.
2836 // We use Factory::the_hole_value() on purpose instead of loading from the 2868 // We use Factory::the_hole_value() on purpose instead of loading from the
2837 // root array to force relocation to be able to later patch 2869 // root array to force relocation to be able to later patch
2838 // with true or false. 2870 // with true or false.
2839 __ mov(result, Operand(factory()->the_hole_value())); 2871 __ mov(result, Operand(factory()->the_hole_value()));
2840 } 2872 }
2841 __ b(&done); 2873 __ b(&done);
2842 2874
2843 // The inlined call site cache did not match. Check null and string before 2875 // The inlined call site cache did not match. Check null and string before
2844 // calling the deferred code. 2876 // calling the deferred code.
2845 __ bind(&cache_miss); 2877 __ bind(&cache_miss);
2846 // Null is not an instance of anything. 2878 // Null is not an instance of anything.
2847 __ LoadRoot(ip, Heap::kNullValueRootIndex); 2879 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2848 __ cmp(object, Operand(ip)); 2880 __ cmp(object, ip);
2849 __ b(eq, &false_result); 2881 __ beq(&false_result);
2850 2882
2851 // String values are not instances of anything. 2883 // String values are not instances of anything.
2852 Condition is_string = masm_->IsObjectStringType(object, temp); 2884 Condition is_string = masm_->IsObjectStringType(object, temp);
2853 __ b(is_string, &false_result); 2885 __ b(is_string, &false_result, cr0);
2854 2886
2855 // Go to the deferred code. 2887 // Go to the deferred code.
2856 __ b(deferred->entry()); 2888 __ b(deferred->entry());
2857 2889
2858 __ bind(&false_result); 2890 __ bind(&false_result);
2859 __ LoadRoot(result, Heap::kFalseValueRootIndex); 2891 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2860 2892
2861 // Here result has either true or false. Deferred code also produces true or 2893 // Here result has either true or false. Deferred code also produces true or
2862 // false object. 2894 // false object.
2863 __ bind(deferred->exit()); 2895 __ bind(deferred->exit());
2864 __ bind(&done); 2896 __ bind(&done);
2865 } 2897 }
2866 2898
2867 2899
2868 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, 2900 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2869 Label* map_check, 2901 Label* map_check) {
2870 Label* bool_load) {
2871 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; 2902 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2872 flags = static_cast<InstanceofStub::Flags>( 2903 flags = static_cast<InstanceofStub::Flags>(flags |
2873 flags | InstanceofStub::kArgsInRegisters); 2904 InstanceofStub::kArgsInRegisters);
2874 flags = static_cast<InstanceofStub::Flags>( 2905 flags = static_cast<InstanceofStub::Flags>(
2875 flags | InstanceofStub::kCallSiteInlineCheck); 2906 flags | InstanceofStub::kCallSiteInlineCheck);
2876 flags = static_cast<InstanceofStub::Flags>( 2907 flags = static_cast<InstanceofStub::Flags>(
2877 flags | InstanceofStub::kReturnTrueFalseObject); 2908 flags | InstanceofStub::kReturnTrueFalseObject);
2878 InstanceofStub stub(isolate(), flags); 2909 InstanceofStub stub(isolate(), flags);
2879 2910
2880 PushSafepointRegistersScope scope(this); 2911 PushSafepointRegistersScope scope(this);
2881 LoadContextFromDeferred(instr->context()); 2912 LoadContextFromDeferred(instr->context());
2882 2913
2883 __ Move(InstanceofStub::right(), instr->function()); 2914 __ Move(InstanceofStub::right(), instr->function());
2884 2915 // Include instructions below in delta: mov + call = mov + (mov + 2)
2885 int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET); 2916 static const int kAdditionalDelta = (2 * Assembler::kMovInstructions) + 2;
2886 int additional_delta = (call_size / Assembler::kInstrSize) + 4; 2917 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2887 // Make sure that code size is predictable, since we use specific constant
2888 // offsets in the code to find embedded values.
2889 PredictableCodeSizeScope predictable(
2890 masm_, (additional_delta + 1) * Assembler::kInstrSize);
2891 // Make sure we don't emit any additional entries in the constant pool before
2892 // the call to ensure that the CallCodeSize() calculated the correct number of
2893 // instructions for the constant pool load.
2894 { 2918 {
2895 ConstantPoolUnavailableScope constant_pool_unavailable(masm_); 2919 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2896 int map_check_delta = 2920 // r8 is used to communicate the offset to the location of the map check.
2897 masm_->InstructionsGeneratedSince(map_check) + additional_delta; 2921 __ mov(r8, Operand(delta * Instruction::kInstrSize));
2898 int bool_load_delta =
2899 masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
2900 Label before_push_delta;
2901 __ bind(&before_push_delta);
2902 __ BlockConstPoolFor(additional_delta);
2903 // r5 is used to communicate the offset to the location of the map check.
2904 __ mov(r5, Operand(map_check_delta * kPointerSize));
2905 // r6 is used to communicate the offset to the location of the bool load.
2906 __ mov(r6, Operand(bool_load_delta * kPointerSize));
2907 // The mov above can generate one or two instructions. The delta was
2908 // computed for two instructions, so we need to pad here in case of one
2909 // instruction.
2910 while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
2911 __ nop();
2912 }
2913 } 2922 }
2914 CallCodeGeneric(stub.GetCode(), 2923 CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
2915 RelocInfo::CODE_TARGET,
2916 instr,
2917 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 2924 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2925 DCHECK(delta == masm_->InstructionsGeneratedSince(map_check));
2918 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); 2926 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2919 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2927 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2920 // Put the result value (r0) into the result register slot and 2928 // Put the result value (r3) into the result register slot and
2921 // restore all registers. 2929 // restore all registers.
2922 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result())); 2930 __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
2923 } 2931 }
2924 2932
2925 2933
2926 void LCodeGen::DoCmpT(LCmpT* instr) { 2934 void LCodeGen::DoCmpT(LCmpT* instr) {
2927 DCHECK(ToRegister(instr->context()).is(cp)); 2935 DCHECK(ToRegister(instr->context()).is(cp));
2928 Token::Value op = instr->op(); 2936 Token::Value op = instr->op();
2929 2937
2930 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); 2938 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2931 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2939 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2932 // This instruction also signals no smi code inlined. 2940 // This instruction also signals no smi code inlined.
2933 __ cmp(r0, Operand::Zero()); 2941 __ cmpi(r3, Operand::Zero());
2934 2942
2935 Condition condition = ComputeCompareCondition(op); 2943 Condition condition = ComputeCompareCondition(op);
2936 __ LoadRoot(ToRegister(instr->result()), 2944 Label true_value, done;
2937 Heap::kTrueValueRootIndex, 2945
2938 condition); 2946 __ b(condition, &true_value);
2939 __ LoadRoot(ToRegister(instr->result()), 2947
2940 Heap::kFalseValueRootIndex, 2948 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2941 NegateCondition(condition)); 2949 __ b(&done);
2950
2951 __ bind(&true_value);
2952 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2953
2954 __ bind(&done);
2942 } 2955 }
2943 2956
2944 2957
2945 void LCodeGen::DoReturn(LReturn* instr) { 2958 void LCodeGen::DoReturn(LReturn* instr) {
2946 if (FLAG_trace && info()->IsOptimizing()) { 2959 if (FLAG_trace && info()->IsOptimizing()) {
2947 // Push the return value on the stack as the parameter. 2960 // Push the return value on the stack as the parameter.
2948 // Runtime::TraceExit returns its parameter in r0. We're leaving the code 2961 // Runtime::TraceExit returns its parameter in r3. We're leaving the code
2949 // managed by the register allocator and tearing down the frame, so it's 2962 // managed by the register allocator and tearing down the frame, so it's
2950 // safe to write to the context register. 2963 // safe to write to the context register.
2951 __ push(r0); 2964 __ push(r3);
2952 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2965 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2953 __ CallRuntime(Runtime::kTraceExit, 1); 2966 __ CallRuntime(Runtime::kTraceExit, 1);
2954 } 2967 }
2955 if (info()->saves_caller_doubles()) { 2968 if (info()->saves_caller_doubles()) {
2956 RestoreCallerDoubles(); 2969 RestoreCallerDoubles();
2957 } 2970 }
2958 int no_frame_start = -1; 2971 int no_frame_start = -1;
2959 if (NeedsEagerFrame()) { 2972 if (NeedsEagerFrame()) {
2960 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); 2973 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2961 } 2974 }
2962 { ConstantPoolUnavailableScope constant_pool_unavailable(masm()); 2975 {
2976 ConstantPoolUnavailableScope constant_pool_unavailable(masm());
2963 if (instr->has_constant_parameter_count()) { 2977 if (instr->has_constant_parameter_count()) {
2964 int parameter_count = ToInteger32(instr->constant_parameter_count()); 2978 int parameter_count = ToInteger32(instr->constant_parameter_count());
2965 int32_t sp_delta = (parameter_count + 1) * kPointerSize; 2979 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2966 if (sp_delta != 0) { 2980 if (sp_delta != 0) {
2967 __ add(sp, sp, Operand(sp_delta)); 2981 __ addi(sp, sp, Operand(sp_delta));
2968 } 2982 }
2969 } else { 2983 } else {
2970 Register reg = ToRegister(instr->parameter_count()); 2984 Register reg = ToRegister(instr->parameter_count());
2971 // The argument count parameter is a smi 2985 // The argument count parameter is a smi
2972 __ SmiUntag(reg); 2986 __ SmiToPtrArrayOffset(r0, reg);
2973 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2)); 2987 __ add(sp, sp, r0);
2974 } 2988 }
2975 2989
2976 __ Jump(lr); 2990 __ blr();
2977 2991
2978 if (no_frame_start != -1) { 2992 if (no_frame_start != -1) {
2979 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); 2993 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2980 } 2994 }
2981 } 2995 }
2982 } 2996 }
2983 2997
2984 2998
2985 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2999 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2986 Register result = ToRegister(instr->result()); 3000 Register result = ToRegister(instr->result());
2987 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); 3001 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2988 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); 3002 __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset));
2989 if (instr->hydrogen()->RequiresHoleCheck()) { 3003 if (instr->hydrogen()->RequiresHoleCheck()) {
2990 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3004 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2991 __ cmp(result, ip); 3005 __ cmp(result, ip);
2992 DeoptimizeIf(eq, instr->environment()); 3006 DeoptimizeIf(eq, instr);
2993 } 3007 }
2994 } 3008 }
2995 3009
2996 3010
2997 template <class T> 3011 template <class T>
2998 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { 3012 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2999 DCHECK(FLAG_vector_ics); 3013 DCHECK(FLAG_vector_ics);
3000 Register vector = ToRegister(instr->temp_vector()); 3014 Register vector = ToRegister(instr->temp_vector());
3001 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); 3015 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
3002 __ Move(vector, instr->hydrogen()->feedback_vector()); 3016 __ Move(vector, instr->hydrogen()->feedback_vector());
3003 // No need to allocate this register. 3017 // No need to allocate this register.
3004 DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0)); 3018 DCHECK(VectorLoadICDescriptor::SlotRegister().is(r3));
3005 __ mov(VectorLoadICDescriptor::SlotRegister(), 3019 __ mov(VectorLoadICDescriptor::SlotRegister(),
3006 Operand(Smi::FromInt(instr->hydrogen()->slot()))); 3020 Operand(Smi::FromInt(instr->hydrogen()->slot())));
3007 } 3021 }
3008 3022
3009 3023
3010 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 3024 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3011 DCHECK(ToRegister(instr->context()).is(cp)); 3025 DCHECK(ToRegister(instr->context()).is(cp));
3012 DCHECK(ToRegister(instr->global_object()) 3026 DCHECK(ToRegister(instr->global_object())
3013 .is(LoadDescriptor::ReceiverRegister())); 3027 .is(LoadDescriptor::ReceiverRegister()));
3014 DCHECK(ToRegister(instr->result()).is(r0)); 3028 DCHECK(ToRegister(instr->result()).is(r3));
3015 3029
3016 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name())); 3030 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3017 if (FLAG_vector_ics) { 3031 if (FLAG_vector_ics) {
3018 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr); 3032 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3019 } 3033 }
3020 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; 3034 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3021 Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code(); 3035 Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
3022 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3036 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3023 } 3037 }
3024 3038
3025 3039
3026 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { 3040 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3027 Register value = ToRegister(instr->value()); 3041 Register value = ToRegister(instr->value());
3028 Register cell = scratch0(); 3042 Register cell = scratch0();
3029 3043
3030 // Load the cell. 3044 // Load the cell.
3031 __ mov(cell, Operand(instr->hydrogen()->cell().handle())); 3045 __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
3032 3046
3033 // If the cell we are storing to contains the hole it could have 3047 // If the cell we are storing to contains the hole it could have
3034 // been deleted from the property dictionary. In that case, we need 3048 // been deleted from the property dictionary. In that case, we need
3035 // to update the property details in the property dictionary to mark 3049 // to update the property details in the property dictionary to mark
3036 // it as no longer deleted. 3050 // it as no longer deleted.
3037 if (instr->hydrogen()->RequiresHoleCheck()) { 3051 if (instr->hydrogen()->RequiresHoleCheck()) {
3038 // We use a temp to check the payload (CompareRoot might clobber ip). 3052 // We use a temp to check the payload (CompareRoot might clobber ip).
3039 Register payload = ToRegister(instr->temp()); 3053 Register payload = ToRegister(instr->temp());
3040 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); 3054 __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
3041 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); 3055 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
3042 DeoptimizeIf(eq, instr->environment()); 3056 DeoptimizeIf(eq, instr);
3043 } 3057 }
3044 3058
3045 // Store the value. 3059 // Store the value.
3046 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); 3060 __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0);
3047 // Cells are always rescanned, so no write barrier here. 3061 // Cells are always rescanned, so no write barrier here.
3048 } 3062 }
3049 3063
3050 3064
3051 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 3065 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3052 Register context = ToRegister(instr->context()); 3066 Register context = ToRegister(instr->context());
3053 Register result = ToRegister(instr->result()); 3067 Register result = ToRegister(instr->result());
3054 __ ldr(result, ContextOperand(context, instr->slot_index())); 3068 __ LoadP(result, ContextOperand(context, instr->slot_index()));
3055 if (instr->hydrogen()->RequiresHoleCheck()) { 3069 if (instr->hydrogen()->RequiresHoleCheck()) {
3056 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3070 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3057 __ cmp(result, ip); 3071 __ cmp(result, ip);
3058 if (instr->hydrogen()->DeoptimizesOnHole()) { 3072 if (instr->hydrogen()->DeoptimizesOnHole()) {
3059 DeoptimizeIf(eq, instr->environment()); 3073 DeoptimizeIf(eq, instr);
3060 } else { 3074 } else {
3061 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); 3075 Label skip;
3076 __ bne(&skip);
3077 __ mov(result, Operand(factory()->undefined_value()));
3078 __ bind(&skip);
3062 } 3079 }
3063 } 3080 }
3064 } 3081 }
3065 3082
3066 3083
3067 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 3084 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3068 Register context = ToRegister(instr->context()); 3085 Register context = ToRegister(instr->context());
3069 Register value = ToRegister(instr->value()); 3086 Register value = ToRegister(instr->value());
3070 Register scratch = scratch0(); 3087 Register scratch = scratch0();
3071 MemOperand target = ContextOperand(context, instr->slot_index()); 3088 MemOperand target = ContextOperand(context, instr->slot_index());
3072 3089
3073 Label skip_assignment; 3090 Label skip_assignment;
3074 3091
3075 if (instr->hydrogen()->RequiresHoleCheck()) { 3092 if (instr->hydrogen()->RequiresHoleCheck()) {
3076 __ ldr(scratch, target); 3093 __ LoadP(scratch, target);
3077 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3094 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3078 __ cmp(scratch, ip); 3095 __ cmp(scratch, ip);
3079 if (instr->hydrogen()->DeoptimizesOnHole()) { 3096 if (instr->hydrogen()->DeoptimizesOnHole()) {
3080 DeoptimizeIf(eq, instr->environment()); 3097 DeoptimizeIf(eq, instr);
3081 } else { 3098 } else {
3082 __ b(ne, &skip_assignment); 3099 __ bne(&skip_assignment);
3083 } 3100 }
3084 } 3101 }
3085 3102
3086 __ str(value, target); 3103 __ StoreP(value, target, r0);
3087 if (instr->hydrogen()->NeedsWriteBarrier()) { 3104 if (instr->hydrogen()->NeedsWriteBarrier()) {
3088 SmiCheck check_needed = 3105 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
3089 instr->hydrogen()->value()->type().IsHeapObject() 3106 ? OMIT_SMI_CHECK
3090 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 3107 : INLINE_SMI_CHECK;
3091 __ RecordWriteContextSlot(context, 3108 __ RecordWriteContextSlot(context, target.offset(), value, scratch,
3092 target.offset(), 3109 GetLinkRegisterState(), kSaveFPRegs,
3093 value, 3110 EMIT_REMEMBERED_SET, check_needed);
3094 scratch,
3095 GetLinkRegisterState(),
3096 kSaveFPRegs,
3097 EMIT_REMEMBERED_SET,
3098 check_needed);
3099 } 3111 }
3100 3112
3101 __ bind(&skip_assignment); 3113 __ bind(&skip_assignment);
3102 } 3114 }
3103 3115
3104 3116
3105 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 3117 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3106 HObjectAccess access = instr->hydrogen()->access(); 3118 HObjectAccess access = instr->hydrogen()->access();
3107 int offset = access.offset(); 3119 int offset = access.offset();
3108 Register object = ToRegister(instr->object()); 3120 Register object = ToRegister(instr->object());
3109 3121
3110 if (access.IsExternalMemory()) { 3122 if (access.IsExternalMemory()) {
3111 Register result = ToRegister(instr->result()); 3123 Register result = ToRegister(instr->result());
3112 MemOperand operand = MemOperand(object, offset); 3124 MemOperand operand = MemOperand(object, offset);
3113 __ Load(result, operand, access.representation()); 3125 __ LoadRepresentation(result, operand, access.representation(), r0);
3114 return; 3126 return;
3115 } 3127 }
3116 3128
3117 if (instr->hydrogen()->representation().IsDouble()) { 3129 if (instr->hydrogen()->representation().IsDouble()) {
3118 DwVfpRegister result = ToDoubleRegister(instr->result()); 3130 DoubleRegister result = ToDoubleRegister(instr->result());
3119 __ vldr(result, FieldMemOperand(object, offset)); 3131 __ lfd(result, FieldMemOperand(object, offset));
3120 return; 3132 return;
3121 } 3133 }
3122 3134
3123 Register result = ToRegister(instr->result()); 3135 Register result = ToRegister(instr->result());
3124 if (!access.IsInobject()) { 3136 if (!access.IsInobject()) {
3125 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 3137 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3126 object = result; 3138 object = result;
3127 } 3139 }
3128 MemOperand operand = FieldMemOperand(object, offset); 3140
3129 __ Load(result, operand, access.representation()); 3141 Representation representation = access.representation();
3142
3143 #if V8_TARGET_ARCH_PPC64
3144 // 64-bit Smi optimization
3145 if (representation.IsSmi() &&
3146 instr->hydrogen()->representation().IsInteger32()) {
3147 // Read int value directly from upper half of the smi.
3148 STATIC_ASSERT(kSmiTag == 0);
3149 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3150 #if V8_TARGET_LITTLE_ENDIAN
3151 offset += kPointerSize / 2;
3152 #endif
3153 representation = Representation::Integer32();
3154 }
3155 #endif
3156
3157 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
3158 r0);
3130 } 3159 }
3131 3160
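The V8_TARGET_ARCH_PPC64 block above reads the untagged integer straight out of the upper half of a Smi field. A rough stand-alone illustration of the layout that optimization assumes (kSmiTagSize + kSmiShiftSize == 32, as the STATIC_ASSERTs check; this is a sketch, not V8 code):

#include <cstdint>

// On 64-bit targets the Smi payload occupies the upper 32 bits and the low
// word is all tag (zero) bits, so loading only the upper word (offset +4 on
// little-endian, +0 on big-endian) already yields the untagged value.
static inline int64_t MakeSmi(int32_t value) {
  return static_cast<int64_t>(value) << 32;
}
static inline int32_t SmiPayload(int64_t raw_smi) {
  return static_cast<int32_t>(raw_smi >> 32);
}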
3132 3161
3133 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 3162 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3134 DCHECK(ToRegister(instr->context()).is(cp)); 3163 DCHECK(ToRegister(instr->context()).is(cp));
3135 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3164 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3136 DCHECK(ToRegister(instr->result()).is(r0)); 3165 DCHECK(ToRegister(instr->result()).is(r3));
3137 3166
3138 // Name is always in r2. 3167 // Name is always in r5.
3139 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name())); 3168 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3140 if (FLAG_vector_ics) { 3169 if (FLAG_vector_ics) {
3141 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr); 3170 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3142 } 3171 }
3143 Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code(); 3172 Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3144 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 3173 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3145 } 3174 }
3146 3175
3147 3176
3148 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 3177 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3149 Register scratch = scratch0(); 3178 Register scratch = scratch0();
3150 Register function = ToRegister(instr->function()); 3179 Register function = ToRegister(instr->function());
3151 Register result = ToRegister(instr->result()); 3180 Register result = ToRegister(instr->result());
3152 3181
3153 // Get the prototype or initial map from the function. 3182 // Get the prototype or initial map from the function.
3154 __ ldr(result, 3183 __ LoadP(result,
3155 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3184 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3156 3185
3157 // Check that the function has a prototype or an initial map. 3186 // Check that the function has a prototype or an initial map.
3158 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3187 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3159 __ cmp(result, ip); 3188 __ cmp(result, ip);
3160 DeoptimizeIf(eq, instr->environment()); 3189 DeoptimizeIf(eq, instr);
3161 3190
3162 // If the function does not have an initial map, we're done. 3191 // If the function does not have an initial map, we're done.
3163 Label done; 3192 Label done;
3164 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); 3193 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3165 __ b(ne, &done); 3194 __ bne(&done);
3166 3195
3167 // Get the prototype from the initial map. 3196 // Get the prototype from the initial map.
3168 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); 3197 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
3169 3198
3170 // All done. 3199 // All done.
3171 __ bind(&done); 3200 __ bind(&done);
3172 } 3201 }
3173 3202
3174 3203
3175 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 3204 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3176 Register result = ToRegister(instr->result()); 3205 Register result = ToRegister(instr->result());
3177 __ LoadRoot(result, instr->index()); 3206 __ LoadRoot(result, instr->index());
3178 } 3207 }
3179 3208
3180 3209
3181 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 3210 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3182 Register arguments = ToRegister(instr->arguments()); 3211 Register arguments = ToRegister(instr->arguments());
3183 Register result = ToRegister(instr->result()); 3212 Register result = ToRegister(instr->result());
3184 // There are two words between the frame pointer and the last argument. 3213 // There are two words between the frame pointer and the last argument.
3185 // Subtracting from length accounts for one of them; add one more. 3214 // Subtracting from length accounts for one of them; add one more.
3186 if (instr->length()->IsConstantOperand()) { 3215 if (instr->length()->IsConstantOperand()) {
3187 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); 3216 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3188 if (instr->index()->IsConstantOperand()) { 3217 if (instr->index()->IsConstantOperand()) {
3189 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3218 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3190 int index = (const_length - const_index) + 1; 3219 int index = (const_length - const_index) + 1;
3191 __ ldr(result, MemOperand(arguments, index * kPointerSize)); 3220 __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
3192 } else { 3221 } else {
3193 Register index = ToRegister(instr->index()); 3222 Register index = ToRegister(instr->index());
3194 __ rsb(result, index, Operand(const_length + 1)); 3223 __ subfic(result, index, Operand(const_length + 1));
3195 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); 3224 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3225 __ LoadPX(result, MemOperand(arguments, result));
3196 } 3226 }
3197 } else if (instr->index()->IsConstantOperand()) { 3227 } else if (instr->index()->IsConstantOperand()) {
3198 Register length = ToRegister(instr->length()); 3228 Register length = ToRegister(instr->length());
3199 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3229 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3200 int loc = const_index - 1; 3230 int loc = const_index - 1;
3201 if (loc != 0) { 3231 if (loc != 0) {
3202 __ sub(result, length, Operand(loc)); 3232 __ subi(result, length, Operand(loc));
3203 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); 3233 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3204 } else { 3234 __ LoadPX(result, MemOperand(arguments, result));
3205 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3206 }
3207 } else { 3235 } else {
3236 __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
3237 __ LoadPX(result, MemOperand(arguments, result));
3238 }
3239 } else {
3208 Register length = ToRegister(instr->length()); 3240 Register length = ToRegister(instr->length());
3209 Register index = ToRegister(instr->index()); 3241 Register index = ToRegister(instr->index());
3210 __ sub(result, length, index); 3242 __ sub(result, length, index);
3211 __ add(result, result, Operand(1)); 3243 __ addi(result, result, Operand(1));
3212 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); 3244 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3245 __ LoadPX(result, MemOperand(arguments, result));
3213 } 3246 }
3214 } 3247 }
3215 3248
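A worked example of the index arithmetic used throughout this function (the helper below is invented for illustration and is not part of the patch): the slot loaded sits (length - index) + 1 words above the arguments register, which skips the two extra words mentioned in the comment above.

#include <cstdint>

// Byte offset of the requested argument relative to the 'arguments' register.
static inline intptr_t ArgumentSlotOffset(int length, int index,
                                          int pointer_size) {
  return (static_cast<intptr_t>(length - index) + 1) * pointer_size;
}
// Example: ArgumentSlotOffset(3, 0, 8) == 32, i.e. four words above the base.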
3216 3249
3217 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { 3250 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3218 Register external_pointer = ToRegister(instr->elements()); 3251 Register external_pointer = ToRegister(instr->elements());
3219 Register key = no_reg; 3252 Register key = no_reg;
3220 ElementsKind elements_kind = instr->elements_kind(); 3253 ElementsKind elements_kind = instr->elements_kind();
3221 bool key_is_constant = instr->key()->IsConstantOperand(); 3254 bool key_is_constant = instr->key()->IsConstantOperand();
3222 int constant_key = 0; 3255 int constant_key = 0;
3223 if (key_is_constant) { 3256 if (key_is_constant) {
3224 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3257 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3225 if (constant_key & 0xF0000000) { 3258 if (constant_key & 0xF0000000) {
3226 Abort(kArrayIndexConstantValueTooBig); 3259 Abort(kArrayIndexConstantValueTooBig);
3227 } 3260 }
3228 } else { 3261 } else {
3229 key = ToRegister(instr->key()); 3262 key = ToRegister(instr->key());
3230 } 3263 }
3231 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3264 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3232 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 3265 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3233 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3234 int base_offset = instr->base_offset(); 3266 int base_offset = instr->base_offset();
3235 3267
3236 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 3268 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3237 elements_kind == FLOAT32_ELEMENTS || 3269 elements_kind == FLOAT32_ELEMENTS ||
3238 elements_kind == EXTERNAL_FLOAT64_ELEMENTS || 3270 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3239 elements_kind == FLOAT64_ELEMENTS) { 3271 elements_kind == FLOAT64_ELEMENTS) {
3240 int base_offset = instr->base_offset(); 3272 DoubleRegister result = ToDoubleRegister(instr->result());
3241 DwVfpRegister result = ToDoubleRegister(instr->result()); 3273 if (key_is_constant) {
3242 Operand operand = key_is_constant 3274 __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
3243 ? Operand(constant_key << element_size_shift) 3275 r0);
3244 : Operand(key, LSL, shift_size); 3276 } else {
3245 __ add(scratch0(), external_pointer, operand); 3277 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3278 __ add(scratch0(), external_pointer, r0);
3279 }
3246 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 3280 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3247 elements_kind == FLOAT32_ELEMENTS) { 3281 elements_kind == FLOAT32_ELEMENTS) {
3248 __ vldr(double_scratch0().low(), scratch0(), base_offset); 3282 __ lfs(result, MemOperand(scratch0(), base_offset));
3249 __ vcvt_f64_f32(result, double_scratch0().low()); 3283 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3250 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS 3284 __ lfd(result, MemOperand(scratch0(), base_offset));
3251 __ vldr(result, scratch0(), base_offset);
3252 } 3285 }
3253 } else { 3286 } else {
3254 Register result = ToRegister(instr->result()); 3287 Register result = ToRegister(instr->result());
3255 MemOperand mem_operand = PrepareKeyedOperand( 3288 MemOperand mem_operand =
3256 key, external_pointer, key_is_constant, constant_key, 3289 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
3257 element_size_shift, shift_size, base_offset); 3290 constant_key, element_size_shift, base_offset);
3258 switch (elements_kind) { 3291 switch (elements_kind) {
3259 case EXTERNAL_INT8_ELEMENTS: 3292 case EXTERNAL_INT8_ELEMENTS:
3260 case INT8_ELEMENTS: 3293 case INT8_ELEMENTS:
3261 __ ldrsb(result, mem_operand); 3294 if (key_is_constant) {
3295 __ LoadByte(result, mem_operand, r0);
3296 } else {
3297 __ lbzx(result, mem_operand);
3298 }
3299 __ extsb(result, result);
3262 break; 3300 break;
3263 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: 3301 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3264 case EXTERNAL_UINT8_ELEMENTS: 3302 case EXTERNAL_UINT8_ELEMENTS:
3265 case UINT8_ELEMENTS: 3303 case UINT8_ELEMENTS:
3266 case UINT8_CLAMPED_ELEMENTS: 3304 case UINT8_CLAMPED_ELEMENTS:
3267 __ ldrb(result, mem_operand); 3305 if (key_is_constant) {
3306 __ LoadByte(result, mem_operand, r0);
3307 } else {
3308 __ lbzx(result, mem_operand);
3309 }
3268 break; 3310 break;
3269 case EXTERNAL_INT16_ELEMENTS: 3311 case EXTERNAL_INT16_ELEMENTS:
3270 case INT16_ELEMENTS: 3312 case INT16_ELEMENTS:
3271 __ ldrsh(result, mem_operand); 3313 if (key_is_constant) {
3314 __ LoadHalfWord(result, mem_operand, r0);
3315 } else {
3316 __ lhzx(result, mem_operand);
3317 }
3318 __ extsh(result, result);
3272 break; 3319 break;
3273 case EXTERNAL_UINT16_ELEMENTS: 3320 case EXTERNAL_UINT16_ELEMENTS:
3274 case UINT16_ELEMENTS: 3321 case UINT16_ELEMENTS:
3275 __ ldrh(result, mem_operand); 3322 if (key_is_constant) {
3323 __ LoadHalfWord(result, mem_operand, r0);
3324 } else {
3325 __ lhzx(result, mem_operand);
3326 }
3276 break; 3327 break;
3277 case EXTERNAL_INT32_ELEMENTS: 3328 case EXTERNAL_INT32_ELEMENTS:
3278 case INT32_ELEMENTS: 3329 case INT32_ELEMENTS:
3279 __ ldr(result, mem_operand); 3330 if (key_is_constant) {
3331 __ LoadWord(result, mem_operand, r0);
3332 } else {
3333 __ lwzx(result, mem_operand);
3334 }
3335 #if V8_TARGET_ARCH_PPC64
3336 __ extsw(result, result);
3337 #endif
3280 break; 3338 break;
3281 case EXTERNAL_UINT32_ELEMENTS: 3339 case EXTERNAL_UINT32_ELEMENTS:
3282 case UINT32_ELEMENTS: 3340 case UINT32_ELEMENTS:
3283 __ ldr(result, mem_operand); 3341 if (key_is_constant) {
3342 __ LoadWord(result, mem_operand, r0);
3343 } else {
3344 __ lwzx(result, mem_operand);
3345 }
3284 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3346 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3285 __ cmp(result, Operand(0x80000000)); 3347 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3286 DeoptimizeIf(cs, instr->environment()); 3348 __ cmplw(result, r0);
3349 DeoptimizeIf(ge, instr);
3287 } 3350 }
3288 break; 3351 break;
3289 case FLOAT32_ELEMENTS: 3352 case FLOAT32_ELEMENTS:
3290 case FLOAT64_ELEMENTS: 3353 case FLOAT64_ELEMENTS:
3291 case EXTERNAL_FLOAT32_ELEMENTS: 3354 case EXTERNAL_FLOAT32_ELEMENTS:
3292 case EXTERNAL_FLOAT64_ELEMENTS: 3355 case EXTERNAL_FLOAT64_ELEMENTS:
3293 case FAST_HOLEY_DOUBLE_ELEMENTS: 3356 case FAST_HOLEY_DOUBLE_ELEMENTS:
3294 case FAST_HOLEY_ELEMENTS: 3357 case FAST_HOLEY_ELEMENTS:
3295 case FAST_HOLEY_SMI_ELEMENTS: 3358 case FAST_HOLEY_SMI_ELEMENTS:
3296 case FAST_DOUBLE_ELEMENTS: 3359 case FAST_DOUBLE_ELEMENTS:
3297 case FAST_ELEMENTS: 3360 case FAST_ELEMENTS:
3298 case FAST_SMI_ELEMENTS: 3361 case FAST_SMI_ELEMENTS:
3299 case DICTIONARY_ELEMENTS: 3362 case DICTIONARY_ELEMENTS:
3300 case SLOPPY_ARGUMENTS_ELEMENTS: 3363 case SLOPPY_ARGUMENTS_ELEMENTS:
3301 UNREACHABLE(); 3364 UNREACHABLE();
3302 break; 3365 break;
3303 } 3366 }
3304 } 3367 }
3305 } 3368 }
3306 3369
3307 3370
3308 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 3371 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3309 Register elements = ToRegister(instr->elements()); 3372 Register elements = ToRegister(instr->elements());
3310 bool key_is_constant = instr->key()->IsConstantOperand(); 3373 bool key_is_constant = instr->key()->IsConstantOperand();
3311 Register key = no_reg; 3374 Register key = no_reg;
3312 DwVfpRegister result = ToDoubleRegister(instr->result()); 3375 DoubleRegister result = ToDoubleRegister(instr->result());
3313 Register scratch = scratch0(); 3376 Register scratch = scratch0();
3314 3377
3315 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); 3378 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3316 3379 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3317 int base_offset = instr->base_offset(); 3380 int constant_key = 0;
3318 if (key_is_constant) { 3381 if (key_is_constant) {
3319 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3382 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3320 if (constant_key & 0xF0000000) { 3383 if (constant_key & 0xF0000000) {
3321 Abort(kArrayIndexConstantValueTooBig); 3384 Abort(kArrayIndexConstantValueTooBig);
3322 } 3385 }
3323 base_offset += constant_key * kDoubleSize; 3386 } else {
3324 }
3325 __ add(scratch, elements, Operand(base_offset));
3326
3327 if (!key_is_constant) {
3328 key = ToRegister(instr->key()); 3387 key = ToRegister(instr->key());
3329 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3330 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3331 __ add(scratch, scratch, Operand(key, LSL, shift_size));
3332 } 3388 }
3333 3389
3334 __ vldr(result, scratch, 0); 3390 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
3391 if (!key_is_constant) {
3392 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3393 __ add(scratch, elements, r0);
3394 elements = scratch;
3395 }
3396 if (!is_int16(base_offset)) {
3397 __ Add(scratch, elements, base_offset, r0);
3398 base_offset = 0;
3399 elements = scratch;
3400 }
3401 __ lfd(result, MemOperand(elements, base_offset));
3335 3402
3336 if (instr->hydrogen()->RequiresHoleCheck()) { 3403 if (instr->hydrogen()->RequiresHoleCheck()) {
3337 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); 3404 if (is_int16(base_offset + Register::kExponentOffset)) {
3338 __ cmp(scratch, Operand(kHoleNanUpper32)); 3405 __ lwz(scratch,
3339 DeoptimizeIf(eq, instr->environment()); 3406 MemOperand(elements, base_offset + Register::kExponentOffset));
3407 } else {
3408 __ addi(scratch, elements, Operand(base_offset));
3409 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
3410 }
3411 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
3412 DeoptimizeIf(eq, instr);
3340 } 3413 }
3341 } 3414 }
3342 3415
3343 3416
3344 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3417 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3418 HLoadKeyed* hinstr = instr->hydrogen();
3345 Register elements = ToRegister(instr->elements()); 3419 Register elements = ToRegister(instr->elements());
3346 Register result = ToRegister(instr->result()); 3420 Register result = ToRegister(instr->result());
3347 Register scratch = scratch0(); 3421 Register scratch = scratch0();
3348 Register store_base = scratch; 3422 Register store_base = scratch;
3349 int offset = instr->base_offset(); 3423 int offset = instr->base_offset();
3350 3424
3351 if (instr->key()->IsConstantOperand()) { 3425 if (instr->key()->IsConstantOperand()) {
3352 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3426 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3353 offset += ToInteger32(const_operand) * kPointerSize; 3427 offset += ToInteger32(const_operand) * kPointerSize;
3354 store_base = elements; 3428 store_base = elements;
3355 } else { 3429 } else {
3356 Register key = ToRegister(instr->key()); 3430 Register key = ToRegister(instr->key());
3357 // Even though the HLoadKeyed instruction forces the input 3431 // Even though the HLoadKeyed instruction forces the input
3358 // representation for the key to be an integer, the input gets replaced 3432 // representation for the key to be an integer, the input gets replaced
3359 // during bound check elimination with the index argument to the bounds 3433 // during bound check elimination with the index argument to the bounds
3360 // check, which can be tagged, so that case must be handled here, too. 3434 // check, which can be tagged, so that case must be handled here, too.
3361 if (instr->hydrogen()->key()->representation().IsSmi()) { 3435 if (hinstr->key()->representation().IsSmi()) {
3362 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); 3436 __ SmiToPtrArrayOffset(r0, key);
3363 } else { 3437 } else {
3364 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); 3438 __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
3365 } 3439 }
3440 __ add(scratch, elements, r0);
3366 } 3441 }
3367 __ ldr(result, MemOperand(store_base, offset)); 3442
3443 bool requires_hole_check = hinstr->RequiresHoleCheck();
3444 Representation representation = hinstr->representation();
3445
3446 #if V8_TARGET_ARCH_PPC64
3447 // 64-bit Smi optimization
3448 if (representation.IsInteger32() &&
3449 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3450 DCHECK(!requires_hole_check);
3451 // Read int value directly from upper half of the smi.
3452 STATIC_ASSERT(kSmiTag == 0);
3453 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3454 #if V8_TARGET_LITTLE_ENDIAN
3455 offset += kPointerSize / 2;
3456 #endif
3457 }
3458 #endif
3459
3460 __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
3461 r0);
3368 3462
3369 // Check for the hole value. 3463 // Check for the hole value.
3370 if (instr->hydrogen()->RequiresHoleCheck()) { 3464 if (requires_hole_check) {
3371 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3465 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3372 __ SmiTst(result); 3466 __ TestIfSmi(result, r0);
3373 DeoptimizeIf(ne, instr->environment()); 3467 DeoptimizeIf(ne, instr, cr0);
3374 } else { 3468 } else {
3375 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3469 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3376 __ cmp(result, scratch); 3470 __ cmp(result, scratch);
3377 DeoptimizeIf(eq, instr->environment()); 3471 DeoptimizeIf(eq, instr);
3378 } 3472 }
3379 } 3473 }
3380 } 3474 }
3381 3475
3382 3476
3383 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 3477 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3384 if (instr->is_typed_elements()) { 3478 if (instr->is_typed_elements()) {
3385 DoLoadKeyedExternalArray(instr); 3479 DoLoadKeyedExternalArray(instr);
3386 } else if (instr->hydrogen()->representation().IsDouble()) { 3480 } else if (instr->hydrogen()->representation().IsDouble()) {
3387 DoLoadKeyedFixedDoubleArray(instr); 3481 DoLoadKeyedFixedDoubleArray(instr);
3388 } else { 3482 } else {
3389 DoLoadKeyedFixedArray(instr); 3483 DoLoadKeyedFixedArray(instr);
3390 } 3484 }
3391 } 3485 }
3392 3486
3393 3487
3394 MemOperand LCodeGen::PrepareKeyedOperand(Register key, 3488 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
3395 Register base, 3489 bool key_is_constant, bool key_is_smi,
3396 bool key_is_constant,
3397 int constant_key, 3490 int constant_key,
3398 int element_size, 3491 int element_size_shift,
3399 int shift_size,
3400 int base_offset) { 3492 int base_offset) {
3493 Register scratch = scratch0();
3494
3401 if (key_is_constant) { 3495 if (key_is_constant) {
3402 return MemOperand(base, (constant_key << element_size) + base_offset); 3496 return MemOperand(base, (constant_key << element_size_shift) + base_offset);
3403 } 3497 }
3404 3498
3405 if (base_offset == 0) { 3499 bool needs_shift =
3406 if (shift_size >= 0) { 3500 (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
3407 return MemOperand(base, key, LSL, shift_size); 3501
3408 } else { 3502 if (!(base_offset || needs_shift)) {
3409 DCHECK_EQ(-1, shift_size); 3503 return MemOperand(base, key);
3410 return MemOperand(base, key, LSR, 1);
3411 }
3412 } 3504 }
3413 3505
3414 if (shift_size >= 0) { 3506 if (needs_shift) {
3415 __ add(scratch0(), base, Operand(key, LSL, shift_size)); 3507 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3416 return MemOperand(scratch0(), base_offset); 3508 key = scratch;
3417 } else {
3418 DCHECK_EQ(-1, shift_size);
3419 __ add(scratch0(), base, Operand(key, ASR, 1));
3420 return MemOperand(scratch0(), base_offset);
3421 } 3509 }
3510
3511 if (base_offset) {
3512 __ Add(scratch, key, base_offset, r0);
3513 }
3514
3515 return MemOperand(base, scratch);
3422 } 3516 }
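For reference, the effective address PrepareKeyedOperand builds is base + scaled_key + base_offset, where a smi key already carries a left shift of kSmiTagSize + kSmiShiftSize and therefore only needs the difference from element_size_shift. A small sketch of that offset arithmetic under those assumptions (plain C++, not the V8 helper; names are illustrative):

#include <cassert>
#include <cstdint>

// Illustrative only: byte offset of element `key` of size 1 << element_size_shift.
int64_t KeyedByteOffset(int64_t key, bool key_is_smi, int element_size_shift,
                        int base_offset, int smi_shift) {
  int shift = element_size_shift - (key_is_smi ? smi_shift : 0);
  int64_t scaled = shift >= 0 ? key << shift : key >> -shift;
  return scaled + base_offset;
}

int main() {
  // 32-bit-style smi tagging (shift of 1): key 6 is tagged as 12; pointer-size
  // elements (shift 2) with an 8-byte header give offset 6 * 4 + 8.
  assert(KeyedByteOffset(12, true, 2, 8, 1) == 6 * 4 + 8);
  // Untagged key, byte-sized elements (shift 0), no header.
  assert(KeyedByteOffset(5, false, 0, 0, 1) == 5);
  return 0;
}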
3423 3517
3424 3518
3425 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3519 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3426 DCHECK(ToRegister(instr->context()).is(cp)); 3520 DCHECK(ToRegister(instr->context()).is(cp));
3427 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3521 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3428 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); 3522 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3429 3523
3430 if (FLAG_vector_ics) { 3524 if (FLAG_vector_ics) {
3431 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr); 3525 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3432 } 3526 }
3433 3527
3434 Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code(); 3528 Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3435 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 3529 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3436 } 3530 }
3437 3531
3438 3532
3439 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 3533 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3440 Register scratch = scratch0(); 3534 Register scratch = scratch0();
3441 Register result = ToRegister(instr->result()); 3535 Register result = ToRegister(instr->result());
3442 3536
3443 if (instr->hydrogen()->from_inlined()) { 3537 if (instr->hydrogen()->from_inlined()) {
3444 __ sub(result, sp, Operand(2 * kPointerSize)); 3538 __ subi(result, sp, Operand(2 * kPointerSize));
3445 } else { 3539 } else {
3446 // Check if the calling frame is an arguments adaptor frame. 3540 // Check if the calling frame is an arguments adaptor frame.
3447 Label done, adapted; 3541 Label done, adapted;
3448 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3542 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3449 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); 3543 __ LoadP(result,
3450 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3544 MemOperand(scratch, StandardFrameConstants::kContextOffset));
3545 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3451 3546
3452 // Result is the frame pointer for the frame if not adapted and for the real 3547 // Result is the frame pointer for the frame if not adapted and for the real
3453 // frame below the adaptor frame if adapted. 3548 // frame below the adaptor frame if adapted.
3454 __ mov(result, fp, LeaveCC, ne); 3549 __ beq(&adapted);
3455 __ mov(result, scratch, LeaveCC, eq); 3550 __ mr(result, fp);
3551 __ b(&done);
3552
3553 __ bind(&adapted);
3554 __ mr(result, scratch);
3555 __ bind(&done);
3456 } 3556 }
3457 } 3557 }
3458 3558
3459 3559
3460 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 3560 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3461 Register elem = ToRegister(instr->elements()); 3561 Register elem = ToRegister(instr->elements());
3462 Register result = ToRegister(instr->result()); 3562 Register result = ToRegister(instr->result());
3463 3563
3464 Label done; 3564 Label done;
3465 3565
3466 // If no arguments adaptor frame the number of arguments is fixed. 3566 // If no arguments adaptor frame the number of arguments is fixed.
3467 __ cmp(fp, elem); 3567 __ cmp(fp, elem);
3468 __ mov(result, Operand(scope()->num_parameters())); 3568 __ mov(result, Operand(scope()->num_parameters()));
3469 __ b(eq, &done); 3569 __ beq(&done);
3470 3570
3471 // Arguments adaptor frame present. Get argument length from there. 3571 // Arguments adaptor frame present. Get argument length from there.
3472 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3572 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3473 __ ldr(result, 3573 __ LoadP(result,
3474 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3574 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3475 __ SmiUntag(result); 3575 __ SmiUntag(result);
3476 3576
3477 // Argument length is in result register. 3577 // Argument length is in result register.
3478 __ bind(&done); 3578 __ bind(&done);
3479 } 3579 }
3480 3580
3481 3581
3482 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 3582 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3483 Register receiver = ToRegister(instr->receiver()); 3583 Register receiver = ToRegister(instr->receiver());
3484 Register function = ToRegister(instr->function()); 3584 Register function = ToRegister(instr->function());
3485 Register result = ToRegister(instr->result()); 3585 Register result = ToRegister(instr->result());
3486 Register scratch = scratch0(); 3586 Register scratch = scratch0();
3487 3587
3488 // If the receiver is null or undefined, we have to pass the global 3588 // If the receiver is null or undefined, we have to pass the global
3489 // object as a receiver to normal functions. Values have to be 3589 // object as a receiver to normal functions. Values have to be
3490 // passed unchanged to builtins and strict-mode functions. 3590 // passed unchanged to builtins and strict-mode functions.
3491 Label global_object, result_in_receiver; 3591 Label global_object, result_in_receiver;
3492 3592
3493 if (!instr->hydrogen()->known_function()) { 3593 if (!instr->hydrogen()->known_function()) {
3494 // Do not transform the receiver to object for strict mode 3594 // Do not transform the receiver to object for strict mode
3495 // functions. 3595 // functions.
3496 __ ldr(scratch, 3596 __ LoadP(scratch,
3497 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3597 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3498 __ ldr(scratch, 3598 __ lwz(scratch,
3499 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); 3599 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3500 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); 3600 __ TestBit(scratch,
3501 __ tst(scratch, Operand(mask)); 3601 #if V8_TARGET_ARCH_PPC64
3502 __ b(ne, &result_in_receiver); 3602 SharedFunctionInfo::kStrictModeFunction,
3603 #else
3604 SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
3605 #endif
3606 r0);
3607 __ bne(&result_in_receiver, cr0);
3503 3608
3504 // Do not transform the receiver to object for builtins. 3609 // Do not transform the receiver to object for builtins.
3505 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); 3610 __ TestBit(scratch,
3506 __ b(ne, &result_in_receiver); 3611 #if V8_TARGET_ARCH_PPC64
3612 SharedFunctionInfo::kNative,
3613 #else
3614 SharedFunctionInfo::kNative + kSmiTagSize,
3615 #endif
3616 r0);
3617 __ bne(&result_in_receiver, cr0);
3507 } 3618 }
3508 3619
3509 // Normal function. Replace undefined or null with global receiver. 3620 // Normal function. Replace undefined or null with global receiver.
3510 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3621 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3511 __ cmp(receiver, scratch); 3622 __ cmp(receiver, scratch);
3512 __ b(eq, &global_object); 3623 __ beq(&global_object);
3513 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3624 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3514 __ cmp(receiver, scratch); 3625 __ cmp(receiver, scratch);
3515 __ b(eq, &global_object); 3626 __ beq(&global_object);
3516 3627
3517 // Deoptimize if the receiver is not a JS object. 3628 // Deoptimize if the receiver is not a JS object.
3518 __ SmiTst(receiver); 3629 __ TestIfSmi(receiver, r0);
3519 DeoptimizeIf(eq, instr->environment()); 3630 DeoptimizeIf(eq, instr, cr0);
3520 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); 3631 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3521 DeoptimizeIf(lt, instr->environment()); 3632 DeoptimizeIf(lt, instr);
3522 3633
3523 __ b(&result_in_receiver); 3634 __ b(&result_in_receiver);
3524 __ bind(&global_object); 3635 __ bind(&global_object);
3525 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3636 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3526 __ ldr(result, 3637 __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3527 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); 3638 __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3528 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3529
3530 if (result.is(receiver)) { 3639 if (result.is(receiver)) {
3531 __ bind(&result_in_receiver); 3640 __ bind(&result_in_receiver);
3532 } else { 3641 } else {
3533 Label result_ok; 3642 Label result_ok;
3534 __ b(&result_ok); 3643 __ b(&result_ok);
3535 __ bind(&result_in_receiver); 3644 __ bind(&result_in_receiver);
3536 __ mov(result, receiver); 3645 __ mr(result, receiver);
3537 __ bind(&result_ok); 3646 __ bind(&result_ok);
3538 } 3647 }
3539 } 3648 }
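To summarize the branches above: strict-mode and native (builtin) callees see the receiver unchanged; for other callees a null or undefined receiver is replaced with the global proxy, and a non-object receiver (including a smi) deoptimizes. A compact sketch of that decision, with the deopt modeled as a flag (illustrative names, not V8 API):

#include <cassert>

enum class Receiver { kNullOrUndefined, kJSObject, kOther };

// Returns true when the receiver should be replaced with the global proxy.
bool ReceiverNeedsGlobalProxy(bool is_strict, bool is_native, Receiver r,
                              bool* deopt) {
  *deopt = false;
  if (is_strict || is_native) return false;          // pass value unchanged
  if (r == Receiver::kNullOrUndefined) return true;  // use the global proxy
  if (r != Receiver::kJSObject) *deopt = true;       // not a JS object: deopt
  return false;
}

int main() {
  bool deopt;
  assert(ReceiverNeedsGlobalProxy(false, false, Receiver::kNullOrUndefined, &deopt));
  assert(!ReceiverNeedsGlobalProxy(true, false, Receiver::kNullOrUndefined, &deopt));
  assert(!ReceiverNeedsGlobalProxy(false, false, Receiver::kOther, &deopt) && deopt);
  return 0;
}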
3540 3649
3541 3650
3542 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 3651 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3543 Register receiver = ToRegister(instr->receiver()); 3652 Register receiver = ToRegister(instr->receiver());
3544 Register function = ToRegister(instr->function()); 3653 Register function = ToRegister(instr->function());
3545 Register length = ToRegister(instr->length()); 3654 Register length = ToRegister(instr->length());
3546 Register elements = ToRegister(instr->elements()); 3655 Register elements = ToRegister(instr->elements());
3547 Register scratch = scratch0(); 3656 Register scratch = scratch0();
3548 DCHECK(receiver.is(r0)); // Used for parameter count. 3657 DCHECK(receiver.is(r3)); // Used for parameter count.
3549 DCHECK(function.is(r1)); // Required by InvokeFunction. 3658 DCHECK(function.is(r4)); // Required by InvokeFunction.
3550 DCHECK(ToRegister(instr->result()).is(r0)); 3659 DCHECK(ToRegister(instr->result()).is(r3));
3551 3660
3552 // Copy the arguments to this function possibly from the 3661 // Copy the arguments to this function possibly from the
3553 // adaptor frame below it. 3662 // adaptor frame below it.
3554 const uint32_t kArgumentsLimit = 1 * KB; 3663 const uint32_t kArgumentsLimit = 1 * KB;
3555 __ cmp(length, Operand(kArgumentsLimit)); 3664 __ cmpli(length, Operand(kArgumentsLimit));
3556 DeoptimizeIf(hi, instr->environment()); 3665 DeoptimizeIf(gt, instr);
3557 3666
3558 // Push the receiver and use the register to keep the original 3667 // Push the receiver and use the register to keep the original
3559 // number of arguments. 3668 // number of arguments.
3560 __ push(receiver); 3669 __ push(receiver);
3561 __ mov(receiver, length); 3670 __ mr(receiver, length);
3562 // The arguments start at a one-pointer-size offset from elements. 3671 // The arguments start at a one-pointer-size offset from elements.

3563 __ add(elements, elements, Operand(1 * kPointerSize)); 3672 __ addi(elements, elements, Operand(1 * kPointerSize));
3564 3673
3565 // Loop through the arguments pushing them onto the execution 3674 // Loop through the arguments pushing them onto the execution
3566 // stack. 3675 // stack.
3567 Label invoke, loop; 3676 Label invoke, loop;
3568 // length is a small non-negative integer, due to the test above. 3677 // length is a small non-negative integer, due to the test above.
3569 __ cmp(length, Operand::Zero()); 3678 __ cmpi(length, Operand::Zero());
3570 __ b(eq, &invoke); 3679 __ beq(&invoke);
3680 __ mtctr(length);
3571 __ bind(&loop); 3681 __ bind(&loop);
3572 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); 3682 __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
3683 __ LoadPX(scratch, MemOperand(elements, r0));
3573 __ push(scratch); 3684 __ push(scratch);
3574 __ sub(length, length, Operand(1), SetCC); 3685 __ addi(length, length, Operand(-1));
3575 __ b(ne, &loop); 3686 __ bdnz(&loop);
3576 3687
3577 __ bind(&invoke); 3688 __ bind(&invoke);
3578 DCHECK(instr->HasPointerMap()); 3689 DCHECK(instr->HasPointerMap());
3579 LPointerMap* pointers = instr->pointer_map(); 3690 LPointerMap* pointers = instr->pointer_map();
3580 SafepointGenerator safepoint_generator( 3691 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3581 this, pointers, Safepoint::kLazyDeopt); 3692 // The number of arguments is stored in receiver which is r3, as expected
3582 // The number of arguments is stored in receiver which is r0, as expected
3583 // by InvokeFunction. 3693 // by InvokeFunction.
3584 ParameterCount actual(receiver); 3694 ParameterCount actual(receiver);
3585 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); 3695 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3586 } 3696 }
3587 3697
3588 3698
3589 void LCodeGen::DoPushArgument(LPushArgument* instr) { 3699 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3590 LOperand* argument = instr->value(); 3700 LOperand* argument = instr->value();
3591 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { 3701 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3592 Abort(kDoPushArgumentNotImplementedForDoubleType); 3702 Abort(kDoPushArgumentNotImplementedForDoubleType);
3593 } else { 3703 } else {
3594 Register argument_reg = EmitLoadRegister(argument, ip); 3704 Register argument_reg = EmitLoadRegister(argument, ip);
3595 __ push(argument_reg); 3705 __ push(argument_reg);
3596 } 3706 }
3597 } 3707 }
3598 3708
3599 3709
3600 void LCodeGen::DoDrop(LDrop* instr) { 3710 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
3601 __ Drop(instr->count());
3602 }
3603 3711
3604 3712
3605 void LCodeGen::DoThisFunction(LThisFunction* instr) { 3713 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3606 Register result = ToRegister(instr->result()); 3714 Register result = ToRegister(instr->result());
3607 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); 3715 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3608 } 3716 }
3609 3717
3610 3718
3611 void LCodeGen::DoContext(LContext* instr) { 3719 void LCodeGen::DoContext(LContext* instr) {
3612 // If there is a non-return use, the context must be moved to a register. 3720 // If there is a non-return use, the context must be moved to a register.
3613 Register result = ToRegister(instr->result()); 3721 Register result = ToRegister(instr->result());
3614 if (info()->IsOptimizing()) { 3722 if (info()->IsOptimizing()) {
3615 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3723 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3616 } else { 3724 } else {
3617 // If there is no frame, the context must be in cp. 3725 // If there is no frame, the context must be in cp.
3618 DCHECK(result.is(cp)); 3726 DCHECK(result.is(cp));
3619 } 3727 }
3620 } 3728 }
3621 3729
3622 3730
3623 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 3731 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3624 DCHECK(ToRegister(instr->context()).is(cp)); 3732 DCHECK(ToRegister(instr->context()).is(cp));
3625 __ push(cp); // The context is the first argument. 3733 __ push(cp); // The context is the first argument.
3626 __ Move(scratch0(), instr->hydrogen()->pairs()); 3734 __ Move(scratch0(), instr->hydrogen()->pairs());
3627 __ push(scratch0()); 3735 __ push(scratch0());
3628 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); 3736 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3629 __ push(scratch0()); 3737 __ push(scratch0());
3630 CallRuntime(Runtime::kDeclareGlobals, 3, instr); 3738 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3631 } 3739 }
3632 3740
3633 3741
3634 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 3742 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3635 int formal_parameter_count, 3743 int formal_parameter_count, int arity,
3636 int arity, 3744 LInstruction* instr, R4State r4_state) {
3637 LInstruction* instr,
3638 R1State r1_state) {
3639 bool dont_adapt_arguments = 3745 bool dont_adapt_arguments =
3640 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3746 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3641 bool can_invoke_directly = 3747 bool can_invoke_directly =
3642 dont_adapt_arguments || formal_parameter_count == arity; 3748 dont_adapt_arguments || formal_parameter_count == arity;
3643 3749
3644 LPointerMap* pointers = instr->pointer_map(); 3750 LPointerMap* pointers = instr->pointer_map();
3645 3751
3646 if (can_invoke_directly) { 3752 if (can_invoke_directly) {
3647 if (r1_state == R1_UNINITIALIZED) { 3753 if (r4_state == R4_UNINITIALIZED) {
3648 __ Move(r1, function); 3754 __ Move(r4, function);
3649 } 3755 }
3650 3756
3651 // Change context. 3757 // Change context.
3652 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); 3758 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
3653 3759
3654 // Set r0 to arguments count if adaptation is not needed. Assumes that r0 3760 // Set r3 to arguments count if adaptation is not needed. Assumes that r3
3655 // is available to write to at this point. 3761 // is available to write to at this point.
3656 if (dont_adapt_arguments) { 3762 if (dont_adapt_arguments) {
3657 __ mov(r0, Operand(arity)); 3763 __ mov(r3, Operand(arity));
3658 } 3764 }
3659 3765
3660 // Invoke function. 3766 // Invoke function.
3661 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); 3767 if (function.is_identical_to(info()->closure())) {
3662 __ Call(ip); 3768 __ CallSelf();
3769 } else {
3770 __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
3771 __ Call(ip);
3772 }
3663 3773
3664 // Set up deoptimization. 3774 // Set up deoptimization.
3665 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 3775 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3666 } else { 3776 } else {
3667 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3777 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3668 ParameterCount count(arity); 3778 ParameterCount count(arity);
3669 ParameterCount expected(formal_parameter_count); 3779 ParameterCount expected(formal_parameter_count);
3670 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); 3780 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3671 } 3781 }
3672 } 3782 }
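The gist of the dispatch above: the call may skip the arguments adaptor only when the callee does not adapt arguments at all or the call-site arity matches its formal parameter count; otherwise it goes through InvokeFunction with the expected and actual counts. A minimal sketch of that predicate (the sentinel value below is a placeholder assumption, not the real constant):

#include <cassert>

constexpr int kDontAdaptArgumentsSentinel = -1;  // placeholder value, assumed

bool CanInvokeDirectly(int formal_parameter_count, int arity) {
  bool dont_adapt_arguments =
      formal_parameter_count == kDontAdaptArgumentsSentinel;
  return dont_adapt_arguments || formal_parameter_count == arity;
}

int main() {
  assert(CanInvokeDirectly(2, 2));
  assert(!CanInvokeDirectly(3, 2));
  assert(CanInvokeDirectly(kDontAdaptArgumentsSentinel, 5));
  return 0;
}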
3673 3783
3674 3784
3675 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3785 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3676 DCHECK(instr->context() != NULL); 3786 DCHECK(instr->context() != NULL);
3677 DCHECK(ToRegister(instr->context()).is(cp)); 3787 DCHECK(ToRegister(instr->context()).is(cp));
3678 Register input = ToRegister(instr->value()); 3788 Register input = ToRegister(instr->value());
3679 Register result = ToRegister(instr->result()); 3789 Register result = ToRegister(instr->result());
3680 Register scratch = scratch0(); 3790 Register scratch = scratch0();
3681 3791
3682 // Deoptimize if not a heap number. 3792 // Deoptimize if not a heap number.
3683 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3793 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3684 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 3794 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3685 __ cmp(scratch, Operand(ip)); 3795 __ cmp(scratch, ip);
3686 DeoptimizeIf(ne, instr->environment()); 3796 DeoptimizeIf(ne, instr);
3687 3797
3688 Label done; 3798 Label done;
3689 Register exponent = scratch0(); 3799 Register exponent = scratch0();
3690 scratch = no_reg; 3800 scratch = no_reg;
3691 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3801 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3692 // Check the sign of the argument. If the argument is positive, just 3802 // Check the sign of the argument. If the argument is positive, just
3693 // return it. 3803 // return it.
3694 __ tst(exponent, Operand(HeapNumber::kSignMask)); 3804 __ cmpwi(exponent, Operand::Zero());
3695 // Move the input to the result if necessary. 3805 // Move the input to the result if necessary.
3696 __ Move(result, input); 3806 __ Move(result, input);
3697 __ b(eq, &done); 3807 __ bge(&done);
3698 3808
3699 // Input is negative. Reverse its sign. 3809 // Input is negative. Reverse its sign.
3700 // Preserve the value of all registers. 3810 // Preserve the value of all registers.
3701 { 3811 {
3702 PushSafepointRegistersScope scope(this); 3812 PushSafepointRegistersScope scope(this);
3703 3813
3704 // Registers were saved at the safepoint, so we can use 3814 // Registers were saved at the safepoint, so we can use
3705 // many scratch registers. 3815 // many scratch registers.
3706 Register tmp1 = input.is(r1) ? r0 : r1; 3816 Register tmp1 = input.is(r4) ? r3 : r4;
3707 Register tmp2 = input.is(r2) ? r0 : r2; 3817 Register tmp2 = input.is(r5) ? r3 : r5;
3708 Register tmp3 = input.is(r3) ? r0 : r3; 3818 Register tmp3 = input.is(r6) ? r3 : r6;
3709 Register tmp4 = input.is(r4) ? r0 : r4; 3819 Register tmp4 = input.is(r7) ? r3 : r7;
3710 3820
3711 // exponent: floating point exponent value. 3821 // exponent: floating point exponent value.
3712 3822
3713 Label allocated, slow; 3823 Label allocated, slow;
3714 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); 3824 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3715 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); 3825 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3716 __ b(&allocated); 3826 __ b(&allocated);
3717 3827
3718 // Slow case: Call the runtime system to do the number allocation. 3828 // Slow case: Call the runtime system to do the number allocation.
3719 __ bind(&slow); 3829 __ bind(&slow);
3720 3830
3721 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, 3831 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3722 instr->context()); 3832 instr->context());
3723 // Set the pointer to the new heap number in tmp. 3833 // Set the pointer to the new heap number in tmp.
3724 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); 3834 if (!tmp1.is(r3)) __ mr(tmp1, r3);
3725 // Restore input_reg after call to runtime. 3835 // Restore input_reg after call to runtime.
3726 __ LoadFromSafepointRegisterSlot(input, input); 3836 __ LoadFromSafepointRegisterSlot(input, input);
3727 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3837 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3728 3838
3729 __ bind(&allocated); 3839 __ bind(&allocated);
3730 // exponent: floating point exponent value. 3840 // exponent: floating point exponent value.
3731 // tmp1: allocated heap number. 3841 // tmp1: allocated heap number.
3732 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask)); 3842 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
3733 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); 3843 __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit
3734 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); 3844 __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3735 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); 3845 __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3846 __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3736 3847
3737 __ StoreToSafepointRegisterSlot(tmp1, result); 3848 __ StoreToSafepointRegisterSlot(tmp1, result);
3738 } 3849 }
3739 3850
3740 __ bind(&done); 3851 __ bind(&done);
3741 } 3852 }
3742 3853
3743 3854
3744 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { 3855 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
3745 Register input = ToRegister(instr->value()); 3856 Register input = ToRegister(instr->value());
3746 Register result = ToRegister(instr->result()); 3857 Register result = ToRegister(instr->result());
3747 __ cmp(input, Operand::Zero()); 3858 Label done;
3748 __ Move(result, input, pl); 3859 __ cmpi(input, Operand::Zero());
3749 // We can make rsb conditional because the previous cmp instruction 3860 __ Move(result, input);
3750 // will clear the V (overflow) flag and rsb won't set this flag 3861 __ bge(&done);
3751 // if input is positive. 3862 __ li(r0, Operand::Zero()); // clear xer
3752 __ rsb(result, input, Operand::Zero(), SetCC, mi); 3863 __ mtxer(r0);
3864 __ neg(result, result, SetOE, SetRC);
3753 // Deoptimize on overflow. 3865 // Deoptimize on overflow.
3754 DeoptimizeIf(vs, instr->environment()); 3866 DeoptimizeIf(overflow, instr, cr0);
3755 } 3867 __ bind(&done);
3868 }
3869
3870
3871 #if V8_TARGET_ARCH_PPC64
3872 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
3873 Register input = ToRegister(instr->value());
3874 Register result = ToRegister(instr->result());
3875 Label done;
3876 __ cmpwi(input, Operand::Zero());
3877 __ Move(result, input);
3878 __ bge(&done);
3879
3880 // Deoptimize on overflow.
3881 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3882 __ cmpw(input, r0);
3883 DeoptimizeIf(eq, instr);
3884
3885 __ neg(result, result);
3886 __ bind(&done);
3887 }
3888 #endif
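Why both EmitMathAbs variants deoptimize on overflow: negating the most negative representable integer has no in-range result, so |INT_MIN| (and likewise the minimum smi) must fall back to the deoptimizer rather than produce a wrapped value. A standalone illustration of the 32-bit case:

#include <cassert>
#include <cstdint>
#include <limits>

// Returns false when the absolute value cannot be represented, mirroring the
// DeoptimizeIf(overflow, ...) / DeoptimizeIf(eq, ...) paths above.
bool Int32Abs(int32_t input, int32_t* result) {
  if (input == std::numeric_limits<int32_t>::min()) return false;
  *result = input < 0 ? -input : input;
  return true;
}

int main() {
  int32_t r;
  assert(Int32Abs(-5, &r) && r == 5);
  assert(Int32Abs(7, &r) && r == 7);
  assert(!Int32Abs(std::numeric_limits<int32_t>::min(), &r));
  return 0;
}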
3756 3889
3757 3890
3758 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3891 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3759 // Class for deferred case. 3892 // Class for deferred case.
3760 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { 3893 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3761 public: 3894 public:
3762 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) 3895 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3763 : LDeferredCode(codegen), instr_(instr) { } 3896 : LDeferredCode(codegen), instr_(instr) {}
3764 virtual void Generate() OVERRIDE { 3897 virtual void Generate() OVERRIDE {
3765 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3898 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3766 } 3899 }
3767 virtual LInstruction* instr() OVERRIDE { return instr_; } 3900 virtual LInstruction* instr() OVERRIDE { return instr_; }
3901
3768 private: 3902 private:
3769 LMathAbs* instr_; 3903 LMathAbs* instr_;
3770 }; 3904 };
3771 3905
3772 Representation r = instr->hydrogen()->value()->representation(); 3906 Representation r = instr->hydrogen()->value()->representation();
3773 if (r.IsDouble()) { 3907 if (r.IsDouble()) {
3774 DwVfpRegister input = ToDoubleRegister(instr->value()); 3908 DoubleRegister input = ToDoubleRegister(instr->value());
3775 DwVfpRegister result = ToDoubleRegister(instr->result()); 3909 DoubleRegister result = ToDoubleRegister(instr->result());
3776 __ vabs(result, input); 3910 __ fabs(result, input);
3911 #if V8_TARGET_ARCH_PPC64
3912 } else if (r.IsInteger32()) {
3913 EmitInteger32MathAbs(instr);
3914 } else if (r.IsSmi()) {
3915 #else
3777 } else if (r.IsSmiOrInteger32()) { 3916 } else if (r.IsSmiOrInteger32()) {
3778 EmitIntegerMathAbs(instr); 3917 #endif
3918 EmitMathAbs(instr);
3779 } else { 3919 } else {
3780 // Representation is tagged. 3920 // Representation is tagged.
3781 DeferredMathAbsTaggedHeapNumber* deferred = 3921 DeferredMathAbsTaggedHeapNumber* deferred =
3782 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); 3922 new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3783 Register input = ToRegister(instr->value()); 3923 Register input = ToRegister(instr->value());
3784 // Smi check. 3924 // Smi check.
3785 __ JumpIfNotSmi(input, deferred->entry()); 3925 __ JumpIfNotSmi(input, deferred->entry());
3786 // If smi, handle it directly. 3926 // If smi, handle it directly.
3787 EmitIntegerMathAbs(instr); 3927 EmitMathAbs(instr);
3788 __ bind(deferred->exit()); 3928 __ bind(deferred->exit());
3789 } 3929 }
3790 } 3930 }
3791 3931
3792 3932
3793 void LCodeGen::DoMathFloor(LMathFloor* instr) { 3933 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3794 DwVfpRegister input = ToDoubleRegister(instr->value()); 3934 DoubleRegister input = ToDoubleRegister(instr->value());
3795 Register result = ToRegister(instr->result()); 3935 Register result = ToRegister(instr->result());
3796 Register input_high = scratch0(); 3936 Register input_high = scratch0();
3937 Register scratch = ip;
3797 Label done, exact; 3938 Label done, exact;
3798 3939
3799 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); 3940 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
3800 DeoptimizeIf(al, instr->environment()); 3941 &exact);
3942 DeoptimizeIf(al, instr);
3801 3943
3802 __ bind(&exact); 3944 __ bind(&exact);
3803 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3945 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3804 // Test for -0. 3946 // Test for -0.
3805 __ cmp(result, Operand::Zero()); 3947 __ cmpi(result, Operand::Zero());
3806 __ b(ne, &done); 3948 __ bne(&done);
3807 __ cmp(input_high, Operand::Zero()); 3949 __ cmpwi(input_high, Operand::Zero());
3808 DeoptimizeIf(mi, instr->environment()); 3950 DeoptimizeIf(lt, instr);
3809 } 3951 }
3810 __ bind(&done); 3952 __ bind(&done);
3811 } 3953 }
3812 3954
3813 3955
3814 void LCodeGen::DoMathRound(LMathRound* instr) { 3956 void LCodeGen::DoMathRound(LMathRound* instr) {
3815 DwVfpRegister input = ToDoubleRegister(instr->value()); 3957 DoubleRegister input = ToDoubleRegister(instr->value());
3816 Register result = ToRegister(instr->result()); 3958 Register result = ToRegister(instr->result());
3817 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3959 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3818 DwVfpRegister input_plus_dot_five = double_scratch1; 3960 DoubleRegister input_plus_dot_five = double_scratch1;
3819 Register input_high = scratch0(); 3961 Register scratch1 = scratch0();
3820 DwVfpRegister dot_five = double_scratch0(); 3962 Register scratch2 = ip;
3963 DoubleRegister dot_five = double_scratch0();
3821 Label convert, done; 3964 Label convert, done;
3822 3965
3823 __ Vmov(dot_five, 0.5, scratch0()); 3966 __ LoadDoubleLiteral(dot_five, 0.5, r0);
3824 __ vabs(double_scratch1, input); 3967 __ fabs(double_scratch1, input);
3825 __ VFPCompareAndSetFlags(double_scratch1, dot_five); 3968 __ fcmpu(double_scratch1, dot_five);
3969 DeoptimizeIf(unordered, instr);
3826 // If input is in [-0.5, -0], the result is -0. 3970 // If input is in [-0.5, -0], the result is -0.
3827 // If input is in [+0, +0.5[, the result is +0. 3971 // If input is in [+0, +0.5[, the result is +0.
3828 // If the input is +0.5, the result is 1. 3972 // If the input is +0.5, the result is 1.
3829 __ b(hi, &convert); // Out of [-0.5, +0.5]. 3973 __ bgt(&convert); // Out of [-0.5, +0.5].
3830 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3974 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3831 __ VmovHigh(input_high, input); 3975 #if V8_TARGET_ARCH_PPC64
3832 __ cmp(input_high, Operand::Zero()); 3976 __ MovDoubleToInt64(scratch1, input);
3833 DeoptimizeIf(mi, instr->environment()); // [-0.5, -0]. 3977 #else
3978 __ MovDoubleHighToInt(scratch1, input);
3979 #endif
3980 __ cmpi(scratch1, Operand::Zero());
3981 DeoptimizeIf(lt, instr); // [-0.5, -0].
3834 } 3982 }
3835 __ VFPCompareAndSetFlags(input, dot_five); 3983 Label return_zero;
3836 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. 3984 __ fcmpu(input, dot_five);
3985 __ bne(&return_zero);
3986 __ li(result, Operand(1)); // +0.5.
3987 __ b(&done);
3837 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on 3988 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3838 // flag kBailoutOnMinusZero. 3989 // flag kBailoutOnMinusZero.
3839 __ mov(result, Operand::Zero(), LeaveCC, ne); 3990 __ bind(&return_zero);
3991 __ li(result, Operand::Zero());
3840 __ b(&done); 3992 __ b(&done);
3841 3993
3842 __ bind(&convert); 3994 __ bind(&convert);
3843 __ vadd(input_plus_dot_five, input, dot_five); 3995 __ fadd(input_plus_dot_five, input, dot_five);
3844 // Reuse dot_five (double_scratch0) as we no longer need this value. 3996 // Reuse dot_five (double_scratch0) as we no longer need this value.
3845 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), 3997 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
3846 &done, &done); 3998 double_scratch0(), &done, &done);
3847 DeoptimizeIf(al, instr->environment()); 3999 DeoptimizeIf(al, instr);
3848 __ bind(&done); 4000 __ bind(&done);
3849 } 4001 }
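The rounding rule implemented above, in short: inputs with |x| <= 0.5 are handled specially so that [-0.5, -0] yields -0 (deoptimizing when minus zero must bail out), [+0, +0.5[ yields +0, and exactly +0.5 yields 1; everything else is floor(x + 0.5). A hedged sketch of just that rule (it deliberately ignores the int32-range deopt that TryInt32Floor also performs):

#include <cassert>
#include <cmath>

// Deopt conditions (NaN input, minus-zero bailout) are modeled as a flag.
double MathRoundSketch(double x, bool bailout_on_minus_zero, bool* deopt) {
  *deopt = false;
  if (std::isnan(x)) { *deopt = true; return 0.0; }    // unordered fcmpu above
  if (std::fabs(x) <= 0.5) {
    if (bailout_on_minus_zero && std::signbit(x)) { *deopt = true; return 0.0; }
    if (x == 0.5) return 1.0;                          // +0.5 rounds up to 1
    return std::signbit(x) ? -0.0 : 0.0;               // [-0.5, -0] and [+0, +0.5[
  }
  return std::floor(x + 0.5);                          // general convert path
}

int main() {
  bool deopt;
  assert(MathRoundSketch(2.5, false, &deopt) == 3.0 && !deopt);
  assert(MathRoundSketch(-2.5, false, &deopt) == -2.0 && !deopt);
  assert(MathRoundSketch(0.5, false, &deopt) == 1.0 && !deopt);
  MathRoundSketch(-0.25, true, &deopt);
  assert(deopt);
  return 0;
}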
3850 4002
3851 4003
3852 void LCodeGen::DoMathFround(LMathFround* instr) { 4004 void LCodeGen::DoMathFround(LMathFround* instr) {
3853 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); 4005 DoubleRegister input_reg = ToDoubleRegister(instr->value());
3854 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); 4006 DoubleRegister output_reg = ToDoubleRegister(instr->result());
3855 LowDwVfpRegister scratch = double_scratch0(); 4007 __ frsp(output_reg, input_reg);
3856 __ vcvt_f32_f64(scratch.low(), input_reg);
3857 __ vcvt_f64_f32(output_reg, scratch.low());
3858 } 4008 }
3859 4009
3860 4010
3861 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 4011 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3862 DwVfpRegister input = ToDoubleRegister(instr->value()); 4012 DoubleRegister input = ToDoubleRegister(instr->value());
3863 DwVfpRegister result = ToDoubleRegister(instr->result()); 4013 DoubleRegister result = ToDoubleRegister(instr->result());
3864 __ vsqrt(result, input); 4014 __ fsqrt(result, input);
3865 } 4015 }
3866 4016
3867 4017
3868 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 4018 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3869 DwVfpRegister input = ToDoubleRegister(instr->value()); 4019 DoubleRegister input = ToDoubleRegister(instr->value());
3870 DwVfpRegister result = ToDoubleRegister(instr->result()); 4020 DoubleRegister result = ToDoubleRegister(instr->result());
3871 DwVfpRegister temp = double_scratch0(); 4021 DoubleRegister temp = double_scratch0();
3872 4022
3873 // Note that according to ECMA-262 15.8.2.13: 4023 // Note that according to ECMA-262 15.8.2.13:
3874 // Math.pow(-Infinity, 0.5) == Infinity 4024 // Math.pow(-Infinity, 0.5) == Infinity
3875 // Math.sqrt(-Infinity) == NaN 4025 // Math.sqrt(-Infinity) == NaN
3876 Label done; 4026 Label skip, done;
3877 __ vmov(temp, -V8_INFINITY, scratch0()); 4027
3878 __ VFPCompareAndSetFlags(input, temp); 4028 __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
3879 __ vneg(result, temp, eq); 4029 __ fcmpu(input, temp);
3880 __ b(&done, eq); 4030 __ bne(&skip);
4031 __ fneg(result, temp);
4032 __ b(&done);
3881 4033
3882 // Add +0 to convert -0 to +0. 4034 // Add +0 to convert -0 to +0.
3883 __ vadd(result, input, kDoubleRegZero); 4035 __ bind(&skip);
3884 __ vsqrt(result, result); 4036 __ fadd(result, input, kDoubleRegZero);
4037 __ fsqrt(result, result);
3885 __ bind(&done); 4038 __ bind(&done);
3886 } 4039 }
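For context on the two guards above: per the ECMA-262 note, Math.pow(-Infinity, 0.5) must be +Infinity even though a plain square root of -Infinity is NaN, and adding +0 first turns a -0 input into +0 before the square root (IEEE sqrt(-0) would otherwise return -0). A small standalone sketch of those corner cases (plain C++, not the codegen):

#include <cassert>
#include <cmath>
#include <limits>

double PowHalfSketch(double x) {
  const double kInf = std::numeric_limits<double>::infinity();
  if (x == -kInf) return kInf;   // the fneg of the -Infinity constant above
  return std::sqrt(x + 0.0);     // the +0 addition normalizes -0 to +0
}

int main() {
  const double kInf = std::numeric_limits<double>::infinity();
  assert(PowHalfSketch(-kInf) == kInf);
  assert(PowHalfSketch(4.0) == 2.0);
  assert(!std::signbit(PowHalfSketch(-0.0)));  // would be -0 without the +0 add
  return 0;
}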
3887 4040
3888 4041
3889 void LCodeGen::DoPower(LPower* instr) { 4042 void LCodeGen::DoPower(LPower* instr) {
3890 Representation exponent_type = instr->hydrogen()->right()->representation(); 4043 Representation exponent_type = instr->hydrogen()->right()->representation();
3891 // Having marked this as a call, we can use any registers. 4044 // Having marked this as a call, we can use any registers.
3892 // Just make sure that the input/output registers are the expected ones. 4045 // Just make sure that the input/output registers are the expected ones.
4046 #ifdef DEBUG
3893 Register tagged_exponent = MathPowTaggedDescriptor::exponent(); 4047 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
4048 #endif
3894 DCHECK(!instr->right()->IsDoubleRegister() || 4049 DCHECK(!instr->right()->IsDoubleRegister() ||
3895 ToDoubleRegister(instr->right()).is(d1)); 4050 ToDoubleRegister(instr->right()).is(d2));
3896 DCHECK(!instr->right()->IsRegister() || 4051 DCHECK(!instr->right()->IsRegister() ||
3897 ToRegister(instr->right()).is(tagged_exponent)); 4052 ToRegister(instr->right()).is(tagged_exponent));
3898 DCHECK(ToDoubleRegister(instr->left()).is(d0)); 4053 DCHECK(ToDoubleRegister(instr->left()).is(d1));
3899 DCHECK(ToDoubleRegister(instr->result()).is(d2)); 4054 DCHECK(ToDoubleRegister(instr->result()).is(d3));
3900 4055
3901 if (exponent_type.IsSmi()) { 4056 if (exponent_type.IsSmi()) {
3902 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4057 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3903 __ CallStub(&stub); 4058 __ CallStub(&stub);
3904 } else if (exponent_type.IsTagged()) { 4059 } else if (exponent_type.IsTagged()) {
3905 Label no_deopt; 4060 Label no_deopt;
3906 __ JumpIfSmi(tagged_exponent, &no_deopt); 4061 __ JumpIfSmi(r5, &no_deopt);
3907 DCHECK(!r6.is(tagged_exponent)); 4062 __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
3908 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3909 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4063 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3910 __ cmp(r6, Operand(ip)); 4064 __ cmp(r10, ip);
3911 DeoptimizeIf(ne, instr->environment()); 4065 DeoptimizeIf(ne, instr);
3912 __ bind(&no_deopt); 4066 __ bind(&no_deopt);
3913 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4067 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3914 __ CallStub(&stub); 4068 __ CallStub(&stub);
3915 } else if (exponent_type.IsInteger32()) { 4069 } else if (exponent_type.IsInteger32()) {
3916 MathPowStub stub(isolate(), MathPowStub::INTEGER); 4070 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3917 __ CallStub(&stub); 4071 __ CallStub(&stub);
3918 } else { 4072 } else {
3919 DCHECK(exponent_type.IsDouble()); 4073 DCHECK(exponent_type.IsDouble());
3920 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 4074 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3921 __ CallStub(&stub); 4075 __ CallStub(&stub);
3922 } 4076 }
3923 } 4077 }
3924 4078
3925 4079
3926 void LCodeGen::DoMathExp(LMathExp* instr) { 4080 void LCodeGen::DoMathExp(LMathExp* instr) {
3927 DwVfpRegister input = ToDoubleRegister(instr->value()); 4081 DoubleRegister input = ToDoubleRegister(instr->value());
3928 DwVfpRegister result = ToDoubleRegister(instr->result()); 4082 DoubleRegister result = ToDoubleRegister(instr->result());
3929 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 4083 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3930 DwVfpRegister double_scratch2 = double_scratch0(); 4084 DoubleRegister double_scratch2 = double_scratch0();
3931 Register temp1 = ToRegister(instr->temp1()); 4085 Register temp1 = ToRegister(instr->temp1());
3932 Register temp2 = ToRegister(instr->temp2()); 4086 Register temp2 = ToRegister(instr->temp2());
3933 4087
3934 MathExpGenerator::EmitMathExp( 4088 MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
3935 masm(), input, result, double_scratch1, double_scratch2, 4089 double_scratch2, temp1, temp2, scratch0());
3936 temp1, temp2, scratch0());
3937 } 4090 }
3938 4091
3939 4092
3940 void LCodeGen::DoMathLog(LMathLog* instr) { 4093 void LCodeGen::DoMathLog(LMathLog* instr) {
3941 __ PrepareCallCFunction(0, 1, scratch0()); 4094 __ PrepareCallCFunction(0, 1, scratch0());
3942 __ MovToFloatParameter(ToDoubleRegister(instr->value())); 4095 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3943 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 4096 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
3944 0, 1); 4097 1);
3945 __ MovFromFloatResult(ToDoubleRegister(instr->result())); 4098 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3946 } 4099 }
3947 4100
3948 4101
3949 void LCodeGen::DoMathClz32(LMathClz32* instr) { 4102 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3950 Register input = ToRegister(instr->value()); 4103 Register input = ToRegister(instr->value());
3951 Register result = ToRegister(instr->result()); 4104 Register result = ToRegister(instr->result());
3952 __ clz(result, input); 4105 __ cntlzw_(result, input);
3953 } 4106 }
3954 4107
3955 4108
3956 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 4109 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3957 DCHECK(ToRegister(instr->context()).is(cp)); 4110 DCHECK(ToRegister(instr->context()).is(cp));
3958 DCHECK(ToRegister(instr->function()).is(r1)); 4111 DCHECK(ToRegister(instr->function()).is(r4));
3959 DCHECK(instr->HasPointerMap()); 4112 DCHECK(instr->HasPointerMap());
3960 4113
3961 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); 4114 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3962 if (known_function.is_null()) { 4115 if (known_function.is_null()) {
3963 LPointerMap* pointers = instr->pointer_map(); 4116 LPointerMap* pointers = instr->pointer_map();
3964 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 4117 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3965 ParameterCount count(instr->arity()); 4118 ParameterCount count(instr->arity());
3966 __ InvokeFunction(r1, count, CALL_FUNCTION, generator); 4119 __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
3967 } else { 4120 } else {
3968 CallKnownFunction(known_function, 4121 CallKnownFunction(known_function,
3969 instr->hydrogen()->formal_parameter_count(), 4122 instr->hydrogen()->formal_parameter_count(),
3970 instr->arity(), 4123 instr->arity(), instr, R4_CONTAINS_TARGET);
3971 instr,
3972 R1_CONTAINS_TARGET);
3973 } 4124 }
3974 } 4125 }
3975 4126
3976 4127
3977 void LCodeGen::DoTailCallThroughMegamorphicCache( 4128 void LCodeGen::DoTailCallThroughMegamorphicCache(
3978 LTailCallThroughMegamorphicCache* instr) { 4129 LTailCallThroughMegamorphicCache* instr) {
3979 Register receiver = ToRegister(instr->receiver()); 4130 Register receiver = ToRegister(instr->receiver());
3980 Register name = ToRegister(instr->name()); 4131 Register name = ToRegister(instr->name());
3981 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister())); 4132 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3982 DCHECK(name.is(LoadDescriptor::NameRegister())); 4133 DCHECK(name.is(LoadDescriptor::NameRegister()));
3983 DCHECK(receiver.is(r1)); 4134 DCHECK(receiver.is(r4));
3984 DCHECK(name.is(r2)); 4135 DCHECK(name.is(r5));
3985 4136
3986 Register scratch = r3; 4137 Register scratch = r6;
3987 Register extra = r4; 4138 Register extra = r7;
3988 Register extra2 = r5; 4139 Register extra2 = r8;
3989 Register extra3 = r6; 4140 Register extra3 = r9;
3990 4141
3991 // Important for the tail-call. 4142 // Important for the tail-call.
3992 bool must_teardown_frame = NeedsEagerFrame(); 4143 bool must_teardown_frame = NeedsEagerFrame();
3993 4144
3994 // The probe will tail call to a handler if found. 4145 // The probe will tail call to a handler if found.
3995 isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(), 4146 isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
3996 must_teardown_frame, receiver, name, 4147 must_teardown_frame, receiver, name,
3997 scratch, extra, extra2, extra3); 4148 scratch, extra, extra2, extra3);
3998 4149
3999 // Tail call to miss if we ended up here. 4150 // Tail call to miss if we ended up here.
4000 if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL); 4151 if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
4001 LoadIC::GenerateMiss(masm()); 4152 LoadIC::GenerateMiss(masm());
4002 } 4153 }
4003 4154
4004 4155
4005 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 4156 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
4006 DCHECK(ToRegister(instr->result()).is(r0)); 4157 DCHECK(ToRegister(instr->result()).is(r3));
4007 4158
4008 LPointerMap* pointers = instr->pointer_map(); 4159 LPointerMap* pointers = instr->pointer_map();
4009 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 4160 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4010 4161
4011 if (instr->target()->IsConstantOperand()) { 4162 if (instr->target()->IsConstantOperand()) {
4012 LConstantOperand* target = LConstantOperand::cast(instr->target()); 4163 LConstantOperand* target = LConstantOperand::cast(instr->target());
4013 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 4164 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
4014 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 4165 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
4015 PlatformInterfaceDescriptor* call_descriptor = 4166 __ Call(code, RelocInfo::CODE_TARGET);
4016 instr->descriptor().platform_specific_descriptor();
4017 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
4018 call_descriptor->storage_mode());
4019 } else { 4167 } else {
4020 DCHECK(instr->target()->IsRegister()); 4168 DCHECK(instr->target()->IsRegister());
4021 Register target = ToRegister(instr->target()); 4169 Register target = ToRegister(instr->target());
4022 generator.BeforeCall(__ CallSize(target)); 4170 generator.BeforeCall(__ CallSize(target));
4023 // Make sure we don't emit any additional entries in the constant pool 4171 __ addi(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4024 // before the call to ensure that the CallCodeSize() calculated the correct
4025 // number of instructions for the constant pool load.
4026 {
4027 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
4028 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4029 }
4030 __ Call(target); 4172 __ Call(target);
4031 } 4173 }
4032 generator.AfterCall(); 4174 generator.AfterCall();
4033 } 4175 }
4034 4176
4035 4177
4036 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { 4178 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4037 DCHECK(ToRegister(instr->function()).is(r1)); 4179 DCHECK(ToRegister(instr->function()).is(r4));
4038 DCHECK(ToRegister(instr->result()).is(r0)); 4180 DCHECK(ToRegister(instr->result()).is(r3));
4039 4181
4040 if (instr->hydrogen()->pass_argument_count()) { 4182 if (instr->hydrogen()->pass_argument_count()) {
4041 __ mov(r0, Operand(instr->arity())); 4183 __ mov(r3, Operand(instr->arity()));
4042 } 4184 }
4043 4185
4044 // Change context. 4186 // Change context.
4045 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); 4187 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
4046 4188
4047 // Load the code entry address 4189 // Load the code entry address
4048 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); 4190 __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
4049 __ Call(ip); 4191 __ Call(ip);
4050 4192
4051 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 4193 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4052 } 4194 }
4053 4195
4054 4196
4055 void LCodeGen::DoCallFunction(LCallFunction* instr) { 4197 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4056 DCHECK(ToRegister(instr->context()).is(cp)); 4198 DCHECK(ToRegister(instr->context()).is(cp));
4057 DCHECK(ToRegister(instr->function()).is(r1)); 4199 DCHECK(ToRegister(instr->function()).is(r4));
4058 DCHECK(ToRegister(instr->result()).is(r0)); 4200 DCHECK(ToRegister(instr->result()).is(r3));
4059 4201
4060 int arity = instr->arity(); 4202 int arity = instr->arity();
4061 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); 4203 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4062 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4204 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4063 } 4205 }
4064 4206
4065 4207
4066 void LCodeGen::DoCallNew(LCallNew* instr) { 4208 void LCodeGen::DoCallNew(LCallNew* instr) {
4067 DCHECK(ToRegister(instr->context()).is(cp)); 4209 DCHECK(ToRegister(instr->context()).is(cp));
4068 DCHECK(ToRegister(instr->constructor()).is(r1)); 4210 DCHECK(ToRegister(instr->constructor()).is(r4));
4069 DCHECK(ToRegister(instr->result()).is(r0)); 4211 DCHECK(ToRegister(instr->result()).is(r3));
4070 4212
4071 __ mov(r0, Operand(instr->arity())); 4213 __ mov(r3, Operand(instr->arity()));
4072 // No cell in r2 for construct type feedback in optimized code 4214 // No cell in r5 for construct type feedback in optimized code
4073 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 4215 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
4074 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); 4216 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4075 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4217 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4076 } 4218 }
4077 4219
4078 4220
4079 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 4221 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4080 DCHECK(ToRegister(instr->context()).is(cp)); 4222 DCHECK(ToRegister(instr->context()).is(cp));
4081 DCHECK(ToRegister(instr->constructor()).is(r1)); 4223 DCHECK(ToRegister(instr->constructor()).is(r4));
4082 DCHECK(ToRegister(instr->result()).is(r0)); 4224 DCHECK(ToRegister(instr->result()).is(r3));
4083 4225
4084 __ mov(r0, Operand(instr->arity())); 4226 __ mov(r3, Operand(instr->arity()));
4085 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 4227 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
4086 ElementsKind kind = instr->hydrogen()->elements_kind(); 4228 ElementsKind kind = instr->hydrogen()->elements_kind();
4087 AllocationSiteOverrideMode override_mode = 4229 AllocationSiteOverrideMode override_mode =
4088 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 4230 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4089 ? DISABLE_ALLOCATION_SITES 4231 ? DISABLE_ALLOCATION_SITES
4090 : DONT_OVERRIDE; 4232 : DONT_OVERRIDE;
4091 4233
4092 if (instr->arity() == 0) { 4234 if (instr->arity() == 0) {
4093 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); 4235 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4094 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4236 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4095 } else if (instr->arity() == 1) { 4237 } else if (instr->arity() == 1) {
4096 Label done; 4238 Label done;
4097 if (IsFastPackedElementsKind(kind)) { 4239 if (IsFastPackedElementsKind(kind)) {
4098 Label packed_case; 4240 Label packed_case;
4099 // We might need the holey variant of this elements kind; 4241 // We might need the holey variant of this elements kind;
4100 // look at the first argument to decide. 4242 // look at the first argument to decide.
4101 __ ldr(r5, MemOperand(sp, 0)); 4243 __ LoadP(r8, MemOperand(sp, 0));
4102 __ cmp(r5, Operand::Zero()); 4244 __ cmpi(r8, Operand::Zero());
4103 __ b(eq, &packed_case); 4245 __ beq(&packed_case);
4104 4246
4105 ElementsKind holey_kind = GetHoleyElementsKind(kind); 4247 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4106 ArraySingleArgumentConstructorStub stub(isolate(), 4248 ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
4107 holey_kind,
4108 override_mode); 4249 override_mode);
4109 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4250 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4110 __ jmp(&done); 4251 __ b(&done);
4111 __ bind(&packed_case); 4252 __ bind(&packed_case);
4112 } 4253 }
4113 4254
4114 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); 4255 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4115 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4256 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4116 __ bind(&done); 4257 __ bind(&done);
4117 } else { 4258 } else {
4118 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); 4259 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4119 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4260 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4120 } 4261 }
4121 } 4262 }
4122 4263
4123 4264
4124 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 4265 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4125 CallRuntime(instr->function(), instr->arity(), instr); 4266 CallRuntime(instr->function(), instr->arity(), instr);
4126 } 4267 }
4127 4268
4128 4269
4129 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { 4270 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4130 Register function = ToRegister(instr->function()); 4271 Register function = ToRegister(instr->function());
4131 Register code_object = ToRegister(instr->code_object()); 4272 Register code_object = ToRegister(instr->code_object());
4132 __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag)); 4273 __ addi(code_object, code_object,
4133 __ str(code_object, 4274 Operand(Code::kHeaderSize - kHeapObjectTag));
4134 FieldMemOperand(function, JSFunction::kCodeEntryOffset)); 4275 __ StoreP(code_object,
4276 FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
4135 } 4277 }
4136 4278
4137 4279
4138 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { 4280 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4139 Register result = ToRegister(instr->result()); 4281 Register result = ToRegister(instr->result());
4140 Register base = ToRegister(instr->base_object()); 4282 Register base = ToRegister(instr->base_object());
4141 if (instr->offset()->IsConstantOperand()) { 4283 if (instr->offset()->IsConstantOperand()) {
4142 LConstantOperand* offset = LConstantOperand::cast(instr->offset()); 4284 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4143 __ add(result, base, Operand(ToInteger32(offset))); 4285 __ Add(result, base, ToInteger32(offset), r0);
4144 } else { 4286 } else {
4145 Register offset = ToRegister(instr->offset()); 4287 Register offset = ToRegister(instr->offset());
4146 __ add(result, base, offset); 4288 __ add(result, base, offset);
4147 } 4289 }
4148 } 4290 }
4149 4291
4150 4292
4151 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 4293 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4294 HStoreNamedField* hinstr = instr->hydrogen();
4152 Representation representation = instr->representation(); 4295 Representation representation = instr->representation();
4153 4296
4154 Register object = ToRegister(instr->object()); 4297 Register object = ToRegister(instr->object());
4155 Register scratch = scratch0(); 4298 Register scratch = scratch0();
4156 HObjectAccess access = instr->hydrogen()->access(); 4299 HObjectAccess access = hinstr->access();
4157 int offset = access.offset(); 4300 int offset = access.offset();
4158 4301
4159 if (access.IsExternalMemory()) { 4302 if (access.IsExternalMemory()) {
4160 Register value = ToRegister(instr->value()); 4303 Register value = ToRegister(instr->value());
4161 MemOperand operand = MemOperand(object, offset); 4304 MemOperand operand = MemOperand(object, offset);
4162 __ Store(value, operand, representation); 4305 __ StoreRepresentation(value, operand, representation, r0);
4163 return; 4306 return;
4164 } 4307 }
4165 4308
4166 __ AssertNotSmi(object); 4309 __ AssertNotSmi(object);
4167 4310
4168 DCHECK(!representation.IsSmi() || 4311 #if V8_TARGET_ARCH_PPC64
4169 !instr->value()->IsConstantOperand() || 4312 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
4313 IsInteger32(LConstantOperand::cast(instr->value())));
4314 #else
4315 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
4170 IsSmi(LConstantOperand::cast(instr->value()))); 4316 IsSmi(LConstantOperand::cast(instr->value())));
4317 #endif
4171 if (representation.IsDouble()) { 4318 if (representation.IsDouble()) {
4172 DCHECK(access.IsInobject()); 4319 DCHECK(access.IsInobject());
4173 DCHECK(!instr->hydrogen()->has_transition()); 4320 DCHECK(!hinstr->has_transition());
4174 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 4321 DCHECK(!hinstr->NeedsWriteBarrier());
4175 DwVfpRegister value = ToDoubleRegister(instr->value()); 4322 DoubleRegister value = ToDoubleRegister(instr->value());
4176 __ vstr(value, FieldMemOperand(object, offset)); 4323 __ stfd(value, FieldMemOperand(object, offset));
4177 return; 4324 return;
4178 } 4325 }
4179 4326
4180 if (instr->hydrogen()->has_transition()) { 4327 if (hinstr->has_transition()) {
4181 Handle<Map> transition = instr->hydrogen()->transition_map(); 4328 Handle<Map> transition = hinstr->transition_map();
4182 AddDeprecationDependency(transition); 4329 AddDeprecationDependency(transition);
4183 __ mov(scratch, Operand(transition)); 4330 __ mov(scratch, Operand(transition));
4184 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 4331 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
4185 if (instr->hydrogen()->NeedsWriteBarrierForMap()) { 4332 if (hinstr->NeedsWriteBarrierForMap()) {
4186 Register temp = ToRegister(instr->temp()); 4333 Register temp = ToRegister(instr->temp());
4187 // Update the write barrier for the map field. 4334 // Update the write barrier for the map field.
4188 __ RecordWriteForMap(object, 4335 __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
4189 scratch,
4190 temp,
4191 GetLinkRegisterState(),
4192 kSaveFPRegs); 4336 kSaveFPRegs);
4193 } 4337 }
4194 } 4338 }
4195 4339
4196 // Do the store. 4340 // Do the store.
4197 Register value = ToRegister(instr->value()); 4341 Register value = ToRegister(instr->value());
4342
4343 #if V8_TARGET_ARCH_PPC64
4344 // 64-bit Smi optimization
4345 if (representation.IsSmi() &&
4346 hinstr->value()->representation().IsInteger32()) {
4347 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4348 // Store int value directly to upper half of the smi.
4349 STATIC_ASSERT(kSmiTag == 0);
4350 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4351 #if V8_TARGET_LITTLE_ENDIAN
4352 offset += kPointerSize / 2;
4353 #endif
4354 representation = Representation::Integer32();
4355 }
4356 #endif
4357
4198 if (access.IsInobject()) { 4358 if (access.IsInobject()) {
4199 MemOperand operand = FieldMemOperand(object, offset); 4359 MemOperand operand = FieldMemOperand(object, offset);
4200 __ Store(value, operand, representation); 4360 __ StoreRepresentation(value, operand, representation, r0);
4201 if (instr->hydrogen()->NeedsWriteBarrier()) { 4361 if (hinstr->NeedsWriteBarrier()) {
4202 // Update the write barrier for the object for in-object properties. 4362 // Update the write barrier for the object for in-object properties.
4203 __ RecordWriteField(object, 4363 __ RecordWriteField(
4204 offset, 4364 object, offset, value, scratch, GetLinkRegisterState(), kSaveFPRegs,
4205 value, 4365 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4206 scratch, 4366 hinstr->PointersToHereCheckForValue());
4207 GetLinkRegisterState(),
4208 kSaveFPRegs,
4209 EMIT_REMEMBERED_SET,
4210 instr->hydrogen()->SmiCheckForWriteBarrier(),
4211 instr->hydrogen()->PointersToHereCheckForValue());
4212 } 4367 }
4213 } else { 4368 } else {
4214 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); 4369 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4215 MemOperand operand = FieldMemOperand(scratch, offset); 4370 MemOperand operand = FieldMemOperand(scratch, offset);
4216 __ Store(value, operand, representation); 4371 __ StoreRepresentation(value, operand, representation, r0);
4217 if (instr->hydrogen()->NeedsWriteBarrier()) { 4372 if (hinstr->NeedsWriteBarrier()) {
4218 // Update the write barrier for the properties array. 4373 // Update the write barrier for the properties array.
4219 // object is used as a scratch register. 4374 // object is used as a scratch register.
4220 __ RecordWriteField(scratch, 4375 __ RecordWriteField(
4221 offset, 4376 scratch, offset, value, object, GetLinkRegisterState(), kSaveFPRegs,
4222 value, 4377 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4223 object, 4378 hinstr->PointersToHereCheckForValue());
4224 GetLinkRegisterState(),
4225 kSaveFPRegs,
4226 EMIT_REMEMBERED_SET,
4227 instr->hydrogen()->SmiCheckForWriteBarrier(),
4228 instr->hydrogen()->PointersToHereCheckForValue());
4229 } 4379 }
4230 } 4380 }
4231 } 4381 }
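The PPC64-only block above writes an Integer32 value directly into the value half of an already-initialized Smi field instead of re-tagging. A minimal stand-alone sketch of the layout this relies on; the constants and helper names below are simplified assumptions for illustration, not the V8 definitions:

// Illustrative sketch only: models a PPC64-style Smi as a 64-bit word with
// the 32-bit payload in the upper half and a zero tag in the lower half.
#include <cassert>
#include <cstdint>

int64_t SmiFromInt(int32_t value) {
  // Shift on the unsigned type to avoid shifting a negative signed value.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
}

int32_t SmiToInt(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

int main() {
  int64_t slot = SmiFromInt(7);
  // Overwriting only the upper 32 bits with a new payload leaves a valid Smi;
  // the store above does exactly this by bumping the field offset by
  // kPointerSize / 2 on little-endian targets, where the value half sits at
  // the higher-addressed 4 bytes of the 8-byte slot.
  slot = (slot & 0xFFFFFFFFLL) | (static_cast<int64_t>(42) << 32);
  assert(SmiToInt(slot) == 42);
  return 0;
}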
4232 4382
4233 4383
4234 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 4384 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4235 DCHECK(ToRegister(instr->context()).is(cp)); 4385 DCHECK(ToRegister(instr->context()).is(cp));
4236 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 4386 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4237 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 4387 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4238 4388
4239 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name())); 4389 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
4240 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); 4390 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4241 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4391 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4242 } 4392 }
4243 4393
4244 4394
4245 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 4395 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4246 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; 4396 Representation representation = instr->hydrogen()->length()->representation();
4247 if (instr->index()->IsConstantOperand()) { 4397 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4248 Operand index = ToOperand(instr->index()); 4398 DCHECK(representation.IsSmiOrInteger32());
4399
4400 Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
4401 if (instr->length()->IsConstantOperand()) {
4402 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4403 Register index = ToRegister(instr->index());
4404 if (representation.IsSmi()) {
4405 __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
4406 } else {
4407 __ Cmplwi(index, Operand(length), r0);
4408 }
4409 cc = CommuteCondition(cc);
4410 } else if (instr->index()->IsConstantOperand()) {
4411 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4249 Register length = ToRegister(instr->length()); 4412 Register length = ToRegister(instr->length());
4250 __ cmp(length, index); 4413 if (representation.IsSmi()) {
4251 cc = CommuteCondition(cc); 4414 __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
4415 } else {
4416 __ Cmplwi(length, Operand(index), r0);
4417 }
4252 } else { 4418 } else {
4253 Register index = ToRegister(instr->index()); 4419 Register index = ToRegister(instr->index());
4254 Operand length = ToOperand(instr->length()); 4420 Register length = ToRegister(instr->length());
4255 __ cmp(index, length); 4421 if (representation.IsSmi()) {
4422 __ cmpl(length, index);
4423 } else {
4424 __ cmplw(length, index);
4425 }
4256 } 4426 }
4257 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 4427 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4258 Label done; 4428 Label done;
4259 __ b(NegateCondition(cc), &done); 4429 __ b(NegateCondition(cc), &done);
4260 __ stop("eliminated bounds check failed"); 4430 __ stop("eliminated bounds check failed");
4261 __ bind(&done); 4431 __ bind(&done);
4262 } else { 4432 } else {
4263 DeoptimizeIf(cc, instr->environment()); 4433 DeoptimizeIf(cc, instr);
4264 } 4434 }
4265 } 4435 }
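When one side of the bounds check is a constant, the comparison is emitted with the operands in the order the immediate form allows, and the condition is commuted to compensate. A rough sketch of the equivalence being used, with illustrative names rather than the V8 ones:

// Illustrative sketch only: "deopt if length <= index" and "deopt if
// index >= length" are the same predicate once the condition is commuted.
#include <cassert>

enum Condition { kLT, kLE, kGT, kGE };

Condition CommuteCondition(Condition cc) {
  switch (cc) {
    case kLT: return kGT;
    case kLE: return kGE;
    case kGT: return kLT;
    case kGE: return kLE;
  }
  return cc;
}

bool Holds(Condition cc, unsigned lhs, unsigned rhs) {
  switch (cc) {
    case kLT: return lhs < rhs;
    case kLE: return lhs <= rhs;
    case kGT: return lhs > rhs;
    case kGE: return lhs >= rhs;
  }
  return false;
}

int main() {
  unsigned index = 7, length = 5;
  // Comparing (length, index) with "le" deopts exactly when comparing
  // (index, length) with the commuted condition "ge" does.
  assert(Holds(kLE, length, index) ==
         Holds(CommuteCondition(kLE), index, length));
  return 0;
}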
4266 4436
4267 4437
4268 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4438 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4269 Register external_pointer = ToRegister(instr->elements()); 4439 Register external_pointer = ToRegister(instr->elements());
4270 Register key = no_reg; 4440 Register key = no_reg;
4271 ElementsKind elements_kind = instr->elements_kind(); 4441 ElementsKind elements_kind = instr->elements_kind();
4272 bool key_is_constant = instr->key()->IsConstantOperand(); 4442 bool key_is_constant = instr->key()->IsConstantOperand();
4273 int constant_key = 0; 4443 int constant_key = 0;
4274 if (key_is_constant) { 4444 if (key_is_constant) {
4275 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4445 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4276 if (constant_key & 0xF0000000) { 4446 if (constant_key & 0xF0000000) {
4277 Abort(kArrayIndexConstantValueTooBig); 4447 Abort(kArrayIndexConstantValueTooBig);
4278 } 4448 }
4279 } else { 4449 } else {
4280 key = ToRegister(instr->key()); 4450 key = ToRegister(instr->key());
4281 } 4451 }
4282 int element_size_shift = ElementsKindToShiftSize(elements_kind); 4452 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4283 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 4453 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4284 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4285 int base_offset = instr->base_offset(); 4454 int base_offset = instr->base_offset();
4286 4455
4287 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 4456 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4288 elements_kind == FLOAT32_ELEMENTS || 4457 elements_kind == FLOAT32_ELEMENTS ||
4289 elements_kind == EXTERNAL_FLOAT64_ELEMENTS || 4458 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4290 elements_kind == FLOAT64_ELEMENTS) { 4459 elements_kind == FLOAT64_ELEMENTS) {
4291 Register address = scratch0(); 4460 Register address = scratch0();
4292 DwVfpRegister value(ToDoubleRegister(instr->value())); 4461 DoubleRegister value(ToDoubleRegister(instr->value()));
4293 if (key_is_constant) { 4462 if (key_is_constant) {
4294 if (constant_key != 0) { 4463 if (constant_key != 0) {
4295 __ add(address, external_pointer, 4464 __ Add(address, external_pointer, constant_key << element_size_shift,
4296 Operand(constant_key << element_size_shift)); 4465 r0);
4297 } else { 4466 } else {
4298 address = external_pointer; 4467 address = external_pointer;
4299 } 4468 }
4300 } else { 4469 } else {
4301 __ add(address, external_pointer, Operand(key, LSL, shift_size)); 4470 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
4471 __ add(address, external_pointer, r0);
4302 } 4472 }
4303 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 4473 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4304 elements_kind == FLOAT32_ELEMENTS) { 4474 elements_kind == FLOAT32_ELEMENTS) {
4305 __ vcvt_f32_f64(double_scratch0().low(), value); 4475 __ frsp(double_scratch0(), value);
4306 __ vstr(double_scratch0().low(), address, base_offset); 4476 __ stfs(double_scratch0(), MemOperand(address, base_offset));
4307 } else { // Storing doubles, not floats. 4477 } else { // Storing doubles, not floats.
4308 __ vstr(value, address, base_offset); 4478 __ stfd(value, MemOperand(address, base_offset));
4309 } 4479 }
4310 } else { 4480 } else {
4311 Register value(ToRegister(instr->value())); 4481 Register value(ToRegister(instr->value()));
4312 MemOperand mem_operand = PrepareKeyedOperand( 4482 MemOperand mem_operand =
4313 key, external_pointer, key_is_constant, constant_key, 4483 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
4314 element_size_shift, shift_size, 4484 constant_key, element_size_shift, base_offset);
4315 base_offset);
4316 switch (elements_kind) { 4485 switch (elements_kind) {
4317 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: 4486 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4318 case EXTERNAL_INT8_ELEMENTS: 4487 case EXTERNAL_INT8_ELEMENTS:
4319 case EXTERNAL_UINT8_ELEMENTS: 4488 case EXTERNAL_UINT8_ELEMENTS:
4320 case UINT8_ELEMENTS: 4489 case UINT8_ELEMENTS:
4321 case UINT8_CLAMPED_ELEMENTS: 4490 case UINT8_CLAMPED_ELEMENTS:
4322 case INT8_ELEMENTS: 4491 case INT8_ELEMENTS:
4323 __ strb(value, mem_operand); 4492 if (key_is_constant) {
4493 __ StoreByte(value, mem_operand, r0);
4494 } else {
4495 __ stbx(value, mem_operand);
4496 }
4324 break; 4497 break;
4325 case EXTERNAL_INT16_ELEMENTS: 4498 case EXTERNAL_INT16_ELEMENTS:
4326 case EXTERNAL_UINT16_ELEMENTS: 4499 case EXTERNAL_UINT16_ELEMENTS:
4327 case INT16_ELEMENTS: 4500 case INT16_ELEMENTS:
4328 case UINT16_ELEMENTS: 4501 case UINT16_ELEMENTS:
4329 __ strh(value, mem_operand); 4502 if (key_is_constant) {
4503 __ StoreHalfWord(value, mem_operand, r0);
4504 } else {
4505 __ sthx(value, mem_operand);
4506 }
4330 break; 4507 break;
4331 case EXTERNAL_INT32_ELEMENTS: 4508 case EXTERNAL_INT32_ELEMENTS:
4332 case EXTERNAL_UINT32_ELEMENTS: 4509 case EXTERNAL_UINT32_ELEMENTS:
4333 case INT32_ELEMENTS: 4510 case INT32_ELEMENTS:
4334 case UINT32_ELEMENTS: 4511 case UINT32_ELEMENTS:
4335 __ str(value, mem_operand); 4512 if (key_is_constant) {
4513 __ StoreWord(value, mem_operand, r0);
4514 } else {
4515 __ stwx(value, mem_operand);
4516 }
4336 break; 4517 break;
4337 case FLOAT32_ELEMENTS: 4518 case FLOAT32_ELEMENTS:
4338 case FLOAT64_ELEMENTS: 4519 case FLOAT64_ELEMENTS:
4339 case EXTERNAL_FLOAT32_ELEMENTS: 4520 case EXTERNAL_FLOAT32_ELEMENTS:
4340 case EXTERNAL_FLOAT64_ELEMENTS: 4521 case EXTERNAL_FLOAT64_ELEMENTS:
4341 case FAST_DOUBLE_ELEMENTS: 4522 case FAST_DOUBLE_ELEMENTS:
4342 case FAST_ELEMENTS: 4523 case FAST_ELEMENTS:
4343 case FAST_SMI_ELEMENTS: 4524 case FAST_SMI_ELEMENTS:
4344 case FAST_HOLEY_DOUBLE_ELEMENTS: 4525 case FAST_HOLEY_DOUBLE_ELEMENTS:
4345 case FAST_HOLEY_ELEMENTS: 4526 case FAST_HOLEY_ELEMENTS:
4346 case FAST_HOLEY_SMI_ELEMENTS: 4527 case FAST_HOLEY_SMI_ELEMENTS:
4347 case DICTIONARY_ELEMENTS: 4528 case DICTIONARY_ELEMENTS:
4348 case SLOPPY_ARGUMENTS_ELEMENTS: 4529 case SLOPPY_ARGUMENTS_ELEMENTS:
4349 UNREACHABLE(); 4530 UNREACHABLE();
4350 break; 4531 break;
4351 } 4532 }
4352 } 4533 }
4353 } 4534 }
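The element address computation above feeds a register key through IndexToArrayOffset, which has to account for keys that are already Smi-tagged. A simplified model of that adjustment; the 32-bit Smi shift of 1 is an assumption made only for this example:

// Illustrative sketch only: a Smi key already carries its tag shift, so the
// element-size shift is reduced by the Smi shift before scaling the key.
#include <cassert>
#include <cstdint>

int32_t IndexToArrayOffset(int32_t key, int element_size_shift,
                           bool key_is_smi) {
  const int kSmiShift = 1;  // 32-bit layout assumption for the example
  int shift = key_is_smi ? element_size_shift - kSmiShift : element_size_shift;
  return key << shift;
}

int main() {
  // For a double element (shift 3), the untagged index 5 and the Smi-tagged
  // index 5 (i.e. 10) must both yield a byte offset of 40.
  assert(IndexToArrayOffset(5, 3, false) == 40);
  assert(IndexToArrayOffset(5 << 1, 3, true) == 40);
  return 0;
}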
4354 4535
4355 4536
4356 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4537 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4357 DwVfpRegister value = ToDoubleRegister(instr->value()); 4538 DoubleRegister value = ToDoubleRegister(instr->value());
4358 Register elements = ToRegister(instr->elements()); 4539 Register elements = ToRegister(instr->elements());
4540 Register key = no_reg;
4359 Register scratch = scratch0(); 4541 Register scratch = scratch0();
4360 DwVfpRegister double_scratch = double_scratch0(); 4542 DoubleRegister double_scratch = double_scratch0();
4361 bool key_is_constant = instr->key()->IsConstantOperand(); 4543 bool key_is_constant = instr->key()->IsConstantOperand();
4362 int base_offset = instr->base_offset(); 4544 int constant_key = 0;
4363 4545
4364 // Calculate the effective address of the slot in the array to store the 4546 // Calculate the effective address of the slot in the array to store the
4365 // double value. 4547 // double value.
4366 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4367 if (key_is_constant) { 4548 if (key_is_constant) {
4368 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4549 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4369 if (constant_key & 0xF0000000) { 4550 if (constant_key & 0xF0000000) {
4370 Abort(kArrayIndexConstantValueTooBig); 4551 Abort(kArrayIndexConstantValueTooBig);
4371 } 4552 }
4372 __ add(scratch, elements,
4373 Operand((constant_key << element_size_shift) + base_offset));
4374 } else { 4553 } else {
4375 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 4554 key = ToRegister(instr->key());
4376 ? (element_size_shift - kSmiTagSize) : element_size_shift; 4555 }
4377 __ add(scratch, elements, Operand(base_offset)); 4556 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4378 __ add(scratch, scratch, 4557 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4379 Operand(ToRegister(instr->key()), LSL, shift_size)); 4558 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
4559 if (!key_is_constant) {
4560 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
4561 __ add(scratch, elements, scratch);
4562 elements = scratch;
4563 }
4564 if (!is_int16(base_offset)) {
4565 __ Add(scratch, elements, base_offset, r0);
4566 base_offset = 0;
4567 elements = scratch;
4380 } 4568 }
4381 4569
4382 if (instr->NeedsCanonicalization()) { 4570 if (instr->NeedsCanonicalization()) {
4383 // Force a canonical NaN. 4571 // Force a canonical NaN.
4384 if (masm()->emit_debug_code()) { 4572 __ CanonicalizeNaN(double_scratch, value);
4385 __ vmrs(ip); 4573 __ stfd(double_scratch, MemOperand(elements, base_offset));
4386 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
4387 __ Assert(ne, kDefaultNaNModeNotSet);
4388 }
4389 __ VFPCanonicalizeNaN(double_scratch, value);
4390 __ vstr(double_scratch, scratch, 0);
4391 } else { 4574 } else {
4392 __ vstr(value, scratch, 0); 4575 __ stfd(value, MemOperand(elements, base_offset));
4393 } 4576 }
4394 } 4577 }
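When NeedsCanonicalization() is set, arbitrary NaN payloads are rewritten to the canonical quiet NaN before landing in the double backing store, so they cannot collide with the reserved NaN pattern used for the hole. A hedged sketch of the idea; the exact hole encoding is not shown here and is assumed:

// Illustrative sketch only: canonicalize any NaN to one fixed bit pattern
// before storing, mirroring what CanonicalizeNaN achieves above.
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

double CanonicalizeNaN(double value) {
  return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN() : value;
}

int main() {
  // A NaN with an unusual payload, produced by reinterpreting raw bits.
  uint64_t weird_bits = 0x7FF0000000000001ULL;
  double weird_nan;
  std::memcpy(&weird_nan, &weird_bits, sizeof(weird_nan));
  double stored = CanonicalizeNaN(weird_nan);
  return std::isnan(stored) ? 0 : 1;  // still NaN, but with a fixed pattern
}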
4395 4578
4396 4579
4397 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { 4580 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4581 HStoreKeyed* hinstr = instr->hydrogen();
4398 Register value = ToRegister(instr->value()); 4582 Register value = ToRegister(instr->value());
4399 Register elements = ToRegister(instr->elements()); 4583 Register elements = ToRegister(instr->elements());
4400 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) 4584 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4401 : no_reg;
4402 Register scratch = scratch0(); 4585 Register scratch = scratch0();
4403 Register store_base = scratch; 4586 Register store_base = scratch;
4404 int offset = instr->base_offset(); 4587 int offset = instr->base_offset();
4405 4588
4406 // Do the store. 4589 // Do the store.
4407 if (instr->key()->IsConstantOperand()) { 4590 if (instr->key()->IsConstantOperand()) {
4408 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 4591 DCHECK(!hinstr->NeedsWriteBarrier());
4409 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 4592 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4410 offset += ToInteger32(const_operand) * kPointerSize; 4593 offset += ToInteger32(const_operand) * kPointerSize;
4411 store_base = elements; 4594 store_base = elements;
4412 } else { 4595 } else {
4413 // Even though the HLoadKeyed instruction forces the input 4596 // Even though the HLoadKeyed instruction forces the input
4414 // representation for the key to be an integer, the input gets replaced 4597 // representation for the key to be an integer, the input gets replaced
4415 // during bounds check elimination with the index argument to the bounds 4598 // during bounds check elimination with the index argument to the bounds
4416 // check, which can be tagged, so that case must be handled here, too. 4599 // check, which can be tagged, so that case must be handled here, too.
4417 if (instr->hydrogen()->key()->representation().IsSmi()) { 4600 if (hinstr->key()->representation().IsSmi()) {
4418 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); 4601 __ SmiToPtrArrayOffset(scratch, key);
4419 } else { 4602 } else {
4420 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); 4603 __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
4421 } 4604 }
4605 __ add(scratch, elements, scratch);
4422 } 4606 }
4423 __ str(value, MemOperand(store_base, offset));
4424 4607
4425 if (instr->hydrogen()->NeedsWriteBarrier()) { 4608 Representation representation = hinstr->value()->representation();
4426 SmiCheck check_needed = 4609
4427 instr->hydrogen()->value()->type().IsHeapObject() 4610 #if V8_TARGET_ARCH_PPC64
4428 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4611 // 64-bit Smi optimization
4612 if (representation.IsInteger32()) {
4613 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4614 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4615 // Store int value directly to upper half of the smi.
4616 STATIC_ASSERT(kSmiTag == 0);
4617 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4618 #if V8_TARGET_LITTLE_ENDIAN
4619 offset += kPointerSize / 2;
4620 #endif
4621 }
4622 #endif
4623
4624 __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
4625 r0);
4626
4627 if (hinstr->NeedsWriteBarrier()) {
4628 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4629 ? OMIT_SMI_CHECK
4630 : INLINE_SMI_CHECK;
4429 // Compute address of modified element and store it into key register. 4631 // Compute address of modified element and store it into key register.
4430 __ add(key, store_base, Operand(offset)); 4632 __ Add(key, store_base, offset, r0);
4431 __ RecordWrite(elements, 4633 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
4432 key, 4634 EMIT_REMEMBERED_SET, check_needed,
4433 value, 4635 hinstr->PointersToHereCheckForValue());
4434 GetLinkRegisterState(),
4435 kSaveFPRegs,
4436 EMIT_REMEMBERED_SET,
4437 check_needed,
4438 instr->hydrogen()->PointersToHereCheckForValue());
4439 } 4636 }
4440 } 4637 }
4441 4638
4442 4639
4443 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { 4640 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4444 // By cases: external, fast double, or fixed array. 4641 // By cases: external, fast double, or fixed array.
4445 if (instr->is_typed_elements()) { 4642 if (instr->is_typed_elements()) {
4446 DoStoreKeyedExternalArray(instr); 4643 DoStoreKeyedExternalArray(instr);
4447 } else if (instr->hydrogen()->value()->representation().IsDouble()) { 4644 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4448 DoStoreKeyedFixedDoubleArray(instr); 4645 DoStoreKeyedFixedDoubleArray(instr);
4449 } else { 4646 } else {
4450 DoStoreKeyedFixedArray(instr); 4647 DoStoreKeyedFixedArray(instr);
4451 } 4648 }
4452 } 4649 }
4453 4650
4454 4651
4455 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 4652 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4456 DCHECK(ToRegister(instr->context()).is(cp)); 4653 DCHECK(ToRegister(instr->context()).is(cp));
4457 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 4654 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4458 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister())); 4655 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4459 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 4656 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4460 4657
4461 Handle<Code> ic = 4658 Handle<Code> ic =
4462 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code(); 4659 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4463 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4660 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4464 } 4661 }
4465 4662
4466 4663
4467 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4664 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4468 Register object_reg = ToRegister(instr->object()); 4665 Register object_reg = ToRegister(instr->object());
4469 Register scratch = scratch0(); 4666 Register scratch = scratch0();
4470 4667
4471 Handle<Map> from_map = instr->original_map(); 4668 Handle<Map> from_map = instr->original_map();
4472 Handle<Map> to_map = instr->transitioned_map(); 4669 Handle<Map> to_map = instr->transitioned_map();
4473 ElementsKind from_kind = instr->from_kind(); 4670 ElementsKind from_kind = instr->from_kind();
4474 ElementsKind to_kind = instr->to_kind(); 4671 ElementsKind to_kind = instr->to_kind();
4475 4672
4476 Label not_applicable; 4673 Label not_applicable;
4477 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); 4674 __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4478 __ cmp(scratch, Operand(from_map)); 4675 __ Cmpi(scratch, Operand(from_map), r0);
4479 __ b(ne, &not_applicable); 4676 __ bne(&not_applicable);
4480 4677
4481 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { 4678 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4482 Register new_map_reg = ToRegister(instr->new_map_temp()); 4679 Register new_map_reg = ToRegister(instr->new_map_temp());
4483 __ mov(new_map_reg, Operand(to_map)); 4680 __ mov(new_map_reg, Operand(to_map));
4484 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); 4681 __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
4682 r0);
4485 // Write barrier. 4683 // Write barrier.
4486 __ RecordWriteForMap(object_reg, 4684 __ RecordWriteForMap(object_reg, new_map_reg, scratch,
4487 new_map_reg, 4685 GetLinkRegisterState(), kDontSaveFPRegs);
4488 scratch,
4489 GetLinkRegisterState(),
4490 kDontSaveFPRegs);
4491 } else { 4686 } else {
4492 DCHECK(ToRegister(instr->context()).is(cp)); 4687 DCHECK(ToRegister(instr->context()).is(cp));
4493 DCHECK(object_reg.is(r0)); 4688 DCHECK(object_reg.is(r3));
4494 PushSafepointRegistersScope scope(this); 4689 PushSafepointRegistersScope scope(this);
4495 __ Move(r1, to_map); 4690 __ Move(r4, to_map);
4496 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; 4691 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4497 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); 4692 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4498 __ CallStub(&stub); 4693 __ CallStub(&stub);
4499 RecordSafepointWithRegisters( 4694 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4500 instr->pointer_map(), 0, Safepoint::kLazyDeopt); 4695 Safepoint::kLazyDeopt);
4501 } 4696 }
4502 __ bind(&not_applicable); 4697 __ bind(&not_applicable);
4503 } 4698 }
4504 4699
4505 4700
4506 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 4701 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4507 Register object = ToRegister(instr->object()); 4702 Register object = ToRegister(instr->object());
4508 Register temp = ToRegister(instr->temp()); 4703 Register temp = ToRegister(instr->temp());
4509 Label no_memento_found; 4704 Label no_memento_found;
4510 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); 4705 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4511 DeoptimizeIf(eq, instr->environment()); 4706 DeoptimizeIf(eq, instr);
4512 __ bind(&no_memento_found); 4707 __ bind(&no_memento_found);
4513 } 4708 }
4514 4709
4515 4710
4516 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4711 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4517 DCHECK(ToRegister(instr->context()).is(cp)); 4712 DCHECK(ToRegister(instr->context()).is(cp));
4518 DCHECK(ToRegister(instr->left()).is(r1)); 4713 DCHECK(ToRegister(instr->left()).is(r4));
4519 DCHECK(ToRegister(instr->right()).is(r0)); 4714 DCHECK(ToRegister(instr->right()).is(r3));
4520 StringAddStub stub(isolate(), 4715 StringAddStub stub(isolate(), instr->hydrogen()->flags(),
4521 instr->hydrogen()->flags(),
4522 instr->hydrogen()->pretenure_flag()); 4716 instr->hydrogen()->pretenure_flag());
4523 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4717 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4524 } 4718 }
4525 4719
4526 4720
4527 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 4721 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4528 class DeferredStringCharCodeAt FINAL : public LDeferredCode { 4722 class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4529 public: 4723 public:
4530 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) 4724 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4531 : LDeferredCode(codegen), instr_(instr) { } 4725 : LDeferredCode(codegen), instr_(instr) {}
4532 virtual void Generate() OVERRIDE { 4726 virtual void Generate() OVERRIDE {
4533 codegen()->DoDeferredStringCharCodeAt(instr_); 4727 codegen()->DoDeferredStringCharCodeAt(instr_);
4534 } 4728 }
4535 virtual LInstruction* instr() OVERRIDE { return instr_; } 4729 virtual LInstruction* instr() OVERRIDE { return instr_; }
4730
4536 private: 4731 private:
4537 LStringCharCodeAt* instr_; 4732 LStringCharCodeAt* instr_;
4538 }; 4733 };
4539 4734
4540 DeferredStringCharCodeAt* deferred = 4735 DeferredStringCharCodeAt* deferred =
4541 new(zone()) DeferredStringCharCodeAt(this, instr); 4736 new (zone()) DeferredStringCharCodeAt(this, instr);
4542 4737
4543 StringCharLoadGenerator::Generate(masm(), 4738 StringCharLoadGenerator::Generate(
4544 ToRegister(instr->string()), 4739 masm(), ToRegister(instr->string()), ToRegister(instr->index()),
4545 ToRegister(instr->index()), 4740 ToRegister(instr->result()), deferred->entry());
4546 ToRegister(instr->result()),
4547 deferred->entry());
4548 __ bind(deferred->exit()); 4741 __ bind(deferred->exit());
4549 } 4742 }
4550 4743
4551 4744
4552 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { 4745 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4553 Register string = ToRegister(instr->string()); 4746 Register string = ToRegister(instr->string());
4554 Register result = ToRegister(instr->result()); 4747 Register result = ToRegister(instr->result());
4555 Register scratch = scratch0(); 4748 Register scratch = scratch0();
4556 4749
4557 // TODO(3095996): Get rid of this. For now, we need to make the 4750 // TODO(3095996): Get rid of this. For now, we need to make the
4558 // result register contain a valid pointer because it is already 4751 // result register contain a valid pointer because it is already
4559 // contained in the register pointer map. 4752 // contained in the register pointer map.
4560 __ mov(result, Operand::Zero()); 4753 __ li(result, Operand::Zero());
4561 4754
4562 PushSafepointRegistersScope scope(this); 4755 PushSafepointRegistersScope scope(this);
4563 __ push(string); 4756 __ push(string);
4564 // Push the index as a smi. This is safe because of the checks in 4757 // Push the index as a smi. This is safe because of the checks in
4565 // DoStringCharCodeAt above. 4758 // DoStringCharCodeAt above.
4566 if (instr->index()->IsConstantOperand()) { 4759 if (instr->index()->IsConstantOperand()) {
4567 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 4760 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4568 __ mov(scratch, Operand(Smi::FromInt(const_index))); 4761 __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
4569 __ push(scratch); 4762 __ push(scratch);
4570 } else { 4763 } else {
4571 Register index = ToRegister(instr->index()); 4764 Register index = ToRegister(instr->index());
4572 __ SmiTag(index); 4765 __ SmiTag(index);
4573 __ push(index); 4766 __ push(index);
4574 } 4767 }
4575 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, 4768 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4576 instr->context()); 4769 instr->context());
4577 __ AssertSmi(r0); 4770 __ AssertSmi(r3);
4578 __ SmiUntag(r0); 4771 __ SmiUntag(r3);
4579 __ StoreToSafepointRegisterSlot(r0, result); 4772 __ StoreToSafepointRegisterSlot(r3, result);
4580 } 4773 }
4581 4774
4582 4775
4583 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 4776 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4584 class DeferredStringCharFromCode FINAL : public LDeferredCode { 4777 class DeferredStringCharFromCode FINAL : public LDeferredCode {
4585 public: 4778 public:
4586 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) 4779 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4587 : LDeferredCode(codegen), instr_(instr) { } 4780 : LDeferredCode(codegen), instr_(instr) {}
4588 virtual void Generate() OVERRIDE { 4781 virtual void Generate() OVERRIDE {
4589 codegen()->DoDeferredStringCharFromCode(instr_); 4782 codegen()->DoDeferredStringCharFromCode(instr_);
4590 } 4783 }
4591 virtual LInstruction* instr() OVERRIDE { return instr_; } 4784 virtual LInstruction* instr() OVERRIDE { return instr_; }
4785
4592 private: 4786 private:
4593 LStringCharFromCode* instr_; 4787 LStringCharFromCode* instr_;
4594 }; 4788 };
4595 4789
4596 DeferredStringCharFromCode* deferred = 4790 DeferredStringCharFromCode* deferred =
4597 new(zone()) DeferredStringCharFromCode(this, instr); 4791 new (zone()) DeferredStringCharFromCode(this, instr);
4598 4792
4599 DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); 4793 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4600 Register char_code = ToRegister(instr->char_code()); 4794 Register char_code = ToRegister(instr->char_code());
4601 Register result = ToRegister(instr->result()); 4795 Register result = ToRegister(instr->result());
4602 DCHECK(!char_code.is(result)); 4796 DCHECK(!char_code.is(result));
4603 4797
4604 __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); 4798 __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
4605 __ b(hi, deferred->entry()); 4799 __ bgt(deferred->entry());
4606 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); 4800 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4607 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); 4801 __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
4608 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); 4802 __ add(result, result, r0);
4803 __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4609 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4804 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4610 __ cmp(result, ip); 4805 __ cmp(result, ip);
4611 __ b(eq, deferred->entry()); 4806 __ beq(deferred->entry());
4612 __ bind(deferred->exit()); 4807 __ bind(deferred->exit());
4613 } 4808 }
4614 4809
4615 4810
4616 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 4811 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4617 Register char_code = ToRegister(instr->char_code()); 4812 Register char_code = ToRegister(instr->char_code());
4618 Register result = ToRegister(instr->result()); 4813 Register result = ToRegister(instr->result());
4619 4814
4620 // TODO(3095996): Get rid of this. For now, we need to make the 4815 // TODO(3095996): Get rid of this. For now, we need to make the
4621 // result register contain a valid pointer because it is already 4816 // result register contain a valid pointer because it is already
4622 // contained in the register pointer map. 4817 // contained in the register pointer map.
4623 __ mov(result, Operand::Zero()); 4818 __ li(result, Operand::Zero());
4624 4819
4625 PushSafepointRegistersScope scope(this); 4820 PushSafepointRegistersScope scope(this);
4626 __ SmiTag(char_code); 4821 __ SmiTag(char_code);
4627 __ push(char_code); 4822 __ push(char_code);
4628 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); 4823 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4629 __ StoreToSafepointRegisterSlot(r0, result); 4824 __ StoreToSafepointRegisterSlot(r3, result);
4630 } 4825 }
4631 4826
4632 4827
4633 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4828 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4634 LOperand* input = instr->value(); 4829 LOperand* input = instr->value();
4635 DCHECK(input->IsRegister() || input->IsStackSlot()); 4830 DCHECK(input->IsRegister() || input->IsStackSlot());
4636 LOperand* output = instr->result(); 4831 LOperand* output = instr->result();
4637 DCHECK(output->IsDoubleRegister()); 4832 DCHECK(output->IsDoubleRegister());
4638 SwVfpRegister single_scratch = double_scratch0().low();
4639 if (input->IsStackSlot()) { 4833 if (input->IsStackSlot()) {
4640 Register scratch = scratch0(); 4834 Register scratch = scratch0();
4641 __ ldr(scratch, ToMemOperand(input)); 4835 __ LoadP(scratch, ToMemOperand(input));
4642 __ vmov(single_scratch, scratch); 4836 __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
4643 } else { 4837 } else {
4644 __ vmov(single_scratch, ToRegister(input)); 4838 __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
4645 } 4839 }
4646 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4647 } 4840 }
4648 4841
4649 4842
4650 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4843 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4651 LOperand* input = instr->value(); 4844 LOperand* input = instr->value();
4652 LOperand* output = instr->result(); 4845 LOperand* output = instr->result();
4653 4846 __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
4654 SwVfpRegister flt_scratch = double_scratch0().low();
4655 __ vmov(flt_scratch, ToRegister(input));
4656 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4657 } 4847 }
4658 4848
4659 4849
4660 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4850 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4661 class DeferredNumberTagI FINAL : public LDeferredCode { 4851 class DeferredNumberTagI FINAL : public LDeferredCode {
4662 public: 4852 public:
4663 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) 4853 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4664 : LDeferredCode(codegen), instr_(instr) { } 4854 : LDeferredCode(codegen), instr_(instr) {}
4665 virtual void Generate() OVERRIDE { 4855 virtual void Generate() OVERRIDE {
4666 codegen()->DoDeferredNumberTagIU(instr_, 4856 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4667 instr_->value(), 4857 instr_->temp2(), SIGNED_INT32);
4668 instr_->temp1(),
4669 instr_->temp2(),
4670 SIGNED_INT32);
4671 } 4858 }
4672 virtual LInstruction* instr() OVERRIDE { return instr_; } 4859 virtual LInstruction* instr() OVERRIDE { return instr_; }
4860
4673 private: 4861 private:
4674 LNumberTagI* instr_; 4862 LNumberTagI* instr_;
4675 }; 4863 };
4676 4864
4677 Register src = ToRegister(instr->value()); 4865 Register src = ToRegister(instr->value());
4678 Register dst = ToRegister(instr->result()); 4866 Register dst = ToRegister(instr->result());
4679 4867
4680 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); 4868 DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
4681 __ SmiTag(dst, src, SetCC); 4869 #if V8_TARGET_ARCH_PPC64
4682 __ b(vs, deferred->entry()); 4870 __ SmiTag(dst, src);
4871 #else
4872 __ SmiTagCheckOverflow(dst, src, r0);
4873 __ BranchOnOverflow(deferred->entry());
4874 #endif
4683 __ bind(deferred->exit()); 4875 __ bind(deferred->exit());
4684 } 4876 }
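On 32-bit targets the fast path above tags with an overflow check and falls back to the deferred heap-number allocation when the value does not fit in a Smi. A small model of that check, assuming a Smi tag size of 1 as on the 32-bit layout:

// Illustrative sketch only: an int32 fits in a 32-bit Smi exactly when bits
// 30 and 31 agree, because tagging shifts the value left by one.
#include <cassert>
#include <cstdint>

bool SmiTagWithOverflowCheck(int32_t value, int32_t* tagged_out) {
  // Shift on the unsigned type, then compare signs to detect overflow.
  int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  bool overflow = (value ^ shifted) < 0;
  if (!overflow) *tagged_out = shifted;  // kSmiTagSize == 1, kSmiTag == 0
  return !overflow;
}

int main() {
  int32_t tagged;
  assert(SmiTagWithOverflowCheck(0x3FFFFFFF, &tagged));   // max Smi value
  assert(!SmiTagWithOverflowCheck(0x40000000, &tagged));  // overflows
  assert(SmiTagWithOverflowCheck(-0x40000000, &tagged));  // min Smi value
  return 0;
}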
4685 4877
4686 4878
4687 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4879 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4688 class DeferredNumberTagU FINAL : public LDeferredCode { 4880 class DeferredNumberTagU FINAL : public LDeferredCode {
4689 public: 4881 public:
4690 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) 4882 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4691 : LDeferredCode(codegen), instr_(instr) { } 4883 : LDeferredCode(codegen), instr_(instr) {}
4692 virtual void Generate() OVERRIDE { 4884 virtual void Generate() OVERRIDE {
4693 codegen()->DoDeferredNumberTagIU(instr_, 4885 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4694 instr_->value(), 4886 instr_->temp2(), UNSIGNED_INT32);
4695 instr_->temp1(),
4696 instr_->temp2(),
4697 UNSIGNED_INT32);
4698 } 4887 }
4699 virtual LInstruction* instr() OVERRIDE { return instr_; } 4888 virtual LInstruction* instr() OVERRIDE { return instr_; }
4889
4700 private: 4890 private:
4701 LNumberTagU* instr_; 4891 LNumberTagU* instr_;
4702 }; 4892 };
4703 4893
4704 Register input = ToRegister(instr->value()); 4894 Register input = ToRegister(instr->value());
4705 Register result = ToRegister(instr->result()); 4895 Register result = ToRegister(instr->result());
4706 4896
4707 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); 4897 DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
4708 __ cmp(input, Operand(Smi::kMaxValue)); 4898 __ Cmpli(input, Operand(Smi::kMaxValue), r0);
4709 __ b(hi, deferred->entry()); 4899 __ bgt(deferred->entry());
4710 __ SmiTag(result, input); 4900 __ SmiTag(result, input);
4711 __ bind(deferred->exit()); 4901 __ bind(deferred->exit());
4712 } 4902 }
4713 4903
4714 4904
4715 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, 4905 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
4716 LOperand* value, 4906 LOperand* temp1, LOperand* temp2,
4717 LOperand* temp1,
4718 LOperand* temp2,
4719 IntegerSignedness signedness) { 4907 IntegerSignedness signedness) {
4720 Label done, slow; 4908 Label done, slow;
4721 Register src = ToRegister(value); 4909 Register src = ToRegister(value);
4722 Register dst = ToRegister(instr->result()); 4910 Register dst = ToRegister(instr->result());
4723 Register tmp1 = scratch0(); 4911 Register tmp1 = scratch0();
4724 Register tmp2 = ToRegister(temp1); 4912 Register tmp2 = ToRegister(temp1);
4725 Register tmp3 = ToRegister(temp2); 4913 Register tmp3 = ToRegister(temp2);
4726 LowDwVfpRegister dbl_scratch = double_scratch0(); 4914 DoubleRegister dbl_scratch = double_scratch0();
4727 4915
4728 if (signedness == SIGNED_INT32) { 4916 if (signedness == SIGNED_INT32) {
4729 // There was overflow, so bits 30 and 31 of the original integer 4917 // There was overflow, so bits 30 and 31 of the original integer
4730 // disagree. Try to allocate a heap number in new space and store 4918 // disagree. Try to allocate a heap number in new space and store
4731 // the value in there. If that fails, call the runtime system. 4919 // the value in there. If that fails, call the runtime system.
4732 if (dst.is(src)) { 4920 if (dst.is(src)) {
4733 __ SmiUntag(src, dst); 4921 __ SmiUntag(src, dst);
4734 __ eor(src, src, Operand(0x80000000)); 4922 __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
4735 } 4923 }
4736 __ vmov(dbl_scratch.low(), src); 4924 __ ConvertIntToDouble(src, dbl_scratch);
4737 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4738 } else { 4925 } else {
4739 __ vmov(dbl_scratch.low(), src); 4926 __ ConvertUnsignedIntToDouble(src, dbl_scratch);
4740 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4741 } 4927 }
4742 4928
4743 if (FLAG_inline_new) { 4929 if (FLAG_inline_new) {
4744 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); 4930 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4745 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT); 4931 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4746 __ b(&done); 4932 __ b(&done);
4747 } 4933 }
4748 4934
4749 // Slow case: Call the runtime system to do the number allocation. 4935 // Slow case: Call the runtime system to do the number allocation.
4750 __ bind(&slow); 4936 __ bind(&slow);
4751 { 4937 {
4752 // TODO(3095996): Put a valid pointer value in the stack slot where the 4938 // TODO(3095996): Put a valid pointer value in the stack slot where the
4753 // result register is stored, as this register is in the pointer map, but 4939 // result register is stored, as this register is in the pointer map, but
4754 // contains an integer value. 4940 // contains an integer value.
4755 __ mov(dst, Operand::Zero()); 4941 __ li(dst, Operand::Zero());
4756 4942
4757 // Preserve the value of all registers. 4943 // Preserve the value of all registers.
4758 PushSafepointRegistersScope scope(this); 4944 PushSafepointRegistersScope scope(this);
4759 4945
4760 // NumberTagI and NumberTagD use the context from the frame, rather than 4946 // NumberTagI and NumberTagD use the context from the frame, rather than
4761 // the environment's HContext or HInlinedContext value. 4947 // the environment's HContext or HInlinedContext value.
4762 // They only call Runtime::kAllocateHeapNumber. 4948 // They only call Runtime::kAllocateHeapNumber.
4763 // The corresponding HChange instructions are added in a phase that does 4949 // The corresponding HChange instructions are added in a phase that does
4764 // not have easy access to the local context. 4950 // not have easy access to the local context.
4765 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4951 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4766 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4952 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4767 RecordSafepointWithRegisters( 4953 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4768 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4954 Safepoint::kNoLazyDeopt);
4769 __ sub(r0, r0, Operand(kHeapObjectTag)); 4955 __ StoreToSafepointRegisterSlot(r3, dst);
4770 __ StoreToSafepointRegisterSlot(r0, dst);
4771 } 4956 }
4772 4957
4773 // Done. Put the value in dbl_scratch into the value of the allocated heap 4958 // Done. Put the value in dbl_scratch into the value of the allocated heap
4774 // number. 4959 // number.
4775 __ bind(&done); 4960 __ bind(&done);
4776 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); 4961 __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4777 __ add(dst, dst, Operand(kHeapObjectTag));
4778 } 4962 }
4779 4963
4780 4964
4781 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4965 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4782 class DeferredNumberTagD FINAL : public LDeferredCode { 4966 class DeferredNumberTagD FINAL : public LDeferredCode {
4783 public: 4967 public:
4784 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4968 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4785 : LDeferredCode(codegen), instr_(instr) { } 4969 : LDeferredCode(codegen), instr_(instr) {}
4786 virtual void Generate() OVERRIDE { 4970 virtual void Generate() OVERRIDE {
4787 codegen()->DoDeferredNumberTagD(instr_); 4971 codegen()->DoDeferredNumberTagD(instr_);
4788 } 4972 }
4789 virtual LInstruction* instr() OVERRIDE { return instr_; } 4973 virtual LInstruction* instr() OVERRIDE { return instr_; }
4974
4790 private: 4975 private:
4791 LNumberTagD* instr_; 4976 LNumberTagD* instr_;
4792 }; 4977 };
4793 4978
4794 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); 4979 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4795 Register scratch = scratch0(); 4980 Register scratch = scratch0();
4796 Register reg = ToRegister(instr->result()); 4981 Register reg = ToRegister(instr->result());
4797 Register temp1 = ToRegister(instr->temp()); 4982 Register temp1 = ToRegister(instr->temp());
4798 Register temp2 = ToRegister(instr->temp2()); 4983 Register temp2 = ToRegister(instr->temp2());
4799 4984
4800 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 4985 DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
4801 if (FLAG_inline_new) { 4986 if (FLAG_inline_new) {
4802 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); 4987 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4803 // We want the untagged address first for performance 4988 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4804 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4805 DONT_TAG_RESULT);
4806 } else { 4989 } else {
4807 __ jmp(deferred->entry()); 4990 __ b(deferred->entry());
4808 } 4991 }
4809 __ bind(deferred->exit()); 4992 __ bind(deferred->exit());
4810 __ vstr(input_reg, reg, HeapNumber::kValueOffset); 4993 __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4811 // Now that we have finished with the object's real address, tag it.
4812 __ add(reg, reg, Operand(kHeapObjectTag));
4813 } 4994 }
4814 4995
4815 4996
4816 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4997 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4817 // TODO(3095996): Get rid of this. For now, we need to make the 4998 // TODO(3095996): Get rid of this. For now, we need to make the
4818 // result register contain a valid pointer because it is already 4999 // result register contain a valid pointer because it is already
4819 // contained in the register pointer map. 5000 // contained in the register pointer map.
4820 Register reg = ToRegister(instr->result()); 5001 Register reg = ToRegister(instr->result());
4821 __ mov(reg, Operand::Zero()); 5002 __ li(reg, Operand::Zero());
4822 5003
4823 PushSafepointRegistersScope scope(this); 5004 PushSafepointRegistersScope scope(this);
4824 // NumberTagI and NumberTagD use the context from the frame, rather than 5005 // NumberTagI and NumberTagD use the context from the frame, rather than
4825 // the environment's HContext or HInlinedContext value. 5006 // the environment's HContext or HInlinedContext value.
4826 // They only call Runtime::kAllocateHeapNumber. 5007 // They only call Runtime::kAllocateHeapNumber.
4827 // The corresponding HChange instructions are added in a phase that does 5008 // The corresponding HChange instructions are added in a phase that does
4828 // not have easy access to the local context. 5009 // not have easy access to the local context.
4829 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 5010 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4830 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 5011 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4831 RecordSafepointWithRegisters( 5012 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4832 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 5013 Safepoint::kNoLazyDeopt);
4833 __ sub(r0, r0, Operand(kHeapObjectTag)); 5014 __ StoreToSafepointRegisterSlot(r3, reg);
4834 __ StoreToSafepointRegisterSlot(r0, reg);
4835 } 5015 }
4836 5016
4837 5017
4838 void LCodeGen::DoSmiTag(LSmiTag* instr) { 5018 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4839 HChange* hchange = instr->hydrogen(); 5019 HChange* hchange = instr->hydrogen();
4840 Register input = ToRegister(instr->value()); 5020 Register input = ToRegister(instr->value());
4841 Register output = ToRegister(instr->result()); 5021 Register output = ToRegister(instr->result());
4842 if (hchange->CheckFlag(HValue::kCanOverflow) && 5022 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4843 hchange->value()->CheckFlag(HValue::kUint32)) { 5023 hchange->value()->CheckFlag(HValue::kUint32)) {
4844 __ tst(input, Operand(0xc0000000)); 5024 __ TestUnsignedSmiCandidate(input, r0);
4845 DeoptimizeIf(ne, instr->environment()); 5025 DeoptimizeIf(ne, instr, cr0);
4846 } 5026 }
5027 #if !V8_TARGET_ARCH_PPC64
4847 if (hchange->CheckFlag(HValue::kCanOverflow) && 5028 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4848 !hchange->value()->CheckFlag(HValue::kUint32)) { 5029 !hchange->value()->CheckFlag(HValue::kUint32)) {
4849 __ SmiTag(output, input, SetCC); 5030 __ SmiTagCheckOverflow(output, input, r0);
4850 DeoptimizeIf(vs, instr->environment()); 5031 DeoptimizeIf(lt, instr, cr0);
4851 } else { 5032 } else {
5033 #endif
4852 __ SmiTag(output, input); 5034 __ SmiTag(output, input);
5035 #if !V8_TARGET_ARCH_PPC64
4853 } 5036 }
5037 #endif
4854 } 5038 }
4855 5039
4856 5040
4857 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 5041 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5042 Register scratch = scratch0();
4858 Register input = ToRegister(instr->value()); 5043 Register input = ToRegister(instr->value());
4859 Register result = ToRegister(instr->result()); 5044 Register result = ToRegister(instr->result());
4860 if (instr->needs_check()) { 5045 if (instr->needs_check()) {
4861 STATIC_ASSERT(kHeapObjectTag == 1); 5046 STATIC_ASSERT(kHeapObjectTag == 1);
4862 // If the input is a HeapObject, SmiUntag will set the carry flag. 5047 // If the input is a HeapObject, the value of scratch won't be zero.
4863 __ SmiUntag(result, input, SetCC); 5048 __ andi(scratch, input, Operand(kHeapObjectTag));
4864 DeoptimizeIf(cs, instr->environment()); 5049 __ SmiUntag(result, input);
5050 DeoptimizeIf(ne, instr, cr0);
4865 } else { 5051 } else {
4866 __ SmiUntag(result, input); 5052 __ SmiUntag(result, input);
4867 } 5053 }
4868 } 5054 }
4869 5055
4870 5056
4871 void LCodeGen::EmitNumberUntagD(Register input_reg, 5057 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4872 DwVfpRegister result_reg, 5058 DoubleRegister result_reg,
4873 bool can_convert_undefined_to_nan,
4874 bool deoptimize_on_minus_zero,
4875 LEnvironment* env,
4876 NumberUntagDMode mode) { 5059 NumberUntagDMode mode) {
5060 bool can_convert_undefined_to_nan =
5061 instr->hydrogen()->can_convert_undefined_to_nan();
5062 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
5063
4877 Register scratch = scratch0(); 5064 Register scratch = scratch0();
4878 SwVfpRegister flt_scratch = double_scratch0().low();
4879 DCHECK(!result_reg.is(double_scratch0())); 5065 DCHECK(!result_reg.is(double_scratch0()));
5066
4880 Label convert, load_smi, done; 5067 Label convert, load_smi, done;
5068
4881 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 5069 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4882 // Smi check. 5070 // Smi check.
4883 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 5071 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
5072
4884 // Heap number map check. 5073 // Heap number map check.
4885 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5074 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4886 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 5075 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4887 __ cmp(scratch, Operand(ip)); 5076 __ cmp(scratch, ip);
4888 if (can_convert_undefined_to_nan) { 5077 if (can_convert_undefined_to_nan) {
4889 __ b(ne, &convert); 5078 __ bne(&convert);
4890 } else { 5079 } else {
4891 DeoptimizeIf(ne, env); 5080 DeoptimizeIf(ne, instr);
4892 } 5081 }
4893 // load heap number 5082 // load heap number
4894 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); 5083 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4895 if (deoptimize_on_minus_zero) { 5084 if (deoptimize_on_minus_zero) {
4896 __ VmovLow(scratch, result_reg); 5085 #if V8_TARGET_ARCH_PPC64
4897 __ cmp(scratch, Operand::Zero()); 5086 __ MovDoubleToInt64(scratch, result_reg);
4898 __ b(ne, &done); 5087 // rotate left by one for simple compare.
4899 __ VmovHigh(scratch, result_reg); 5088 __ rldicl(scratch, scratch, 1, 0);
4900 __ cmp(scratch, Operand(HeapNumber::kSignMask)); 5089 __ cmpi(scratch, Operand(1));
4901 DeoptimizeIf(eq, env); 5090 #else
5091 __ MovDoubleToInt64(scratch, ip, result_reg);
5092 __ cmpi(ip, Operand::Zero());
5093 __ bne(&done);
5094 __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
5095 #endif
5096 DeoptimizeIf(eq, instr);
4902 } 5097 }
4903 __ jmp(&done); 5098 __ b(&done);
4904 if (can_convert_undefined_to_nan) { 5099 if (can_convert_undefined_to_nan) {
4905 __ bind(&convert); 5100 __ bind(&convert);
4906 // Convert undefined (and hole) to NaN. 5101 // Convert undefined (and hole) to NaN.
4907 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5102 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4908 __ cmp(input_reg, Operand(ip)); 5103 __ cmp(input_reg, ip);
4909 DeoptimizeIf(ne, env); 5104 DeoptimizeIf(ne, instr);
4910 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 5105 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4911 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); 5106 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4912 __ jmp(&done); 5107 __ b(&done);
4913 } 5108 }
4914 } else { 5109 } else {
4915 __ SmiUntag(scratch, input_reg); 5110 __ SmiUntag(scratch, input_reg);
4916 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 5111 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4917 } 5112 }
4918 // Smi to double register conversion 5113 // Smi to double register conversion
4919 __ bind(&load_smi); 5114 __ bind(&load_smi);
4920 // scratch: untagged value of input_reg 5115 // scratch: untagged value of input_reg
4921 __ vmov(flt_scratch, scratch); 5116 __ ConvertIntToDouble(scratch, result_reg);
4922 __ vcvt_f64_s32(result_reg, flt_scratch);
4923 __ bind(&done); 5117 __ bind(&done);
4924 } 5118 }
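The PPC64 deoptimize-on-minus-zero path above moves the double's raw bits into a GPR and rotates left by one, so that -0.0 maps to exactly 1 and can be tested with a single compare-immediate. A stand-alone sketch of the bit trick:

// Illustrative sketch only: -0.0 has bit pattern 0x8000000000000000, so a
// one-bit left rotation turns it into 1, while +0.0 rotates to 0.
#include <cassert>
#include <cstdint>
#include <cstring>

uint64_t RotateLeft1(uint64_t bits) { return (bits << 1) | (bits >> 63); }

bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return RotateLeft1(bits) == 1;
}

int main() {
  assert(IsMinusZero(-0.0));
  assert(!IsMinusZero(0.0));
  assert(!IsMinusZero(1.0));
  return 0;
}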
4925 5119
4926 5120
4927 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { 5121 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4928 Register input_reg = ToRegister(instr->value()); 5122 Register input_reg = ToRegister(instr->value());
4929 Register scratch1 = scratch0(); 5123 Register scratch1 = scratch0();
4930 Register scratch2 = ToRegister(instr->temp()); 5124 Register scratch2 = ToRegister(instr->temp());
4931 LowDwVfpRegister double_scratch = double_scratch0(); 5125 DoubleRegister double_scratch = double_scratch0();
4932 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2()); 5126 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4933 5127
4934 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); 5128 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4935 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); 5129 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4936 5130
4937 Label done; 5131 Label done;
4938 5132
4939 // The input was optimistically untagged; revert it.
4940 // The carry flag is set when we reach this deferred code as we just executed
4941 // SmiUntag(heap_object, SetCC)
4942 STATIC_ASSERT(kHeapObjectTag == 1);
4943 __ adc(scratch2, input_reg, Operand(input_reg));
4944
4945 // Heap number map check. 5133 // Heap number map check.
4946 __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset)); 5134 __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4947 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 5135 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4948 __ cmp(scratch1, Operand(ip)); 5136 __ cmp(scratch1, ip);
4949 5137
4950 if (instr->truncating()) { 5138 if (instr->truncating()) {
4951 // Performs a truncating conversion of a floating point number as used by 5139 // Performs a truncating conversion of a floating point number as used by
4952 // the JS bitwise operations. 5140 // the JS bitwise operations.
4953 Label no_heap_number, check_bools, check_false; 5141 Label no_heap_number, check_bools, check_false;
4954 __ b(ne, &no_heap_number); 5142 __ bne(&no_heap_number);
5143 __ mr(scratch2, input_reg);
4955 __ TruncateHeapNumberToI(input_reg, scratch2); 5144 __ TruncateHeapNumberToI(input_reg, scratch2);
4956 __ b(&done); 5145 __ b(&done);
4957 5146
4958 // Check for Oddballs. Undefined/False is converted to zero and True to one 5147 // Check for Oddballs. Undefined/False is converted to zero and True to one
4959 // for truncating conversions. 5148 // for truncating conversions.
4960 __ bind(&no_heap_number); 5149 __ bind(&no_heap_number);
4961 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5150 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4962 __ cmp(scratch2, Operand(ip)); 5151 __ cmp(input_reg, ip);
4963 __ b(ne, &check_bools); 5152 __ bne(&check_bools);
4964 __ mov(input_reg, Operand::Zero()); 5153 __ li(input_reg, Operand::Zero());
4965 __ b(&done); 5154 __ b(&done);
4966 5155
4967 __ bind(&check_bools); 5156 __ bind(&check_bools);
4968 __ LoadRoot(ip, Heap::kTrueValueRootIndex); 5157 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4969 __ cmp(scratch2, Operand(ip)); 5158 __ cmp(input_reg, ip);
4970 __ b(ne, &check_false); 5159 __ bne(&check_false);
4971 __ mov(input_reg, Operand(1)); 5160 __ li(input_reg, Operand(1));
4972 __ b(&done); 5161 __ b(&done);
4973 5162
4974 __ bind(&check_false); 5163 __ bind(&check_false);
4975 __ LoadRoot(ip, Heap::kFalseValueRootIndex); 5164 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4976 __ cmp(scratch2, Operand(ip)); 5165 __ cmp(input_reg, ip);
4977 DeoptimizeIf(ne, instr->environment()); 5166 DeoptimizeIf(ne, instr, cr7, "cannot truncate");
4978 __ mov(input_reg, Operand::Zero()); 5167 __ li(input_reg, Operand::Zero());
4979 __ b(&done);
4980 } else { 5168 } else {
4981 // Deoptimize if we don't have a heap number. 5169 DeoptimizeIf(ne, instr, cr7, "not a heap number");
4982 DeoptimizeIf(ne, instr->environment());
4983 5170
4984 __ sub(ip, scratch2, Operand(kHeapObjectTag)); 5171 __ lfd(double_scratch2,
4985 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); 5172 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4986 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); 5173 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4987 DeoptimizeIf(ne, instr->environment()); 5174 // preserve heap number pointer in scratch2 for minus zero check below
5175 __ mr(scratch2, input_reg);
5176 }
5177 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
5178 double_scratch);
5179 DeoptimizeIf(ne, instr, cr7, "lost precision or NaN");
4988 5180
4989 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5181 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4990 __ cmp(input_reg, Operand::Zero()); 5182 __ cmpi(input_reg, Operand::Zero());
4991 __ b(ne, &done); 5183 __ bne(&done);
4992 __ VmovHigh(scratch1, double_scratch2); 5184 __ lwz(scratch1,
4993 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 5185 FieldMemOperand(scratch2, HeapNumber::kValueOffset +
4994 DeoptimizeIf(ne, instr->environment()); 5186 Register::kExponentOffset));
5187 __ cmpwi(scratch1, Operand::Zero());
5188 DeoptimizeIf(lt, instr, cr7, "minus zero");
4995 } 5189 }
4996 } 5190 }
4997 __ bind(&done); 5191 __ bind(&done);
4998 } 5192 }
4999 5193
5000 5194
5001 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 5195 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5002 class DeferredTaggedToI FINAL : public LDeferredCode { 5196 class DeferredTaggedToI FINAL : public LDeferredCode {
5003 public: 5197 public:
5004 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 5198 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5005 : LDeferredCode(codegen), instr_(instr) { } 5199 : LDeferredCode(codegen), instr_(instr) {}
5006 virtual void Generate() OVERRIDE { 5200 virtual void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
5007 codegen()->DoDeferredTaggedToI(instr_);
5008 }
5009 virtual LInstruction* instr() OVERRIDE { return instr_; } 5201 virtual LInstruction* instr() OVERRIDE { return instr_; }
5202
5010 private: 5203 private:
5011 LTaggedToI* instr_; 5204 LTaggedToI* instr_;
5012 }; 5205 };
5013 5206
5014 LOperand* input = instr->value(); 5207 LOperand* input = instr->value();
5015 DCHECK(input->IsRegister()); 5208 DCHECK(input->IsRegister());
5016 DCHECK(input->Equals(instr->result())); 5209 DCHECK(input->Equals(instr->result()));
5017 5210
5018 Register input_reg = ToRegister(input); 5211 Register input_reg = ToRegister(input);
5019 5212
5020 if (instr->hydrogen()->value()->representation().IsSmi()) { 5213 if (instr->hydrogen()->value()->representation().IsSmi()) {
5021 __ SmiUntag(input_reg); 5214 __ SmiUntag(input_reg);
5022 } else { 5215 } else {
5023 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); 5216 DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
5024 5217
5025 // Optimistically untag the input. 5218 // Branch to deferred code if the input is a HeapObject.
5026 // If the input is a HeapObject, SmiUntag will set the carry flag. 5219 __ JumpIfNotSmi(input_reg, deferred->entry());
5027 __ SmiUntag(input_reg, SetCC); 5220
5028 // Branch to deferred code if the input was tagged. 5221 __ SmiUntag(input_reg);
5029 // The deferred code will take care of restoring the tag.
5030 __ b(cs, deferred->entry());
5031 __ bind(deferred->exit()); 5222 __ bind(deferred->exit());
5032 } 5223 }
5033 } 5224 }
5034 5225
5035 5226
5036 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5227 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5037 LOperand* input = instr->value(); 5228 LOperand* input = instr->value();
5038 DCHECK(input->IsRegister()); 5229 DCHECK(input->IsRegister());
5039 LOperand* result = instr->result(); 5230 LOperand* result = instr->result();
5040 DCHECK(result->IsDoubleRegister()); 5231 DCHECK(result->IsDoubleRegister());
5041 5232
5042 Register input_reg = ToRegister(input); 5233 Register input_reg = ToRegister(input);
5043 DwVfpRegister result_reg = ToDoubleRegister(result); 5234 DoubleRegister result_reg = ToDoubleRegister(result);
5044 5235
5045 HValue* value = instr->hydrogen()->value(); 5236 HValue* value = instr->hydrogen()->value();
5046 NumberUntagDMode mode = value->representation().IsSmi() 5237 NumberUntagDMode mode = value->representation().IsSmi()
5047 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; 5238 ? NUMBER_CANDIDATE_IS_SMI
5239 : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5048 5240
5049 EmitNumberUntagD(input_reg, result_reg, 5241 EmitNumberUntagD(instr, input_reg, result_reg, mode);
5050 instr->hydrogen()->can_convert_undefined_to_nan(),
5051 instr->hydrogen()->deoptimize_on_minus_zero(),
5052 instr->environment(),
5053 mode);
5054 } 5242 }
5055 5243
5056 5244
5057 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5245 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5058 Register result_reg = ToRegister(instr->result()); 5246 Register result_reg = ToRegister(instr->result());
5059 Register scratch1 = scratch0(); 5247 Register scratch1 = scratch0();
5060 DwVfpRegister double_input = ToDoubleRegister(instr->value()); 5248 DoubleRegister double_input = ToDoubleRegister(instr->value());
5061 LowDwVfpRegister double_scratch = double_scratch0(); 5249 DoubleRegister double_scratch = double_scratch0();
5062 5250
5063 if (instr->truncating()) { 5251 if (instr->truncating()) {
5064 __ TruncateDoubleToI(result_reg, double_input); 5252 __ TruncateDoubleToI(result_reg, double_input);
5065 } else { 5253 } else {
5066 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); 5254 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
5255 double_scratch);
5067 // Deoptimize if the input wasn't a int32 (inside a double). 5256 // Deoptimize if the input wasn't a int32 (inside a double).
5068 DeoptimizeIf(ne, instr->environment()); 5257 DeoptimizeIf(ne, instr);
5069 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5258 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5070 Label done; 5259 Label done;
5071 __ cmp(result_reg, Operand::Zero()); 5260 __ cmpi(result_reg, Operand::Zero());
5072 __ b(ne, &done); 5261 __ bne(&done);
5073 __ VmovHigh(scratch1, double_input); 5262 #if V8_TARGET_ARCH_PPC64
5074 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 5263 __ MovDoubleToInt64(scratch1, double_input);
5075 DeoptimizeIf(ne, instr->environment()); 5264 #else
5265 __ MovDoubleHighToInt(scratch1, double_input);
5266 #endif
5267 __ cmpi(scratch1, Operand::Zero());
5268 DeoptimizeIf(lt, instr);
5076 __ bind(&done); 5269 __ bind(&done);
5077 } 5270 }
5078 } 5271 }
5079 } 5272 }
5080 5273
5081 5274
5082 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 5275 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5083 Register result_reg = ToRegister(instr->result()); 5276 Register result_reg = ToRegister(instr->result());
5084 Register scratch1 = scratch0(); 5277 Register scratch1 = scratch0();
5085 DwVfpRegister double_input = ToDoubleRegister(instr->value()); 5278 DoubleRegister double_input = ToDoubleRegister(instr->value());
5086 LowDwVfpRegister double_scratch = double_scratch0(); 5279 DoubleRegister double_scratch = double_scratch0();
5087 5280
5088 if (instr->truncating()) { 5281 if (instr->truncating()) {
5089 __ TruncateDoubleToI(result_reg, double_input); 5282 __ TruncateDoubleToI(result_reg, double_input);
5090 } else { 5283 } else {
5091 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); 5284 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
5285 double_scratch);
5092 // Deoptimize if the input wasn't a int32 (inside a double). 5286 // Deoptimize if the input wasn't a int32 (inside a double).
5093 DeoptimizeIf(ne, instr->environment()); 5287 DeoptimizeIf(ne, instr);
5094 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5288 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5095 Label done; 5289 Label done;
5096 __ cmp(result_reg, Operand::Zero()); 5290 __ cmpi(result_reg, Operand::Zero());
5097 __ b(ne, &done); 5291 __ bne(&done);
5098 __ VmovHigh(scratch1, double_input); 5292 #if V8_TARGET_ARCH_PPC64
5099 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 5293 __ MovDoubleToInt64(scratch1, double_input);
5100 DeoptimizeIf(ne, instr->environment()); 5294 #else
5295 __ MovDoubleHighToInt(scratch1, double_input);
5296 #endif
5297 __ cmpi(scratch1, Operand::Zero());
5298 DeoptimizeIf(lt, instr);
5101 __ bind(&done); 5299 __ bind(&done);
5102 } 5300 }
5103 } 5301 }
5104 __ SmiTag(result_reg, SetCC); 5302 #if V8_TARGET_ARCH_PPC64
5105 DeoptimizeIf(vs, instr->environment()); 5303 __ SmiTag(result_reg);
5304 #else
5305 __ SmiTagCheckOverflow(result_reg, r0);
5306 DeoptimizeIf(lt, instr, cr0);
5307 #endif
5106 } 5308 }
5107 5309
5108 5310
5109 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 5311 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5110 LOperand* input = instr->value(); 5312 LOperand* input = instr->value();
5111 __ SmiTst(ToRegister(input)); 5313 __ TestIfSmi(ToRegister(input), r0);
5112 DeoptimizeIf(ne, instr->environment()); 5314 DeoptimizeIf(ne, instr, cr0);
5113 } 5315 }
5114 5316
5115 5317
5116 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 5318 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5117 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 5319 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5118 LOperand* input = instr->value(); 5320 LOperand* input = instr->value();
5119 __ SmiTst(ToRegister(input)); 5321 __ TestIfSmi(ToRegister(input), r0);
5120 DeoptimizeIf(eq, instr->environment()); 5322 DeoptimizeIf(eq, instr, cr0);
5121 } 5323 }
5122 } 5324 }
5123 5325
5124 5326
5125 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 5327 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5126 Register input = ToRegister(instr->value()); 5328 Register input = ToRegister(instr->value());
5127 Register scratch = scratch0(); 5329 Register scratch = scratch0();
5128 5330
5129 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5331 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5130 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 5332 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5131 5333
5132 if (instr->hydrogen()->is_interval_check()) { 5334 if (instr->hydrogen()->is_interval_check()) {
5133 InstanceType first; 5335 InstanceType first;
5134 InstanceType last; 5336 InstanceType last;
5135 instr->hydrogen()->GetCheckInterval(&first, &last); 5337 instr->hydrogen()->GetCheckInterval(&first, &last);
5136 5338
5137 __ cmp(scratch, Operand(first)); 5339 __ cmpli(scratch, Operand(first));
5138 5340
5139 // If there is only one type in the interval check for equality. 5341 // If there is only one type in the interval check for equality.
5140 if (first == last) { 5342 if (first == last) {
5141 DeoptimizeIf(ne, instr->environment()); 5343 DeoptimizeIf(ne, instr);
5142 } else { 5344 } else {
5143 DeoptimizeIf(lo, instr->environment()); 5345 DeoptimizeIf(lt, instr);
5144 // Omit check for the last type. 5346 // Omit check for the last type.
5145 if (last != LAST_TYPE) { 5347 if (last != LAST_TYPE) {
5146 __ cmp(scratch, Operand(last)); 5348 __ cmpli(scratch, Operand(last));
5147 DeoptimizeIf(hi, instr->environment()); 5349 DeoptimizeIf(gt, instr);
5148 } 5350 }
5149 } 5351 }
5150 } else { 5352 } else {
5151 uint8_t mask; 5353 uint8_t mask;
5152 uint8_t tag; 5354 uint8_t tag;
5153 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 5355 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5154 5356
5155 if (base::bits::IsPowerOfTwo32(mask)) { 5357 if (base::bits::IsPowerOfTwo32(mask)) {
5156 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); 5358 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5157 __ tst(scratch, Operand(mask)); 5359 __ andi(r0, scratch, Operand(mask));
5158 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment()); 5360 DeoptimizeIf(tag == 0 ? ne : eq, instr, cr0);
5159 } else { 5361 } else {
5160 __ and_(scratch, scratch, Operand(mask)); 5362 __ andi(scratch, scratch, Operand(mask));
5161 __ cmp(scratch, Operand(tag)); 5363 __ cmpi(scratch, Operand(tag));
5162 DeoptimizeIf(ne, instr->environment()); 5364 DeoptimizeIf(ne, instr);
5163 } 5365 }
5164 } 5366 }
5165 } 5367 }
5166 5368
5167 5369
5168 void LCodeGen::DoCheckValue(LCheckValue* instr) { 5370 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5169 Register reg = ToRegister(instr->value()); 5371 Register reg = ToRegister(instr->value());
5170 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 5372 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5171 AllowDeferredHandleDereference smi_check; 5373 AllowDeferredHandleDereference smi_check;
5172 if (isolate()->heap()->InNewSpace(*object)) { 5374 if (isolate()->heap()->InNewSpace(*object)) {
5173 Register reg = ToRegister(instr->value()); 5375 Register reg = ToRegister(instr->value());
5174 Handle<Cell> cell = isolate()->factory()->NewCell(object); 5376 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5175 __ mov(ip, Operand(Handle<Object>(cell))); 5377 __ mov(ip, Operand(Handle<Object>(cell)));
5176 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); 5378 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
5177 __ cmp(reg, ip); 5379 __ cmp(reg, ip);
5178 } else { 5380 } else {
5179 __ cmp(reg, Operand(object)); 5381 __ Cmpi(reg, Operand(object), r0);
5180 } 5382 }
5181 DeoptimizeIf(ne, instr->environment()); 5383 DeoptimizeIf(ne, instr);
5182 } 5384 }
5183 5385
5184 5386
5185 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 5387 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5186 { 5388 {
5187 PushSafepointRegistersScope scope(this); 5389 PushSafepointRegistersScope scope(this);
5188 __ push(object); 5390 __ push(object);
5189 __ mov(cp, Operand::Zero()); 5391 __ li(cp, Operand::Zero());
5190 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 5392 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5191 RecordSafepointWithRegisters( 5393 RecordSafepointWithRegisters(instr->pointer_map(), 1,
5192 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 5394 Safepoint::kNoLazyDeopt);
5193 __ StoreToSafepointRegisterSlot(r0, scratch0()); 5395 __ StoreToSafepointRegisterSlot(r3, scratch0());
5194 } 5396 }
5195 __ tst(scratch0(), Operand(kSmiTagMask)); 5397 __ TestIfSmi(scratch0(), r0);
5196 DeoptimizeIf(eq, instr->environment()); 5398 DeoptimizeIf(eq, instr, cr0);
5197 } 5399 }
5198 5400
5199 5401
5200 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 5402 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5201 class DeferredCheckMaps FINAL : public LDeferredCode { 5403 class DeferredCheckMaps FINAL : public LDeferredCode {
5202 public: 5404 public:
5203 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 5405 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5204 : LDeferredCode(codegen), instr_(instr), object_(object) { 5406 : LDeferredCode(codegen), instr_(instr), object_(object) {
5205 SetExit(check_maps()); 5407 SetExit(check_maps());
5206 } 5408 }
5207 virtual void Generate() OVERRIDE { 5409 virtual void Generate() OVERRIDE {
5208 codegen()->DoDeferredInstanceMigration(instr_, object_); 5410 codegen()->DoDeferredInstanceMigration(instr_, object_);
5209 } 5411 }
5210 Label* check_maps() { return &check_maps_; } 5412 Label* check_maps() { return &check_maps_; }
5211 virtual LInstruction* instr() OVERRIDE { return instr_; } 5413 virtual LInstruction* instr() OVERRIDE { return instr_; }
5414
5212 private: 5415 private:
5213 LCheckMaps* instr_; 5416 LCheckMaps* instr_;
5214 Label check_maps_; 5417 Label check_maps_;
5215 Register object_; 5418 Register object_;
5216 }; 5419 };
5217 5420
5218 if (instr->hydrogen()->IsStabilityCheck()) { 5421 if (instr->hydrogen()->IsStabilityCheck()) {
5219 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5422 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5220 for (int i = 0; i < maps->size(); ++i) { 5423 for (int i = 0; i < maps->size(); ++i) {
5221 AddStabilityDependency(maps->at(i).handle()); 5424 AddStabilityDependency(maps->at(i).handle());
5222 } 5425 }
5223 return; 5426 return;
5224 } 5427 }
5225 5428
5226 Register map_reg = scratch0(); 5429 Register map_reg = scratch0();
5227 5430
5228 LOperand* input = instr->value(); 5431 LOperand* input = instr->value();
5229 DCHECK(input->IsRegister()); 5432 DCHECK(input->IsRegister());
5230 Register reg = ToRegister(input); 5433 Register reg = ToRegister(input);
5231 5434
5232 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); 5435 __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5233 5436
5234 DeferredCheckMaps* deferred = NULL; 5437 DeferredCheckMaps* deferred = NULL;
5235 if (instr->hydrogen()->HasMigrationTarget()) { 5438 if (instr->hydrogen()->HasMigrationTarget()) {
5236 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); 5439 deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
5237 __ bind(deferred->check_maps()); 5440 __ bind(deferred->check_maps());
5238 } 5441 }
5239 5442
5240 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5443 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5241 Label success; 5444 Label success;
5242 for (int i = 0; i < maps->size() - 1; i++) { 5445 for (int i = 0; i < maps->size() - 1; i++) {
5243 Handle<Map> map = maps->at(i).handle(); 5446 Handle<Map> map = maps->at(i).handle();
5244 __ CompareMap(map_reg, map, &success); 5447 __ CompareMap(map_reg, map, &success);
5245 __ b(eq, &success); 5448 __ beq(&success);
5246 } 5449 }
5247 5450
5248 Handle<Map> map = maps->at(maps->size() - 1).handle(); 5451 Handle<Map> map = maps->at(maps->size() - 1).handle();
5249 __ CompareMap(map_reg, map, &success); 5452 __ CompareMap(map_reg, map, &success);
5250 if (instr->hydrogen()->HasMigrationTarget()) { 5453 if (instr->hydrogen()->HasMigrationTarget()) {
5251 __ b(ne, deferred->entry()); 5454 __ bne(deferred->entry());
5252 } else { 5455 } else {
5253 DeoptimizeIf(ne, instr->environment()); 5456 DeoptimizeIf(ne, instr);
5254 } 5457 }
5255 5458
5256 __ bind(&success); 5459 __ bind(&success);
5257 } 5460 }
5258 5461
5259 5462
5260 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5463 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5261 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); 5464 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5262 Register result_reg = ToRegister(instr->result()); 5465 Register result_reg = ToRegister(instr->result());
5263 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); 5466 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5264 } 5467 }
5265 5468
5266 5469
5267 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5470 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5268 Register unclamped_reg = ToRegister(instr->unclamped()); 5471 Register unclamped_reg = ToRegister(instr->unclamped());
5269 Register result_reg = ToRegister(instr->result()); 5472 Register result_reg = ToRegister(instr->result());
5270 __ ClampUint8(result_reg, unclamped_reg); 5473 __ ClampUint8(result_reg, unclamped_reg);
5271 } 5474 }
5272 5475
5273 5476
5274 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5477 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5275 Register scratch = scratch0(); 5478 Register scratch = scratch0();
5276 Register input_reg = ToRegister(instr->unclamped()); 5479 Register input_reg = ToRegister(instr->unclamped());
5277 Register result_reg = ToRegister(instr->result()); 5480 Register result_reg = ToRegister(instr->result());
5278 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); 5481 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5279 Label is_smi, done, heap_number; 5482 Label is_smi, done, heap_number;
5280 5483
5281 // Both smi and heap number cases are handled. 5484 // Both smi and heap number cases are handled.
5282 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); 5485 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5283 5486
5284 // Check for heap number 5487 // Check for heap number
5285 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5488 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5286 __ cmp(scratch, Operand(factory()->heap_number_map())); 5489 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
5287 __ b(eq, &heap_number); 5490 __ beq(&heap_number);
5288 5491
5289 // Check for undefined. Undefined is converted to zero for clamping 5492 // Check for undefined. Undefined is converted to zero for clamping
5290 // conversions. 5493 // conversions.
5291 __ cmp(input_reg, Operand(factory()->undefined_value())); 5494 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
5292 DeoptimizeIf(ne, instr->environment()); 5495 DeoptimizeIf(ne, instr);
5293 __ mov(result_reg, Operand::Zero()); 5496 __ li(result_reg, Operand::Zero());
5294 __ jmp(&done); 5497 __ b(&done);
5295 5498
5296 // Heap number 5499 // Heap number
5297 __ bind(&heap_number); 5500 __ bind(&heap_number);
5298 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 5501 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5299 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); 5502 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5300 __ jmp(&done); 5503 __ b(&done);
5301 5504
5302 // smi 5505 // smi
5303 __ bind(&is_smi); 5506 __ bind(&is_smi);
5304 __ ClampUint8(result_reg, result_reg); 5507 __ ClampUint8(result_reg, result_reg);
5305 5508
5306 __ bind(&done); 5509 __ bind(&done);
5307 } 5510 }
5308 5511
5309 5512
5310 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { 5513 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5311 DwVfpRegister value_reg = ToDoubleRegister(instr->value()); 5514 DoubleRegister value_reg = ToDoubleRegister(instr->value());
5312 Register result_reg = ToRegister(instr->result()); 5515 Register result_reg = ToRegister(instr->result());
5516
5313 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { 5517 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5314 __ VmovHigh(result_reg, value_reg); 5518 __ MovDoubleHighToInt(result_reg, value_reg);
5315 } else { 5519 } else {
5316 __ VmovLow(result_reg, value_reg); 5520 __ MovDoubleLowToInt(result_reg, value_reg);
5317 } 5521 }
5318 } 5522 }
5319 5523
5320 5524
5321 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { 5525 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5322 Register hi_reg = ToRegister(instr->hi()); 5526 Register hi_reg = ToRegister(instr->hi());
5323 Register lo_reg = ToRegister(instr->lo()); 5527 Register lo_reg = ToRegister(instr->lo());
5324 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); 5528 DoubleRegister result_reg = ToDoubleRegister(instr->result());
5325 __ VmovHigh(result_reg, hi_reg); 5529 #if V8_TARGET_ARCH_PPC64
5326 __ VmovLow(result_reg, lo_reg); 5530 __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
5531 #else
5532 __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
5533 #endif
5327 } 5534 }
5328 5535
5329 5536
5330 void LCodeGen::DoAllocate(LAllocate* instr) { 5537 void LCodeGen::DoAllocate(LAllocate* instr) {
5331 class DeferredAllocate FINAL : public LDeferredCode { 5538 class DeferredAllocate FINAL : public LDeferredCode {
5332 public: 5539 public:
5333 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) 5540 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5334 : LDeferredCode(codegen), instr_(instr) { } 5541 : LDeferredCode(codegen), instr_(instr) {}
5335 virtual void Generate() OVERRIDE { 5542 virtual void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
5336 codegen()->DoDeferredAllocate(instr_);
5337 }
5338 virtual LInstruction* instr() OVERRIDE { return instr_; } 5543 virtual LInstruction* instr() OVERRIDE { return instr_; }
5544
5339 private: 5545 private:
5340 LAllocate* instr_; 5546 LAllocate* instr_;
5341 }; 5547 };
5342 5548
5343 DeferredAllocate* deferred = 5549 DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
5344 new(zone()) DeferredAllocate(this, instr);
5345 5550
5346 Register result = ToRegister(instr->result()); 5551 Register result = ToRegister(instr->result());
5347 Register scratch = ToRegister(instr->temp1()); 5552 Register scratch = ToRegister(instr->temp1());
5348 Register scratch2 = ToRegister(instr->temp2()); 5553 Register scratch2 = ToRegister(instr->temp2());
5349 5554
5350 // Allocate memory for the object. 5555 // Allocate memory for the object.
5351 AllocationFlags flags = TAG_OBJECT; 5556 AllocationFlags flags = TAG_OBJECT;
5352 if (instr->hydrogen()->MustAllocateDoubleAligned()) { 5557 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5353 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); 5558 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5354 } 5559 }
5355 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 5560 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5356 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); 5561 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5357 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5562 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5358 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); 5563 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5359 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 5564 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5360 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5565 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5361 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); 5566 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5362 } 5567 }
5363 5568
5364 if (instr->size()->IsConstantOperand()) { 5569 if (instr->size()->IsConstantOperand()) {
5365 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5570 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5366 if (size <= Page::kMaxRegularHeapObjectSize) { 5571 if (size <= Page::kMaxRegularHeapObjectSize) {
5367 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); 5572 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5368 } else { 5573 } else {
5369 __ jmp(deferred->entry()); 5574 __ b(deferred->entry());
5370 } 5575 }
5371 } else { 5576 } else {
5372 Register size = ToRegister(instr->size()); 5577 Register size = ToRegister(instr->size());
5373 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); 5578 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5374 } 5579 }
5375 5580
5376 __ bind(deferred->exit()); 5581 __ bind(deferred->exit());
5377 5582
5378 if (instr->hydrogen()->MustPrefillWithFiller()) { 5583 if (instr->hydrogen()->MustPrefillWithFiller()) {
5379 STATIC_ASSERT(kHeapObjectTag == 1); 5584 STATIC_ASSERT(kHeapObjectTag == 1);
5380 if (instr->size()->IsConstantOperand()) { 5585 if (instr->size()->IsConstantOperand()) {
5381 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5586 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5382 __ mov(scratch, Operand(size - kHeapObjectTag)); 5587 __ LoadIntLiteral(scratch, size - kHeapObjectTag);
5383 } else { 5588 } else {
5384 __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); 5589 __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5385 } 5590 }
5386 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); 5591 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5387 Label loop; 5592 Label loop;
5388 __ bind(&loop); 5593 __ bind(&loop);
5389 __ sub(scratch, scratch, Operand(kPointerSize), SetCC); 5594 __ subi(scratch, scratch, Operand(kPointerSize));
5390 __ str(scratch2, MemOperand(result, scratch)); 5595 __ StorePX(scratch2, MemOperand(result, scratch));
5391 __ b(ge, &loop); 5596 __ cmpi(scratch, Operand::Zero());
5597 __ bge(&loop);
5392 } 5598 }
5393 } 5599 }
5394 5600
5395 5601
5396 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { 5602 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5397 Register result = ToRegister(instr->result()); 5603 Register result = ToRegister(instr->result());
5398 5604
5399 // TODO(3095996): Get rid of this. For now, we need to make the 5605 // TODO(3095996): Get rid of this. For now, we need to make the
5400 // result register contain a valid pointer because it is already 5606 // result register contain a valid pointer because it is already
5401 // contained in the register pointer map. 5607 // contained in the register pointer map.
5402 __ mov(result, Operand(Smi::FromInt(0))); 5608 __ LoadSmiLiteral(result, Smi::FromInt(0));
5403 5609
5404 PushSafepointRegistersScope scope(this); 5610 PushSafepointRegistersScope scope(this);
5405 if (instr->size()->IsRegister()) { 5611 if (instr->size()->IsRegister()) {
5406 Register size = ToRegister(instr->size()); 5612 Register size = ToRegister(instr->size());
5407 DCHECK(!size.is(result)); 5613 DCHECK(!size.is(result));
5408 __ SmiTag(size); 5614 __ SmiTag(size);
5409 __ push(size); 5615 __ push(size);
5410 } else { 5616 } else {
5411 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5617 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5618 #if !V8_TARGET_ARCH_PPC64
5412 if (size >= 0 && size <= Smi::kMaxValue) { 5619 if (size >= 0 && size <= Smi::kMaxValue) {
5620 #endif
5413 __ Push(Smi::FromInt(size)); 5621 __ Push(Smi::FromInt(size));
5622 #if !V8_TARGET_ARCH_PPC64
5414 } else { 5623 } else {
5415 // We should never get here at runtime => abort 5624 // We should never get here at runtime => abort
5416 __ stop("invalid allocation size"); 5625 __ stop("invalid allocation size");
5417 return; 5626 return;
5418 } 5627 }
5628 #endif
5419 } 5629 }
5420 5630
5421 int flags = AllocateDoubleAlignFlag::encode( 5631 int flags = AllocateDoubleAlignFlag::encode(
5422 instr->hydrogen()->MustAllocateDoubleAligned()); 5632 instr->hydrogen()->MustAllocateDoubleAligned());
5423 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 5633 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5424 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); 5634 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5425 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5635 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5426 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); 5636 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5427 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 5637 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5428 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5638 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5429 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); 5639 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5430 } else { 5640 } else {
5431 flags = AllocateTargetSpace::update(flags, NEW_SPACE); 5641 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5432 } 5642 }
5433 __ Push(Smi::FromInt(flags)); 5643 __ Push(Smi::FromInt(flags));
5434 5644
5435 CallRuntimeFromDeferred( 5645 CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
5436 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); 5646 instr->context());
5437 __ StoreToSafepointRegisterSlot(r0, result); 5647 __ StoreToSafepointRegisterSlot(r3, result);
5438 } 5648 }
5439 5649
5440 5650
5441 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { 5651 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5442 DCHECK(ToRegister(instr->value()).is(r0)); 5652 DCHECK(ToRegister(instr->value()).is(r3));
5443 __ push(r0); 5653 __ push(r3);
5444 CallRuntime(Runtime::kToFastProperties, 1, instr); 5654 CallRuntime(Runtime::kToFastProperties, 1, instr);
5445 } 5655 }
5446 5656
5447 5657
5448 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { 5658 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5449 DCHECK(ToRegister(instr->context()).is(cp)); 5659 DCHECK(ToRegister(instr->context()).is(cp));
5450 Label materialized; 5660 Label materialized;
5451 // Registers will be used as follows: 5661 // Registers will be used as follows:
5452 // r6 = literals array. 5662 // r10 = literals array.
5453 // r1 = regexp literal. 5663 // r4 = regexp literal.
5454 // r0 = regexp literal clone. 5664 // r3 = regexp literal clone.
5455 // r2-5 are used as temporaries. 5665 // r5 and r7-r9 are used as temporaries.
5456 int literal_offset = 5666 int literal_offset =
5457 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); 5667 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5458 __ Move(r6, instr->hydrogen()->literals()); 5668 __ Move(r10, instr->hydrogen()->literals());
5459 __ ldr(r1, FieldMemOperand(r6, literal_offset)); 5669 __ LoadP(r4, FieldMemOperand(r10, literal_offset));
5460 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5670 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5461 __ cmp(r1, ip); 5671 __ cmp(r4, ip);
5462 __ b(ne, &materialized); 5672 __ bne(&materialized);
5463 5673
5464 // Create regexp literal using runtime function 5674 // Create regexp literal using runtime function
5465 // Result will be in r0. 5675 // Result will be in r3.
5466 __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); 5676 __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
5467 __ mov(r4, Operand(instr->hydrogen()->pattern())); 5677 __ mov(r8, Operand(instr->hydrogen()->pattern()));
5468 __ mov(r3, Operand(instr->hydrogen()->flags())); 5678 __ mov(r7, Operand(instr->hydrogen()->flags()));
5469 __ Push(r6, r5, r4, r3); 5679 __ Push(r10, r9, r8, r7);
5470 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); 5680 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5471 __ mov(r1, r0); 5681 __ mr(r4, r3);
5472 5682
5473 __ bind(&materialized); 5683 __ bind(&materialized);
5474 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; 5684 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5475 Label allocated, runtime_allocate; 5685 Label allocated, runtime_allocate;
5476 5686
5477 __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); 5687 __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
5478 __ jmp(&allocated); 5688 __ b(&allocated);
5479 5689
5480 __ bind(&runtime_allocate); 5690 __ bind(&runtime_allocate);
5481 __ mov(r0, Operand(Smi::FromInt(size))); 5691 __ LoadSmiLiteral(r3, Smi::FromInt(size));
5482 __ Push(r1, r0); 5692 __ Push(r4, r3);
5483 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); 5693 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5484 __ pop(r1); 5694 __ pop(r4);
5485 5695
5486 __ bind(&allocated); 5696 __ bind(&allocated);
5487 // Copy the content into the newly allocated memory. 5697 // Copy the content into the newly allocated memory.
5488 __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize); 5698 __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
5489 } 5699 }
5490 5700
5491 5701
5492 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { 5702 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5493 DCHECK(ToRegister(instr->context()).is(cp)); 5703 DCHECK(ToRegister(instr->context()).is(cp));
5494 // Use the fast case closure allocation code that allocates in new 5704 // Use the fast case closure allocation code that allocates in new
5495 // space for nested functions that don't need literals cloning. 5705 // space for nested functions that don't need literals cloning.
5496 bool pretenure = instr->hydrogen()->pretenure(); 5706 bool pretenure = instr->hydrogen()->pretenure();
5497 if (!pretenure && instr->hydrogen()->has_no_literals()) { 5707 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5498 FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(), 5708 FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5499 instr->hydrogen()->kind()); 5709 instr->hydrogen()->kind());
5500 __ mov(r2, Operand(instr->hydrogen()->shared_info())); 5710 __ mov(r5, Operand(instr->hydrogen()->shared_info()));
5501 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 5711 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5502 } else { 5712 } else {
5503 __ mov(r2, Operand(instr->hydrogen()->shared_info())); 5713 __ mov(r5, Operand(instr->hydrogen()->shared_info()));
5504 __ mov(r1, Operand(pretenure ? factory()->true_value() 5714 __ mov(r4, Operand(pretenure ? factory()->true_value()
5505 : factory()->false_value())); 5715 : factory()->false_value()));
5506 __ Push(cp, r2, r1); 5716 __ Push(cp, r5, r4);
5507 CallRuntime(Runtime::kNewClosure, 3, instr); 5717 CallRuntime(Runtime::kNewClosure, 3, instr);
5508 } 5718 }
5509 } 5719 }
5510 5720
5511 5721
5512 void LCodeGen::DoTypeof(LTypeof* instr) { 5722 void LCodeGen::DoTypeof(LTypeof* instr) {
5513 Register input = ToRegister(instr->value()); 5723 Register input = ToRegister(instr->value());
5514 __ push(input); 5724 __ push(input);
5515 CallRuntime(Runtime::kTypeof, 1, instr); 5725 CallRuntime(Runtime::kTypeof, 1, instr);
5516 } 5726 }
5517 5727
5518 5728
5519 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { 5729 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5520 Register input = ToRegister(instr->value()); 5730 Register input = ToRegister(instr->value());
5521 5731
5522 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_), 5732 Condition final_branch_condition =
5523 instr->FalseLabel(chunk_), 5733 EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
5524 input, 5734 instr->type_literal());
5525 instr->type_literal());
5526 if (final_branch_condition != kNoCondition) { 5735 if (final_branch_condition != kNoCondition) {
5527 EmitBranch(instr, final_branch_condition); 5736 EmitBranch(instr, final_branch_condition);
5528 } 5737 }
5529 } 5738 }
5530 5739
5531 5740
5532 Condition LCodeGen::EmitTypeofIs(Label* true_label, 5741 Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
5533 Label* false_label, 5742 Register input, Handle<String> type_name) {
5534 Register input,
5535 Handle<String> type_name) {
5536 Condition final_branch_condition = kNoCondition; 5743 Condition final_branch_condition = kNoCondition;
5537 Register scratch = scratch0(); 5744 Register scratch = scratch0();
5538 Factory* factory = isolate()->factory(); 5745 Factory* factory = isolate()->factory();
5539 if (String::Equals(type_name, factory->number_string())) { 5746 if (String::Equals(type_name, factory->number_string())) {
5540 __ JumpIfSmi(input, true_label); 5747 __ JumpIfSmi(input, true_label);
5541 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5748 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5542 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); 5749 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5543 final_branch_condition = eq; 5750 final_branch_condition = eq;
5544 5751
5545 } else if (String::Equals(type_name, factory->string_string())) { 5752 } else if (String::Equals(type_name, factory->string_string())) {
5546 __ JumpIfSmi(input, false_label); 5753 __ JumpIfSmi(input, false_label);
5547 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE); 5754 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5548 __ b(ge, false_label); 5755 __ bge(false_label);
5549 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5756 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5550 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); 5757 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5758 __ cmpi(r0, Operand::Zero());
5551 final_branch_condition = eq; 5759 final_branch_condition = eq;
5552 5760
5553 } else if (String::Equals(type_name, factory->symbol_string())) { 5761 } else if (String::Equals(type_name, factory->symbol_string())) {
5554 __ JumpIfSmi(input, false_label); 5762 __ JumpIfSmi(input, false_label);
5555 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE); 5763 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5556 final_branch_condition = eq; 5764 final_branch_condition = eq;
5557 5765
5558 } else if (String::Equals(type_name, factory->boolean_string())) { 5766 } else if (String::Equals(type_name, factory->boolean_string())) {
5559 __ CompareRoot(input, Heap::kTrueValueRootIndex); 5767 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5560 __ b(eq, true_label); 5768 __ beq(true_label);
5561 __ CompareRoot(input, Heap::kFalseValueRootIndex); 5769 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5562 final_branch_condition = eq; 5770 final_branch_condition = eq;
5563 5771
5564 } else if (String::Equals(type_name, factory->undefined_string())) { 5772 } else if (String::Equals(type_name, factory->undefined_string())) {
5565 __ CompareRoot(input, Heap::kUndefinedValueRootIndex); 5773 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5566 __ b(eq, true_label); 5774 __ beq(true_label);
5567 __ JumpIfSmi(input, false_label); 5775 __ JumpIfSmi(input, false_label);
5568 // Check for undetectable objects => true. 5776 // Check for undetectable objects => true.
5569 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5777 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5570 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5778 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5571 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); 5779 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5780 __ cmpi(r0, Operand::Zero());
5572 final_branch_condition = ne; 5781 final_branch_condition = ne;
5573 5782
5574 } else if (String::Equals(type_name, factory->function_string())) { 5783 } else if (String::Equals(type_name, factory->function_string())) {
5575 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 5784 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5576 Register type_reg = scratch; 5785 Register type_reg = scratch;
5577 __ JumpIfSmi(input, false_label); 5786 __ JumpIfSmi(input, false_label);
5578 __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE); 5787 __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
5579 __ b(eq, true_label); 5788 __ beq(true_label);
5580 __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE)); 5789 __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
5581 final_branch_condition = eq; 5790 final_branch_condition = eq;
5582 5791
5583 } else if (String::Equals(type_name, factory->object_string())) { 5792 } else if (String::Equals(type_name, factory->object_string())) {
5584 Register map = scratch; 5793 Register map = scratch;
5585 __ JumpIfSmi(input, false_label); 5794 __ JumpIfSmi(input, false_label);
5586 __ CompareRoot(input, Heap::kNullValueRootIndex); 5795 __ CompareRoot(input, Heap::kNullValueRootIndex);
5587 __ b(eq, true_label); 5796 __ beq(true_label);
5588 __ CheckObjectTypeRange(input, 5797 __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5589 map, 5798 LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label);
5590 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5591 LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
5592 false_label);
5593 // Check for undetectable objects => false. 5799 // Check for undetectable objects => false.
5594 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); 5800 __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5595 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); 5801 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5802 __ cmpi(r0, Operand::Zero());
5596 final_branch_condition = eq; 5803 final_branch_condition = eq;
5597 5804
5598 } else { 5805 } else {
5599 __ b(false_label); 5806 __ b(false_label);
5600 } 5807 }
5601 5808
5602 return final_branch_condition; 5809 return final_branch_condition;
5603 } 5810 }
5604 5811
5605 5812
5606 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { 5813 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5607 Register temp1 = ToRegister(instr->temp()); 5814 Register temp1 = ToRegister(instr->temp());
5608 5815
5609 EmitIsConstructCall(temp1, scratch0()); 5816 EmitIsConstructCall(temp1, scratch0());
5610 EmitBranch(instr, eq); 5817 EmitBranch(instr, eq);
5611 } 5818 }
5612 5819
5613 5820
5614 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { 5821 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5615 DCHECK(!temp1.is(temp2)); 5822 DCHECK(!temp1.is(temp2));
5616 // Get the frame pointer for the calling frame. 5823 // Get the frame pointer for the calling frame.
5617 __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 5824 __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5618 5825
5619 // Skip the arguments adaptor frame if it exists. 5826 // Skip the arguments adaptor frame if it exists.
5620 __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); 5827 Label check_frame_marker;
5621 __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 5828 __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5622 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq); 5829 __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
5830 __ bne(&check_frame_marker);
5831 __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5623 5832
5624 // Check the marker in the calling frame. 5833 // Check the marker in the calling frame.
5625 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); 5834 __ bind(&check_frame_marker);
5626 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); 5835 __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5836 __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
5627 } 5837 }
5628 5838
5629 5839
5630 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { 5840 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5631 if (!info()->IsStub()) { 5841 if (!info()->IsStub()) {
5632 // Ensure that we have enough space after the previous lazy-bailout 5842 // Ensure that we have enough space after the previous lazy-bailout
5633 // instruction for patching the code here. 5843 // instruction for patching the code here.
5634 int current_pc = masm()->pc_offset(); 5844 int current_pc = masm()->pc_offset();
5635 if (current_pc < last_lazy_deopt_pc_ + space_needed) { 5845 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5636 // Block literal pool emission for duration of padding.
5637 Assembler::BlockConstPoolScope block_const_pool(masm());
5638 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; 5846 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5639 DCHECK_EQ(0, padding_size % Assembler::kInstrSize); 5847 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5640 while (padding_size > 0) { 5848 while (padding_size > 0) {
5641 __ nop(); 5849 __ nop();
5642 padding_size -= Assembler::kInstrSize; 5850 padding_size -= Assembler::kInstrSize;
5643 } 5851 }
5644 } 5852 }
5645 } 5853 }
5646 last_lazy_deopt_pc_ = masm()->pc_offset(); 5854 last_lazy_deopt_pc_ = masm()->pc_offset();
5647 } 5855 }
(...skipping 11 matching lines...)
5659 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { 5867 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5660 Deoptimizer::BailoutType type = instr->hydrogen()->type(); 5868 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5661 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the 5869 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5662 // needed return address), even though the implementation of LAZY and EAGER is 5870 // needed return address), even though the implementation of LAZY and EAGER is
5663 // now identical. When LAZY is eventually completely folded into EAGER, remove 5871 // now identical. When LAZY is eventually completely folded into EAGER, remove
5664 // the special case below. 5872 // the special case below.
5665 if (info()->IsStub() && type == Deoptimizer::EAGER) { 5873 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5666 type = Deoptimizer::LAZY; 5874 type = Deoptimizer::LAZY;
5667 } 5875 }
5668 5876
5669 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); 5877 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5670 DeoptimizeIf(al, instr->environment(), type);
5671 } 5878 }
5672 5879
5673 5880
5674 void LCodeGen::DoDummy(LDummy* instr) { 5881 void LCodeGen::DoDummy(LDummy* instr) {
5675 // Nothing to see here, move on! 5882 // Nothing to see here, move on!
5676 } 5883 }
5677 5884
5678 5885
5679 void LCodeGen::DoDummyUse(LDummyUse* instr) { 5886 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5680 // Nothing to see here, move on! 5887 // Nothing to see here, move on!
5681 } 5888 }
5682 5889
5683 5890
5684 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { 5891 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5685 PushSafepointRegistersScope scope(this); 5892 PushSafepointRegistersScope scope(this);
5686 LoadContextFromDeferred(instr->context()); 5893 LoadContextFromDeferred(instr->context());
5687 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); 5894 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5688 RecordSafepointWithLazyDeopt( 5895 RecordSafepointWithLazyDeopt(
5689 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 5896 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5690 DCHECK(instr->HasEnvironment()); 5897 DCHECK(instr->HasEnvironment());
5691 LEnvironment* env = instr->environment(); 5898 LEnvironment* env = instr->environment();
5692 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 5899 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5693 } 5900 }
5694 5901
5695 5902
5696 void LCodeGen::DoStackCheck(LStackCheck* instr) { 5903 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5697 class DeferredStackCheck FINAL : public LDeferredCode { 5904 class DeferredStackCheck FINAL : public LDeferredCode {
5698 public: 5905 public:
5699 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) 5906 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5700 : LDeferredCode(codegen), instr_(instr) { } 5907 : LDeferredCode(codegen), instr_(instr) {}
5701 virtual void Generate() OVERRIDE { 5908 virtual void Generate() OVERRIDE {
5702 codegen()->DoDeferredStackCheck(instr_); 5909 codegen()->DoDeferredStackCheck(instr_);
5703 } 5910 }
5704 virtual LInstruction* instr() OVERRIDE { return instr_; } 5911 virtual LInstruction* instr() OVERRIDE { return instr_; }
5912
5705 private: 5913 private:
5706 LStackCheck* instr_; 5914 LStackCheck* instr_;
5707 }; 5915 };
5708 5916
5709 DCHECK(instr->HasEnvironment()); 5917 DCHECK(instr->HasEnvironment());
5710 LEnvironment* env = instr->environment(); 5918 LEnvironment* env = instr->environment();
5711 // There is no LLazyBailout instruction for stack-checks. We have to 5919 // There is no LLazyBailout instruction for stack-checks. We have to
5712 // prepare for lazy deoptimization explicitly here. 5920 // prepare for lazy deoptimization explicitly here.
5713 if (instr->hydrogen()->is_function_entry()) { 5921 if (instr->hydrogen()->is_function_entry()) {
5714 // Perform stack overflow check. 5922 // Perform stack overflow check.
5715 Label done; 5923 Label done;
5716 __ LoadRoot(ip, Heap::kStackLimitRootIndex); 5924 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5717 __ cmp(sp, Operand(ip)); 5925 __ cmpl(sp, ip);
5718 __ b(hs, &done); 5926 __ bge(&done);
5719 Handle<Code> stack_check = isolate()->builtins()->StackCheck();
5720 PredictableCodeSizeScope predictable(masm(),
5721 CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
5722 DCHECK(instr->context()->IsRegister()); 5927 DCHECK(instr->context()->IsRegister());
5723 DCHECK(ToRegister(instr->context()).is(cp)); 5928 DCHECK(ToRegister(instr->context()).is(cp));
5724 CallCode(stack_check, RelocInfo::CODE_TARGET, instr); 5929 CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
5930 instr);
5725 __ bind(&done); 5931 __ bind(&done);
5726 } else { 5932 } else {
5727 DCHECK(instr->hydrogen()->is_backwards_branch()); 5933 DCHECK(instr->hydrogen()->is_backwards_branch());
5728 // Perform stack overflow check if this goto needs it before jumping. 5934 // Perform stack overflow check if this goto needs it before jumping.
5729 DeferredStackCheck* deferred_stack_check = 5935 DeferredStackCheck* deferred_stack_check =
5730 new(zone()) DeferredStackCheck(this, instr); 5936 new (zone()) DeferredStackCheck(this, instr);
5731 __ LoadRoot(ip, Heap::kStackLimitRootIndex); 5937 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5732 __ cmp(sp, Operand(ip)); 5938 __ cmpl(sp, ip);
5733 __ b(lo, deferred_stack_check->entry()); 5939 __ blt(deferred_stack_check->entry());
5734 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 5940 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5735 __ bind(instr->done_label()); 5941 __ bind(instr->done_label());
5736 deferred_stack_check->SetExit(instr->done_label()); 5942 deferred_stack_check->SetExit(instr->done_label());
5737 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5943 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5738 // Don't record a deoptimization index for the safepoint here. 5944 // Don't record a deoptimization index for the safepoint here.
5739 // This will be done explicitly when emitting the call and the safepoint in 5945 // This will be done explicitly when emitting the call and the safepoint in
5740 // the deferred code. 5946 // the deferred code.
5741 } 5947 }
5742 } 5948 }
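
Editor's note: as a reading aid only, here is a minimal, self-contained C++ sketch of the control flow DoStackCheck emits, assuming plain stand-ins for the two slow paths (CallStackCheckBuiltin and DeferredStackCheck are invented names, not V8 API). The fast path simply compares the stack pointer against the stack limit (cmpl sp, ip above) and falls through when there is room.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the StackCheck builtin call and the deferred code.
static void CallStackCheckBuiltin() { std::puts("StackCheck builtin"); }
static void DeferredStackCheck() { std::puts("deferred stack check"); }

// Compare sp against the stack limit and take the slow path only when the
// limit has been crossed; function entries call the builtin, backwards
// branches jump to the deferred stack check.
static void StackCheck(uintptr_t sp, uintptr_t stack_limit, bool function_entry) {
  if (sp >= stack_limit) return;  // enough stack left: fall through
  if (function_entry) {
    CallStackCheckBuiltin();
  } else {
    DeferredStackCheck();
  }
}

int main() {
  StackCheck(0x1000, 0x2000, true);   // limit crossed at function entry
  StackCheck(0x3000, 0x2000, false);  // plenty of stack, nothing happens
  return 0;
}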
5743 5949
5744 5950
5745 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 5951 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5746 // This is a pseudo-instruction that ensures that the environment here is 5952 // This is a pseudo-instruction that ensures that the environment here is
5747 // properly registered for deoptimization and records the assembler's PC 5953 // properly registered for deoptimization and records the assembler's PC
5748 // offset. 5954 // offset.
5749 LEnvironment* environment = instr->environment(); 5955 LEnvironment* environment = instr->environment();
5750 5956
5751 // If the environment were already registered, we would have no way of 5957 // If the environment were already registered, we would have no way of
5752 // backpatching it with the spill slot operands. 5958 // backpatching it with the spill slot operands.
5753 DCHECK(!environment->HasBeenRegistered()); 5959 DCHECK(!environment->HasBeenRegistered());
5754 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 5960 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5755 5961
5756 GenerateOsrPrologue(); 5962 GenerateOsrPrologue();
5757 } 5963 }
5758 5964
5759 5965
5760 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { 5966 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5761 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5967 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5762 __ cmp(r0, ip); 5968 __ cmp(r3, ip);
5763 DeoptimizeIf(eq, instr->environment()); 5969 DeoptimizeIf(eq, instr);
5764 5970
5765 Register null_value = r5; 5971 Register null_value = r8;
5766 __ LoadRoot(null_value, Heap::kNullValueRootIndex); 5972 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5767 __ cmp(r0, null_value); 5973 __ cmp(r3, null_value);
5768 DeoptimizeIf(eq, instr->environment()); 5974 DeoptimizeIf(eq, instr);
5769 5975
5770 __ SmiTst(r0); 5976 __ TestIfSmi(r3, r0);
5771 DeoptimizeIf(eq, instr->environment()); 5977 DeoptimizeIf(eq, instr, cr0);
5772 5978
5773 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); 5979 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5774 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); 5980 __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
5775 DeoptimizeIf(le, instr->environment()); 5981 DeoptimizeIf(le, instr);
5776 5982
5777 Label use_cache, call_runtime; 5983 Label use_cache, call_runtime;
5778 __ CheckEnumCache(null_value, &call_runtime); 5984 __ CheckEnumCache(null_value, &call_runtime);
5779 5985
5780 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); 5986 __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
5781 __ b(&use_cache); 5987 __ b(&use_cache);
5782 5988
5783 // Get the set of properties to enumerate. 5989 // Get the set of properties to enumerate.
5784 __ bind(&call_runtime); 5990 __ bind(&call_runtime);
5785 __ push(r0); 5991 __ push(r3);
5786 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); 5992 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5787 5993
5788 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); 5994 __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
5789 __ LoadRoot(ip, Heap::kMetaMapRootIndex); 5995 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5790 __ cmp(r1, ip); 5996 __ cmp(r4, ip);
5791 DeoptimizeIf(ne, instr->environment()); 5997 DeoptimizeIf(ne, instr);
5792 __ bind(&use_cache); 5998 __ bind(&use_cache);
5793 } 5999 }
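
Editor's note: a rough sketch (not part of the patch) of the decision flow DoForInPrepareMap implements: deoptimize when the enumerable is undefined, null, a smi, or not an ordinary JS object (proxies included); use the map's enum cache when it is valid; otherwise fall back to Runtime::kGetPropertyNamesFast, whose result is further checked against the meta map above. The enum and function names below are invented for illustration.

#include <cstdio>

enum class ForInPrep { kDeopt, kUseEnumCache, kCallRuntime };

// Invented helper mirroring the checks above; each flag stands for one of the
// tests performed on the enumerable object held in r3.
static ForInPrep PrepareForIn(bool is_undefined, bool is_null, bool is_smi,
                              bool is_proxy_or_non_object, bool enum_cache_valid) {
  if (is_undefined || is_null || is_smi || is_proxy_or_non_object) {
    return ForInPrep::kDeopt;
  }
  return enum_cache_valid ? ForInPrep::kUseEnumCache : ForInPrep::kCallRuntime;
}

int main() {
  // An ordinary object with a valid enum cache takes the fast path.
  std::printf("%d\n", static_cast<int>(PrepareForIn(false, false, false, false, true)));
  return 0;
}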
5794 6000
5795 6001
5796 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { 6002 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5797 Register map = ToRegister(instr->map()); 6003 Register map = ToRegister(instr->map());
5798 Register result = ToRegister(instr->result()); 6004 Register result = ToRegister(instr->result());
5799 Label load_cache, done; 6005 Label load_cache, done;
5800 __ EnumLength(result, map); 6006 __ EnumLength(result, map);
5801 __ cmp(result, Operand(Smi::FromInt(0))); 6007 __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
5802 __ b(ne, &load_cache); 6008 __ bne(&load_cache);
5803 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); 6009 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5804 __ jmp(&done); 6010 __ b(&done);
5805 6011
5806 __ bind(&load_cache); 6012 __ bind(&load_cache);
5807 __ LoadInstanceDescriptors(map, result); 6013 __ LoadInstanceDescriptors(map, result);
5808 __ ldr(result, 6014 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5809 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 6015 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5810 __ ldr(result, 6016 __ cmpi(result, Operand::Zero());
5811 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 6017 DeoptimizeIf(eq, instr);
5812 __ cmp(result, Operand::Zero());
5813 DeoptimizeIf(eq, instr->environment());
5814 6018
5815 __ bind(&done); 6019 __ bind(&done);
5816 } 6020 }
5817 6021
5818 6022
5819 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 6023 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5820 Register object = ToRegister(instr->value()); 6024 Register object = ToRegister(instr->value());
5821 Register map = ToRegister(instr->map()); 6025 Register map = ToRegister(instr->map());
5822 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); 6026 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5823 __ cmp(map, scratch0()); 6027 __ cmp(map, scratch0());
5824 DeoptimizeIf(ne, instr->environment()); 6028 DeoptimizeIf(ne, instr);
5825 } 6029 }
5826 6030
5827 6031
5828 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, 6032 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5829 Register result, 6033 Register result, Register object,
5830 Register object,
5831 Register index) { 6034 Register index) {
5832 PushSafepointRegistersScope scope(this); 6035 PushSafepointRegistersScope scope(this);
5833 __ Push(object); 6036 __ Push(object, index);
5834 __ Push(index); 6037 __ li(cp, Operand::Zero());
5835 __ mov(cp, Operand::Zero());
5836 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); 6038 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5837 RecordSafepointWithRegisters( 6039 RecordSafepointWithRegisters(instr->pointer_map(), 2,
5838 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); 6040 Safepoint::kNoLazyDeopt);
5839 __ StoreToSafepointRegisterSlot(r0, result); 6041 __ StoreToSafepointRegisterSlot(r3, result);
5840 } 6042 }
5841 6043
5842 6044
5843 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { 6045 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5844 class DeferredLoadMutableDouble FINAL : public LDeferredCode { 6046 class DeferredLoadMutableDouble FINAL : public LDeferredCode {
5845 public: 6047 public:
5846 DeferredLoadMutableDouble(LCodeGen* codegen, 6048 DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
5847 LLoadFieldByIndex* instr, 6049 Register result, Register object, Register index)
5848 Register result,
5849 Register object,
5850 Register index)
5851 : LDeferredCode(codegen), 6050 : LDeferredCode(codegen),
5852 instr_(instr), 6051 instr_(instr),
5853 result_(result), 6052 result_(result),
5854 object_(object), 6053 object_(object),
5855 index_(index) { 6054 index_(index) {}
5856 }
5857 virtual void Generate() OVERRIDE { 6055 virtual void Generate() OVERRIDE {
5858 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); 6056 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5859 } 6057 }
5860 virtual LInstruction* instr() OVERRIDE { return instr_; } 6058 virtual LInstruction* instr() OVERRIDE { return instr_; }
6059
5861 private: 6060 private:
5862 LLoadFieldByIndex* instr_; 6061 LLoadFieldByIndex* instr_;
5863 Register result_; 6062 Register result_;
5864 Register object_; 6063 Register object_;
5865 Register index_; 6064 Register index_;
5866 }; 6065 };
5867 6066
5868 Register object = ToRegister(instr->object()); 6067 Register object = ToRegister(instr->object());
5869 Register index = ToRegister(instr->index()); 6068 Register index = ToRegister(instr->index());
5870 Register result = ToRegister(instr->result()); 6069 Register result = ToRegister(instr->result());
5871 Register scratch = scratch0(); 6070 Register scratch = scratch0();
5872 6071
5873 DeferredLoadMutableDouble* deferred; 6072 DeferredLoadMutableDouble* deferred;
5874 deferred = new(zone()) DeferredLoadMutableDouble( 6073 deferred = new (zone())
5875 this, instr, result, object, index); 6074 DeferredLoadMutableDouble(this, instr, result, object, index);
5876 6075
5877 Label out_of_object, done; 6076 Label out_of_object, done;
5878 6077
5879 __ tst(index, Operand(Smi::FromInt(1))); 6078 __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
5880 __ b(ne, deferred->entry()); 6079 __ bne(deferred->entry(), cr0);
5881 __ mov(index, Operand(index, ASR, 1)); 6080 __ ShiftRightArithImm(index, index, 1);
5882 6081
5883 __ cmp(index, Operand::Zero()); 6082 __ cmpi(index, Operand::Zero());
5884 __ b(lt, &out_of_object); 6083 __ blt(&out_of_object);
5885 6084
5886 __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index)); 6085 __ SmiToPtrArrayOffset(r0, index);
5887 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); 6086 __ add(scratch, object, r0);
6087 __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5888 6088
5889 __ b(&done); 6089 __ b(&done);
5890 6090
5891 __ bind(&out_of_object); 6091 __ bind(&out_of_object);
5892 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 6092 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5893 // Index is equal to negated out of object property index plus 1. 6093 // Index is equal to negated out of object property index plus 1.
5894 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); 6094 __ SmiToPtrArrayOffset(r0, index);
5895 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); 6095 __ sub(scratch, result, r0);
5896 __ ldr(result, FieldMemOperand(scratch, 6096 __ LoadP(result,
5897 FixedArray::kHeaderSize - kPointerSize)); 6097 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
5898 __ bind(deferred->exit()); 6098 __ bind(deferred->exit());
5899 __ bind(&done); 6099 __ bind(&done);
5900 } 6100 }
5901 6101
5902 6102
5903 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { 6103 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5904 Register context = ToRegister(instr->context()); 6104 Register context = ToRegister(instr->context());
5905 __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); 6105 __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5906 } 6106 }
5907 6107
5908 6108
5909 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { 6109 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5910 Handle<ScopeInfo> scope_info = instr->scope_info(); 6110 Handle<ScopeInfo> scope_info = instr->scope_info();
5911 __ Push(scope_info); 6111 __ Push(scope_info);
5912 __ push(ToRegister(instr->function())); 6112 __ push(ToRegister(instr->function()));
5913 CallRuntime(Runtime::kPushBlockContext, 2, instr); 6113 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5914 RecordSafepoint(Safepoint::kNoLazyDeopt); 6114 RecordSafepoint(Safepoint::kNoLazyDeopt);
5915 } 6115 }
5916 6116
5917 6117
5918 #undef __ 6118 #undef __
5919 6119 }
5920 } } // namespace v8::internal 6120 } // namespace v8::internal