OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 47 matching lines...) | |
58 Safepoint::DeoptMode deopt_mode_; | 58 Safepoint::DeoptMode deopt_mode_; |
59 }; | 59 }; |
60 | 60 |
61 | 61 |
62 #define __ masm()-> | 62 #define __ masm()-> |
63 | 63 |
64 bool LCodeGen::GenerateCode() { | 64 bool LCodeGen::GenerateCode() { |
65 HPhase phase("Z_Code generation", chunk()); | 65 HPhase phase("Z_Code generation", chunk()); |
66 ASSERT(is_unused()); | 66 ASSERT(is_unused()); |
67 status_ = GENERATING; | 67 status_ = GENERATING; |
68 CpuFeatures::Scope scope1(VFP3); | |
69 CpuFeatures::Scope scope2(ARMv7); | |
70 | 68 |
71 CodeStub::GenerateFPStubs(); | 69 CodeStub::GenerateFPStubs(); |
72 | 70 |
73 // Open a frame scope to indicate that there is a frame on the stack. The | 71 // Open a frame scope to indicate that there is a frame on the stack. The |
74 // NONE indicates that the scope shouldn't actually generate code to set up | 72 // NONE indicates that the scope shouldn't actually generate code to set up |
75 // the frame (that is done in GeneratePrologue). | 73 // the frame (that is done in GeneratePrologue). |
76 FrameScope frame_scope(masm_, StackFrame::NONE); | 74 FrameScope frame_scope(masm_, StackFrame::NONE); |
77 | 75 |
78 return GeneratePrologue() && | 76 return GeneratePrologue() && |
79 GenerateBody() && | 77 GenerateBody() && |
(...skipping 31 matching lines...) | |
111 size_t length = builder.position(); | 109 size_t length = builder.position(); |
112 Vector<char> copy = Vector<char>::New(length + 1); | 110 Vector<char> copy = Vector<char>::New(length + 1); |
113 memcpy(copy.start(), builder.Finalize(), copy.length()); | 111 memcpy(copy.start(), builder.Finalize(), copy.length()); |
114 masm()->RecordComment(copy.start()); | 112 masm()->RecordComment(copy.start()); |
115 } | 113 } |
116 | 114 |
117 | 115 |
118 bool LCodeGen::GeneratePrologue() { | 116 bool LCodeGen::GeneratePrologue() { |
119 ASSERT(is_generating()); | 117 ASSERT(is_generating()); |
120 | 118 |
121 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 119 if (info()->IsOptimizing()) { |
120 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | |
122 | 121 |
123 #ifdef DEBUG | 122 #ifdef DEBUG |
124 if (strlen(FLAG_stop_at) > 0 && | 123 if (strlen(FLAG_stop_at) > 0 && |
125 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { | 124 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { |
126 __ stop("stop_at"); | 125 __ stop("stop_at"); |
127 } | 126 } |
128 #endif | 127 #endif |
129 | 128 |
130 // r1: Callee's JS function. | 129 // r1: Callee's JS function. |
131 // cp: Callee's context. | 130 // cp: Callee's context. |
132 // fp: Caller's frame pointer. | 131 // fp: Caller's frame pointer. |
133 // lr: Caller's pc. | 132 // lr: Caller's pc. |
134 | 133 |
135 // Strict mode functions and builtins need to replace the receiver | 134 // Strict mode functions and builtins need to replace the receiver |
136 // with undefined when called as functions (without an explicit | 135 // with undefined when called as functions (without an explicit |
137 // receiver object). r5 is zero for method calls and non-zero for | 136 // receiver object). r5 is zero for method calls and non-zero for |
138 // function calls. | 137 // function calls. |
139 if (!info_->is_classic_mode() || info_->is_native()) { | 138 if (!info_->is_classic_mode() || info_->is_native()) { |
140 Label ok; | 139 Label ok; |
141 Label begin; | 140 Label begin; |
142 __ bind(&begin); | 141 __ bind(&begin); |
143 __ cmp(r5, Operand(0)); | 142 __ cmp(r5, Operand(0)); |
144 __ b(eq, &ok); | 143 __ b(eq, &ok); |
145 int receiver_offset = scope()->num_parameters() * kPointerSize; | 144 int receiver_offset = scope()->num_parameters() * kPointerSize; |
146 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 145 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
147 __ str(r2, MemOperand(sp, receiver_offset)); | 146 __ str(r2, MemOperand(sp, receiver_offset)); |
148 __ bind(&ok); | 147 __ bind(&ok); |
149 ASSERT_EQ(kSizeOfOptimizedStrictModePrologue, ok.pos() - begin.pos()); | 148 ASSERT_EQ(kSizeOfOptimizedStrictModePrologue, ok.pos() - begin.pos()); |
149 } | |
150 } | 150 } |
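A note on the store above: no frame has been built yet at this point, so the receiver and the parameters still sit on the caller's stack, receiver deepest. A sketch of the slot the str targets, assuming n = scope()->num_parameters():

    // Sketch (not V8 code): stack at function entry.
    //   [sp + n * kPointerSize]                 receiver  <- replaced with undefined
    //   [sp + 0] .. [sp + (n-1) * kPointerSize] the n parameters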
151 | 151 |
152 // The following three instructions must remain together and unmodified for | 152 if (NeedsEagerFrame()) { |
153 // code aging to work properly. | 153 // The following three instructions must remain together and unmodified for |
154 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); | 154 // code aging to work properly. |
155 // Add unused load of ip to ensure prologue sequence is identical for | 155 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); |
156 // full-codegen and lithium-codegen. | 156 // Add unused load of ip to ensure prologue sequence is identical for |
157 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 157 // full-codegen and lithium-codegen. |
158 __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. | 158 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
159 __ add(fp, sp, Operand(2 * kPointerSize)); | |
160 frame_is_built_ = true; | |
161 } | |
159 | 162 |
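For reference, the frame those instructions establish; a minimal sketch (not V8 code) that just prints the fixed-slot layout, assuming 4-byte pointers and that stm(db_w) places the lowest-numbered register at the lowest address:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      // stm(db_w, sp, r1 | cp | fp | lr) pushes four words; fp is then
      // set to sp + 2 * kPointerSize, so it points at the saved caller fp.
      printf("[sp + %2d] lr (caller's return address)\n", 3 * kPointerSize);
      printf("[sp + %2d] caller's fp   <- new fp\n", 2 * kPointerSize);
      printf("[sp + %2d] cp (context)\n", 1 * kPointerSize);
      printf("[sp + %2d] r1 (JSFunction)\n", 0 * kPointerSize);
      return 0;
    }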
160 // Reserve space for the stack slots needed by the code. | 163 // Reserve space for the stack slots needed by the code. |
161 int slots = GetStackSlotCount(); | 164 int slots = GetStackSlotCount(); |
162 if (slots > 0) { | 165 if (slots > 0) { |
163 if (FLAG_debug_code) { | 166 if (FLAG_debug_code) { |
164 __ mov(r0, Operand(slots)); | 167 __ mov(r0, Operand(slots)); |
165 __ mov(r2, Operand(kSlotsZapValue)); | 168 __ mov(r2, Operand(kSlotsZapValue)); |
166 Label loop; | 169 Label loop; |
167 __ bind(&loop); | 170 __ bind(&loop); |
168 __ push(r2); | 171 __ push(r2); |
169 __ sub(r0, r0, Operand(1), SetCC); | 172 __ sub(r0, r0, Operand(1), SetCC); |
170 __ b(ne, &loop); | 173 __ b(ne, &loop); |
171 } else { | 174 } else { |
172 __ sub(sp, sp, Operand(slots * kPointerSize)); | 175 __ sub(sp, sp, Operand(slots * kPointerSize)); |
173 } | 176 } |
174 } | 177 } |
175 | 178 |
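In debug builds the loop above fills every reserved slot with kSlotsZapValue so stale reads are easy to recognize; release builds only move sp. A sketch of the runtime effect (not V8 code; the zap constant is an assumption, the real one lives in v8globals.h):

    #include <cstdint>
    #include <vector>

    void ReserveSlots(std::vector<uint32_t>* stack, int slots, bool debug_code) {
      const uint32_t kSlotsZapValue = 0xbeefcafe;  // assumed value
      for (int i = 0; i < slots; i++) {
        // Debug: write a recognizable pattern (the push loop above).
        // Release: the slots would simply be left uninitialized.
        stack->push_back(debug_code ? kSlotsZapValue : 0);
      }
    }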
176 // Possibly allocate a local context. | 179 // Possibly allocate a local context. |
177 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 180 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
178 if (heap_slots > 0) { | 181 if (heap_slots > 0) { |
179 Comment(";;; Allocate local context"); | 182 Comment(";;; Allocate local context"); |
180 // Argument to NewContext is the function, which is in r1. | 183 // Argument to NewContext is the function, which is in r1. |
181 __ push(r1); | 184 __ push(r1); |
182 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 185 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
183 FastNewContextStub stub(heap_slots); | 186 FastNewContextStub stub(heap_slots); |
184 __ CallStub(&stub); | 187 __ CallStub(&stub); |
185 } else { | 188 } else { |
186 __ CallRuntime(Runtime::kNewFunctionContext, 1); | 189 __ CallRuntime(Runtime::kNewFunctionContext, 1); |
187 } | 190 } |
(...skipping 15 matching lines...) | |
203 __ str(r0, target); | 206 __ str(r0, target); |
204 // Update the write barrier. This clobbers r3 and r0. | 207 // Update the write barrier. This clobbers r3 and r0. |
205 __ RecordWriteContextSlot( | 208 __ RecordWriteContextSlot( |
206 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); | 209 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); |
207 } | 210 } |
208 } | 211 } |
209 Comment(";;; End allocate local context"); | 212 Comment(";;; End allocate local context"); |
210 } | 213 } |
211 | 214 |
212 // Trace the call. | 215 // Trace the call. |
213 if (FLAG_trace) { | 216 if (FLAG_trace && info()->IsOptimizing()) { |
214 __ CallRuntime(Runtime::kTraceEnter, 0); | 217 __ CallRuntime(Runtime::kTraceEnter, 0); |
215 } | 218 } |
216 return !is_aborted(); | 219 return !is_aborted(); |
217 } | 220 } |
218 | 221 |
219 | 222 |
220 bool LCodeGen::GenerateBody() { | 223 bool LCodeGen::GenerateBody() { |
221 ASSERT(is_generating()); | 224 ASSERT(is_generating()); |
222 bool emit_instructions = true; | 225 bool emit_instructions = true; |
223 for (current_instruction_ = 0; | 226 for (current_instruction_ = 0; |
(...skipping 14 matching lines...) | |
238 return !is_aborted(); | 241 return !is_aborted(); |
239 } | 242 } |
240 | 243 |
241 | 244 |
242 bool LCodeGen::GenerateDeferredCode() { | 245 bool LCodeGen::GenerateDeferredCode() { |
243 ASSERT(is_generating()); | 246 ASSERT(is_generating()); |
244 if (deferred_.length() > 0) { | 247 if (deferred_.length() > 0) { |
245 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 248 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
246 LDeferredCode* code = deferred_[i]; | 249 LDeferredCode* code = deferred_[i]; |
247 __ bind(code->entry()); | 250 __ bind(code->entry()); |
251 if (NeedsDeferredFrame()) { | |
252 Comment(";;; Deferred build frame", | |
253 code->instruction_index(), | |
254 code->instr()->Mnemonic()); | |
255 ASSERT(!frame_is_built_); | |
256 ASSERT(info()->IsStub()); | |
257 frame_is_built_ = true; | |
258 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); | |
259 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); | |
260 __ push(scratch0()); | |
261 __ add(fp, sp, Operand(2 * kPointerSize)); | |
262 } | |
248 Comment(";;; Deferred code @%d: %s.", | 263 Comment(";;; Deferred code @%d: %s.", |
249 code->instruction_index(), | 264 code->instruction_index(), |
250 code->instr()->Mnemonic()); | 265 code->instr()->Mnemonic()); |
251 code->Generate(); | 266 code->Generate(); |
267 if (NeedsDeferredFrame()) { | |
268 Comment(";;; Deferred destory frame", | |
269 code->instruction_index(), | |
270 code->instr()->Mnemonic()); | |
271 ASSERT(frame_is_built_); | |
272 __ pop(ip); | |
273 __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit()); | |
274 frame_is_built_ = false; | |
275 } | |
252 __ jmp(code->exit()); | 276 __ jmp(code->exit()); |
253 } | 277 } |
254 } | 278 } |
255 | 279 |
256 // Force constant pool emission at the end of the deferred code to make | 280 // Force constant pool emission at the end of the deferred code to make |
257 // sure that no constant pools are emitted after. | 281 // sure that no constant pools are emitted after. |
258 masm()->CheckConstPool(true, false); | 282 masm()->CheckConstPool(true, false); |
259 | 283 |
260 return !is_aborted(); | 284 return !is_aborted(); |
261 } | 285 } |
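For the deferred build/destroy pair above, a sketch of the stub frame it creates (not V8 code; the pushed Smi is the frame-type marker the stack walker uses to classify frames that have no JSFunction):

    // After stm(db_w, sp, cp | fp | lr), push(Smi::FromInt(StackFrame::STUB)),
    // add(fp, sp, 2 * kPointerSize):
    //
    //   [sp + 12] lr
    //   [sp +  8] saved fp   <- fp
    //   [sp +  4] cp
    //   [sp +  0] Smi(StackFrame::STUB)
    //
    // The teardown pops the marker into ip and ldm's cp/fp/lr back.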
262 | 286 |
263 | 287 |
264 bool LCodeGen::GenerateDeoptJumpTable() { | 288 bool LCodeGen::GenerateDeoptJumpTable() { |
265 // Check that the jump table is accessible from everywhere in the function | 289 // Check that the jump table is accessible from everywhere in the function |
266 // code, i.e. that offsets to the table can be encoded in the 24bit signed | 290 // code, i.e. that offsets to the table can be encoded in the 24bit signed |
267 // immediate of a branch instruction. | 291 // immediate of a branch instruction. |
268 // To simplify we consider the code size from the first instruction to the | 292 // To simplify we consider the code size from the first instruction to the |
269 // end of the jump table. We also don't consider the pc load delta. | 293 // end of the jump table. We also don't consider the pc load delta. |
270 // Each entry in the jump table generates one instruction and inlines one | 294 // Each entry in the jump table generates one instruction and inlines one |
271 // 32bit data after it. | 295 // 32bit data after it. |
272 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + | 296 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + |
273 deopt_jump_table_.length() * 2)) { | 297 deopt_jump_table_.length() * 5)) { |
274 Abort("Generated code is too large"); | 298 Abort("Generated code is too large"); |
275 } | 299 } |
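The bound changed from length * 2 to length * 5 because a table entry that must build a frame now expands to several instructions rather than a two-word load-plus-literal. A back-of-the-envelope sketch of the range check (not V8 code; the sizes are hypothetical):

    #include <cstdint>
    #include <cstdio>

    // ARM B/BL encodes a signed 24-bit word offset, so a branch reaches
    // +/- 2^23 instructions from the branch site.
    static bool is_int24(int64_t x) { return x >= -(1 << 23) && x < (1 << 23); }

    int main() {
      const int kInstrSize = 4;        // bytes per ARM instruction
      int64_t pc_offset = 8 << 20;     // hypothetical: 8 MB of code so far
      int64_t entries = 100000;        // hypothetical jump table size
      printf("fits: %d\n", is_int24(pc_offset / kInstrSize + entries * 5));
      return 0;
    }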
276 | 300 |
277 // Block the constant pool emission during the jump table emission. | 301 masm()->CheckConstPool(true, false); |
278 __ BlockConstPoolFor(deopt_jump_table_.length()); | 302 |
279 __ RecordComment("[ Deoptimisation jump table"); | 303 __ RecordComment("[ Deoptimisation jump table"); |
280 Label table_start; | 304 Label table_start; |
281 __ bind(&table_start); | 305 __ bind(&table_start); |
306 Label needs_frame_not_call; | |
307 bool has_generated_needs_frame_not_call = false; | |
308 Label needs_frame_is_call; | |
309 bool has_generated_needs_frame_is_call = false; | |
282 for (int i = 0; i < deopt_jump_table_.length(); i++) { | 310 for (int i = 0; i < deopt_jump_table_.length(); i++) { |
283 __ bind(&deopt_jump_table_[i].label); | 311 __ bind(&deopt_jump_table_[i].label); |
284 __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta)); | 312 Address entry = deopt_jump_table_[i].address; |
285 __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address)); | 313 if (deopt_jump_table_[i].needs_frame) { |
314 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry))); | |
315 if (deopt_jump_table_[i].is_call) { | |
316 if (!has_generated_needs_frame_is_call) { | |
317 has_generated_needs_frame_is_call = true; | |
318 __ bind(&needs_frame_is_call); | |
319 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); | |
320 // If there is no frame, we don't have access to the JSFunction that | |
321 // needs to be put into the frame. | |
322 ASSERT(info()->IsStub()); | |
323 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); | |
324 __ push(scratch0()); | |
325 __ add(fp, sp, Operand(2 * kPointerSize)); | |
326 __ mov(lr, Operand(pc), LeaveCC, al); | |
327 __ mov(pc, ip); | |
328 } else { | |
329 __ b(&needs_frame_is_call); | |
330 } | |
331 } else { | |
332 if (!has_generated_needs_frame_not_call) { | |
333 has_generated_needs_frame_not_call = true; | |
334 __ bind(&needs_frame_not_call); | |
335 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); | |
336 // If there is no frame, we don't have access to the JSFunction that | |
337 // needs to be put into the frame. | |
338 ASSERT(info()->IsStub()); | |
339 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); | |
340 __ push(scratch0()); | |
341 __ add(fp, sp, Operand(2 * kPointerSize)); | |
342 __ mov(pc, ip); | |
343 } else { | |
344 __ b(&needs_frame_not_call); | |
345 } | |
346 } | |
347 } else { | |
348 if (deopt_jump_table_[i].is_call) { | |
349 __ mov(lr, Operand(pc), LeaveCC, al); | |
350 __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); | |
351 } else { | |
352 __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); | |
353 } | |
354 } | |
355 masm()->CheckConstPool(false, false); | |
286 } | 356 } |
287 ASSERT(masm()->InstructionsGeneratedSince(&table_start) == | |
288 deopt_jump_table_.length() * 2); | |
289 __ RecordComment("]"); | 357 __ RecordComment("]"); |
290 | 358 |
359 // Force constant pool emission at the end of the deopt jump table to make | |
360 // sure that no constant pools are emitted after. | |
361 masm()->CheckConstPool(true, false); | |
362 | |
291 // The deoptimization jump table is the last part of the instruction | 363 // The deoptimization jump table is the last part of the instruction |
292 // sequence. Mark the generated code as done unless we bailed out. | 364 // sequence. Mark the generated code as done unless we bailed out. |
293 if (!is_aborted()) status_ = DONE; | 365 if (!is_aborted()) status_ = DONE; |
294 return !is_aborted(); | 366 return !is_aborted(); |
295 } | 367 } |
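A subtlety in the is_call paths above: `__ mov(lr, Operand(pc))` relies on the ARM convention that reading pc yields the address of the current instruction plus 8, so lr ends up holding the address just past the following `mov(pc, ip)`, exactly the return address a bl would have produced. With hypothetical addresses:

    // 0x1000: mov lr, pc   ; lr = 0x1000 + 8 = 0x1008
    // 0x1004: mov pc, ip   ; "call" the deopt entry held in ip
    // 0x1008: ...          ; where the deoptimizer's return lands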
296 | 368 |
297 | 369 |
298 bool LCodeGen::GenerateSafepointTable() { | 370 bool LCodeGen::GenerateSafepointTable() { |
299 ASSERT(is_done()); | 371 ASSERT(is_done()); |
300 safepoints_.Emit(masm(), GetStackSlotCount()); | 372 safepoints_.Emit(masm(), GetStackSlotCount()); |
301 return !is_aborted(); | 373 return !is_aborted(); |
302 } | 374 } |
303 | 375 |
304 | 376 |
305 Register LCodeGen::ToRegister(int index) const { | 377 Register LCodeGen::ToRegister(int index) const { |
306 return Register::FromAllocationIndex(index); | 378 return Register::FromAllocationIndex(index); |
307 } | 379 } |
308 | 380 |
309 | 381 |
310 DoubleRegister LCodeGen::ToDoubleRegister(int index) const { | 382 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { |
311 return DoubleRegister::FromAllocationIndex(index); | 383 return DwVfpRegister::FromAllocationIndex(index); |
312 } | 384 } |
313 | 385 |
314 | 386 |
315 Register LCodeGen::ToRegister(LOperand* op) const { | 387 Register LCodeGen::ToRegister(LOperand* op) const { |
316 ASSERT(op->IsRegister()); | 388 ASSERT(op->IsRegister()); |
317 return ToRegister(op->index()); | 389 return ToRegister(op->index()); |
318 } | 390 } |
319 | 391 |
320 | 392 |
321 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { | 393 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { |
(...skipping 20 matching lines...) | |
342 return scratch; | 414 return scratch; |
343 } else if (op->IsStackSlot() || op->IsArgument()) { | 415 } else if (op->IsStackSlot() || op->IsArgument()) { |
344 __ ldr(scratch, ToMemOperand(op)); | 416 __ ldr(scratch, ToMemOperand(op)); |
345 return scratch; | 417 return scratch; |
346 } | 418 } |
347 UNREACHABLE(); | 419 UNREACHABLE(); |
348 return scratch; | 420 return scratch; |
349 } | 421 } |
350 | 422 |
351 | 423 |
352 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 424 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
353 ASSERT(op->IsDoubleRegister()); | 425 ASSERT(op->IsDoubleRegister()); |
354 return ToDoubleRegister(op->index()); | 426 return ToDoubleRegister(op->index()); |
355 } | 427 } |
356 | 428 |
357 | 429 |
358 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, | 430 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, |
359 SwVfpRegister flt_scratch, | 431 SwVfpRegister flt_scratch, |
360 DoubleRegister dbl_scratch) { | 432 DwVfpRegister dbl_scratch) { |
361 if (op->IsDoubleRegister()) { | 433 if (op->IsDoubleRegister()) { |
362 return ToDoubleRegister(op->index()); | 434 return ToDoubleRegister(op->index()); |
363 } else if (op->IsConstantOperand()) { | 435 } else if (op->IsConstantOperand()) { |
364 LConstantOperand* const_op = LConstantOperand::cast(op); | 436 LConstantOperand* const_op = LConstantOperand::cast(op); |
365 HConstant* constant = chunk_->LookupConstant(const_op); | 437 HConstant* constant = chunk_->LookupConstant(const_op); |
366 Handle<Object> literal = constant->handle(); | 438 Handle<Object> literal = constant->handle(); |
367 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 439 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
368 if (r.IsInteger32()) { | 440 if (r.IsInteger32()) { |
369 ASSERT(literal->IsNumber()); | 441 ASSERT(literal->IsNumber()); |
370 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); | 442 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); |
(...skipping 115 matching lines...) | |
486 // arguments index points to the first element of a sequence of tagged | 558 // arguments index points to the first element of a sequence of tagged |
487 // values on the stack that represent the arguments. This needs to be | 559 // values on the stack that represent the arguments. This needs to be |
488 // kept in sync with the LArgumentsElements implementation. | 560 // kept in sync with the LArgumentsElements implementation. |
489 *arguments_index = -environment->parameter_count(); | 561 *arguments_index = -environment->parameter_count(); |
490 *arguments_count = environment->parameter_count(); | 562 *arguments_count = environment->parameter_count(); |
491 | 563 |
492 WriteTranslation(environment->outer(), | 564 WriteTranslation(environment->outer(), |
493 translation, | 565 translation, |
494 arguments_index, | 566 arguments_index, |
495 arguments_count); | 567 arguments_count); |
496 int closure_id = *info()->closure() != *environment->closure() | 568 bool has_closure_id = !info()->closure().is_null() && |
569 *info()->closure() != *environment->closure(); | |
570 int closure_id = has_closure_id | |
497 ? DefineDeoptimizationLiteral(environment->closure()) | 571 ? DefineDeoptimizationLiteral(environment->closure()) |
498 : Translation::kSelfLiteralId; | 572 : Translation::kSelfLiteralId; |
499 | 573 |
500 switch (environment->frame_type()) { | 574 switch (environment->frame_type()) { |
501 case JS_FUNCTION: | 575 case JS_FUNCTION: |
502 translation->BeginJSFrame(environment->ast_id(), closure_id, height); | 576 translation->BeginJSFrame(environment->ast_id(), closure_id, height); |
503 break; | 577 break; |
504 case JS_CONSTRUCT: | 578 case JS_CONSTRUCT: |
505 translation->BeginConstructStubFrame(closure_id, translation_size); | 579 translation->BeginConstructStubFrame(closure_id, translation_size); |
506 break; | 580 break; |
507 case JS_GETTER: | 581 case JS_GETTER: |
508 ASSERT(translation_size == 1); | 582 ASSERT(translation_size == 1); |
509 ASSERT(height == 0); | 583 ASSERT(height == 0); |
510 translation->BeginGetterStubFrame(closure_id); | 584 translation->BeginGetterStubFrame(closure_id); |
511 break; | 585 break; |
512 case JS_SETTER: | 586 case JS_SETTER: |
513 ASSERT(translation_size == 2); | 587 ASSERT(translation_size == 2); |
514 ASSERT(height == 0); | 588 ASSERT(height == 0); |
515 translation->BeginSetterStubFrame(closure_id); | 589 translation->BeginSetterStubFrame(closure_id); |
516 break; | 590 break; |
591 case STUB: | |
592 translation->BeginCompiledStubPseudoFrame(Code::KEYED_LOAD_IC); | |
Jakob Kummerow (2012/11/19 12:36:00): can we get the code type dynamically?
danno (2012/11/26 17:16:18): Done.
593 break; | |
517 case ARGUMENTS_ADAPTOR: | 594 case ARGUMENTS_ADAPTOR: |
518 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); | 595 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); |
519 break; | 596 break; |
520 } | 597 } |
521 | 598 |
522 // Inlined frames which push their arguments cause the index to be | 599 // Inlined frames which push their arguments cause the index to be |
523 // bumped and a new stack area to be used for materialization. | 600 // bumped and a new stack area to be used for materialization. |
524 if (environment->entry() != NULL && | 601 if (environment->entry() != NULL && |
525 environment->entry()->arguments_pushed()) { | 602 environment->entry()->arguments_pushed()) { |
526 *arguments_index = *arguments_index < 0 | 603 *arguments_index = *arguments_index < 0 |
(...skipping 175 matching lines...) | |
702 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 779 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
703 deoptimizations_.Add(environment, zone()); | 780 deoptimizations_.Add(environment, zone()); |
704 } | 781 } |
705 } | 782 } |
706 | 783 |
707 | 784 |
708 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { | 785 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { |
709 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 786 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
710 ASSERT(environment->HasBeenRegistered()); | 787 ASSERT(environment->HasBeenRegistered()); |
711 int id = environment->deoptimization_index(); | 788 int id = environment->deoptimization_index(); |
712 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); | 789 |
790 Deoptimizer::BailoutType bailout_type = frame_is_built_ | |
791 ? Deoptimizer::EAGER | |
792 : Deoptimizer::LAZY; | |
793 Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type); | |
713 if (entry == NULL) { | 794 if (entry == NULL) { |
714 Abort("bailout was not prepared"); | 795 Abort("bailout was not prepared"); |
715 return; | 796 return; |
716 } | 797 } |
717 | 798 |
718 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. | 799 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. |
719 | 800 |
720 if (FLAG_deopt_every_n_times == 1 && | 801 if (FLAG_deopt_every_n_times == 1 && |
721 info_->shared_info()->opt_count() == id) { | 802 info_->shared_info()->opt_count() == id) { |
722 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 803 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
723 return; | 804 return; |
724 } | 805 } |
725 | 806 |
726 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); | 807 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); |
727 | 808 |
728 if (cc == al) { | 809 ASSERT(info()->IsStub() || frame_is_built_); |
729 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 810 if (cc == al && frame_is_built_) { |
811 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | |
Jakob Kummerow (2012/11/19 12:36:00): nit: indentation
danno (2012/11/26 17:16:18): Done.
730 } else { | 812 } else { |
731 // We often have several deopts to the same entry, reuse the last | 813 // We often have several deopts to the same entry, reuse the last |
732 // jump entry if this is the case. | 814 // jump entry if this is the case. |
815 bool is_call = !frame_is_built_; | |
733 if (deopt_jump_table_.is_empty() || | 816 if (deopt_jump_table_.is_empty() || |
734 (deopt_jump_table_.last().address != entry)) { | 817 (deopt_jump_table_.last().address != entry) || |
735 deopt_jump_table_.Add(JumpTableEntry(entry), zone()); | 818 (deopt_jump_table_.last().is_call != is_call) || |
819 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { | |
820 deopt_jump_table_.Add( | |
821 JumpTableEntry(entry, !frame_is_built_, is_call), zone()); | |
736 } | 822 } |
737 __ b(cc, &deopt_jump_table_.last().label); | 823 __ b(cc, &deopt_jump_table_.last().label); |
738 } | 824 } |
739 } | 825 } |
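The reuse test above now compares address, is_call, and needs_frame before appending. A sketch of what the extended JumpTableEntry presumably looks like (field names inferred from the uses here; the real definition lives in lithium-codegen-arm.h):

    // Sketch only; Label and Address stand in for the V8 types.
    struct JumpTableEntry {
      JumpTableEntry(Address entry, bool frame, bool call)
          : address(entry), needs_frame(frame), is_call(call) { }
      Label label;       // bound at this entry's row in the jump table
      Address address;   // deoptimizer entry point
      bool needs_frame;  // must build a STUB frame before deopting
      bool is_call;      // enter the deoptimizer via call (sets lr)
    };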
740 | 826 |
741 | 827 |
742 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 828 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
743 int length = deoptimizations_.length(); | 829 int length = deoptimizations_.length(); |
744 if (length == 0) return; | 830 if (length == 0) return; |
745 Handle<DeoptimizationInputData> data = | 831 Handle<DeoptimizationInputData> data = |
(...skipping 1020 matching lines...) | |
1766 ? ToOperand(right) | 1852 ? ToOperand(right) |
1767 : Operand(EmitLoadRegister(right, ip)); | 1853 : Operand(EmitLoadRegister(right, ip)); |
1768 Register result_reg = ToRegister(instr->result()); | 1854 Register result_reg = ToRegister(instr->result()); |
1769 __ cmp(left_reg, right_op); | 1855 __ cmp(left_reg, right_op); |
1770 if (!result_reg.is(left_reg)) { | 1856 if (!result_reg.is(left_reg)) { |
1771 __ mov(result_reg, left_reg, LeaveCC, condition); | 1857 __ mov(result_reg, left_reg, LeaveCC, condition); |
1772 } | 1858 } |
1773 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); | 1859 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); |
1774 } else { | 1860 } else { |
1775 ASSERT(instr->hydrogen()->representation().IsDouble()); | 1861 ASSERT(instr->hydrogen()->representation().IsDouble()); |
1776 DoubleRegister left_reg = ToDoubleRegister(left); | 1862 DwVfpRegister left_reg = ToDoubleRegister(left); |
1777 DoubleRegister right_reg = ToDoubleRegister(right); | 1863 DwVfpRegister right_reg = ToDoubleRegister(right); |
1778 DoubleRegister result_reg = ToDoubleRegister(instr->result()); | 1864 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); |
1779 Label check_nan_left, check_zero, return_left, return_right, done; | 1865 Label check_nan_left, check_zero, return_left, return_right, done; |
1780 __ VFPCompareAndSetFlags(left_reg, right_reg); | 1866 __ VFPCompareAndSetFlags(left_reg, right_reg); |
1781 __ b(vs, &check_nan_left); | 1867 __ b(vs, &check_nan_left); |
1782 __ b(eq, &check_zero); | 1868 __ b(eq, &check_zero); |
1783 __ b(condition, &return_left); | 1869 __ b(condition, &return_left); |
1784 __ b(al, &return_right); | 1870 __ b(al, &return_right); |
1785 | 1871 |
1786 __ bind(&check_zero); | 1872 __ bind(&check_zero); |
1787 __ VFPCompareAndSetFlags(left_reg, 0.0); | 1873 __ VFPCompareAndSetFlags(left_reg, 0.0); |
1788 __ b(ne, &return_left); // left == right != 0. | 1874 __ b(ne, &return_left); // left == right != 0. |
(...skipping 22 matching lines...) | |
1811 __ bind(&return_left); | 1897 __ bind(&return_left); |
1812 if (!left_reg.is(result_reg)) { | 1898 if (!left_reg.is(result_reg)) { |
1813 __ vmov(result_reg, left_reg); | 1899 __ vmov(result_reg, left_reg); |
1814 } | 1900 } |
1815 __ bind(&done); | 1901 __ bind(&done); |
1816 } | 1902 } |
1817 } | 1903 } |
1818 | 1904 |
1819 | 1905 |
1820 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 1906 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
1821 DoubleRegister left = ToDoubleRegister(instr->left()); | 1907 DwVfpRegister left = ToDoubleRegister(instr->left()); |
1822 DoubleRegister right = ToDoubleRegister(instr->right()); | 1908 DwVfpRegister right = ToDoubleRegister(instr->right()); |
1823 DoubleRegister result = ToDoubleRegister(instr->result()); | 1909 DwVfpRegister result = ToDoubleRegister(instr->result()); |
1824 switch (instr->op()) { | 1910 switch (instr->op()) { |
1825 case Token::ADD: | 1911 case Token::ADD: |
1826 __ vadd(result, left, right); | 1912 __ vadd(result, left, right); |
1827 break; | 1913 break; |
1828 case Token::SUB: | 1914 case Token::SUB: |
1829 __ vsub(result, left, right); | 1915 __ vsub(result, left, right); |
1830 break; | 1916 break; |
1831 case Token::MUL: | 1917 case Token::MUL: |
1832 __ vmul(result, left, right); | 1918 __ vmul(result, left, right); |
1833 break; | 1919 break; |
(...skipping 67 matching lines...) | |
1901 void LCodeGen::DoBranch(LBranch* instr) { | 1987 void LCodeGen::DoBranch(LBranch* instr) { |
1902 int true_block = chunk_->LookupDestination(instr->true_block_id()); | 1988 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
1903 int false_block = chunk_->LookupDestination(instr->false_block_id()); | 1989 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
1904 | 1990 |
1905 Representation r = instr->hydrogen()->value()->representation(); | 1991 Representation r = instr->hydrogen()->value()->representation(); |
1906 if (r.IsInteger32()) { | 1992 if (r.IsInteger32()) { |
1907 Register reg = ToRegister(instr->value()); | 1993 Register reg = ToRegister(instr->value()); |
1908 __ cmp(reg, Operand(0)); | 1994 __ cmp(reg, Operand(0)); |
1909 EmitBranch(true_block, false_block, ne); | 1995 EmitBranch(true_block, false_block, ne); |
1910 } else if (r.IsDouble()) { | 1996 } else if (r.IsDouble()) { |
1911 DoubleRegister reg = ToDoubleRegister(instr->value()); | 1997 DwVfpRegister reg = ToDoubleRegister(instr->value()); |
1912 Register scratch = scratch0(); | 1998 Register scratch = scratch0(); |
1913 | 1999 |
1914 // Test the double value. Zero and NaN are false. | 2000 // Test the double value. Zero and NaN are false. |
1915 __ VFPCompareAndLoadFlags(reg, 0.0, scratch); | 2001 __ VFPCompareAndLoadFlags(reg, 0.0, scratch); |
1916 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit)); | 2002 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit)); |
1917 EmitBranch(true_block, false_block, eq); | 2003 EmitBranch(true_block, false_block, eq); |
1918 } else { | 2004 } else { |
1919 ASSERT(r.IsTagged()); | 2005 ASSERT(r.IsTagged()); |
1920 Register reg = ToRegister(instr->value()); | 2006 Register reg = ToRegister(instr->value()); |
1921 HType type = instr->hydrogen()->value()->type(); | 2007 HType type = instr->hydrogen()->value()->type(); |
(...skipping 65 matching lines...) | |
1987 __ b(ge, &not_string); | 2073 __ b(ge, &not_string); |
1988 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); | 2074 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); |
1989 __ cmp(ip, Operand(0)); | 2075 __ cmp(ip, Operand(0)); |
1990 __ b(ne, true_label); | 2076 __ b(ne, true_label); |
1991 __ b(false_label); | 2077 __ b(false_label); |
1992 __ bind(&not_string); | 2078 __ bind(&not_string); |
1993 } | 2079 } |
1994 | 2080 |
1995 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | 2081 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
1996 // heap number -> false iff +0, -0, or NaN. | 2082 // heap number -> false iff +0, -0, or NaN. |
1997 DoubleRegister dbl_scratch = double_scratch0(); | 2083 DwVfpRegister dbl_scratch = double_scratch0(); |
1998 Label not_heap_number; | 2084 Label not_heap_number; |
1999 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 2085 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
2000 __ b(ne, &not_heap_number); | 2086 __ b(ne, &not_heap_number); |
2001 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | 2087 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
2002 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 2088 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); |
2003 __ b(vs, false_label); // NaN -> false. | 2089 __ b(vs, false_label); // NaN -> false. |
2004 __ b(eq, false_label); // +0, -0 -> false. | 2090 __ b(eq, false_label); // +0, -0 -> false. |
2005 __ b(true_label); | 2091 __ b(true_label); |
2006 __ bind(&not_heap_number); | 2092 __ bind(&not_heap_number); |
2007 } | 2093 } |
(...skipping 595 matching lines...) | |
2603 __ LoadRoot(ToRegister(instr->result()), | 2689 __ LoadRoot(ToRegister(instr->result()), |
2604 Heap::kTrueValueRootIndex, | 2690 Heap::kTrueValueRootIndex, |
2605 condition); | 2691 condition); |
2606 __ LoadRoot(ToRegister(instr->result()), | 2692 __ LoadRoot(ToRegister(instr->result()), |
2607 Heap::kFalseValueRootIndex, | 2693 Heap::kFalseValueRootIndex, |
2608 NegateCondition(condition)); | 2694 NegateCondition(condition)); |
2609 } | 2695 } |
2610 | 2696 |
2611 | 2697 |
2612 void LCodeGen::DoReturn(LReturn* instr) { | 2698 void LCodeGen::DoReturn(LReturn* instr) { |
2613 if (FLAG_trace) { | 2699 if (FLAG_trace && info()->IsOptimizing()) { |
2614 // Push the return value on the stack as the parameter. | 2700 // Push the return value on the stack as the parameter. |
2615 // Runtime::TraceExit returns its parameter in r0. | 2701 // Runtime::TraceExit returns its parameter in r0. |
2616 __ push(r0); | 2702 __ push(r0); |
2617 __ CallRuntime(Runtime::kTraceExit, 1); | 2703 __ CallRuntime(Runtime::kTraceExit, 1); |
2618 } | 2704 } |
2619 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; | 2705 if (NeedsEagerFrame()) { |
2620 __ mov(sp, fp); | 2706 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; |
2621 __ ldm(ia_w, sp, fp.bit() | lr.bit()); | 2707 __ mov(sp, fp); |
2622 __ add(sp, sp, Operand(sp_delta)); | 2708 __ ldm(ia_w, sp, fp.bit() | lr.bit()); |
2709 __ add(sp, sp, Operand(sp_delta)); | |
2710 } | |
2711 if (info()->IsStub()) { | |
2712 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
2713 } | |
2623 __ Jump(lr); | 2714 __ Jump(lr); |
2624 } | 2715 } |
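The epilogue above mirrors the prologue. A sketch of what it emits when an eager frame exists (not V8 code):

    // mov  sp, fp              ; drop spill slots and locals
    // ldm  ia_w, sp, {fp, lr}  ; restore caller's fp and return address
    // add  sp, sp, #(n+1)*4    ; pop the n parameters plus the receiver
    // Jump lr                  ; return (stubs first reload cp from fp)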
2625 | 2716 |
2626 | 2717 |
2627 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2718 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
2628 Register result = ToRegister(instr->result()); | 2719 Register result = ToRegister(instr->result()); |
2629 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); | 2720 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); |
2630 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); | 2721 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); |
2631 if (instr->hydrogen()->RequiresHoleCheck()) { | 2722 if (instr->hydrogen()->RequiresHoleCheck()) { |
2632 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2723 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
(...skipping 329 matching lines...) | |
2962 } else { | 3053 } else { |
2963 key = ToRegister(instr->key()); | 3054 key = ToRegister(instr->key()); |
2964 } | 3055 } |
2965 int element_size_shift = ElementsKindToShiftSize(elements_kind); | 3056 int element_size_shift = ElementsKindToShiftSize(elements_kind); |
2966 int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) | 3057 int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) |
2967 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 3058 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
2968 int additional_offset = instr->additional_index() << element_size_shift; | 3059 int additional_offset = instr->additional_index() << element_size_shift; |
2969 | 3060 |
2970 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || | 3061 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || |
2971 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 3062 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
2972 CpuFeatures::Scope scope(VFP3); | |
2973 DwVfpRegister result = ToDoubleRegister(instr->result()); | 3063 DwVfpRegister result = ToDoubleRegister(instr->result()); |
2974 Operand operand = key_is_constant | 3064 Operand operand = key_is_constant |
2975 ? Operand(constant_key << element_size_shift) | 3065 ? Operand(constant_key << element_size_shift) |
2976 : Operand(key, LSL, shift_size); | 3066 : Operand(key, LSL, shift_size); |
2977 __ add(scratch0(), external_pointer, operand); | 3067 __ add(scratch0(), external_pointer, operand); |
2978 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 3068 if (CpuFeatures::IsSupported(VFP2)) { |
2979 __ vldr(result.low(), scratch0(), additional_offset); | 3069 CpuFeatures::Scope scope(VFP2); |
2980 __ vcvt_f64_f32(result, result.low()); | 3070 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
2981 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS | 3071 __ vldr(result.low(), scratch0(), additional_offset); |
2982 __ vldr(result, scratch0(), additional_offset); | 3072 __ vcvt_f64_f32(result, result.low()); |
3073 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS | |
3074 __ vldr(result, scratch0(), additional_offset); | |
3075 } | |
3076 } else { | |
3077 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | |
3078 UNIMPLEMENTED(); | |
3079 } else { | |
3080 } | |
2983 } | 3081 } |
2984 } else { | 3082 } else { |
2985 Register result = ToRegister(instr->result()); | 3083 Register result = ToRegister(instr->result()); |
2986 MemOperand mem_operand = PrepareKeyedOperand( | 3084 MemOperand mem_operand = PrepareKeyedOperand( |
2987 key, external_pointer, key_is_constant, constant_key, | 3085 key, external_pointer, key_is_constant, constant_key, |
2988 element_size_shift, shift_size, | 3086 element_size_shift, shift_size, |
2989 instr->additional_index(), additional_offset); | 3087 instr->additional_index(), additional_offset); |
2990 switch (elements_kind) { | 3088 switch (elements_kind) { |
2991 case EXTERNAL_BYTE_ELEMENTS: | 3089 case EXTERNAL_BYTE_ELEMENTS: |
2992 __ ldrsb(result, mem_operand); | 3090 __ ldrsb(result, mem_operand); |
(...skipping 48 matching lines...) | |
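On the keyed addressing math above: element_size_shift is log2 of the element size, and a tagged (smi) key already carries a factor of two, so the shift drops by kSmiTagSize. A worked example (not V8 code), assuming EXTERNAL_DOUBLE_ELEMENTS with a tagged key:

    #include <cstdio>

    int main() {
      const int kSmiTagSize = 1;
      int element_size_shift = 3;                         // 8-byte doubles
      int shift_size = element_size_shift - kSmiTagSize;  // tagged key
      int key = 5 << kSmiTagSize;                         // smi-encoded index 5
      int additional_index = 0;
      int offset = (key << shift_size) +
                   (additional_index << element_size_shift);
      printf("byte offset = %d\n", offset);               // 5 * 8 = 40
      return 0;
    }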
3041 int constant_key = 0; | 3139 int constant_key = 0; |
3042 if (key_is_constant) { | 3140 if (key_is_constant) { |
3043 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 3141 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
3044 if (constant_key & 0xF0000000) { | 3142 if (constant_key & 0xF0000000) { |
3045 Abort("array index constant value too big."); | 3143 Abort("array index constant value too big."); |
3046 } | 3144 } |
3047 } else { | 3145 } else { |
3048 key = ToRegister(instr->key()); | 3146 key = ToRegister(instr->key()); |
3049 } | 3147 } |
3050 | 3148 |
3051 Operand operand = key_is_constant | 3149 int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + |
3052 ? Operand(((constant_key + instr->additional_index()) << | 3150 ((constant_key + instr->additional_index()) << element_size_shift); |
3053 element_size_shift) + | |
3054 FixedDoubleArray::kHeaderSize - kHeapObjectTag) | |
3055 : Operand(key, LSL, shift_size); | |
3056 __ add(elements, elements, operand); | |
3057 if (!key_is_constant) { | 3151 if (!key_is_constant) { |
3058 __ add(elements, elements, | 3152 __ add(elements, elements, Operand(key, LSL, shift_size)); |
3059 Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + | |
3060 (instr->additional_index() << element_size_shift))); | |
3061 } | 3153 } |
3062 | 3154 if (CpuFeatures::IsSupported(VFP2)) { |
3063 __ vldr(result, elements, 0); | 3155 CpuFeatures::Scope scope(VFP2); |
3064 if (instr->hydrogen()->RequiresHoleCheck()) { | 3156 __ add(elements, elements, Operand(base_offset)); |
3065 __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); | 3157 __ vldr(result, elements, 0); |
3066 __ cmp(scratch, Operand(kHoleNanUpper32)); | 3158 if (instr->hydrogen()->RequiresHoleCheck()) { |
3067 DeoptimizeIf(eq, instr->environment()); | 3159 __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); |
3160 __ cmp(scratch, Operand(kHoleNanUpper32)); | |
3161 DeoptimizeIf(eq, instr->environment()); | |
3162 } | |
3163 } else { | |
3164 __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize)); | |
3165 __ ldr(sfpd_lo, MemOperand(elements, base_offset)); | |
3166 if (instr->hydrogen()->RequiresHoleCheck()) { | |
3167 ASSERT(kPointerSize == sizeof(kHoleNanLower32)); | |
3168 __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); | |
3169 DeoptimizeIf(eq, instr->environment()); | |
3170 } | |
3068 } | 3171 } |
3069 } | 3172 } |
3070 | 3173 |
3071 | 3174 |
3072 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3175 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
3073 Register elements = ToRegister(instr->elements()); | 3176 Register elements = ToRegister(instr->elements()); |
3074 Register result = ToRegister(instr->result()); | 3177 Register result = ToRegister(instr->result()); |
3075 Register scratch = scratch0(); | 3178 Register scratch = scratch0(); |
3076 Register store_base = scratch; | 3179 Register store_base = scratch; |
3077 int offset = 0; | 3180 int offset = 0; |
(...skipping 451 matching lines...) | |
3529 // Smi check. | 3632 // Smi check. |
3530 __ JumpIfNotSmi(input, deferred->entry()); | 3633 __ JumpIfNotSmi(input, deferred->entry()); |
3531 // If smi, handle it directly. | 3634 // If smi, handle it directly. |
3532 EmitIntegerMathAbs(instr); | 3635 EmitIntegerMathAbs(instr); |
3533 __ bind(deferred->exit()); | 3636 __ bind(deferred->exit()); |
3534 } | 3637 } |
3535 } | 3638 } |
3536 | 3639 |
3537 | 3640 |
3538 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { | 3641 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { |
3539 DoubleRegister input = ToDoubleRegister(instr->value()); | 3642 DwVfpRegister input = ToDoubleRegister(instr->value()); |
3540 Register result = ToRegister(instr->result()); | 3643 Register result = ToRegister(instr->result()); |
3541 Register scratch = scratch0(); | 3644 Register scratch = scratch0(); |
3542 | 3645 |
3543 __ EmitVFPTruncate(kRoundToMinusInf, | 3646 __ EmitVFPTruncate(kRoundToMinusInf, |
3544 result, | 3647 result, |
3545 input, | 3648 input, |
3546 scratch, | 3649 scratch, |
3547 double_scratch0()); | 3650 double_scratch0()); |
3548 DeoptimizeIf(ne, instr->environment()); | 3651 DeoptimizeIf(ne, instr->environment()); |
3549 | 3652 |
3550 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3653 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3551 // Test for -0. | 3654 // Test for -0. |
3552 Label done; | 3655 Label done; |
3553 __ cmp(result, Operand(0)); | 3656 __ cmp(result, Operand(0)); |
3554 __ b(ne, &done); | 3657 __ b(ne, &done); |
3555 __ vmov(scratch, input.high()); | 3658 __ vmov(scratch, input.high()); |
3556 __ tst(scratch, Operand(HeapNumber::kSignMask)); | 3659 __ tst(scratch, Operand(HeapNumber::kSignMask)); |
3557 DeoptimizeIf(ne, instr->environment()); | 3660 DeoptimizeIf(ne, instr->environment()); |
3558 __ bind(&done); | 3661 __ bind(&done); |
3559 } | 3662 } |
3560 } | 3663 } |
3561 | 3664 |
3562 | 3665 |
3563 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { | 3666 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { |
3564 DoubleRegister input = ToDoubleRegister(instr->value()); | 3667 DwVfpRegister input = ToDoubleRegister(instr->value()); |
3565 Register result = ToRegister(instr->result()); | 3668 Register result = ToRegister(instr->result()); |
3566 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3669 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
3567 Register scratch = scratch0(); | 3670 Register scratch = scratch0(); |
3568 Label done, check_sign_on_zero; | 3671 Label done, check_sign_on_zero; |
3569 | 3672 |
3570 // Extract exponent bits. | 3673 // Extract exponent bits. |
3571 __ vmov(result, input.high()); | 3674 __ vmov(result, input.high()); |
3572 __ ubfx(scratch, | 3675 __ ubfx(scratch, |
3573 result, | 3676 result, |
3574 HeapNumber::kExponentShift, | 3677 HeapNumber::kExponentShift, |
(...skipping 44 matching lines...) | |
3619 __ bind(&check_sign_on_zero); | 3722 __ bind(&check_sign_on_zero); |
3620 __ vmov(scratch, input.high()); | 3723 __ vmov(scratch, input.high()); |
3621 __ tst(scratch, Operand(HeapNumber::kSignMask)); | 3724 __ tst(scratch, Operand(HeapNumber::kSignMask)); |
3622 DeoptimizeIf(ne, instr->environment()); | 3725 DeoptimizeIf(ne, instr->environment()); |
3623 } | 3726 } |
3624 __ bind(&done); | 3727 __ bind(&done); |
3625 } | 3728 } |
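The ubfx above extracts the 11 biased-exponent bits from the high word of the double. A sketch of the IEEE-754 layout it assumes (not V8 code; HeapNumber::kExponentShift corresponds to the shift of 20 here):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = -1.5;
      uint64_t bits;
      memcpy(&bits, &d, sizeof d);
      uint32_t high = (uint32_t)(bits >> 32);    // what input.high() holds
      uint32_t sign = high >> 31;                // HeapNumber::kSignMask bit
      uint32_t exponent = (high >> 20) & 0x7FF;  // ubfx(..., 20, 11)
      printf("sign=%u biased_exponent=%u\n", sign, exponent);
      return 0;
    }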
3626 | 3729 |
3627 | 3730 |
3628 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { | 3731 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { |
3629 DoubleRegister input = ToDoubleRegister(instr->value()); | 3732 DwVfpRegister input = ToDoubleRegister(instr->value()); |
3630 DoubleRegister result = ToDoubleRegister(instr->result()); | 3733 DwVfpRegister result = ToDoubleRegister(instr->result()); |
3631 __ vsqrt(result, input); | 3734 __ vsqrt(result, input); |
3632 } | 3735 } |
3633 | 3736 |
3634 | 3737 |
3635 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { | 3738 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { |
3636 DoubleRegister input = ToDoubleRegister(instr->value()); | 3739 DwVfpRegister input = ToDoubleRegister(instr->value()); |
3637 DoubleRegister result = ToDoubleRegister(instr->result()); | 3740 DwVfpRegister result = ToDoubleRegister(instr->result()); |
3638 DoubleRegister temp = ToDoubleRegister(instr->temp()); | 3741 DwVfpRegister temp = ToDoubleRegister(instr->temp()); |
3639 | 3742 |
3640 // Note that according to ECMA-262 15.8.2.13: | 3743 // Note that according to ECMA-262 15.8.2.13: |
3641 // Math.pow(-Infinity, 0.5) == Infinity | 3744 // Math.pow(-Infinity, 0.5) == Infinity |
3642 // Math.sqrt(-Infinity) == NaN | 3745 // Math.sqrt(-Infinity) == NaN |
3643 Label done; | 3746 Label done; |
3644 __ vmov(temp, -V8_INFINITY, scratch0()); | 3747 __ vmov(temp, -V8_INFINITY, scratch0()); |
3645 __ VFPCompareAndSetFlags(input, temp); | 3748 __ VFPCompareAndSetFlags(input, temp); |
3646 __ vneg(result, temp, eq); | 3749 __ vneg(result, temp, eq); |
3647 __ b(&done, eq); | 3750 __ b(&done, eq); |
3648 | 3751 |
(...skipping 813 matching lines...) | |
4462 __ bind(deferred->exit()); | 4565 __ bind(deferred->exit()); |
4463 } | 4566 } |
4464 | 4567 |
4465 | 4568 |
4466 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, | 4569 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, |
4467 LOperand* value, | 4570 LOperand* value, |
4468 IntegerSignedness signedness) { | 4571 IntegerSignedness signedness) { |
4469 Label slow; | 4572 Label slow; |
4470 Register src = ToRegister(value); | 4573 Register src = ToRegister(value); |
4471 Register dst = ToRegister(instr->result()); | 4574 Register dst = ToRegister(instr->result()); |
4472 DoubleRegister dbl_scratch = double_scratch0(); | 4575 DwVfpRegister dbl_scratch = double_scratch0(); |
4473 SwVfpRegister flt_scratch = dbl_scratch.low(); | 4576 SwVfpRegister flt_scratch = dbl_scratch.low(); |
4474 | 4577 |
4475 // Preserve the value of all registers. | 4578 // Preserve the value of all registers. |
4476 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 4579 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
4477 | 4580 |
4478 Label done; | 4581 Label done; |
4479 if (signedness == SIGNED_INT32) { | 4582 if (signedness == SIGNED_INT32) { |
4480 // There was overflow, so bits 30 and 31 of the original integer | 4583 // There was overflow, so bits 30 and 31 of the original integer |
4481 // disagree. Try to allocate a heap number in new space and store | 4584 // disagree. Try to allocate a heap number in new space and store |
4482 // the value in there. If that fails, call the runtime system. | 4585 // the value in there. If that fails, call the runtime system. |
4483 if (dst.is(src)) { | 4586 if (dst.is(src)) { |
4484 __ SmiUntag(src, dst); | 4587 __ SmiUntag(src, dst); |
4485 __ eor(src, src, Operand(0x80000000)); | 4588 __ eor(src, src, Operand(0x80000000)); |
4486 } | 4589 } |
4487 __ vmov(flt_scratch, src); | 4590 if (CpuFeatures::IsSupported(VFP2)) { |
4488 __ vcvt_f64_s32(dbl_scratch, flt_scratch); | 4591 CpuFeatures::Scope scope(VFP2); |
4592 __ vmov(flt_scratch, src); | |
4593 __ vcvt_f64_s32(dbl_scratch, flt_scratch); | |
4594 } else { | |
4595 FloatingPointHelper::Destination dest = | |
4596 FloatingPointHelper::kCoreRegisters; | |
4597 FloatingPointHelper::ConvertIntToDouble(masm(), | |
Jakob Kummerow (2012/11/19 12:36:00): nit: fix format
danno (2012/11/26 17:16:18): Done.
4598 src, | |
4599 dest, | |
4600 d0, | |
4601 sfpd_lo, | |
4602 sfpd_hi, | |
4603 r9, | |
4604 s0); | |
4605 } | |
4489 } else { | 4606 } else { |
4490 __ vmov(flt_scratch, src); | 4607 __ vmov(flt_scratch, src); |
4491 __ vcvt_f64_u32(dbl_scratch, flt_scratch); | 4608 __ vcvt_f64_u32(dbl_scratch, flt_scratch); |
4492 } | 4609 } |
4493 | 4610 |
4494 if (FLAG_inline_new) { | 4611 if (FLAG_inline_new) { |
4495 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | 4612 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
4496 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); | 4613 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); |
4497 __ Move(dst, r5); | 4614 __ Move(dst, r5); |
4498 __ b(&done); | 4615 __ b(&done); |
4499 } | 4616 } |
4500 | 4617 |
4501 // Slow case: Call the runtime system to do the number allocation. | 4618 // Slow case: Call the runtime system to do the number allocation. |
4502 __ bind(&slow); | 4619 __ bind(&slow); |
4503 | 4620 |
4504 // TODO(3095996): Put a valid pointer value in the stack slot where the result | 4621 // TODO(3095996): Put a valid pointer value in the stack slot where the result |
4505 // register is stored, as this register is in the pointer map, but contains an | 4622 // register is stored, as this register is in the pointer map, but contains an |
4506 // integer value. | 4623 // integer value. |
4507 __ mov(ip, Operand(0)); | 4624 __ mov(ip, Operand(0)); |
4508 __ StoreToSafepointRegisterSlot(ip, dst); | 4625 __ StoreToSafepointRegisterSlot(ip, dst); |
4509 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 4626 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); |
4510 __ Move(dst, r0); | 4627 __ Move(dst, r0); |
4511 __ sub(dst, dst, Operand(kHeapObjectTag)); | 4628 __ sub(dst, dst, Operand(kHeapObjectTag)); |
4512 | 4629 |
4513 // Done. Put the value in dbl_scratch into the value of the allocated heap | 4630 // Done. Put the value in dbl_scratch into the value of the allocated heap |
4514 // number. | 4631 // number. |
4515 __ bind(&done); | 4632 __ bind(&done); |
4516 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); | 4633 if (CpuFeatures::IsSupported(VFP2)) { |
4634 CpuFeatures::Scope scope(VFP2); | |
4635 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); | |
4636 } else { | |
4637 __ str(sfpd_lo, MemOperand(dst, HeapNumber::kValueOffset)); | |
4638 __ str(sfpd_hi, MemOperand(dst, HeapNumber::kValueOffset + kPointerSize)); | |
4639 } | |
4517 __ add(dst, dst, Operand(kHeapObjectTag)); | 4640 __ add(dst, dst, Operand(kHeapObjectTag)); |
4518 __ StoreToSafepointRegisterSlot(dst, dst); | 4641 __ StoreToSafepointRegisterSlot(dst, dst); |
4519 } | 4642 } |
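On the non-VFP2 path above the untagged double lives in the core-register pair sfpd_lo/sfpd_hi, so one vstr becomes two word stores; on little-endian ARM the low word of the IEEE value sits at the lower address, hence sfpd_lo at kValueOffset and sfpd_hi at kValueOffset + kPointerSize. A sketch of the equivalence (not V8 code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = 1.5;
      uint64_t bits;
      memcpy(&bits, &d, sizeof d);
      uint32_t mem[2];
      mem[0] = (uint32_t)bits;          // str sfpd_lo, [dst + kValueOffset]
      mem[1] = (uint32_t)(bits >> 32);  // str sfpd_hi, [dst + kValueOffset + 4]
      double back;
      memcpy(&back, mem, sizeof back);
      printf("round-trips: %d\n", back == d);  // 1 on little-endian hosts
      return 0;
    }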
4520 | 4643 |
4521 | 4644 |
4522 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 4645 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
4523 class DeferredNumberTagD: public LDeferredCode { | 4646 class DeferredNumberTagD: public LDeferredCode { |
4524 public: | 4647 public: |
4525 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 4648 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
4526 : LDeferredCode(codegen), instr_(instr) { } | 4649 : LDeferredCode(codegen), instr_(instr) { } |
4527 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } | 4650 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } |
4528 virtual LInstruction* instr() { return instr_; } | 4651 virtual LInstruction* instr() { return instr_; } |
4529 private: | 4652 private: |
4530 LNumberTagD* instr_; | 4653 LNumberTagD* instr_; |
4531 }; | 4654 }; |
4532 | 4655 |
4533 DoubleRegister input_reg = ToDoubleRegister(instr->value()); | 4656 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
4534 Register scratch = scratch0(); | 4657 Register scratch = scratch0(); |
4535 Register reg = ToRegister(instr->result()); | 4658 Register reg = ToRegister(instr->result()); |
4536 Register temp1 = ToRegister(instr->temp()); | 4659 Register temp1 = ToRegister(instr->temp()); |
4537 Register temp2 = ToRegister(instr->temp2()); | 4660 Register temp2 = ToRegister(instr->temp2()); |
4538 | 4661 |
4539 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 4662 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
4540 if (FLAG_inline_new) { | 4663 if (FLAG_inline_new) { |
4541 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); | 4664 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); |
4542 // We want the untagged address first for performance | 4665 // We want the untagged address first for performance |
4543 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), | 4666 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), |
4544 DONT_TAG_RESULT); | 4667 DONT_TAG_RESULT); |
4545 } else { | 4668 } else { |
4546 __ jmp(deferred->entry()); | 4669 __ jmp(deferred->entry()); |
4547 } | 4670 } |
4548 __ bind(deferred->exit()); | 4671 __ bind(deferred->exit()); |
4549 __ vstr(input_reg, reg, HeapNumber::kValueOffset); | 4672 if (CpuFeatures::IsSupported(VFP2)) { |
4673 CpuFeatures::Scope scope(VFP2); | |
4674 __ vstr(input_reg, reg, HeapNumber::kValueOffset); | |
4675 } else { | |
4676 __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); | |
4677 __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); | |
4678 } | |
4550 // Now that we have finished with the object's real address tag it | 4679 // Now that we have finished with the object's real address tag it |
4551 __ add(reg, reg, Operand(kHeapObjectTag)); | 4680 __ add(reg, reg, Operand(kHeapObjectTag)); |
4552 } | 4681 } |
4553 | 4682 |
4554 | 4683 |
4555 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4684 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
4556 // TODO(3095996): Get rid of this. For now, we need to make the | 4685 // TODO(3095996): Get rid of this. For now, we need to make the |
4557 // result register contain a valid pointer because it is already | 4686 // result register contain a valid pointer because it is already |
4558 // contained in the register pointer map. | 4687 // contained in the register pointer map. |
4559 Register reg = ToRegister(instr->result()); | 4688 Register reg = ToRegister(instr->result()); |
(...skipping 20 matching lines...) | |
4580 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4709 // If the input is a HeapObject, SmiUntag will set the carry flag. |
4581 __ SmiUntag(result, input, SetCC); | 4710 __ SmiUntag(result, input, SetCC); |
4582 DeoptimizeIf(cs, instr->environment()); | 4711 DeoptimizeIf(cs, instr->environment()); |
4583 } else { | 4712 } else { |
4584 __ SmiUntag(result, input); | 4713 __ SmiUntag(result, input); |
4585 } | 4714 } |
4586 } | 4715 } |
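The SetCC trick works because 32-bit smis are stored as value << 1 with a zero low bit, heap object pointers have that bit set, and an ASR #1 with the S bit copies the bit shifted out into the carry flag, so cs fires exactly for non-smis. The tagging scheme in plain C++ (a sketch, not the V8 helpers):

    #include <cstdint>
    constexpr int kSmiTagSize = 1;
    int32_t SmiTag(int32_t v)   { return v << kSmiTagSize; }   // low bit becomes 0
    int32_t SmiUntag(int32_t s) { return s >> kSmiTagSize; }   // arithmetic shift back
    bool    IsSmi(int32_t word) { return (word & 1) == 0; }    // heap objects carry bit 0 = 1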
4587 | 4716 |
4588 | 4717 |
4589 void LCodeGen::EmitNumberUntagD(Register input_reg, | 4718 void LCodeGen::EmitNumberUntagD(Register input_reg, |
4590 DoubleRegister result_reg, | 4719 DwVfpRegister result_reg, |
4591 bool deoptimize_on_undefined, | 4720 bool deoptimize_on_undefined, |
4592 bool deoptimize_on_minus_zero, | 4721 bool deoptimize_on_minus_zero, |
4593 LEnvironment* env) { | 4722 LEnvironment* env) { |
4594 Register scratch = scratch0(); | 4723 Register scratch = scratch0(); |
4595 SwVfpRegister flt_scratch = double_scratch0().low(); | 4724 SwVfpRegister flt_scratch = double_scratch0().low(); |
4596 ASSERT(!result_reg.is(double_scratch0())); | 4725 ASSERT(!result_reg.is(double_scratch0())); |
4597 | 4726 |
4598 Label load_smi, heap_number, done; | 4727 Label load_smi, heap_number, done; |
4599 | 4728 |
4600 // Smi check. | 4729 // Smi check. |
(...skipping 151 matching lines...) | |
4752 } | 4881 } |
4753 | 4882 |
4754 | 4883 |
4755 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 4884 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
4756 LOperand* input = instr->value(); | 4885 LOperand* input = instr->value(); |
4757 ASSERT(input->IsRegister()); | 4886 ASSERT(input->IsRegister()); |
4758 LOperand* result = instr->result(); | 4887 LOperand* result = instr->result(); |
4759 ASSERT(result->IsDoubleRegister()); | 4888 ASSERT(result->IsDoubleRegister()); |
4760 | 4889 |
4761 Register input_reg = ToRegister(input); | 4890 Register input_reg = ToRegister(input); |
4762 DoubleRegister result_reg = ToDoubleRegister(result); | 4891 DwVfpRegister result_reg = ToDoubleRegister(result); |
4763 | 4892 |
4764 EmitNumberUntagD(input_reg, result_reg, | 4893 EmitNumberUntagD(input_reg, result_reg, |
4765 instr->hydrogen()->deoptimize_on_undefined(), | 4894 instr->hydrogen()->deoptimize_on_undefined(), |
4766 instr->hydrogen()->deoptimize_on_minus_zero(), | 4895 instr->hydrogen()->deoptimize_on_minus_zero(), |
4767 instr->environment()); | 4896 instr->environment()); |
4768 } | 4897 } |
4769 | 4898 |
4770 | 4899 |
4771 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 4900 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
4772 Register result_reg = ToRegister(instr->result()); | 4901 Register result_reg = ToRegister(instr->result()); |
(...skipping 100 matching lines...) | |
4873 __ cmp(reg, Operand(target)); | 5002 __ cmp(reg, Operand(target)); |
4874 } | 5003 } |
4875 DeoptimizeIf(ne, instr->environment()); | 5004 DeoptimizeIf(ne, instr->environment()); |
4876 } | 5005 } |
4877 | 5006 |
4878 | 5007 |
4879 void LCodeGen::DoCheckMapCommon(Register reg, | 5008 void LCodeGen::DoCheckMapCommon(Register reg, |
4880 Register scratch, | 5009 Register scratch, |
4881 Handle<Map> map, | 5010 Handle<Map> map, |
4882 CompareMapMode mode, | 5011 CompareMapMode mode, |
4883 LEnvironment* env) { | 5012 LInstruction* instr) { |
4884 Label success; | 5013 Label success; |
4885 __ CompareMap(reg, scratch, map, &success, mode); | 5014 __ CompareMap(reg, scratch, map, &success, mode); |
4886 DeoptimizeIf(ne, env); | 5015 DeoptimizeIf(ne, instr->environment()); |
4887 __ bind(&success); | 5016 __ bind(&success); |
4888 } | 5017 } |
4889 | 5018 |
4890 | 5019 |
4891 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5020 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
4892 Register scratch = scratch0(); | 5021 Register scratch = scratch0(); |
4893 LOperand* input = instr->value(); | 5022 LOperand* input = instr->value(); |
4894 ASSERT(input->IsRegister()); | 5023 ASSERT(input->IsRegister()); |
4895 Register reg = ToRegister(input); | 5024 Register reg = ToRegister(input); |
4896 | 5025 |
4897 Label success; | 5026 Label success; |
4898 SmallMapList* map_set = instr->hydrogen()->map_set(); | 5027 SmallMapList* map_set = instr->hydrogen()->map_set(); |
4899 for (int i = 0; i < map_set->length() - 1; i++) { | 5028 for (int i = 0; i < map_set->length() - 1; i++) { |
4900 Handle<Map> map = map_set->at(i); | 5029 Handle<Map> map = map_set->at(i); |
4901 __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP); | 5030 __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP); |
4902 __ b(eq, &success); | 5031 __ b(eq, &success); |
4903 } | 5032 } |
4904 Handle<Map> map = map_set->last(); | 5033 Handle<Map> map = map_set->last(); |
4905 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); | 5034 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr); |
4906 __ bind(&success); | 5035 __ bind(&success); |
4907 } | 5036 } |
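Note the control flow: each of the first length() - 1 maps branches to success on a match, and only the final candidate deoptimizes on mismatch via DoCheckMapCommon. The same shape in pseudo-C++ (hypothetical types, for structure only):

    #include <cstddef>
    #include <vector>
    struct Map;
    // Sketch: succeed on the first matching map; only the last mismatch bails out.
    bool CheckMaps(const Map* actual, const std::vector<const Map*>& allowed) {
      for (size_t i = 0; i + 1 < allowed.size(); ++i) {
        if (actual == allowed[i]) return true;     // __ b(eq, &success)
      }
      return actual == allowed.back();             // DeoptimizeIf(ne, ...) on failure
    }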
4908 | 5037 |
4909 | 5038 |
4910 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5039 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
4911 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5040 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); |
4912 Register result_reg = ToRegister(instr->result()); | 5041 Register result_reg = ToRegister(instr->result()); |
4913 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 5042 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); |
4914 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); | 5043 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); |
4915 } | 5044 } |
4916 | 5045 |
4917 | 5046 |
4918 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 5047 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
4919 Register unclamped_reg = ToRegister(instr->unclamped()); | 5048 Register unclamped_reg = ToRegister(instr->unclamped()); |
4920 Register result_reg = ToRegister(instr->result()); | 5049 Register result_reg = ToRegister(instr->result()); |
4921 __ ClampUint8(result_reg, unclamped_reg); | 5050 __ ClampUint8(result_reg, unclamped_reg); |
4922 } | 5051 } |
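ClampUint8 saturates a signed 32-bit value into [0, 255]; on ARM this plausibly lowers to a single usat instruction, though that is the macro assembler's concern. The scalar equivalent, as a sketch:

    #include <cstdint>
    // Saturate to the uint8 range.
    uint8_t ClampUint8(int32_t v) {
      if (v < 0)   return 0;
      if (v > 255) return 255;
      return static_cast<uint8_t>(v);
    }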
4923 | 5052 |
4924 | 5053 |
4925 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 5054 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
4926 Register scratch = scratch0(); | 5055 Register scratch = scratch0(); |
4927 Register input_reg = ToRegister(instr->unclamped()); | 5056 Register input_reg = ToRegister(instr->unclamped()); |
4928 Register result_reg = ToRegister(instr->result()); | 5057 Register result_reg = ToRegister(instr->result()); |
4929 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 5058 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); |
4930 Label is_smi, done, heap_number; | 5059 Label is_smi, done, heap_number; |
4931 | 5060 |
4932 // Both smi and heap number cases are handled. | 5061 // Both smi and heap number cases are handled. |
4933 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 5062 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); |
4934 | 5063 |
4935 // Check for heap number. | 5064 // Check for heap number. |
4936 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5065 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
4937 __ cmp(scratch, Operand(factory()->heap_number_map())); | 5066 __ cmp(scratch, Operand(factory()->heap_number_map())); |
4938 __ b(eq, &heap_number); | 5067 __ b(eq, &heap_number); |
4939 | 5068 |
(...skipping 27 matching lines...) | |
4967 Handle<JSObject> holder = instr->holder(); | 5096 Handle<JSObject> holder = instr->holder(); |
4968 Handle<JSObject> current_prototype = instr->prototype(); | 5097 Handle<JSObject> current_prototype = instr->prototype(); |
4969 | 5098 |
4970 // Load prototype object. | 5099 // Load prototype object. |
4971 __ LoadHeapObject(temp1, current_prototype); | 5100 __ LoadHeapObject(temp1, current_prototype); |
4972 | 5101 |
4973 // Check prototype maps up to the holder. | 5102 // Check prototype maps up to the holder. |
4974 while (!current_prototype.is_identical_to(holder)) { | 5103 while (!current_prototype.is_identical_to(holder)) { |
4975 DoCheckMapCommon(temp1, temp2, | 5104 DoCheckMapCommon(temp1, temp2, |
4976 Handle<Map>(current_prototype->map()), | 5105 Handle<Map>(current_prototype->map()), |
4977 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); | 5106 ALLOW_ELEMENT_TRANSITION_MAPS, instr); |
4978 current_prototype = | 5107 current_prototype = |
4979 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); | 5108 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); |
4980 // Load next prototype object. | 5109 // Load next prototype object. |
4981 __ LoadHeapObject(temp1, current_prototype); | 5110 __ LoadHeapObject(temp1, current_prototype); |
4982 } | 5111 } |
4983 | 5112 |
4984 // Check the holder map. | 5113 // Check the holder map. |
4985 DoCheckMapCommon(temp1, temp2, | 5114 DoCheckMapCommon(temp1, temp2, |
4986 Handle<Map>(current_prototype->map()), | 5115 Handle<Map>(current_prototype->map()), |
4987 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); | 5116 ALLOW_ELEMENT_TRANSITION_MAPS, instr); |
4988 } | 5117 } |
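DoCheckPrototypeMaps walks every prototype between the receiver and the holder, deoptimizing if any object's map no longer matches the one recorded at compile time, then checks the holder's own map last. The shape of the walk in pseudo-C++ (hypothetical types; the real maps are embedded as handles):

    #include <cstddef>
    #include <vector>
    struct Map;
    struct Obj { const Map* map; const Obj* prototype; };
    // expected[i] is the map recorded for the i-th object; the holder's map is last.
    bool CheckPrototypeChain(const Obj* proto, const Obj* holder,
                             const std::vector<const Map*>& expected) {
      size_t i = 0;
      while (proto != holder) {
        if (proto->map != expected[i++]) return false;  // DeoptimizeIf(ne, ...)
        proto = proto->prototype;
      }
      return proto->map == expected[i];                 // final holder-map check
    }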
4989 | 5118 |
4990 | 5119 |
4991 void LCodeGen::DoAllocateObject(LAllocateObject* instr) { | 5120 void LCodeGen::DoAllocateObject(LAllocateObject* instr) { |
4992 class DeferredAllocateObject: public LDeferredCode { | 5121 class DeferredAllocateObject: public LDeferredCode { |
4993 public: | 5122 public: |
4994 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) | 5123 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) |
4995 : LDeferredCode(codegen), instr_(instr) { } | 5124 : LDeferredCode(codegen), instr_(instr) { } |
4996 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } | 5125 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } |
4997 virtual LInstruction* instr() { return instr_; } | 5126 virtual LInstruction* instr() { return instr_; } |
(...skipping 498 matching lines...) | |
5496 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); | 5625 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); |
5497 | 5626 |
5498 // Check the marker in the calling frame. | 5627 // Check the marker in the calling frame. |
5499 __ bind(&check_frame_marker); | 5628 __ bind(&check_frame_marker); |
5500 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); | 5629 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); |
5501 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); | 5630 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); |
5502 } | 5631 } |
5503 | 5632 |
5504 | 5633 |
5505 void LCodeGen::EnsureSpaceForLazyDeopt() { | 5634 void LCodeGen::EnsureSpaceForLazyDeopt() { |
5635 if (info()->IsStub()) return; | |
5506 // Ensure that we have enough space after the previous lazy-bailout | 5636 // Ensure that we have enough space after the previous lazy-bailout |
5507 // instruction for patching the code here. | 5637 // instruction for patching the code here. |
5508 int current_pc = masm()->pc_offset(); | 5638 int current_pc = masm()->pc_offset(); |
5509 int patch_size = Deoptimizer::patch_size(); | 5639 int patch_size = Deoptimizer::patch_size(); |
5510 if (current_pc < last_lazy_deopt_pc_ + patch_size) { | 5640 if (current_pc < last_lazy_deopt_pc_ + patch_size) { |
5511 // Block literal pool emission for duration of padding. | 5641 // Block literal pool emission for duration of padding. |
5512 Assembler::BlockConstPoolScope block_const_pool(masm()); | 5642 Assembler::BlockConstPoolScope block_const_pool(masm()); |
5513 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; | 5643 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; |
5514 ASSERT_EQ(0, padding_size % Assembler::kInstrSize); | 5644 ASSERT_EQ(0, padding_size % Assembler::kInstrSize); |
5515 while (padding_size > 0) { | 5645 while (padding_size > 0) { |
(...skipping 211 matching lines...) | |
5727 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 5857 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); |
5728 __ ldr(result, FieldMemOperand(scratch, | 5858 __ ldr(result, FieldMemOperand(scratch, |
5729 FixedArray::kHeaderSize - kPointerSize)); | 5859 FixedArray::kHeaderSize - kPointerSize)); |
5730 __ bind(&done); | 5860 __ bind(&done); |
5731 } | 5861 } |
5732 | 5862 |
5733 | 5863 |
5734 #undef __ | 5864 #undef __ |
5735 | 5865 |
5736 } } // namespace v8::internal | 5866 } } // namespace v8::internal |