OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/code-stubs.h" | 7 #include "src/code-stubs.h" |
8 #include "src/hydrogen-osr.h" | 8 #include "src/hydrogen-osr.h" |
9 #include "src/mips64/lithium-codegen-mips64.h" | 9 #include "src/mips64/lithium-codegen-mips64.h" |
10 #include "src/mips64/lithium-gap-resolver-mips64.h" | 10 #include "src/mips64/lithium-gap-resolver-mips64.h" |
(...skipping 23 matching lines...) Expand all Loading... |
34 LCodeGen* codegen_; | 34 LCodeGen* codegen_; |
35 LPointerMap* pointers_; | 35 LPointerMap* pointers_; |
36 Safepoint::DeoptMode deopt_mode_; | 36 Safepoint::DeoptMode deopt_mode_; |
37 }; | 37 }; |
38 | 38 |
39 | 39 |
40 #define __ masm()-> | 40 #define __ masm()-> |
41 | 41 |
42 bool LCodeGen::GenerateCode() { | 42 bool LCodeGen::GenerateCode() { |
43 LPhase phase("Z_Code generation", chunk()); | 43 LPhase phase("Z_Code generation", chunk()); |
44 ASSERT(is_unused()); | 44 DCHECK(is_unused()); |
45 status_ = GENERATING; | 45 status_ = GENERATING; |
46 | 46 |
47 // Open a frame scope to indicate that there is a frame on the stack. The | 47 // Open a frame scope to indicate that there is a frame on the stack. The |
48 // NONE indicates that the scope shouldn't actually generate code to set up | 48 // NONE indicates that the scope shouldn't actually generate code to set up |
49 // the frame (that is done in GeneratePrologue). | 49 // the frame (that is done in GeneratePrologue). |
50 FrameScope frame_scope(masm_, StackFrame::NONE); | 50 FrameScope frame_scope(masm_, StackFrame::NONE); |
51 | 51 |
52 return GeneratePrologue() && | 52 return GeneratePrologue() && |
53 GenerateBody() && | 53 GenerateBody() && |
54 GenerateDeferredCode() && | 54 GenerateDeferredCode() && |
55 GenerateDeoptJumpTable() && | 55 GenerateDeoptJumpTable() && |
56 GenerateSafepointTable(); | 56 GenerateSafepointTable(); |
57 } | 57 } |
58 | 58 |
59 | 59 |
60 void LCodeGen::FinishCode(Handle<Code> code) { | 60 void LCodeGen::FinishCode(Handle<Code> code) { |
61 ASSERT(is_done()); | 61 DCHECK(is_done()); |
62 code->set_stack_slots(GetStackSlotCount()); | 62 code->set_stack_slots(GetStackSlotCount()); |
63 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); | 63 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
64 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); | 64 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); |
65 PopulateDeoptimizationData(code); | 65 PopulateDeoptimizationData(code); |
66 } | 66 } |
67 | 67 |
68 | 68 |
69 void LCodeGen::SaveCallerDoubles() { | 69 void LCodeGen::SaveCallerDoubles() { |
70 ASSERT(info()->saves_caller_doubles()); | 70 DCHECK(info()->saves_caller_doubles()); |
71 ASSERT(NeedsEagerFrame()); | 71 DCHECK(NeedsEagerFrame()); |
72 Comment(";;; Save clobbered callee double registers"); | 72 Comment(";;; Save clobbered callee double registers"); |
73 int count = 0; | 73 int count = 0; |
74 BitVector* doubles = chunk()->allocated_double_registers(); | 74 BitVector* doubles = chunk()->allocated_double_registers(); |
75 BitVector::Iterator save_iterator(doubles); | 75 BitVector::Iterator save_iterator(doubles); |
76 while (!save_iterator.Done()) { | 76 while (!save_iterator.Done()) { |
77 __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()), | 77 __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()), |
78 MemOperand(sp, count * kDoubleSize)); | 78 MemOperand(sp, count * kDoubleSize)); |
79 save_iterator.Advance(); | 79 save_iterator.Advance(); |
80 count++; | 80 count++; |
81 } | 81 } |
82 } | 82 } |
83 | 83 |
84 | 84 |
85 void LCodeGen::RestoreCallerDoubles() { | 85 void LCodeGen::RestoreCallerDoubles() { |
86 ASSERT(info()->saves_caller_doubles()); | 86 DCHECK(info()->saves_caller_doubles()); |
87 ASSERT(NeedsEagerFrame()); | 87 DCHECK(NeedsEagerFrame()); |
88 Comment(";;; Restore clobbered callee double registers"); | 88 Comment(";;; Restore clobbered callee double registers"); |
89 BitVector* doubles = chunk()->allocated_double_registers(); | 89 BitVector* doubles = chunk()->allocated_double_registers(); |
90 BitVector::Iterator save_iterator(doubles); | 90 BitVector::Iterator save_iterator(doubles); |
91 int count = 0; | 91 int count = 0; |
92 while (!save_iterator.Done()) { | 92 while (!save_iterator.Done()) { |
93 __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()), | 93 __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()), |
94 MemOperand(sp, count * kDoubleSize)); | 94 MemOperand(sp, count * kDoubleSize)); |
95 save_iterator.Advance(); | 95 save_iterator.Advance(); |
96 count++; | 96 count++; |
97 } | 97 } |
98 } | 98 } |
99 | 99 |
100 | 100 |
101 bool LCodeGen::GeneratePrologue() { | 101 bool LCodeGen::GeneratePrologue() { |
102 ASSERT(is_generating()); | 102 DCHECK(is_generating()); |
103 | 103 |
104 if (info()->IsOptimizing()) { | 104 if (info()->IsOptimizing()) { |
105 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 105 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
106 | 106 |
107 #ifdef DEBUG | 107 #ifdef DEBUG |
108 if (strlen(FLAG_stop_at) > 0 && | 108 if (strlen(FLAG_stop_at) > 0 && |
109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { | 109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { |
110 __ stop("stop_at"); | 110 __ stop("stop_at"); |
111 } | 111 } |
112 #endif | 112 #endif |
(...skipping 118 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
231 void LCodeGen::GenerateOsrPrologue() { | 231 void LCodeGen::GenerateOsrPrologue() { |
232 // Generate the OSR entry prologue at the first unknown OSR value, or if there | 232 // Generate the OSR entry prologue at the first unknown OSR value, or if there |
233 // are none, at the OSR entrypoint instruction. | 233 // are none, at the OSR entrypoint instruction. |
234 if (osr_pc_offset_ >= 0) return; | 234 if (osr_pc_offset_ >= 0) return; |
235 | 235 |
236 osr_pc_offset_ = masm()->pc_offset(); | 236 osr_pc_offset_ = masm()->pc_offset(); |
237 | 237 |
238 // Adjust the frame size, subsuming the unoptimized frame into the | 238 // Adjust the frame size, subsuming the unoptimized frame into the |
239 // optimized frame. | 239 // optimized frame. |
240 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); | 240 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); |
241 ASSERT(slots >= 0); | 241 DCHECK(slots >= 0); |
242 __ Dsubu(sp, sp, Operand(slots * kPointerSize)); | 242 __ Dsubu(sp, sp, Operand(slots * kPointerSize)); |
243 } | 243 } |
244 | 244 |
245 | 245 |
246 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { | 246 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { |
247 if (instr->IsCall()) { | 247 if (instr->IsCall()) { |
248 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 248 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
249 } | 249 } |
250 if (!instr->IsLazyBailout() && !instr->IsGap()) { | 250 if (!instr->IsLazyBailout() && !instr->IsGap()) { |
251 safepoints_.BumpLastLazySafepointIndex(); | 251 safepoints_.BumpLastLazySafepointIndex(); |
252 } | 252 } |
253 } | 253 } |
254 | 254 |
255 | 255 |
256 bool LCodeGen::GenerateDeferredCode() { | 256 bool LCodeGen::GenerateDeferredCode() { |
257 ASSERT(is_generating()); | 257 DCHECK(is_generating()); |
258 if (deferred_.length() > 0) { | 258 if (deferred_.length() > 0) { |
259 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 259 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
260 LDeferredCode* code = deferred_[i]; | 260 LDeferredCode* code = deferred_[i]; |
261 | 261 |
262 HValue* value = | 262 HValue* value = |
263 instructions_->at(code->instruction_index())->hydrogen_value(); | 263 instructions_->at(code->instruction_index())->hydrogen_value(); |
264 RecordAndWritePosition( | 264 RecordAndWritePosition( |
265 chunk()->graph()->SourcePositionToScriptPosition(value->position())); | 265 chunk()->graph()->SourcePositionToScriptPosition(value->position())); |
266 | 266 |
267 Comment(";;; <@%d,#%d> " | 267 Comment(";;; <@%d,#%d> " |
268 "-------------------- Deferred %s --------------------", | 268 "-------------------- Deferred %s --------------------", |
269 code->instruction_index(), | 269 code->instruction_index(), |
270 code->instr()->hydrogen_value()->id(), | 270 code->instr()->hydrogen_value()->id(), |
271 code->instr()->Mnemonic()); | 271 code->instr()->Mnemonic()); |
272 __ bind(code->entry()); | 272 __ bind(code->entry()); |
273 if (NeedsDeferredFrame()) { | 273 if (NeedsDeferredFrame()) { |
274 Comment(";;; Build frame"); | 274 Comment(";;; Build frame"); |
275 ASSERT(!frame_is_built_); | 275 DCHECK(!frame_is_built_); |
276 ASSERT(info()->IsStub()); | 276 DCHECK(info()->IsStub()); |
277 frame_is_built_ = true; | 277 frame_is_built_ = true; |
278 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); | 278 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); |
279 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); | 279 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
280 __ push(scratch0()); | 280 __ push(scratch0()); |
281 __ Daddu(fp, sp, | 281 __ Daddu(fp, sp, |
282 Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 282 Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
283 Comment(";;; Deferred code"); | 283 Comment(";;; Deferred code"); |
284 } | 284 } |
285 code->Generate(); | 285 code->Generate(); |
286 if (NeedsDeferredFrame()) { | 286 if (NeedsDeferredFrame()) { |
287 Comment(";;; Destroy frame"); | 287 Comment(";;; Destroy frame"); |
288 ASSERT(frame_is_built_); | 288 DCHECK(frame_is_built_); |
289 __ pop(at); | 289 __ pop(at); |
290 __ MultiPop(cp.bit() | fp.bit() | ra.bit()); | 290 __ MultiPop(cp.bit() | fp.bit() | ra.bit()); |
291 frame_is_built_ = false; | 291 frame_is_built_ = false; |
292 } | 292 } |
293 __ jmp(code->exit()); | 293 __ jmp(code->exit()); |
294 } | 294 } |
295 } | 295 } |
296 // Deferred code is the last part of the instruction sequence. Mark | 296 // Deferred code is the last part of the instruction sequence. Mark |
297 // the generated code as done unless we bailed out. | 297 // the generated code as done unless we bailed out. |
298 if (!is_aborted()) status_ = DONE; | 298 if (!is_aborted()) status_ = DONE; |
(...skipping 14 matching lines...) Expand all Loading... |
313 Address entry = deopt_jump_table_[i].address; | 313 Address entry = deopt_jump_table_[i].address; |
314 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; | 314 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; |
315 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 315 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
316 if (id == Deoptimizer::kNotDeoptimizationEntry) { | 316 if (id == Deoptimizer::kNotDeoptimizationEntry) { |
317 Comment(";;; jump table entry %d.", i); | 317 Comment(";;; jump table entry %d.", i); |
318 } else { | 318 } else { |
319 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); | 319 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); |
320 } | 320 } |
321 __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry))); | 321 __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry))); |
322 if (deopt_jump_table_[i].needs_frame) { | 322 if (deopt_jump_table_[i].needs_frame) { |
323 ASSERT(!info()->saves_caller_doubles()); | 323 DCHECK(!info()->saves_caller_doubles()); |
324 if (needs_frame.is_bound()) { | 324 if (needs_frame.is_bound()) { |
325 __ Branch(&needs_frame); | 325 __ Branch(&needs_frame); |
326 } else { | 326 } else { |
327 __ bind(&needs_frame); | 327 __ bind(&needs_frame); |
328 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); | 328 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); |
329 // This variant of deopt can only be used with stubs. Since we don't | 329 // This variant of deopt can only be used with stubs. Since we don't |
330 // have a function pointer to install in the stack frame that we're | 330 // have a function pointer to install in the stack frame that we're |
331 // building, install a special marker there instead. | 331 // building, install a special marker there instead. |
332 ASSERT(info()->IsStub()); | 332 DCHECK(info()->IsStub()); |
333 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); | 333 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
334 __ push(scratch0()); | 334 __ push(scratch0()); |
335 __ Daddu(fp, sp, | 335 __ Daddu(fp, sp, |
336 Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 336 Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
337 __ Call(t9); | 337 __ Call(t9); |
338 } | 338 } |
339 } else { | 339 } else { |
340 if (info()->saves_caller_doubles()) { | 340 if (info()->saves_caller_doubles()) { |
341 ASSERT(info()->IsStub()); | 341 DCHECK(info()->IsStub()); |
342 RestoreCallerDoubles(); | 342 RestoreCallerDoubles(); |
343 } | 343 } |
344 __ Call(t9); | 344 __ Call(t9); |
345 } | 345 } |
346 } | 346 } |
347 __ RecordComment("]"); | 347 __ RecordComment("]"); |
348 | 348 |
349 // The deoptimization jump table is the last part of the instruction | 349 // The deoptimization jump table is the last part of the instruction |
350 // sequence. Mark the generated code as done unless we bailed out. | 350 // sequence. Mark the generated code as done unless we bailed out. |
351 if (!is_aborted()) status_ = DONE; | 351 if (!is_aborted()) status_ = DONE; |
352 return !is_aborted(); | 352 return !is_aborted(); |
353 } | 353 } |
354 | 354 |
355 | 355 |
356 bool LCodeGen::GenerateSafepointTable() { | 356 bool LCodeGen::GenerateSafepointTable() { |
357 ASSERT(is_done()); | 357 DCHECK(is_done()); |
358 safepoints_.Emit(masm(), GetStackSlotCount()); | 358 safepoints_.Emit(masm(), GetStackSlotCount()); |
359 return !is_aborted(); | 359 return !is_aborted(); |
360 } | 360 } |
361 | 361 |
362 | 362 |
363 Register LCodeGen::ToRegister(int index) const { | 363 Register LCodeGen::ToRegister(int index) const { |
364 return Register::FromAllocationIndex(index); | 364 return Register::FromAllocationIndex(index); |
365 } | 365 } |
366 | 366 |
367 | 367 |
368 DoubleRegister LCodeGen::ToDoubleRegister(int index) const { | 368 DoubleRegister LCodeGen::ToDoubleRegister(int index) const { |
369 return DoubleRegister::FromAllocationIndex(index); | 369 return DoubleRegister::FromAllocationIndex(index); |
370 } | 370 } |
371 | 371 |
372 | 372 |
373 Register LCodeGen::ToRegister(LOperand* op) const { | 373 Register LCodeGen::ToRegister(LOperand* op) const { |
374 ASSERT(op->IsRegister()); | 374 DCHECK(op->IsRegister()); |
375 return ToRegister(op->index()); | 375 return ToRegister(op->index()); |
376 } | 376 } |
377 | 377 |
378 | 378 |
379 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { | 379 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { |
380 if (op->IsRegister()) { | 380 if (op->IsRegister()) { |
381 return ToRegister(op->index()); | 381 return ToRegister(op->index()); |
382 } else if (op->IsConstantOperand()) { | 382 } else if (op->IsConstantOperand()) { |
383 LConstantOperand* const_op = LConstantOperand::cast(op); | 383 LConstantOperand* const_op = LConstantOperand::cast(op); |
384 HConstant* constant = chunk_->LookupConstant(const_op); | 384 HConstant* constant = chunk_->LookupConstant(const_op); |
385 Handle<Object> literal = constant->handle(isolate()); | 385 Handle<Object> literal = constant->handle(isolate()); |
386 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 386 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
387 if (r.IsInteger32()) { | 387 if (r.IsInteger32()) { |
388 ASSERT(literal->IsNumber()); | 388 DCHECK(literal->IsNumber()); |
389 __ li(scratch, Operand(static_cast<int32_t>(literal->Number()))); | 389 __ li(scratch, Operand(static_cast<int32_t>(literal->Number()))); |
390 } else if (r.IsSmi()) { | 390 } else if (r.IsSmi()) { |
391 ASSERT(constant->HasSmiValue()); | 391 DCHECK(constant->HasSmiValue()); |
392 __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value()))); | 392 __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value()))); |
393 } else if (r.IsDouble()) { | 393 } else if (r.IsDouble()) { |
394 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); | 394 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); |
395 } else { | 395 } else { |
396 ASSERT(r.IsSmiOrTagged()); | 396 DCHECK(r.IsSmiOrTagged()); |
397 __ li(scratch, literal); | 397 __ li(scratch, literal); |
398 } | 398 } |
399 return scratch; | 399 return scratch; |
400 } else if (op->IsStackSlot()) { | 400 } else if (op->IsStackSlot()) { |
401 __ ld(scratch, ToMemOperand(op)); | 401 __ ld(scratch, ToMemOperand(op)); |
402 return scratch; | 402 return scratch; |
403 } | 403 } |
404 UNREACHABLE(); | 404 UNREACHABLE(); |
405 return scratch; | 405 return scratch; |
406 } | 406 } |
407 | 407 |
408 | 408 |
409 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 409 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
410 ASSERT(op->IsDoubleRegister()); | 410 DCHECK(op->IsDoubleRegister()); |
411 return ToDoubleRegister(op->index()); | 411 return ToDoubleRegister(op->index()); |
412 } | 412 } |
413 | 413 |
414 | 414 |
415 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, | 415 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, |
416 FloatRegister flt_scratch, | 416 FloatRegister flt_scratch, |
417 DoubleRegister dbl_scratch) { | 417 DoubleRegister dbl_scratch) { |
418 if (op->IsDoubleRegister()) { | 418 if (op->IsDoubleRegister()) { |
419 return ToDoubleRegister(op->index()); | 419 return ToDoubleRegister(op->index()); |
420 } else if (op->IsConstantOperand()) { | 420 } else if (op->IsConstantOperand()) { |
421 LConstantOperand* const_op = LConstantOperand::cast(op); | 421 LConstantOperand* const_op = LConstantOperand::cast(op); |
422 HConstant* constant = chunk_->LookupConstant(const_op); | 422 HConstant* constant = chunk_->LookupConstant(const_op); |
423 Handle<Object> literal = constant->handle(isolate()); | 423 Handle<Object> literal = constant->handle(isolate()); |
424 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 424 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
425 if (r.IsInteger32()) { | 425 if (r.IsInteger32()) { |
426 ASSERT(literal->IsNumber()); | 426 DCHECK(literal->IsNumber()); |
427 __ li(at, Operand(static_cast<int32_t>(literal->Number()))); | 427 __ li(at, Operand(static_cast<int32_t>(literal->Number()))); |
428 __ mtc1(at, flt_scratch); | 428 __ mtc1(at, flt_scratch); |
429 __ cvt_d_w(dbl_scratch, flt_scratch); | 429 __ cvt_d_w(dbl_scratch, flt_scratch); |
430 return dbl_scratch; | 430 return dbl_scratch; |
431 } else if (r.IsDouble()) { | 431 } else if (r.IsDouble()) { |
432 Abort(kUnsupportedDoubleImmediate); | 432 Abort(kUnsupportedDoubleImmediate); |
433 } else if (r.IsTagged()) { | 433 } else if (r.IsTagged()) { |
434 Abort(kUnsupportedTaggedImmediate); | 434 Abort(kUnsupportedTaggedImmediate); |
435 } | 435 } |
436 } else if (op->IsStackSlot()) { | 436 } else if (op->IsStackSlot()) { |
437 MemOperand mem_op = ToMemOperand(op); | 437 MemOperand mem_op = ToMemOperand(op); |
438 __ ldc1(dbl_scratch, mem_op); | 438 __ ldc1(dbl_scratch, mem_op); |
439 return dbl_scratch; | 439 return dbl_scratch; |
440 } | 440 } |
441 UNREACHABLE(); | 441 UNREACHABLE(); |
442 return dbl_scratch; | 442 return dbl_scratch; |
443 } | 443 } |
444 | 444 |
445 | 445 |
446 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { | 446 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { |
447 HConstant* constant = chunk_->LookupConstant(op); | 447 HConstant* constant = chunk_->LookupConstant(op); |
448 ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); | 448 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); |
449 return constant->handle(isolate()); | 449 return constant->handle(isolate()); |
450 } | 450 } |
451 | 451 |
452 | 452 |
453 bool LCodeGen::IsInteger32(LConstantOperand* op) const { | 453 bool LCodeGen::IsInteger32(LConstantOperand* op) const { |
454 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); | 454 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); |
455 } | 455 } |
456 | 456 |
457 | 457 |
458 bool LCodeGen::IsSmi(LConstantOperand* op) const { | 458 bool LCodeGen::IsSmi(LConstantOperand* op) const { |
459 return chunk_->LookupLiteralRepresentation(op).IsSmi(); | 459 return chunk_->LookupLiteralRepresentation(op).IsSmi(); |
460 } | 460 } |
461 | 461 |
462 | 462 |
463 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { | 463 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { |
464 // return ToRepresentation(op, Representation::Integer32()); | 464 // return ToRepresentation(op, Representation::Integer32()); |
465 HConstant* constant = chunk_->LookupConstant(op); | 465 HConstant* constant = chunk_->LookupConstant(op); |
466 return constant->Integer32Value(); | 466 return constant->Integer32Value(); |
467 } | 467 } |
468 | 468 |
469 | 469 |
470 int32_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op, | 470 int32_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op, |
471 const Representation& r) const { | 471 const Representation& r) const { |
472 HConstant* constant = chunk_->LookupConstant(op); | 472 HConstant* constant = chunk_->LookupConstant(op); |
473 int32_t value = constant->Integer32Value(); | 473 int32_t value = constant->Integer32Value(); |
474 if (r.IsInteger32()) return value; | 474 if (r.IsInteger32()) return value; |
475 ASSERT(r.IsSmiOrTagged()); | 475 DCHECK(r.IsSmiOrTagged()); |
476 return reinterpret_cast<int64_t>(Smi::FromInt(value)); | 476 return reinterpret_cast<int64_t>(Smi::FromInt(value)); |
477 } | 477 } |
478 | 478 |
479 | 479 |
480 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { | 480 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { |
481 HConstant* constant = chunk_->LookupConstant(op); | 481 HConstant* constant = chunk_->LookupConstant(op); |
482 return Smi::FromInt(constant->Integer32Value()); | 482 return Smi::FromInt(constant->Integer32Value()); |
483 } | 483 } |
484 | 484 |
485 | 485 |
486 double LCodeGen::ToDouble(LConstantOperand* op) const { | 486 double LCodeGen::ToDouble(LConstantOperand* op) const { |
487 HConstant* constant = chunk_->LookupConstant(op); | 487 HConstant* constant = chunk_->LookupConstant(op); |
488 ASSERT(constant->HasDoubleValue()); | 488 DCHECK(constant->HasDoubleValue()); |
489 return constant->DoubleValue(); | 489 return constant->DoubleValue(); |
490 } | 490 } |
491 | 491 |
492 | 492 |
493 Operand LCodeGen::ToOperand(LOperand* op) { | 493 Operand LCodeGen::ToOperand(LOperand* op) { |
494 if (op->IsConstantOperand()) { | 494 if (op->IsConstantOperand()) { |
495 LConstantOperand* const_op = LConstantOperand::cast(op); | 495 LConstantOperand* const_op = LConstantOperand::cast(op); |
496 HConstant* constant = chunk()->LookupConstant(const_op); | 496 HConstant* constant = chunk()->LookupConstant(const_op); |
497 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 497 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
498 if (r.IsSmi()) { | 498 if (r.IsSmi()) { |
499 ASSERT(constant->HasSmiValue()); | 499 DCHECK(constant->HasSmiValue()); |
500 return Operand(Smi::FromInt(constant->Integer32Value())); | 500 return Operand(Smi::FromInt(constant->Integer32Value())); |
501 } else if (r.IsInteger32()) { | 501 } else if (r.IsInteger32()) { |
502 ASSERT(constant->HasInteger32Value()); | 502 DCHECK(constant->HasInteger32Value()); |
503 return Operand(constant->Integer32Value()); | 503 return Operand(constant->Integer32Value()); |
504 } else if (r.IsDouble()) { | 504 } else if (r.IsDouble()) { |
505 Abort(kToOperandUnsupportedDoubleImmediate); | 505 Abort(kToOperandUnsupportedDoubleImmediate); |
506 } | 506 } |
507 ASSERT(r.IsTagged()); | 507 DCHECK(r.IsTagged()); |
508 return Operand(constant->handle(isolate())); | 508 return Operand(constant->handle(isolate())); |
509 } else if (op->IsRegister()) { | 509 } else if (op->IsRegister()) { |
510 return Operand(ToRegister(op)); | 510 return Operand(ToRegister(op)); |
511 } else if (op->IsDoubleRegister()) { | 511 } else if (op->IsDoubleRegister()) { |
512 Abort(kToOperandIsDoubleRegisterUnimplemented); | 512 Abort(kToOperandIsDoubleRegisterUnimplemented); |
513 return Operand((int64_t)0); | 513 return Operand((int64_t)0); |
514 } | 514 } |
515 // Stack slots not implemented, use ToMemOperand instead. | 515 // Stack slots not implemented, use ToMemOperand instead. |
516 UNREACHABLE(); | 516 UNREACHABLE(); |
517 return Operand((int64_t)0); | 517 return Operand((int64_t)0); |
518 } | 518 } |
519 | 519 |
520 | 520 |
521 static int ArgumentsOffsetWithoutFrame(int index) { | 521 static int ArgumentsOffsetWithoutFrame(int index) { |
522 ASSERT(index < 0); | 522 DCHECK(index < 0); |
523 return -(index + 1) * kPointerSize; | 523 return -(index + 1) * kPointerSize; |
524 } | 524 } |
525 | 525 |
526 | 526 |
527 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { | 527 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { |
528 ASSERT(!op->IsRegister()); | 528 DCHECK(!op->IsRegister()); |
529 ASSERT(!op->IsDoubleRegister()); | 529 DCHECK(!op->IsDoubleRegister()); |
530 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); | 530 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); |
531 if (NeedsEagerFrame()) { | 531 if (NeedsEagerFrame()) { |
532 return MemOperand(fp, StackSlotOffset(op->index())); | 532 return MemOperand(fp, StackSlotOffset(op->index())); |
533 } else { | 533 } else { |
534 // Retrieve parameter without eager stack-frame relative to the | 534 // Retrieve parameter without eager stack-frame relative to the |
535 // stack-pointer. | 535 // stack-pointer. |
536 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); | 536 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); |
537 } | 537 } |
538 } | 538 } |
539 | 539 |
540 | 540 |
541 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { | 541 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { |
542 ASSERT(op->IsDoubleStackSlot()); | 542 DCHECK(op->IsDoubleStackSlot()); |
543 if (NeedsEagerFrame()) { | 543 if (NeedsEagerFrame()) { |
544 // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); | 544 // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); |
545 return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize); | 545 return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize); |
546 } else { | 546 } else { |
547 // Retrieve parameter without eager stack-frame relative to the | 547 // Retrieve parameter without eager stack-frame relative to the |
548 // stack-pointer. | 548 // stack-pointer. |
549 // return MemOperand( | 549 // return MemOperand( |
550 // sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); | 550 // sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); |
551 return MemOperand( | 551 return MemOperand( |
552 sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize); | 552 sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize); |
(...skipping 18 matching lines...) Expand all Loading... |
571 : Translation::kSelfLiteralId; | 571 : Translation::kSelfLiteralId; |
572 | 572 |
573 switch (environment->frame_type()) { | 573 switch (environment->frame_type()) { |
574 case JS_FUNCTION: | 574 case JS_FUNCTION: |
575 translation->BeginJSFrame(environment->ast_id(), closure_id, height); | 575 translation->BeginJSFrame(environment->ast_id(), closure_id, height); |
576 break; | 576 break; |
577 case JS_CONSTRUCT: | 577 case JS_CONSTRUCT: |
578 translation->BeginConstructStubFrame(closure_id, translation_size); | 578 translation->BeginConstructStubFrame(closure_id, translation_size); |
579 break; | 579 break; |
580 case JS_GETTER: | 580 case JS_GETTER: |
581 ASSERT(translation_size == 1); | 581 DCHECK(translation_size == 1); |
582 ASSERT(height == 0); | 582 DCHECK(height == 0); |
583 translation->BeginGetterStubFrame(closure_id); | 583 translation->BeginGetterStubFrame(closure_id); |
584 break; | 584 break; |
585 case JS_SETTER: | 585 case JS_SETTER: |
586 ASSERT(translation_size == 2); | 586 DCHECK(translation_size == 2); |
587 ASSERT(height == 0); | 587 DCHECK(height == 0); |
588 translation->BeginSetterStubFrame(closure_id); | 588 translation->BeginSetterStubFrame(closure_id); |
589 break; | 589 break; |
590 case STUB: | 590 case STUB: |
591 translation->BeginCompiledStubFrame(); | 591 translation->BeginCompiledStubFrame(); |
592 break; | 592 break; |
593 case ARGUMENTS_ADAPTOR: | 593 case ARGUMENTS_ADAPTOR: |
594 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); | 594 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); |
595 break; | 595 break; |
596 } | 596 } |
597 | 597 |
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
682 RelocInfo::Mode mode, | 682 RelocInfo::Mode mode, |
683 LInstruction* instr) { | 683 LInstruction* instr) { |
684 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); | 684 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); |
685 } | 685 } |
686 | 686 |
687 | 687 |
688 void LCodeGen::CallCodeGeneric(Handle<Code> code, | 688 void LCodeGen::CallCodeGeneric(Handle<Code> code, |
689 RelocInfo::Mode mode, | 689 RelocInfo::Mode mode, |
690 LInstruction* instr, | 690 LInstruction* instr, |
691 SafepointMode safepoint_mode) { | 691 SafepointMode safepoint_mode) { |
692 ASSERT(instr != NULL); | 692 DCHECK(instr != NULL); |
693 __ Call(code, mode); | 693 __ Call(code, mode); |
694 RecordSafepointWithLazyDeopt(instr, safepoint_mode); | 694 RecordSafepointWithLazyDeopt(instr, safepoint_mode); |
695 } | 695 } |
696 | 696 |
697 | 697 |
698 void LCodeGen::CallRuntime(const Runtime::Function* function, | 698 void LCodeGen::CallRuntime(const Runtime::Function* function, |
699 int num_arguments, | 699 int num_arguments, |
700 LInstruction* instr, | 700 LInstruction* instr, |
701 SaveFPRegsMode save_doubles) { | 701 SaveFPRegsMode save_doubles) { |
702 ASSERT(instr != NULL); | 702 DCHECK(instr != NULL); |
703 | 703 |
704 __ CallRuntime(function, num_arguments, save_doubles); | 704 __ CallRuntime(function, num_arguments, save_doubles); |
705 | 705 |
706 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | 706 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
707 } | 707 } |
708 | 708 |
709 | 709 |
710 void LCodeGen::LoadContextFromDeferred(LOperand* context) { | 710 void LCodeGen::LoadContextFromDeferred(LOperand* context) { |
711 if (context->IsRegister()) { | 711 if (context->IsRegister()) { |
712 __ Move(cp, ToRegister(context)); | 712 __ Move(cp, ToRegister(context)); |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
769 } | 769 } |
770 } | 770 } |
771 | 771 |
772 | 772 |
773 void LCodeGen::DeoptimizeIf(Condition condition, | 773 void LCodeGen::DeoptimizeIf(Condition condition, |
774 LEnvironment* environment, | 774 LEnvironment* environment, |
775 Deoptimizer::BailoutType bailout_type, | 775 Deoptimizer::BailoutType bailout_type, |
776 Register src1, | 776 Register src1, |
777 const Operand& src2) { | 777 const Operand& src2) { |
778 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 778 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
779 ASSERT(environment->HasBeenRegistered()); | 779 DCHECK(environment->HasBeenRegistered()); |
780 int id = environment->deoptimization_index(); | 780 int id = environment->deoptimization_index(); |
781 ASSERT(info()->IsOptimizing() || info()->IsStub()); | 781 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
782 Address entry = | 782 Address entry = |
783 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 783 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
784 if (entry == NULL) { | 784 if (entry == NULL) { |
785 Abort(kBailoutWasNotPrepared); | 785 Abort(kBailoutWasNotPrepared); |
786 return; | 786 return; |
787 } | 787 } |
788 | 788 |
789 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { | 789 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { |
790 Register scratch = scratch0(); | 790 Register scratch = scratch0(); |
791 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); | 791 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); |
(...skipping 15 matching lines...) Expand all Loading... |
807 | 807 |
808 if (info()->ShouldTrapOnDeopt()) { | 808 if (info()->ShouldTrapOnDeopt()) { |
809 Label skip; | 809 Label skip; |
810 if (condition != al) { | 810 if (condition != al) { |
811 __ Branch(&skip, NegateCondition(condition), src1, src2); | 811 __ Branch(&skip, NegateCondition(condition), src1, src2); |
812 } | 812 } |
813 __ stop("trap_on_deopt"); | 813 __ stop("trap_on_deopt"); |
814 __ bind(&skip); | 814 __ bind(&skip); |
815 } | 815 } |
816 | 816 |
817 ASSERT(info()->IsStub() || frame_is_built_); | 817 DCHECK(info()->IsStub() || frame_is_built_); |
818 // Go through jump table if we need to handle condition, build frame, or | 818 // Go through jump table if we need to handle condition, build frame, or |
819 // restore caller doubles. | 819 // restore caller doubles. |
820 if (condition == al && frame_is_built_ && | 820 if (condition == al && frame_is_built_ && |
821 !info()->saves_caller_doubles()) { | 821 !info()->saves_caller_doubles()) { |
822 __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); | 822 __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); |
823 } else { | 823 } else { |
824 // We often have several deopts to the same entry, reuse the last | 824 // We often have several deopts to the same entry, reuse the last |
825 // jump entry if this is the case. | 825 // jump entry if this is the case. |
826 if (deopt_jump_table_.is_empty() || | 826 if (deopt_jump_table_.is_empty() || |
827 (deopt_jump_table_.last().address != entry) || | 827 (deopt_jump_table_.last().address != entry) || |
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
896 int result = deoptimization_literals_.length(); | 896 int result = deoptimization_literals_.length(); |
897 for (int i = 0; i < deoptimization_literals_.length(); ++i) { | 897 for (int i = 0; i < deoptimization_literals_.length(); ++i) { |
898 if (deoptimization_literals_[i].is_identical_to(literal)) return i; | 898 if (deoptimization_literals_[i].is_identical_to(literal)) return i; |
899 } | 899 } |
900 deoptimization_literals_.Add(literal, zone()); | 900 deoptimization_literals_.Add(literal, zone()); |
901 return result; | 901 return result; |
902 } | 902 } |
903 | 903 |
904 | 904 |
// Seeds the deoptimization literal table with the closures of every function
// inlined into this chunk. The table must still be empty (checked below), so
// the inlined closures occupy the first slots; their count is remembered in
// inlined_function_count_.
905 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { | 905 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { |
906 ASSERT(deoptimization_literals_.length() == 0); | 906 DCHECK(deoptimization_literals_.length() == 0); |
907 | 907 |
908 const ZoneList<Handle<JSFunction> >* inlined_closures = | 908 const ZoneList<Handle<JSFunction> >* inlined_closures = |
909 chunk()->inlined_closures(); | 909 chunk()->inlined_closures(); |
910 | 910 |
911 for (int i = 0, length = inlined_closures->length(); | 911 for (int i = 0, length = inlined_closures->length(); |
912 i < length; | 912 i < length; |
913 i++) { | 913 i++) { |
914 DefineDeoptimizationLiteral(inlined_closures->at(i)); | 914 DefineDeoptimizationLiteral(inlined_closures->at(i)); |
915 } | 915 } |
916 | 916 |
917 inlined_function_count_ = deoptimization_literals_.length(); | 917 inlined_function_count_ = deoptimization_literals_.length(); |
918 } | 918 } |
919 | 919 |
920 | 920 |
// Records a lazy-deopt safepoint for |instr| in the form requested by
// |safepoint_mode|: either a plain safepoint over the instruction's pointer
// map, or one that additionally tracks register contents (with zero
// arguments). Both variants use Safepoint::kLazyDeopt.
921 void LCodeGen::RecordSafepointWithLazyDeopt( | 921 void LCodeGen::RecordSafepointWithLazyDeopt( |
922 LInstruction* instr, SafepointMode safepoint_mode) { | 922 LInstruction* instr, SafepointMode safepoint_mode) { |
923 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { | 923 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { |
924 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); | 924 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); |
925 } else { | 925 } else { |
// Only two modes exist; anything else is a programming error.
926 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 926 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
927 RecordSafepointWithRegisters( | 927 RecordSafepointWithRegisters( |
928 instr->pointer_map(), 0, Safepoint::kLazyDeopt); | 928 instr->pointer_map(), 0, Safepoint::kLazyDeopt); |
929 } | 929 } |
930 } | 930 } |
931 | 931 |
932 | 932 |
933 void LCodeGen::RecordSafepoint( | 933 void LCodeGen::RecordSafepoint( |
934 LPointerMap* pointers, | 934 LPointerMap* pointers, |
935 Safepoint::Kind kind, | 935 Safepoint::Kind kind, |
936 int arguments, | 936 int arguments, |
937 Safepoint::DeoptMode deopt_mode) { | 937 Safepoint::DeoptMode deopt_mode) { |
938 ASSERT(expected_safepoint_kind_ == kind); | 938 DCHECK(expected_safepoint_kind_ == kind); |
939 | 939 |
940 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); | 940 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); |
941 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), | 941 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), |
942 kind, arguments, deopt_mode); | 942 kind, arguments, deopt_mode); |
943 for (int i = 0; i < operands->length(); i++) { | 943 for (int i = 0; i < operands->length(); i++) { |
944 LOperand* pointer = operands->at(i); | 944 LOperand* pointer = operands->at(i); |
945 if (pointer->IsStackSlot()) { | 945 if (pointer->IsStackSlot()) { |
946 safepoint.DefinePointerSlot(pointer->index(), zone()); | 946 safepoint.DefinePointerSlot(pointer->index(), zone()); |
947 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { | 947 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { |
948 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); | 948 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); |
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1017 DoGap(instr); | 1017 DoGap(instr); |
1018 } | 1018 } |
1019 | 1019 |
1020 | 1020 |
// Intentionally a no-op: no code is emitted for an LParameter instruction
// (parameter values are presumably already in their expected locations by
// the time the body runs — set up outside this function).
1021 void LCodeGen::DoParameter(LParameter* instr) { | 1021 void LCodeGen::DoParameter(LParameter* instr) { |
1022 // Nothing to do. | 1022 // Nothing to do. |
1023 } | 1023 } |
1024 | 1024 |
1025 | 1025 |
1026 void LCodeGen::DoCallStub(LCallStub* instr) { | 1026 void LCodeGen::DoCallStub(LCallStub* instr) { |
1027 ASSERT(ToRegister(instr->context()).is(cp)); | 1027 DCHECK(ToRegister(instr->context()).is(cp)); |
1028 ASSERT(ToRegister(instr->result()).is(v0)); | 1028 DCHECK(ToRegister(instr->result()).is(v0)); |
1029 switch (instr->hydrogen()->major_key()) { | 1029 switch (instr->hydrogen()->major_key()) { |
1030 case CodeStub::RegExpExec: { | 1030 case CodeStub::RegExpExec: { |
1031 RegExpExecStub stub(isolate()); | 1031 RegExpExecStub stub(isolate()); |
1032 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 1032 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
1033 break; | 1033 break; |
1034 } | 1034 } |
1035 case CodeStub::SubString: { | 1035 case CodeStub::SubString: { |
1036 SubStringStub stub(isolate()); | 1036 SubStringStub stub(isolate()); |
1037 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 1037 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
1038 break; | 1038 break; |
(...skipping 10 matching lines...) Expand all Loading... |
1049 | 1049 |
1050 | 1050 |
// An unknown OSR (on-stack-replacement) value marks the OSR entry point;
// all this instruction does is emit the OSR prologue.
1051 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { | 1051 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { |
1052 GenerateOsrPrologue(); | 1052 GenerateOsrPrologue(); |
1053 } | 1053 } |
1054 | 1054 |
1055 | 1055 |
1056 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { | 1056 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { |
1057 Register dividend = ToRegister(instr->dividend()); | 1057 Register dividend = ToRegister(instr->dividend()); |
1058 int32_t divisor = instr->divisor(); | 1058 int32_t divisor = instr->divisor(); |
1059 ASSERT(dividend.is(ToRegister(instr->result()))); | 1059 DCHECK(dividend.is(ToRegister(instr->result()))); |
1060 | 1060 |
1061 // Theoretically, a variation of the branch-free code for integer division by | 1061 // Theoretically, a variation of the branch-free code for integer division by |
1062 // a power of 2 (calculating the remainder via an additional multiplication | 1062 // a power of 2 (calculating the remainder via an additional multiplication |
1063 // (which gets simplified to an 'and') and subtraction) should be faster, and | 1063 // (which gets simplified to an 'and') and subtraction) should be faster, and |
1064 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to | 1064 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to |
1065 // indicate that positive dividends are heavily favored, so the branching | 1065 // indicate that positive dividends are heavily favored, so the branching |
1066 // version performs better. | 1066 // version performs better. |
1067 HMod* hmod = instr->hydrogen(); | 1067 HMod* hmod = instr->hydrogen(); |
1068 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1068 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
1069 Label dividend_is_not_negative, done; | 1069 Label dividend_is_not_negative, done; |
(...skipping 13 matching lines...) Expand all Loading... |
1083 __ bind(÷nd_is_not_negative); | 1083 __ bind(÷nd_is_not_negative); |
1084 __ And(dividend, dividend, Operand(mask)); | 1084 __ And(dividend, dividend, Operand(mask)); |
1085 __ bind(&done); | 1085 __ bind(&done); |
1086 } | 1086 } |
1087 | 1087 |
1088 | 1088 |
1089 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1089 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
1090 Register dividend = ToRegister(instr->dividend()); | 1090 Register dividend = ToRegister(instr->dividend()); |
1091 int32_t divisor = instr->divisor(); | 1091 int32_t divisor = instr->divisor(); |
1092 Register result = ToRegister(instr->result()); | 1092 Register result = ToRegister(instr->result()); |
1093 ASSERT(!dividend.is(result)); | 1093 DCHECK(!dividend.is(result)); |
1094 | 1094 |
1095 if (divisor == 0) { | 1095 if (divisor == 0) { |
1096 DeoptimizeIf(al, instr->environment()); | 1096 DeoptimizeIf(al, instr->environment()); |
1097 return; | 1097 return; |
1098 } | 1098 } |
1099 | 1099 |
1100 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1100 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1101 __ Dmul(result, result, Operand(Abs(divisor))); | 1101 __ Dmul(result, result, Operand(Abs(divisor))); |
1102 __ Dsubu(result, dividend, Operand(result)); | 1102 __ Dsubu(result, dividend, Operand(result)); |
1103 | 1103 |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1150 DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg)); | 1150 DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg)); |
1151 } | 1151 } |
1152 __ bind(&done); | 1152 __ bind(&done); |
1153 } | 1153 } |
1154 | 1154 |
1155 | 1155 |
1156 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1156 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
1157 Register dividend = ToRegister(instr->dividend()); | 1157 Register dividend = ToRegister(instr->dividend()); |
1158 int32_t divisor = instr->divisor(); | 1158 int32_t divisor = instr->divisor(); |
1159 Register result = ToRegister(instr->result()); | 1159 Register result = ToRegister(instr->result()); |
1160 ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor))); | 1160 DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor))); |
1161 ASSERT(!result.is(dividend)); | 1161 DCHECK(!result.is(dividend)); |
1162 | 1162 |
1163 // Check for (0 / -x) that will produce negative zero. | 1163 // Check for (0 / -x) that will produce negative zero. |
1164 HDiv* hdiv = instr->hydrogen(); | 1164 HDiv* hdiv = instr->hydrogen(); |
1165 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1165 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1166 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); | 1166 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); |
1167 } | 1167 } |
1168 // Check for (kMinInt / -1). | 1168 // Check for (kMinInt / -1). |
1169 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1169 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
1170 DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt)); | 1170 DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt)); |
1171 } | 1171 } |
(...skipping 22 matching lines...) Expand all Loading... |
1194 } | 1194 } |
1195 if (shift > 0) __ dsra(result, result, shift); | 1195 if (shift > 0) __ dsra(result, result, shift); |
1196 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1196 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
1197 } | 1197 } |
1198 | 1198 |
1199 | 1199 |
1200 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1200 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
1201 Register dividend = ToRegister(instr->dividend()); | 1201 Register dividend = ToRegister(instr->dividend()); |
1202 int32_t divisor = instr->divisor(); | 1202 int32_t divisor = instr->divisor(); |
1203 Register result = ToRegister(instr->result()); | 1203 Register result = ToRegister(instr->result()); |
1204 ASSERT(!dividend.is(result)); | 1204 DCHECK(!dividend.is(result)); |
1205 | 1205 |
1206 if (divisor == 0) { | 1206 if (divisor == 0) { |
1207 DeoptimizeIf(al, instr->environment()); | 1207 DeoptimizeIf(al, instr->environment()); |
1208 return; | 1208 return; |
1209 } | 1209 } |
1210 | 1210 |
1211 // Check for (0 / -x) that will produce negative zero. | 1211 // Check for (0 / -x) that will produce negative zero. |
1212 HDiv* hdiv = instr->hydrogen(); | 1212 HDiv* hdiv = instr->hydrogen(); |
1213 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1213 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1214 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); | 1214 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1270 } | 1270 } |
1271 } | 1271 } |
1272 | 1272 |
1273 | 1273 |
// Emits a double-precision fused multiply-add via the macro assembler's
// Madd_d helper. The addend register doubles as the result register
// (verified below), so the operation updates it in place:
// addend <- addend + multiplier * multiplicand (per Madd_d operand order —
// confirm against the macro-assembler definition).
1274 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1274 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
1275 DoubleRegister addend = ToDoubleRegister(instr->addend()); | 1275 DoubleRegister addend = ToDoubleRegister(instr->addend()); |
1276 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1276 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); |
1277 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1277 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
1278 | 1278 |
1279 // This is computed in-place. | 1279 // This is computed in-place. |
1280 ASSERT(addend.is(ToDoubleRegister(instr->result()))); | 1280 DCHECK(addend.is(ToDoubleRegister(instr->result()))); |
1281 | 1281 |
1282 __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0()); | 1282 __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0()); |
1283 } | 1283 } |
1284 | 1284 |
1285 | 1285 |
1286 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 1286 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
1287 Register dividend = ToRegister(instr->dividend()); | 1287 Register dividend = ToRegister(instr->dividend()); |
1288 Register result = ToRegister(instr->result()); | 1288 Register result = ToRegister(instr->result()); |
1289 int32_t divisor = instr->divisor(); | 1289 int32_t divisor = instr->divisor(); |
1290 Register scratch = result.is(dividend) ? scratch0() : dividend; | 1290 Register scratch = result.is(dividend) ? scratch0() : dividend; |
1291 ASSERT(!result.is(dividend) || !scratch.is(dividend)); | 1291 DCHECK(!result.is(dividend) || !scratch.is(dividend)); |
1292 | 1292 |
1293 // If the divisor is 1, return the dividend. | 1293 // If the divisor is 1, return the dividend. |
1294 if (divisor == 1) { | 1294 if (divisor == 1) { |
1295 __ Move(result, dividend); | 1295 __ Move(result, dividend); |
1296 return; | 1296 return; |
1297 } | 1297 } |
1298 | 1298 |
1299 // If the divisor is positive, things are easy: There can be no deopts and we | 1299 // If the divisor is positive, things are easy: There can be no deopts and we |
1300 // can simply do an arithmetic right shift. | 1300 // can simply do an arithmetic right shift. |
1301 uint16_t shift = WhichPowerOf2Abs(divisor); | 1301 uint16_t shift = WhichPowerOf2Abs(divisor); |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1336 __ bind(&no_overflow); | 1336 __ bind(&no_overflow); |
1337 __ dsra(result, result, shift); | 1337 __ dsra(result, result, shift); |
1338 __ bind(&done); | 1338 __ bind(&done); |
1339 } | 1339 } |
1340 | 1340 |
1341 | 1341 |
1342 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1342 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
1343 Register dividend = ToRegister(instr->dividend()); | 1343 Register dividend = ToRegister(instr->dividend()); |
1344 int32_t divisor = instr->divisor(); | 1344 int32_t divisor = instr->divisor(); |
1345 Register result = ToRegister(instr->result()); | 1345 Register result = ToRegister(instr->result()); |
1346 ASSERT(!dividend.is(result)); | 1346 DCHECK(!dividend.is(result)); |
1347 | 1347 |
1348 if (divisor == 0) { | 1348 if (divisor == 0) { |
1349 DeoptimizeIf(al, instr->environment()); | 1349 DeoptimizeIf(al, instr->environment()); |
1350 return; | 1350 return; |
1351 } | 1351 } |
1352 | 1352 |
1353 // Check for (0 / -x) that will produce negative zero. | 1353 // Check for (0 / -x) that will produce negative zero. |
1354 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1354 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
1355 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1355 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1356 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); | 1356 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); |
1357 } | 1357 } |
1358 | 1358 |
1359 // Easy case: We need no dynamic check for the dividend and the flooring | 1359 // Easy case: We need no dynamic check for the dividend and the flooring |
1360 // division is the same as the truncating division. | 1360 // division is the same as the truncating division. |
1361 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1361 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
1362 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1362 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
1363 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1363 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1364 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1364 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
1365 return; | 1365 return; |
1366 } | 1366 } |
1367 | 1367 |
1368 // In the general case we may need to adjust before and after the truncating | 1368 // In the general case we may need to adjust before and after the truncating |
1369 // division to get a flooring division. | 1369 // division to get a flooring division. |
1370 Register temp = ToRegister(instr->temp()); | 1370 Register temp = ToRegister(instr->temp()); |
1371 ASSERT(!temp.is(dividend) && !temp.is(result)); | 1371 DCHECK(!temp.is(dividend) && !temp.is(result)); |
1372 Label needs_adjustment, done; | 1372 Label needs_adjustment, done; |
1373 __ Branch(&needs_adjustment, divisor > 0 ? lt : gt, | 1373 __ Branch(&needs_adjustment, divisor > 0 ? lt : gt, |
1374 dividend, Operand(zero_reg)); | 1374 dividend, Operand(zero_reg)); |
1375 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1375 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1376 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1376 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
1377 __ jmp(&done); | 1377 __ jmp(&done); |
1378 __ bind(&needs_adjustment); | 1378 __ bind(&needs_adjustment); |
1379 __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1)); | 1379 __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1)); |
1380 __ TruncatingDiv(result, temp, Abs(divisor)); | 1380 __ TruncatingDiv(result, temp, Abs(divisor)); |
1381 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1381 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1499 // Correct the sign of the result if the constant is negative. | 1499 // Correct the sign of the result if the constant is negative. |
1500 if (constant < 0) __ Dsubu(result, zero_reg, result); | 1500 if (constant < 0) __ Dsubu(result, zero_reg, result); |
1501 } else { | 1501 } else { |
1502 // Generate standard code. | 1502 // Generate standard code. |
1503 __ li(at, constant); | 1503 __ li(at, constant); |
1504 __ Dmul(result, left, at); | 1504 __ Dmul(result, left, at); |
1505 } | 1505 } |
1506 } | 1506 } |
1507 | 1507 |
1508 } else { | 1508 } else { |
1509 ASSERT(right_op->IsRegister()); | 1509 DCHECK(right_op->IsRegister()); |
1510 Register right = ToRegister(right_op); | 1510 Register right = ToRegister(right_op); |
1511 | 1511 |
1512 if (overflow) { | 1512 if (overflow) { |
1513 // hi:lo = left * right. | 1513 // hi:lo = left * right. |
1514 if (instr->hydrogen()->representation().IsSmi()) { | 1514 if (instr->hydrogen()->representation().IsSmi()) { |
1515 __ Dmulh(result, left, right); | 1515 __ Dmulh(result, left, right); |
1516 } else { | 1516 } else { |
1517 __ Dmul(result, left, right); | 1517 __ Dmul(result, left, right); |
1518 } | 1518 } |
1519 __ dsra32(scratch, result, 0); | 1519 __ dsra32(scratch, result, 0); |
(...skipping 22 matching lines...) Expand all Loading... |
1542 Operand(zero_reg)); | 1542 Operand(zero_reg)); |
1543 __ bind(&done); | 1543 __ bind(&done); |
1544 } | 1544 } |
1545 } | 1545 } |
1546 } | 1546 } |
1547 | 1547 |
1548 | 1548 |
1549 void LCodeGen::DoBitI(LBitI* instr) { | 1549 void LCodeGen::DoBitI(LBitI* instr) { |
1550 LOperand* left_op = instr->left(); | 1550 LOperand* left_op = instr->left(); |
1551 LOperand* right_op = instr->right(); | 1551 LOperand* right_op = instr->right(); |
1552 ASSERT(left_op->IsRegister()); | 1552 DCHECK(left_op->IsRegister()); |
1553 Register left = ToRegister(left_op); | 1553 Register left = ToRegister(left_op); |
1554 Register result = ToRegister(instr->result()); | 1554 Register result = ToRegister(instr->result()); |
1555 Operand right(no_reg); | 1555 Operand right(no_reg); |
1556 | 1556 |
1557 if (right_op->IsStackSlot()) { | 1557 if (right_op->IsStackSlot()) { |
1558 right = Operand(EmitLoadRegister(right_op, at)); | 1558 right = Operand(EmitLoadRegister(right_op, at)); |
1559 } else { | 1559 } else { |
1560 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand()); | 1560 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); |
1561 right = ToOperand(right_op); | 1561 right = ToOperand(right_op); |
1562 } | 1562 } |
1563 | 1563 |
1564 switch (instr->op()) { | 1564 switch (instr->op()) { |
1565 case Token::BIT_AND: | 1565 case Token::BIT_AND: |
1566 __ And(result, left, right); | 1566 __ And(result, left, right); |
1567 break; | 1567 break; |
1568 case Token::BIT_OR: | 1568 case Token::BIT_OR: |
1569 __ Or(result, left, right); | 1569 __ Or(result, left, right); |
1570 break; | 1570 break; |
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1667 LOperand* left = instr->left(); | 1667 LOperand* left = instr->left(); |
1668 LOperand* right = instr->right(); | 1668 LOperand* right = instr->right(); |
1669 LOperand* result = instr->result(); | 1669 LOperand* result = instr->result(); |
1670 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1670 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1671 | 1671 |
1672 if (!can_overflow) { | 1672 if (!can_overflow) { |
1673 if (right->IsStackSlot()) { | 1673 if (right->IsStackSlot()) { |
1674 Register right_reg = EmitLoadRegister(right, at); | 1674 Register right_reg = EmitLoadRegister(right, at); |
1675 __ Dsubu(ToRegister(result), ToRegister(left), Operand(right_reg)); | 1675 __ Dsubu(ToRegister(result), ToRegister(left), Operand(right_reg)); |
1676 } else { | 1676 } else { |
1677 ASSERT(right->IsRegister() || right->IsConstantOperand()); | 1677 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
1678 __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right)); | 1678 __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right)); |
1679 } | 1679 } |
1680 } else { // can_overflow. | 1680 } else { // can_overflow. |
1681 Register overflow = scratch0(); | 1681 Register overflow = scratch0(); |
1682 Register scratch = scratch1(); | 1682 Register scratch = scratch1(); |
1683 if (right->IsStackSlot() || right->IsConstantOperand()) { | 1683 if (right->IsStackSlot() || right->IsConstantOperand()) { |
1684 Register right_reg = EmitLoadRegister(right, scratch); | 1684 Register right_reg = EmitLoadRegister(right, scratch); |
1685 __ SubuAndCheckForOverflow(ToRegister(result), | 1685 __ SubuAndCheckForOverflow(ToRegister(result), |
1686 ToRegister(left), | 1686 ToRegister(left), |
1687 right_reg, | 1687 right_reg, |
1688 overflow); // Reg at also used as scratch. | 1688 overflow); // Reg at also used as scratch. |
1689 } else { | 1689 } else { |
1690 ASSERT(right->IsRegister()); | 1690 DCHECK(right->IsRegister()); |
1691 // Due to overflow check macros not supporting constant operands, | 1691 // Due to overflow check macros not supporting constant operands, |
1692 // handling the IsConstantOperand case was moved to prev if clause. | 1692 // handling the IsConstantOperand case was moved to prev if clause. |
1693 __ SubuAndCheckForOverflow(ToRegister(result), | 1693 __ SubuAndCheckForOverflow(ToRegister(result), |
1694 ToRegister(left), | 1694 ToRegister(left), |
1695 ToRegister(right), | 1695 ToRegister(right), |
1696 overflow); // Reg at also used as scratch. | 1696 overflow); // Reg at also used as scratch. |
1697 } | 1697 } |
1698 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); | 1698 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); |
1699 if (!instr->hydrogen()->representation().IsSmi()) { | 1699 if (!instr->hydrogen()->representation().IsSmi()) { |
1700 DeoptimizeIf(gt, instr->environment(), | 1700 DeoptimizeIf(gt, instr->environment(), |
1701 ToRegister(result), Operand(kMaxInt)); | 1701 ToRegister(result), Operand(kMaxInt)); |
1702 DeoptimizeIf(lt, instr->environment(), | 1702 DeoptimizeIf(lt, instr->environment(), |
1703 ToRegister(result), Operand(kMinInt)); | 1703 ToRegister(result), Operand(kMinInt)); |
1704 } | 1704 } |
1705 } | 1705 } |
1706 } | 1706 } |
1707 | 1707 |
1708 | 1708 |
// Materializes an integer constant: loads the instruction's immediate value
// into the result register with the li macro instruction.
1709 void LCodeGen::DoConstantI(LConstantI* instr) { | 1709 void LCodeGen::DoConstantI(LConstantI* instr) { |
1710 __ li(ToRegister(instr->result()), Operand(instr->value())); | 1710 __ li(ToRegister(instr->result()), Operand(instr->value())); |
1711 } | 1711 } |
1712 | 1712 |
1713 | 1713 |
// Materializes a Smi constant: loads the instruction's value into the
// result register (same code shape as DoConstantI).
1714 void LCodeGen::DoConstantS(LConstantS* instr) { | 1714 void LCodeGen::DoConstantS(LConstantS* instr) { |
1715 __ li(ToRegister(instr->result()), Operand(instr->value())); | 1715 __ li(ToRegister(instr->result()), Operand(instr->value())); |
1716 } | 1716 } |
1717 | 1717 |
1718 | 1718 |
// Materializes a double constant into a floating-point register. The result
// must be a double register (checked below); the value is loaded with the
// macro assembler's double Move.
1719 void LCodeGen::DoConstantD(LConstantD* instr) { | 1719 void LCodeGen::DoConstantD(LConstantD* instr) { |
1720 ASSERT(instr->result()->IsDoubleRegister()); | 1720 DCHECK(instr->result()->IsDoubleRegister()); |
1721 DoubleRegister result = ToDoubleRegister(instr->result()); | 1721 DoubleRegister result = ToDoubleRegister(instr->result()); |
1722 double v = instr->value(); | 1722 double v = instr->value(); |
1723 __ Move(result, v); | 1723 __ Move(result, v); |
1724 } | 1724 } |
1725 | 1725 |
1726 | 1726 |
// Materializes an external-reference constant: loads the instruction's
// value into the result register (same code shape as DoConstantI).
1727 void LCodeGen::DoConstantE(LConstantE* instr) { | 1727 void LCodeGen::DoConstantE(LConstantE* instr) { |
1728 __ li(ToRegister(instr->result()), Operand(instr->value())); | 1728 __ li(ToRegister(instr->result()), Operand(instr->value())); |
1729 } | 1729 } |
1730 | 1730 |
(...skipping 11 matching lines...) Expand all Loading... |
1742 __ EnumLength(result, map); | 1742 __ EnumLength(result, map); |
1743 } | 1743 } |
1744 | 1744 |
1745 | 1745 |
1746 void LCodeGen::DoDateField(LDateField* instr) { | 1746 void LCodeGen::DoDateField(LDateField* instr) { |
1747 Register object = ToRegister(instr->date()); | 1747 Register object = ToRegister(instr->date()); |
1748 Register result = ToRegister(instr->result()); | 1748 Register result = ToRegister(instr->result()); |
1749 Register scratch = ToRegister(instr->temp()); | 1749 Register scratch = ToRegister(instr->temp()); |
1750 Smi* index = instr->index(); | 1750 Smi* index = instr->index(); |
1751 Label runtime, done; | 1751 Label runtime, done; |
1752 ASSERT(object.is(a0)); | 1752 DCHECK(object.is(a0)); |
1753 ASSERT(result.is(v0)); | 1753 DCHECK(result.is(v0)); |
1754 ASSERT(!scratch.is(scratch0())); | 1754 DCHECK(!scratch.is(scratch0())); |
1755 ASSERT(!scratch.is(object)); | 1755 DCHECK(!scratch.is(object)); |
1756 | 1756 |
1757 __ SmiTst(object, at); | 1757 __ SmiTst(object, at); |
1758 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); | 1758 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); |
1759 __ GetObjectType(object, scratch, scratch); | 1759 __ GetObjectType(object, scratch, scratch); |
1760 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE)); | 1760 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE)); |
1761 | 1761 |
1762 if (index->value() == 0) { | 1762 if (index->value() == 0) { |
1763 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1763 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); |
1764 } else { | 1764 } else { |
1765 if (index->value() < JSDate::kFirstUncachedField) { | 1765 if (index->value() < JSDate::kFirstUncachedField) { |
(...skipping 20 matching lines...) Expand all Loading... |
1786 String::Encoding encoding) { | 1786 String::Encoding encoding) { |
1787 if (index->IsConstantOperand()) { | 1787 if (index->IsConstantOperand()) { |
1788 int offset = ToInteger32(LConstantOperand::cast(index)); | 1788 int offset = ToInteger32(LConstantOperand::cast(index)); |
1789 if (encoding == String::TWO_BYTE_ENCODING) { | 1789 if (encoding == String::TWO_BYTE_ENCODING) { |
1790 offset *= kUC16Size; | 1790 offset *= kUC16Size; |
1791 } | 1791 } |
1792 STATIC_ASSERT(kCharSize == 1); | 1792 STATIC_ASSERT(kCharSize == 1); |
1793 return FieldMemOperand(string, SeqString::kHeaderSize + offset); | 1793 return FieldMemOperand(string, SeqString::kHeaderSize + offset); |
1794 } | 1794 } |
1795 Register scratch = scratch0(); | 1795 Register scratch = scratch0(); |
1796 ASSERT(!scratch.is(string)); | 1796 DCHECK(!scratch.is(string)); |
1797 ASSERT(!scratch.is(ToRegister(index))); | 1797 DCHECK(!scratch.is(ToRegister(index))); |
1798 if (encoding == String::ONE_BYTE_ENCODING) { | 1798 if (encoding == String::ONE_BYTE_ENCODING) { |
1799 __ Daddu(scratch, string, ToRegister(index)); | 1799 __ Daddu(scratch, string, ToRegister(index)); |
1800 } else { | 1800 } else { |
1801 STATIC_ASSERT(kUC16Size == 2); | 1801 STATIC_ASSERT(kUC16Size == 2); |
1802 __ dsll(scratch, ToRegister(index), 1); | 1802 __ dsll(scratch, ToRegister(index), 1); |
1803 __ Daddu(scratch, string, scratch); | 1803 __ Daddu(scratch, string, scratch); |
1804 } | 1804 } |
1805 return FieldMemOperand(scratch, SeqString::kHeaderSize); | 1805 return FieldMemOperand(scratch, SeqString::kHeaderSize); |
1806 } | 1806 } |
1807 | 1807 |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1863 LOperand* left = instr->left(); | 1863 LOperand* left = instr->left(); |
1864 LOperand* right = instr->right(); | 1864 LOperand* right = instr->right(); |
1865 LOperand* result = instr->result(); | 1865 LOperand* result = instr->result(); |
1866 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1866 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1867 | 1867 |
1868 if (!can_overflow) { | 1868 if (!can_overflow) { |
1869 if (right->IsStackSlot()) { | 1869 if (right->IsStackSlot()) { |
1870 Register right_reg = EmitLoadRegister(right, at); | 1870 Register right_reg = EmitLoadRegister(right, at); |
1871 __ Daddu(ToRegister(result), ToRegister(left), Operand(right_reg)); | 1871 __ Daddu(ToRegister(result), ToRegister(left), Operand(right_reg)); |
1872 } else { | 1872 } else { |
1873 ASSERT(right->IsRegister() || right->IsConstantOperand()); | 1873 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
1874 __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right)); | 1874 __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right)); |
1875 } | 1875 } |
1876 } else { // can_overflow. | 1876 } else { // can_overflow. |
1877 Register overflow = scratch0(); | 1877 Register overflow = scratch0(); |
1878 Register scratch = scratch1(); | 1878 Register scratch = scratch1(); |
1879 if (right->IsStackSlot() || | 1879 if (right->IsStackSlot() || |
1880 right->IsConstantOperand()) { | 1880 right->IsConstantOperand()) { |
1881 Register right_reg = EmitLoadRegister(right, scratch); | 1881 Register right_reg = EmitLoadRegister(right, scratch); |
1882 __ AdduAndCheckForOverflow(ToRegister(result), | 1882 __ AdduAndCheckForOverflow(ToRegister(result), |
1883 ToRegister(left), | 1883 ToRegister(left), |
1884 right_reg, | 1884 right_reg, |
1885 overflow); // Reg at also used as scratch. | 1885 overflow); // Reg at also used as scratch. |
1886 } else { | 1886 } else { |
1887 ASSERT(right->IsRegister()); | 1887 DCHECK(right->IsRegister()); |
1888 // Due to overflow check macros not supporting constant operands, | 1888 // Due to overflow check macros not supporting constant operands, |
1889 // handling the IsConstantOperand case was moved to prev if clause. | 1889 // handling the IsConstantOperand case was moved to prev if clause. |
1890 __ AdduAndCheckForOverflow(ToRegister(result), | 1890 __ AdduAndCheckForOverflow(ToRegister(result), |
1891 ToRegister(left), | 1891 ToRegister(left), |
1892 ToRegister(right), | 1892 ToRegister(right), |
1893 overflow); // Reg at also used as scratch. | 1893 overflow); // Reg at also used as scratch. |
1894 } | 1894 } |
1895 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); | 1895 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); |
1896 // if not smi, it must int32. | 1896 // if not smi, it must int32. |
1897 if (!instr->hydrogen()->representation().IsSmi()) { | 1897 if (!instr->hydrogen()->representation().IsSmi()) { |
(...skipping 15 matching lines...) Expand all Loading... |
1913 Register left_reg = ToRegister(left); | 1913 Register left_reg = ToRegister(left); |
1914 Register right_reg = EmitLoadRegister(right, scratch0()); | 1914 Register right_reg = EmitLoadRegister(right, scratch0()); |
1915 Register result_reg = ToRegister(instr->result()); | 1915 Register result_reg = ToRegister(instr->result()); |
1916 Label return_right, done; | 1916 Label return_right, done; |
1917 Register scratch = scratch1(); | 1917 Register scratch = scratch1(); |
1918 __ Slt(scratch, left_reg, Operand(right_reg)); | 1918 __ Slt(scratch, left_reg, Operand(right_reg)); |
1919 if (condition == ge) { | 1919 if (condition == ge) { |
1920 __ Movz(result_reg, left_reg, scratch); | 1920 __ Movz(result_reg, left_reg, scratch); |
1921 __ Movn(result_reg, right_reg, scratch); | 1921 __ Movn(result_reg, right_reg, scratch); |
1922 } else { | 1922 } else { |
1923 ASSERT(condition == le); | 1923 DCHECK(condition == le); |
1924 __ Movn(result_reg, left_reg, scratch); | 1924 __ Movn(result_reg, left_reg, scratch); |
1925 __ Movz(result_reg, right_reg, scratch); | 1925 __ Movz(result_reg, right_reg, scratch); |
1926 } | 1926 } |
1927 } else { | 1927 } else { |
1928 ASSERT(instr->hydrogen()->representation().IsDouble()); | 1928 DCHECK(instr->hydrogen()->representation().IsDouble()); |
1929 FPURegister left_reg = ToDoubleRegister(left); | 1929 FPURegister left_reg = ToDoubleRegister(left); |
1930 FPURegister right_reg = ToDoubleRegister(right); | 1930 FPURegister right_reg = ToDoubleRegister(right); |
1931 FPURegister result_reg = ToDoubleRegister(instr->result()); | 1931 FPURegister result_reg = ToDoubleRegister(instr->result()); |
1932 Label check_nan_left, check_zero, return_left, return_right, done; | 1932 Label check_nan_left, check_zero, return_left, return_right, done; |
1933 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg); | 1933 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg); |
1934 __ BranchF(&return_left, NULL, condition, left_reg, right_reg); | 1934 __ BranchF(&return_left, NULL, condition, left_reg, right_reg); |
1935 __ Branch(&return_right); | 1935 __ Branch(&return_right); |
1936 | 1936 |
1937 __ bind(&check_zero); | 1937 __ bind(&check_zero); |
1938 // left == right != 0. | 1938 // left == right != 0. |
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2000 break; | 2000 break; |
2001 } | 2001 } |
2002 default: | 2002 default: |
2003 UNREACHABLE(); | 2003 UNREACHABLE(); |
2004 break; | 2004 break; |
2005 } | 2005 } |
2006 } | 2006 } |
2007 | 2007 |
2008 | 2008 |
2009 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 2009 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
2010 ASSERT(ToRegister(instr->context()).is(cp)); | 2010 DCHECK(ToRegister(instr->context()).is(cp)); |
2011 ASSERT(ToRegister(instr->left()).is(a1)); | 2011 DCHECK(ToRegister(instr->left()).is(a1)); |
2012 ASSERT(ToRegister(instr->right()).is(a0)); | 2012 DCHECK(ToRegister(instr->right()).is(a0)); |
2013 ASSERT(ToRegister(instr->result()).is(v0)); | 2013 DCHECK(ToRegister(instr->result()).is(v0)); |
2014 | 2014 |
2015 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); | 2015 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); |
2016 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 2016 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
2017 // Other arch use a nop here, to signal that there is no inlined | 2017 // Other arch use a nop here, to signal that there is no inlined |
2018 // patchable code. Mips does not need the nop, since our marker | 2018 // patchable code. Mips does not need the nop, since our marker |
2019 // instruction (andi zero_reg) will never be used in normal code. | 2019 // instruction (andi zero_reg) will never be used in normal code. |
2020 } | 2020 } |
2021 | 2021 |
2022 | 2022 |
2023 template<class InstrType> | 2023 template<class InstrType> |
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2090 | 2090 |
2091 | 2091 |
2092 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { | 2092 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { |
2093 __ stop("LDebugBreak"); | 2093 __ stop("LDebugBreak"); |
2094 } | 2094 } |
2095 | 2095 |
2096 | 2096 |
2097 void LCodeGen::DoBranch(LBranch* instr) { | 2097 void LCodeGen::DoBranch(LBranch* instr) { |
2098 Representation r = instr->hydrogen()->value()->representation(); | 2098 Representation r = instr->hydrogen()->value()->representation(); |
2099 if (r.IsInteger32() || r.IsSmi()) { | 2099 if (r.IsInteger32() || r.IsSmi()) { |
2100 ASSERT(!info()->IsStub()); | 2100 DCHECK(!info()->IsStub()); |
2101 Register reg = ToRegister(instr->value()); | 2101 Register reg = ToRegister(instr->value()); |
2102 EmitBranch(instr, ne, reg, Operand(zero_reg)); | 2102 EmitBranch(instr, ne, reg, Operand(zero_reg)); |
2103 } else if (r.IsDouble()) { | 2103 } else if (r.IsDouble()) { |
2104 ASSERT(!info()->IsStub()); | 2104 DCHECK(!info()->IsStub()); |
2105 DoubleRegister reg = ToDoubleRegister(instr->value()); | 2105 DoubleRegister reg = ToDoubleRegister(instr->value()); |
2106 // Test the double value. Zero and NaN are false. | 2106 // Test the double value. Zero and NaN are false. |
2107 EmitBranchF(instr, nue, reg, kDoubleRegZero); | 2107 EmitBranchF(instr, nue, reg, kDoubleRegZero); |
2108 } else { | 2108 } else { |
2109 ASSERT(r.IsTagged()); | 2109 DCHECK(r.IsTagged()); |
2110 Register reg = ToRegister(instr->value()); | 2110 Register reg = ToRegister(instr->value()); |
2111 HType type = instr->hydrogen()->value()->type(); | 2111 HType type = instr->hydrogen()->value()->type(); |
2112 if (type.IsBoolean()) { | 2112 if (type.IsBoolean()) { |
2113 ASSERT(!info()->IsStub()); | 2113 DCHECK(!info()->IsStub()); |
2114 __ LoadRoot(at, Heap::kTrueValueRootIndex); | 2114 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
2115 EmitBranch(instr, eq, reg, Operand(at)); | 2115 EmitBranch(instr, eq, reg, Operand(at)); |
2116 } else if (type.IsSmi()) { | 2116 } else if (type.IsSmi()) { |
2117 ASSERT(!info()->IsStub()); | 2117 DCHECK(!info()->IsStub()); |
2118 EmitBranch(instr, ne, reg, Operand(zero_reg)); | 2118 EmitBranch(instr, ne, reg, Operand(zero_reg)); |
2119 } else if (type.IsJSArray()) { | 2119 } else if (type.IsJSArray()) { |
2120 ASSERT(!info()->IsStub()); | 2120 DCHECK(!info()->IsStub()); |
2121 EmitBranch(instr, al, zero_reg, Operand(zero_reg)); | 2121 EmitBranch(instr, al, zero_reg, Operand(zero_reg)); |
2122 } else if (type.IsHeapNumber()) { | 2122 } else if (type.IsHeapNumber()) { |
2123 ASSERT(!info()->IsStub()); | 2123 DCHECK(!info()->IsStub()); |
2124 DoubleRegister dbl_scratch = double_scratch0(); | 2124 DoubleRegister dbl_scratch = double_scratch0(); |
2125 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | 2125 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
2126 // Test the double value. Zero and NaN are false. | 2126 // Test the double value. Zero and NaN are false. |
2127 EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero); | 2127 EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero); |
2128 } else if (type.IsString()) { | 2128 } else if (type.IsString()) { |
2129 ASSERT(!info()->IsStub()); | 2129 DCHECK(!info()->IsStub()); |
2130 __ ld(at, FieldMemOperand(reg, String::kLengthOffset)); | 2130 __ ld(at, FieldMemOperand(reg, String::kLengthOffset)); |
2131 EmitBranch(instr, ne, at, Operand(zero_reg)); | 2131 EmitBranch(instr, ne, at, Operand(zero_reg)); |
2132 } else { | 2132 } else { |
2133 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | 2133 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
2134 // Avoid deopts in the case where we've never executed this path before. | 2134 // Avoid deopts in the case where we've never executed this path before. |
2135 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); | 2135 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); |
2136 | 2136 |
2137 if (expected.Contains(ToBooleanStub::UNDEFINED)) { | 2137 if (expected.Contains(ToBooleanStub::UNDEFINED)) { |
2138 // undefined -> false. | 2138 // undefined -> false. |
2139 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 2139 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
(...skipping 208 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2348 EmitFalseBranchF(instr, eq, input_reg, input_reg); | 2348 EmitFalseBranchF(instr, eq, input_reg, input_reg); |
2349 | 2349 |
2350 Register scratch = scratch0(); | 2350 Register scratch = scratch0(); |
2351 __ FmoveHigh(scratch, input_reg); | 2351 __ FmoveHigh(scratch, input_reg); |
2352 EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32)); | 2352 EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32)); |
2353 } | 2353 } |
2354 | 2354 |
2355 | 2355 |
2356 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { | 2356 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { |
2357 Representation rep = instr->hydrogen()->value()->representation(); | 2357 Representation rep = instr->hydrogen()->value()->representation(); |
2358 ASSERT(!rep.IsInteger32()); | 2358 DCHECK(!rep.IsInteger32()); |
2359 Register scratch = ToRegister(instr->temp()); | 2359 Register scratch = ToRegister(instr->temp()); |
2360 | 2360 |
2361 if (rep.IsDouble()) { | 2361 if (rep.IsDouble()) { |
2362 DoubleRegister value = ToDoubleRegister(instr->value()); | 2362 DoubleRegister value = ToDoubleRegister(instr->value()); |
2363 EmitFalseBranchF(instr, ne, value, kDoubleRegZero); | 2363 EmitFalseBranchF(instr, ne, value, kDoubleRegZero); |
2364 __ FmoveHigh(scratch, value); | 2364 __ FmoveHigh(scratch, value); |
2365 // Only use low 32-bits of value. | 2365 // Only use low 32-bits of value. |
2366 __ dsll32(scratch, scratch, 0); | 2366 __ dsll32(scratch, scratch, 0); |
2367 __ dsrl32(scratch, scratch, 0); | 2367 __ dsrl32(scratch, scratch, 0); |
2368 __ li(at, 0x80000000); | 2368 __ li(at, 0x80000000); |
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2485 case Token::GTE: | 2485 case Token::GTE: |
2486 return ge; | 2486 return ge; |
2487 default: | 2487 default: |
2488 UNREACHABLE(); | 2488 UNREACHABLE(); |
2489 return kNoCondition; | 2489 return kNoCondition; |
2490 } | 2490 } |
2491 } | 2491 } |
2492 | 2492 |
2493 | 2493 |
2494 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { | 2494 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { |
2495 ASSERT(ToRegister(instr->context()).is(cp)); | 2495 DCHECK(ToRegister(instr->context()).is(cp)); |
2496 Token::Value op = instr->op(); | 2496 Token::Value op = instr->op(); |
2497 | 2497 |
2498 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); | 2498 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
2499 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 2499 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
2500 | 2500 |
2501 Condition condition = ComputeCompareCondition(op); | 2501 Condition condition = ComputeCompareCondition(op); |
2502 | 2502 |
2503 EmitBranch(instr, condition, v0, Operand(zero_reg)); | 2503 EmitBranch(instr, condition, v0, Operand(zero_reg)); |
2504 } | 2504 } |
2505 | 2505 |
2506 | 2506 |
2507 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { | 2507 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { |
2508 InstanceType from = instr->from(); | 2508 InstanceType from = instr->from(); |
2509 InstanceType to = instr->to(); | 2509 InstanceType to = instr->to(); |
2510 if (from == FIRST_TYPE) return to; | 2510 if (from == FIRST_TYPE) return to; |
2511 ASSERT(from == to || to == LAST_TYPE); | 2511 DCHECK(from == to || to == LAST_TYPE); |
2512 return from; | 2512 return from; |
2513 } | 2513 } |
2514 | 2514 |
2515 | 2515 |
2516 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { | 2516 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { |
2517 InstanceType from = instr->from(); | 2517 InstanceType from = instr->from(); |
2518 InstanceType to = instr->to(); | 2518 InstanceType to = instr->to(); |
2519 if (from == to) return eq; | 2519 if (from == to) return eq; |
2520 if (to == LAST_TYPE) return hs; | 2520 if (to == LAST_TYPE) return hs; |
2521 if (from == FIRST_TYPE) return ls; | 2521 if (from == FIRST_TYPE) return ls; |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2564 | 2564 |
2565 | 2565 |
2566 // Branches to a label or falls through with the answer in flags. Trashes | 2566 // Branches to a label or falls through with the answer in flags. Trashes |
2567 // the temp registers, but not the input. | 2567 // the temp registers, but not the input. |
2568 void LCodeGen::EmitClassOfTest(Label* is_true, | 2568 void LCodeGen::EmitClassOfTest(Label* is_true, |
2569 Label* is_false, | 2569 Label* is_false, |
2570 Handle<String>class_name, | 2570 Handle<String>class_name, |
2571 Register input, | 2571 Register input, |
2572 Register temp, | 2572 Register temp, |
2573 Register temp2) { | 2573 Register temp2) { |
2574 ASSERT(!input.is(temp)); | 2574 DCHECK(!input.is(temp)); |
2575 ASSERT(!input.is(temp2)); | 2575 DCHECK(!input.is(temp2)); |
2576 ASSERT(!temp.is(temp2)); | 2576 DCHECK(!temp.is(temp2)); |
2577 | 2577 |
2578 __ JumpIfSmi(input, is_false); | 2578 __ JumpIfSmi(input, is_false); |
2579 | 2579 |
2580 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { | 2580 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { |
2581 // Assuming the following assertions, we can use the same compares to test | 2581 // Assuming the following assertions, we can use the same compares to test |
2582 // for both being a function type and being in the object type range. | 2582 // for both being a function type and being in the object type range. |
2583 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); | 2583 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
2584 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == | 2584 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == |
2585 FIRST_SPEC_OBJECT_TYPE + 1); | 2585 FIRST_SPEC_OBJECT_TYPE + 1); |
2586 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == | 2586 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2645 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { | 2645 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { |
2646 Register reg = ToRegister(instr->value()); | 2646 Register reg = ToRegister(instr->value()); |
2647 Register temp = ToRegister(instr->temp()); | 2647 Register temp = ToRegister(instr->temp()); |
2648 | 2648 |
2649 __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2649 __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); |
2650 EmitBranch(instr, eq, temp, Operand(instr->map())); | 2650 EmitBranch(instr, eq, temp, Operand(instr->map())); |
2651 } | 2651 } |
2652 | 2652 |
2653 | 2653 |
2654 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { | 2654 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { |
2655 ASSERT(ToRegister(instr->context()).is(cp)); | 2655 DCHECK(ToRegister(instr->context()).is(cp)); |
2656 Label true_label, done; | 2656 Label true_label, done; |
2657 ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0. | 2657 DCHECK(ToRegister(instr->left()).is(a0)); // Object is in a0. |
2658 ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1. | 2658 DCHECK(ToRegister(instr->right()).is(a1)); // Function is in a1. |
2659 Register result = ToRegister(instr->result()); | 2659 Register result = ToRegister(instr->result()); |
2660 ASSERT(result.is(v0)); | 2660 DCHECK(result.is(v0)); |
2661 | 2661 |
2662 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); | 2662 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); |
2663 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 2663 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
2664 | 2664 |
2665 __ Branch(&true_label, eq, result, Operand(zero_reg)); | 2665 __ Branch(&true_label, eq, result, Operand(zero_reg)); |
2666 __ li(result, Operand(factory()->false_value())); | 2666 __ li(result, Operand(factory()->false_value())); |
2667 __ Branch(&done); | 2667 __ Branch(&done); |
2668 __ bind(&true_label); | 2668 __ bind(&true_label); |
2669 __ li(result, Operand(factory()->true_value())); | 2669 __ li(result, Operand(factory()->true_value())); |
2670 __ bind(&done); | 2670 __ bind(&done); |
(...skipping 18 matching lines...) Expand all Loading... |
2689 }; | 2689 }; |
2690 | 2690 |
2691 DeferredInstanceOfKnownGlobal* deferred; | 2691 DeferredInstanceOfKnownGlobal* deferred; |
2692 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); | 2692 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); |
2693 | 2693 |
2694 Label done, false_result; | 2694 Label done, false_result; |
2695 Register object = ToRegister(instr->value()); | 2695 Register object = ToRegister(instr->value()); |
2696 Register temp = ToRegister(instr->temp()); | 2696 Register temp = ToRegister(instr->temp()); |
2697 Register result = ToRegister(instr->result()); | 2697 Register result = ToRegister(instr->result()); |
2698 | 2698 |
2699 ASSERT(object.is(a0)); | 2699 DCHECK(object.is(a0)); |
2700 ASSERT(result.is(v0)); | 2700 DCHECK(result.is(v0)); |
2701 | 2701 |
2702 // A Smi is not instance of anything. | 2702 // A Smi is not instance of anything. |
2703 __ JumpIfSmi(object, &false_result); | 2703 __ JumpIfSmi(object, &false_result); |
2704 | 2704 |
2705 // This is the inlined call site instanceof cache. The two occurences of the | 2705 // This is the inlined call site instanceof cache. The two occurences of the |
2706 // hole value will be patched to the last map/result pair generated by the | 2706 // hole value will be patched to the last map/result pair generated by the |
2707 // instanceof stub. | 2707 // instanceof stub. |
2708 Label cache_miss; | 2708 Label cache_miss; |
2709 Register map = temp; | 2709 Register map = temp; |
2710 __ ld(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 2710 __ ld(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2744 // Here result has either true or false. Deferred code also produces true or | 2744 // Here result has either true or false. Deferred code also produces true or |
2745 // false object. | 2745 // false object. |
2746 __ bind(deferred->exit()); | 2746 __ bind(deferred->exit()); |
2747 __ bind(&done); | 2747 __ bind(&done); |
2748 } | 2748 } |
2749 | 2749 |
2750 | 2750 |
2751 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, | 2751 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, |
2752 Label* map_check) { | 2752 Label* map_check) { |
2753 Register result = ToRegister(instr->result()); | 2753 Register result = ToRegister(instr->result()); |
2754 ASSERT(result.is(v0)); | 2754 DCHECK(result.is(v0)); |
2755 | 2755 |
2756 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; | 2756 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; |
2757 flags = static_cast<InstanceofStub::Flags>( | 2757 flags = static_cast<InstanceofStub::Flags>( |
2758 flags | InstanceofStub::kArgsInRegisters); | 2758 flags | InstanceofStub::kArgsInRegisters); |
2759 flags = static_cast<InstanceofStub::Flags>( | 2759 flags = static_cast<InstanceofStub::Flags>( |
2760 flags | InstanceofStub::kCallSiteInlineCheck); | 2760 flags | InstanceofStub::kCallSiteInlineCheck); |
2761 flags = static_cast<InstanceofStub::Flags>( | 2761 flags = static_cast<InstanceofStub::Flags>( |
2762 flags | InstanceofStub::kReturnTrueFalseObject); | 2762 flags | InstanceofStub::kReturnTrueFalseObject); |
2763 InstanceofStub stub(isolate(), flags); | 2763 InstanceofStub stub(isolate(), flags); |
2764 | 2764 |
2765 PushSafepointRegistersScope scope(this); | 2765 PushSafepointRegistersScope scope(this); |
2766 LoadContextFromDeferred(instr->context()); | 2766 LoadContextFromDeferred(instr->context()); |
2767 | 2767 |
2768 // Get the temp register reserved by the instruction. This needs to be a4 as | 2768 // Get the temp register reserved by the instruction. This needs to be a4 as |
2769 // its slot of the pushing of safepoint registers is used to communicate the | 2769 // its slot of the pushing of safepoint registers is used to communicate the |
2770 // offset to the location of the map check. | 2770 // offset to the location of the map check. |
2771 Register temp = ToRegister(instr->temp()); | 2771 Register temp = ToRegister(instr->temp()); |
2772 ASSERT(temp.is(a4)); | 2772 DCHECK(temp.is(a4)); |
2773 __ li(InstanceofStub::right(), instr->function()); | 2773 __ li(InstanceofStub::right(), instr->function()); |
2774 static const int kAdditionalDelta = 13; | 2774 static const int kAdditionalDelta = 13; |
2775 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; | 2775 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; |
2776 Label before_push_delta; | 2776 Label before_push_delta; |
2777 __ bind(&before_push_delta); | 2777 __ bind(&before_push_delta); |
2778 { | 2778 { |
2779 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | 2779 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
2780 __ li(temp, Operand(delta * kIntSize), CONSTANT_SIZE); | 2780 __ li(temp, Operand(delta * kIntSize), CONSTANT_SIZE); |
2781 __ StoreToSafepointRegisterSlot(temp, temp); | 2781 __ StoreToSafepointRegisterSlot(temp, temp); |
2782 } | 2782 } |
2783 CallCodeGeneric(stub.GetCode(), | 2783 CallCodeGeneric(stub.GetCode(), |
2784 RelocInfo::CODE_TARGET, | 2784 RelocInfo::CODE_TARGET, |
2785 instr, | 2785 instr, |
2786 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 2786 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
2787 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); | 2787 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); |
2788 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 2788 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
2789 // Put the result value into the result register slot and | 2789 // Put the result value into the result register slot and |
2790 // restore all registers. | 2790 // restore all registers. |
2791 __ StoreToSafepointRegisterSlot(result, result); | 2791 __ StoreToSafepointRegisterSlot(result, result); |
2792 } | 2792 } |
2793 | 2793 |
2794 | 2794 |
2795 void LCodeGen::DoCmpT(LCmpT* instr) { | 2795 void LCodeGen::DoCmpT(LCmpT* instr) { |
2796 ASSERT(ToRegister(instr->context()).is(cp)); | 2796 DCHECK(ToRegister(instr->context()).is(cp)); |
2797 Token::Value op = instr->op(); | 2797 Token::Value op = instr->op(); |
2798 | 2798 |
2799 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); | 2799 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
2800 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 2800 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
2801 // On MIPS there is no need for a "no inlined smi code" marker (nop). | 2801 // On MIPS there is no need for a "no inlined smi code" marker (nop). |
2802 | 2802 |
2803 Condition condition = ComputeCompareCondition(op); | 2803 Condition condition = ComputeCompareCondition(op); |
2804 // A minor optimization that relies on LoadRoot always emitting one | 2804 // A minor optimization that relies on LoadRoot always emitting one |
2805 // instruction. | 2805 // instruction. |
2806 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); | 2806 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); |
2807 Label done, check; | 2807 Label done, check; |
2808 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); | 2808 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); |
2809 __ bind(&check); | 2809 __ bind(&check); |
2810 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); | 2810 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); |
2811 ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check)); | 2811 DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check)); |
2812 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); | 2812 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); |
2813 __ bind(&done); | 2813 __ bind(&done); |
2814 } | 2814 } |
2815 | 2815 |
2816 | 2816 |
2817 void LCodeGen::DoReturn(LReturn* instr) { | 2817 void LCodeGen::DoReturn(LReturn* instr) { |
2818 if (FLAG_trace && info()->IsOptimizing()) { | 2818 if (FLAG_trace && info()->IsOptimizing()) { |
2819 // Push the return value on the stack as the parameter. | 2819 // Push the return value on the stack as the parameter. |
2820 // Runtime::TraceExit returns its parameter in v0. We're leaving the code | 2820 // Runtime::TraceExit returns its parameter in v0. We're leaving the code |
2821 // managed by the register allocator and tearing down the frame, it's | 2821 // managed by the register allocator and tearing down the frame, it's |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2860 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 2860 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
2861 __ ld(result, FieldMemOperand(at, Cell::kValueOffset)); | 2861 __ ld(result, FieldMemOperand(at, Cell::kValueOffset)); |
2862 if (instr->hydrogen()->RequiresHoleCheck()) { | 2862 if (instr->hydrogen()->RequiresHoleCheck()) { |
2863 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2863 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
2864 DeoptimizeIf(eq, instr->environment(), result, Operand(at)); | 2864 DeoptimizeIf(eq, instr->environment(), result, Operand(at)); |
2865 } | 2865 } |
2866 } | 2866 } |
2867 | 2867 |
2868 | 2868 |
2869 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { | 2869 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { |
2870 ASSERT(ToRegister(instr->context()).is(cp)); | 2870 DCHECK(ToRegister(instr->context()).is(cp)); |
2871 ASSERT(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); | 2871 DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); |
2872 ASSERT(ToRegister(instr->result()).is(v0)); | 2872 DCHECK(ToRegister(instr->result()).is(v0)); |
2873 | 2873 |
2874 __ li(LoadIC::NameRegister(), Operand(instr->name())); | 2874 __ li(LoadIC::NameRegister(), Operand(instr->name())); |
2875 if (FLAG_vector_ics) { | 2875 if (FLAG_vector_ics) { |
2876 Register vector = ToRegister(instr->temp_vector()); | 2876 Register vector = ToRegister(instr->temp_vector()); |
2877 ASSERT(vector.is(LoadIC::VectorRegister())); | 2877 DCHECK(vector.is(LoadIC::VectorRegister())); |
2878 __ li(vector, instr->hydrogen()->feedback_vector()); | 2878 __ li(vector, instr->hydrogen()->feedback_vector()); |
2879 // No need to allocate this register. | 2879 // No need to allocate this register. |
2880 ASSERT(LoadIC::SlotRegister().is(a0)); | 2880 DCHECK(LoadIC::SlotRegister().is(a0)); |
2881 __ li(LoadIC::SlotRegister(), | 2881 __ li(LoadIC::SlotRegister(), |
2882 Operand(Smi::FromInt(instr->hydrogen()->slot()))); | 2882 Operand(Smi::FromInt(instr->hydrogen()->slot()))); |
2883 } | 2883 } |
2884 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; | 2884 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; |
2885 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); | 2885 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); |
2886 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 2886 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
2887 } | 2887 } |
2888 | 2888 |
2889 | 2889 |
2890 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { | 2890 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { |
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3007 STATIC_ASSERT(kSmiTag == 0); | 3007 STATIC_ASSERT(kSmiTag == 0); |
3008 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); | 3008 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
3009 offset += kPointerSize / 2; | 3009 offset += kPointerSize / 2; |
3010 representation = Representation::Integer32(); | 3010 representation = Representation::Integer32(); |
3011 } | 3011 } |
3012 __ Load(result, FieldMemOperand(object, offset), representation); | 3012 __ Load(result, FieldMemOperand(object, offset), representation); |
3013 } | 3013 } |
3014 | 3014 |
3015 | 3015 |
3016 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { | 3016 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { |
3017 ASSERT(ToRegister(instr->context()).is(cp)); | 3017 DCHECK(ToRegister(instr->context()).is(cp)); |
3018 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); | 3018 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); |
3019 ASSERT(ToRegister(instr->result()).is(v0)); | 3019 DCHECK(ToRegister(instr->result()).is(v0)); |
3020 | 3020 |
3021 // Name is always in a2. | 3021 // Name is always in a2. |
3022 __ li(LoadIC::NameRegister(), Operand(instr->name())); | 3022 __ li(LoadIC::NameRegister(), Operand(instr->name())); |
3023 if (FLAG_vector_ics) { | 3023 if (FLAG_vector_ics) { |
3024 Register vector = ToRegister(instr->temp_vector()); | 3024 Register vector = ToRegister(instr->temp_vector()); |
3025 ASSERT(vector.is(LoadIC::VectorRegister())); | 3025 DCHECK(vector.is(LoadIC::VectorRegister())); |
3026 __ li(vector, instr->hydrogen()->feedback_vector()); | 3026 __ li(vector, instr->hydrogen()->feedback_vector()); |
3027 // No need to allocate this register. | 3027 // No need to allocate this register. |
3028 ASSERT(LoadIC::SlotRegister().is(a0)); | 3028 DCHECK(LoadIC::SlotRegister().is(a0)); |
3029 __ li(LoadIC::SlotRegister(), | 3029 __ li(LoadIC::SlotRegister(), |
3030 Operand(Smi::FromInt(instr->hydrogen()->slot()))); | 3030 Operand(Smi::FromInt(instr->hydrogen()->slot()))); |
3031 } | 3031 } |
3032 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); | 3032 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); |
3033 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 3033 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
3034 } | 3034 } |
3035 | 3035 |
3036 | 3036 |
3037 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { | 3037 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
3038 Register scratch = scratch0(); | 3038 Register scratch = scratch0(); |
(...skipping 241 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3280 __ daddu(scratch, elements, scratch); | 3280 __ daddu(scratch, elements, scratch); |
3281 } else { | 3281 } else { |
3282 __ dsll(scratch, key, kPointerSizeLog2); | 3282 __ dsll(scratch, key, kPointerSizeLog2); |
3283 __ daddu(scratch, elements, scratch); | 3283 __ daddu(scratch, elements, scratch); |
3284 } | 3284 } |
3285 } | 3285 } |
3286 | 3286 |
3287 Representation representation = hinstr->representation(); | 3287 Representation representation = hinstr->representation(); |
3288 if (representation.IsInteger32() && SmiValuesAre32Bits() && | 3288 if (representation.IsInteger32() && SmiValuesAre32Bits() && |
3289 hinstr->elements_kind() == FAST_SMI_ELEMENTS) { | 3289 hinstr->elements_kind() == FAST_SMI_ELEMENTS) { |
3290 ASSERT(!hinstr->RequiresHoleCheck()); | 3290 DCHECK(!hinstr->RequiresHoleCheck()); |
3291 if (FLAG_debug_code) { | 3291 if (FLAG_debug_code) { |
3292 Register temp = scratch1(); | 3292 Register temp = scratch1(); |
3293 __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); | 3293 __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); |
3294 __ AssertSmi(temp); | 3294 __ AssertSmi(temp); |
3295 } | 3295 } |
3296 | 3296 |
3297 // Read int value directly from upper half of the smi. | 3297 // Read int value directly from upper half of the smi. |
3298 STATIC_ASSERT(kSmiTag == 0); | 3298 STATIC_ASSERT(kSmiTag == 0); |
3299 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); | 3299 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
3300 offset += kPointerSize / 2; | 3300 offset += kPointerSize / 2; |
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3363 } else { | 3363 } else { |
3364 __ dsra(scratch0(), key, -shift_size); | 3364 __ dsra(scratch0(), key, -shift_size); |
3365 } | 3365 } |
3366 __ Daddu(scratch0(), base, scratch0()); | 3366 __ Daddu(scratch0(), base, scratch0()); |
3367 return MemOperand(scratch0(), base_offset); | 3367 return MemOperand(scratch0(), base_offset); |
3368 } | 3368 } |
3369 } | 3369 } |
3370 | 3370 |
3371 | 3371 |
3372 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { | 3372 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { |
3373 ASSERT(ToRegister(instr->context()).is(cp)); | 3373 DCHECK(ToRegister(instr->context()).is(cp)); |
3374 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); | 3374 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); |
3375 ASSERT(ToRegister(instr->key()).is(LoadIC::NameRegister())); | 3375 DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister())); |
3376 | 3376 |
3377 if (FLAG_vector_ics) { | 3377 if (FLAG_vector_ics) { |
3378 Register vector = ToRegister(instr->temp_vector()); | 3378 Register vector = ToRegister(instr->temp_vector()); |
3379 ASSERT(vector.is(LoadIC::VectorRegister())); | 3379 DCHECK(vector.is(LoadIC::VectorRegister())); |
3380 __ li(vector, instr->hydrogen()->feedback_vector()); | 3380 __ li(vector, instr->hydrogen()->feedback_vector()); |
3381 // No need to allocate this register. | 3381 // No need to allocate this register. |
3382 ASSERT(LoadIC::SlotRegister().is(a0)); | 3382 DCHECK(LoadIC::SlotRegister().is(a0)); |
3383 __ li(LoadIC::SlotRegister(), | 3383 __ li(LoadIC::SlotRegister(), |
3384 Operand(Smi::FromInt(instr->hydrogen()->slot()))); | 3384 Operand(Smi::FromInt(instr->hydrogen()->slot()))); |
3385 } | 3385 } |
3386 | 3386 |
3387 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); | 3387 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); |
3388 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 3388 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
3389 } | 3389 } |
3390 | 3390 |
3391 | 3391 |
3392 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { | 3392 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { |
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3496 } | 3496 } |
3497 } | 3497 } |
3498 | 3498 |
3499 | 3499 |
3500 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { | 3500 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { |
3501 Register receiver = ToRegister(instr->receiver()); | 3501 Register receiver = ToRegister(instr->receiver()); |
3502 Register function = ToRegister(instr->function()); | 3502 Register function = ToRegister(instr->function()); |
3503 Register length = ToRegister(instr->length()); | 3503 Register length = ToRegister(instr->length()); |
3504 Register elements = ToRegister(instr->elements()); | 3504 Register elements = ToRegister(instr->elements()); |
3505 Register scratch = scratch0(); | 3505 Register scratch = scratch0(); |
3506 ASSERT(receiver.is(a0)); // Used for parameter count. | 3506 DCHECK(receiver.is(a0)); // Used for parameter count. |
3507 ASSERT(function.is(a1)); // Required by InvokeFunction. | 3507 DCHECK(function.is(a1)); // Required by InvokeFunction. |
3508 ASSERT(ToRegister(instr->result()).is(v0)); | 3508 DCHECK(ToRegister(instr->result()).is(v0)); |
3509 | 3509 |
3510 // Copy the arguments to this function possibly from the | 3510 // Copy the arguments to this function possibly from the |
3511 // adaptor frame below it. | 3511 // adaptor frame below it. |
3512 const uint32_t kArgumentsLimit = 1 * KB; | 3512 const uint32_t kArgumentsLimit = 1 * KB; |
3513 DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit)); | 3513 DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit)); |
3514 | 3514 |
3515 // Push the receiver and use the register to keep the original | 3515 // Push the receiver and use the register to keep the original |
3516 // number of arguments. | 3516 // number of arguments. |
3517 __ push(receiver); | 3517 __ push(receiver); |
3518 __ Move(receiver, length); | 3518 __ Move(receiver, length); |
3519 // The arguments are at a one pointer size offset from elements. | 3519 // The arguments are at a one pointer size offset from elements. |
3520 __ Daddu(elements, elements, Operand(1 * kPointerSize)); | 3520 __ Daddu(elements, elements, Operand(1 * kPointerSize)); |
3521 | 3521 |
3522 // Loop through the arguments pushing them onto the execution | 3522 // Loop through the arguments pushing them onto the execution |
3523 // stack. | 3523 // stack. |
3524 Label invoke, loop; | 3524 Label invoke, loop; |
3525 // length is a small non-negative integer, due to the test above. | 3525 // length is a small non-negative integer, due to the test above. |
3526 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg)); | 3526 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg)); |
3527 __ dsll(scratch, length, kPointerSizeLog2); | 3527 __ dsll(scratch, length, kPointerSizeLog2); |
3528 __ bind(&loop); | 3528 __ bind(&loop); |
3529 __ Daddu(scratch, elements, scratch); | 3529 __ Daddu(scratch, elements, scratch); |
3530 __ ld(scratch, MemOperand(scratch)); | 3530 __ ld(scratch, MemOperand(scratch)); |
3531 __ push(scratch); | 3531 __ push(scratch); |
3532 __ Dsubu(length, length, Operand(1)); | 3532 __ Dsubu(length, length, Operand(1)); |
3533 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg)); | 3533 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg)); |
3534 __ dsll(scratch, length, kPointerSizeLog2); | 3534 __ dsll(scratch, length, kPointerSizeLog2); |
3535 | 3535 |
3536 __ bind(&invoke); | 3536 __ bind(&invoke); |
3537 ASSERT(instr->HasPointerMap()); | 3537 DCHECK(instr->HasPointerMap()); |
3538 LPointerMap* pointers = instr->pointer_map(); | 3538 LPointerMap* pointers = instr->pointer_map(); |
3539 SafepointGenerator safepoint_generator( | 3539 SafepointGenerator safepoint_generator( |
3540 this, pointers, Safepoint::kLazyDeopt); | 3540 this, pointers, Safepoint::kLazyDeopt); |
3541 // The number of arguments is stored in receiver which is a0, as expected | 3541 // The number of arguments is stored in receiver which is a0, as expected |
3542 // by InvokeFunction. | 3542 // by InvokeFunction. |
3543 ParameterCount actual(receiver); | 3543 ParameterCount actual(receiver); |
3544 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); | 3544 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); |
3545 } | 3545 } |
3546 | 3546 |
3547 | 3547 |
(...skipping 19 matching lines...) Expand all Loading... |
3567 } | 3567 } |
3568 | 3568 |
3569 | 3569 |
3570 void LCodeGen::DoContext(LContext* instr) { | 3570 void LCodeGen::DoContext(LContext* instr) { |
3571 // If there is a non-return use, the context must be moved to a register. | 3571 // If there is a non-return use, the context must be moved to a register. |
3572 Register result = ToRegister(instr->result()); | 3572 Register result = ToRegister(instr->result()); |
3573 if (info()->IsOptimizing()) { | 3573 if (info()->IsOptimizing()) { |
3574 __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 3574 __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
3575 } else { | 3575 } else { |
3576 // If there is no frame, the context must be in cp. | 3576 // If there is no frame, the context must be in cp. |
3577 ASSERT(result.is(cp)); | 3577 DCHECK(result.is(cp)); |
3578 } | 3578 } |
3579 } | 3579 } |
3580 | 3580 |
3581 | 3581 |
3582 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { | 3582 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { |
3583 ASSERT(ToRegister(instr->context()).is(cp)); | 3583 DCHECK(ToRegister(instr->context()).is(cp)); |
3584 __ li(scratch0(), instr->hydrogen()->pairs()); | 3584 __ li(scratch0(), instr->hydrogen()->pairs()); |
3585 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); | 3585 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); |
3586 // The context is the first argument. | 3586 // The context is the first argument. |
3587 __ Push(cp, scratch0(), scratch1()); | 3587 __ Push(cp, scratch0(), scratch1()); |
3588 CallRuntime(Runtime::kDeclareGlobals, 3, instr); | 3588 CallRuntime(Runtime::kDeclareGlobals, 3, instr); |
3589 } | 3589 } |
3590 | 3590 |
3591 | 3591 |
3592 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, | 3592 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
3593 int formal_parameter_count, | 3593 int formal_parameter_count, |
(...skipping 30 matching lines...) Expand all Loading... |
3624 } else { | 3624 } else { |
3625 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | 3625 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
3626 ParameterCount count(arity); | 3626 ParameterCount count(arity); |
3627 ParameterCount expected(formal_parameter_count); | 3627 ParameterCount expected(formal_parameter_count); |
3628 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); | 3628 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); |
3629 } | 3629 } |
3630 } | 3630 } |
3631 | 3631 |
3632 | 3632 |
3633 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | 3633 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
3634 ASSERT(instr->context() != NULL); | 3634 DCHECK(instr->context() != NULL); |
3635 ASSERT(ToRegister(instr->context()).is(cp)); | 3635 DCHECK(ToRegister(instr->context()).is(cp)); |
3636 Register input = ToRegister(instr->value()); | 3636 Register input = ToRegister(instr->value()); |
3637 Register result = ToRegister(instr->result()); | 3637 Register result = ToRegister(instr->result()); |
3638 Register scratch = scratch0(); | 3638 Register scratch = scratch0(); |
3639 | 3639 |
3640 // Deoptimize if not a heap number. | 3640 // Deoptimize if not a heap number. |
3641 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3641 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
3642 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3642 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
3643 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); | 3643 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); |
3644 | 3644 |
3645 Label done; | 3645 Label done; |
(...skipping 221 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3867 DoubleRegister result = ToDoubleRegister(instr->result()); | 3867 DoubleRegister result = ToDoubleRegister(instr->result()); |
3868 __ sqrt_d(result, input); | 3868 __ sqrt_d(result, input); |
3869 } | 3869 } |
3870 | 3870 |
3871 | 3871 |
3872 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { | 3872 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { |
3873 DoubleRegister input = ToDoubleRegister(instr->value()); | 3873 DoubleRegister input = ToDoubleRegister(instr->value()); |
3874 DoubleRegister result = ToDoubleRegister(instr->result()); | 3874 DoubleRegister result = ToDoubleRegister(instr->result()); |
3875 DoubleRegister temp = ToDoubleRegister(instr->temp()); | 3875 DoubleRegister temp = ToDoubleRegister(instr->temp()); |
3876 | 3876 |
3877 ASSERT(!input.is(result)); | 3877 DCHECK(!input.is(result)); |
3878 | 3878 |
3879 // Note that according to ECMA-262 15.8.2.13: | 3879 // Note that according to ECMA-262 15.8.2.13: |
3880 // Math.pow(-Infinity, 0.5) == Infinity | 3880 // Math.pow(-Infinity, 0.5) == Infinity |
3881 // Math.sqrt(-Infinity) == NaN | 3881 // Math.sqrt(-Infinity) == NaN |
3882 Label done; | 3882 Label done; |
3883 __ Move(temp, -V8_INFINITY); | 3883 __ Move(temp, -V8_INFINITY); |
3884 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input); | 3884 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input); |
3885 // Set up Infinity in the delay slot. | 3885 // Set up Infinity in the delay slot. |
3886 // result is overwritten if the branch is not taken. | 3886 // result is overwritten if the branch is not taken. |
3887 __ neg_d(result, temp); | 3887 __ neg_d(result, temp); |
3888 | 3888 |
3889 // Add +0 to convert -0 to +0. | 3889 // Add +0 to convert -0 to +0. |
3890 __ add_d(result, input, kDoubleRegZero); | 3890 __ add_d(result, input, kDoubleRegZero); |
3891 __ sqrt_d(result, result); | 3891 __ sqrt_d(result, result); |
3892 __ bind(&done); | 3892 __ bind(&done); |
3893 } | 3893 } |
3894 | 3894 |
3895 | 3895 |
3896 void LCodeGen::DoPower(LPower* instr) { | 3896 void LCodeGen::DoPower(LPower* instr) { |
3897 Representation exponent_type = instr->hydrogen()->right()->representation(); | 3897 Representation exponent_type = instr->hydrogen()->right()->representation(); |
3898 // Having marked this as a call, we can use any registers. | 3898 // Having marked this as a call, we can use any registers. |
3899 // Just make sure that the input/output registers are the expected ones. | 3899 // Just make sure that the input/output registers are the expected ones. |
3900 ASSERT(!instr->right()->IsDoubleRegister() || | 3900 DCHECK(!instr->right()->IsDoubleRegister() || |
3901 ToDoubleRegister(instr->right()).is(f4)); | 3901 ToDoubleRegister(instr->right()).is(f4)); |
3902 ASSERT(!instr->right()->IsRegister() || | 3902 DCHECK(!instr->right()->IsRegister() || |
3903 ToRegister(instr->right()).is(a2)); | 3903 ToRegister(instr->right()).is(a2)); |
3904 ASSERT(ToDoubleRegister(instr->left()).is(f2)); | 3904 DCHECK(ToDoubleRegister(instr->left()).is(f2)); |
3905 ASSERT(ToDoubleRegister(instr->result()).is(f0)); | 3905 DCHECK(ToDoubleRegister(instr->result()).is(f0)); |
3906 | 3906 |
3907 if (exponent_type.IsSmi()) { | 3907 if (exponent_type.IsSmi()) { |
3908 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3908 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
3909 __ CallStub(&stub); | 3909 __ CallStub(&stub); |
3910 } else if (exponent_type.IsTagged()) { | 3910 } else if (exponent_type.IsTagged()) { |
3911 Label no_deopt; | 3911 Label no_deopt; |
3912 __ JumpIfSmi(a2, &no_deopt); | 3912 __ JumpIfSmi(a2, &no_deopt); |
3913 __ ld(a7, FieldMemOperand(a2, HeapObject::kMapOffset)); | 3913 __ ld(a7, FieldMemOperand(a2, HeapObject::kMapOffset)); |
3914 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3914 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
3915 DeoptimizeIf(ne, instr->environment(), a7, Operand(at)); | 3915 DeoptimizeIf(ne, instr->environment(), a7, Operand(at)); |
3916 __ bind(&no_deopt); | 3916 __ bind(&no_deopt); |
3917 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3917 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
3918 __ CallStub(&stub); | 3918 __ CallStub(&stub); |
3919 } else if (exponent_type.IsInteger32()) { | 3919 } else if (exponent_type.IsInteger32()) { |
3920 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3920 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
3921 __ CallStub(&stub); | 3921 __ CallStub(&stub); |
3922 } else { | 3922 } else { |
3923 ASSERT(exponent_type.IsDouble()); | 3923 DCHECK(exponent_type.IsDouble()); |
3924 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3924 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
3925 __ CallStub(&stub); | 3925 __ CallStub(&stub); |
3926 } | 3926 } |
3927 } | 3927 } |
3928 | 3928 |
3929 | 3929 |
3930 void LCodeGen::DoMathExp(LMathExp* instr) { | 3930 void LCodeGen::DoMathExp(LMathExp* instr) { |
3931 DoubleRegister input = ToDoubleRegister(instr->value()); | 3931 DoubleRegister input = ToDoubleRegister(instr->value()); |
3932 DoubleRegister result = ToDoubleRegister(instr->result()); | 3932 DoubleRegister result = ToDoubleRegister(instr->result()); |
3933 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); | 3933 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); |
(...skipping 17 matching lines...) Expand all Loading... |
3951 | 3951 |
3952 | 3952 |
3953 void LCodeGen::DoMathClz32(LMathClz32* instr) { | 3953 void LCodeGen::DoMathClz32(LMathClz32* instr) { |
3954 Register input = ToRegister(instr->value()); | 3954 Register input = ToRegister(instr->value()); |
3955 Register result = ToRegister(instr->result()); | 3955 Register result = ToRegister(instr->result()); |
3956 __ Clz(result, input); | 3956 __ Clz(result, input); |
3957 } | 3957 } |
3958 | 3958 |
3959 | 3959 |
3960 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { | 3960 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { |
3961 ASSERT(ToRegister(instr->context()).is(cp)); | 3961 DCHECK(ToRegister(instr->context()).is(cp)); |
3962 ASSERT(ToRegister(instr->function()).is(a1)); | 3962 DCHECK(ToRegister(instr->function()).is(a1)); |
3963 ASSERT(instr->HasPointerMap()); | 3963 DCHECK(instr->HasPointerMap()); |
3964 | 3964 |
3965 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); | 3965 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); |
3966 if (known_function.is_null()) { | 3966 if (known_function.is_null()) { |
3967 LPointerMap* pointers = instr->pointer_map(); | 3967 LPointerMap* pointers = instr->pointer_map(); |
3968 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | 3968 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
3969 ParameterCount count(instr->arity()); | 3969 ParameterCount count(instr->arity()); |
3970 __ InvokeFunction(a1, count, CALL_FUNCTION, generator); | 3970 __ InvokeFunction(a1, count, CALL_FUNCTION, generator); |
3971 } else { | 3971 } else { |
3972 CallKnownFunction(known_function, | 3972 CallKnownFunction(known_function, |
3973 instr->hydrogen()->formal_parameter_count(), | 3973 instr->hydrogen()->formal_parameter_count(), |
3974 instr->arity(), | 3974 instr->arity(), |
3975 instr, | 3975 instr, |
3976 A1_CONTAINS_TARGET); | 3976 A1_CONTAINS_TARGET); |
3977 } | 3977 } |
3978 } | 3978 } |
3979 | 3979 |
3980 | 3980 |
3981 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { | 3981 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { |
3982 ASSERT(ToRegister(instr->result()).is(v0)); | 3982 DCHECK(ToRegister(instr->result()).is(v0)); |
3983 | 3983 |
3984 LPointerMap* pointers = instr->pointer_map(); | 3984 LPointerMap* pointers = instr->pointer_map(); |
3985 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | 3985 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
3986 | 3986 |
3987 if (instr->target()->IsConstantOperand()) { | 3987 if (instr->target()->IsConstantOperand()) { |
3988 LConstantOperand* target = LConstantOperand::cast(instr->target()); | 3988 LConstantOperand* target = LConstantOperand::cast(instr->target()); |
3989 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); | 3989 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); |
3990 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); | 3990 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); |
3991 __ Call(code, RelocInfo::CODE_TARGET); | 3991 __ Call(code, RelocInfo::CODE_TARGET); |
3992 } else { | 3992 } else { |
3993 ASSERT(instr->target()->IsRegister()); | 3993 DCHECK(instr->target()->IsRegister()); |
3994 Register target = ToRegister(instr->target()); | 3994 Register target = ToRegister(instr->target()); |
3995 generator.BeforeCall(__ CallSize(target)); | 3995 generator.BeforeCall(__ CallSize(target)); |
3996 __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); | 3996 __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); |
3997 __ Call(target); | 3997 __ Call(target); |
3998 } | 3998 } |
3999 generator.AfterCall(); | 3999 generator.AfterCall(); |
4000 } | 4000 } |
4001 | 4001 |
4002 | 4002 |
4003 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { | 4003 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { |
4004 ASSERT(ToRegister(instr->function()).is(a1)); | 4004 DCHECK(ToRegister(instr->function()).is(a1)); |
4005 ASSERT(ToRegister(instr->result()).is(v0)); | 4005 DCHECK(ToRegister(instr->result()).is(v0)); |
4006 | 4006 |
4007 if (instr->hydrogen()->pass_argument_count()) { | 4007 if (instr->hydrogen()->pass_argument_count()) { |
4008 __ li(a0, Operand(instr->arity())); | 4008 __ li(a0, Operand(instr->arity())); |
4009 } | 4009 } |
4010 | 4010 |
4011 // Change context. | 4011 // Change context. |
4012 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | 4012 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
4013 | 4013 |
4014 // Load the code entry address | 4014 // Load the code entry address |
4015 __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); | 4015 __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); |
4016 __ Call(at); | 4016 __ Call(at); |
4017 | 4017 |
4018 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | 4018 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
4019 } | 4019 } |
4020 | 4020 |
4021 | 4021 |
4022 void LCodeGen::DoCallFunction(LCallFunction* instr) { | 4022 void LCodeGen::DoCallFunction(LCallFunction* instr) { |
4023 ASSERT(ToRegister(instr->context()).is(cp)); | 4023 DCHECK(ToRegister(instr->context()).is(cp)); |
4024 ASSERT(ToRegister(instr->function()).is(a1)); | 4024 DCHECK(ToRegister(instr->function()).is(a1)); |
4025 ASSERT(ToRegister(instr->result()).is(v0)); | 4025 DCHECK(ToRegister(instr->result()).is(v0)); |
4026 | 4026 |
4027 int arity = instr->arity(); | 4027 int arity = instr->arity(); |
4028 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); | 4028 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); |
4029 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4029 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4030 } | 4030 } |
4031 | 4031 |
4032 | 4032 |
4033 void LCodeGen::DoCallNew(LCallNew* instr) { | 4033 void LCodeGen::DoCallNew(LCallNew* instr) { |
4034 ASSERT(ToRegister(instr->context()).is(cp)); | 4034 DCHECK(ToRegister(instr->context()).is(cp)); |
4035 ASSERT(ToRegister(instr->constructor()).is(a1)); | 4035 DCHECK(ToRegister(instr->constructor()).is(a1)); |
4036 ASSERT(ToRegister(instr->result()).is(v0)); | 4036 DCHECK(ToRegister(instr->result()).is(v0)); |
4037 | 4037 |
4038 __ li(a0, Operand(instr->arity())); | 4038 __ li(a0, Operand(instr->arity())); |
4039 // No cell in a2 for construct type feedback in optimized code | 4039 // No cell in a2 for construct type feedback in optimized code |
4040 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 4040 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
4041 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); | 4041 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); |
4042 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); | 4042 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); |
4043 } | 4043 } |
4044 | 4044 |
4045 | 4045 |
4046 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { | 4046 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { |
4047 ASSERT(ToRegister(instr->context()).is(cp)); | 4047 DCHECK(ToRegister(instr->context()).is(cp)); |
4048 ASSERT(ToRegister(instr->constructor()).is(a1)); | 4048 DCHECK(ToRegister(instr->constructor()).is(a1)); |
4049 ASSERT(ToRegister(instr->result()).is(v0)); | 4049 DCHECK(ToRegister(instr->result()).is(v0)); |
4050 | 4050 |
4051 __ li(a0, Operand(instr->arity())); | 4051 __ li(a0, Operand(instr->arity())); |
4052 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 4052 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
4053 ElementsKind kind = instr->hydrogen()->elements_kind(); | 4053 ElementsKind kind = instr->hydrogen()->elements_kind(); |
4054 AllocationSiteOverrideMode override_mode = | 4054 AllocationSiteOverrideMode override_mode = |
4055 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) | 4055 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) |
4056 ? DISABLE_ALLOCATION_SITES | 4056 ? DISABLE_ALLOCATION_SITES |
4057 : DONT_OVERRIDE; | 4057 : DONT_OVERRIDE; |
4058 | 4058 |
4059 if (instr->arity() == 0) { | 4059 if (instr->arity() == 0) { |
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4125 int offset = access.offset(); | 4125 int offset = access.offset(); |
4126 if (access.IsExternalMemory()) { | 4126 if (access.IsExternalMemory()) { |
4127 Register value = ToRegister(instr->value()); | 4127 Register value = ToRegister(instr->value()); |
4128 MemOperand operand = MemOperand(object, offset); | 4128 MemOperand operand = MemOperand(object, offset); |
4129 __ Store(value, operand, representation); | 4129 __ Store(value, operand, representation); |
4130 return; | 4130 return; |
4131 } | 4131 } |
4132 | 4132 |
4133 __ AssertNotSmi(object); | 4133 __ AssertNotSmi(object); |
4134 | 4134 |
4135 ASSERT(!representation.IsSmi() || | 4135 DCHECK(!representation.IsSmi() || |
4136 !instr->value()->IsConstantOperand() || | 4136 !instr->value()->IsConstantOperand() || |
4137 IsSmi(LConstantOperand::cast(instr->value()))); | 4137 IsSmi(LConstantOperand::cast(instr->value()))); |
4138 if (representation.IsDouble()) { | 4138 if (representation.IsDouble()) { |
4139 ASSERT(access.IsInobject()); | 4139 DCHECK(access.IsInobject()); |
4140 ASSERT(!instr->hydrogen()->has_transition()); | 4140 DCHECK(!instr->hydrogen()->has_transition()); |
4141 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | 4141 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); |
4142 DoubleRegister value = ToDoubleRegister(instr->value()); | 4142 DoubleRegister value = ToDoubleRegister(instr->value()); |
4143 __ sdc1(value, FieldMemOperand(object, offset)); | 4143 __ sdc1(value, FieldMemOperand(object, offset)); |
4144 return; | 4144 return; |
4145 } | 4145 } |
4146 | 4146 |
4147 if (instr->hydrogen()->has_transition()) { | 4147 if (instr->hydrogen()->has_transition()) { |
4148 Handle<Map> transition = instr->hydrogen()->transition_map(); | 4148 Handle<Map> transition = instr->hydrogen()->transition_map(); |
4149 AddDeprecationDependency(transition); | 4149 AddDeprecationDependency(transition); |
4150 __ li(scratch1, Operand(transition)); | 4150 __ li(scratch1, Operand(transition)); |
4151 __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); | 4151 __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); |
(...skipping 10 matching lines...) Expand all Loading... |
4162 | 4162 |
4163 // Do the store. | 4163 // Do the store. |
4164 Register destination = object; | 4164 Register destination = object; |
4165 if (!access.IsInobject()) { | 4165 if (!access.IsInobject()) { |
4166 destination = scratch1; | 4166 destination = scratch1; |
4167 __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 4167 __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
4168 } | 4168 } |
4169 Register value = ToRegister(instr->value()); | 4169 Register value = ToRegister(instr->value()); |
4170 if (representation.IsSmi() && SmiValuesAre32Bits() && | 4170 if (representation.IsSmi() && SmiValuesAre32Bits() && |
4171 instr->hydrogen()->value()->representation().IsInteger32()) { | 4171 instr->hydrogen()->value()->representation().IsInteger32()) { |
4172 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); | 4172 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
4173 if (FLAG_debug_code) { | 4173 if (FLAG_debug_code) { |
4174 __ Load(scratch2, FieldMemOperand(destination, offset), representation); | 4174 __ Load(scratch2, FieldMemOperand(destination, offset), representation); |
4175 __ AssertSmi(scratch2); | 4175 __ AssertSmi(scratch2); |
4176 } | 4176 } |
4177 | 4177 |
4178 // Store int value directly to upper half of the smi. | 4178 // Store int value directly to upper half of the smi. |
4179 offset += kPointerSize / 2; | 4179 offset += kPointerSize / 2; |
4180 representation = Representation::Integer32(); | 4180 representation = Representation::Integer32(); |
4181 } | 4181 } |
4182 | 4182 |
4183 MemOperand operand = FieldMemOperand(destination, offset); | 4183 MemOperand operand = FieldMemOperand(destination, offset); |
4184 __ Store(value, operand, representation); | 4184 __ Store(value, operand, representation); |
4185 if (instr->hydrogen()->NeedsWriteBarrier()) { | 4185 if (instr->hydrogen()->NeedsWriteBarrier()) { |
4186 // Update the write barrier for the object for in-object properties. | 4186 // Update the write barrier for the object for in-object properties. |
4187 __ RecordWriteField(destination, | 4187 __ RecordWriteField(destination, |
4188 offset, | 4188 offset, |
4189 value, | 4189 value, |
4190 scratch2, | 4190 scratch2, |
4191 GetRAState(), | 4191 GetRAState(), |
4192 kSaveFPRegs, | 4192 kSaveFPRegs, |
4193 EMIT_REMEMBERED_SET, | 4193 EMIT_REMEMBERED_SET, |
4194 instr->hydrogen()->SmiCheckForWriteBarrier(), | 4194 instr->hydrogen()->SmiCheckForWriteBarrier(), |
4195 instr->hydrogen()->PointersToHereCheckForValue()); | 4195 instr->hydrogen()->PointersToHereCheckForValue()); |
4196 } | 4196 } |
4197 } | 4197 } |
4198 | 4198 |
4199 | 4199 |
4200 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { | 4200 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
4201 ASSERT(ToRegister(instr->context()).is(cp)); | 4201 DCHECK(ToRegister(instr->context()).is(cp)); |
4202 ASSERT(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); | 4202 DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); |
4203 ASSERT(ToRegister(instr->value()).is(StoreIC::ValueRegister())); | 4203 DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister())); |
4204 | 4204 |
4205 __ li(StoreIC::NameRegister(), Operand(instr->name())); | 4205 __ li(StoreIC::NameRegister(), Operand(instr->name())); |
4206 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); | 4206 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); |
4207 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 4207 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
4208 } | 4208 } |
4209 | 4209 |
4210 | 4210 |
4211 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { | 4211 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { |
4212 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; | 4212 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; |
4213 Operand operand((int64_t)0); | 4213 Operand operand((int64_t)0); |
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4347 if (constant_key & 0xF0000000) { | 4347 if (constant_key & 0xF0000000) { |
4348 Abort(kArrayIndexConstantValueTooBig); | 4348 Abort(kArrayIndexConstantValueTooBig); |
4349 } | 4349 } |
4350 __ Daddu(scratch, elements, | 4350 __ Daddu(scratch, elements, |
4351 Operand((constant_key << element_size_shift) + base_offset)); | 4351 Operand((constant_key << element_size_shift) + base_offset)); |
4352 } else { | 4352 } else { |
4353 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 4353 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
4354 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize)) | 4354 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize)) |
4355 : element_size_shift; | 4355 : element_size_shift; |
4356 __ Daddu(scratch, elements, Operand(base_offset)); | 4356 __ Daddu(scratch, elements, Operand(base_offset)); |
4357 ASSERT((shift_size == 3) || (shift_size == -29)); | 4357 DCHECK((shift_size == 3) || (shift_size == -29)); |
4358 if (shift_size == 3) { | 4358 if (shift_size == 3) { |
4359 __ dsll(at, ToRegister(instr->key()), 3); | 4359 __ dsll(at, ToRegister(instr->key()), 3); |
4360 } else if (shift_size == -29) { | 4360 } else if (shift_size == -29) { |
4361 __ dsra(at, ToRegister(instr->key()), 29); | 4361 __ dsra(at, ToRegister(instr->key()), 29); |
4362 } | 4362 } |
4363 __ Daddu(scratch, scratch, at); | 4363 __ Daddu(scratch, scratch, at); |
4364 } | 4364 } |
4365 | 4365 |
4366 if (instr->NeedsCanonicalization()) { | 4366 if (instr->NeedsCanonicalization()) { |
4367 Label is_nan; | 4367 Label is_nan; |
(...skipping 19 matching lines...) Expand all Loading... |
4387 Register value = ToRegister(instr->value()); | 4387 Register value = ToRegister(instr->value()); |
4388 Register elements = ToRegister(instr->elements()); | 4388 Register elements = ToRegister(instr->elements()); |
4389 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) | 4389 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) |
4390 : no_reg; | 4390 : no_reg; |
4391 Register scratch = scratch0(); | 4391 Register scratch = scratch0(); |
4392 Register store_base = scratch; | 4392 Register store_base = scratch; |
4393 int offset = instr->base_offset(); | 4393 int offset = instr->base_offset(); |
4394 | 4394 |
4395 // Do the store. | 4395 // Do the store. |
4396 if (instr->key()->IsConstantOperand()) { | 4396 if (instr->key()->IsConstantOperand()) { |
4397 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | 4397 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); |
4398 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); | 4398 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); |
4399 offset += ToInteger32(const_operand) * kPointerSize; | 4399 offset += ToInteger32(const_operand) * kPointerSize; |
4400 store_base = elements; | 4400 store_base = elements; |
4401 } else { | 4401 } else { |
4402 // Even though the HLoadKeyed instruction forces the input | 4402 // Even though the HLoadKeyed instruction forces the input |
4403 // representation for the key to be an integer, the input gets replaced | 4403 // representation for the key to be an integer, the input gets replaced |
4404 // during bound check elimination with the index argument to the bounds | 4404 // during bound check elimination with the index argument to the bounds |
4405 // check, which can be tagged, so that case must be handled here, too. | 4405 // check, which can be tagged, so that case must be handled here, too. |
4406 if (instr->hydrogen()->key()->representation().IsSmi()) { | 4406 if (instr->hydrogen()->key()->representation().IsSmi()) { |
4407 __ SmiScale(scratch, key, kPointerSizeLog2); | 4407 __ SmiScale(scratch, key, kPointerSizeLog2); |
4408 __ daddu(store_base, elements, scratch); | 4408 __ daddu(store_base, elements, scratch); |
4409 } else { | 4409 } else { |
4410 __ dsll(scratch, key, kPointerSizeLog2); | 4410 __ dsll(scratch, key, kPointerSizeLog2); |
4411 __ daddu(store_base, elements, scratch); | 4411 __ daddu(store_base, elements, scratch); |
4412 } | 4412 } |
4413 } | 4413 } |
4414 | 4414 |
4415 Representation representation = instr->hydrogen()->value()->representation(); | 4415 Representation representation = instr->hydrogen()->value()->representation(); |
4416 if (representation.IsInteger32() && SmiValuesAre32Bits()) { | 4416 if (representation.IsInteger32() && SmiValuesAre32Bits()) { |
4417 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); | 4417 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
4418 ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); | 4418 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); |
4419 if (FLAG_debug_code) { | 4419 if (FLAG_debug_code) { |
4420 Register temp = scratch1(); | 4420 Register temp = scratch1(); |
4421 __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); | 4421 __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); |
4422 __ AssertSmi(temp); | 4422 __ AssertSmi(temp); |
4423 } | 4423 } |
4424 | 4424 |
4425 // Store int value directly to upper half of the smi. | 4425 // Store int value directly to upper half of the smi. |
4426 STATIC_ASSERT(kSmiTag == 0); | 4426 STATIC_ASSERT(kSmiTag == 0); |
4427 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); | 4427 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
4428 offset += kPointerSize / 2; | 4428 offset += kPointerSize / 2; |
(...skipping 26 matching lines...) Expand all Loading... |
4455 DoStoreKeyedExternalArray(instr); | 4455 DoStoreKeyedExternalArray(instr); |
4456 } else if (instr->hydrogen()->value()->representation().IsDouble()) { | 4456 } else if (instr->hydrogen()->value()->representation().IsDouble()) { |
4457 DoStoreKeyedFixedDoubleArray(instr); | 4457 DoStoreKeyedFixedDoubleArray(instr); |
4458 } else { | 4458 } else { |
4459 DoStoreKeyedFixedArray(instr); | 4459 DoStoreKeyedFixedArray(instr); |
4460 } | 4460 } |
4461 } | 4461 } |
4462 | 4462 |
4463 | 4463 |
4464 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { | 4464 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { |
4465 ASSERT(ToRegister(instr->context()).is(cp)); | 4465 DCHECK(ToRegister(instr->context()).is(cp)); |
4466 ASSERT(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); | 4466 DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); |
4467 ASSERT(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); | 4467 DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); |
4468 ASSERT(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); | 4468 DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); |
4469 | 4469 |
4470 Handle<Code> ic = (instr->strict_mode() == STRICT) | 4470 Handle<Code> ic = (instr->strict_mode() == STRICT) |
4471 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() | 4471 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
4472 : isolate()->builtins()->KeyedStoreIC_Initialize(); | 4472 : isolate()->builtins()->KeyedStoreIC_Initialize(); |
4473 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 4473 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
4474 } | 4474 } |
4475 | 4475 |
4476 | 4476 |
4477 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { | 4477 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
4478 Register object_reg = ToRegister(instr->object()); | 4478 Register object_reg = ToRegister(instr->object()); |
(...skipping 12 matching lines...) Expand all Loading... |
4491 Register new_map_reg = ToRegister(instr->new_map_temp()); | 4491 Register new_map_reg = ToRegister(instr->new_map_temp()); |
4492 __ li(new_map_reg, Operand(to_map)); | 4492 __ li(new_map_reg, Operand(to_map)); |
4493 __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); | 4493 __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); |
4494 // Write barrier. | 4494 // Write barrier. |
4495 __ RecordWriteForMap(object_reg, | 4495 __ RecordWriteForMap(object_reg, |
4496 new_map_reg, | 4496 new_map_reg, |
4497 scratch, | 4497 scratch, |
4498 GetRAState(), | 4498 GetRAState(), |
4499 kDontSaveFPRegs); | 4499 kDontSaveFPRegs); |
4500 } else { | 4500 } else { |
4501 ASSERT(object_reg.is(a0)); | 4501 DCHECK(object_reg.is(a0)); |
4502 ASSERT(ToRegister(instr->context()).is(cp)); | 4502 DCHECK(ToRegister(instr->context()).is(cp)); |
4503 PushSafepointRegistersScope scope(this); | 4503 PushSafepointRegistersScope scope(this); |
4504 __ li(a1, Operand(to_map)); | 4504 __ li(a1, Operand(to_map)); |
4505 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; | 4505 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; |
4506 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); | 4506 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); |
4507 __ CallStub(&stub); | 4507 __ CallStub(&stub); |
4508 RecordSafepointWithRegisters( | 4508 RecordSafepointWithRegisters( |
4509 instr->pointer_map(), 0, Safepoint::kLazyDeopt); | 4509 instr->pointer_map(), 0, Safepoint::kLazyDeopt); |
4510 } | 4510 } |
4511 __ bind(¬_applicable); | 4511 __ bind(¬_applicable); |
4512 } | 4512 } |
4513 | 4513 |
4514 | 4514 |
4515 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4515 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
4516 Register object = ToRegister(instr->object()); | 4516 Register object = ToRegister(instr->object()); |
4517 Register temp = ToRegister(instr->temp()); | 4517 Register temp = ToRegister(instr->temp()); |
4518 Label no_memento_found; | 4518 Label no_memento_found; |
4519 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found, | 4519 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found, |
4520 ne, &no_memento_found); | 4520 ne, &no_memento_found); |
4521 DeoptimizeIf(al, instr->environment()); | 4521 DeoptimizeIf(al, instr->environment()); |
4522 __ bind(&no_memento_found); | 4522 __ bind(&no_memento_found); |
4523 } | 4523 } |
4524 | 4524 |
4525 | 4525 |
4526 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4526 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
4527 ASSERT(ToRegister(instr->context()).is(cp)); | 4527 DCHECK(ToRegister(instr->context()).is(cp)); |
4528 ASSERT(ToRegister(instr->left()).is(a1)); | 4528 DCHECK(ToRegister(instr->left()).is(a1)); |
4529 ASSERT(ToRegister(instr->right()).is(a0)); | 4529 DCHECK(ToRegister(instr->right()).is(a0)); |
4530 StringAddStub stub(isolate(), | 4530 StringAddStub stub(isolate(), |
4531 instr->hydrogen()->flags(), | 4531 instr->hydrogen()->flags(), |
4532 instr->hydrogen()->pretenure_flag()); | 4532 instr->hydrogen()->pretenure_flag()); |
4533 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4533 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4534 } | 4534 } |
4535 | 4535 |
4536 | 4536 |
4537 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { | 4537 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
4538 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { | 4538 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { |
4539 public: | 4539 public: |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4598 codegen()->DoDeferredStringCharFromCode(instr_); | 4598 codegen()->DoDeferredStringCharFromCode(instr_); |
4599 } | 4599 } |
4600 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4600 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4601 private: | 4601 private: |
4602 LStringCharFromCode* instr_; | 4602 LStringCharFromCode* instr_; |
4603 }; | 4603 }; |
4604 | 4604 |
4605 DeferredStringCharFromCode* deferred = | 4605 DeferredStringCharFromCode* deferred = |
4606 new(zone()) DeferredStringCharFromCode(this, instr); | 4606 new(zone()) DeferredStringCharFromCode(this, instr); |
4607 | 4607 |
4608 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); | 4608 DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); |
4609 Register char_code = ToRegister(instr->char_code()); | 4609 Register char_code = ToRegister(instr->char_code()); |
4610 Register result = ToRegister(instr->result()); | 4610 Register result = ToRegister(instr->result()); |
4611 Register scratch = scratch0(); | 4611 Register scratch = scratch0(); |
4612 ASSERT(!char_code.is(result)); | 4612 DCHECK(!char_code.is(result)); |
4613 | 4613 |
4614 __ Branch(deferred->entry(), hi, | 4614 __ Branch(deferred->entry(), hi, |
4615 char_code, Operand(String::kMaxOneByteCharCode)); | 4615 char_code, Operand(String::kMaxOneByteCharCode)); |
4616 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); | 4616 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); |
4617 __ dsll(scratch, char_code, kPointerSizeLog2); | 4617 __ dsll(scratch, char_code, kPointerSizeLog2); |
4618 __ Daddu(result, result, scratch); | 4618 __ Daddu(result, result, scratch); |
4619 __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize)); | 4619 __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize)); |
4620 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 4620 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
4621 __ Branch(deferred->entry(), eq, result, Operand(scratch)); | 4621 __ Branch(deferred->entry(), eq, result, Operand(scratch)); |
4622 __ bind(deferred->exit()); | 4622 __ bind(deferred->exit()); |
(...skipping 12 matching lines...) Expand all Loading... |
4635 PushSafepointRegistersScope scope(this); | 4635 PushSafepointRegistersScope scope(this); |
4636 __ SmiTag(char_code); | 4636 __ SmiTag(char_code); |
4637 __ push(char_code); | 4637 __ push(char_code); |
4638 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); | 4638 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); |
4639 __ StoreToSafepointRegisterSlot(v0, result); | 4639 __ StoreToSafepointRegisterSlot(v0, result); |
4640 } | 4640 } |
4641 | 4641 |
4642 | 4642 |
4643 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4643 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
4644 LOperand* input = instr->value(); | 4644 LOperand* input = instr->value(); |
4645 ASSERT(input->IsRegister() || input->IsStackSlot()); | 4645 DCHECK(input->IsRegister() || input->IsStackSlot()); |
4646 LOperand* output = instr->result(); | 4646 LOperand* output = instr->result(); |
4647 ASSERT(output->IsDoubleRegister()); | 4647 DCHECK(output->IsDoubleRegister()); |
4648 FPURegister single_scratch = double_scratch0().low(); | 4648 FPURegister single_scratch = double_scratch0().low(); |
4649 if (input->IsStackSlot()) { | 4649 if (input->IsStackSlot()) { |
4650 Register scratch = scratch0(); | 4650 Register scratch = scratch0(); |
4651 __ ld(scratch, ToMemOperand(input)); | 4651 __ ld(scratch, ToMemOperand(input)); |
4652 __ mtc1(scratch, single_scratch); | 4652 __ mtc1(scratch, single_scratch); |
4653 } else { | 4653 } else { |
4654 __ mtc1(ToRegister(input), single_scratch); | 4654 __ mtc1(ToRegister(input), single_scratch); |
4655 } | 4655 } |
4656 __ cvt_d_w(ToDoubleRegister(output), single_scratch); | 4656 __ cvt_d_w(ToDoubleRegister(output), single_scratch); |
4657 } | 4657 } |
(...skipping 223 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4881 __ bind(&convert); | 4881 __ bind(&convert); |
4882 // Convert undefined (and hole) to NaN. | 4882 // Convert undefined (and hole) to NaN. |
4883 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 4883 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
4884 DeoptimizeIf(ne, env, input_reg, Operand(at)); | 4884 DeoptimizeIf(ne, env, input_reg, Operand(at)); |
4885 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4885 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
4886 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4886 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
4887 __ Branch(&done); | 4887 __ Branch(&done); |
4888 } | 4888 } |
4889 } else { | 4889 } else { |
4890 __ SmiUntag(scratch, input_reg); | 4890 __ SmiUntag(scratch, input_reg); |
4891 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); | 4891 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
4892 } | 4892 } |
4893 // Smi to double register conversion | 4893 // Smi to double register conversion |
4894 __ bind(&load_smi); | 4894 __ bind(&load_smi); |
4895 // scratch: untagged value of input_reg | 4895 // scratch: untagged value of input_reg |
4896 __ mtc1(scratch, result_reg); | 4896 __ mtc1(scratch, result_reg); |
4897 __ cvt_d_w(result_reg, result_reg); | 4897 __ cvt_d_w(result_reg, result_reg); |
4898 __ bind(&done); | 4898 __ bind(&done); |
4899 } | 4899 } |
4900 | 4900 |
4901 | 4901 |
4902 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { | 4902 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { |
4903 Register input_reg = ToRegister(instr->value()); | 4903 Register input_reg = ToRegister(instr->value()); |
4904 Register scratch1 = scratch0(); | 4904 Register scratch1 = scratch0(); |
4905 Register scratch2 = ToRegister(instr->temp()); | 4905 Register scratch2 = ToRegister(instr->temp()); |
4906 DoubleRegister double_scratch = double_scratch0(); | 4906 DoubleRegister double_scratch = double_scratch0(); |
4907 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); | 4907 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); |
4908 | 4908 |
4909 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); | 4909 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); |
4910 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); | 4910 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); |
4911 | 4911 |
4912 Label done; | 4912 Label done; |
4913 | 4913 |
4914 // The input is a tagged HeapObject. | 4914 // The input is a tagged HeapObject. |
4915 // Heap number map check. | 4915 // Heap number map check. |
4916 __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4916 __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
4917 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 4917 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
4918 // This 'at' value and scratch1 map value are used for tests in both clauses | 4918 // This 'at' value and scratch1 map value are used for tests in both clauses |
4919 // of the if. | 4919 // of the if. |
4920 | 4920 |
4921 if (instr->truncating()) { | 4921 if (instr->truncating()) { |
4922 // Performs a truncating conversion of a floating point number as used by | 4922 // Performs a truncating conversion of a floating point number as used by |
4923 // the JS bitwise operations. | 4923 // the JS bitwise operations. |
4924 Label no_heap_number, check_bools, check_false; | 4924 Label no_heap_number, check_bools, check_false; |
4925 // Check HeapNumber map. | 4925 // Check HeapNumber map. |
4926 __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at)); | 4926 __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at)); |
4927 __ mov(scratch2, input_reg); // In delay slot. | 4927 __ mov(scratch2, input_reg); // In delay slot. |
4928 __ TruncateHeapNumberToI(input_reg, scratch2); | 4928 __ TruncateHeapNumberToI(input_reg, scratch2); |
4929 __ Branch(&done); | 4929 __ Branch(&done); |
4930 | 4930 |
4931 // Check for Oddballs. Undefined/False is converted to zero and True to one | 4931 // Check for Oddballs. Undefined/False is converted to zero and True to one |
4932 // for truncating conversions. | 4932 // for truncating conversions. |
4933 __ bind(&no_heap_number); | 4933 __ bind(&no_heap_number); |
4934 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 4934 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
4935 __ Branch(&check_bools, ne, input_reg, Operand(at)); | 4935 __ Branch(&check_bools, ne, input_reg, Operand(at)); |
4936 ASSERT(ToRegister(instr->result()).is(input_reg)); | 4936 DCHECK(ToRegister(instr->result()).is(input_reg)); |
4937 __ Branch(USE_DELAY_SLOT, &done); | 4937 __ Branch(USE_DELAY_SLOT, &done); |
4938 __ mov(input_reg, zero_reg); // In delay slot. | 4938 __ mov(input_reg, zero_reg); // In delay slot. |
4939 | 4939 |
4940 __ bind(&check_bools); | 4940 __ bind(&check_bools); |
4941 __ LoadRoot(at, Heap::kTrueValueRootIndex); | 4941 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
4942 __ Branch(&check_false, ne, scratch2, Operand(at)); | 4942 __ Branch(&check_false, ne, scratch2, Operand(at)); |
4943 __ Branch(USE_DELAY_SLOT, &done); | 4943 __ Branch(USE_DELAY_SLOT, &done); |
4944 __ li(input_reg, Operand(1)); // In delay slot. | 4944 __ li(input_reg, Operand(1)); // In delay slot. |
4945 | 4945 |
4946 __ bind(&check_false); | 4946 __ bind(&check_false); |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4987 : LDeferredCode(codegen), instr_(instr) { } | 4987 : LDeferredCode(codegen), instr_(instr) { } |
4988 virtual void Generate() V8_OVERRIDE { | 4988 virtual void Generate() V8_OVERRIDE { |
4989 codegen()->DoDeferredTaggedToI(instr_); | 4989 codegen()->DoDeferredTaggedToI(instr_); |
4990 } | 4990 } |
4991 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4991 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4992 private: | 4992 private: |
4993 LTaggedToI* instr_; | 4993 LTaggedToI* instr_; |
4994 }; | 4994 }; |
4995 | 4995 |
4996 LOperand* input = instr->value(); | 4996 LOperand* input = instr->value(); |
4997 ASSERT(input->IsRegister()); | 4997 DCHECK(input->IsRegister()); |
4998 ASSERT(input->Equals(instr->result())); | 4998 DCHECK(input->Equals(instr->result())); |
4999 | 4999 |
5000 Register input_reg = ToRegister(input); | 5000 Register input_reg = ToRegister(input); |
5001 | 5001 |
5002 if (instr->hydrogen()->value()->representation().IsSmi()) { | 5002 if (instr->hydrogen()->value()->representation().IsSmi()) { |
5003 __ SmiUntag(input_reg); | 5003 __ SmiUntag(input_reg); |
5004 } else { | 5004 } else { |
5005 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); | 5005 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); |
5006 | 5006 |
5007 // Let the deferred code handle the HeapObject case. | 5007 // Let the deferred code handle the HeapObject case. |
5008 __ JumpIfNotSmi(input_reg, deferred->entry()); | 5008 __ JumpIfNotSmi(input_reg, deferred->entry()); |
5009 | 5009 |
5010 // Smi to int32 conversion. | 5010 // Smi to int32 conversion. |
5011 __ SmiUntag(input_reg); | 5011 __ SmiUntag(input_reg); |
5012 __ bind(deferred->exit()); | 5012 __ bind(deferred->exit()); |
5013 } | 5013 } |
5014 } | 5014 } |
5015 | 5015 |
5016 | 5016 |
5017 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 5017 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
5018 LOperand* input = instr->value(); | 5018 LOperand* input = instr->value(); |
5019 ASSERT(input->IsRegister()); | 5019 DCHECK(input->IsRegister()); |
5020 LOperand* result = instr->result(); | 5020 LOperand* result = instr->result(); |
5021 ASSERT(result->IsDoubleRegister()); | 5021 DCHECK(result->IsDoubleRegister()); |
5022 | 5022 |
5023 Register input_reg = ToRegister(input); | 5023 Register input_reg = ToRegister(input); |
5024 DoubleRegister result_reg = ToDoubleRegister(result); | 5024 DoubleRegister result_reg = ToDoubleRegister(result); |
5025 | 5025 |
5026 HValue* value = instr->hydrogen()->value(); | 5026 HValue* value = instr->hydrogen()->value(); |
5027 NumberUntagDMode mode = value->representation().IsSmi() | 5027 NumberUntagDMode mode = value->representation().IsSmi() |
5028 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; | 5028 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
5029 | 5029 |
5030 EmitNumberUntagD(input_reg, result_reg, | 5030 EmitNumberUntagD(input_reg, result_reg, |
5031 instr->hydrogen()->can_convert_undefined_to_nan(), | 5031 instr->hydrogen()->can_convert_undefined_to_nan(), |
(...skipping 106 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5138 if (last != LAST_TYPE) { | 5138 if (last != LAST_TYPE) { |
5139 DeoptimizeIf(hi, instr->environment(), scratch, Operand(last)); | 5139 DeoptimizeIf(hi, instr->environment(), scratch, Operand(last)); |
5140 } | 5140 } |
5141 } | 5141 } |
5142 } else { | 5142 } else { |
5143 uint8_t mask; | 5143 uint8_t mask; |
5144 uint8_t tag; | 5144 uint8_t tag; |
5145 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5145 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
5146 | 5146 |
5147 if (IsPowerOf2(mask)) { | 5147 if (IsPowerOf2(mask)) { |
5148 ASSERT(tag == 0 || IsPowerOf2(tag)); | 5148 DCHECK(tag == 0 || IsPowerOf2(tag)); |
5149 __ And(at, scratch, mask); | 5149 __ And(at, scratch, mask); |
5150 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(), | 5150 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(), |
5151 at, Operand(zero_reg)); | 5151 at, Operand(zero_reg)); |
5152 } else { | 5152 } else { |
5153 __ And(scratch, scratch, Operand(mask)); | 5153 __ And(scratch, scratch, Operand(mask)); |
5154 DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag)); | 5154 DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag)); |
5155 } | 5155 } |
5156 } | 5156 } |
5157 } | 5157 } |
5158 | 5158 |
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5211 if (instr->hydrogen()->IsStabilityCheck()) { | 5211 if (instr->hydrogen()->IsStabilityCheck()) { |
5212 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | 5212 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); |
5213 for (int i = 0; i < maps->size(); ++i) { | 5213 for (int i = 0; i < maps->size(); ++i) { |
5214 AddStabilityDependency(maps->at(i).handle()); | 5214 AddStabilityDependency(maps->at(i).handle()); |
5215 } | 5215 } |
5216 return; | 5216 return; |
5217 } | 5217 } |
5218 | 5218 |
5219 Register map_reg = scratch0(); | 5219 Register map_reg = scratch0(); |
5220 LOperand* input = instr->value(); | 5220 LOperand* input = instr->value(); |
5221 ASSERT(input->IsRegister()); | 5221 DCHECK(input->IsRegister()); |
5222 Register reg = ToRegister(input); | 5222 Register reg = ToRegister(input); |
5223 __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); | 5223 __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
5224 | 5224 |
5225 DeferredCheckMaps* deferred = NULL; | 5225 DeferredCheckMaps* deferred = NULL; |
5226 if (instr->hydrogen()->HasMigrationTarget()) { | 5226 if (instr->hydrogen()->HasMigrationTarget()) { |
5227 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); | 5227 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); |
5228 __ bind(deferred->check_maps()); | 5228 __ bind(deferred->check_maps()); |
5229 } | 5229 } |
5230 | 5230 |
5231 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | 5231 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); |
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5334 Register result = ToRegister(instr->result()); | 5334 Register result = ToRegister(instr->result()); |
5335 Register scratch = ToRegister(instr->temp1()); | 5335 Register scratch = ToRegister(instr->temp1()); |
5336 Register scratch2 = ToRegister(instr->temp2()); | 5336 Register scratch2 = ToRegister(instr->temp2()); |
5337 | 5337 |
5338 // Allocate memory for the object. | 5338 // Allocate memory for the object. |
5339 AllocationFlags flags = TAG_OBJECT; | 5339 AllocationFlags flags = TAG_OBJECT; |
5340 if (instr->hydrogen()->MustAllocateDoubleAligned()) { | 5340 if (instr->hydrogen()->MustAllocateDoubleAligned()) { |
5341 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); | 5341 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); |
5342 } | 5342 } |
5343 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { | 5343 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
5344 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); | 5344 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); |
5345 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | 5345 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); |
5346 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); | 5346 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); |
5347 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { | 5347 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { |
5348 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | 5348 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); |
5349 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); | 5349 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); |
5350 } | 5350 } |
5351 if (instr->size()->IsConstantOperand()) { | 5351 if (instr->size()->IsConstantOperand()) { |
5352 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | 5352 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
5353 if (size <= Page::kMaxRegularHeapObjectSize) { | 5353 if (size <= Page::kMaxRegularHeapObjectSize) { |
5354 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); | 5354 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); |
5355 } else { | 5355 } else { |
5356 __ jmp(deferred->entry()); | 5356 __ jmp(deferred->entry()); |
5357 } | 5357 } |
5358 } else { | 5358 } else { |
(...skipping 26 matching lines...) Expand all Loading... |
5385 Register result = ToRegister(instr->result()); | 5385 Register result = ToRegister(instr->result()); |
5386 | 5386 |
5387 // TODO(3095996): Get rid of this. For now, we need to make the | 5387 // TODO(3095996): Get rid of this. For now, we need to make the |
5388 // result register contain a valid pointer because it is already | 5388 // result register contain a valid pointer because it is already |
5389 // contained in the register pointer map. | 5389 // contained in the register pointer map. |
5390 __ mov(result, zero_reg); | 5390 __ mov(result, zero_reg); |
5391 | 5391 |
5392 PushSafepointRegistersScope scope(this); | 5392 PushSafepointRegistersScope scope(this); |
5393 if (instr->size()->IsRegister()) { | 5393 if (instr->size()->IsRegister()) { |
5394 Register size = ToRegister(instr->size()); | 5394 Register size = ToRegister(instr->size()); |
5395 ASSERT(!size.is(result)); | 5395 DCHECK(!size.is(result)); |
5396 __ SmiTag(size); | 5396 __ SmiTag(size); |
5397 __ push(size); | 5397 __ push(size); |
5398 } else { | 5398 } else { |
5399 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | 5399 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
5400 if (size >= 0 && size <= Smi::kMaxValue) { | 5400 if (size >= 0 && size <= Smi::kMaxValue) { |
5401 __ li(v0, Operand(Smi::FromInt(size))); | 5401 __ li(v0, Operand(Smi::FromInt(size))); |
5402 __ Push(v0); | 5402 __ Push(v0); |
5403 } else { | 5403 } else { |
5404 // We should never get here at runtime => abort | 5404 // We should never get here at runtime => abort |
5405 __ stop("invalid allocation size"); | 5405 __ stop("invalid allocation size"); |
5406 return; | 5406 return; |
5407 } | 5407 } |
5408 } | 5408 } |
5409 | 5409 |
5410 int flags = AllocateDoubleAlignFlag::encode( | 5410 int flags = AllocateDoubleAlignFlag::encode( |
5411 instr->hydrogen()->MustAllocateDoubleAligned()); | 5411 instr->hydrogen()->MustAllocateDoubleAligned()); |
5412 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { | 5412 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
5413 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); | 5413 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); |
5414 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | 5414 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); |
5415 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); | 5415 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); |
5416 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { | 5416 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { |
5417 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | 5417 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); |
5418 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); | 5418 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); |
5419 } else { | 5419 } else { |
5420 flags = AllocateTargetSpace::update(flags, NEW_SPACE); | 5420 flags = AllocateTargetSpace::update(flags, NEW_SPACE); |
5421 } | 5421 } |
5422 __ li(v0, Operand(Smi::FromInt(flags))); | 5422 __ li(v0, Operand(Smi::FromInt(flags))); |
5423 __ Push(v0); | 5423 __ Push(v0); |
5424 | 5424 |
5425 CallRuntimeFromDeferred( | 5425 CallRuntimeFromDeferred( |
5426 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); | 5426 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); |
5427 __ StoreToSafepointRegisterSlot(v0, result); | 5427 __ StoreToSafepointRegisterSlot(v0, result); |
5428 } | 5428 } |
5429 | 5429 |
5430 | 5430 |
5431 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { | 5431 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { |
5432 ASSERT(ToRegister(instr->value()).is(a0)); | 5432 DCHECK(ToRegister(instr->value()).is(a0)); |
5433 ASSERT(ToRegister(instr->result()).is(v0)); | 5433 DCHECK(ToRegister(instr->result()).is(v0)); |
5434 __ push(a0); | 5434 __ push(a0); |
5435 CallRuntime(Runtime::kToFastProperties, 1, instr); | 5435 CallRuntime(Runtime::kToFastProperties, 1, instr); |
5436 } | 5436 } |
5437 | 5437 |
5438 | 5438 |
5439 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { | 5439 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { |
5440 ASSERT(ToRegister(instr->context()).is(cp)); | 5440 DCHECK(ToRegister(instr->context()).is(cp)); |
5441 Label materialized; | 5441 Label materialized; |
5442 // Registers will be used as follows: | 5442 // Registers will be used as follows: |
5443 // a7 = literals array. | 5443 // a7 = literals array. |
5444 // a1 = regexp literal. | 5444 // a1 = regexp literal. |
5445 // a0 = regexp literal clone. | 5445 // a0 = regexp literal clone. |
5446 // a2 and a4-a6 are used as temporaries. | 5446 // a2 and a4-a6 are used as temporaries. |
5447 int literal_offset = | 5447 int literal_offset = |
5448 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); | 5448 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); |
5449 __ li(a7, instr->hydrogen()->literals()); | 5449 __ li(a7, instr->hydrogen()->literals()); |
5450 __ ld(a1, FieldMemOperand(a7, literal_offset)); | 5450 __ ld(a1, FieldMemOperand(a7, literal_offset)); |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5483 __ sd(a2, FieldMemOperand(v0, i + kPointerSize)); | 5483 __ sd(a2, FieldMemOperand(v0, i + kPointerSize)); |
5484 } | 5484 } |
5485 if ((size % (2 * kPointerSize)) != 0) { | 5485 if ((size % (2 * kPointerSize)) != 0) { |
5486 __ ld(a3, FieldMemOperand(a1, size - kPointerSize)); | 5486 __ ld(a3, FieldMemOperand(a1, size - kPointerSize)); |
5487 __ sd(a3, FieldMemOperand(v0, size - kPointerSize)); | 5487 __ sd(a3, FieldMemOperand(v0, size - kPointerSize)); |
5488 } | 5488 } |
5489 } | 5489 } |
5490 | 5490 |
5491 | 5491 |
5492 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { | 5492 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
5493 ASSERT(ToRegister(instr->context()).is(cp)); | 5493 DCHECK(ToRegister(instr->context()).is(cp)); |
5494 // Use the fast case closure allocation code that allocates in new | 5494 // Use the fast case closure allocation code that allocates in new |
5495 // space for nested functions that don't need literals cloning. | 5495 // space for nested functions that don't need literals cloning. |
5496 bool pretenure = instr->hydrogen()->pretenure(); | 5496 bool pretenure = instr->hydrogen()->pretenure(); |
5497 if (!pretenure && instr->hydrogen()->has_no_literals()) { | 5497 if (!pretenure && instr->hydrogen()->has_no_literals()) { |
5498 FastNewClosureStub stub(isolate(), | 5498 FastNewClosureStub stub(isolate(), |
5499 instr->hydrogen()->strict_mode(), | 5499 instr->hydrogen()->strict_mode(), |
5500 instr->hydrogen()->is_generator()); | 5500 instr->hydrogen()->is_generator()); |
5501 __ li(a2, Operand(instr->hydrogen()->shared_info())); | 5501 __ li(a2, Operand(instr->hydrogen()->shared_info())); |
5502 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 5502 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
5503 } else { | 5503 } else { |
5504 __ li(a2, Operand(instr->hydrogen()->shared_info())); | 5504 __ li(a2, Operand(instr->hydrogen()->shared_info())); |
5505 __ li(a1, Operand(pretenure ? factory()->true_value() | 5505 __ li(a1, Operand(pretenure ? factory()->true_value() |
5506 : factory()->false_value())); | 5506 : factory()->false_value())); |
5507 __ Push(cp, a2, a1); | 5507 __ Push(cp, a2, a1); |
5508 CallRuntime(Runtime::kNewClosure, 3, instr); | 5508 CallRuntime(Runtime::kNewClosure, 3, instr); |
5509 } | 5509 } |
5510 } | 5510 } |
5511 | 5511 |
5512 | 5512 |
5513 void LCodeGen::DoTypeof(LTypeof* instr) { | 5513 void LCodeGen::DoTypeof(LTypeof* instr) { |
5514 ASSERT(ToRegister(instr->result()).is(v0)); | 5514 DCHECK(ToRegister(instr->result()).is(v0)); |
5515 Register input = ToRegister(instr->value()); | 5515 Register input = ToRegister(instr->value()); |
5516 __ push(input); | 5516 __ push(input); |
5517 CallRuntime(Runtime::kTypeof, 1, instr); | 5517 CallRuntime(Runtime::kTypeof, 1, instr); |
5518 } | 5518 } |
5519 | 5519 |
5520 | 5520 |
5521 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { | 5521 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { |
5522 Register input = ToRegister(instr->value()); | 5522 Register input = ToRegister(instr->value()); |
5523 | 5523 |
5524 Register cmp1 = no_reg; | 5524 Register cmp1 = no_reg; |
5525 Operand cmp2 = Operand(no_reg); | 5525 Operand cmp2 = Operand(no_reg); |
5526 | 5526 |
5527 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_), | 5527 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_), |
5528 instr->FalseLabel(chunk_), | 5528 instr->FalseLabel(chunk_), |
5529 input, | 5529 input, |
5530 instr->type_literal(), | 5530 instr->type_literal(), |
5531 &cmp1, | 5531 &cmp1, |
5532 &cmp2); | 5532 &cmp2); |
5533 | 5533 |
5534 ASSERT(cmp1.is_valid()); | 5534 DCHECK(cmp1.is_valid()); |
5535 ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid()); | 5535 DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid()); |
5536 | 5536 |
5537 if (final_branch_condition != kNoCondition) { | 5537 if (final_branch_condition != kNoCondition) { |
5538 EmitBranch(instr, final_branch_condition, cmp1, cmp2); | 5538 EmitBranch(instr, final_branch_condition, cmp1, cmp2); |
5539 } | 5539 } |
5540 } | 5540 } |
5541 | 5541 |
5542 | 5542 |
5543 Condition LCodeGen::EmitTypeofIs(Label* true_label, | 5543 Condition LCodeGen::EmitTypeofIs(Label* true_label, |
5544 Label* false_label, | 5544 Label* false_label, |
5545 Register input, | 5545 Register input, |
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5643 Register temp1 = ToRegister(instr->temp()); | 5643 Register temp1 = ToRegister(instr->temp()); |
5644 | 5644 |
5645 EmitIsConstructCall(temp1, scratch0()); | 5645 EmitIsConstructCall(temp1, scratch0()); |
5646 | 5646 |
5647 EmitBranch(instr, eq, temp1, | 5647 EmitBranch(instr, eq, temp1, |
5648 Operand(Smi::FromInt(StackFrame::CONSTRUCT))); | 5648 Operand(Smi::FromInt(StackFrame::CONSTRUCT))); |
5649 } | 5649 } |
5650 | 5650 |
5651 | 5651 |
5652 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { | 5652 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { |
5653 ASSERT(!temp1.is(temp2)); | 5653 DCHECK(!temp1.is(temp2)); |
5654 // Get the frame pointer for the calling frame. | 5654 // Get the frame pointer for the calling frame. |
5655 __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 5655 __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
5656 | 5656 |
5657 // Skip the arguments adaptor frame if it exists. | 5657 // Skip the arguments adaptor frame if it exists. |
5658 Label check_frame_marker; | 5658 Label check_frame_marker; |
5659 __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); | 5659 __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); |
5660 __ Branch(&check_frame_marker, ne, temp2, | 5660 __ Branch(&check_frame_marker, ne, temp2, |
5661 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 5661 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
5662 __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); | 5662 __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); |
5663 | 5663 |
5664 // Check the marker in the calling frame. | 5664 // Check the marker in the calling frame. |
5665 __ bind(&check_frame_marker); | 5665 __ bind(&check_frame_marker); |
5666 __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); | 5666 __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); |
5667 } | 5667 } |
5668 | 5668 |
5669 | 5669 |
5670 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { | 5670 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
5671 if (!info()->IsStub()) { | 5671 if (!info()->IsStub()) { |
5672 // Ensure that we have enough space after the previous lazy-bailout | 5672 // Ensure that we have enough space after the previous lazy-bailout |
5673 // instruction for patching the code here. | 5673 // instruction for patching the code here. |
5674 int current_pc = masm()->pc_offset(); | 5674 int current_pc = masm()->pc_offset(); |
5675 if (current_pc < last_lazy_deopt_pc_ + space_needed) { | 5675 if (current_pc < last_lazy_deopt_pc_ + space_needed) { |
5676 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; | 5676 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; |
5677 ASSERT_EQ(0, padding_size % Assembler::kInstrSize); | 5677 DCHECK_EQ(0, padding_size % Assembler::kInstrSize); |
5678 while (padding_size > 0) { | 5678 while (padding_size > 0) { |
5679 __ nop(); | 5679 __ nop(); |
5680 padding_size -= Assembler::kInstrSize; | 5680 padding_size -= Assembler::kInstrSize; |
5681 } | 5681 } |
5682 } | 5682 } |
5683 } | 5683 } |
5684 last_lazy_deopt_pc_ = masm()->pc_offset(); | 5684 last_lazy_deopt_pc_ = masm()->pc_offset(); |
5685 } | 5685 } |
5686 | 5686 |
5687 | 5687 |
5688 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | 5688 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
5689 last_lazy_deopt_pc_ = masm()->pc_offset(); | 5689 last_lazy_deopt_pc_ = masm()->pc_offset(); |
5690 ASSERT(instr->HasEnvironment()); | 5690 DCHECK(instr->HasEnvironment()); |
5691 LEnvironment* env = instr->environment(); | 5691 LEnvironment* env = instr->environment(); |
5692 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5692 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
5693 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 5693 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
5694 } | 5694 } |
5695 | 5695 |
5696 | 5696 |
5697 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | 5697 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
5698 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | 5698 Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
5699 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | 5699 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the |
5700 // needed return address), even though the implementation of LAZY and EAGER is | 5700 // needed return address), even though the implementation of LAZY and EAGER is |
(...skipping 17 matching lines...) Expand all Loading... |
5718 // Nothing to see here, move on! | 5718 // Nothing to see here, move on! |
5719 } | 5719 } |
5720 | 5720 |
5721 | 5721 |
5722 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { | 5722 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { |
5723 PushSafepointRegistersScope scope(this); | 5723 PushSafepointRegistersScope scope(this); |
5724 LoadContextFromDeferred(instr->context()); | 5724 LoadContextFromDeferred(instr->context()); |
5725 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); | 5725 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); |
5726 RecordSafepointWithLazyDeopt( | 5726 RecordSafepointWithLazyDeopt( |
5727 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 5727 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
5728 ASSERT(instr->HasEnvironment()); | 5728 DCHECK(instr->HasEnvironment()); |
5729 LEnvironment* env = instr->environment(); | 5729 LEnvironment* env = instr->environment(); |
5730 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 5730 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
5731 } | 5731 } |
5732 | 5732 |
5733 | 5733 |
5734 void LCodeGen::DoStackCheck(LStackCheck* instr) { | 5734 void LCodeGen::DoStackCheck(LStackCheck* instr) { |
5735 class DeferredStackCheck V8_FINAL : public LDeferredCode { | 5735 class DeferredStackCheck V8_FINAL : public LDeferredCode { |
5736 public: | 5736 public: |
5737 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) | 5737 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) |
5738 : LDeferredCode(codegen), instr_(instr) { } | 5738 : LDeferredCode(codegen), instr_(instr) { } |
5739 virtual void Generate() V8_OVERRIDE { | 5739 virtual void Generate() V8_OVERRIDE { |
5740 codegen()->DoDeferredStackCheck(instr_); | 5740 codegen()->DoDeferredStackCheck(instr_); |
5741 } | 5741 } |
5742 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5742 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5743 private: | 5743 private: |
5744 LStackCheck* instr_; | 5744 LStackCheck* instr_; |
5745 }; | 5745 }; |
5746 | 5746 |
5747 ASSERT(instr->HasEnvironment()); | 5747 DCHECK(instr->HasEnvironment()); |
5748 LEnvironment* env = instr->environment(); | 5748 LEnvironment* env = instr->environment(); |
5749 // There is no LLazyBailout instruction for stack-checks. We have to | 5749 // There is no LLazyBailout instruction for stack-checks. We have to |
5750 // prepare for lazy deoptimization explicitly here. | 5750 // prepare for lazy deoptimization explicitly here. |
5751 if (instr->hydrogen()->is_function_entry()) { | 5751 if (instr->hydrogen()->is_function_entry()) { |
5752 // Perform stack overflow check. | 5752 // Perform stack overflow check. |
5753 Label done; | 5753 Label done; |
5754 __ LoadRoot(at, Heap::kStackLimitRootIndex); | 5754 __ LoadRoot(at, Heap::kStackLimitRootIndex); |
5755 __ Branch(&done, hs, sp, Operand(at)); | 5755 __ Branch(&done, hs, sp, Operand(at)); |
5756 ASSERT(instr->context()->IsRegister()); | 5756 DCHECK(instr->context()->IsRegister()); |
5757 ASSERT(ToRegister(instr->context()).is(cp)); | 5757 DCHECK(ToRegister(instr->context()).is(cp)); |
5758 CallCode(isolate()->builtins()->StackCheck(), | 5758 CallCode(isolate()->builtins()->StackCheck(), |
5759 RelocInfo::CODE_TARGET, | 5759 RelocInfo::CODE_TARGET, |
5760 instr); | 5760 instr); |
5761 __ bind(&done); | 5761 __ bind(&done); |
5762 } else { | 5762 } else { |
5763 ASSERT(instr->hydrogen()->is_backwards_branch()); | 5763 DCHECK(instr->hydrogen()->is_backwards_branch()); |
5764 // Perform stack overflow check if this goto needs it before jumping. | 5764 // Perform stack overflow check if this goto needs it before jumping. |
5765 DeferredStackCheck* deferred_stack_check = | 5765 DeferredStackCheck* deferred_stack_check = |
5766 new(zone()) DeferredStackCheck(this, instr); | 5766 new(zone()) DeferredStackCheck(this, instr); |
5767 __ LoadRoot(at, Heap::kStackLimitRootIndex); | 5767 __ LoadRoot(at, Heap::kStackLimitRootIndex); |
5768 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at)); | 5768 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at)); |
5769 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 5769 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
5770 __ bind(instr->done_label()); | 5770 __ bind(instr->done_label()); |
5771 deferred_stack_check->SetExit(instr->done_label()); | 5771 deferred_stack_check->SetExit(instr->done_label()); |
5772 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5772 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
5773 // Don't record a deoptimization index for the safepoint here. | 5773 // Don't record a deoptimization index for the safepoint here. |
5774 // This will be done explicitly when emitting call and the safepoint in | 5774 // This will be done explicitly when emitting call and the safepoint in |
5775 // the deferred code. | 5775 // the deferred code. |
5776 } | 5776 } |
5777 } | 5777 } |
5778 | 5778 |
5779 | 5779 |
5780 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { | 5780 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { |
5781 // This is a pseudo-instruction that ensures that the environment here is | 5781 // This is a pseudo-instruction that ensures that the environment here is |
5782 // properly registered for deoptimization and records the assembler's PC | 5782 // properly registered for deoptimization and records the assembler's PC |
5783 // offset. | 5783 // offset. |
5784 LEnvironment* environment = instr->environment(); | 5784 LEnvironment* environment = instr->environment(); |
5785 | 5785 |
5786 // If the environment were already registered, we would have no way of | 5786 // If the environment were already registered, we would have no way of |
5787 // backpatching it with the spill slot operands. | 5787 // backpatching it with the spill slot operands. |
5788 ASSERT(!environment->HasBeenRegistered()); | 5788 DCHECK(!environment->HasBeenRegistered()); |
5789 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5789 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
5790 | 5790 |
5791 GenerateOsrPrologue(); | 5791 GenerateOsrPrologue(); |
5792 } | 5792 } |
5793 | 5793 |
5794 | 5794 |
5795 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5795 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
5796 Register result = ToRegister(instr->result()); | 5796 Register result = ToRegister(instr->result()); |
5797 Register object = ToRegister(instr->object()); | 5797 Register object = ToRegister(instr->object()); |
5798 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 5798 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
5799 DeoptimizeIf(eq, instr->environment(), object, Operand(at)); | 5799 DeoptimizeIf(eq, instr->environment(), object, Operand(at)); |
5800 | 5800 |
5801 Register null_value = a5; | 5801 Register null_value = a5; |
5802 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5802 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
5803 DeoptimizeIf(eq, instr->environment(), object, Operand(null_value)); | 5803 DeoptimizeIf(eq, instr->environment(), object, Operand(null_value)); |
5804 | 5804 |
5805 __ And(at, object, kSmiTagMask); | 5805 __ And(at, object, kSmiTagMask); |
5806 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); | 5806 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); |
5807 | 5807 |
5808 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5808 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
5809 __ GetObjectType(object, a1, a1); | 5809 __ GetObjectType(object, a1, a1); |
5810 DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE)); | 5810 DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE)); |
5811 | 5811 |
5812 Label use_cache, call_runtime; | 5812 Label use_cache, call_runtime; |
5813 ASSERT(object.is(a0)); | 5813 DCHECK(object.is(a0)); |
5814 __ CheckEnumCache(null_value, &call_runtime); | 5814 __ CheckEnumCache(null_value, &call_runtime); |
5815 | 5815 |
5816 __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset)); | 5816 __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset)); |
5817 __ Branch(&use_cache); | 5817 __ Branch(&use_cache); |
5818 | 5818 |
5819 // Get the set of properties to enumerate. | 5819 // Get the set of properties to enumerate. |
5820 __ bind(&call_runtime); | 5820 __ bind(&call_runtime); |
5821 __ push(object); | 5821 __ push(object); |
5822 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5822 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
5823 | 5823 |
5824 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); | 5824 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
5825 ASSERT(result.is(v0)); | 5825 DCHECK(result.is(v0)); |
5826 __ LoadRoot(at, Heap::kMetaMapRootIndex); | 5826 __ LoadRoot(at, Heap::kMetaMapRootIndex); |
5827 DeoptimizeIf(ne, instr->environment(), a1, Operand(at)); | 5827 DeoptimizeIf(ne, instr->environment(), a1, Operand(at)); |
5828 __ bind(&use_cache); | 5828 __ bind(&use_cache); |
5829 } | 5829 } |
5830 | 5830 |
5831 | 5831 |
5832 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5832 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
5833 Register map = ToRegister(instr->map()); | 5833 Register map = ToRegister(instr->map()); |
5834 Register result = ToRegister(instr->result()); | 5834 Register result = ToRegister(instr->result()); |
5835 Label load_cache, done; | 5835 Label load_cache, done; |
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5941 __ li(at, scope_info); | 5941 __ li(at, scope_info); |
5942 __ Push(at, ToRegister(instr->function())); | 5942 __ Push(at, ToRegister(instr->function())); |
5943 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5943 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
5944 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5944 RecordSafepoint(Safepoint::kNoLazyDeopt); |
5945 } | 5945 } |
5946 | 5946 |
5947 | 5947 |
5948 #undef __ | 5948 #undef __ |
5949 | 5949 |
5950 } } // namespace v8::internal | 5950 } } // namespace v8::internal |
OLD | NEW |