Chromium Code Reviews

Unified Diff: src/x64/lithium-codegen-x64.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes Created 6 years, 4 months ago
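The patch is a mechanical rename of V8's ASSERT* macros to the Chromium-style DCHECK* names; behavior is unchanged. For readers new to the idiom, a minimal sketch of a debug-only check macro (illustrative only, not V8's actual definition):

    #ifdef DEBUG
    #define DCHECK(condition)                                \
      do {                                                   \
        if (!(condition)) {                                  \
          V8_Fatal(__FILE__, __LINE__, "Check failed: %s.",  \
                   #condition);                              \
        }                                                    \
      } while (false)
    #else
    #define DCHECK(condition) ((void) 0)  // compiles away in release builds
    #endif

In a debug build a failing condition aborts with file, line, and the stringified expression; in a release build the check costs nothing.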
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #if V8_TARGET_ARCH_X64

 #include "src/code-stubs.h"
 #include "src/hydrogen-osr.h"
(...skipping 26 matching lines...)
   LCodeGen* codegen_;
   LPointerMap* pointers_;
   Safepoint::DeoptMode deopt_mode_;
 };


 #define __ masm()->

 bool LCodeGen::GenerateCode() {
   LPhase phase("Z_Code generation", chunk());
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   status_ = GENERATING;

   // Open a frame scope to indicate that there is a frame on the stack. The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::MANUAL);

   return GeneratePrologue() &&
          GenerateBody() &&
          GenerateDeferredCode() &&
          GenerateJumpTable() &&
          GenerateSafepointTable();
 }


 void LCodeGen::FinishCode(Handle<Code> code) {
-  ASSERT(is_done());
+  DCHECK(is_done());
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
   PopulateDeoptimizationData(code);
 }


 #ifdef _MSC_VER
 void LCodeGen::MakeSureStackPagesMapped(int offset) {
   const int kPageSize = 4 * KB;
   for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
     __ movp(Operand(rsp, offset), rax);
   }
 }
 #endif
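Note (not part of the patch): MakeSureStackPagesMapped exists because Windows commits stack memory one guard page at a time, so a frame that grows the stack by more than a page must touch every page in between before using it. A rough C++ analogue of the probing loop, for illustration only:

    // Touch one word per 4 KB page of a freshly reserved region so the OS
    // commits each guard page before the real stores happen.
    void TouchPages(volatile char* base, int size) {
      const int kPageSize = 4 * 1024;
      for (int offset = size - kPageSize; offset > 0; offset -= kPageSize) {
        base[offset] = 0;  // any write faults the page in
      }
    }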


 void LCodeGen::SaveCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Save clobbered callee double registers");
   int count = 0;
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
   while (!save_iterator.Done()) {
     __ movsd(MemOperand(rsp, count * kDoubleSize),
              XMMRegister::FromAllocationIndex(save_iterator.Current()));
     save_iterator.Advance();
     count++;
   }
 }


 void LCodeGen::RestoreCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Restore clobbered callee double registers");
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
   int count = 0;
   while (!save_iterator.Done()) {
     __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(rsp, count * kDoubleSize));
     save_iterator.Advance();
     count++;
   }
 }


 bool LCodeGen::GeneratePrologue() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());

   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);

 #ifdef DEBUG
     if (strlen(FLAG_stop_at) > 0 &&
         info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
       __ int3();
     }
 #endif
(...skipping 14 matching lines...)
       __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));

       __ movp(args.GetReceiverOperand(), rcx);

       __ bind(&ok);
     }
   }

   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    ASSERT(!frame_is_built_);
+    DCHECK(!frame_is_built_);
     frame_is_built_ = true;
     if (info()->IsStub()) {
       __ StubPrologue();
     } else {
       __ Prologue(info()->IsCodePreAgingActive());
     }
     info()->AddNoFrameRange(0, masm_->pc_offset());
   }

   // Reserve space for the stack slots needed by the code.
(...skipping 84 matching lines...)
 void LCodeGen::GenerateOsrPrologue() {
   // Generate the OSR entry prologue at the first unknown OSR value, or if there
   // are none, at the OSR entrypoint instruction.
   if (osr_pc_offset_ >= 0) return;

   osr_pc_offset_ = masm()->pc_offset();

   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  ASSERT(slots >= 0);
+  DCHECK(slots >= 0);
   __ subp(rsp, Immediate(slots * kPointerSize));
 }


 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
   if (instr->IsCall()) {
     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   }
   if (!instr->IsLazyBailout() && !instr->IsGap()) {
     safepoints_.BumpLastLazySafepointIndex();
   }
 }


 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
   if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
       instr->hydrogen_value()->representation().IsInteger32() &&
       instr->result()->IsRegister()) {
     __ AssertZeroExtended(ToRegister(instr->result()));
   }

   if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
     // We sign extend the dehoisted key at the definition point when the pointer
     // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
     // points and MustSignExtendResult is always false. We can't use
     // STATIC_ASSERT here as the pointer size is 32-bit for x32.
-    ASSERT(kPointerSize == kInt64Size);
+    DCHECK(kPointerSize == kInt64Size);
     if (instr->result()->IsRegister()) {
       Register result_reg = ToRegister(instr->result());
       __ movsxlq(result_reg, result_reg);
     } else {
       // Sign extend the 32bit result in the stack slots.
-      ASSERT(instr->result()->IsStackSlot());
+      DCHECK(instr->result()->IsStackSlot());
       Operand src = ToOperand(instr->result());
       __ movsxlq(kScratchRegister, src);
       __ movq(src, kScratchRegister);
     }
   }
 }
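Note (not part of the patch): the post-instruction hook above is about keeping 32-bit values well-formed in 64-bit registers. The difference between the zero-extension being asserted and the sign-extension being emitted, in plain C++ (illustration only):

    #include <cstdint>

    uint64_t ZeroExtend(uint32_t v) { return v; }  // what AssertZeroExtended checks
    int64_t SignExtend(int32_t v) { return v; }    // what movsxlq performs

    // For v = 0x80000000: ZeroExtend -> 0x0000000080000000,
    //                     SignExtend -> 0xFFFFFFFF80000000.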


 bool LCodeGen::GenerateJumpTable() {
   Label needs_frame;
   if (jump_table_.length() > 0) {
     Comment(";;; -------------------- Jump table --------------------");
   }
   for (int i = 0; i < jump_table_.length(); i++) {
     __ bind(&jump_table_[i].label);
     Address entry = jump_table_[i].address;
     Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
     int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
     if (id == Deoptimizer::kNotDeoptimizationEntry) {
       Comment(";;; jump table entry %d.", i);
     } else {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (jump_table_[i].needs_frame) {
-      ASSERT(!info()->saves_caller_doubles());
+      DCHECK(!info()->saves_caller_doubles());
       __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
       if (needs_frame.is_bound()) {
         __ jmp(&needs_frame);
       } else {
         __ bind(&needs_frame);
         __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
         __ pushq(rbp);
         __ movp(rbp, rsp);
         __ Push(rsi);
         // This variant of deopt can only be used with stubs. Since we don't
         // have a function pointer to install in the stack frame that we're
         // building, install a special marker there instead.
-        ASSERT(info()->IsStub());
+        DCHECK(info()->IsStub());
         __ Move(rsi, Smi::FromInt(StackFrame::STUB));
         __ Push(rsi);
         __ movp(rsi, MemOperand(rsp, kPointerSize));
         __ call(kScratchRegister);
       }
     } else {
       if (info()->saves_caller_doubles()) {
-        ASSERT(info()->IsStub());
+        DCHECK(info()->IsStub());
         RestoreCallerDoubles();
       }
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
   return !is_aborted();
 }


 bool LCodeGen::GenerateDeferredCode() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];

       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
       RecordAndWritePosition(
           chunk()->graph()->SourcePositionToScriptPosition(value->position()));

       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
               code->instruction_index(),
               code->instr()->hydrogen_value()->id(),
               code->instr()->Mnemonic());
       __ bind(code->entry());
       if (NeedsDeferredFrame()) {
         Comment(";;; Build frame");
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
         frame_is_built_ = true;
         // Build the frame in such a way that esi isn't trashed.
         __ pushq(rbp);  // Caller's frame pointer.
         __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
         __ Push(Smi::FromInt(StackFrame::STUB));
         __ leap(rbp, Operand(rsp, 2 * kPointerSize));
         Comment(";;; Deferred code");
       }
       code->Generate();
       if (NeedsDeferredFrame()) {
         __ bind(code->done());
         Comment(";;; Destroy frame");
-        ASSERT(frame_is_built_);
+        DCHECK(frame_is_built_);
         frame_is_built_ = false;
         __ movp(rsp, rbp);
         __ popq(rbp);
       }
       __ jmp(code->exit());
     }
   }

   // Deferred code is the last part of the instruction sequence. Mark
   // the generated code as done unless we bailed out.
   if (!is_aborted()) status_ = DONE;
   return !is_aborted();
 }


 bool LCodeGen::GenerateSafepointTable() {
-  ASSERT(is_done());
+  DCHECK(is_done());
   safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }


 Register LCodeGen::ToRegister(int index) const {
   return Register::FromAllocationIndex(index);
 }


 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
   return XMMRegister::FromAllocationIndex(index);
 }


 Register LCodeGen::ToRegister(LOperand* op) const {
-  ASSERT(op->IsRegister());
+  DCHECK(op->IsRegister());
   return ToRegister(op->index());
 }


 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
-  ASSERT(op->IsDoubleRegister());
+  DCHECK(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }


 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
 }


 bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
(...skipping 10 matching lines...)
 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
   return ToRepresentation(op, Representation::Integer32());
 }


 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
   HConstant* constant = chunk_->LookupConstant(op);
   int32_t value = constant->Integer32Value();
   if (r.IsInteger32()) return value;
-  ASSERT(SmiValuesAre31Bits() && r.IsSmiOrTagged());
+  DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
   return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
 }
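Note (not part of the patch): the final cast works because, with 31-bit Smis, a small integer is stored in a word shifted left by one, leaving the low tag bit 0 to distinguish it from a heap pointer. A sketch of the encoding (illustrative, not V8's actual Smi implementation):

    // The raw 32-bit pattern of a 31-bit Smi is simply value * 2.
    int32_t SmiEncode31(int32_t value) {
      return value << 1;  // low bit 0 marks a small integer
    }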


 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
   return Smi::FromInt(constant->Integer32Value());
 }


 double LCodeGen::ToDouble(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasDoubleValue());
+  DCHECK(constant->HasDoubleValue());
   return constant->DoubleValue();
 }


 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasExternalReferenceValue());
+  DCHECK(constant->HasExternalReferenceValue());
   return constant->ExternalReferenceValue();
 }


 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
   return constant->handle(isolate());
 }


 static int ArgumentsOffsetWithoutFrame(int index) {
-  ASSERT(index < 0);
+  DCHECK(index < 0);
   return -(index + 1) * kPointerSize + kPCOnStackSize;
 }
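A quick check of the formula (assuming x64, where kPointerSize and kPCOnStackSize are both 8): parameter indices are negative, and with no eager frame they live just above the return address, so index -1 maps to -(-1 + 1) * 8 + 8 = rsp + 8, index -2 to rsp + 16, and so on.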


 Operand LCodeGen::ToOperand(LOperand* op) const {
   // Does not handle registers. In X64 assembler, plain registers are not
   // representable as an Operand.
-  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return Operand(rbp, StackSlotOffset(op->index()));
   } else {
     // Retrieve parameter without eager stack-frame relative to the
     // stack-pointer.
     return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
   }
 }


(...skipping 14 matching lines...)
       : Translation::kSelfLiteralId;

   switch (environment->frame_type()) {
     case JS_FUNCTION:
       translation->BeginJSFrame(environment->ast_id(), closure_id, height);
       break;
     case JS_CONSTRUCT:
       translation->BeginConstructStubFrame(closure_id, translation_size);
       break;
     case JS_GETTER:
-      ASSERT(translation_size == 1);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
       translation->BeginGetterStubFrame(closure_id);
       break;
     case JS_SETTER:
-      ASSERT(translation_size == 2);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
     case STUB:
       translation->BeginCompiledStubFrame();
       break;
   }

(...skipping 78 matching lines...)
     UNREACHABLE();
   }
 }


 void LCodeGen::CallCodeGeneric(Handle<Code> code,
                                RelocInfo::Mode mode,
                                LInstruction* instr,
                                SafepointMode safepoint_mode,
                                int argc) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
   __ call(code, mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
   if (code->kind() == Code::BINARY_OP_IC ||
       code->kind() == Code::COMPARE_IC) {
     __ nop();
   }
 }


 void LCodeGen::CallCode(Handle<Code> code,
                         RelocInfo::Mode mode,
                         LInstruction* instr) {
   CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
 }


 void LCodeGen::CallRuntime(const Runtime::Function* function,
                            int num_arguments,
                            LInstruction* instr,
                            SaveFPRegsMode save_doubles) {
-  ASSERT(instr != NULL);
-  ASSERT(instr->HasPointerMap());
+  DCHECK(instr != NULL);
+  DCHECK(instr->HasPointerMap());

   __ CallRuntime(function, num_arguments, save_doubles);

   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
 }


 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
   if (context->IsRegister()) {
     if (!ToRegister(context).is(rsi)) {
(...skipping 58 matching lines...)
         (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
     deoptimizations_.Add(environment, environment->zone());
   }
 }


 void LCodeGen::DeoptimizeIf(Condition cc,
                             LEnvironment* environment,
                             Deoptimizer::BailoutType bailout_type) {
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(environment->HasBeenRegistered());
+  DCHECK(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  DCHECK(info()->IsOptimizing() || info()->IsStub());
   Address entry =
       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
   if (entry == NULL) {
     Abort(kBailoutWasNotPrepared);
     return;
   }

   if (DeoptEveryNTimes()) {
     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
     Label no_deopt;
     __ pushfq();
     __ pushq(rax);
     Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
     __ movl(rax, count_operand);
     __ subl(rax, Immediate(1));
     __ j(not_zero, &no_deopt, Label::kNear);
     if (FLAG_trap_on_deopt) __ int3();
     __ movl(rax, Immediate(FLAG_deopt_every_n_times));
     __ movl(count_operand, rax);
     __ popq(rax);
     __ popfq();
-    ASSERT(frame_is_built_);
+    DCHECK(frame_is_built_);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
     __ bind(&no_deopt);
     __ movl(count_operand, rax);
     __ popq(rax);
     __ popfq();
   }

   if (info()->ShouldTrapOnDeopt()) {
     Label done;
     if (cc != no_condition) {
       __ j(NegateCondition(cc), &done, Label::kNear);
     }
     __ int3();
     __ bind(&done);
   }

-  ASSERT(info()->IsStub() || frame_is_built_);
+  DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (cc == no_condition && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
         jump_table_.last().address != entry ||
(...skipping 70 matching lines...)
   int result = deoptimization_literals_.length();
   for (int i = 0; i < deoptimization_literals_.length(); ++i) {
     if (deoptimization_literals_[i].is_identical_to(literal)) return i;
   }
   deoptimization_literals_.Add(literal, zone());
   return result;
 }


 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  ASSERT(deoptimization_literals_.length() == 0);
+  DCHECK(deoptimization_literals_.length() == 0);

   const ZoneList<Handle<JSFunction> >* inlined_closures =
       chunk()->inlined_closures();

   for (int i = 0, length = inlined_closures->length();
        i < length;
        i++) {
     DefineDeoptimizationLiteral(inlined_closures->at(i));
   }

   inlined_function_count_ = deoptimization_literals_.length();
 }


 void LCodeGen::RecordSafepointWithLazyDeopt(
     LInstruction* instr, SafepointMode safepoint_mode, int argc) {
   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
   } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
     RecordSafepointWithRegisters(
         instr->pointer_map(), argc, Safepoint::kLazyDeopt);
   }
 }


 void LCodeGen::RecordSafepoint(
     LPointerMap* pointers,
     Safepoint::Kind kind,
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
-  ASSERT(kind == expected_safepoint_kind_);
+  DCHECK(kind == expected_safepoint_kind_);

   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
       safepoint.DefinePointerSlot(pointer->index(), zone());
     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
(...skipping 68 matching lines...)
   DoGap(instr);
 }


 void LCodeGen::DoParameter(LParameter* instr) {
   // Nothing to do.
 }


 void LCodeGen::DoCallStub(LCallStub* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->result()).is(rax));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpExec: {
       RegExpExecStub stub(isolate());
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::SubString: {
       SubStringStub stub(isolate());
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
(...skipping 10 matching lines...)


 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   GenerateOsrPrologue();
 }


 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));

   // Theoretically, a variation of the branch-free code for integer division by
   // a power of 2 (calculating the remainder via an additional multiplication
   // (which gets simplified to an 'and') and subtraction) should be faster, and
   // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
   // indicate that positive dividends are heavily favored, so the branching
   // version performs better.
   HMod* hmod = instr->hydrogen();
   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
   Label dividend_is_not_negative, done;
(...skipping 12 matching lines...)

   __ bind(&dividend_is_not_negative);
   __ andl(dividend, Immediate(mask));
   __ bind(&done);
 }
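Note (not part of the patch): the mask computed above is |divisor| - 1 in both arms, and the skipped chunk handles negative dividends by negating, masking, and negating back so the remainder keeps the dividend's sign, as JavaScript's % requires. A C++ sketch of the branching scheme (illustration only; kMinInt and the -0 deopt are handled specially in the real code):

    int32_t ModPowerOf2(int32_t n, int32_t abs_divisor) {  // abs_divisor == 2^k
      int32_t mask = abs_divisor - 1;
      if (n >= 0) return n & mask;
      // Negate, mask, negate back; a zero result here is JS -0.
      return static_cast<int32_t>(-((-static_cast<int64_t>(n)) & mask));
    }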


 void LCodeGen::DoModByConstI(LModByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->result()).is(rax));

   if (divisor == 0) {
     DeoptimizeIf(no_condition, instr->environment());
     return;
   }

   __ TruncatingDiv(dividend, Abs(divisor));
   __ imull(rdx, rdx, Immediate(Abs(divisor)));
   __ movl(rax, dividend);
   __ subl(rax, rdx);

   // Check for negative zero.
   HMod* hmod = instr->hydrogen();
   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
     Label remainder_not_zero;
     __ j(not_zero, &remainder_not_zero, Label::kNear);
     __ cmpl(dividend, Immediate(0));
     DeoptimizeIf(less, instr->environment());
     __ bind(&remainder_not_zero);
   }
 }
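Note (not part of the patch): TruncatingDiv leaves trunc(dividend / |divisor|) in rdx via a multiply-by-magic-number sequence, so the imull/subl pair computes the remainder as n - trunc(n / |d|) * |d|, which carries the dividend's sign. The identity in plain C++ (illustration only):

    #include <cstdlib>

    int32_t ModByConst(int32_t n, int32_t d) {  // d != 0
      int32_t q = n / std::abs(d);  // stands in for TruncatingDiv
      return n - q * std::abs(d);   // same sign as n, like C++'s n % d
    }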


 void LCodeGen::DoModI(LModI* instr) {
   HMod* hmod = instr->hydrogen();

   Register left_reg = ToRegister(instr->left());
-  ASSERT(left_reg.is(rax));
+  DCHECK(left_reg.is(rax));
   Register right_reg = ToRegister(instr->right());
-  ASSERT(!right_reg.is(rax));
-  ASSERT(!right_reg.is(rdx));
+  DCHECK(!right_reg.is(rax));
+  DCHECK(!right_reg.is(rdx));
   Register result_reg = ToRegister(instr->result());
-  ASSERT(result_reg.is(rdx));
+  DCHECK(result_reg.is(rdx));

   Label done;
   // Check for x % 0, idiv would signal a divide error. We have to
   // deopt in this case because we can't return a NaN.
   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
     __ testl(right_reg, right_reg);
     DeoptimizeIf(zero, instr->environment());
   }

   // Check for kMinInt % -1, idiv would signal a divide error. We
(...skipping 29 matching lines...)
     __ bind(&positive_left);
   }
   __ idivl(right_reg);
   __ bind(&done);
 }


 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));

   // If the divisor is positive, things are easy: There can be no deopts and we
   // can simply do an arithmetic right shift.
   if (divisor == 1) return;
   int32_t shift = WhichPowerOf2Abs(divisor);
   if (divisor > 1) {
     __ sarl(dividend, Immediate(shift));
     return;
   }

(...skipping 23 matching lines...)
   __ jmp(&done, Label::kNear);
   __ bind(&not_kmin_int);
   __ sarl(dividend, Immediate(shift));
   __ bind(&done);
 }


 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(rdx));
+  DCHECK(ToRegister(instr->result()).is(rdx));

   if (divisor == 0) {
     DeoptimizeIf(no_condition, instr->environment());
     return;
   }

   // Check for (0 / -x) that will produce negative zero.
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ testl(dividend, dividend);
     DeoptimizeIf(zero, instr->environment());
   }

   // Easy case: We need no dynamic check for the dividend and the flooring
   // division is the same as the truncating division.
   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
     __ TruncatingDiv(dividend, Abs(divisor));
     if (divisor < 0) __ negl(rdx);
     return;
   }

   // In the general case we may need to adjust before and after the truncating
   // division to get a flooring division.
   Register temp = ToRegister(instr->temp3());
-  ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
+  DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
   Label needs_adjustment, done;
   __ cmpl(dividend, Immediate(0));
   __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
   __ TruncatingDiv(dividend, Abs(divisor));
   if (divisor < 0) __ negl(rdx);
   __ jmp(&done, Label::kNear);
   __ bind(&needs_adjustment);
   __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
   __ TruncatingDiv(temp, Abs(divisor));
   if (divisor < 0) __ negl(rdx);
   __ decl(rdx);
   __ bind(&done);
 }
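Note (not part of the patch): the adjustment exists because truncating division rounds toward zero while Math.floor semantics round toward negative infinity; the two differ exactly when the operands have opposite signs and don't divide evenly. A C++ sketch of the relationship (illustration only):

    int32_t FlooringDiv(int32_t n, int32_t d) {  // d != 0, no overflow
      int32_t q = n / d;  // truncating quotient
      if ((n % d) != 0 && ((n < 0) != (d < 0))) q -= 1;
      return q;
    }

The generated code gets the same effect without computing the remainder: when the result will be negative it biases the dividend by plus or minus 1 before the truncating division and decrements the quotient afterwards.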


 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
   HBinaryOperation* hdiv = instr->hydrogen();
   Register dividend = ToRegister(instr->dividend());
   Register divisor = ToRegister(instr->divisor());
   Register remainder = ToRegister(instr->temp());
   Register result = ToRegister(instr->result());
-  ASSERT(dividend.is(rax));
-  ASSERT(remainder.is(rdx));
-  ASSERT(result.is(rax));
-  ASSERT(!divisor.is(rax));
-  ASSERT(!divisor.is(rdx));
+  DCHECK(dividend.is(rax));
+  DCHECK(remainder.is(rdx));
+  DCHECK(result.is(rax));
+  DCHECK(!divisor.is(rax));
+  DCHECK(!divisor.is(rdx));

   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ testl(divisor, divisor);
     DeoptimizeIf(zero, instr->environment());
   }

   // Check for (0 / -x) that will produce negative zero.
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
     Label dividend_not_zero;
(...skipping 25 matching lines...)
   __ sarl(remainder, Immediate(31));
   __ addl(result, remainder);
   __ bind(&done);
 }


 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
-  ASSERT(!result.is(dividend));
+  DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+  DCHECK(!result.is(dividend));

   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ testl(dividend, dividend);
     DeoptimizeIf(zero, instr->environment());
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
     __ cmpl(dividend, Immediate(kMinInt));
(...skipping 15 matching lines...)
     __ addl(result, dividend);
     __ sarl(result, Immediate(shift));
   }
   if (divisor < 0) __ negl(result);
 }
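Note (not part of the patch, and mostly about the skipped chunk): a plain arithmetic shift rounds toward negative infinity, so truncating division by 2^k adds a bias of 2^k - 1 to negative dividends first; the visible addl/sarl pair is the tail of that sequence. The standard trick, sketched in C++ (illustration only):

    int32_t DivByPowerOf2(int32_t n, int32_t k) {
      int32_t bias = (n >> 31) & ((1 << k) - 1);  // 2^k - 1 if n < 0, else 0
      return (n + bias) >> k;                     // now truncates toward zero
    }

The negl afterwards handles negative divisors, since n / -2^k == -(n / 2^k).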
1309 1309
1310 1310
1311 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 1311 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1312 Register dividend = ToRegister(instr->dividend()); 1312 Register dividend = ToRegister(instr->dividend());
1313 int32_t divisor = instr->divisor(); 1313 int32_t divisor = instr->divisor();
1314 ASSERT(ToRegister(instr->result()).is(rdx)); 1314 DCHECK(ToRegister(instr->result()).is(rdx));
1315 1315
1316 if (divisor == 0) { 1316 if (divisor == 0) {
1317 DeoptimizeIf(no_condition, instr->environment()); 1317 DeoptimizeIf(no_condition, instr->environment());
1318 return; 1318 return;
1319 } 1319 }
1320 1320
1321 // Check for (0 / -x) that will produce negative zero. 1321 // Check for (0 / -x) that will produce negative zero.
1322 HDiv* hdiv = instr->hydrogen(); 1322 HDiv* hdiv = instr->hydrogen();
1323 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1323 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1324 __ testl(dividend, dividend); 1324 __ testl(dividend, dividend);
(...skipping 11 matching lines...) Expand all
1336 } 1336 }
1337 } 1337 }
1338 1338
1339 1339
1340 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 1340 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1341 void LCodeGen::DoDivI(LDivI* instr) { 1341 void LCodeGen::DoDivI(LDivI* instr) {
1342 HBinaryOperation* hdiv = instr->hydrogen(); 1342 HBinaryOperation* hdiv = instr->hydrogen();
1343 Register dividend = ToRegister(instr->dividend()); 1343 Register dividend = ToRegister(instr->dividend());
1344 Register divisor = ToRegister(instr->divisor()); 1344 Register divisor = ToRegister(instr->divisor());
1345 Register remainder = ToRegister(instr->temp()); 1345 Register remainder = ToRegister(instr->temp());
1346 ASSERT(dividend.is(rax)); 1346 DCHECK(dividend.is(rax));
1347 ASSERT(remainder.is(rdx)); 1347 DCHECK(remainder.is(rdx));
1348 ASSERT(ToRegister(instr->result()).is(rax)); 1348 DCHECK(ToRegister(instr->result()).is(rax));
1349 ASSERT(!divisor.is(rax)); 1349 DCHECK(!divisor.is(rax));
1350 ASSERT(!divisor.is(rdx)); 1350 DCHECK(!divisor.is(rdx));
1351 1351
1352 // Check for x / 0. 1352 // Check for x / 0.
1353 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1353 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1354 __ testl(divisor, divisor); 1354 __ testl(divisor, divisor);
1355 DeoptimizeIf(zero, instr->environment()); 1355 DeoptimizeIf(zero, instr->environment());
1356 } 1356 }
1357 1357
1358 // Check for (0 / -x) that will produce negative zero. 1358 // Check for (0 / -x) that will produce negative zero.
1359 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1359 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1360 Label dividend_not_zero; 1360 Label dividend_not_zero;
(...skipping 106 matching lines...) Expand 10 before | Expand all | Expand 10 after
1467 Label done; 1467 Label done;
1468 if (instr->hydrogen_value()->representation().IsSmi()) { 1468 if (instr->hydrogen_value()->representation().IsSmi()) {
1469 __ testp(left, left); 1469 __ testp(left, left);
1470 } else { 1470 } else {
1471 __ testl(left, left); 1471 __ testl(left, left);
1472 } 1472 }
1473 __ j(not_zero, &done, Label::kNear); 1473 __ j(not_zero, &done, Label::kNear);
1474 if (right->IsConstantOperand()) { 1474 if (right->IsConstantOperand()) {
1475 // Constant can't be represented as 32-bit Smi due to immediate size 1475 // Constant can't be represented as 32-bit Smi due to immediate size
1476 // limit. 1476 // limit.
1477 ASSERT(SmiValuesAre32Bits() 1477 DCHECK(SmiValuesAre32Bits()
1478 ? !instr->hydrogen_value()->representation().IsSmi() 1478 ? !instr->hydrogen_value()->representation().IsSmi()
1479 : SmiValuesAre31Bits()); 1479 : SmiValuesAre31Bits());
1480 if (ToInteger32(LConstantOperand::cast(right)) < 0) { 1480 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1481 DeoptimizeIf(no_condition, instr->environment()); 1481 DeoptimizeIf(no_condition, instr->environment());
1482 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { 1482 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1483 __ cmpl(kScratchRegister, Immediate(0)); 1483 __ cmpl(kScratchRegister, Immediate(0));
1484 DeoptimizeIf(less, instr->environment()); 1484 DeoptimizeIf(less, instr->environment());
1485 } 1485 }
1486 } else if (right->IsStackSlot()) { 1486 } else if (right->IsStackSlot()) {
1487 if (instr->hydrogen_value()->representation().IsSmi()) { 1487 if (instr->hydrogen_value()->representation().IsSmi()) {
(...skipping 12 matching lines...) Expand all
1500 DeoptimizeIf(sign, instr->environment()); 1500 DeoptimizeIf(sign, instr->environment());
1501 } 1501 }
1502 __ bind(&done); 1502 __ bind(&done);
1503 } 1503 }
1504 } 1504 }
1505 1505
1506 1506
1507 void LCodeGen::DoBitI(LBitI* instr) { 1507 void LCodeGen::DoBitI(LBitI* instr) {
1508 LOperand* left = instr->left(); 1508 LOperand* left = instr->left();
1509 LOperand* right = instr->right(); 1509 LOperand* right = instr->right();
1510 ASSERT(left->Equals(instr->result())); 1510 DCHECK(left->Equals(instr->result()));
1511 ASSERT(left->IsRegister()); 1511 DCHECK(left->IsRegister());
1512 1512
1513 if (right->IsConstantOperand()) { 1513 if (right->IsConstantOperand()) {
1514 int32_t right_operand = 1514 int32_t right_operand =
1515 ToRepresentation(LConstantOperand::cast(right), 1515 ToRepresentation(LConstantOperand::cast(right),
1516 instr->hydrogen()->right()->representation()); 1516 instr->hydrogen()->right()->representation());
1517 switch (instr->op()) { 1517 switch (instr->op()) {
1518 case Token::BIT_AND: 1518 case Token::BIT_AND:
1519 __ andl(ToRegister(left), Immediate(right_operand)); 1519 __ andl(ToRegister(left), Immediate(right_operand));
1520 break; 1520 break;
1521 case Token::BIT_OR: 1521 case Token::BIT_OR:
(...skipping 31 matching lines...)
1553 __ xorl(ToRegister(left), ToOperand(right)); 1553 __ xorl(ToRegister(left), ToOperand(right));
1554 } else { 1554 } else {
1555 __ xorp(ToRegister(left), ToOperand(right)); 1555 __ xorp(ToRegister(left), ToOperand(right));
1556 } 1556 }
1557 break; 1557 break;
1558 default: 1558 default:
1559 UNREACHABLE(); 1559 UNREACHABLE();
1560 break; 1560 break;
1561 } 1561 }
1562 } else { 1562 } else {
1563 ASSERT(right->IsRegister()); 1563 DCHECK(right->IsRegister());
1564 switch (instr->op()) { 1564 switch (instr->op()) {
1565 case Token::BIT_AND: 1565 case Token::BIT_AND:
1566 if (instr->IsInteger32()) { 1566 if (instr->IsInteger32()) {
1567 __ andl(ToRegister(left), ToRegister(right)); 1567 __ andl(ToRegister(left), ToRegister(right));
1568 } else { 1568 } else {
1569 __ andp(ToRegister(left), ToRegister(right)); 1569 __ andp(ToRegister(left), ToRegister(right));
1570 } 1570 }
1571 break; 1571 break;
1572 case Token::BIT_OR: 1572 case Token::BIT_OR:
1573 if (instr->IsInteger32()) { 1573 if (instr->IsInteger32()) {
(...skipping 13 matching lines...)
1587 UNREACHABLE(); 1587 UNREACHABLE();
1588 break; 1588 break;
1589 } 1589 }
1590 } 1590 }
1591 } 1591 }
1592 1592
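The l/p suffix choice above is the crux of DoBitI on x64: Integer32 values use the 32-bit instruction forms, while Smis with a 32-bit payload can be combined with full pointer-width instructions, because bitwise ops and add/sub distribute over the tag shift. A minimal standalone sketch of that invariant (plain C++ with ad hoc names, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Tag an int32 the way x64 V8 does with 32-bit Smi payloads: value << 32.
    static uint64_t TagSmi(int32_t v) {
      return static_cast<uint64_t>(static_cast<uint32_t>(v)) << 32;
    }

    int main() {
      assert((TagSmi(12) & TagSmi(10)) == TagSmi(12 & 10));  // andp
      assert((TagSmi(12) | TagSmi(10)) == TagSmi(12 | 10));  // orp
      assert((TagSmi(12) ^ TagSmi(10)) == TagSmi(12 ^ 10));  // xorp
      assert(TagSmi(12) + TagSmi(10) == TagSmi(12 + 10));    // addp, absent overflow
      return 0;
    }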
1593 1593
1594 void LCodeGen::DoShiftI(LShiftI* instr) { 1594 void LCodeGen::DoShiftI(LShiftI* instr) {
1595 LOperand* left = instr->left(); 1595 LOperand* left = instr->left();
1596 LOperand* right = instr->right(); 1596 LOperand* right = instr->right();
1597 ASSERT(left->Equals(instr->result())); 1597 DCHECK(left->Equals(instr->result()));
1598 ASSERT(left->IsRegister()); 1598 DCHECK(left->IsRegister());
1599 if (right->IsRegister()) { 1599 if (right->IsRegister()) {
1600 ASSERT(ToRegister(right).is(rcx)); 1600 DCHECK(ToRegister(right).is(rcx));
1601 1601
1602 switch (instr->op()) { 1602 switch (instr->op()) {
1603 case Token::ROR: 1603 case Token::ROR:
1604 __ rorl_cl(ToRegister(left)); 1604 __ rorl_cl(ToRegister(left));
1605 break; 1605 break;
1606 case Token::SAR: 1606 case Token::SAR:
1607 __ sarl_cl(ToRegister(left)); 1607 __ sarl_cl(ToRegister(left));
1608 break; 1608 break;
1609 case Token::SHR: 1609 case Token::SHR:
1610 __ shrl_cl(ToRegister(left)); 1610 __ shrl_cl(ToRegister(left));
(...skipping 30 matching lines...)
1641 __ testl(ToRegister(left), ToRegister(left)); 1641 __ testl(ToRegister(left), ToRegister(left));
1642 DeoptimizeIf(negative, instr->environment()); 1642 DeoptimizeIf(negative, instr->environment());
1643 } 1643 }
1644 break; 1644 break;
1645 case Token::SHL: 1645 case Token::SHL:
1646 if (shift_count != 0) { 1646 if (shift_count != 0) {
1647 if (instr->hydrogen_value()->representation().IsSmi()) { 1647 if (instr->hydrogen_value()->representation().IsSmi()) {
1648 if (SmiValuesAre32Bits()) { 1648 if (SmiValuesAre32Bits()) {
1649 __ shlp(ToRegister(left), Immediate(shift_count)); 1649 __ shlp(ToRegister(left), Immediate(shift_count));
1650 } else { 1650 } else {
1651 ASSERT(SmiValuesAre31Bits()); 1651 DCHECK(SmiValuesAre31Bits());
1652 if (instr->can_deopt()) { 1652 if (instr->can_deopt()) {
1653 if (shift_count != 1) { 1653 if (shift_count != 1) {
1654 __ shll(ToRegister(left), Immediate(shift_count - 1)); 1654 __ shll(ToRegister(left), Immediate(shift_count - 1));
1655 } 1655 }
1656 __ Integer32ToSmi(ToRegister(left), ToRegister(left)); 1656 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
1657 DeoptimizeIf(overflow, instr->environment()); 1657 DeoptimizeIf(overflow, instr->environment());
1658 } else { 1658 } else {
1659 __ shll(ToRegister(left), Immediate(shift_count)); 1659 __ shll(ToRegister(left), Immediate(shift_count));
1660 } 1660 }
1661 } 1661 }
1662 } else { 1662 } else {
1663 __ shll(ToRegister(left), Immediate(shift_count)); 1663 __ shll(ToRegister(left), Immediate(shift_count));
1664 } 1664 }
1665 } 1665 }
1666 break; 1666 break;
1667 default: 1667 default:
1668 UNREACHABLE(); 1668 UNREACHABLE();
1669 break; 1669 break;
1670 } 1670 }
1671 } 1671 }
1672 } 1672 }
1673 1673
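In the SmiValuesAre31Bits() SHL path above, a shift by shift_count is split into a plain shift by shift_count - 1 followed by the doubling that Integer32ToSmi performs, so the overflow flag from that final step reports exactly whether the value still fits a 31-bit Smi payload. A hedged sketch of that arithmetic fact alone (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    // True iff v + v overflows a signed 32-bit register, i.e. what the
    // OF flag reports after the tagging add.
    static bool DoublingOverflows(int32_t v) {
      int64_t wide = static_cast<int64_t>(v) * 2;
      return wide != static_cast<int32_t>(wide);
    }

    int main() {
      // The 31-bit payload range is [-2^30, 2^30 - 1]; doubling overflows
      // exactly when the value lies outside it.
      assert(!DoublingOverflows((1 << 30) - 1));
      assert(!DoublingOverflows(-(1 << 30)));
      assert(DoublingOverflows(1 << 30));
      assert(DoublingOverflows(-(1 << 30) - 1));
      return 0;
    }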
1674 1674
1675 void LCodeGen::DoSubI(LSubI* instr) { 1675 void LCodeGen::DoSubI(LSubI* instr) {
1676 LOperand* left = instr->left(); 1676 LOperand* left = instr->left();
1677 LOperand* right = instr->right(); 1677 LOperand* right = instr->right();
1678 ASSERT(left->Equals(instr->result())); 1678 DCHECK(left->Equals(instr->result()));
1679 1679
1680 if (right->IsConstantOperand()) { 1680 if (right->IsConstantOperand()) {
1681 int32_t right_operand = 1681 int32_t right_operand =
1682 ToRepresentation(LConstantOperand::cast(right), 1682 ToRepresentation(LConstantOperand::cast(right),
1683 instr->hydrogen()->right()->representation()); 1683 instr->hydrogen()->right()->representation());
1684 __ subl(ToRegister(left), Immediate(right_operand)); 1684 __ subl(ToRegister(left), Immediate(right_operand));
1685 } else if (right->IsRegister()) { 1685 } else if (right->IsRegister()) {
1686 if (instr->hydrogen_value()->representation().IsSmi()) { 1686 if (instr->hydrogen_value()->representation().IsSmi()) {
1687 __ subp(ToRegister(left), ToRegister(right)); 1687 __ subp(ToRegister(left), ToRegister(right));
1688 } else { 1688 } else {
(...skipping 22 matching lines...)
1711 } 1711 }
1712 } 1712 }
1713 1713
1714 1714
1715 void LCodeGen::DoConstantS(LConstantS* instr) { 1715 void LCodeGen::DoConstantS(LConstantS* instr) {
1716 __ Move(ToRegister(instr->result()), instr->value()); 1716 __ Move(ToRegister(instr->result()), instr->value());
1717 } 1717 }
1718 1718
1719 1719
1720 void LCodeGen::DoConstantD(LConstantD* instr) { 1720 void LCodeGen::DoConstantD(LConstantD* instr) {
1721 ASSERT(instr->result()->IsDoubleRegister()); 1721 DCHECK(instr->result()->IsDoubleRegister());
1722 XMMRegister res = ToDoubleRegister(instr->result()); 1722 XMMRegister res = ToDoubleRegister(instr->result());
1723 double v = instr->value(); 1723 double v = instr->value();
1724 uint64_t int_val = BitCast<uint64_t, double>(v); 1724 uint64_t int_val = BitCast<uint64_t, double>(v);
1725 // Use xor to produce +0.0 in a fast and compact way, but avoid doing 1725 // Use xor to produce +0.0 in a fast and compact way, but avoid doing
1726 // so if the constant is -0.0. 1726 // so if the constant is -0.0.
1727 if (int_val == 0) { 1727 if (int_val == 0) {
1728 __ xorps(res, res); 1728 __ xorps(res, res);
1729 } else { 1729 } else {
1730 Register tmp = ToRegister(instr->temp()); 1730 Register tmp = ToRegister(instr->temp());
1731 __ Set(tmp, int_val); 1731 __ Set(tmp, int_val);
(...skipping 19 matching lines...)
1751 Register map = ToRegister(instr->value()); 1751 Register map = ToRegister(instr->value());
1752 __ EnumLength(result, map); 1752 __ EnumLength(result, map);
1753 } 1753 }
1754 1754
1755 1755
1756 void LCodeGen::DoDateField(LDateField* instr) { 1756 void LCodeGen::DoDateField(LDateField* instr) {
1757 Register object = ToRegister(instr->date()); 1757 Register object = ToRegister(instr->date());
1758 Register result = ToRegister(instr->result()); 1758 Register result = ToRegister(instr->result());
1759 Smi* index = instr->index(); 1759 Smi* index = instr->index();
1760 Label runtime, done, not_date_object; 1760 Label runtime, done, not_date_object;
1761 ASSERT(object.is(result)); 1761 DCHECK(object.is(result));
1762 ASSERT(object.is(rax)); 1762 DCHECK(object.is(rax));
1763 1763
1764 Condition cc = masm()->CheckSmi(object); 1764 Condition cc = masm()->CheckSmi(object);
1765 DeoptimizeIf(cc, instr->environment()); 1765 DeoptimizeIf(cc, instr->environment());
1766 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister); 1766 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
1767 DeoptimizeIf(not_equal, instr->environment()); 1767 DeoptimizeIf(not_equal, instr->environment());
1768 1768
1769 if (index->value() == 0) { 1769 if (index->value() == 0) {
1770 __ movp(result, FieldOperand(object, JSDate::kValueOffset)); 1770 __ movp(result, FieldOperand(object, JSDate::kValueOffset));
1771 } else { 1771 } else {
1772 if (index->value() < JSDate::kFirstUncachedField) { 1772 if (index->value() < JSDate::kFirstUncachedField) {
(...skipping 74 matching lines...)
1847 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1847 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1848 int encoding_mask = 1848 int encoding_mask =
1849 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 1849 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1850 ? one_byte_seq_type : two_byte_seq_type; 1850 ? one_byte_seq_type : two_byte_seq_type;
1851 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); 1851 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1852 } 1852 }
1853 1853
1854 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1854 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1855 if (instr->value()->IsConstantOperand()) { 1855 if (instr->value()->IsConstantOperand()) {
1856 int value = ToInteger32(LConstantOperand::cast(instr->value())); 1856 int value = ToInteger32(LConstantOperand::cast(instr->value()));
1857 ASSERT_LE(0, value); 1857 DCHECK_LE(0, value);
1858 if (encoding == String::ONE_BYTE_ENCODING) { 1858 if (encoding == String::ONE_BYTE_ENCODING) {
1859 ASSERT_LE(value, String::kMaxOneByteCharCode); 1859 DCHECK_LE(value, String::kMaxOneByteCharCode);
1860 __ movb(operand, Immediate(value)); 1860 __ movb(operand, Immediate(value));
1861 } else { 1861 } else {
1862 ASSERT_LE(value, String::kMaxUtf16CodeUnit); 1862 DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1863 __ movw(operand, Immediate(value)); 1863 __ movw(operand, Immediate(value));
1864 } 1864 }
1865 } else { 1865 } else {
1866 Register value = ToRegister(instr->value()); 1866 Register value = ToRegister(instr->value());
1867 if (encoding == String::ONE_BYTE_ENCODING) { 1867 if (encoding == String::ONE_BYTE_ENCODING) {
1868 __ movb(operand, value); 1868 __ movb(operand, value);
1869 } else { 1869 } else {
1870 __ movw(operand, value); 1870 __ movw(operand, value);
1871 } 1871 }
1872 } 1872 }
1873 } 1873 }
1874 1874
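The movb/movw split above follows the two sequential-string layouts: one byte per character in one-byte strings, two bytes per UTF-16 code unit otherwise, with the constant path asserting the matching value range first. A standalone sketch of the store semantics, using a flat buffer in place of V8's string body:

    #include <cassert>
    #include <cstdint>

    // Store one character into a sequential string body of either encoding.
    static void SetChar(void* chars, bool one_byte, int index, uint16_t value) {
      if (one_byte) {
        assert(value <= 0xFF);  // String::kMaxOneByteCharCode
        static_cast<uint8_t*>(chars)[index] = static_cast<uint8_t>(value);  // movb
      } else {
        static_cast<uint16_t*>(chars)[index] = value;  // movw, up to kMaxUtf16CodeUnit
      }
    }

    int main() {
      uint8_t narrow[4] = {0};
      uint16_t wide[4] = {0};
      SetChar(narrow, true, 2, 'x');
      SetChar(wide, false, 2, 0x20AC);  // the Euro sign needs two bytes
      assert(narrow[2] == 'x' && wide[2] == 0x20AC);
      return 0;
    }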
1875 1875
1876 void LCodeGen::DoAddI(LAddI* instr) { 1876 void LCodeGen::DoAddI(LAddI* instr) {
1877 LOperand* left = instr->left(); 1877 LOperand* left = instr->left();
1878 LOperand* right = instr->right(); 1878 LOperand* right = instr->right();
1879 1879
1880 Representation target_rep = instr->hydrogen()->representation(); 1880 Representation target_rep = instr->hydrogen()->representation();
1881 bool is_p = target_rep.IsSmi() || target_rep.IsExternal(); 1881 bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
1882 1882
1883 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { 1883 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1884 if (right->IsConstantOperand()) { 1884 if (right->IsConstantOperand()) {
1885 // Smi immediates are not supported when Smis have 32-bit payloads. 1885 // Smi immediates are not supported when Smis have 32-bit payloads.
1886 ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits()); 1886 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1887 int32_t offset = 1887 int32_t offset =
1888 ToRepresentation(LConstantOperand::cast(right), 1888 ToRepresentation(LConstantOperand::cast(right),
1889 instr->hydrogen()->right()->representation()); 1889 instr->hydrogen()->right()->representation());
1890 if (is_p) { 1890 if (is_p) {
1891 __ leap(ToRegister(instr->result()), 1891 __ leap(ToRegister(instr->result()),
1892 MemOperand(ToRegister(left), offset)); 1892 MemOperand(ToRegister(left), offset));
1893 } else { 1893 } else {
1894 __ leal(ToRegister(instr->result()), 1894 __ leal(ToRegister(instr->result()),
1895 MemOperand(ToRegister(left), offset)); 1895 MemOperand(ToRegister(left), offset));
1896 } 1896 }
1897 } else { 1897 } else {
1898 Operand address(ToRegister(left), ToRegister(right), times_1, 0); 1898 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1899 if (is_p) { 1899 if (is_p) {
1900 __ leap(ToRegister(instr->result()), address); 1900 __ leap(ToRegister(instr->result()), address);
1901 } else { 1901 } else {
1902 __ leal(ToRegister(instr->result()), address); 1902 __ leal(ToRegister(instr->result()), address);
1903 } 1903 }
1904 } 1904 }
1905 } else { 1905 } else {
1906 if (right->IsConstantOperand()) { 1906 if (right->IsConstantOperand()) {
1907 // Smi immediates are not supported when Smis have 32-bit payloads. 1907 // Smi immediates are not supported when Smis have 32-bit payloads.
1908 ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits()); 1908 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1909 int32_t right_operand = 1909 int32_t right_operand =
1910 ToRepresentation(LConstantOperand::cast(right), 1910 ToRepresentation(LConstantOperand::cast(right),
1911 instr->hydrogen()->right()->representation()); 1911 instr->hydrogen()->right()->representation());
1912 if (is_p) { 1912 if (is_p) {
1913 __ addp(ToRegister(left), Immediate(right_operand)); 1913 __ addp(ToRegister(left), Immediate(right_operand));
1914 } else { 1914 } else {
1915 __ addl(ToRegister(left), Immediate(right_operand)); 1915 __ addl(ToRegister(left), Immediate(right_operand));
1916 } 1916 }
1917 } else if (right->IsRegister()) { 1917 } else if (right->IsRegister()) {
1918 if (is_p) { 1918 if (is_p) {
(...skipping 11 matching lines...)
1930 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1930 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1931 DeoptimizeIf(overflow, instr->environment()); 1931 DeoptimizeIf(overflow, instr->environment());
1932 } 1932 }
1933 } 1933 }
1934 } 1934 }
1935 1935
1936 1936
1937 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1937 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1938 LOperand* left = instr->left(); 1938 LOperand* left = instr->left();
1939 LOperand* right = instr->right(); 1939 LOperand* right = instr->right();
1940 ASSERT(left->Equals(instr->result())); 1940 DCHECK(left->Equals(instr->result()));
1941 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1941 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1942 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 1942 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1943 Label return_left; 1943 Label return_left;
1944 Condition condition = (operation == HMathMinMax::kMathMin) 1944 Condition condition = (operation == HMathMinMax::kMathMin)
1945 ? less_equal 1945 ? less_equal
1946 : greater_equal; 1946 : greater_equal;
1947 Register left_reg = ToRegister(left); 1947 Register left_reg = ToRegister(left);
1948 if (right->IsConstantOperand()) { 1948 if (right->IsConstantOperand()) {
1949 Immediate right_imm = Immediate( 1949 Immediate right_imm = Immediate(
1950 ToRepresentation(LConstantOperand::cast(right), 1950 ToRepresentation(LConstantOperand::cast(right),
1951 instr->hydrogen()->right()->representation())); 1951 instr->hydrogen()->right()->representation()));
1952 ASSERT(SmiValuesAre32Bits() 1952 DCHECK(SmiValuesAre32Bits()
1953 ? !instr->hydrogen()->representation().IsSmi() 1953 ? !instr->hydrogen()->representation().IsSmi()
1954 : SmiValuesAre31Bits()); 1954 : SmiValuesAre31Bits());
1955 __ cmpl(left_reg, right_imm); 1955 __ cmpl(left_reg, right_imm);
1956 __ j(condition, &return_left, Label::kNear); 1956 __ j(condition, &return_left, Label::kNear);
1957 __ movp(left_reg, right_imm); 1957 __ movp(left_reg, right_imm);
1958 } else if (right->IsRegister()) { 1958 } else if (right->IsRegister()) {
1959 Register right_reg = ToRegister(right); 1959 Register right_reg = ToRegister(right);
1960 if (instr->hydrogen_value()->representation().IsSmi()) { 1960 if (instr->hydrogen_value()->representation().IsSmi()) {
1961 __ cmpp(left_reg, right_reg); 1961 __ cmpp(left_reg, right_reg);
1962 } else { 1962 } else {
1963 __ cmpl(left_reg, right_reg); 1963 __ cmpl(left_reg, right_reg);
1964 } 1964 }
1965 __ j(condition, &return_left, Label::kNear); 1965 __ j(condition, &return_left, Label::kNear);
1966 __ movp(left_reg, right_reg); 1966 __ movp(left_reg, right_reg);
1967 } else { 1967 } else {
1968 Operand right_op = ToOperand(right); 1968 Operand right_op = ToOperand(right);
1969 if (instr->hydrogen_value()->representation().IsSmi()) { 1969 if (instr->hydrogen_value()->representation().IsSmi()) {
1970 __ cmpp(left_reg, right_op); 1970 __ cmpp(left_reg, right_op);
1971 } else { 1971 } else {
1972 __ cmpl(left_reg, right_op); 1972 __ cmpl(left_reg, right_op);
1973 } 1973 }
1974 __ j(condition, &return_left, Label::kNear); 1974 __ j(condition, &return_left, Label::kNear);
1975 __ movp(left_reg, right_op); 1975 __ movp(left_reg, right_op);
1976 } 1976 }
1977 __ bind(&return_left); 1977 __ bind(&return_left);
1978 } else { 1978 } else {
1979 ASSERT(instr->hydrogen()->representation().IsDouble()); 1979 DCHECK(instr->hydrogen()->representation().IsDouble());
1980 Label check_nan_left, check_zero, return_left, return_right; 1980 Label check_nan_left, check_zero, return_left, return_right;
1981 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; 1981 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1982 XMMRegister left_reg = ToDoubleRegister(left); 1982 XMMRegister left_reg = ToDoubleRegister(left);
1983 XMMRegister right_reg = ToDoubleRegister(right); 1983 XMMRegister right_reg = ToDoubleRegister(right);
1984 __ ucomisd(left_reg, right_reg); 1984 __ ucomisd(left_reg, right_reg);
1985 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. 1985 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1986 __ j(equal, &check_zero, Label::kNear); // left == right. 1986 __ j(equal, &check_zero, Label::kNear); // left == right.
1987 __ j(condition, &return_left, Label::kNear); 1987 __ j(condition, &return_left, Label::kNear);
1988 __ jmp(&return_right, Label::kNear); 1988 __ jmp(&return_right, Label::kNear);
1989 1989
(...skipping 20 matching lines...)
2010 __ bind(&return_left); 2010 __ bind(&return_left);
2011 } 2011 }
2012 } 2012 }
2013 2013
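The double arm above needs the extra check_nan and check_zero ladders because ucomisd reports every comparison against NaN as unordered and treats -0.0 as equal to +0.0. A behavioural sketch of the min case (plain C++ illustration, not V8 code; max mirrors it with the sign handling inverted):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    static bool SignBit(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      return (bits >> 63) != 0;
    }

    // JS-style min: any NaN operand wins, and -0.0 counts as below +0.0.
    static double JsMin(double a, double b) {
      if (std::isnan(a) || std::isnan(b))
        return std::numeric_limits<double>::quiet_NaN();
      if (a == b) return SignBit(a) ? a : b;  // prefers -0.0 over +0.0
      return a < b ? a : b;
    }

    int main() {
      assert(SignBit(JsMin(-0.0, 0.0)));    // -0.0, even though -0.0 == 0.0
      assert(std::isnan(JsMin(1.0, NAN)));  // NaN propagates
      assert(JsMin(1.0, 2.0) == 1.0);
      return 0;
    }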
2014 2014
2015 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 2015 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2016 XMMRegister left = ToDoubleRegister(instr->left()); 2016 XMMRegister left = ToDoubleRegister(instr->left());
2017 XMMRegister right = ToDoubleRegister(instr->right()); 2017 XMMRegister right = ToDoubleRegister(instr->right());
2018 XMMRegister result = ToDoubleRegister(instr->result()); 2018 XMMRegister result = ToDoubleRegister(instr->result());
2019 // All operations except MOD are computed in-place. 2019 // All operations except MOD are computed in-place.
2020 ASSERT(instr->op() == Token::MOD || left.is(result)); 2020 DCHECK(instr->op() == Token::MOD || left.is(result));
2021 switch (instr->op()) { 2021 switch (instr->op()) {
2022 case Token::ADD: 2022 case Token::ADD:
2023 __ addsd(left, right); 2023 __ addsd(left, right);
2024 break; 2024 break;
2025 case Token::SUB: 2025 case Token::SUB:
2026 __ subsd(left, right); 2026 __ subsd(left, right);
2027 break; 2027 break;
2028 case Token::MUL: 2028 case Token::MUL:
2029 __ mulsd(left, right); 2029 __ mulsd(left, right);
2030 break; 2030 break;
2031 case Token::DIV: 2031 case Token::DIV:
2032 __ divsd(left, right); 2032 __ divsd(left, right);
2033 // Don't delete this mov. It may improve performance on some CPUs 2033 // Don't delete this mov. It may improve performance on some CPUs
2034 // when there is a mulsd depending on the result. 2034 // when there is a mulsd depending on the result.
2035 __ movaps(left, left); 2035 __ movaps(left, left);
2036 break; 2036 break;
2037 case Token::MOD: { 2037 case Token::MOD: {
2038 XMMRegister xmm_scratch = double_scratch0(); 2038 XMMRegister xmm_scratch = double_scratch0();
2039 __ PrepareCallCFunction(2); 2039 __ PrepareCallCFunction(2);
2040 __ movaps(xmm_scratch, left); 2040 __ movaps(xmm_scratch, left);
2041 ASSERT(right.is(xmm1)); 2041 DCHECK(right.is(xmm1));
2042 __ CallCFunction( 2042 __ CallCFunction(
2043 ExternalReference::mod_two_doubles_operation(isolate()), 2); 2043 ExternalReference::mod_two_doubles_operation(isolate()), 2);
2044 __ movaps(result, xmm_scratch); 2044 __ movaps(result, xmm_scratch);
2045 break; 2045 break;
2046 } 2046 }
2047 default: 2047 default:
2048 UNREACHABLE(); 2048 UNREACHABLE();
2049 break; 2049 break;
2050 } 2050 }
2051 } 2051 }
2052 2052
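MOD is the one case above that leaves SSE2 entirely: x64 has no double-precision remainder instruction, so the operands are marshalled into a C call whose result matches std::fmod, which is also the JS % rule that the result takes the dividend's sign. For example:

    #include <cassert>
    #include <cmath>

    int main() {
      assert(std::fmod(5.5, 2.0) == 1.5);
      assert(std::fmod(-5.5, 2.0) == -1.5);  // the sign follows the dividend,
      assert(std::fmod(5.5, -2.0) == 1.5);   // unlike a Euclidean remainder
      return 0;
    }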
2053 2053
2054 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2054 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2055 ASSERT(ToRegister(instr->context()).is(rsi)); 2055 DCHECK(ToRegister(instr->context()).is(rsi));
2056 ASSERT(ToRegister(instr->left()).is(rdx)); 2056 DCHECK(ToRegister(instr->left()).is(rdx));
2057 ASSERT(ToRegister(instr->right()).is(rax)); 2057 DCHECK(ToRegister(instr->right()).is(rax));
2058 ASSERT(ToRegister(instr->result()).is(rax)); 2058 DCHECK(ToRegister(instr->result()).is(rax));
2059 2059
2060 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); 2060 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
2061 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2061 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2062 } 2062 }
2063 2063
2064 2064
2065 template<class InstrType> 2065 template<class InstrType>
2066 void LCodeGen::EmitBranch(InstrType instr, Condition cc) { 2066 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2067 int left_block = instr->TrueDestination(chunk_); 2067 int left_block = instr->TrueDestination(chunk_);
2068 int right_block = instr->FalseDestination(chunk_); 2068 int right_block = instr->FalseDestination(chunk_);
(...skipping 23 matching lines...)
2092 2092
2093 2093
2094 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { 2094 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2095 __ int3(); 2095 __ int3();
2096 } 2096 }
2097 2097
2098 2098
2099 void LCodeGen::DoBranch(LBranch* instr) { 2099 void LCodeGen::DoBranch(LBranch* instr) {
2100 Representation r = instr->hydrogen()->value()->representation(); 2100 Representation r = instr->hydrogen()->value()->representation();
2101 if (r.IsInteger32()) { 2101 if (r.IsInteger32()) {
2102 ASSERT(!info()->IsStub()); 2102 DCHECK(!info()->IsStub());
2103 Register reg = ToRegister(instr->value()); 2103 Register reg = ToRegister(instr->value());
2104 __ testl(reg, reg); 2104 __ testl(reg, reg);
2105 EmitBranch(instr, not_zero); 2105 EmitBranch(instr, not_zero);
2106 } else if (r.IsSmi()) { 2106 } else if (r.IsSmi()) {
2107 ASSERT(!info()->IsStub()); 2107 DCHECK(!info()->IsStub());
2108 Register reg = ToRegister(instr->value()); 2108 Register reg = ToRegister(instr->value());
2109 __ testp(reg, reg); 2109 __ testp(reg, reg);
2110 EmitBranch(instr, not_zero); 2110 EmitBranch(instr, not_zero);
2111 } else if (r.IsDouble()) { 2111 } else if (r.IsDouble()) {
2112 ASSERT(!info()->IsStub()); 2112 DCHECK(!info()->IsStub());
2113 XMMRegister reg = ToDoubleRegister(instr->value()); 2113 XMMRegister reg = ToDoubleRegister(instr->value());
2114 XMMRegister xmm_scratch = double_scratch0(); 2114 XMMRegister xmm_scratch = double_scratch0();
2115 __ xorps(xmm_scratch, xmm_scratch); 2115 __ xorps(xmm_scratch, xmm_scratch);
2116 __ ucomisd(reg, xmm_scratch); 2116 __ ucomisd(reg, xmm_scratch);
2117 EmitBranch(instr, not_equal); 2117 EmitBranch(instr, not_equal);
2118 } else { 2118 } else {
2119 ASSERT(r.IsTagged()); 2119 DCHECK(r.IsTagged());
2120 Register reg = ToRegister(instr->value()); 2120 Register reg = ToRegister(instr->value());
2121 HType type = instr->hydrogen()->value()->type(); 2121 HType type = instr->hydrogen()->value()->type();
2122 if (type.IsBoolean()) { 2122 if (type.IsBoolean()) {
2123 ASSERT(!info()->IsStub()); 2123 DCHECK(!info()->IsStub());
2124 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2124 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2125 EmitBranch(instr, equal); 2125 EmitBranch(instr, equal);
2126 } else if (type.IsSmi()) { 2126 } else if (type.IsSmi()) {
2127 ASSERT(!info()->IsStub()); 2127 DCHECK(!info()->IsStub());
2128 __ SmiCompare(reg, Smi::FromInt(0)); 2128 __ SmiCompare(reg, Smi::FromInt(0));
2129 EmitBranch(instr, not_equal); 2129 EmitBranch(instr, not_equal);
2130 } else if (type.IsJSArray()) { 2130 } else if (type.IsJSArray()) {
2131 ASSERT(!info()->IsStub()); 2131 DCHECK(!info()->IsStub());
2132 EmitBranch(instr, no_condition); 2132 EmitBranch(instr, no_condition);
2133 } else if (type.IsHeapNumber()) { 2133 } else if (type.IsHeapNumber()) {
2134 ASSERT(!info()->IsStub()); 2134 DCHECK(!info()->IsStub());
2135 XMMRegister xmm_scratch = double_scratch0(); 2135 XMMRegister xmm_scratch = double_scratch0();
2136 __ xorps(xmm_scratch, xmm_scratch); 2136 __ xorps(xmm_scratch, xmm_scratch);
2137 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); 2137 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2138 EmitBranch(instr, not_equal); 2138 EmitBranch(instr, not_equal);
2139 } else if (type.IsString()) { 2139 } else if (type.IsString()) {
2140 ASSERT(!info()->IsStub()); 2140 DCHECK(!info()->IsStub());
2141 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); 2141 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2142 EmitBranch(instr, not_equal); 2142 EmitBranch(instr, not_equal);
2143 } else { 2143 } else {
2144 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); 2144 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2145 // Avoid deopts in the case where we've never executed this path before. 2145 // Avoid deopts in the case where we've never executed this path before.
2146 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); 2146 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2147 2147
2148 if (expected.Contains(ToBooleanStub::UNDEFINED)) { 2148 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2149 // undefined -> false. 2149 // undefined -> false.
2150 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); 2150 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
(...skipping 222 matching lines...)
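The ladder above (and its continuation in the elided lines) is an inlined ToBoolean: one dedicated test per type the IC has observed, with a generic stub as the fallback. The rules hard-coded by the visible cases, sketched in standalone C++ as a deliberate simplification of the tagged-value model:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <string>

    bool ToBooleanInt32(int32_t v) { return v != 0; }              // the testl path
    bool ToBooleanDouble(double v) { return v == v && v != 0.0; }  // ucomisd path:
                                                                   // NaN and +/-0 are false
    bool ToBooleanString(const std::string& s) { return !s.empty(); }  // length != 0

    int main() {
      assert(!ToBooleanDouble(NAN) && !ToBooleanDouble(-0.0));
      assert(ToBooleanInt32(-1) && !ToBooleanInt32(0));
      assert(ToBooleanString("x") && !ToBooleanString(""));
      return 0;
    }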
2373 __ addp(rsp, Immediate(kDoubleSize)); 2373 __ addp(rsp, Immediate(kDoubleSize));
2374 2374
2375 int offset = sizeof(kHoleNanUpper32); 2375 int offset = sizeof(kHoleNanUpper32);
2376 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32)); 2376 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2377 EmitBranch(instr, equal); 2377 EmitBranch(instr, equal);
2378 } 2378 }
2379 2379
2380 2380
2381 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { 2381 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2382 Representation rep = instr->hydrogen()->value()->representation(); 2382 Representation rep = instr->hydrogen()->value()->representation();
2383 ASSERT(!rep.IsInteger32()); 2383 DCHECK(!rep.IsInteger32());
2384 2384
2385 if (rep.IsDouble()) { 2385 if (rep.IsDouble()) {
2386 XMMRegister value = ToDoubleRegister(instr->value()); 2386 XMMRegister value = ToDoubleRegister(instr->value());
2387 XMMRegister xmm_scratch = double_scratch0(); 2387 XMMRegister xmm_scratch = double_scratch0();
2388 __ xorps(xmm_scratch, xmm_scratch); 2388 __ xorps(xmm_scratch, xmm_scratch);
2389 __ ucomisd(xmm_scratch, value); 2389 __ ucomisd(xmm_scratch, value);
2390 EmitFalseBranch(instr, not_equal); 2390 EmitFalseBranch(instr, not_equal);
2391 __ movmskpd(kScratchRegister, value); 2391 __ movmskpd(kScratchRegister, value);
2392 __ testl(kScratchRegister, Immediate(1)); 2392 __ testl(kScratchRegister, Immediate(1));
2393 EmitBranch(instr, not_zero); 2393 EmitBranch(instr, not_zero);
2394 } else { 2394 } else {
2395 Register value = ToRegister(instr->value()); 2395 Register value = ToRegister(instr->value());
2396 Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); 2396 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2397 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK); 2397 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2398 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset), 2398 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2399 Immediate(0x1)); 2399 Immediate(0x1));
2400 EmitFalseBranch(instr, no_overflow); 2400 EmitFalseBranch(instr, no_overflow);
2401 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset), 2401 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2402 Immediate(0x00000000)); 2402 Immediate(0x00000000));
2403 EmitBranch(instr, equal); 2403 EmitBranch(instr, equal);
2404 } 2404 }
2405 } 2405 }
2406 2406
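The double path above works in two steps because floating-point equality cannot see the sign of zero: ucomisd first filters out anything not equal to 0.0, then movmskpd reads the sign bit that actually separates -0.0 from +0.0. The same bit-level fact in a standalone sketch:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // -0.0 is the 64-bit word with only the sign bit set; == cannot detect it.
    static bool IsMinusZero(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      return bits == 0x8000000000000000ull;
    }

    int main() {
      assert(-0.0 == 0.0);        // why the comparison alone is not enough
      assert(IsMinusZero(-0.0));  // the sign-bit test settles it
      assert(!IsMinusZero(0.0));
      return 0;
    }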
2407 2407
2408 Condition LCodeGen::EmitIsObject(Register input, 2408 Condition LCodeGen::EmitIsObject(Register input,
2409 Label* is_not_object, 2409 Label* is_not_object,
2410 Label* is_object) { 2410 Label* is_object) {
2411 ASSERT(!input.is(kScratchRegister)); 2411 DCHECK(!input.is(kScratchRegister));
2412 2412
2413 __ JumpIfSmi(input, is_not_object); 2413 __ JumpIfSmi(input, is_not_object);
2414 2414
2415 __ CompareRoot(input, Heap::kNullValueRootIndex); 2415 __ CompareRoot(input, Heap::kNullValueRootIndex);
2416 __ j(equal, is_object); 2416 __ j(equal, is_object);
2417 2417
2418 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset)); 2418 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2419 // Undetectable objects behave like undefined. 2419 // Undetectable objects behave like undefined.
2420 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), 2420 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2421 Immediate(1 << Map::kIsUndetectable)); 2421 Immediate(1 << Map::kIsUndetectable));
(...skipping 68 matching lines...)
2490 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2490 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2491 } 2491 }
2492 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset)); 2492 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2493 __ testb(FieldOperand(temp, Map::kBitFieldOffset), 2493 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2494 Immediate(1 << Map::kIsUndetectable)); 2494 Immediate(1 << Map::kIsUndetectable));
2495 EmitBranch(instr, not_zero); 2495 EmitBranch(instr, not_zero);
2496 } 2496 }
2497 2497
2498 2498
2499 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 2499 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2500 ASSERT(ToRegister(instr->context()).is(rsi)); 2500 DCHECK(ToRegister(instr->context()).is(rsi));
2501 Token::Value op = instr->op(); 2501 Token::Value op = instr->op();
2502 2502
2503 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); 2503 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2504 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2504 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2505 2505
2506 Condition condition = TokenToCondition(op, false); 2506 Condition condition = TokenToCondition(op, false);
2507 __ testp(rax, rax); 2507 __ testp(rax, rax);
2508 2508
2509 EmitBranch(instr, condition); 2509 EmitBranch(instr, condition);
2510 } 2510 }
2511 2511
2512 2512
2513 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2513 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2514 InstanceType from = instr->from(); 2514 InstanceType from = instr->from();
2515 InstanceType to = instr->to(); 2515 InstanceType to = instr->to();
2516 if (from == FIRST_TYPE) return to; 2516 if (from == FIRST_TYPE) return to;
2517 ASSERT(from == to || to == LAST_TYPE); 2517 DCHECK(from == to || to == LAST_TYPE);
2518 return from; 2518 return from;
2519 } 2519 }
2520 2520
2521 2521
2522 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2522 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2523 InstanceType from = instr->from(); 2523 InstanceType from = instr->from();
2524 InstanceType to = instr->to(); 2524 InstanceType to = instr->to();
2525 if (from == to) return equal; 2525 if (from == to) return equal;
2526 if (to == LAST_TYPE) return above_equal; 2526 if (to == LAST_TYPE) return above_equal;
2527 if (from == FIRST_TYPE) return below_equal; 2527 if (from == FIRST_TYPE) return below_equal;
(...skipping 14 matching lines...)
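TestType and BranchCondition above collapse an instance-type interval into one comparison: a degenerate interval becomes an equality test, and an interval touching either end of the type space needs only a single above_equal or below_equal check. A small model of that reduction (plain C++ with hypothetical names, not V8 code):

    #include <cassert>

    // Membership in [from, to] over types first..last, reduced to one test
    // whenever the interval is degenerate or touches an end of the space.
    static bool InTypeRange(int type, int from, int to, int first, int last) {
      if (from == to) return type == from;   // equal
      if (to == last) return type >= from;   // above_equal against 'from'
      if (from == first) return type <= to;  // below_equal against 'to'
      return from <= type && type <= to;     // general case needs two compares
    }

    int main() {
      assert(InTypeRange(7, 0, 7, 0, 10));   // from == first: one <= test
      assert(InTypeRange(9, 5, 10, 0, 10));  // to == last: one >= test
      assert(!InTypeRange(4, 5, 10, 0, 10));
      return 0;
    }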
2542 } 2542 }
2543 2543
2544 2544
2545 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2545 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2546 Register input = ToRegister(instr->value()); 2546 Register input = ToRegister(instr->value());
2547 Register result = ToRegister(instr->result()); 2547 Register result = ToRegister(instr->result());
2548 2548
2549 __ AssertString(input); 2549 __ AssertString(input);
2550 2550
2551 __ movl(result, FieldOperand(input, String::kHashFieldOffset)); 2551 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2552 ASSERT(String::kHashShift >= kSmiTagSize); 2552 DCHECK(String::kHashShift >= kSmiTagSize);
2553 __ IndexFromHash(result, result); 2553 __ IndexFromHash(result, result);
2554 } 2554 }
2555 2555
2556 2556
2557 void LCodeGen::DoHasCachedArrayIndexAndBranch( 2557 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2558 LHasCachedArrayIndexAndBranch* instr) { 2558 LHasCachedArrayIndexAndBranch* instr) {
2559 Register input = ToRegister(instr->value()); 2559 Register input = ToRegister(instr->value());
2560 2560
2561 __ testl(FieldOperand(input, String::kHashFieldOffset), 2561 __ testl(FieldOperand(input, String::kHashFieldOffset),
2562 Immediate(String::kContainsCachedArrayIndexMask)); 2562 Immediate(String::kContainsCachedArrayIndexMask));
2563 EmitBranch(instr, equal); 2563 EmitBranch(instr, equal);
2564 } 2564 }
2565 2565
2566 2566
2567 // Branches to a label or falls through with the answer in the z flag. 2567 // Branches to a label or falls through with the answer in the z flag.
2568 // Trashes the temp register. 2568 // Trashes the temp register.
2569 void LCodeGen::EmitClassOfTest(Label* is_true, 2569 void LCodeGen::EmitClassOfTest(Label* is_true,
2570 Label* is_false, 2570 Label* is_false,
2571 Handle<String> class_name, 2571 Handle<String> class_name,
2572 Register input, 2572 Register input,
2573 Register temp, 2573 Register temp,
2574 Register temp2) { 2574 Register temp2) {
2575 ASSERT(!input.is(temp)); 2575 DCHECK(!input.is(temp));
2576 ASSERT(!input.is(temp2)); 2576 DCHECK(!input.is(temp2));
2577 ASSERT(!temp.is(temp2)); 2577 DCHECK(!temp.is(temp2));
2578 2578
2579 __ JumpIfSmi(input, is_false); 2579 __ JumpIfSmi(input, is_false);
2580 2580
2581 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { 2581 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2582 // Assuming the following assertions, we can use the same compares to test 2582 // Assuming the following assertions, we can use the same compares to test
2583 // for both being a function type and being in the object type range. 2583 // for both being a function type and being in the object type range.
2584 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 2584 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2585 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == 2585 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2586 FIRST_SPEC_OBJECT_TYPE + 1); 2586 FIRST_SPEC_OBJECT_TYPE + 1);
2587 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == 2587 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
(...skipping 31 matching lines...)
2619 // instance class name from there. 2619 // instance class name from there.
2620 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 2620 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2621 __ movp(temp, FieldOperand(temp, 2621 __ movp(temp, FieldOperand(temp,
2622 SharedFunctionInfo::kInstanceClassNameOffset)); 2622 SharedFunctionInfo::kInstanceClassNameOffset));
2623 // The class name we are testing against is internalized since it's a literal. 2623 // The class name we are testing against is internalized since it's a literal.
2624 // The name in the constructor is internalized because of the way the context 2624 // The name in the constructor is internalized because of the way the context
2625 // is booted. This routine isn't expected to work for random API-created 2625 // is booted. This routine isn't expected to work for random API-created
2626 // classes and it doesn't have to because you can't access it with natives 2626 // classes and it doesn't have to because you can't access it with natives
2627 // syntax. Since both sides are internalized it is sufficient to use an 2627 // syntax. Since both sides are internalized it is sufficient to use an
2628 // identity comparison. 2628 // identity comparison.
2629 ASSERT(class_name->IsInternalizedString()); 2629 DCHECK(class_name->IsInternalizedString());
2630 __ Cmp(temp, class_name); 2630 __ Cmp(temp, class_name);
2631 // End with the answer in the z flag. 2631 // End with the answer in the z flag.
2632 } 2632 }
2633 2633
2634 2634
2635 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 2635 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2636 Register input = ToRegister(instr->value()); 2636 Register input = ToRegister(instr->value());
2637 Register temp = ToRegister(instr->temp()); 2637 Register temp = ToRegister(instr->temp());
2638 Register temp2 = ToRegister(instr->temp2()); 2638 Register temp2 = ToRegister(instr->temp2());
2639 Handle<String> class_name = instr->hydrogen()->class_name(); 2639 Handle<String> class_name = instr->hydrogen()->class_name();
2640 2640
2641 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), 2641 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2642 class_name, input, temp, temp2); 2642 class_name, input, temp, temp2);
2643 2643
2644 EmitBranch(instr, equal); 2644 EmitBranch(instr, equal);
2645 } 2645 }
2646 2646
2647 2647
2648 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2648 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2649 Register reg = ToRegister(instr->value()); 2649 Register reg = ToRegister(instr->value());
2650 2650
2651 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); 2651 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2652 EmitBranch(instr, equal); 2652 EmitBranch(instr, equal);
2653 } 2653 }
2654 2654
2655 2655
2656 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 2656 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2657 ASSERT(ToRegister(instr->context()).is(rsi)); 2657 DCHECK(ToRegister(instr->context()).is(rsi));
2658 InstanceofStub stub(isolate(), InstanceofStub::kNoFlags); 2658 InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
2659 __ Push(ToRegister(instr->left())); 2659 __ Push(ToRegister(instr->left()));
2660 __ Push(ToRegister(instr->right())); 2660 __ Push(ToRegister(instr->right()));
2661 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2661 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2662 Label true_value, done; 2662 Label true_value, done;
2663 __ testp(rax, rax); 2663 __ testp(rax, rax);
2664 __ j(zero, &true_value, Label::kNear); 2664 __ j(zero, &true_value, Label::kNear);
2665 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); 2665 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2666 __ jmp(&done, Label::kNear); 2666 __ jmp(&done, Label::kNear);
2667 __ bind(&true_value); 2667 __ bind(&true_value);
(...skipping 11 matching lines...)
2679 virtual void Generate() V8_OVERRIDE { 2679 virtual void Generate() V8_OVERRIDE {
2680 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); 2680 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2681 } 2681 }
2682 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 2682 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2683 Label* map_check() { return &map_check_; } 2683 Label* map_check() { return &map_check_; }
2684 private: 2684 private:
2685 LInstanceOfKnownGlobal* instr_; 2685 LInstanceOfKnownGlobal* instr_;
2686 Label map_check_; 2686 Label map_check_;
2687 }; 2687 };
2688 2688
2689 ASSERT(ToRegister(instr->context()).is(rsi)); 2689 DCHECK(ToRegister(instr->context()).is(rsi));
2690 DeferredInstanceOfKnownGlobal* deferred; 2690 DeferredInstanceOfKnownGlobal* deferred;
2691 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); 2691 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2692 2692
2693 Label done, false_result; 2693 Label done, false_result;
2694 Register object = ToRegister(instr->value()); 2694 Register object = ToRegister(instr->value());
2695 2695
2696 // A Smi is not an instance of anything. 2696 // A Smi is not an instance of anything.
2697 __ JumpIfSmi(object, &false_result, Label::kNear); 2697 __ JumpIfSmi(object, &false_result, Label::kNear);
2698 2698
2699 // This is the inlined call site instanceof cache. The two occurrences of the 2699 // This is the inlined call site instanceof cache. The two occurrences of the
2700 // hole value will be patched to the last map/result pair generated by the 2700 // hole value will be patched to the last map/result pair generated by the
2701 // instanceof stub. 2701 // instanceof stub.
2702 Label cache_miss; 2702 Label cache_miss;
2703 // Use a temp register to avoid memory operands with variable lengths. 2703 // Use a temp register to avoid memory operands with variable lengths.
2704 Register map = ToRegister(instr->temp()); 2704 Register map = ToRegister(instr->temp());
2705 __ movp(map, FieldOperand(object, HeapObject::kMapOffset)); 2705 __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2706 __ bind(deferred->map_check()); // Label for calculating code patching. 2706 __ bind(deferred->map_check()); // Label for calculating code patching.
2707 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value()); 2707 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2708 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL); 2708 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2709 __ cmpp(map, Operand(kScratchRegister, 0)); 2709 __ cmpp(map, Operand(kScratchRegister, 0));
2710 __ j(not_equal, &cache_miss, Label::kNear); 2710 __ j(not_equal, &cache_miss, Label::kNear);
2711 // Patched to load either true or false. 2711 // Patched to load either true or false.
2712 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex); 2712 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2713 #ifdef DEBUG 2713 #ifdef DEBUG
2714 // Check that the code size between patch label and patch sites is invariant. 2714 // Check that the code size between patch label and patch sites is invariant.
2715 Label end_of_patched_code; 2715 Label end_of_patched_code;
2716 __ bind(&end_of_patched_code); 2716 __ bind(&end_of_patched_code);
2717 ASSERT(true); 2717 DCHECK(true);
2718 #endif 2718 #endif
2719 __ jmp(&done, Label::kNear); 2719 __ jmp(&done, Label::kNear);
2720 2720
2721 // The inlined call site cache did not match. Check for null and string 2721 // The inlined call site cache did not match. Check for null and string
2722 // before calling the deferred code. 2722 // before calling the deferred code.
2723 __ bind(&cache_miss); // Null is not an instance of anything. 2723 __ bind(&cache_miss); // Null is not an instance of anything.
2724 __ CompareRoot(object, Heap::kNullValueRootIndex); 2724 __ CompareRoot(object, Heap::kNullValueRootIndex);
2725 __ j(equal, &false_result, Label::kNear); 2725 __ j(equal, &false_result, Label::kNear);
2726 2726
2727 // String values are not instances of anything. 2727 // String values are not instances of anything.
(...skipping 14 matching lines...)
2742 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>( 2742 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2743 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck); 2743 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2744 InstanceofStub stub(isolate(), flags); 2744 InstanceofStub stub(isolate(), flags);
2745 2745
2746 __ Push(ToRegister(instr->value())); 2746 __ Push(ToRegister(instr->value()));
2747 __ Push(instr->function()); 2747 __ Push(instr->function());
2748 2748
2749 static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16; 2749 static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16;
2750 int delta = 2750 int delta =
2751 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; 2751 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2752 ASSERT(delta >= 0); 2752 DCHECK(delta >= 0);
2753 __ PushImm32(delta); 2753 __ PushImm32(delta);
2754 2754
2755 // We are pushing three values on the stack but recording a 2755 // We are pushing three values on the stack but recording a
2756 // safepoint with two arguments because stub is going to 2756 // safepoint with two arguments because stub is going to
2757 // remove the third argument from the stack before jumping 2757 // remove the third argument from the stack before jumping
2758 // to instanceof builtin on the slow path. 2758 // to instanceof builtin on the slow path.
2759 CallCodeGeneric(stub.GetCode(), 2759 CallCodeGeneric(stub.GetCode(),
2760 RelocInfo::CODE_TARGET, 2760 RelocInfo::CODE_TARGET,
2761 instr, 2761 instr,
2762 RECORD_SAFEPOINT_WITH_REGISTERS, 2762 RECORD_SAFEPOINT_WITH_REGISTERS,
2763 2); 2763 2);
2764 ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check)); 2764 DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2765 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); 2765 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2766 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2766 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2767 // Move result to a register that survives the end of the 2767 // Move result to a register that survives the end of the
2768 // PushSafepointRegisterScope. 2768 // PushSafepointRegisterScope.
2769 __ movp(kScratchRegister, rax); 2769 __ movp(kScratchRegister, rax);
2770 } 2770 }
2771 __ testp(kScratchRegister, kScratchRegister); 2771 __ testp(kScratchRegister, kScratchRegister);
2772 Label load_false; 2772 Label load_false;
2773 Label done; 2773 Label done;
2774 __ j(not_zero, &load_false, Label::kNear); 2774 __ j(not_zero, &load_false, Label::kNear);
2775 __ LoadRoot(rax, Heap::kTrueValueRootIndex); 2775 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2776 __ jmp(&done, Label::kNear); 2776 __ jmp(&done, Label::kNear);
2777 __ bind(&load_false); 2777 __ bind(&load_false);
2778 __ LoadRoot(rax, Heap::kFalseValueRootIndex); 2778 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2779 __ bind(&done); 2779 __ bind(&done);
2780 } 2780 }
2781 2781
2782 2782
2783 void LCodeGen::DoCmpT(LCmpT* instr) { 2783 void LCodeGen::DoCmpT(LCmpT* instr) {
2784 ASSERT(ToRegister(instr->context()).is(rsi)); 2784 DCHECK(ToRegister(instr->context()).is(rsi));
2785 Token::Value op = instr->op(); 2785 Token::Value op = instr->op();
2786 2786
2787 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); 2787 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2788 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2788 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2789 2789
2790 Condition condition = TokenToCondition(op, false); 2790 Condition condition = TokenToCondition(op, false);
2791 Label true_value, done; 2791 Label true_value, done;
2792 __ testp(rax, rax); 2792 __ testp(rax, rax);
2793 __ j(condition, &true_value, Label::kNear); 2793 __ j(condition, &true_value, Label::kNear);
2794 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); 2794 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
(...skipping 46 matching lines...)
2841 Register result = ToRegister(instr->result()); 2841 Register result = ToRegister(instr->result());
2842 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle()); 2842 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
2843 if (instr->hydrogen()->RequiresHoleCheck()) { 2843 if (instr->hydrogen()->RequiresHoleCheck()) {
2844 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); 2844 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2845 DeoptimizeIf(equal, instr->environment()); 2845 DeoptimizeIf(equal, instr->environment());
2846 } 2846 }
2847 } 2847 }
2848 2848
2849 2849
2850 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 2850 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2851 ASSERT(ToRegister(instr->context()).is(rsi)); 2851 DCHECK(ToRegister(instr->context()).is(rsi));
2852 ASSERT(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); 2852 DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
2853 ASSERT(ToRegister(instr->result()).is(rax)); 2853 DCHECK(ToRegister(instr->result()).is(rax));
2854 2854
2855 __ Move(LoadIC::NameRegister(), instr->name()); 2855 __ Move(LoadIC::NameRegister(), instr->name());
2856 if (FLAG_vector_ics) { 2856 if (FLAG_vector_ics) {
2857 Register vector = ToRegister(instr->temp_vector()); 2857 Register vector = ToRegister(instr->temp_vector());
2858 ASSERT(vector.is(LoadIC::VectorRegister())); 2858 DCHECK(vector.is(LoadIC::VectorRegister()));
2859 __ Move(vector, instr->hydrogen()->feedback_vector()); 2859 __ Move(vector, instr->hydrogen()->feedback_vector());
2860 // No need to allocate this register. 2860 // No need to allocate this register.
2861 ASSERT(LoadIC::SlotRegister().is(rax)); 2861 DCHECK(LoadIC::SlotRegister().is(rax));
2862 __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot())); 2862 __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
2863 } 2863 }
2864 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; 2864 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2865 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); 2865 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2866 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2866 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2867 } 2867 }
2868 2868
2869 2869
2870 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { 2870 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2871 Register value = ToRegister(instr->value()); 2871 Register value = ToRegister(instr->value());
2872 Handle<Cell> cell_handle = instr->hydrogen()->cell().handle(); 2872 Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
2873 2873
2874 // If the cell we are storing to contains the hole it could have 2874 // If the cell we are storing to contains the hole it could have
2875 // been deleted from the property dictionary. In that case, we need 2875 // been deleted from the property dictionary. In that case, we need
2876 // to update the property details in the property dictionary to mark 2876 // to update the property details in the property dictionary to mark
2877 // it as no longer deleted. We deoptimize in that case. 2877 // it as no longer deleted. We deoptimize in that case.
2878 if (instr->hydrogen()->RequiresHoleCheck()) { 2878 if (instr->hydrogen()->RequiresHoleCheck()) {
2879 // We have a temp because CompareRoot might clobber kScratchRegister. 2879 // We have a temp because CompareRoot might clobber kScratchRegister.
2880 Register cell = ToRegister(instr->temp()); 2880 Register cell = ToRegister(instr->temp());
2881 ASSERT(!value.is(cell)); 2881 DCHECK(!value.is(cell));
2882 __ Move(cell, cell_handle, RelocInfo::CELL); 2882 __ Move(cell, cell_handle, RelocInfo::CELL);
2883 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex); 2883 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2884 DeoptimizeIf(equal, instr->environment()); 2884 DeoptimizeIf(equal, instr->environment());
2885 // Store the value. 2885 // Store the value.
2886 __ movp(Operand(cell, 0), value); 2886 __ movp(Operand(cell, 0), value);
2887 } else { 2887 } else {
2888 // Store the value. 2888 // Store the value.
2889 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL); 2889 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
2890 __ movp(Operand(kScratchRegister, 0), value); 2890 __ movp(Operand(kScratchRegister, 0), value);
2891 } 2891 }
(...skipping 55 matching lines...)
2947 } 2947 }
2948 2948
2949 2949
2950 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 2950 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2951 HObjectAccess access = instr->hydrogen()->access(); 2951 HObjectAccess access = instr->hydrogen()->access();
2952 int offset = access.offset(); 2952 int offset = access.offset();
2953 2953
2954 if (access.IsExternalMemory()) { 2954 if (access.IsExternalMemory()) {
2955 Register result = ToRegister(instr->result()); 2955 Register result = ToRegister(instr->result());
2956 if (instr->object()->IsConstantOperand()) { 2956 if (instr->object()->IsConstantOperand()) {
2957 ASSERT(result.is(rax)); 2957 DCHECK(result.is(rax));
2958 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object()))); 2958 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2959 } else { 2959 } else {
2960 Register object = ToRegister(instr->object()); 2960 Register object = ToRegister(instr->object());
2961 __ Load(result, MemOperand(object, offset), access.representation()); 2961 __ Load(result, MemOperand(object, offset), access.representation());
2962 } 2962 }
2963 return; 2963 return;
2964 } 2964 }
2965 2965
2966 Register object = ToRegister(instr->object()); 2966 Register object = ToRegister(instr->object());
2967 if (instr->hydrogen()->representation().IsDouble()) { 2967 if (instr->hydrogen()->representation().IsDouble()) {
(...skipping 12 matching lines...)
2980 if (representation.IsSmi() && SmiValuesAre32Bits() && 2980 if (representation.IsSmi() && SmiValuesAre32Bits() &&
2981 instr->hydrogen()->representation().IsInteger32()) { 2981 instr->hydrogen()->representation().IsInteger32()) {
2982 if (FLAG_debug_code) { 2982 if (FLAG_debug_code) {
2983 Register scratch = kScratchRegister; 2983 Register scratch = kScratchRegister;
2984 __ Load(scratch, FieldOperand(object, offset), representation); 2984 __ Load(scratch, FieldOperand(object, offset), representation);
2985 __ AssertSmi(scratch); 2985 __ AssertSmi(scratch);
2986 } 2986 }
2987 2987
2988 // Read int value directly from upper half of the smi. 2988 // Read int value directly from upper half of the smi.
2989 STATIC_ASSERT(kSmiTag == 0); 2989 STATIC_ASSERT(kSmiTag == 0);
2990 ASSERT(kSmiTagSize + kSmiShiftSize == 32); 2990 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
2991 offset += kPointerSize / 2; 2991 offset += kPointerSize / 2;
2992 representation = Representation::Integer32(); 2992 representation = Representation::Integer32();
2993 } 2993 }
2994 __ Load(result, FieldOperand(object, offset), representation); 2994 __ Load(result, FieldOperand(object, offset), representation);
2995 } 2995 }
2996 2996
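The offset += kPointerSize / 2 adjustment above relies on the 32-bit-payload Smi layout on little-endian x64: the tagged word is value << 32, so a plain 4-byte load from the upper half of the field yields the untagged int32 with no shift. A standalone layout check (assumes little-endian, as the codegen itself does):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int32_t value = -42;
      // A tagged Smi with a 32-bit payload: the value lives in the upper half.
      uint64_t smi = static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
      int32_t loaded;
      // The moral equivalent of loading at offset + kPointerSize / 2.
      std::memcpy(&loaded, reinterpret_cast<const char*>(&smi) + 4, sizeof loaded);
      assert(loaded == value);
      return 0;
    }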
2997 2997
2998 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 2998 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2999 ASSERT(ToRegister(instr->context()).is(rsi)); 2999 DCHECK(ToRegister(instr->context()).is(rsi));
3000 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); 3000 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3001 ASSERT(ToRegister(instr->result()).is(rax)); 3001 DCHECK(ToRegister(instr->result()).is(rax));
3002 3002
3003 __ Move(LoadIC::NameRegister(), instr->name()); 3003 __ Move(LoadIC::NameRegister(), instr->name());
3004 if (FLAG_vector_ics) { 3004 if (FLAG_vector_ics) {
3005 Register vector = ToRegister(instr->temp_vector()); 3005 Register vector = ToRegister(instr->temp_vector());
3006 ASSERT(vector.is(LoadIC::VectorRegister())); 3006 DCHECK(vector.is(LoadIC::VectorRegister()));
3007 __ Move(vector, instr->hydrogen()->feedback_vector()); 3007 __ Move(vector, instr->hydrogen()->feedback_vector());
3008 // No need to allocate this register. 3008 // No need to allocate this register.
3009 ASSERT(LoadIC::SlotRegister().is(rax)); 3009 DCHECK(LoadIC::SlotRegister().is(rax));
3010 __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot())); 3010 __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
3011 } 3011 }
3012 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); 3012 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3013 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3013 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3014 } 3014 }
3015 3015
3016 3016
3017 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 3017 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3018 Register function = ToRegister(instr->function()); 3018 Register function = ToRegister(instr->function());
3019 Register result = ToRegister(instr->result()); 3019 Register result = ToRegister(instr->result());
(...skipping 177 matching lines...)
3197 int offset = instr->base_offset(); 3197 int offset = instr->base_offset();
3198 3198
3199 if (kPointerSize == kInt32Size && !key->IsConstantOperand() && 3199 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3200 instr->hydrogen()->IsDehoisted()) { 3200 instr->hydrogen()->IsDehoisted()) {
3201 // Sign-extend the key because it could be a 32-bit negative value 3201 // Sign-extend the key because it could be a 32-bit negative value
3202 // and the dehoisted address computation happens in 64 bits. 3202 // and the dehoisted address computation happens in 64 bits.
3203 __ movsxlq(ToRegister(key), ToRegister(key)); 3203 __ movsxlq(ToRegister(key), ToRegister(key));
3204 } 3204 }
3205 if (representation.IsInteger32() && SmiValuesAre32Bits() && 3205 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3206 hinstr->elements_kind() == FAST_SMI_ELEMENTS) { 3206 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3207 ASSERT(!requires_hole_check); 3207 DCHECK(!requires_hole_check);
3208 if (FLAG_debug_code) { 3208 if (FLAG_debug_code) {
3209 Register scratch = kScratchRegister; 3209 Register scratch = kScratchRegister;
3210 __ Load(scratch, 3210 __ Load(scratch,
3211 BuildFastArrayOperand(instr->elements(), 3211 BuildFastArrayOperand(instr->elements(),
3212 key, 3212 key,
3213 instr->hydrogen()->key()->representation(), 3213 instr->hydrogen()->key()->representation(),
3214 FAST_ELEMENTS, 3214 FAST_ELEMENTS,
3215 offset), 3215 offset),
3216 Representation::Smi()); 3216 Representation::Smi());
3217 __ AssertSmi(scratch); 3217 __ AssertSmi(scratch);
3218 } 3218 }
3219 // Read int value directly from upper half of the smi. 3219 // Read int value directly from upper half of the smi.
3220 STATIC_ASSERT(kSmiTag == 0); 3220 STATIC_ASSERT(kSmiTag == 0);
3221 ASSERT(kSmiTagSize + kSmiShiftSize == 32); 3221 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3222 offset += kPointerSize / 2; 3222 offset += kPointerSize / 2;
3223 } 3223 }
3224 3224
3225 __ Load(result, 3225 __ Load(result,
3226 BuildFastArrayOperand(instr->elements(), 3226 BuildFastArrayOperand(instr->elements(),
3227 key, 3227 key,
3228 instr->hydrogen()->key()->representation(), 3228 instr->hydrogen()->key()->representation(),
3229 FAST_ELEMENTS, 3229 FAST_ELEMENTS,
3230 offset), 3230 offset),
3231 representation); 3231 representation);
(...skipping 33 matching lines...)
3265 if (key->IsConstantOperand()) { 3265 if (key->IsConstantOperand()) {
3266 int32_t constant_value = ToInteger32(LConstantOperand::cast(key)); 3266 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
3267 if (constant_value & 0xF0000000) { 3267 if (constant_value & 0xF0000000) {
3268 Abort(kArrayIndexConstantValueTooBig); 3268 Abort(kArrayIndexConstantValueTooBig);
3269 } 3269 }
3270 return Operand(elements_pointer_reg, 3270 return Operand(elements_pointer_reg,
3271 (constant_value << shift_size) + offset); 3271 (constant_value << shift_size) + offset);
3272 } else { 3272 } else {
3273 // Take the tag bit into account while computing the shift size. 3273 // Take the tag bit into account while computing the shift size.
3274 if (key_representation.IsSmi() && (shift_size >= 1)) { 3274 if (key_representation.IsSmi() && (shift_size >= 1)) {
3275 ASSERT(SmiValuesAre31Bits()); 3275 DCHECK(SmiValuesAre31Bits());
3276 shift_size -= kSmiTagSize; 3276 shift_size -= kSmiTagSize;
3277 } 3277 }
3278 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size); 3278 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3279 return Operand(elements_pointer_reg, 3279 return Operand(elements_pointer_reg,
3280 ToRegister(key), 3280 ToRegister(key),
3281 scale_factor, 3281 scale_factor,
3282 offset); 3282 offset);
3283 } 3283 }
3284 } 3284 }
3285 3285
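The tag-bit adjustment in BuildFastArrayOperand can be checked with a one-line identity (a sketch assuming a single smi tag bit, i.e. kSmiTagSize == 1): a 31-bit smi key already carries a factor of two, so shrinking the scale by one keeps the computed address unchanged.

    #include <cassert>

    int main() {
      const int kSmiTagSize = 1;              // one tag bit (SmiValuesAre31Bits)
      int value = 7;
      int tagged_key = value << kSmiTagSize;  // smi tag bit is 0
      int shift_size = 3;                     // e.g. scaling for 8-byte elements
      // Operand(base, key, scale) multiplies the key register by 1 << scale,
      // so dropping one tag bit from the scale leaves the address unchanged:
      assert(tagged_key << (shift_size - kSmiTagSize) == value << shift_size);
      return 0;
    }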
3286 3286
3287 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3287 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3288 ASSERT(ToRegister(instr->context()).is(rsi)); 3288 DCHECK(ToRegister(instr->context()).is(rsi));
3289 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); 3289 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3290 ASSERT(ToRegister(instr->key()).is(LoadIC::NameRegister())); 3290 DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
3291 3291
3292 if (FLAG_vector_ics) { 3292 if (FLAG_vector_ics) {
3293 Register vector = ToRegister(instr->temp_vector()); 3293 Register vector = ToRegister(instr->temp_vector());
3294 ASSERT(vector.is(LoadIC::VectorRegister())); 3294 DCHECK(vector.is(LoadIC::VectorRegister()));
3295 __ Move(vector, instr->hydrogen()->feedback_vector()); 3295 __ Move(vector, instr->hydrogen()->feedback_vector());
3296 // No need to allocate this register. 3296 // No need to allocate this register.
3297 ASSERT(LoadIC::SlotRegister().is(rax)); 3297 DCHECK(LoadIC::SlotRegister().is(rax));
3298 __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot())); 3298 __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
3299 } 3299 }
3300 3300
3301 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); 3301 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3302 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3302 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3303 } 3303 }
3304 3304
3305 3305
3306 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 3306 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3307 Register result = ToRegister(instr->result()); 3307 Register result = ToRegister(instr->result());
(...skipping 97 matching lines...)
3405 3405
3406 __ bind(&receiver_ok); 3406 __ bind(&receiver_ok);
3407 } 3407 }
3408 3408
3409 3409
3410 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 3410 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3411 Register receiver = ToRegister(instr->receiver()); 3411 Register receiver = ToRegister(instr->receiver());
3412 Register function = ToRegister(instr->function()); 3412 Register function = ToRegister(instr->function());
3413 Register length = ToRegister(instr->length()); 3413 Register length = ToRegister(instr->length());
3414 Register elements = ToRegister(instr->elements()); 3414 Register elements = ToRegister(instr->elements());
3415 ASSERT(receiver.is(rax)); // Used for parameter count. 3415 DCHECK(receiver.is(rax)); // Used for parameter count.
3416 ASSERT(function.is(rdi)); // Required by InvokeFunction. 3416 DCHECK(function.is(rdi)); // Required by InvokeFunction.
3417 ASSERT(ToRegister(instr->result()).is(rax)); 3417 DCHECK(ToRegister(instr->result()).is(rax));
3418 3418
3419 // Copy the arguments to this function possibly from the 3419 // Copy the arguments to this function possibly from the
3420 // adaptor frame below it. 3420 // adaptor frame below it.
3421 const uint32_t kArgumentsLimit = 1 * KB; 3421 const uint32_t kArgumentsLimit = 1 * KB;
3422 __ cmpp(length, Immediate(kArgumentsLimit)); 3422 __ cmpp(length, Immediate(kArgumentsLimit));
3423 DeoptimizeIf(above, instr->environment()); 3423 DeoptimizeIf(above, instr->environment());
3424 3424
3425 __ Push(receiver); 3425 __ Push(receiver);
3426 __ movp(receiver, length); 3426 __ movp(receiver, length);
3427 3427
3428 // Loop through the arguments pushing them onto the execution 3428 // Loop through the arguments pushing them onto the execution
3429 // stack. 3429 // stack.
3430 Label invoke, loop; 3430 Label invoke, loop;
3431 // length is a small non-negative integer, due to the test above. 3431 // length is a small non-negative integer, due to the test above.
3432 __ testl(length, length); 3432 __ testl(length, length);
3433 __ j(zero, &invoke, Label::kNear); 3433 __ j(zero, &invoke, Label::kNear);
3434 __ bind(&loop); 3434 __ bind(&loop);
3435 StackArgumentsAccessor args(elements, length, 3435 StackArgumentsAccessor args(elements, length,
3436 ARGUMENTS_DONT_CONTAIN_RECEIVER); 3436 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3437 __ Push(args.GetArgumentOperand(0)); 3437 __ Push(args.GetArgumentOperand(0));
3438 __ decl(length); 3438 __ decl(length);
3439 __ j(not_zero, &loop); 3439 __ j(not_zero, &loop);
3440 3440
3441 // Invoke the function. 3441 // Invoke the function.
3442 __ bind(&invoke); 3442 __ bind(&invoke);
3443 ASSERT(instr->HasPointerMap()); 3443 DCHECK(instr->HasPointerMap());
3444 LPointerMap* pointers = instr->pointer_map(); 3444 LPointerMap* pointers = instr->pointer_map();
3445 SafepointGenerator safepoint_generator( 3445 SafepointGenerator safepoint_generator(
3446 this, pointers, Safepoint::kLazyDeopt); 3446 this, pointers, Safepoint::kLazyDeopt);
3447 ParameterCount actual(rax); 3447 ParameterCount actual(rax);
3448 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); 3448 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3449 } 3449 }
3450 3450
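DoApplyArguments above first deoptimizes if more than kArgumentsLimit (1 * KB, i.e. 1024) arguments would be copied, then pushes one argument per iteration until the length register reaches zero. A rough C++ model of that loop (an assumption-level sketch; the exact traversal order of StackArgumentsAccessor is not visible in this hunk):

    #include <vector>

    std::vector<int> PushAll(const std::vector<int>& elements) {
      std::vector<int> stack;
      int length = static_cast<int>(elements.size());
      while (length != 0) {                     // testl/j(zero) guard, then loop
        stack.push_back(elements[length - 1]);  // one Push per iteration
        --length;                               // decl(length); j(not_zero, &loop)
      }
      return stack;
    }

    int main() {
      std::vector<int> args = {1, 2, 3};
      return PushAll(args).size() == args.size() ? 0 : 1;
    }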
3451 3451
3452 void LCodeGen::DoPushArgument(LPushArgument* instr) { 3452 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3453 LOperand* argument = instr->value(); 3453 LOperand* argument = instr->value();
(...skipping 11 matching lines...)
3465 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); 3465 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3466 } 3466 }
3467 3467
3468 3468
3469 void LCodeGen::DoContext(LContext* instr) { 3469 void LCodeGen::DoContext(LContext* instr) {
3470 Register result = ToRegister(instr->result()); 3470 Register result = ToRegister(instr->result());
3471 if (info()->IsOptimizing()) { 3471 if (info()->IsOptimizing()) {
3472 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset)); 3472 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3473 } else { 3473 } else {
3474 // If there is no frame, the context must be in rsi. 3474 // If there is no frame, the context must be in rsi.
3475 ASSERT(result.is(rsi)); 3475 DCHECK(result.is(rsi));
3476 } 3476 }
3477 } 3477 }
3478 3478
3479 3479
3480 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 3480 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3481 ASSERT(ToRegister(instr->context()).is(rsi)); 3481 DCHECK(ToRegister(instr->context()).is(rsi));
3482 __ Push(rsi); // The context is the first argument. 3482 __ Push(rsi); // The context is the first argument.
3483 __ Push(instr->hydrogen()->pairs()); 3483 __ Push(instr->hydrogen()->pairs());
3484 __ Push(Smi::FromInt(instr->hydrogen()->flags())); 3484 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3485 CallRuntime(Runtime::kDeclareGlobals, 3, instr); 3485 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3486 } 3486 }
3487 3487
3488 3488
3489 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 3489 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3490 int formal_parameter_count, 3490 int formal_parameter_count,
3491 int arity, 3491 int arity,
(...skipping 34 matching lines...)
3526 SafepointGenerator generator( 3526 SafepointGenerator generator(
3527 this, pointers, Safepoint::kLazyDeopt); 3527 this, pointers, Safepoint::kLazyDeopt);
3528 ParameterCount count(arity); 3528 ParameterCount count(arity);
3529 ParameterCount expected(formal_parameter_count); 3529 ParameterCount expected(formal_parameter_count);
3530 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); 3530 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3531 } 3531 }
3532 } 3532 }
3533 3533
3534 3534
3535 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 3535 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3536 ASSERT(ToRegister(instr->result()).is(rax)); 3536 DCHECK(ToRegister(instr->result()).is(rax));
3537 3537
3538 LPointerMap* pointers = instr->pointer_map(); 3538 LPointerMap* pointers = instr->pointer_map();
3539 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3539 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3540 3540
3541 if (instr->target()->IsConstantOperand()) { 3541 if (instr->target()->IsConstantOperand()) {
3542 LConstantOperand* target = LConstantOperand::cast(instr->target()); 3542 LConstantOperand* target = LConstantOperand::cast(instr->target());
3543 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 3543 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3544 generator.BeforeCall(__ CallSize(code)); 3544 generator.BeforeCall(__ CallSize(code));
3545 __ call(code, RelocInfo::CODE_TARGET); 3545 __ call(code, RelocInfo::CODE_TARGET);
3546 } else { 3546 } else {
3547 ASSERT(instr->target()->IsRegister()); 3547 DCHECK(instr->target()->IsRegister());
3548 Register target = ToRegister(instr->target()); 3548 Register target = ToRegister(instr->target());
3549 generator.BeforeCall(__ CallSize(target)); 3549 generator.BeforeCall(__ CallSize(target));
3550 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); 3550 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3551 __ call(target); 3551 __ call(target);
3552 } 3552 }
3553 generator.AfterCall(); 3553 generator.AfterCall();
3554 } 3554 }
3555 3555
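The register branch above turns a tagged Code object pointer into its entry address by adding Code::kHeaderSize - kHeapObjectTag. A tiny arithmetic check (illustrative; the kHeaderSize value here is made up, and kHeapObjectTag == 1 is assumed):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kHeapObjectTag = 1;  // assumed tag on heap pointers
      const intptr_t kHeaderSize = 64;    // illustrative value only
      intptr_t object = 0x1000;           // untagged start of the Code object
      intptr_t tagged = object + kHeapObjectTag;
      // Adding (kHeaderSize - kHeapObjectTag) to the tagged pointer lands
      // exactly on the first byte past the object header:
      assert(tagged + (kHeaderSize - kHeapObjectTag) == object + kHeaderSize);
      return 0;
    }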
3556 3556
3557 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { 3557 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3558 ASSERT(ToRegister(instr->function()).is(rdi)); 3558 DCHECK(ToRegister(instr->function()).is(rdi));
3559 ASSERT(ToRegister(instr->result()).is(rax)); 3559 DCHECK(ToRegister(instr->result()).is(rax));
3560 3560
3561 if (instr->hydrogen()->pass_argument_count()) { 3561 if (instr->hydrogen()->pass_argument_count()) {
3562 __ Set(rax, instr->arity()); 3562 __ Set(rax, instr->arity());
3563 } 3563 }
3564 3564
3565 // Change context. 3565 // Change context.
3566 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); 3566 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3567 3567
3568 LPointerMap* pointers = instr->pointer_map(); 3568 LPointerMap* pointers = instr->pointer_map();
3569 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3569 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
(...skipping 90 matching lines...)
3660 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) 3660 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3661 : LDeferredCode(codegen), instr_(instr) { } 3661 : LDeferredCode(codegen), instr_(instr) { }
3662 virtual void Generate() V8_OVERRIDE { 3662 virtual void Generate() V8_OVERRIDE {
3663 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3663 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3664 } 3664 }
3665 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 3665 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3666 private: 3666 private:
3667 LMathAbs* instr_; 3667 LMathAbs* instr_;
3668 }; 3668 };
3669 3669
3670 ASSERT(instr->value()->Equals(instr->result())); 3670 DCHECK(instr->value()->Equals(instr->result()));
3671 Representation r = instr->hydrogen()->value()->representation(); 3671 Representation r = instr->hydrogen()->value()->representation();
3672 3672
3673 if (r.IsDouble()) { 3673 if (r.IsDouble()) {
3674 XMMRegister scratch = double_scratch0(); 3674 XMMRegister scratch = double_scratch0();
3675 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3675 XMMRegister input_reg = ToDoubleRegister(instr->value());
3676 __ xorps(scratch, scratch); 3676 __ xorps(scratch, scratch);
3677 __ subsd(scratch, input_reg); 3677 __ subsd(scratch, input_reg);
3678 __ andps(input_reg, scratch); 3678 __ andps(input_reg, scratch);
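    // The three instructions above compute |x| branch-free: xorps zeroes
    // scratch, subsd makes it -input, and andps keeps only the bits common
    // to input and -input, i.e. everything except the sign bit.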
3679 } else if (r.IsInteger32()) { 3679 } else if (r.IsInteger32()) {
3680 EmitIntegerMathAbs(instr); 3680 EmitIntegerMathAbs(instr);
(...skipping 147 matching lines...)
3828 } else { 3828 } else {
3829 Operand input = ToOperand(instr->value()); 3829 Operand input = ToOperand(instr->value());
3830 __ sqrtsd(output, input); 3830 __ sqrtsd(output, input);
3831 } 3831 }
3832 } 3832 }
3833 3833
3834 3834
3835 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 3835 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3836 XMMRegister xmm_scratch = double_scratch0(); 3836 XMMRegister xmm_scratch = double_scratch0();
3837 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3837 XMMRegister input_reg = ToDoubleRegister(instr->value());
3838 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3838 DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
3839 3839
3840 // Note that according to ECMA-262 15.8.2.13: 3840 // Note that according to ECMA-262 15.8.2.13:
3841 // Math.pow(-Infinity, 0.5) == Infinity 3841 // Math.pow(-Infinity, 0.5) == Infinity
3842 // Math.sqrt(-Infinity) == NaN 3842 // Math.sqrt(-Infinity) == NaN
3843 Label done, sqrt; 3843 Label done, sqrt;
3844 // Check base for -Infinity. According to IEEE-754, double-precision 3844 // Check base for -Infinity. According to IEEE-754, double-precision
3845 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared. 3845 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3846 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000)); 3846 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3847 __ movq(xmm_scratch, kScratchRegister); 3847 __ movq(xmm_scratch, kScratchRegister);
3848 __ ucomisd(xmm_scratch, input_reg); 3848 __ ucomisd(xmm_scratch, input_reg);
(...skipping 14 matching lines...)
3863 __ bind(&done); 3863 __ bind(&done);
3864 } 3864 }
3865 3865
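Why the -Infinity check above is needed before falling through to sqrtsd (a standalone sketch, not V8 code; behavior per ES5 15.8.2.13 and IEEE-754):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    int main() {
      double neg_inf = -INFINITY;
      uint64_t bits;
      std::memcpy(&bits, &neg_inf, sizeof bits);
      assert(bits == 0xFFF0000000000000ULL);       // the pattern compared above
      assert(std::isnan(std::sqrt(neg_inf)));      // what a bare sqrtsd yields
      assert(std::pow(neg_inf, 0.5) == INFINITY);  // what Math.pow must return
      return 0;
    }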
3866 3866
3867 void LCodeGen::DoPower(LPower* instr) { 3867 void LCodeGen::DoPower(LPower* instr) {
3868 Representation exponent_type = instr->hydrogen()->right()->representation(); 3868 Representation exponent_type = instr->hydrogen()->right()->representation();
3869 // Having marked this as a call, we can use any registers. 3869 // Having marked this as a call, we can use any registers.
3870 // Just make sure that the input/output registers are the expected ones. 3870 // Just make sure that the input/output registers are the expected ones.
3871 3871
3872 Register exponent = rdx; 3872 Register exponent = rdx;
3873 ASSERT(!instr->right()->IsRegister() || 3873 DCHECK(!instr->right()->IsRegister() ||
3874 ToRegister(instr->right()).is(exponent)); 3874 ToRegister(instr->right()).is(exponent));
3875 ASSERT(!instr->right()->IsDoubleRegister() || 3875 DCHECK(!instr->right()->IsDoubleRegister() ||
3876 ToDoubleRegister(instr->right()).is(xmm1)); 3876 ToDoubleRegister(instr->right()).is(xmm1));
3877 ASSERT(ToDoubleRegister(instr->left()).is(xmm2)); 3877 DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3878 ASSERT(ToDoubleRegister(instr->result()).is(xmm3)); 3878 DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
3879 3879
3880 if (exponent_type.IsSmi()) { 3880 if (exponent_type.IsSmi()) {
3881 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3881 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3882 __ CallStub(&stub); 3882 __ CallStub(&stub);
3883 } else if (exponent_type.IsTagged()) { 3883 } else if (exponent_type.IsTagged()) {
3884 Label no_deopt; 3884 Label no_deopt;
3885 __ JumpIfSmi(exponent, &no_deopt, Label::kNear); 3885 __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
3886 __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx); 3886 __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
3887 DeoptimizeIf(not_equal, instr->environment()); 3887 DeoptimizeIf(not_equal, instr->environment());
3888 __ bind(&no_deopt); 3888 __ bind(&no_deopt);
3889 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3889 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3890 __ CallStub(&stub); 3890 __ CallStub(&stub);
3891 } else if (exponent_type.IsInteger32()) { 3891 } else if (exponent_type.IsInteger32()) {
3892 MathPowStub stub(isolate(), MathPowStub::INTEGER); 3892 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3893 __ CallStub(&stub); 3893 __ CallStub(&stub);
3894 } else { 3894 } else {
3895 ASSERT(exponent_type.IsDouble()); 3895 DCHECK(exponent_type.IsDouble());
3896 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 3896 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3897 __ CallStub(&stub); 3897 __ CallStub(&stub);
3898 } 3898 }
3899 } 3899 }
3900 3900
3901 3901
3902 void LCodeGen::DoMathExp(LMathExp* instr) { 3902 void LCodeGen::DoMathExp(LMathExp* instr) {
3903 XMMRegister input = ToDoubleRegister(instr->value()); 3903 XMMRegister input = ToDoubleRegister(instr->value());
3904 XMMRegister result = ToDoubleRegister(instr->result()); 3904 XMMRegister result = ToDoubleRegister(instr->result());
3905 XMMRegister temp0 = double_scratch0(); 3905 XMMRegister temp0 = double_scratch0();
3906 Register temp1 = ToRegister(instr->temp1()); 3906 Register temp1 = ToRegister(instr->temp1());
3907 Register temp2 = ToRegister(instr->temp2()); 3907 Register temp2 = ToRegister(instr->temp2());
3908 3908
3909 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); 3909 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3910 } 3910 }
3911 3911
3912 3912
3913 void LCodeGen::DoMathLog(LMathLog* instr) { 3913 void LCodeGen::DoMathLog(LMathLog* instr) {
3914 ASSERT(instr->value()->Equals(instr->result())); 3914 DCHECK(instr->value()->Equals(instr->result()));
3915 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3915 XMMRegister input_reg = ToDoubleRegister(instr->value());
3916 XMMRegister xmm_scratch = double_scratch0(); 3916 XMMRegister xmm_scratch = double_scratch0();
3917 Label positive, done, zero; 3917 Label positive, done, zero;
3918 __ xorps(xmm_scratch, xmm_scratch); 3918 __ xorps(xmm_scratch, xmm_scratch);
3919 __ ucomisd(input_reg, xmm_scratch); 3919 __ ucomisd(input_reg, xmm_scratch);
3920 __ j(above, &positive, Label::kNear); 3920 __ j(above, &positive, Label::kNear);
3921 __ j(not_carry, &zero, Label::kNear); 3921 __ j(not_carry, &zero, Label::kNear);
3922 ExternalReference nan = 3922 ExternalReference nan =
3923 ExternalReference::address_of_canonical_non_hole_nan(); 3923 ExternalReference::address_of_canonical_non_hole_nan();
3924 Operand nan_operand = masm()->ExternalOperand(nan); 3924 Operand nan_operand = masm()->ExternalOperand(nan);
(...skipping 26 matching lines...)
3951 3951
3952 __ j(not_zero, &not_zero_input); 3952 __ j(not_zero, &not_zero_input);
3953 __ Set(result, 63); // 63^31 == 32 3953 __ Set(result, 63); // 63^31 == 32
3954 3954
3955 __ bind(&not_zero_input); 3955 __ bind(&not_zero_input);
3956 __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x. 3956 __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
3957 } 3957 }
3958 3958
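The tail above converts a bsr result into a leading-zero count (the function header is elided in this hunk, so the surrounding instruction mix is assumed). A hypothetical C++ model of the two XOR identities noted in the comments, using the GCC/Clang builtin __builtin_clz in place of bsr:

    #include <cassert>

    int Clz32(unsigned x) {
      int result = 63;  // the x == 0 case: 63 ^ 31 == 32
      if (x != 0) {
        result = 31 - __builtin_clz(x);  // models bsr: index of the top set bit
      }
      return result ^ 31;  // for x in [0..31], 31 ^ x == 31 - x
    }

    int main() {
      assert(Clz32(0) == 32);
      assert(Clz32(1) == 31);
      assert(Clz32(0x80000000u) == 0);
      return 0;
    }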
3959 3959
3960 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 3960 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3961 ASSERT(ToRegister(instr->context()).is(rsi)); 3961 DCHECK(ToRegister(instr->context()).is(rsi));
3962 ASSERT(ToRegister(instr->function()).is(rdi)); 3962 DCHECK(ToRegister(instr->function()).is(rdi));
3963 ASSERT(instr->HasPointerMap()); 3963 DCHECK(instr->HasPointerMap());
3964 3964
3965 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); 3965 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3966 if (known_function.is_null()) { 3966 if (known_function.is_null()) {
3967 LPointerMap* pointers = instr->pointer_map(); 3967 LPointerMap* pointers = instr->pointer_map();
3968 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3968 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3969 ParameterCount count(instr->arity()); 3969 ParameterCount count(instr->arity());
3970 __ InvokeFunction(rdi, count, CALL_FUNCTION, generator); 3970 __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
3971 } else { 3971 } else {
3972 CallKnownFunction(known_function, 3972 CallKnownFunction(known_function,
3973 instr->hydrogen()->formal_parameter_count(), 3973 instr->hydrogen()->formal_parameter_count(),
3974 instr->arity(), 3974 instr->arity(),
3975 instr, 3975 instr,
3976 RDI_CONTAINS_TARGET); 3976 RDI_CONTAINS_TARGET);
3977 } 3977 }
3978 } 3978 }
3979 3979
3980 3980
3981 void LCodeGen::DoCallFunction(LCallFunction* instr) { 3981 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3982 ASSERT(ToRegister(instr->context()).is(rsi)); 3982 DCHECK(ToRegister(instr->context()).is(rsi));
3983 ASSERT(ToRegister(instr->function()).is(rdi)); 3983 DCHECK(ToRegister(instr->function()).is(rdi));
3984 ASSERT(ToRegister(instr->result()).is(rax)); 3984 DCHECK(ToRegister(instr->result()).is(rax));
3985 3985
3986 int arity = instr->arity(); 3986 int arity = instr->arity();
3987 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); 3987 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
3988 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3988 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3989 } 3989 }
3990 3990
3991 3991
3992 void LCodeGen::DoCallNew(LCallNew* instr) { 3992 void LCodeGen::DoCallNew(LCallNew* instr) {
3993 ASSERT(ToRegister(instr->context()).is(rsi)); 3993 DCHECK(ToRegister(instr->context()).is(rsi));
3994 ASSERT(ToRegister(instr->constructor()).is(rdi)); 3994 DCHECK(ToRegister(instr->constructor()).is(rdi));
3995 ASSERT(ToRegister(instr->result()).is(rax)); 3995 DCHECK(ToRegister(instr->result()).is(rax));
3996 3996
3997 __ Set(rax, instr->arity()); 3997 __ Set(rax, instr->arity());
3998 // No cell in rbx for construct type feedback in optimized code. 3998 // No cell in rbx for construct type feedback in optimized code.
3999 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); 3999 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4000 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); 4000 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4001 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4001 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4002 } 4002 }
4003 4003
4004 4004
4005 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 4005 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4006 ASSERT(ToRegister(instr->context()).is(rsi)); 4006 DCHECK(ToRegister(instr->context()).is(rsi));
4007 ASSERT(ToRegister(instr->constructor()).is(rdi)); 4007 DCHECK(ToRegister(instr->constructor()).is(rdi));
4008 ASSERT(ToRegister(instr->result()).is(rax)); 4008 DCHECK(ToRegister(instr->result()).is(rax));
4009 4009
4010 __ Set(rax, instr->arity()); 4010 __ Set(rax, instr->arity());
4011 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); 4011 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4012 ElementsKind kind = instr->hydrogen()->elements_kind(); 4012 ElementsKind kind = instr->hydrogen()->elements_kind();
4013 AllocationSiteOverrideMode override_mode = 4013 AllocationSiteOverrideMode override_mode =
4014 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 4014 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4015 ? DISABLE_ALLOCATION_SITES 4015 ? DISABLE_ALLOCATION_SITES
4016 : DONT_OVERRIDE; 4016 : DONT_OVERRIDE;
4017 4017
4018 if (instr->arity() == 0) { 4018 if (instr->arity() == 0) {
(...skipping 22 matching lines...)
4041 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4041 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4042 __ bind(&done); 4042 __ bind(&done);
4043 } else { 4043 } else {
4044 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); 4044 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4045 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4045 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4046 } 4046 }
4047 } 4047 }
4048 4048
4049 4049
4050 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 4050 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4051 ASSERT(ToRegister(instr->context()).is(rsi)); 4051 DCHECK(ToRegister(instr->context()).is(rsi));
4052 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); 4052 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4053 } 4053 }
4054 4054
4055 4055
4056 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { 4056 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4057 Register function = ToRegister(instr->function()); 4057 Register function = ToRegister(instr->function());
4058 Register code_object = ToRegister(instr->code_object()); 4058 Register code_object = ToRegister(instr->code_object());
4059 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize)); 4059 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
4060 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); 4060 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4061 } 4061 }
(...skipping 13 matching lines...)
4075 4075
4076 4076
4077 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 4077 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4078 HStoreNamedField* hinstr = instr->hydrogen(); 4078 HStoreNamedField* hinstr = instr->hydrogen();
4079 Representation representation = instr->representation(); 4079 Representation representation = instr->representation();
4080 4080
4081 HObjectAccess access = hinstr->access(); 4081 HObjectAccess access = hinstr->access();
4082 int offset = access.offset(); 4082 int offset = access.offset();
4083 4083
4084 if (access.IsExternalMemory()) { 4084 if (access.IsExternalMemory()) {
4085 ASSERT(!hinstr->NeedsWriteBarrier()); 4085 DCHECK(!hinstr->NeedsWriteBarrier());
4086 Register value = ToRegister(instr->value()); 4086 Register value = ToRegister(instr->value());
4087 if (instr->object()->IsConstantOperand()) { 4087 if (instr->object()->IsConstantOperand()) {
4088 ASSERT(value.is(rax)); 4088 DCHECK(value.is(rax));
4089 LConstantOperand* object = LConstantOperand::cast(instr->object()); 4089 LConstantOperand* object = LConstantOperand::cast(instr->object());
4090 __ store_rax(ToExternalReference(object)); 4090 __ store_rax(ToExternalReference(object));
4091 } else { 4091 } else {
4092 Register object = ToRegister(instr->object()); 4092 Register object = ToRegister(instr->object());
4093 __ Store(MemOperand(object, offset), value, representation); 4093 __ Store(MemOperand(object, offset), value, representation);
4094 } 4094 }
4095 return; 4095 return;
4096 } 4096 }
4097 4097
4098 Register object = ToRegister(instr->object()); 4098 Register object = ToRegister(instr->object());
4099 __ AssertNotSmi(object); 4099 __ AssertNotSmi(object);
4100 4100
4101 ASSERT(!representation.IsSmi() || 4101 DCHECK(!representation.IsSmi() ||
4102 !instr->value()->IsConstantOperand() || 4102 !instr->value()->IsConstantOperand() ||
4103 IsInteger32Constant(LConstantOperand::cast(instr->value()))); 4103 IsInteger32Constant(LConstantOperand::cast(instr->value())));
4104 if (representation.IsDouble()) { 4104 if (representation.IsDouble()) {
4105 ASSERT(access.IsInobject()); 4105 DCHECK(access.IsInobject());
4106 ASSERT(!hinstr->has_transition()); 4106 DCHECK(!hinstr->has_transition());
4107 ASSERT(!hinstr->NeedsWriteBarrier()); 4107 DCHECK(!hinstr->NeedsWriteBarrier());
4108 XMMRegister value = ToDoubleRegister(instr->value()); 4108 XMMRegister value = ToDoubleRegister(instr->value());
4109 __ movsd(FieldOperand(object, offset), value); 4109 __ movsd(FieldOperand(object, offset), value);
4110 return; 4110 return;
4111 } 4111 }
4112 4112
4113 if (hinstr->has_transition()) { 4113 if (hinstr->has_transition()) {
4114 Handle<Map> transition = hinstr->transition_map(); 4114 Handle<Map> transition = hinstr->transition_map();
4115 AddDeprecationDependency(transition); 4115 AddDeprecationDependency(transition);
4116 if (!hinstr->NeedsWriteBarrierForMap()) { 4116 if (!hinstr->NeedsWriteBarrierForMap()) {
4117 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition); 4117 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
(...skipping 11 matching lines...)
4129 4129
4130 // Do the store. 4130 // Do the store.
4131 Register write_register = object; 4131 Register write_register = object;
4132 if (!access.IsInobject()) { 4132 if (!access.IsInobject()) {
4133 write_register = ToRegister(instr->temp()); 4133 write_register = ToRegister(instr->temp());
4134 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); 4134 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4135 } 4135 }
4136 4136
4137 if (representation.IsSmi() && SmiValuesAre32Bits() && 4137 if (representation.IsSmi() && SmiValuesAre32Bits() &&
4138 hinstr->value()->representation().IsInteger32()) { 4138 hinstr->value()->representation().IsInteger32()) {
4139 ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); 4139 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4140 if (FLAG_debug_code) { 4140 if (FLAG_debug_code) {
4141 Register scratch = kScratchRegister; 4141 Register scratch = kScratchRegister;
4142 __ Load(scratch, FieldOperand(write_register, offset), representation); 4142 __ Load(scratch, FieldOperand(write_register, offset), representation);
4143 __ AssertSmi(scratch); 4143 __ AssertSmi(scratch);
4144 } 4144 }
4145 // Store int value directly to upper half of the smi. 4145 // Store int value directly to upper half of the smi.
4146 STATIC_ASSERT(kSmiTag == 0); 4146 STATIC_ASSERT(kSmiTag == 0);
4147 ASSERT(kSmiTagSize + kSmiShiftSize == 32); 4147 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4148 offset += kPointerSize / 2; 4148 offset += kPointerSize / 2;
4149 representation = Representation::Integer32(); 4149 representation = Representation::Integer32();
4150 } 4150 }
4151 4151
4152 Operand operand = FieldOperand(write_register, offset); 4152 Operand operand = FieldOperand(write_register, offset);
4153 4153
4154 if (instr->value()->IsRegister()) { 4154 if (instr->value()->IsRegister()) {
4155 Register value = ToRegister(instr->value()); 4155 Register value = ToRegister(instr->value());
4156 __ Store(operand, value, representation); 4156 __ Store(operand, value, representation);
4157 } else { 4157 } else {
4158 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); 4158 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4159 if (IsInteger32Constant(operand_value)) { 4159 if (IsInteger32Constant(operand_value)) {
4160 ASSERT(!hinstr->NeedsWriteBarrier()); 4160 DCHECK(!hinstr->NeedsWriteBarrier());
4161 int32_t value = ToInteger32(operand_value); 4161 int32_t value = ToInteger32(operand_value);
4162 if (representation.IsSmi()) { 4162 if (representation.IsSmi()) {
4163 __ Move(operand, Smi::FromInt(value)); 4163 __ Move(operand, Smi::FromInt(value));
4164 4164
4165 } else { 4165 } else {
4166 __ movl(operand, Immediate(value)); 4166 __ movl(operand, Immediate(value));
4167 } 4167 }
4168 4168
4169 } else { 4169 } else {
4170 Handle<Object> handle_value = ToHandle(operand_value); 4170 Handle<Object> handle_value = ToHandle(operand_value);
4171 ASSERT(!hinstr->NeedsWriteBarrier()); 4171 DCHECK(!hinstr->NeedsWriteBarrier());
4172 __ Move(operand, handle_value); 4172 __ Move(operand, handle_value);
4173 } 4173 }
4174 } 4174 }
4175 4175
4176 if (hinstr->NeedsWriteBarrier()) { 4176 if (hinstr->NeedsWriteBarrier()) {
4177 Register value = ToRegister(instr->value()); 4177 Register value = ToRegister(instr->value());
4178 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; 4178 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4179 // Update the write barrier for the object for in-object properties. 4179 // Update the write barrier for the object for in-object properties.
4180 __ RecordWriteField(write_register, 4180 __ RecordWriteField(write_register,
4181 offset, 4181 offset,
4182 value, 4182 value,
4183 temp, 4183 temp,
4184 kSaveFPRegs, 4184 kSaveFPRegs,
4185 EMIT_REMEMBERED_SET, 4185 EMIT_REMEMBERED_SET,
4186 hinstr->SmiCheckForWriteBarrier(), 4186 hinstr->SmiCheckForWriteBarrier(),
4187 hinstr->PointersToHereCheckForValue()); 4187 hinstr->PointersToHereCheckForValue());
4188 } 4188 }
4189 } 4189 }
4190 4190
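A toy model of what the RecordWriteField call above buys (an assumption-level sketch, not V8's actual barrier): the collector must learn about pointer stores it did not observe, so each such store records its slot for later rescanning.

    #include <unordered_set>

    struct Heap {
      std::unordered_set<void**> remembered_set;  // slots the GC will revisit
    };

    void StoreWithBarrier(Heap* heap, void** slot, void* value) {
      *slot = value;                      // the movp/Store above
      heap->remembered_set.insert(slot);  // EMIT_REMEMBERED_SET, conceptually
    }

    int main() {
      Heap heap;
      void* value = nullptr;
      void* slot = nullptr;
      StoreWithBarrier(&heap, &slot, value);
      return heap.remembered_set.size() == 1 ? 0 : 1;
    }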
4191 4191
4192 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 4192 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4193 ASSERT(ToRegister(instr->context()).is(rsi)); 4193 DCHECK(ToRegister(instr->context()).is(rsi));
4194 ASSERT(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); 4194 DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
4195 ASSERT(ToRegister(instr->value()).is(StoreIC::ValueRegister())); 4195 DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
4196 4196
4197 __ Move(StoreIC::NameRegister(), instr->hydrogen()->name()); 4197 __ Move(StoreIC::NameRegister(), instr->hydrogen()->name());
4198 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); 4198 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4199 CallCode(ic, RelocInfo::CODE_TARGET, instr); 4199 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4200 } 4200 }
4201 4201
4202 4202
4203 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 4203 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4204 Representation representation = instr->hydrogen()->length()->representation(); 4204 Representation representation = instr->hydrogen()->length()->representation();
4205 ASSERT(representation.Equals(instr->hydrogen()->index()->representation())); 4205 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4206 ASSERT(representation.IsSmiOrInteger32()); 4206 DCHECK(representation.IsSmiOrInteger32());
4207 4207
4208 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal; 4208 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
4209 if (instr->length()->IsConstantOperand()) { 4209 if (instr->length()->IsConstantOperand()) {
4210 int32_t length = ToInteger32(LConstantOperand::cast(instr->length())); 4210 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4211 Register index = ToRegister(instr->index()); 4211 Register index = ToRegister(instr->index());
4212 if (representation.IsSmi()) { 4212 if (representation.IsSmi()) {
4213 __ Cmp(index, Smi::FromInt(length)); 4213 __ Cmp(index, Smi::FromInt(length));
4214 } else { 4214 } else {
4215 __ cmpl(index, Immediate(length)); 4215 __ cmpl(index, Immediate(length));
4216 } 4216 }
(...skipping 155 matching lines...)
4372 int offset = instr->base_offset(); 4372 int offset = instr->base_offset();
4373 Representation representation = hinstr->value()->representation(); 4373 Representation representation = hinstr->value()->representation();
4374 4374
4375 if (kPointerSize == kInt32Size && !key->IsConstantOperand() && 4375 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4376 instr->hydrogen()->IsDehoisted()) { 4376 instr->hydrogen()->IsDehoisted()) {
4377 // Sign-extend the key because it could be a 32-bit negative value 4377 // Sign-extend the key because it could be a 32-bit negative value
4378 // and the dehoisted address computation happens in 64 bits. 4378 // and the dehoisted address computation happens in 64 bits.
4379 __ movsxlq(ToRegister(key), ToRegister(key)); 4379 __ movsxlq(ToRegister(key), ToRegister(key));
4380 } 4380 }
4381 if (representation.IsInteger32() && SmiValuesAre32Bits()) { 4381 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4382 ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); 4382 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4383 ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS); 4383 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4384 if (FLAG_debug_code) { 4384 if (FLAG_debug_code) {
4385 Register scratch = kScratchRegister; 4385 Register scratch = kScratchRegister;
4386 __ Load(scratch, 4386 __ Load(scratch,
4387 BuildFastArrayOperand(instr->elements(), 4387 BuildFastArrayOperand(instr->elements(),
4388 key, 4388 key,
4389 instr->hydrogen()->key()->representation(), 4389 instr->hydrogen()->key()->representation(),
4390 FAST_ELEMENTS, 4390 FAST_ELEMENTS,
4391 offset), 4391 offset),
4392 Representation::Smi()); 4392 Representation::Smi());
4393 __ AssertSmi(scratch); 4393 __ AssertSmi(scratch);
4394 } 4394 }
4395 // Store int value directly to upper half of the smi. 4395 // Store int value directly to upper half of the smi.
4396 STATIC_ASSERT(kSmiTag == 0); 4396 STATIC_ASSERT(kSmiTag == 0);
4397 ASSERT(kSmiTagSize + kSmiShiftSize == 32); 4397 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4398 offset += kPointerSize / 2; 4398 offset += kPointerSize / 2;
4399 } 4399 }
4400 4400
4401 Operand operand = 4401 Operand operand =
4402 BuildFastArrayOperand(instr->elements(), 4402 BuildFastArrayOperand(instr->elements(),
4403 key, 4403 key,
4404 instr->hydrogen()->key()->representation(), 4404 instr->hydrogen()->key()->representation(),
4405 FAST_ELEMENTS, 4405 FAST_ELEMENTS,
4406 offset); 4406 offset);
4407 if (instr->value()->IsRegister()) { 4407 if (instr->value()->IsRegister()) {
4408 __ Store(operand, ToRegister(instr->value()), representation); 4408 __ Store(operand, ToRegister(instr->value()), representation);
4409 } else { 4409 } else {
4410 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); 4410 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4411 if (IsInteger32Constant(operand_value)) { 4411 if (IsInteger32Constant(operand_value)) {
4412 int32_t value = ToInteger32(operand_value); 4412 int32_t value = ToInteger32(operand_value);
4413 if (representation.IsSmi()) { 4413 if (representation.IsSmi()) {
4414 __ Move(operand, Smi::FromInt(value)); 4414 __ Move(operand, Smi::FromInt(value));
4415 4415
4416 } else { 4416 } else {
4417 __ movl(operand, Immediate(value)); 4417 __ movl(operand, Immediate(value));
4418 } 4418 }
4419 } else { 4419 } else {
4420 Handle<Object> handle_value = ToHandle(operand_value); 4420 Handle<Object> handle_value = ToHandle(operand_value);
4421 __ Move(operand, handle_value); 4421 __ Move(operand, handle_value);
4422 } 4422 }
4423 } 4423 }
4424 4424
4425 if (hinstr->NeedsWriteBarrier()) { 4425 if (hinstr->NeedsWriteBarrier()) {
4426 Register elements = ToRegister(instr->elements()); 4426 Register elements = ToRegister(instr->elements());
4427 ASSERT(instr->value()->IsRegister()); 4427 DCHECK(instr->value()->IsRegister());
4428 Register value = ToRegister(instr->value()); 4428 Register value = ToRegister(instr->value());
4429 ASSERT(!key->IsConstantOperand()); 4429 DCHECK(!key->IsConstantOperand());
4430 SmiCheck check_needed = hinstr->value()->type().IsHeapObject() 4430 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4431 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4431 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4432 // Compute address of modified element and store it into key register. 4432 // Compute address of modified element and store it into key register.
4433 Register key_reg(ToRegister(key)); 4433 Register key_reg(ToRegister(key));
4434 __ leap(key_reg, operand); 4434 __ leap(key_reg, operand);
4435 __ RecordWrite(elements, 4435 __ RecordWrite(elements,
4436 key_reg, 4436 key_reg,
4437 value, 4437 value,
4438 kSaveFPRegs, 4438 kSaveFPRegs,
4439 EMIT_REMEMBERED_SET, 4439 EMIT_REMEMBERED_SET,
4440 check_needed, 4440 check_needed,
4441 hinstr->PointersToHereCheckForValue()); 4441 hinstr->PointersToHereCheckForValue());
4442 } 4442 }
4443 } 4443 }
4444 4444
4445 4445
4446 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { 4446 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4447 if (instr->is_typed_elements()) { 4447 if (instr->is_typed_elements()) {
4448 DoStoreKeyedExternalArray(instr); 4448 DoStoreKeyedExternalArray(instr);
4449 } else if (instr->hydrogen()->value()->representation().IsDouble()) { 4449 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4450 DoStoreKeyedFixedDoubleArray(instr); 4450 DoStoreKeyedFixedDoubleArray(instr);
4451 } else { 4451 } else {
4452 DoStoreKeyedFixedArray(instr); 4452 DoStoreKeyedFixedArray(instr);
4453 } 4453 }
4454 } 4454 }
4455 4455
4456 4456
4457 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 4457 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4458 ASSERT(ToRegister(instr->context()).is(rsi)); 4458 DCHECK(ToRegister(instr->context()).is(rsi));
4459 ASSERT(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); 4459 DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
4460 ASSERT(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); 4460 DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
4461 ASSERT(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); 4461 DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
4462 4462
4463 Handle<Code> ic = instr->strict_mode() == STRICT 4463 Handle<Code> ic = instr->strict_mode() == STRICT
4464 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() 4464 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4465 : isolate()->builtins()->KeyedStoreIC_Initialize(); 4465 : isolate()->builtins()->KeyedStoreIC_Initialize();
4466 CallCode(ic, RelocInfo::CODE_TARGET, instr); 4466 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4467 } 4467 }
4468 4468
4469 4469
4470 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4470 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4471 Register object_reg = ToRegister(instr->object()); 4471 Register object_reg = ToRegister(instr->object());
4472 4472
4473 Handle<Map> from_map = instr->original_map(); 4473 Handle<Map> from_map = instr->original_map();
4474 Handle<Map> to_map = instr->transitioned_map(); 4474 Handle<Map> to_map = instr->transitioned_map();
4475 ElementsKind from_kind = instr->from_kind(); 4475 ElementsKind from_kind = instr->from_kind();
4476 ElementsKind to_kind = instr->to_kind(); 4476 ElementsKind to_kind = instr->to_kind();
4477 4477
4478 Label not_applicable; 4478 Label not_applicable;
4479 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); 4479 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4480 __ j(not_equal, &not_applicable); 4480 __ j(not_equal, &not_applicable);
4481 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { 4481 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4482 Register new_map_reg = ToRegister(instr->new_map_temp()); 4482 Register new_map_reg = ToRegister(instr->new_map_temp());
4483 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); 4483 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4484 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg); 4484 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
4485 // Write barrier. 4485 // Write barrier.
4486 __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()), 4486 __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
4487 kDontSaveFPRegs); 4487 kDontSaveFPRegs);
4488 } else { 4488 } else {
4489 ASSERT(object_reg.is(rax)); 4489 DCHECK(object_reg.is(rax));
4490 ASSERT(ToRegister(instr->context()).is(rsi)); 4490 DCHECK(ToRegister(instr->context()).is(rsi));
4491 PushSafepointRegistersScope scope(this); 4491 PushSafepointRegistersScope scope(this);
4492 __ Move(rbx, to_map); 4492 __ Move(rbx, to_map);
4493 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; 4493 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4494 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); 4494 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4495 __ CallStub(&stub); 4495 __ CallStub(&stub);
4496 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0); 4496 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4497 } 4497 }
4498 __ bind(&not_applicable); 4498 __ bind(&not_applicable);
4499 } 4499 }
4500 4500
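The branch structure above separates map-only transitions from ones that must rewrite the backing store. A deliberately simplified model (the predicate below is an assumption, not IsSimpleMapChangeTransition's real definition): a smi-to-tagged transition needs no element copy because every smi is already a valid tagged value, while a transition to double elements is not map-only.

    #include <cassert>

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

    bool IsSimpleTransition(ElementsKind from, ElementsKind to) {
      // Map-only: existing elements remain valid under the new kind.
      return from == FAST_SMI_ELEMENTS && to == FAST_ELEMENTS;
    }

    int main() {
      assert(IsSimpleTransition(FAST_SMI_ELEMENTS, FAST_ELEMENTS));
      assert(!IsSimpleTransition(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS));
      return 0;
    }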
4501 4501
4502 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 4502 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4503 Register object = ToRegister(instr->object()); 4503 Register object = ToRegister(instr->object());
4504 Register temp = ToRegister(instr->temp()); 4504 Register temp = ToRegister(instr->temp());
4505 Label no_memento_found; 4505 Label no_memento_found;
4506 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); 4506 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4507 DeoptimizeIf(equal, instr->environment()); 4507 DeoptimizeIf(equal, instr->environment());
4508 __ bind(&no_memento_found); 4508 __ bind(&no_memento_found);
4509 } 4509 }
4510 4510
4511 4511
4512 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4512 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4513 ASSERT(ToRegister(instr->context()).is(rsi)); 4513 DCHECK(ToRegister(instr->context()).is(rsi));
4514 ASSERT(ToRegister(instr->left()).is(rdx)); 4514 DCHECK(ToRegister(instr->left()).is(rdx));
4515 ASSERT(ToRegister(instr->right()).is(rax)); 4515 DCHECK(ToRegister(instr->right()).is(rax));
4516 StringAddStub stub(isolate(), 4516 StringAddStub stub(isolate(),
4517 instr->hydrogen()->flags(), 4517 instr->hydrogen()->flags(),
4518 instr->hydrogen()->pretenure_flag()); 4518 instr->hydrogen()->pretenure_flag());
4519 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4519 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4520 } 4520 }
4521 4521
4522 4522
4523 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 4523 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4524 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { 4524 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4525 public: 4525 public:
(...skipping 58 matching lines...)
4584 codegen()->DoDeferredStringCharFromCode(instr_); 4584 codegen()->DoDeferredStringCharFromCode(instr_);
4585 } 4585 }
4586 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4586 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4587 private: 4587 private:
4588 LStringCharFromCode* instr_; 4588 LStringCharFromCode* instr_;
4589 }; 4589 };
4590 4590
4591 DeferredStringCharFromCode* deferred = 4591 DeferredStringCharFromCode* deferred =
4592 new(zone()) DeferredStringCharFromCode(this, instr); 4592 new(zone()) DeferredStringCharFromCode(this, instr);
4593 4593
4594 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); 4594 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4595 Register char_code = ToRegister(instr->char_code()); 4595 Register char_code = ToRegister(instr->char_code());
4596 Register result = ToRegister(instr->result()); 4596 Register result = ToRegister(instr->result());
4597 ASSERT(!char_code.is(result)); 4597 DCHECK(!char_code.is(result));
4598 4598
4599 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode)); 4599 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
4600 __ j(above, deferred->entry()); 4600 __ j(above, deferred->entry());
4601 __ movsxlq(char_code, char_code); 4601 __ movsxlq(char_code, char_code);
4602 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); 4602 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4603 __ movp(result, FieldOperand(result, 4603 __ movp(result, FieldOperand(result,
4604 char_code, times_pointer_size, 4604 char_code, times_pointer_size,
4605 FixedArray::kHeaderSize)); 4605 FixedArray::kHeaderSize));
4606 __ CompareRoot(result, Heap::kUndefinedValueRootIndex); 4606 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4607 __ j(equal, deferred->entry()); 4607 __ j(equal, deferred->entry());
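    // An undefined cache entry means the single-character string has not
    // been materialized yet; the deferred path below calls the runtime
    // (Runtime::kCharFromCode) to create it.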
(...skipping 13 matching lines...)
4621 PushSafepointRegistersScope scope(this); 4621 PushSafepointRegistersScope scope(this);
4622 __ Integer32ToSmi(char_code, char_code); 4622 __ Integer32ToSmi(char_code, char_code);
4623 __ Push(char_code); 4623 __ Push(char_code);
4624 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); 4624 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4625 __ StoreToSafepointRegisterSlot(result, rax); 4625 __ StoreToSafepointRegisterSlot(result, rax);
4626 } 4626 }
4627 4627
4628 4628
4629 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4629 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4630 LOperand* input = instr->value(); 4630 LOperand* input = instr->value();
4631 ASSERT(input->IsRegister() || input->IsStackSlot()); 4631 DCHECK(input->IsRegister() || input->IsStackSlot());
4632 LOperand* output = instr->result(); 4632 LOperand* output = instr->result();
4633 ASSERT(output->IsDoubleRegister()); 4633 DCHECK(output->IsDoubleRegister());
4634 if (input->IsRegister()) { 4634 if (input->IsRegister()) {
4635 __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input)); 4635 __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
4636 } else { 4636 } else {
4637 __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input)); 4637 __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
4638 } 4638 }
4639 } 4639 }
4640 4640
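The int32-to-double path above needs no deoptimization check; a one-assert reminder of why (standalone, not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    int main() {
      // A double's 53-bit mantissa represents every 32-bit integer exactly,
      // so the cvtlsi2sd conversion above is lossless in both directions.
      int32_t v = std::numeric_limits<int32_t>::min();
      assert(static_cast<int32_t>(static_cast<double>(v)) == v);
      return 0;
    }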
4641 4641
4642 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4642 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4643 LOperand* input = instr->value(); 4643 LOperand* input = instr->value();
(...skipping 11 matching lines...)
4655 virtual void Generate() V8_OVERRIDE { 4655 virtual void Generate() V8_OVERRIDE {
4656 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), 4656 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4657 instr_->temp2(), SIGNED_INT32); 4657 instr_->temp2(), SIGNED_INT32);
4658 } 4658 }
4659 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4659 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4660 private: 4660 private:
4661 LNumberTagI* instr_; 4661 LNumberTagI* instr_;
4662 }; 4662 };
4663 4663
4664 LOperand* input = instr->value(); 4664 LOperand* input = instr->value();
4665 ASSERT(input->IsRegister() && input->Equals(instr->result())); 4665 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4666 Register reg = ToRegister(input); 4666 Register reg = ToRegister(input);
4667 4667
4668 if (SmiValuesAre32Bits()) { 4668 if (SmiValuesAre32Bits()) {
4669 __ Integer32ToSmi(reg, reg); 4669 __ Integer32ToSmi(reg, reg);
4670 } else { 4670 } else {
4671 ASSERT(SmiValuesAre31Bits()); 4671 DCHECK(SmiValuesAre31Bits());
4672 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); 4672 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4673 __ Integer32ToSmi(reg, reg); 4673 __ Integer32ToSmi(reg, reg);
4674 __ j(overflow, deferred->entry()); 4674 __ j(overflow, deferred->entry());
4675 __ bind(deferred->exit()); 4675 __ bind(deferred->exit());
4676 } 4676 }
4677 } 4677 }
4678 4678
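With 31-bit smis, Integer32ToSmi doubles the value, so inputs outside the smi range overflow into the deferred path; DoDeferredNumberTagIU further down undoes the shift and XORs bit 31 back (the 0x80000000 constant), because the wrap-around flipped exactly that bit. A standalone sketch of that round trip (hypothetical helper name, not V8 code):

    #include <cassert>
    #include <cstdint>

    int32_t RecoverAfterOverflow(int32_t v) {
      int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
      int32_t untagged = tagged >> 1;  // arithmetic shift, like sarl
      return untagged ^ static_cast<int32_t>(0x80000000u);  // flip bit 31 back
    }

    int main() {
      assert(RecoverAfterOverflow(0x40000000) == 0x40000000);  // 2^30 overflows
      assert(RecoverAfterOverflow(-0x40000001) == -0x40000001);
      return 0;
    }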
4679 4679
4680 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4680 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4681 class DeferredNumberTagU V8_FINAL : public LDeferredCode { 4681 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4682 public: 4682 public:
4683 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) 4683 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4684 : LDeferredCode(codegen), instr_(instr) { } 4684 : LDeferredCode(codegen), instr_(instr) { }
4685 virtual void Generate() V8_OVERRIDE { 4685 virtual void Generate() V8_OVERRIDE {
4686 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), 4686 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4687 instr_->temp2(), UNSIGNED_INT32); 4687 instr_->temp2(), UNSIGNED_INT32);
4688 } 4688 }
4689 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4689 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4690 private: 4690 private:
4691 LNumberTagU* instr_; 4691 LNumberTagU* instr_;
4692 }; 4692 };
4693 4693
4694 LOperand* input = instr->value(); 4694 LOperand* input = instr->value();
4695 ASSERT(input->IsRegister() && input->Equals(instr->result())); 4695 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4696 Register reg = ToRegister(input); 4696 Register reg = ToRegister(input);
4697 4697
4698 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); 4698 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4699 __ cmpl(reg, Immediate(Smi::kMaxValue)); 4699 __ cmpl(reg, Immediate(Smi::kMaxValue));
4700 __ j(above, deferred->entry()); 4700 __ j(above, deferred->entry());
4701 __ Integer32ToSmi(reg, reg); 4701 __ Integer32ToSmi(reg, reg);
4702 __ bind(deferred->exit()); 4702 __ bind(deferred->exit());
4703 } 4703 }
4704 4704
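For unsigned inputs there is no overflow flag to lean on: the cmpl/j(above) pair above is an unsigned comparison, so any uint32 greater than Smi::kMaxValue (including bit patterns that would look negative as int32) takes the deferred heap-number path. In spirit (hypothetical helper; kMaxValue is 2^30 - 1 with 31-bit smis, 2^31 - 1 with 32-bit smis):

    #include <cstdint>

    // Mirrors cmpl(reg, Immediate(Smi::kMaxValue)) + j(above): an
    // unsigned compare, so 0x80000000u and friends also fail the test.
    bool FitsInSmi(uint32_t v, uint32_t smi_max_value) {
      return v <= smi_max_value;
    }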
4705 4705
4706 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, 4706 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4707 LOperand* value, 4707 LOperand* value,
4708 LOperand* temp1, 4708 LOperand* temp1,
4709 LOperand* temp2, 4709 LOperand* temp2,
4710 IntegerSignedness signedness) { 4710 IntegerSignedness signedness) {
4711 Label done, slow; 4711 Label done, slow;
4712 Register reg = ToRegister(value); 4712 Register reg = ToRegister(value);
4713 Register tmp = ToRegister(temp1); 4713 Register tmp = ToRegister(temp1);
4714 XMMRegister temp_xmm = ToDoubleRegister(temp2); 4714 XMMRegister temp_xmm = ToDoubleRegister(temp2);
4715 4715
4716 // Load value into temp_xmm which will be preserved across potential call to 4716 // Load value into temp_xmm which will be preserved across potential call to
4717 // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable 4717 // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
4718 // XMM registers on x64). 4718 // XMM registers on x64).
4719 if (signedness == SIGNED_INT32) { 4719 if (signedness == SIGNED_INT32) {
4720 ASSERT(SmiValuesAre31Bits()); 4720 DCHECK(SmiValuesAre31Bits());
4721 // There was overflow, so bits 30 and 31 of the original integer 4721 // There was overflow, so bits 30 and 31 of the original integer
4722 // disagree. Try to allocate a heap number in new space and store 4722 // disagree. Try to allocate a heap number in new space and store
4723 // the value in there. If that fails, call the runtime system. 4723 // the value in there. If that fails, call the runtime system.
4724 __ SmiToInteger32(reg, reg); 4724 __ SmiToInteger32(reg, reg);
4725 __ xorl(reg, Immediate(0x80000000)); 4725 __ xorl(reg, Immediate(0x80000000));
4726 __ cvtlsi2sd(temp_xmm, reg); 4726 __ cvtlsi2sd(temp_xmm, reg);
4727 } else { 4727 } else {
4728 ASSERT(signedness == UNSIGNED_INT32); 4728 DCHECK(signedness == UNSIGNED_INT32);
4729 __ LoadUint32(temp_xmm, reg); 4729 __ LoadUint32(temp_xmm, reg);
4730 } 4730 }
4731 4731
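The SmiToInteger32/xorl pair in the signed branch above undoes a 31-bit tag that wrapped: tagging was a left shift by one that overflowed, so an arithmetic right shift recovers every bit except the sign, and flipping bit 31 restores the original value. A self-contained check of that identity (plain C++, assuming two's-complement wrap and arithmetic right shift, which hold on V8's targets):

    #include <cassert>
    #include <cstdint>

    // Given the wrapped result of (v << 1) for a v outside the 31-bit
    // range, recover v: sar by 1, then flip the sign bit (the xorl).
    int32_t RecoverOverflowedInput(int32_t wrapped) {
      return (wrapped >> 1) ^ INT32_MIN;
    }

    int main() {
      const int64_t samples[] = {int64_t{1} << 30, (int64_t{1} << 31) - 1,
                                 -(int64_t{1} << 30) - 1, INT32_MIN};
      for (int64_t v : samples) {
        int32_t wrapped = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
        assert(RecoverOverflowedInput(wrapped) == static_cast<int32_t>(v));
      }
    }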
4732 if (FLAG_inline_new) { 4732 if (FLAG_inline_new) {
4733 __ AllocateHeapNumber(reg, tmp, &slow); 4733 __ AllocateHeapNumber(reg, tmp, &slow);
4734 __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar); 4734 __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
4735 } 4735 }
4736 4736
4737 // Slow case: Call the runtime system to do the number allocation. 4737 // Slow case: Call the runtime system to do the number allocation.
4738 __ bind(&slow); 4738 __ bind(&slow);
(...skipping 88 matching lines...)
4827 } 4827 }
4828 __ Integer32ToSmi(output, input); 4828 __ Integer32ToSmi(output, input);
4829 if (hchange->CheckFlag(HValue::kCanOverflow) && 4829 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4830 !hchange->value()->CheckFlag(HValue::kUint32)) { 4830 !hchange->value()->CheckFlag(HValue::kUint32)) {
4831 DeoptimizeIf(overflow, instr->environment()); 4831 DeoptimizeIf(overflow, instr->environment());
4832 } 4832 }
4833 } 4833 }
4834 4834
4835 4835
4836 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4836 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4837 ASSERT(instr->value()->Equals(instr->result())); 4837 DCHECK(instr->value()->Equals(instr->result()));
4838 Register input = ToRegister(instr->value()); 4838 Register input = ToRegister(instr->value());
4839 if (instr->needs_check()) { 4839 if (instr->needs_check()) {
4840 Condition is_smi = __ CheckSmi(input); 4840 Condition is_smi = __ CheckSmi(input);
4841 DeoptimizeIf(NegateCondition(is_smi), instr->environment()); 4841 DeoptimizeIf(NegateCondition(is_smi), instr->environment());
4842 } else { 4842 } else {
4843 __ AssertSmi(input); 4843 __ AssertSmi(input);
4844 } 4844 }
4845 __ SmiToInteger32(input, input); 4845 __ SmiToInteger32(input, input);
4846 } 4846 }
4847 4847
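Untagging is just the inverse shift; the subtlety is needs_check(): when the input might not be a smi, CheckSmi tests the tag bit and the negated condition feeds DeoptimizeIf, so heap-object inputs bail out instead of being mis-shifted. In spirit, for the 31-bit layout (V8 tags smis with a clear low bit, heap pointers with a set one):

    #include <cstdint>

    // Checked untag: a set low bit means "heap object, not a smi",
    // which corresponds to DeoptimizeIf(NegateCondition(is_smi)) firing.
    bool CheckedSmiUntag(int32_t tagged, int32_t* out) {
      if (tagged & 1) return false;
      *out = tagged >> 1;  // SmiToInteger32
      return true;
    }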
(...skipping 40 matching lines...)
4888 4888
4889 // Convert undefined (and hole) to NaN. Compute NaN as 0/0. 4889 // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
4890 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); 4890 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4891 DeoptimizeIf(not_equal, env); 4891 DeoptimizeIf(not_equal, env);
4892 4892
4893 __ xorps(result_reg, result_reg); 4893 __ xorps(result_reg, result_reg);
4894 __ divsd(result_reg, result_reg); 4894 __ divsd(result_reg, result_reg);
4895 __ jmp(&done, Label::kNear); 4895 __ jmp(&done, Label::kNear);
4896 } 4896 }
4897 } else { 4897 } else {
4898 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); 4898 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4899 } 4899 }
4900 4900
4901 // Smi to XMM conversion 4901 // Smi to XMM conversion
4902 __ bind(&load_smi); 4902 __ bind(&load_smi);
4903 __ SmiToInteger32(kScratchRegister, input_reg); 4903 __ SmiToInteger32(kScratchRegister, input_reg);
4904 __ Cvtlsi2sd(result_reg, kScratchRegister); 4904 __ Cvtlsi2sd(result_reg, kScratchRegister);
4905 __ bind(&done); 4905 __ bind(&done);
4906 } 4906 }
4907 4907
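The xorps/divsd pair above is the usual trick for materializing NaN without loading a constant: xorps zeroes the register, and 0.0/0.0 is an IEEE-754 invalid operation that produces a quiet NaN. The scalar equivalent:

    #include <cassert>
    #include <cmath>

    int main() {
      double zero = 0.0;         // xorps result_reg, result_reg
      double nan = zero / zero;  // divsd result_reg, result_reg -> quiet NaN
      assert(std::isnan(nan));
      assert(nan != nan);        // NaN is unordered, even against itself
    }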
4908 4908
(...skipping 50 matching lines...)
4959 : LDeferredCode(codegen), instr_(instr) { } 4959 : LDeferredCode(codegen), instr_(instr) { }
4960 virtual void Generate() V8_OVERRIDE { 4960 virtual void Generate() V8_OVERRIDE {
4961 codegen()->DoDeferredTaggedToI(instr_, done()); 4961 codegen()->DoDeferredTaggedToI(instr_, done());
4962 } 4962 }
4963 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4963 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4964 private: 4964 private:
4965 LTaggedToI* instr_; 4965 LTaggedToI* instr_;
4966 }; 4966 };
4967 4967
4968 LOperand* input = instr->value(); 4968 LOperand* input = instr->value();
4969 ASSERT(input->IsRegister()); 4969 DCHECK(input->IsRegister());
4970 ASSERT(input->Equals(instr->result())); 4970 DCHECK(input->Equals(instr->result()));
4971 Register input_reg = ToRegister(input); 4971 Register input_reg = ToRegister(input);
4972 4972
4973 if (instr->hydrogen()->value()->representation().IsSmi()) { 4973 if (instr->hydrogen()->value()->representation().IsSmi()) {
4974 __ SmiToInteger32(input_reg, input_reg); 4974 __ SmiToInteger32(input_reg, input_reg);
4975 } else { 4975 } else {
4976 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); 4976 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4977 __ JumpIfNotSmi(input_reg, deferred->entry()); 4977 __ JumpIfNotSmi(input_reg, deferred->entry());
4978 __ SmiToInteger32(input_reg, input_reg); 4978 __ SmiToInteger32(input_reg, input_reg);
4979 __ bind(deferred->exit()); 4979 __ bind(deferred->exit());
4980 } 4980 }
4981 } 4981 }
4982 4982
4983 4983
4984 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 4984 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4985 LOperand* input = instr->value(); 4985 LOperand* input = instr->value();
4986 ASSERT(input->IsRegister()); 4986 DCHECK(input->IsRegister());
4987 LOperand* result = instr->result(); 4987 LOperand* result = instr->result();
4988 ASSERT(result->IsDoubleRegister()); 4988 DCHECK(result->IsDoubleRegister());
4989 4989
4990 Register input_reg = ToRegister(input); 4990 Register input_reg = ToRegister(input);
4991 XMMRegister result_reg = ToDoubleRegister(result); 4991 XMMRegister result_reg = ToDoubleRegister(result);
4992 4992
4993 HValue* value = instr->hydrogen()->value(); 4993 HValue* value = instr->hydrogen()->value();
4994 NumberUntagDMode mode = value->representation().IsSmi() 4994 NumberUntagDMode mode = value->representation().IsSmi()
4995 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; 4995 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4996 4996
4997 EmitNumberUntagD(input_reg, result_reg, 4997 EmitNumberUntagD(input_reg, result_reg,
4998 instr->hydrogen()->can_convert_undefined_to_nan(), 4998 instr->hydrogen()->can_convert_undefined_to_nan(),
4999 instr->hydrogen()->deoptimize_on_minus_zero(), 4999 instr->hydrogen()->deoptimize_on_minus_zero(),
5000 instr->environment(), 5000 instr->environment(),
5001 mode); 5001 mode);
5002 } 5002 }
5003 5003
5004 5004
5005 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5005 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5006 LOperand* input = instr->value(); 5006 LOperand* input = instr->value();
5007 ASSERT(input->IsDoubleRegister()); 5007 DCHECK(input->IsDoubleRegister());
5008 LOperand* result = instr->result(); 5008 LOperand* result = instr->result();
5009 ASSERT(result->IsRegister()); 5009 DCHECK(result->IsRegister());
5010 5010
5011 XMMRegister input_reg = ToDoubleRegister(input); 5011 XMMRegister input_reg = ToDoubleRegister(input);
5012 Register result_reg = ToRegister(result); 5012 Register result_reg = ToRegister(result);
5013 5013
5014 if (instr->truncating()) { 5014 if (instr->truncating()) {
5015 __ TruncateDoubleToI(result_reg, input_reg); 5015 __ TruncateDoubleToI(result_reg, input_reg);
5016 } else { 5016 } else {
5017 Label bailout, done; 5017 Label bailout, done;
5018 XMMRegister xmm_scratch = double_scratch0(); 5018 XMMRegister xmm_scratch = double_scratch0();
5019 __ DoubleToI(result_reg, input_reg, xmm_scratch, 5019 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5020 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); 5020 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5021 5021
5022 __ jmp(&done, Label::kNear); 5022 __ jmp(&done, Label::kNear);
5023 __ bind(&bailout); 5023 __ bind(&bailout);
5024 DeoptimizeIf(no_condition, instr->environment()); 5024 DeoptimizeIf(no_condition, instr->environment());
5025 __ bind(&done); 5025 __ bind(&done);
5026 } 5026 }
5027 } 5027 }
5028 5028
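The non-truncating path deoptimizes whenever the conversion would be lossy, and GetMinusZeroMode covers the one lossy case an integer cannot even represent: -0.0 converts to 0, but the sign stays observable in JavaScript (1 / -0 === -Infinity), so when minus zero matters the instruction must bail out rather than return 0. The observability, in plain C++:

    #include <cassert>

    int main() {
      double minus_zero = -0.0;
      int as_int = static_cast<int>(minus_zero);  // 0: the sign is gone
      assert(as_int == 0);
      assert(1.0 / minus_zero < 0.0);                   // -infinity
      assert(1.0 / static_cast<double>(as_int) > 0.0);  // +infinity
    }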
5029 5029
5030 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 5030 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5031 LOperand* input = instr->value(); 5031 LOperand* input = instr->value();
5032 ASSERT(input->IsDoubleRegister()); 5032 DCHECK(input->IsDoubleRegister());
5033 LOperand* result = instr->result(); 5033 LOperand* result = instr->result();
5034 ASSERT(result->IsRegister()); 5034 DCHECK(result->IsRegister());
5035 5035
5036 XMMRegister input_reg = ToDoubleRegister(input); 5036 XMMRegister input_reg = ToDoubleRegister(input);
5037 Register result_reg = ToRegister(result); 5037 Register result_reg = ToRegister(result);
5038 5038
5039 Label bailout, done; 5039 Label bailout, done;
5040 XMMRegister xmm_scratch = double_scratch0(); 5040 XMMRegister xmm_scratch = double_scratch0();
5041 __ DoubleToI(result_reg, input_reg, xmm_scratch, 5041 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5042 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); 5042 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5043 5043
5044 __ jmp(&done, Label::kNear); 5044 __ jmp(&done, Label::kNear);
(...skipping 46 matching lines...)
5091 Immediate(static_cast<int8_t>(last))); 5091 Immediate(static_cast<int8_t>(last)));
5092 DeoptimizeIf(above, instr->environment()); 5092 DeoptimizeIf(above, instr->environment());
5093 } 5093 }
5094 } 5094 }
5095 } else { 5095 } else {
5096 uint8_t mask; 5096 uint8_t mask;
5097 uint8_t tag; 5097 uint8_t tag;
5098 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 5098 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5099 5099
5100 if (IsPowerOf2(mask)) { 5100 if (IsPowerOf2(mask)) {
5101 ASSERT(tag == 0 || IsPowerOf2(tag)); 5101 DCHECK(tag == 0 || IsPowerOf2(tag));
5102 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), 5102 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5103 Immediate(mask)); 5103 Immediate(mask));
5104 DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment()); 5104 DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
5105 } else { 5105 } else {
5106 __ movzxbl(kScratchRegister, 5106 __ movzxbl(kScratchRegister,
5107 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); 5107 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
5108 __ andb(kScratchRegister, Immediate(mask)); 5108 __ andb(kScratchRegister, Immediate(mask));
5109 __ cmpb(kScratchRegister, Immediate(tag)); 5109 __ cmpb(kScratchRegister, Immediate(tag));
5110 DeoptimizeIf(not_equal, instr->environment()); 5110 DeoptimizeIf(not_equal, instr->environment());
5111 } 5111 }
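The branch above is a small strength reduction: checking (instance_type & mask) == tag normally takes a movzx/and/cmp sequence, but when mask has a single bit set the masked value can only be 0 or mask, so one testb plus a zero/not-zero branch is enough; hence the DCHECK that tag is 0 or itself a power of two. The equivalence, sketched:

    #include <cassert>
    #include <cstdint>

    bool CheckWithBitTest(uint8_t type, uint8_t mask, uint8_t tag) {
      bool bit_set = (type & mask) != 0;     // testb + flags
      return tag == 0 ? !bit_set : bit_set;  // j(not_zero) / j(zero) deopts
    }

    bool CheckGeneral(uint8_t type, uint8_t mask, uint8_t tag) {
      return (type & mask) == tag;           // movzxbl + andb + cmpb
    }

    int main() {
      for (int type = 0; type < 256; ++type) {
        assert(CheckWithBitTest(type, 0x10, 0x10) ==
               CheckGeneral(type, 0x10, 0x10));
        assert(CheckWithBitTest(type, 0x10, 0) ==
               CheckGeneral(type, 0x10, 0));
      }
    }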
(...skipping 43 matching lines...)
5155 5155
5156 if (instr->hydrogen()->IsStabilityCheck()) { 5156 if (instr->hydrogen()->IsStabilityCheck()) {
5157 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5157 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5158 for (int i = 0; i < maps->size(); ++i) { 5158 for (int i = 0; i < maps->size(); ++i) {
5159 AddStabilityDependency(maps->at(i).handle()); 5159 AddStabilityDependency(maps->at(i).handle());
5160 } 5160 }
5161 return; 5161 return;
5162 } 5162 }
5163 5163
5164 LOperand* input = instr->value(); 5164 LOperand* input = instr->value();
5165 ASSERT(input->IsRegister()); 5165 DCHECK(input->IsRegister());
5166 Register reg = ToRegister(input); 5166 Register reg = ToRegister(input);
5167 5167
5168 DeferredCheckMaps* deferred = NULL; 5168 DeferredCheckMaps* deferred = NULL;
5169 if (instr->hydrogen()->HasMigrationTarget()) { 5169 if (instr->hydrogen()->HasMigrationTarget()) {
5170 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); 5170 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5171 __ bind(deferred->check_maps()); 5171 __ bind(deferred->check_maps());
5172 } 5172 }
5173 5173
5174 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5174 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5175 Label success; 5175 Label success;
(...skipping 17 matching lines...)
5193 5193
5194 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5194 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5195 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); 5195 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5196 XMMRegister xmm_scratch = double_scratch0(); 5196 XMMRegister xmm_scratch = double_scratch0();
5197 Register result_reg = ToRegister(instr->result()); 5197 Register result_reg = ToRegister(instr->result());
5198 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); 5198 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5199 } 5199 }
5200 5200
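ClampDoubleToUint8 saturates a double into [0, 255] for clamped-pixel stores: NaN and negative inputs go to 0, values above 255 go to 255, and in-range values round to nearest with ties to even (Uint8ClampedArray semantics). A scalar sketch, assuming the default FE_TONEAREST rounding mode:

    #include <cassert>
    #include <cmath>

    unsigned char ClampDoubleToUint8(double value) {
      if (!(value > 0.0)) return 0;   // NaN, -0.0 and negatives land here
      if (value >= 255.0) return 255;
      return static_cast<unsigned char>(std::nearbyint(value));
    }

    int main() {
      assert(ClampDoubleToUint8(-1.5) == 0);
      assert(ClampDoubleToUint8(300.0) == 255);
      assert(ClampDoubleToUint8(254.5) == 254);  // ties round to even
    }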
5201 5201
5202 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5202 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5203 ASSERT(instr->unclamped()->Equals(instr->result())); 5203 DCHECK(instr->unclamped()->Equals(instr->result()));
5204 Register value_reg = ToRegister(instr->result()); 5204 Register value_reg = ToRegister(instr->result());
5205 __ ClampUint8(value_reg); 5205 __ ClampUint8(value_reg);
5206 } 5206 }
5207 5207
5208 5208
5209 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5209 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5210 ASSERT(instr->unclamped()->Equals(instr->result())); 5210 DCHECK(instr->unclamped()->Equals(instr->result()));
5211 Register input_reg = ToRegister(instr->unclamped()); 5211 Register input_reg = ToRegister(instr->unclamped());
5212 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); 5212 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5213 XMMRegister xmm_scratch = double_scratch0(); 5213 XMMRegister xmm_scratch = double_scratch0();
5214 Label is_smi, done, heap_number; 5214 Label is_smi, done, heap_number;
5215 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; 5215 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5216 __ JumpIfSmi(input_reg, &is_smi, dist); 5216 __ JumpIfSmi(input_reg, &is_smi, dist);
5217 5217
5218 // Check for heap number 5218 // Check for heap number
5219 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 5219 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5220 factory()->heap_number_map()); 5220 factory()->heap_number_map());
(...skipping 63 matching lines...)
5284 5284
5285 Register result = ToRegister(instr->result()); 5285 Register result = ToRegister(instr->result());
5286 Register temp = ToRegister(instr->temp()); 5286 Register temp = ToRegister(instr->temp());
5287 5287
5288 // Allocate memory for the object. 5288 // Allocate memory for the object.
5289 AllocationFlags flags = TAG_OBJECT; 5289 AllocationFlags flags = TAG_OBJECT;
5290 if (instr->hydrogen()->MustAllocateDoubleAligned()) { 5290 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5291 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); 5291 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5292 } 5292 }
5293 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 5293 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5294 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); 5294 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5295 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); 5295 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5296 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); 5296 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5297 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 5297 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5298 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); 5298 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5299 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); 5299 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5300 } 5300 }
5301 5301
5302 if (instr->size()->IsConstantOperand()) { 5302 if (instr->size()->IsConstantOperand()) {
5303 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5303 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5304 if (size <= Page::kMaxRegularHeapObjectSize) { 5304 if (size <= Page::kMaxRegularHeapObjectSize) {
5305 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); 5305 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5306 } else { 5306 } else {
5307 __ jmp(deferred->entry()); 5307 __ jmp(deferred->entry());
5308 } 5308 }
(...skipping 27 matching lines...)
5336 Register result = ToRegister(instr->result()); 5336 Register result = ToRegister(instr->result());
5337 5337
5338 // TODO(3095996): Get rid of this. For now, we need to make the 5338 // TODO(3095996): Get rid of this. For now, we need to make the
5339 // result register contain a valid pointer because it is already 5339 // result register contain a valid pointer because it is already
5340 // contained in the register pointer map. 5340 // contained in the register pointer map.
5341 __ Move(result, Smi::FromInt(0)); 5341 __ Move(result, Smi::FromInt(0));
5342 5342
5343 PushSafepointRegistersScope scope(this); 5343 PushSafepointRegistersScope scope(this);
5344 if (instr->size()->IsRegister()) { 5344 if (instr->size()->IsRegister()) {
5345 Register size = ToRegister(instr->size()); 5345 Register size = ToRegister(instr->size());
5346 ASSERT(!size.is(result)); 5346 DCHECK(!size.is(result));
5347 __ Integer32ToSmi(size, size); 5347 __ Integer32ToSmi(size, size);
5348 __ Push(size); 5348 __ Push(size);
5349 } else { 5349 } else {
5350 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5350 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5351 __ Push(Smi::FromInt(size)); 5351 __ Push(Smi::FromInt(size));
5352 } 5352 }
5353 5353
5354 int flags = 0; 5354 int flags = 0;
5355 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 5355 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5356 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); 5356 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5357 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); 5357 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5358 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); 5358 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5359 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 5359 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5360 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); 5360 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5361 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); 5361 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5362 } else { 5362 } else {
5363 flags = AllocateTargetSpace::update(flags, NEW_SPACE); 5363 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5364 } 5364 }
5365 __ Push(Smi::FromInt(flags)); 5365 __ Push(Smi::FromInt(flags));
5366 5366
5367 CallRuntimeFromDeferred( 5367 CallRuntimeFromDeferred(
5368 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); 5368 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5369 __ StoreToSafepointRegisterSlot(result, rax); 5369 __ StoreToSafepointRegisterSlot(result, rax);
5370 } 5370 }
5371 5371
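Both the size and the flags above are pushed as smis rather than raw integers: the runtime call happens under an active safepoint, so every stack slot must hold a value the GC can interpret as tagged. AllocateTargetSpace is a bit-field encoder over the flags word; a hedged stand-in for that pattern (field layout invented for illustration, not V8's actual one):

    // Packs a target-space enum into the low bits of the flags word that
    // is then smi-tagged and pushed for Runtime::kAllocateInTargetSpace.
    enum TargetSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE };

    struct AllocateTargetSpaceField {
      static const int kShift = 0;
      static const int kMask = 0x3;
      static int update(int flags, TargetSpace space) {
        return (flags & ~(kMask << kShift)) | (space << kShift);
      }
      static TargetSpace decode(int flags) {
        return static_cast<TargetSpace>((flags >> kShift) & kMask);
      }
    };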
5372 5372
5373 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { 5373 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5374 ASSERT(ToRegister(instr->value()).is(rax)); 5374 DCHECK(ToRegister(instr->value()).is(rax));
5375 __ Push(rax); 5375 __ Push(rax);
5376 CallRuntime(Runtime::kToFastProperties, 1, instr); 5376 CallRuntime(Runtime::kToFastProperties, 1, instr);
5377 } 5377 }
5378 5378
5379 5379
5380 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { 5380 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5381 ASSERT(ToRegister(instr->context()).is(rsi)); 5381 DCHECK(ToRegister(instr->context()).is(rsi));
5382 Label materialized; 5382 Label materialized;
5383 // Registers will be used as follows: 5383 // Registers will be used as follows:
5384 // rcx = literals array. 5384 // rcx = literals array.
5385 // rbx = regexp literal. 5385 // rbx = regexp literal.
5386 // rax = regexp literal clone. 5386 // rax = regexp literal clone.
5387 int literal_offset = 5387 int literal_offset =
5388 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); 5388 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5389 __ Move(rcx, instr->hydrogen()->literals()); 5389 __ Move(rcx, instr->hydrogen()->literals());
5390 __ movp(rbx, FieldOperand(rcx, literal_offset)); 5390 __ movp(rbx, FieldOperand(rcx, literal_offset));
5391 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); 5391 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
(...skipping 30 matching lines...)
5422 __ movp(FieldOperand(rax, i + kPointerSize), rcx); 5422 __ movp(FieldOperand(rax, i + kPointerSize), rcx);
5423 } 5423 }
5424 if ((size % (2 * kPointerSize)) != 0) { 5424 if ((size % (2 * kPointerSize)) != 0) {
5425 __ movp(rdx, FieldOperand(rbx, size - kPointerSize)); 5425 __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
5426 __ movp(FieldOperand(rax, size - kPointerSize), rdx); 5426 __ movp(FieldOperand(rax, size - kPointerSize), rdx);
5427 } 5427 }
5428 } 5428 }
5429 5429
5430 5430
5431 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { 5431 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5432 ASSERT(ToRegister(instr->context()).is(rsi)); 5432 DCHECK(ToRegister(instr->context()).is(rsi));
5433 // Use the fast case closure allocation code that allocates in new 5433 // Use the fast case closure allocation code that allocates in new
5434 // space for nested functions that don't need literals cloning. 5434 // space for nested functions that don't need literals cloning.
5435 bool pretenure = instr->hydrogen()->pretenure(); 5435 bool pretenure = instr->hydrogen()->pretenure();
5436 if (!pretenure && instr->hydrogen()->has_no_literals()) { 5436 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5437 FastNewClosureStub stub(isolate(), 5437 FastNewClosureStub stub(isolate(),
5438 instr->hydrogen()->strict_mode(), 5438 instr->hydrogen()->strict_mode(),
5439 instr->hydrogen()->is_generator()); 5439 instr->hydrogen()->is_generator());
5440 __ Move(rbx, instr->hydrogen()->shared_info()); 5440 __ Move(rbx, instr->hydrogen()->shared_info());
5441 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 5441 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5442 } else { 5442 } else {
5443 __ Push(rsi); 5443 __ Push(rsi);
5444 __ Push(instr->hydrogen()->shared_info()); 5444 __ Push(instr->hydrogen()->shared_info());
5445 __ PushRoot(pretenure ? Heap::kTrueValueRootIndex : 5445 __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
5446 Heap::kFalseValueRootIndex); 5446 Heap::kFalseValueRootIndex);
5447 CallRuntime(Runtime::kNewClosure, 3, instr); 5447 CallRuntime(Runtime::kNewClosure, 3, instr);
5448 } 5448 }
5449 } 5449 }
5450 5450
5451 5451
5452 void LCodeGen::DoTypeof(LTypeof* instr) { 5452 void LCodeGen::DoTypeof(LTypeof* instr) {
5453 ASSERT(ToRegister(instr->context()).is(rsi)); 5453 DCHECK(ToRegister(instr->context()).is(rsi));
5454 LOperand* input = instr->value(); 5454 LOperand* input = instr->value();
5455 EmitPushTaggedOperand(input); 5455 EmitPushTaggedOperand(input);
5456 CallRuntime(Runtime::kTypeof, 1, instr); 5456 CallRuntime(Runtime::kTypeof, 1, instr);
5457 } 5457 }
5458 5458
5459 5459
5460 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { 5460 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
5461 ASSERT(!operand->IsDoubleRegister()); 5461 DCHECK(!operand->IsDoubleRegister());
5462 if (operand->IsConstantOperand()) { 5462 if (operand->IsConstantOperand()) {
5463 __ Push(ToHandle(LConstantOperand::cast(operand))); 5463 __ Push(ToHandle(LConstantOperand::cast(operand)));
5464 } else if (operand->IsRegister()) { 5464 } else if (operand->IsRegister()) {
5465 __ Push(ToRegister(operand)); 5465 __ Push(ToRegister(operand));
5466 } else { 5466 } else {
5467 __ Push(ToOperand(operand)); 5467 __ Push(ToOperand(operand));
5468 } 5468 }
5469 } 5469 }
5470 5470
5471 5471
(...skipping 120 matching lines...)
5592 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; 5592 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5593 __ Nop(padding_size); 5593 __ Nop(padding_size);
5594 } 5594 }
5595 } 5595 }
5596 last_lazy_deopt_pc_ = masm()->pc_offset(); 5596 last_lazy_deopt_pc_ = masm()->pc_offset();
5597 } 5597 }
5598 5598
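The padding loop above keeps lazy-deopt patch sites from overlapping: if fewer than space_needed bytes were emitted since the last recorded site, one-byte nops fill the gap, so patching a call over one site can never clobber the next. The arithmetic in isolation:

    // Sketch of the computation (names mirror the fields above).
    int PaddingNeeded(int last_lazy_deopt_pc, int space_needed, int current_pc) {
      int padding = last_lazy_deopt_pc + space_needed - current_pc;
      return padding > 0 ? padding : 0;  // emit this many one-byte nops
    }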
5599 5599
5600 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 5600 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5601 last_lazy_deopt_pc_ = masm()->pc_offset(); 5601 last_lazy_deopt_pc_ = masm()->pc_offset();
5602 ASSERT(instr->HasEnvironment()); 5602 DCHECK(instr->HasEnvironment());
5603 LEnvironment* env = instr->environment(); 5603 LEnvironment* env = instr->environment();
5604 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5604 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5605 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 5605 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5606 } 5606 }
5607 5607
5608 5608
5609 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { 5609 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5610 Deoptimizer::BailoutType type = instr->hydrogen()->type(); 5610 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5611 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the 5611 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5612 // needed return address), even though the implementation of LAZY and EAGER is 5612 // needed return address), even though the implementation of LAZY and EAGER is
(...skipping 16 matching lines...)
5629 void LCodeGen::DoDummyUse(LDummyUse* instr) { 5629 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5630 // Nothing to see here, move on! 5630 // Nothing to see here, move on!
5631 } 5631 }
5632 5632
5633 5633
5634 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { 5634 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5635 PushSafepointRegistersScope scope(this); 5635 PushSafepointRegistersScope scope(this);
5636 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); 5636 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5637 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); 5637 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5638 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0); 5638 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5639 ASSERT(instr->HasEnvironment()); 5639 DCHECK(instr->HasEnvironment());
5640 LEnvironment* env = instr->environment(); 5640 LEnvironment* env = instr->environment();
5641 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 5641 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5642 } 5642 }
5643 5643
5644 5644
5645 void LCodeGen::DoStackCheck(LStackCheck* instr) { 5645 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5646 class DeferredStackCheck V8_FINAL : public LDeferredCode { 5646 class DeferredStackCheck V8_FINAL : public LDeferredCode {
5647 public: 5647 public:
5648 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) 5648 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5649 : LDeferredCode(codegen), instr_(instr) { } 5649 : LDeferredCode(codegen), instr_(instr) { }
5650 virtual void Generate() V8_OVERRIDE { 5650 virtual void Generate() V8_OVERRIDE {
5651 codegen()->DoDeferredStackCheck(instr_); 5651 codegen()->DoDeferredStackCheck(instr_);
5652 } 5652 }
5653 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 5653 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5654 private: 5654 private:
5655 LStackCheck* instr_; 5655 LStackCheck* instr_;
5656 }; 5656 };
5657 5657
5658 ASSERT(instr->HasEnvironment()); 5658 DCHECK(instr->HasEnvironment());
5659 LEnvironment* env = instr->environment(); 5659 LEnvironment* env = instr->environment();
5660 // There is no LLazyBailout instruction for stack-checks. We have to 5660 // There is no LLazyBailout instruction for stack-checks. We have to
5661 // prepare for lazy deoptimization explicitly here. 5661 // prepare for lazy deoptimization explicitly here.
5662 if (instr->hydrogen()->is_function_entry()) { 5662 if (instr->hydrogen()->is_function_entry()) {
5663 // Perform stack overflow check. 5663 // Perform stack overflow check.
5664 Label done; 5664 Label done;
5665 __ CompareRoot(rsp, Heap::kStackLimitRootIndex); 5665 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5666 __ j(above_equal, &done, Label::kNear); 5666 __ j(above_equal, &done, Label::kNear);
5667 5667
5668 ASSERT(instr->context()->IsRegister()); 5668 DCHECK(instr->context()->IsRegister());
5669 ASSERT(ToRegister(instr->context()).is(rsi)); 5669 DCHECK(ToRegister(instr->context()).is(rsi));
5670 CallCode(isolate()->builtins()->StackCheck(), 5670 CallCode(isolate()->builtins()->StackCheck(),
5671 RelocInfo::CODE_TARGET, 5671 RelocInfo::CODE_TARGET,
5672 instr); 5672 instr);
5673 __ bind(&done); 5673 __ bind(&done);
5674 } else { 5674 } else {
5675 ASSERT(instr->hydrogen()->is_backwards_branch()); 5675 DCHECK(instr->hydrogen()->is_backwards_branch());
5676 // Perform stack overflow check if this goto needs it before jumping. 5676 // Perform stack overflow check if this goto needs it before jumping.
5677 DeferredStackCheck* deferred_stack_check = 5677 DeferredStackCheck* deferred_stack_check =
5678 new(zone()) DeferredStackCheck(this, instr); 5678 new(zone()) DeferredStackCheck(this, instr);
5679 __ CompareRoot(rsp, Heap::kStackLimitRootIndex); 5679 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5680 __ j(below, deferred_stack_check->entry()); 5680 __ j(below, deferred_stack_check->entry());
5681 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 5681 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5682 __ bind(instr->done_label()); 5682 __ bind(instr->done_label());
5683 deferred_stack_check->SetExit(instr->done_label()); 5683 deferred_stack_check->SetExit(instr->done_label());
5684 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5684 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5685 // Don't record a deoptimization index for the safepoint here. 5685 // Don't record a deoptimization index for the safepoint here.
5686 // This will be done explicitly when emitting call and the safepoint in 5686 // This will be done explicitly when emitting call and the safepoint in
5687 // the deferred code. 5687 // the deferred code.
5688 } 5688 }
5689 } 5689 }
5690 5690
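Both paths compare rsp against the stack-limit root rather than a constant, and that indirection is what lets the stack guard double as an interrupt mechanism: to request an interrupt, V8 replaces the stored limit with a value above any real stack pointer, so the next check fails and control enters the StackCheck builtin even though the actual stack is fine. A sketch of the idea (hypothetical stand-ins, not V8's API):

    #include <atomic>
    #include <cstdint>

    std::atomic<uintptr_t> stack_limit;  // stand-in for the limit root slot

    bool StackCheckFails(uintptr_t sp) {
      return sp < stack_limit.load(std::memory_order_relaxed);  // j(below)
    }

    void RequestInterrupt() {
      // Force the next check to fail regardless of the real stack depth.
      stack_limit.store(UINTPTR_MAX, std::memory_order_relaxed);
    }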
5691 5691
5692 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 5692 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5693 // This is a pseudo-instruction that ensures that the environment here is 5693 // This is a pseudo-instruction that ensures that the environment here is
5694 // properly registered for deoptimization and records the assembler's PC 5694 // properly registered for deoptimization and records the assembler's PC
5695 // offset. 5695 // offset.
5696 LEnvironment* environment = instr->environment(); 5696 LEnvironment* environment = instr->environment();
5697 5697
5698 // If the environment were already registered, we would have no way of 5698 // If the environment were already registered, we would have no way of
5699 // backpatching it with the spill slot operands. 5699 // backpatching it with the spill slot operands.
5700 ASSERT(!environment->HasBeenRegistered()); 5700 DCHECK(!environment->HasBeenRegistered());
5701 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 5701 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5702 5702
5703 GenerateOsrPrologue(); 5703 GenerateOsrPrologue();
5704 } 5704 }
5705 5705
5706 5706
5707 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { 5707 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5708 ASSERT(ToRegister(instr->context()).is(rsi)); 5708 DCHECK(ToRegister(instr->context()).is(rsi));
5709 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); 5709 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
5710 DeoptimizeIf(equal, instr->environment()); 5710 DeoptimizeIf(equal, instr->environment());
5711 5711
5712 Register null_value = rdi; 5712 Register null_value = rdi;
5713 __ LoadRoot(null_value, Heap::kNullValueRootIndex); 5713 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5714 __ cmpp(rax, null_value); 5714 __ cmpp(rax, null_value);
5715 DeoptimizeIf(equal, instr->environment()); 5715 DeoptimizeIf(equal, instr->environment());
5716 5716
5717 Condition cc = masm()->CheckSmi(rax); 5717 Condition cc = masm()->CheckSmi(rax);
5718 DeoptimizeIf(cc, instr->environment()); 5718 DeoptimizeIf(cc, instr->environment());
(...skipping 133 matching lines...)
5852 CallRuntime(Runtime::kPushBlockContext, 2, instr); 5852 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5853 RecordSafepoint(Safepoint::kNoLazyDeopt); 5853 RecordSafepoint(Safepoint::kNoLazyDeopt);
5854 } 5854 }
5855 5855
5856 5856
5857 #undef __ 5857 #undef __
5858 5858
5859 } } // namespace v8::internal 5859 } } // namespace v8::internal
5860 5860
5861 #endif // V8_TARGET_ARCH_X64 5861 #endif // V8_TARGET_ARCH_X64