Chromium Code Reviews

Side by Side Diff: src/ia32/macro-assembler-ia32.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes Created 6 years, 4 months ago
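This CL mechanically renames the ASSERT* macro family (ASSERT, ASSERT_EQ, ...) to DCHECK*. For orientation, a minimal sketch of a debug-only check macro of this kind (illustrative only, not V8's actual definition):

#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define DCHECK(condition)                                      \
  do {                                                         \
    if (!(condition)) {                                        \
      std::fprintf(stderr, "Check failed: %s.\n", #condition); \
      std::abort();                                            \
    }                                                          \
  } while (false)
#else
#define DCHECK(condition) ((void)0)  // compiles away in release builds
#endif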
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #if V8_TARGET_ARCH_IA32 7 #if V8_TARGET_ARCH_IA32
8 8
9 #include "src/bootstrapper.h" 9 #include "src/bootstrapper.h"
10 #include "src/codegen.h" 10 #include "src/codegen.h"
(...skipping 15 matching lines...)
26 has_frame_(false) { 26 has_frame_(false) {
27 if (isolate() != NULL) { 27 if (isolate() != NULL) {
28 // TODO(titzer): should we just use a null handle here instead? 28 // TODO(titzer): should we just use a null handle here instead?
29 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), 29 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
30 isolate()); 30 isolate());
31 } 31 }
32 } 32 }
33 33
34 34
35 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { 35 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
36 ASSERT(!r.IsDouble()); 36 DCHECK(!r.IsDouble());
37 if (r.IsInteger8()) { 37 if (r.IsInteger8()) {
38 movsx_b(dst, src); 38 movsx_b(dst, src);
39 } else if (r.IsUInteger8()) { 39 } else if (r.IsUInteger8()) {
40 movzx_b(dst, src); 40 movzx_b(dst, src);
41 } else if (r.IsInteger16()) { 41 } else if (r.IsInteger16()) {
42 movsx_w(dst, src); 42 movsx_w(dst, src);
43 } else if (r.IsUInteger16()) { 43 } else if (r.IsUInteger16()) {
44 movzx_w(dst, src); 44 movzx_w(dst, src);
45 } else { 45 } else {
46 mov(dst, src); 46 mov(dst, src);
47 } 47 }
48 } 48 }
49 49
50 50
51 void MacroAssembler::Store(Register src, const Operand& dst, Representation r) { 51 void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
52 ASSERT(!r.IsDouble()); 52 DCHECK(!r.IsDouble());
53 if (r.IsInteger8() || r.IsUInteger8()) { 53 if (r.IsInteger8() || r.IsUInteger8()) {
54 mov_b(dst, src); 54 mov_b(dst, src);
55 } else if (r.IsInteger16() || r.IsUInteger16()) { 55 } else if (r.IsInteger16() || r.IsUInteger16()) {
56 mov_w(dst, src); 56 mov_w(dst, src);
57 } else { 57 } else {
58 if (r.IsHeapObject()) { 58 if (r.IsHeapObject()) {
59 AssertNotSmi(src); 59 AssertNotSmi(src);
60 } else if (r.IsSmi()) { 60 } else if (r.IsSmi()) {
61 AssertSmi(src); 61 AssertSmi(src);
62 } 62 }
(...skipping 13 matching lines...)
76 mov(destination, Immediate(index)); 76 mov(destination, Immediate(index));
77 mov(destination, Operand::StaticArray(destination, 77 mov(destination, Operand::StaticArray(destination,
78 times_pointer_size, 78 times_pointer_size,
79 roots_array_start)); 79 roots_array_start));
80 } 80 }
81 81
82 82
83 void MacroAssembler::StoreRoot(Register source, 83 void MacroAssembler::StoreRoot(Register source,
84 Register scratch, 84 Register scratch,
85 Heap::RootListIndex index) { 85 Heap::RootListIndex index) {
86 ASSERT(Heap::RootCanBeWrittenAfterInitialization(index)); 86 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
87 ExternalReference roots_array_start = 87 ExternalReference roots_array_start =
88 ExternalReference::roots_array_start(isolate()); 88 ExternalReference::roots_array_start(isolate());
89 mov(scratch, Immediate(index)); 89 mov(scratch, Immediate(index));
90 mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start), 90 mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
91 source); 91 source);
92 } 92 }
93 93
94 94
95 void MacroAssembler::CompareRoot(Register with, 95 void MacroAssembler::CompareRoot(Register with,
96 Register scratch, 96 Register scratch,
97 Heap::RootListIndex index) { 97 Heap::RootListIndex index) {
98 ExternalReference roots_array_start = 98 ExternalReference roots_array_start =
99 ExternalReference::roots_array_start(isolate()); 99 ExternalReference::roots_array_start(isolate());
100 mov(scratch, Immediate(index)); 100 mov(scratch, Immediate(index));
101 cmp(with, Operand::StaticArray(scratch, 101 cmp(with, Operand::StaticArray(scratch,
102 times_pointer_size, 102 times_pointer_size,
103 roots_array_start)); 103 roots_array_start));
104 } 104 }
105 105
106 106
107 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { 107 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
108 ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index)); 108 DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
109 Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); 109 Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
110 cmp(with, value); 110 cmp(with, value);
111 } 111 }
112 112
113 113
114 void MacroAssembler::CompareRoot(const Operand& with, 114 void MacroAssembler::CompareRoot(const Operand& with,
115 Heap::RootListIndex index) { 115 Heap::RootListIndex index) {
116 ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index)); 116 DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
117 Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); 117 Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
118 cmp(with, value); 118 cmp(with, value);
119 } 119 }
120 120
121 121
122 void MacroAssembler::InNewSpace( 122 void MacroAssembler::InNewSpace(
123 Register object, 123 Register object,
124 Register scratch, 124 Register scratch,
125 Condition cc, 125 Condition cc,
126 Label* condition_met, 126 Label* condition_met,
127 Label::Distance condition_met_distance) { 127 Label::Distance condition_met_distance) {
128 ASSERT(cc == equal || cc == not_equal); 128 DCHECK(cc == equal || cc == not_equal);
129 if (scratch.is(object)) { 129 if (scratch.is(object)) {
130 and_(scratch, Immediate(~Page::kPageAlignmentMask)); 130 and_(scratch, Immediate(~Page::kPageAlignmentMask));
131 } else { 131 } else {
132 mov(scratch, Immediate(~Page::kPageAlignmentMask)); 132 mov(scratch, Immediate(~Page::kPageAlignmentMask));
133 and_(scratch, object); 133 and_(scratch, object);
134 } 134 }
135 // Check that we can use a test_b. 135 // Check that we can use a test_b.
136 ASSERT(MemoryChunk::IN_FROM_SPACE < 8); 136 DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
137 ASSERT(MemoryChunk::IN_TO_SPACE < 8); 137 DCHECK(MemoryChunk::IN_TO_SPACE < 8);
138 int mask = (1 << MemoryChunk::IN_FROM_SPACE) 138 int mask = (1 << MemoryChunk::IN_FROM_SPACE)
139 | (1 << MemoryChunk::IN_TO_SPACE); 139 | (1 << MemoryChunk::IN_TO_SPACE);
140 // If non-zero, the page belongs to new-space. 140 // If non-zero, the page belongs to new-space.
141 test_b(Operand(scratch, MemoryChunk::kFlagsOffset), 141 test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
142 static_cast<uint8_t>(mask)); 142 static_cast<uint8_t>(mask));
143 j(cc, condition_met, condition_met_distance); 143 j(cc, condition_met, condition_met_distance);
144 } 144 }
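Note on InNewSpace above: heap pages are power-of-two aligned, so masking any object address with ~Page::kPageAlignmentMask yields its MemoryChunk header, whose flags byte can then be tested. A C++ sketch with stand-in constants (the real values live in Page/MemoryChunk):

#include <cstdint>

constexpr uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed 1MB pages
constexpr int kFlagsOffset = 8;                           // assumed flags slot
constexpr int IN_FROM_SPACE = 3, IN_TO_SPACE = 4;         // assumed bit indices

bool InNewSpace(uintptr_t object_address) {
  uintptr_t chunk = object_address & ~kPageAlignmentMask;  // page header start
  uint8_t flags = *reinterpret_cast<uint8_t*>(chunk + kFlagsOffset);
  int mask = (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
  return (flags & mask) != 0;  // non-zero: a from-space or to-space page
}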
145 145
146 146
147 void MacroAssembler::RememberedSetHelper( 147 void MacroAssembler::RememberedSetHelper(
(...skipping 21 matching lines...)
169 mov(Operand::StaticVariable(store_buffer), scratch); 169 mov(Operand::StaticVariable(store_buffer), scratch);
170 // Call stub on end of buffer. 170 // Call stub on end of buffer.
171 // Check for end of buffer. 171 // Check for end of buffer.
172 test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); 172 test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
173 if (and_then == kReturnAtEnd) { 173 if (and_then == kReturnAtEnd) {
174 Label buffer_overflowed; 174 Label buffer_overflowed;
175 j(not_equal, &buffer_overflowed, Label::kNear); 175 j(not_equal, &buffer_overflowed, Label::kNear);
176 ret(0); 176 ret(0);
177 bind(&buffer_overflowed); 177 bind(&buffer_overflowed);
178 } else { 178 } else {
179 ASSERT(and_then == kFallThroughAtEnd); 179 DCHECK(and_then == kFallThroughAtEnd);
180 j(equal, &done, Label::kNear); 180 j(equal, &done, Label::kNear);
181 } 181 }
182 StoreBufferOverflowStub store_buffer_overflow = 182 StoreBufferOverflowStub store_buffer_overflow =
183 StoreBufferOverflowStub(isolate(), save_fp); 183 StoreBufferOverflowStub(isolate(), save_fp);
184 CallStub(&store_buffer_overflow); 184 CallStub(&store_buffer_overflow);
185 if (and_then == kReturnAtEnd) { 185 if (and_then == kReturnAtEnd) {
186 ret(0); 186 ret(0);
187 } else { 187 } else {
188 ASSERT(and_then == kFallThroughAtEnd); 188 DCHECK(and_then == kFallThroughAtEnd);
189 bind(&done); 189 bind(&done);
190 } 190 }
191 } 191 }
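Note on RememberedSetHelper above: the sequence appends the slot address to the store buffer, bumps the top pointer, and treats a bit of the new top as the overflow signal. A sketch (store_buffer_top and StoreBufferOverflow are assumed stand-ins for the StoreBuffer internals and the stub call):

#include <cstdint>

extern uintptr_t* store_buffer_top;                      // assumed cursor
constexpr uintptr_t kStoreBufferOverflowBit = 1u << 16;  // assumed flag bit
void StoreBufferOverflow();                              // stands in for the stub

void RememberedSetAppend(uintptr_t slot_address) {
  *store_buffer_top++ = slot_address;  // record the slot, bump the top pointer
  if (reinterpret_cast<uintptr_t>(store_buffer_top) & kStoreBufferOverflowBit) {
    StoreBufferOverflow();             // buffer page exhausted: call the stub
  }
}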
192 192
193 193
194 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg, 194 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
195 XMMRegister scratch_reg, 195 XMMRegister scratch_reg,
196 Register result_reg) { 196 Register result_reg) {
197 Label done; 197 Label done;
198 Label conv_failure; 198 Label conv_failure;
(...skipping 49 matching lines...)
248 bind(&done); 248 bind(&done);
249 } 249 }
250 250
251 251
252 void MacroAssembler::DoubleToI(Register result_reg, 252 void MacroAssembler::DoubleToI(Register result_reg,
253 XMMRegister input_reg, 253 XMMRegister input_reg,
254 XMMRegister scratch, 254 XMMRegister scratch,
255 MinusZeroMode minus_zero_mode, 255 MinusZeroMode minus_zero_mode,
256 Label* conversion_failed, 256 Label* conversion_failed,
257 Label::Distance dst) { 257 Label::Distance dst) {
258 ASSERT(!input_reg.is(scratch)); 258 DCHECK(!input_reg.is(scratch));
259 cvttsd2si(result_reg, Operand(input_reg)); 259 cvttsd2si(result_reg, Operand(input_reg));
260 Cvtsi2sd(scratch, Operand(result_reg)); 260 Cvtsi2sd(scratch, Operand(result_reg));
261 ucomisd(scratch, input_reg); 261 ucomisd(scratch, input_reg);
262 j(not_equal, conversion_failed, dst); 262 j(not_equal, conversion_failed, dst);
263 j(parity_even, conversion_failed, dst); // NaN. 263 j(parity_even, conversion_failed, dst); // NaN.
264 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { 264 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
265 Label done; 265 Label done;
266 // The integer converted back is equal to the original. We 266 // The integer converted back is equal to the original. We
267 // only have to test if we got -0 as an input. 267 // only have to test if we got -0 as an input.
268 test(result_reg, Operand(result_reg)); 268 test(result_reg, Operand(result_reg));
(...skipping 75 matching lines...)
344 bind(&done); 344 bind(&done);
345 } 345 }
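Note on DoubleToI above: cvttsd2si truncates, and converting the result back and comparing detects both lost precision and NaN (unordered compares set the parity flag, hence the parity_even branch). In rough C++ (a sketch; the real cvttsd2si yields 0x80000000 on out-of-range inputs, which a plain cast leaves undefined):

#include <cmath>
#include <cstdint>

bool DoubleToI(double input, int32_t* out) {
  int32_t result = static_cast<int32_t>(input);            // cvttsd2si: truncate
  if (static_cast<double>(result) != input) return false;  // inexact, or NaN
  if (result == 0 && std::signbit(input)) return false;    // FAIL_ON_MINUS_ZERO
  *out = result;
  return true;
}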
346 346
347 347
348 void MacroAssembler::TaggedToI(Register result_reg, 348 void MacroAssembler::TaggedToI(Register result_reg,
349 Register input_reg, 349 Register input_reg,
350 XMMRegister temp, 350 XMMRegister temp,
351 MinusZeroMode minus_zero_mode, 351 MinusZeroMode minus_zero_mode,
352 Label* lost_precision) { 352 Label* lost_precision) {
353 Label done; 353 Label done;
354 ASSERT(!temp.is(xmm0)); 354 DCHECK(!temp.is(xmm0));
355 355
356 cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 356 cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
357 isolate()->factory()->heap_number_map()); 357 isolate()->factory()->heap_number_map());
358 j(not_equal, lost_precision, Label::kNear); 358 j(not_equal, lost_precision, Label::kNear);
359 359
360 ASSERT(!temp.is(no_xmm_reg)); 360 DCHECK(!temp.is(no_xmm_reg));
361 361
362 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); 362 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
363 cvttsd2si(result_reg, Operand(xmm0)); 363 cvttsd2si(result_reg, Operand(xmm0));
364 Cvtsi2sd(temp, Operand(result_reg)); 364 Cvtsi2sd(temp, Operand(result_reg));
365 ucomisd(xmm0, temp); 365 ucomisd(xmm0, temp);
366 RecordComment("Deferred TaggedToI: lost precision"); 366 RecordComment("Deferred TaggedToI: lost precision");
367 j(not_equal, lost_precision, Label::kNear); 367 j(not_equal, lost_precision, Label::kNear);
368 RecordComment("Deferred TaggedToI: NaN"); 368 RecordComment("Deferred TaggedToI: NaN");
369 j(parity_even, lost_precision, Label::kNear); 369 j(parity_even, lost_precision, Label::kNear);
370 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { 370 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
(...skipping 28 matching lines...)
399 SaveFPRegsMode save_fp, 399 SaveFPRegsMode save_fp,
400 RememberedSetAction remembered_set_action, 400 RememberedSetAction remembered_set_action,
401 SmiCheck smi_check, 401 SmiCheck smi_check,
402 PointersToHereCheck pointers_to_here_check_for_value) { 402 PointersToHereCheck pointers_to_here_check_for_value) {
403 // First, check if a write barrier is even needed. The tests below 403 // First, check if a write barrier is even needed. The tests below
404 // catch stores of Smis. 404 // catch stores of Smis.
405 Label done; 405 Label done;
406 406
407 // Skip barrier if writing a smi. 407 // Skip barrier if writing a smi.
408 if (smi_check == INLINE_SMI_CHECK) { 408 if (smi_check == INLINE_SMI_CHECK) {
409 ASSERT_EQ(0, kSmiTag); 409 DCHECK_EQ(0, kSmiTag);
410 test(value, Immediate(kSmiTagMask)); 410 test(value, Immediate(kSmiTagMask));
411 j(zero, &done); 411 j(zero, &done);
412 } 412 }
413 413
414 // Array access: calculate the destination address in the same manner as 414 // Array access: calculate the destination address in the same manner as
415 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset 415 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
416 // into an array of words. 416 // into an array of words.
417 Register dst = index; 417 Register dst = index;
418 lea(dst, Operand(object, index, times_half_pointer_size, 418 lea(dst, Operand(object, index, times_half_pointer_size,
419 FixedArray::kHeaderSize - kHeapObjectTag)); 419 FixedArray::kHeaderSize - kHeapObjectTag));
(...skipping 25 matching lines...)
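Note on the lea above: index arrives as a smi, and an ia32 smi stores its value shifted left by one, so scaling the tagged word by times_half_pointer_size (x2) already produces value * kPointerSize:

// value 5 -> smi 10 (5 << 1); 10 * 2 == 20 == 5 * kPointerSize on ia32,
// so the tagged index addresses the correct word slot directly.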
445 // catch stores of Smis. 445 // catch stores of Smis.
446 Label done; 446 Label done;
447 447
448 // Skip barrier if writing a smi. 448 // Skip barrier if writing a smi.
449 if (smi_check == INLINE_SMI_CHECK) { 449 if (smi_check == INLINE_SMI_CHECK) {
450 JumpIfSmi(value, &done, Label::kNear); 450 JumpIfSmi(value, &done, Label::kNear);
451 } 451 }
452 452
453 // Although the object register is tagged, the offset is relative to the start 453 // Although the object register is tagged, the offset is relative to the start
454 // of the object, so the offset must be a multiple of kPointerSize. 454 // of the object, so the offset must be a multiple of kPointerSize.
455 ASSERT(IsAligned(offset, kPointerSize)); 455 DCHECK(IsAligned(offset, kPointerSize));
456 456
457 lea(dst, FieldOperand(object, offset)); 457 lea(dst, FieldOperand(object, offset));
458 if (emit_debug_code()) { 458 if (emit_debug_code()) {
459 Label ok; 459 Label ok;
460 test_b(dst, (1 << kPointerSizeLog2) - 1); 460 test_b(dst, (1 << kPointerSizeLog2) - 1);
461 j(zero, &ok, Label::kNear); 461 j(zero, &ok, Label::kNear);
462 int3(); 462 int3();
463 bind(&ok); 463 bind(&ok);
464 } 464 }
465 465
(...skipping 23 matching lines...)
489 Register value = scratch2; 489 Register value = scratch2;
490 if (emit_debug_code()) { 490 if (emit_debug_code()) {
491 Label ok; 491 Label ok;
492 lea(address, FieldOperand(object, HeapObject::kMapOffset)); 492 lea(address, FieldOperand(object, HeapObject::kMapOffset));
493 test_b(address, (1 << kPointerSizeLog2) - 1); 493 test_b(address, (1 << kPointerSizeLog2) - 1);
494 j(zero, &ok, Label::kNear); 494 j(zero, &ok, Label::kNear);
495 int3(); 495 int3();
496 bind(&ok); 496 bind(&ok);
497 } 497 }
498 498
499 ASSERT(!object.is(value)); 499 DCHECK(!object.is(value));
500 ASSERT(!object.is(address)); 500 DCHECK(!object.is(address));
501 ASSERT(!value.is(address)); 501 DCHECK(!value.is(address));
502 AssertNotSmi(object); 502 AssertNotSmi(object);
503 503
504 if (!FLAG_incremental_marking) { 504 if (!FLAG_incremental_marking) {
505 return; 505 return;
506 } 506 }
507 507
508 // Compute the address. 508 // Compute the address.
509 lea(address, FieldOperand(object, HeapObject::kMapOffset)); 509 lea(address, FieldOperand(object, HeapObject::kMapOffset));
510 510
511 // A single check of the map's page's interesting flag suffices, since it is 511 // A single check of the map's page's interesting flag suffices, since it is
512 // only set during incremental collection, and then it's also guaranteed that 512 // only set during incremental collection, and then it's also guaranteed that
513 // the from object's page's interesting flag is also set. This optimization 513 // the from object's page's interesting flag is also set. This optimization
514 // relies on the fact that maps can never be in new space. 514 // relies on the fact that maps can never be in new space.
515 ASSERT(!isolate()->heap()->InNewSpace(*map)); 515 DCHECK(!isolate()->heap()->InNewSpace(*map));
516 CheckPageFlagForMap(map, 516 CheckPageFlagForMap(map,
517 MemoryChunk::kPointersToHereAreInterestingMask, 517 MemoryChunk::kPointersToHereAreInterestingMask,
518 zero, 518 zero,
519 &done, 519 &done,
520 Label::kNear); 520 Label::kNear);
521 521
522 RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET, 522 RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
523 save_fp); 523 save_fp);
524 CallStub(&stub); 524 CallStub(&stub);
525 525
(...skipping 14 matching lines...)
540 540
541 541
542 void MacroAssembler::RecordWrite( 542 void MacroAssembler::RecordWrite(
543 Register object, 543 Register object,
544 Register address, 544 Register address,
545 Register value, 545 Register value,
546 SaveFPRegsMode fp_mode, 546 SaveFPRegsMode fp_mode,
547 RememberedSetAction remembered_set_action, 547 RememberedSetAction remembered_set_action,
548 SmiCheck smi_check, 548 SmiCheck smi_check,
549 PointersToHereCheck pointers_to_here_check_for_value) { 549 PointersToHereCheck pointers_to_here_check_for_value) {
550 ASSERT(!object.is(value)); 550 DCHECK(!object.is(value));
551 ASSERT(!object.is(address)); 551 DCHECK(!object.is(address));
552 ASSERT(!value.is(address)); 552 DCHECK(!value.is(address));
553 AssertNotSmi(object); 553 AssertNotSmi(object);
554 554
555 if (remembered_set_action == OMIT_REMEMBERED_SET && 555 if (remembered_set_action == OMIT_REMEMBERED_SET &&
556 !FLAG_incremental_marking) { 556 !FLAG_incremental_marking) {
557 return; 557 return;
558 } 558 }
559 559
560 if (emit_debug_code()) { 560 if (emit_debug_code()) {
561 Label ok; 561 Label ok;
562 cmp(value, Operand(address, 0)); 562 cmp(value, Operand(address, 0));
(...skipping 387 matching lines...)
950 cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset), 950 cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
951 Immediate(Smi::FromInt(type))); 951 Immediate(Smi::FromInt(type)));
952 Check(equal, kStackFrameTypesMustMatch); 952 Check(equal, kStackFrameTypesMustMatch);
953 } 953 }
954 leave(); 954 leave();
955 } 955 }
956 956
957 957
958 void MacroAssembler::EnterExitFramePrologue() { 958 void MacroAssembler::EnterExitFramePrologue() {
959 // Set up the frame structure on the stack. 959 // Set up the frame structure on the stack.
960 ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); 960 DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
961 ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); 961 DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
962 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); 962 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
963 push(ebp); 963 push(ebp);
964 mov(ebp, esp); 964 mov(ebp, esp);
965 965
966 // Reserve room for entry stack pointer and push the code object. 966 // Reserve room for entry stack pointer and push the code object.
967 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); 967 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
968 push(Immediate(0)); // Saved entry sp, patched before call. 968 push(Immediate(0)); // Saved entry sp, patched before call.
969 push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot. 969 push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
970 970
971 // Save the frame pointer and the context in top. 971 // Save the frame pointer and the context in top.
972 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate()); 972 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
973 ExternalReference context_address(Isolate::kContextAddress, isolate()); 973 ExternalReference context_address(Isolate::kContextAddress, isolate());
974 mov(Operand::StaticVariable(c_entry_fp_address), ebp); 974 mov(Operand::StaticVariable(c_entry_fp_address), ebp);
975 mov(Operand::StaticVariable(context_address), esi); 975 mov(Operand::StaticVariable(context_address), esi);
976 } 976 }
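Note on EnterExitFramePrologue above: after the pushes, the frame matches the ExitFrameConstants checked by the DCHECKs, laid out relative to ebp as:

// ebp + 8 : caller's stack area   (kCallerSPDisplacement = +2 words)
// ebp + 4 : return address        (kCallerPCOffset       = +1 word)
// ebp + 0 : saved caller ebp      (kCallerFPOffset       =  0)
// ebp - 4 : saved entry esp slot  (kSPOffset             = -1 word), patched later
// ebp - 8 : code object           (read via ExitFrame::code_slot)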
977 977
978 978
979 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { 979 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
980 // Optionally save all XMM registers. 980 // Optionally save all XMM registers.
981 if (save_doubles) { 981 if (save_doubles) {
982 int space = XMMRegister::kMaxNumRegisters * kDoubleSize + 982 int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
983 argc * kPointerSize; 983 argc * kPointerSize;
984 sub(esp, Immediate(space)); 984 sub(esp, Immediate(space));
985 const int offset = -2 * kPointerSize; 985 const int offset = -2 * kPointerSize;
986 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { 986 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
987 XMMRegister reg = XMMRegister::from_code(i); 987 XMMRegister reg = XMMRegister::from_code(i);
988 movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); 988 movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
989 } 989 }
990 } else { 990 } else {
991 sub(esp, Immediate(argc * kPointerSize)); 991 sub(esp, Immediate(argc * kPointerSize));
992 } 992 }
993 993
994 // Get the required frame alignment for the OS. 994 // Get the required frame alignment for the OS.
995 const int kFrameAlignment = base::OS::ActivationFrameAlignment(); 995 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
996 if (kFrameAlignment > 0) { 996 if (kFrameAlignment > 0) {
997 ASSERT(IsPowerOf2(kFrameAlignment)); 997 DCHECK(IsPowerOf2(kFrameAlignment));
998 and_(esp, -kFrameAlignment); 998 and_(esp, -kFrameAlignment);
999 } 999 }
1000 1000
1001 // Patch the saved entry sp. 1001 // Patch the saved entry sp.
1002 mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp); 1002 mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
1003 } 1003 }
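Note on the and_(esp, -kFrameAlignment) above: negating a power of two gives a mask with the low bits clear, so the AND rounds the stack pointer down to the required boundary. Equivalently:

#include <cstdint>

uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {  // alignment is 2^k
  return sp & ~(alignment - 1);  // same bits as sp & -alignment in two's complement
}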
1004 1004
1005 1005
1006 void MacroAssembler::EnterExitFrame(bool save_doubles) { 1006 void MacroAssembler::EnterExitFrame(bool save_doubles) {
1007 EnterExitFramePrologue(); 1007 EnterExitFramePrologue();
(...skipping 204 matching lines...)
1212 JumpToHandlerEntry(); 1212 JumpToHandlerEntry();
1213 } 1213 }
1214 1214
1215 1215
1216 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, 1216 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1217 Register scratch1, 1217 Register scratch1,
1218 Register scratch2, 1218 Register scratch2,
1219 Label* miss) { 1219 Label* miss) {
1220 Label same_contexts; 1220 Label same_contexts;
1221 1221
1222 ASSERT(!holder_reg.is(scratch1)); 1222 DCHECK(!holder_reg.is(scratch1));
1223 ASSERT(!holder_reg.is(scratch2)); 1223 DCHECK(!holder_reg.is(scratch2));
1224 ASSERT(!scratch1.is(scratch2)); 1224 DCHECK(!scratch1.is(scratch2));
1225 1225
1226 // Load current lexical context from the stack frame. 1226 // Load current lexical context from the stack frame.
1227 mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset)); 1227 mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
1228 1228
1229 // When generating debug code, make sure the lexical context is set. 1229 // When generating debug code, make sure the lexical context is set.
1230 if (emit_debug_code()) { 1230 if (emit_debug_code()) {
1231 cmp(scratch1, Immediate(0)); 1231 cmp(scratch1, Immediate(0));
1232 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext); 1232 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
1233 } 1233 }
1234 // Load the native context of the current context. 1234 // Load the native context of the current context.
(...skipping 125 matching lines...)
1360 for (int i = 0; i < kNumberDictionaryProbes; i++) { 1360 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1361 // Use r2 for index calculations and keep the hash intact in r0. 1361 // Use r2 for index calculations and keep the hash intact in r0.
1362 mov(r2, r0); 1362 mov(r2, r0);
1363 // Compute the masked index: (hash + i + i * i) & mask. 1363 // Compute the masked index: (hash + i + i * i) & mask.
1364 if (i > 0) { 1364 if (i > 0) {
1365 add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i))); 1365 add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
1366 } 1366 }
1367 and_(r2, r1); 1367 and_(r2, r1);
1368 1368
1369 // Scale the index by multiplying by the entry size. 1369 // Scale the index by multiplying by the entry size.
1370 ASSERT(SeededNumberDictionary::kEntrySize == 3); 1370 DCHECK(SeededNumberDictionary::kEntrySize == 3);
1371 lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 1371 lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
1372 1372
1373 // Check if the key matches. 1373 // Check if the key matches.
1374 cmp(key, FieldOperand(elements, 1374 cmp(key, FieldOperand(elements,
1375 r2, 1375 r2,
1376 times_pointer_size, 1376 times_pointer_size,
1377 SeededNumberDictionary::kElementsStartOffset)); 1377 SeededNumberDictionary::kElementsStartOffset));
1378 if (i != (kNumberDictionaryProbes - 1)) { 1378 if (i != (kNumberDictionaryProbes - 1)) {
1379 j(equal, &done); 1379 j(equal, &done);
1380 } else { 1380 } else {
1381 j(not_equal, miss); 1381 j(not_equal, miss);
1382 } 1382 }
1383 } 1383 }
1384 1384
1385 bind(&done); 1385 bind(&done);
1386 // Check that the value is a normal property. 1386 // Check that the value is a normal property.
1387 const int kDetailsOffset = 1387 const int kDetailsOffset =
1388 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; 1388 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1389 ASSERT_EQ(NORMAL, 0); 1389 DCHECK_EQ(NORMAL, 0);
1390 test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), 1390 test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
1391 Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize)); 1391 Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
1392 j(not_zero, miss); 1392 j(not_zero, miss);
1393 1393
1394 // Get the value at the masked, scaled index. 1394 // Get the value at the masked, scaled index.
1395 const int kValueOffset = 1395 const int kValueOffset =
1396 SeededNumberDictionary::kElementsStartOffset + kPointerSize; 1396 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1397 mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); 1397 mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
1398 } 1398 }
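Note on the probe loop above: it walks the dictionary with the quadratic sequence from the comment, (hash + i + i*i) & mask, and multiplies by the entry size (3) via the lea. A sketch with assumed helpers (kNumberDictionaryProbes, KeyAt, kNotFound):

#include <cstdint>

constexpr int kNumberDictionaryProbes = 4;  // assumed probe count
constexpr uint32_t kNotFound = ~0u;
uint32_t GetProbeOffset(int i) { return i + i * i; }  // per the comment above
uint32_t KeyAt(uint32_t entry);                       // assumed table accessor

uint32_t FindNumberDictionaryEntry(uint32_t hash, uint32_t mask, uint32_t key) {
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    uint32_t index = (hash + GetProbeOffset(i)) & mask;  // masked index
    uint32_t entry = index * 3;  // kEntrySize == 3: the lea r2,[r2+r2*2] above
    if (KeyAt(entry) == key) return entry;  // hit: details/value follow the key
  }
  return kNotFound;  // all probes missed
}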
1399 1399
1400 1400
1401 void MacroAssembler::LoadAllocationTopHelper(Register result, 1401 void MacroAssembler::LoadAllocationTopHelper(Register result,
1402 Register scratch, 1402 Register scratch,
1403 AllocationFlags flags) { 1403 AllocationFlags flags) {
1404 ExternalReference allocation_top = 1404 ExternalReference allocation_top =
1405 AllocationUtils::GetAllocationTopReference(isolate(), flags); 1405 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1406 1406
1407 // Just return if allocation top is already known. 1407 // Just return if allocation top is already known.
1408 if ((flags & RESULT_CONTAINS_TOP) != 0) { 1408 if ((flags & RESULT_CONTAINS_TOP) != 0) {
1409 // No use of scratch if allocation top is provided. 1409 // No use of scratch if allocation top is provided.
1410 ASSERT(scratch.is(no_reg)); 1410 DCHECK(scratch.is(no_reg));
1411 #ifdef DEBUG 1411 #ifdef DEBUG
1412 // Assert that result actually contains top on entry. 1412 // Assert that result actually contains top on entry.
1413 cmp(result, Operand::StaticVariable(allocation_top)); 1413 cmp(result, Operand::StaticVariable(allocation_top));
1414 Check(equal, kUnexpectedAllocationTop); 1414 Check(equal, kUnexpectedAllocationTop);
1415 #endif 1415 #endif
1416 return; 1416 return;
1417 } 1417 }
1418 1418
1419 // Move address of new object to result. Use scratch register if available. 1419 // Move address of new object to result. Use scratch register if available.
1420 if (scratch.is(no_reg)) { 1420 if (scratch.is(no_reg)) {
(...skipping 24 matching lines...)
1445 } 1445 }
1446 } 1446 }
1447 1447
1448 1448
1449 void MacroAssembler::Allocate(int object_size, 1449 void MacroAssembler::Allocate(int object_size,
1450 Register result, 1450 Register result,
1451 Register result_end, 1451 Register result_end,
1452 Register scratch, 1452 Register scratch,
1453 Label* gc_required, 1453 Label* gc_required,
1454 AllocationFlags flags) { 1454 AllocationFlags flags) {
1455 ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); 1455 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1456 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); 1456 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1457 if (!FLAG_inline_new) { 1457 if (!FLAG_inline_new) {
1458 if (emit_debug_code()) { 1458 if (emit_debug_code()) {
1459 // Trash the registers to simulate an allocation failure. 1459 // Trash the registers to simulate an allocation failure.
1460 mov(result, Immediate(0x7091)); 1460 mov(result, Immediate(0x7091));
1461 if (result_end.is_valid()) { 1461 if (result_end.is_valid()) {
1462 mov(result_end, Immediate(0x7191)); 1462 mov(result_end, Immediate(0x7191));
1463 } 1463 }
1464 if (scratch.is_valid()) { 1464 if (scratch.is_valid()) {
1465 mov(scratch, Immediate(0x7291)); 1465 mov(scratch, Immediate(0x7291));
1466 } 1466 }
1467 } 1467 }
1468 jmp(gc_required); 1468 jmp(gc_required);
1469 return; 1469 return;
1470 } 1470 }
1471 ASSERT(!result.is(result_end)); 1471 DCHECK(!result.is(result_end));
1472 1472
1473 // Load address of new object into result. 1473 // Load address of new object into result.
1474 LoadAllocationTopHelper(result, scratch, flags); 1474 LoadAllocationTopHelper(result, scratch, flags);
1475 1475
1476 ExternalReference allocation_limit = 1476 ExternalReference allocation_limit =
1477 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 1477 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1478 1478
1479 // Align the next allocation. Storing the filler map without checking top is 1479 // Align the next allocation. Storing the filler map without checking top is
1480 // safe in new-space because the limit of the heap is aligned there. 1480 // safe in new-space because the limit of the heap is aligned there.
1481 if ((flags & DOUBLE_ALIGNMENT) != 0) { 1481 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1482 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); 1482 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1483 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); 1483 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1484 Label aligned; 1484 Label aligned;
1485 test(result, Immediate(kDoubleAlignmentMask)); 1485 test(result, Immediate(kDoubleAlignmentMask));
1486 j(zero, &aligned, Label::kNear); 1486 j(zero, &aligned, Label::kNear);
1487 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { 1487 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1488 cmp(result, Operand::StaticVariable(allocation_limit)); 1488 cmp(result, Operand::StaticVariable(allocation_limit));
1489 j(above_equal, gc_required); 1489 j(above_equal, gc_required);
1490 } 1490 }
1491 mov(Operand(result, 0), 1491 mov(Operand(result, 0),
1492 Immediate(isolate()->factory()->one_pointer_filler_map())); 1492 Immediate(isolate()->factory()->one_pointer_filler_map()));
1493 add(result, Immediate(kDoubleSize / 2)); 1493 add(result, Immediate(kDoubleSize / 2));
(...skipping 15 matching lines...)
1509 1509
1510 // Tag result if requested. 1510 // Tag result if requested.
1511 bool tag_result = (flags & TAG_OBJECT) != 0; 1511 bool tag_result = (flags & TAG_OBJECT) != 0;
1512 if (top_reg.is(result)) { 1512 if (top_reg.is(result)) {
1513 if (tag_result) { 1513 if (tag_result) {
1514 sub(result, Immediate(object_size - kHeapObjectTag)); 1514 sub(result, Immediate(object_size - kHeapObjectTag));
1515 } else { 1515 } else {
1516 sub(result, Immediate(object_size)); 1516 sub(result, Immediate(object_size));
1517 } 1517 }
1518 } else if (tag_result) { 1518 } else if (tag_result) {
1519 ASSERT(kHeapObjectTag == 1); 1519 DCHECK(kHeapObjectTag == 1);
1520 inc(result); 1520 inc(result);
1521 } 1521 }
1522 } 1522 }
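Note on the DOUBLE_ALIGNMENT block above: allocation top is only word-aligned, so when it is 4- but not 8-byte aligned the code plugs one word with a filler map and starts the object 4 bytes later. A sketch (one_pointer_filler_map stands in for the factory root used above):

#include <cstdint>

extern void* one_pointer_filler_map;           // stands in for the heap root
constexpr uintptr_t kDoubleAlignmentMask = 7;  // 8-byte double alignment
constexpr int kDoubleSize = 8;

uintptr_t AlignForDouble(uintptr_t result) {
  if (result & kDoubleAlignmentMask) {         // 4- but not 8-byte aligned
    *reinterpret_cast<void**>(result) = one_pointer_filler_map;  // dead word
    result += kDoubleSize / 2;                 // skip 4 bytes: now aligned
  }
  return result;
}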
1523 1523
1524 1524
1525 void MacroAssembler::Allocate(int header_size, 1525 void MacroAssembler::Allocate(int header_size,
1526 ScaleFactor element_size, 1526 ScaleFactor element_size,
1527 Register element_count, 1527 Register element_count,
1528 RegisterValueType element_count_type, 1528 RegisterValueType element_count_type,
1529 Register result, 1529 Register result,
1530 Register result_end, 1530 Register result_end,
1531 Register scratch, 1531 Register scratch,
1532 Label* gc_required, 1532 Label* gc_required,
1533 AllocationFlags flags) { 1533 AllocationFlags flags) {
1534 ASSERT((flags & SIZE_IN_WORDS) == 0); 1534 DCHECK((flags & SIZE_IN_WORDS) == 0);
1535 if (!FLAG_inline_new) { 1535 if (!FLAG_inline_new) {
1536 if (emit_debug_code()) { 1536 if (emit_debug_code()) {
1537 // Trash the registers to simulate an allocation failure. 1537 // Trash the registers to simulate an allocation failure.
1538 mov(result, Immediate(0x7091)); 1538 mov(result, Immediate(0x7091));
1539 mov(result_end, Immediate(0x7191)); 1539 mov(result_end, Immediate(0x7191));
1540 if (scratch.is_valid()) { 1540 if (scratch.is_valid()) {
1541 mov(scratch, Immediate(0x7291)); 1541 mov(scratch, Immediate(0x7291));
1542 } 1542 }
1543 // Register element_count is not modified by the function. 1543 // Register element_count is not modified by the function.
1544 } 1544 }
1545 jmp(gc_required); 1545 jmp(gc_required);
1546 return; 1546 return;
1547 } 1547 }
1548 ASSERT(!result.is(result_end)); 1548 DCHECK(!result.is(result_end));
1549 1549
1550 // Load address of new object into result. 1550 // Load address of new object into result.
1551 LoadAllocationTopHelper(result, scratch, flags); 1551 LoadAllocationTopHelper(result, scratch, flags);
1552 1552
1553 ExternalReference allocation_limit = 1553 ExternalReference allocation_limit =
1554 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 1554 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1555 1555
1556 // Align the next allocation. Storing the filler map without checking top is 1556 // Align the next allocation. Storing the filler map without checking top is
1557 // safe in new-space because the limit of the heap is aligned there. 1557 // safe in new-space because the limit of the heap is aligned there.
1558 if ((flags & DOUBLE_ALIGNMENT) != 0) { 1558 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1559 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); 1559 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1560 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); 1560 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1561 Label aligned; 1561 Label aligned;
1562 test(result, Immediate(kDoubleAlignmentMask)); 1562 test(result, Immediate(kDoubleAlignmentMask));
1563 j(zero, &aligned, Label::kNear); 1563 j(zero, &aligned, Label::kNear);
1564 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { 1564 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1565 cmp(result, Operand::StaticVariable(allocation_limit)); 1565 cmp(result, Operand::StaticVariable(allocation_limit));
1566 j(above_equal, gc_required); 1566 j(above_equal, gc_required);
1567 } 1567 }
1568 mov(Operand(result, 0), 1568 mov(Operand(result, 0),
1569 Immediate(isolate()->factory()->one_pointer_filler_map())); 1569 Immediate(isolate()->factory()->one_pointer_filler_map()));
1570 add(result, Immediate(kDoubleSize / 2)); 1570 add(result, Immediate(kDoubleSize / 2));
1571 bind(&aligned); 1571 bind(&aligned);
1572 } 1572 }
1573 1573
1574 // Calculate new top and bail out if space is exhausted. 1574 // Calculate new top and bail out if space is exhausted.
1575 // We assume that element_count*element_size + header_size does not 1575 // We assume that element_count*element_size + header_size does not
1576 // overflow. 1576 // overflow.
1577 if (element_count_type == REGISTER_VALUE_IS_SMI) { 1577 if (element_count_type == REGISTER_VALUE_IS_SMI) {
1578 STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1); 1578 STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
1579 STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2); 1579 STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
1580 STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4); 1580 STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
1581 ASSERT(element_size >= times_2); 1581 DCHECK(element_size >= times_2);
1582 ASSERT(kSmiTagSize == 1); 1582 DCHECK(kSmiTagSize == 1);
1583 element_size = static_cast<ScaleFactor>(element_size - 1); 1583 element_size = static_cast<ScaleFactor>(element_size - 1);
1584 } else { 1584 } else {
1585 ASSERT(element_count_type == REGISTER_VALUE_IS_INT32); 1585 DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
1586 } 1586 }
1587 lea(result_end, Operand(element_count, element_size, header_size)); 1587 lea(result_end, Operand(element_count, element_size, header_size));
1588 add(result_end, result); 1588 add(result_end, result);
1589 j(carry, gc_required); 1589 j(carry, gc_required);
1590 cmp(result_end, Operand::StaticVariable(allocation_limit)); 1590 cmp(result_end, Operand::StaticVariable(allocation_limit));
1591 j(above, gc_required); 1591 j(above, gc_required);
1592 1592
1593 if ((flags & TAG_OBJECT) != 0) { 1593 if ((flags & TAG_OBJECT) != 0) {
1594 ASSERT(kHeapObjectTag == 1); 1594 DCHECK(kHeapObjectTag == 1);
1595 inc(result); 1595 inc(result);
1596 } 1596 }
1597 1597
1598 // Update allocation top. 1598 // Update allocation top.
1599 UpdateAllocationTopHelper(result_end, scratch, flags); 1599 UpdateAllocationTopHelper(result_end, scratch, flags);
1600 } 1600 }
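Note on the REGISTER_VALUE_IS_SMI branch above: element_size drops one scale step because a smi already encodes value << 1:

// (count << 1) scaled by times_4 touches the same bytes as count scaled by
// times_8: e.g. count 6 -> smi 12, and 12 * 4 == 48 == 6 * 8. Hence the
// static_cast<ScaleFactor>(element_size - 1) guarded by the STATIC_ASSERTs.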
1601 1601
1602 1602
1603 void MacroAssembler::Allocate(Register object_size, 1603 void MacroAssembler::Allocate(Register object_size,
1604 Register result, 1604 Register result,
1605 Register result_end, 1605 Register result_end,
1606 Register scratch, 1606 Register scratch,
1607 Label* gc_required, 1607 Label* gc_required,
1608 AllocationFlags flags) { 1608 AllocationFlags flags) {
1609 ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); 1609 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1610 if (!FLAG_inline_new) { 1610 if (!FLAG_inline_new) {
1611 if (emit_debug_code()) { 1611 if (emit_debug_code()) {
1612 // Trash the registers to simulate an allocation failure. 1612 // Trash the registers to simulate an allocation failure.
1613 mov(result, Immediate(0x7091)); 1613 mov(result, Immediate(0x7091));
1614 mov(result_end, Immediate(0x7191)); 1614 mov(result_end, Immediate(0x7191));
1615 if (scratch.is_valid()) { 1615 if (scratch.is_valid()) {
1616 mov(scratch, Immediate(0x7291)); 1616 mov(scratch, Immediate(0x7291));
1617 } 1617 }
1618 // object_size is left unchanged by this function. 1618 // object_size is left unchanged by this function.
1619 } 1619 }
1620 jmp(gc_required); 1620 jmp(gc_required);
1621 return; 1621 return;
1622 } 1622 }
1623 ASSERT(!result.is(result_end)); 1623 DCHECK(!result.is(result_end));
1624 1624
1625 // Load address of new object into result. 1625 // Load address of new object into result.
1626 LoadAllocationTopHelper(result, scratch, flags); 1626 LoadAllocationTopHelper(result, scratch, flags);
1627 1627
1628 ExternalReference allocation_limit = 1628 ExternalReference allocation_limit =
1629 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 1629 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1630 1630
1631 // Align the next allocation. Storing the filler map without checking top is 1631 // Align the next allocation. Storing the filler map without checking top is
1632 // safe in new-space because the limit of the heap is aligned there. 1632 // safe in new-space because the limit of the heap is aligned there.
1633 if ((flags & DOUBLE_ALIGNMENT) != 0) { 1633 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1634 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); 1634 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1635 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); 1635 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1636 Label aligned; 1636 Label aligned;
1637 test(result, Immediate(kDoubleAlignmentMask)); 1637 test(result, Immediate(kDoubleAlignmentMask));
1638 j(zero, &aligned, Label::kNear); 1638 j(zero, &aligned, Label::kNear);
1639 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { 1639 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1640 cmp(result, Operand::StaticVariable(allocation_limit)); 1640 cmp(result, Operand::StaticVariable(allocation_limit));
1641 j(above_equal, gc_required); 1641 j(above_equal, gc_required);
1642 } 1642 }
1643 mov(Operand(result, 0), 1643 mov(Operand(result, 0),
1644 Immediate(isolate()->factory()->one_pointer_filler_map())); 1644 Immediate(isolate()->factory()->one_pointer_filler_map()));
1645 add(result, Immediate(kDoubleSize / 2)); 1645 add(result, Immediate(kDoubleSize / 2));
1646 bind(&aligned); 1646 bind(&aligned);
1647 } 1647 }
1648 1648
1649 // Calculate new top and bail out if space is exhausted. 1649 // Calculate new top and bail out if space is exhausted.
1650 if (!object_size.is(result_end)) { 1650 if (!object_size.is(result_end)) {
1651 mov(result_end, object_size); 1651 mov(result_end, object_size);
1652 } 1652 }
1653 add(result_end, result); 1653 add(result_end, result);
1654 j(carry, gc_required); 1654 j(carry, gc_required);
1655 cmp(result_end, Operand::StaticVariable(allocation_limit)); 1655 cmp(result_end, Operand::StaticVariable(allocation_limit));
1656 j(above, gc_required); 1656 j(above, gc_required);
1657 1657
1658 // Tag result if requested. 1658 // Tag result if requested.
1659 if ((flags & TAG_OBJECT) != 0) { 1659 if ((flags & TAG_OBJECT) != 0) {
1660 ASSERT(kHeapObjectTag == 1); 1660 DCHECK(kHeapObjectTag == 1);
1661 inc(result); 1661 inc(result);
1662 } 1662 }
1663 1663
1664 // Update allocation top. 1664 // Update allocation top.
1665 UpdateAllocationTopHelper(result_end, scratch, flags); 1665 UpdateAllocationTopHelper(result_end, scratch, flags);
1666 } 1666 }
1667 1667
1668 1668
1669 void MacroAssembler::UndoAllocationInNewSpace(Register object) { 1669 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
1670 ExternalReference new_space_allocation_top = 1670 ExternalReference new_space_allocation_top =
(...skipping 28 matching lines...)
1699 1699
1700 1700
1701 void MacroAssembler::AllocateTwoByteString(Register result, 1701 void MacroAssembler::AllocateTwoByteString(Register result,
1702 Register length, 1702 Register length,
1703 Register scratch1, 1703 Register scratch1,
1704 Register scratch2, 1704 Register scratch2,
1705 Register scratch3, 1705 Register scratch3,
1706 Label* gc_required) { 1706 Label* gc_required) {
1707 // Calculate the number of bytes needed for the characters in the string while 1707 // Calculate the number of bytes needed for the characters in the string while
1708 // observing object alignment. 1708 // observing object alignment.
1709 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); 1709 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1710 ASSERT(kShortSize == 2); 1710 DCHECK(kShortSize == 2);
1711 // scratch1 = length * 2 + kObjectAlignmentMask. 1711 // scratch1 = length * 2 + kObjectAlignmentMask.
1712 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); 1712 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
1713 and_(scratch1, Immediate(~kObjectAlignmentMask)); 1713 and_(scratch1, Immediate(~kObjectAlignmentMask));
1714 1714
1715 // Allocate two byte string in new space. 1715 // Allocate two byte string in new space.
1716 Allocate(SeqTwoByteString::kHeaderSize, 1716 Allocate(SeqTwoByteString::kHeaderSize,
1717 times_1, 1717 times_1,
1718 scratch1, 1718 scratch1,
1719 REGISTER_VALUE_IS_INT32, 1719 REGISTER_VALUE_IS_INT32,
1720 result, 1720 result,
(...skipping 14 matching lines...)
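Note on the size computation in AllocateTwoByteString above: the single lea forms length * 2 + kObjectAlignmentMask, and the and_ rounds down, i.e.

// size = (length * 2 + kObjectAlignmentMask) & ~kObjectAlignmentMask;
// e.g. length 3 -> 6 + 3 = 9 -> 8 bytes of character data
// (assuming kObjectAlignmentMask == 3, i.e. 4-byte object alignment on ia32).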
1735 1735
1736 1736
1737 void MacroAssembler::AllocateAsciiString(Register result, 1737 void MacroAssembler::AllocateAsciiString(Register result,
1738 Register length, 1738 Register length,
1739 Register scratch1, 1739 Register scratch1,
1740 Register scratch2, 1740 Register scratch2,
1741 Register scratch3, 1741 Register scratch3,
1742 Label* gc_required) { 1742 Label* gc_required) {
1743 // Calculate the number of bytes needed for the characters in the string while 1743 // Calculate the number of bytes needed for the characters in the string while
1744 // observing object alignment. 1744 // observing object alignment.
1745 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); 1745 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1746 mov(scratch1, length); 1746 mov(scratch1, length);
1747 ASSERT(kCharSize == 1); 1747 DCHECK(kCharSize == 1);
1748 add(scratch1, Immediate(kObjectAlignmentMask)); 1748 add(scratch1, Immediate(kObjectAlignmentMask));
1749 and_(scratch1, Immediate(~kObjectAlignmentMask)); 1749 and_(scratch1, Immediate(~kObjectAlignmentMask));
1750 1750
1751 // Allocate ASCII string in new space. 1751 // Allocate ASCII string in new space.
1752 Allocate(SeqOneByteString::kHeaderSize, 1752 Allocate(SeqOneByteString::kHeaderSize,
1753 times_1, 1753 times_1,
1754 scratch1, 1754 scratch1,
1755 REGISTER_VALUE_IS_INT32, 1755 REGISTER_VALUE_IS_INT32,
1756 result, 1756 result,
1757 scratch2, 1757 scratch2,
(...skipping 10 matching lines...)
1768 mov(FieldOperand(result, String::kHashFieldOffset), 1768 mov(FieldOperand(result, String::kHashFieldOffset),
1769 Immediate(String::kEmptyHashField)); 1769 Immediate(String::kEmptyHashField));
1770 } 1770 }
1771 1771
1772 1772
1773 void MacroAssembler::AllocateAsciiString(Register result, 1773 void MacroAssembler::AllocateAsciiString(Register result,
1774 int length, 1774 int length,
1775 Register scratch1, 1775 Register scratch1,
1776 Register scratch2, 1776 Register scratch2,
1777 Label* gc_required) { 1777 Label* gc_required) {
1778 ASSERT(length > 0); 1778 DCHECK(length > 0);
1779 1779
1780 // Allocate ASCII string in new space. 1780 // Allocate ASCII string in new space.
1781 Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2, 1781 Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
1782 gc_required, TAG_OBJECT); 1782 gc_required, TAG_OBJECT);
1783 1783
1784 // Set the map, length and hash field. 1784 // Set the map, length and hash field.
1785 mov(FieldOperand(result, HeapObject::kMapOffset), 1785 mov(FieldOperand(result, HeapObject::kMapOffset),
1786 Immediate(isolate()->factory()->ascii_string_map())); 1786 Immediate(isolate()->factory()->ascii_string_map()));
1787 mov(FieldOperand(result, String::kLengthOffset), 1787 mov(FieldOperand(result, String::kLengthOffset),
1788 Immediate(Smi::FromInt(length))); 1788 Immediate(Smi::FromInt(length)));
(...skipping 67 matching lines...)
1856 // Many variants of movsb, loop unrolling, word moves, and indexed operands 1856 // Many variants of movsb, loop unrolling, word moves, and indexed operands
1857 // have been tried here already, and this is fastest. 1857 // have been tried here already, and this is fastest.
1858 // A simpler loop is faster on small copies, but 30% slower on large ones. 1858 // A simpler loop is faster on small copies, but 30% slower on large ones.
1859 // The cld() instruction must have been emitted, to set the direction flag, 1859 // The cld() instruction must have been emitted, to set the direction flag,
1860 // before calling this function. 1860 // before calling this function.
1861 void MacroAssembler::CopyBytes(Register source, 1861 void MacroAssembler::CopyBytes(Register source,
1862 Register destination, 1862 Register destination,
1863 Register length, 1863 Register length,
1864 Register scratch) { 1864 Register scratch) {
1865 Label short_loop, len4, len8, len12, done, short_string; 1865 Label short_loop, len4, len8, len12, done, short_string;
1866 ASSERT(source.is(esi)); 1866 DCHECK(source.is(esi));
1867 ASSERT(destination.is(edi)); 1867 DCHECK(destination.is(edi));
1868 ASSERT(length.is(ecx)); 1868 DCHECK(length.is(ecx));
1869 cmp(length, Immediate(4)); 1869 cmp(length, Immediate(4));
1870 j(below, &short_string, Label::kNear); 1870 j(below, &short_string, Label::kNear);
1871 1871
1872 // Because source is 4-byte aligned in our uses of this function, 1872 // Because source is 4-byte aligned in our uses of this function,
1873 // we keep source aligned for the rep_movs call by copying the odd bytes 1873 // we keep source aligned for the rep_movs call by copying the odd bytes
1874 // at the end of the ranges. 1874 // at the end of the ranges.
1875 mov(scratch, Operand(source, length, times_1, -4)); 1875 mov(scratch, Operand(source, length, times_1, -4));
1876 mov(Operand(destination, length, times_1, -4), scratch); 1876 mov(Operand(destination, length, times_1, -4), scratch);
1877 1877
1878 cmp(length, Immediate(8)); 1878 cmp(length, Immediate(8));
(...skipping 49 matching lines...)
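Note on the tail handling in CopyBytes above: the last four bytes are copied before the bulk move, so any 1-3 odd trailing bytes are already covered and the word copy (rep_movs in the elided region) can run on a rounded-down length. A rough sketch for length >= 4, assuming non-overlapping buffers:

#include <cstddef>
#include <cstring>

void CopyBytesSketch(char* dst, const char* src, size_t len) {
  std::memcpy(dst + len - 4, src + len - 4, 4);  // covers the odd tail bytes
  std::memcpy(dst, src, len & ~size_t(3));       // aligned bulk copy
}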
1928 bind(&entry); 1928 bind(&entry);
1929 cmp(start_offset, end_offset); 1929 cmp(start_offset, end_offset);
1930 j(less, &loop); 1930 j(less, &loop);
1931 } 1931 }
1932 1932
1933 1933
1934 void MacroAssembler::BooleanBitTest(Register object, 1934 void MacroAssembler::BooleanBitTest(Register object,
1935 int field_offset, 1935 int field_offset,
1936 int bit_index) { 1936 int bit_index) {
1937 bit_index += kSmiTagSize + kSmiShiftSize; 1937 bit_index += kSmiTagSize + kSmiShiftSize;
1938 ASSERT(IsPowerOf2(kBitsPerByte)); 1938 DCHECK(IsPowerOf2(kBitsPerByte));
1939 int byte_index = bit_index / kBitsPerByte; 1939 int byte_index = bit_index / kBitsPerByte;
1940 int byte_bit_index = bit_index & (kBitsPerByte - 1); 1940 int byte_bit_index = bit_index & (kBitsPerByte - 1);
1941 test_b(FieldOperand(object, field_offset + byte_index), 1941 test_b(FieldOperand(object, field_offset + byte_index),
1942 static_cast<byte>(1 << byte_bit_index)); 1942 static_cast<byte>(1 << byte_bit_index));
1943 } 1943 }
1944 1944
1945 1945
1946 1946
1947 void MacroAssembler::NegativeZeroTest(Register result, 1947 void MacroAssembler::NegativeZeroTest(Register result,
1948 Register op, 1948 Register op,
(...skipping 75 matching lines...)
2024 bind(&non_instance); 2024 bind(&non_instance);
2025 mov(result, FieldOperand(result, Map::kConstructorOffset)); 2025 mov(result, FieldOperand(result, Map::kConstructorOffset));
2026 } 2026 }
2027 2027
2028 // All done. 2028 // All done.
2029 bind(&done); 2029 bind(&done);
2030 } 2030 }
2031 2031
2032 2032
2033 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { 2033 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
2034 ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. 2034 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
2035 call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); 2035 call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
2036 } 2036 }
2037 2037
2038 2038
2039 void MacroAssembler::TailCallStub(CodeStub* stub) { 2039 void MacroAssembler::TailCallStub(CodeStub* stub) {
2040 jmp(stub->GetCode(), RelocInfo::CODE_TARGET); 2040 jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
2041 } 2041 }
2042 2042
2043 2043
2044 void MacroAssembler::StubReturn(int argc) { 2044 void MacroAssembler::StubReturn(int argc) {
2045 ASSERT(argc >= 1 && generating_stub()); 2045 DCHECK(argc >= 1 && generating_stub());
2046 ret((argc - 1) * kPointerSize); 2046 ret((argc - 1) * kPointerSize);
2047 } 2047 }
2048 2048
2049 2049
2050 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { 2050 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2051 return has_frame_ || !stub->SometimesSetsUpAFrame(); 2051 return has_frame_ || !stub->SometimesSetsUpAFrame();
2052 } 2052 }
2053 2053
2054 2054
2055 void MacroAssembler::IndexFromHash(Register hash, Register index) { 2055 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2056 // The assert checks that the constants for the maximum number of digits 2056 // The assert checks that the constants for the maximum number of digits
2057 // for an array index cached in the hash field and the number of bits 2057 // for an array index cached in the hash field and the number of bits
2058 // reserved for it do not conflict. 2058 // reserved for it do not conflict.
2059 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < 2059 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2060 (1 << String::kArrayIndexValueBits)); 2060 (1 << String::kArrayIndexValueBits));
2061 if (!index.is(hash)) { 2061 if (!index.is(hash)) {
2062 mov(index, hash); 2062 mov(index, hash);
2063 } 2063 }
2064 DecodeFieldToSmi<String::ArrayIndexValueBits>(index); 2064 DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
2065 } 2065 }
2066 2066
2067 2067
2068 void MacroAssembler::CallRuntime(const Runtime::Function* f, 2068 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2069 int num_arguments, 2069 int num_arguments,
(...skipping 65 matching lines...)
2135 int stack_space, 2135 int stack_space,
2136 Operand return_value_operand, 2136 Operand return_value_operand,
2137 Operand* context_restore_operand) { 2137 Operand* context_restore_operand) {
2138 ExternalReference next_address = 2138 ExternalReference next_address =
2139 ExternalReference::handle_scope_next_address(isolate()); 2139 ExternalReference::handle_scope_next_address(isolate());
2140 ExternalReference limit_address = 2140 ExternalReference limit_address =
2141 ExternalReference::handle_scope_limit_address(isolate()); 2141 ExternalReference::handle_scope_limit_address(isolate());
2142 ExternalReference level_address = 2142 ExternalReference level_address =
2143 ExternalReference::handle_scope_level_address(isolate()); 2143 ExternalReference::handle_scope_level_address(isolate());
2144 2144
2145 ASSERT(edx.is(function_address)); 2145 DCHECK(edx.is(function_address));
2146 // Allocate HandleScope in callee-save registers. 2146 // Allocate HandleScope in callee-save registers.
2147 mov(ebx, Operand::StaticVariable(next_address)); 2147 mov(ebx, Operand::StaticVariable(next_address));
2148 mov(edi, Operand::StaticVariable(limit_address)); 2148 mov(edi, Operand::StaticVariable(limit_address));
2149 add(Operand::StaticVariable(level_address), Immediate(1)); 2149 add(Operand::StaticVariable(level_address), Immediate(1));
2150 2150
2151 if (FLAG_log_timer_events) { 2151 if (FLAG_log_timer_events) {
2152 FrameScope frame(this, StackFrame::MANUAL); 2152 FrameScope frame(this, StackFrame::MANUAL);
2153 PushSafepointRegisters(); 2153 PushSafepointRegisters();
2154 PrepareCallCFunction(1, eax); 2154 PrepareCallCFunction(1, eax);
2155 mov(Operand(esp, 0), 2155 mov(Operand(esp, 0),
(...skipping 136 matching lines...)
2292 const Operand& code_operand, 2292 const Operand& code_operand,
2293 Label* done, 2293 Label* done,
2294 bool* definitely_mismatches, 2294 bool* definitely_mismatches,
2295 InvokeFlag flag, 2295 InvokeFlag flag,
2296 Label::Distance done_near, 2296 Label::Distance done_near,
2297 const CallWrapper& call_wrapper) { 2297 const CallWrapper& call_wrapper) {
2298 bool definitely_matches = false; 2298 bool definitely_matches = false;
2299 *definitely_mismatches = false; 2299 *definitely_mismatches = false;
2300 Label invoke; 2300 Label invoke;
2301 if (expected.is_immediate()) { 2301 if (expected.is_immediate()) {
2302 ASSERT(actual.is_immediate()); 2302 DCHECK(actual.is_immediate());
2303 if (expected.immediate() == actual.immediate()) { 2303 if (expected.immediate() == actual.immediate()) {
2304 definitely_matches = true; 2304 definitely_matches = true;
2305 } else { 2305 } else {
2306 mov(eax, actual.immediate()); 2306 mov(eax, actual.immediate());
2307 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; 2307 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2308 if (expected.immediate() == sentinel) { 2308 if (expected.immediate() == sentinel) {
2309 // Don't worry about adapting arguments for builtins that 2309 // Don't worry about adapting arguments for builtins that
2310 // don't want that done. Skip adaptation code by making it look 2310 // don't want that done. Skip adaptation code by making it look
2311 // like we have a match between expected and actual number of 2311 // like we have a match between expected and actual number of
2312 // arguments. 2312 // arguments.
2313 definitely_matches = true; 2313 definitely_matches = true;
2314 } else { 2314 } else {
2315 *definitely_mismatches = true; 2315 *definitely_mismatches = true;
2316 mov(ebx, expected.immediate()); 2316 mov(ebx, expected.immediate());
2317 } 2317 }
2318 } 2318 }
2319 } else { 2319 } else {
2320 if (actual.is_immediate()) { 2320 if (actual.is_immediate()) {
2321 // Expected is in register, actual is immediate. This is the 2321 // Expected is in register, actual is immediate. This is the
2322 // case when we invoke function values without going through the 2322 // case when we invoke function values without going through the
2323 // IC mechanism. 2323 // IC mechanism.
2324 cmp(expected.reg(), actual.immediate()); 2324 cmp(expected.reg(), actual.immediate());
2325 j(equal, &invoke); 2325 j(equal, &invoke);
2326 ASSERT(expected.reg().is(ebx)); 2326 DCHECK(expected.reg().is(ebx));
2327 mov(eax, actual.immediate()); 2327 mov(eax, actual.immediate());
2328 } else if (!expected.reg().is(actual.reg())) { 2328 } else if (!expected.reg().is(actual.reg())) {
2329 // Both expected and actual are in (different) registers. This 2329 // Both expected and actual are in (different) registers. This
2330 // is the case when we invoke functions using call and apply. 2330 // is the case when we invoke functions using call and apply.
2331 cmp(expected.reg(), actual.reg()); 2331 cmp(expected.reg(), actual.reg());
2332 j(equal, &invoke); 2332 j(equal, &invoke);
2333 ASSERT(actual.reg().is(eax)); 2333 DCHECK(actual.reg().is(eax));
2334 ASSERT(expected.reg().is(ebx)); 2334 DCHECK(expected.reg().is(ebx));
2335 } 2335 }
2336 } 2336 }
2337 2337
2338 if (!definitely_matches) { 2338 if (!definitely_matches) {
2339 Handle<Code> adaptor = 2339 Handle<Code> adaptor =
2340 isolate()->builtins()->ArgumentsAdaptorTrampoline(); 2340 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2341 if (!code_constant.is_null()) { 2341 if (!code_constant.is_null()) {
2342 mov(edx, Immediate(code_constant)); 2342 mov(edx, Immediate(code_constant));
2343 add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); 2343 add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
2344 } else if (!code_operand.is_reg(edx)) { 2344 } else if (!code_operand.is_reg(edx)) {
(...skipping 14 matching lines...)
2359 } 2359 }
2360 } 2360 }
2361 2361
2362 2362
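When both counts are immediates, InvokePrologue reduces to a small decision table: equal counts match, the don't-adapt sentinel matches, anything else definitely mismatches and must go through the arguments adaptor. A sketch, with kSentinel standing in for SharedFunctionInfo::kDontAdaptArgumentsSentinel (whose value is not shown in this diff):

    #include <cassert>

    const int kSentinel = -1;  // placeholder for the real sentinel value

    void Classify(int expected, int actual,
                  bool* definitely_matches, bool* definitely_mismatches) {
      *definitely_matches = false;
      *definitely_mismatches = false;
      if (expected == actual) {
        *definitely_matches = true;     // no adaptor frame needed
      } else if (expected == kSentinel) {
        *definitely_matches = true;     // builtin opted out of adaptation
      } else {
        *definitely_mismatches = true;  // must call the arguments adaptor
      }
    }

    int main() {
      bool m, mm;
      Classify(2, 2, &m, &mm);          assert(m && !mm);
      Classify(kSentinel, 5, &m, &mm);  assert(m && !mm);
      Classify(2, 3, &m, &mm);          assert(!m && mm);
    }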
2363 void MacroAssembler::InvokeCode(const Operand& code, 2363 void MacroAssembler::InvokeCode(const Operand& code,
2364 const ParameterCount& expected, 2364 const ParameterCount& expected,
2365 const ParameterCount& actual, 2365 const ParameterCount& actual,
2366 InvokeFlag flag, 2366 InvokeFlag flag,
2367 const CallWrapper& call_wrapper) { 2367 const CallWrapper& call_wrapper) {
2368 // You can't call a function without a valid frame. 2368 // You can't call a function without a valid frame.
2369 ASSERT(flag == JUMP_FUNCTION || has_frame()); 2369 DCHECK(flag == JUMP_FUNCTION || has_frame());
2370 2370
2371 Label done; 2371 Label done;
2372 bool definitely_mismatches = false; 2372 bool definitely_mismatches = false;
2373 InvokePrologue(expected, actual, Handle<Code>::null(), code, 2373 InvokePrologue(expected, actual, Handle<Code>::null(), code,
2374 &done, &definitely_mismatches, flag, Label::kNear, 2374 &done, &definitely_mismatches, flag, Label::kNear,
2375 call_wrapper); 2375 call_wrapper);
2376 if (!definitely_mismatches) { 2376 if (!definitely_mismatches) {
2377 if (flag == CALL_FUNCTION) { 2377 if (flag == CALL_FUNCTION) {
2378 call_wrapper.BeforeCall(CallSize(code)); 2378 call_wrapper.BeforeCall(CallSize(code));
2379 call(code); 2379 call(code);
2380 call_wrapper.AfterCall(); 2380 call_wrapper.AfterCall();
2381 } else { 2381 } else {
2382 ASSERT(flag == JUMP_FUNCTION); 2382 DCHECK(flag == JUMP_FUNCTION);
2383 jmp(code); 2383 jmp(code);
2384 } 2384 }
2385 bind(&done); 2385 bind(&done);
2386 } 2386 }
2387 } 2387 }
2388 2388
2389 2389
2390 void MacroAssembler::InvokeFunction(Register fun, 2390 void MacroAssembler::InvokeFunction(Register fun,
2391 const ParameterCount& actual, 2391 const ParameterCount& actual,
2392 InvokeFlag flag, 2392 InvokeFlag flag,
2393 const CallWrapper& call_wrapper) { 2393 const CallWrapper& call_wrapper) {
2394 // You can't call a function without a valid frame. 2394 // You can't call a function without a valid frame.
2395 ASSERT(flag == JUMP_FUNCTION || has_frame()); 2395 DCHECK(flag == JUMP_FUNCTION || has_frame());
2396 2396
2397 ASSERT(fun.is(edi)); 2397 DCHECK(fun.is(edi));
2398 mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); 2398 mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2399 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); 2399 mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2400 mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); 2400 mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
2401 SmiUntag(ebx); 2401 SmiUntag(ebx);
2402 2402
2403 ParameterCount expected(ebx); 2403 ParameterCount expected(ebx);
2404 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), 2404 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2405 expected, actual, flag, call_wrapper); 2405 expected, actual, flag, call_wrapper);
2406 } 2406 }
2407 2407
2408 2408
2409 void MacroAssembler::InvokeFunction(Register fun, 2409 void MacroAssembler::InvokeFunction(Register fun,
2410 const ParameterCount& expected, 2410 const ParameterCount& expected,
2411 const ParameterCount& actual, 2411 const ParameterCount& actual,
2412 InvokeFlag flag, 2412 InvokeFlag flag,
2413 const CallWrapper& call_wrapper) { 2413 const CallWrapper& call_wrapper) {
2414 // You can't call a function without a valid frame. 2414 // You can't call a function without a valid frame.
2415 ASSERT(flag == JUMP_FUNCTION || has_frame()); 2415 DCHECK(flag == JUMP_FUNCTION || has_frame());
2416 2416
2417 ASSERT(fun.is(edi)); 2417 DCHECK(fun.is(edi));
2418 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); 2418 mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2419 2419
2420 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), 2420 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2421 expected, actual, flag, call_wrapper); 2421 expected, actual, flag, call_wrapper);
2422 } 2422 }
2423 2423
2424 2424
2425 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, 2425 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2426 const ParameterCount& expected, 2426 const ParameterCount& expected,
2427 const ParameterCount& actual, 2427 const ParameterCount& actual,
2428 InvokeFlag flag, 2428 InvokeFlag flag,
2429 const CallWrapper& call_wrapper) { 2429 const CallWrapper& call_wrapper) {
2430 LoadHeapObject(edi, function); 2430 LoadHeapObject(edi, function);
2431 InvokeFunction(edi, expected, actual, flag, call_wrapper); 2431 InvokeFunction(edi, expected, actual, flag, call_wrapper);
2432 } 2432 }
2433 2433
2434 2434
2435 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 2435 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2436 InvokeFlag flag, 2436 InvokeFlag flag,
2437 const CallWrapper& call_wrapper) { 2437 const CallWrapper& call_wrapper) {
2438 // You can't call a builtin without a valid frame. 2438 // You can't call a builtin without a valid frame.
2439 ASSERT(flag == JUMP_FUNCTION || has_frame()); 2439 DCHECK(flag == JUMP_FUNCTION || has_frame());
2440 2440
2441 // Rely on the assertion to check that the number of provided 2441 // Rely on the assertion to check that the number of provided
2442 // arguments matches the expected number of arguments. Fake a 2442 // arguments matches the expected number of arguments. Fake a
2443 // parameter count to avoid emitting code to do the check. 2443 // parameter count to avoid emitting code to do the check.
2444 ParameterCount expected(0); 2444 ParameterCount expected(0);
2445 GetBuiltinFunction(edi, id); 2445 GetBuiltinFunction(edi, id);
2446 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), 2446 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2447 expected, expected, flag, call_wrapper); 2447 expected, expected, flag, call_wrapper);
2448 } 2448 }
2449 2449
2450 2450
2451 void MacroAssembler::GetBuiltinFunction(Register target, 2451 void MacroAssembler::GetBuiltinFunction(Register target,
2452 Builtins::JavaScript id) { 2452 Builtins::JavaScript id) {
2453 // Load the JavaScript builtin function from the builtins object. 2453 // Load the JavaScript builtin function from the builtins object.
2454 mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 2454 mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2455 mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset)); 2455 mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
2456 mov(target, FieldOperand(target, 2456 mov(target, FieldOperand(target,
2457 JSBuiltinsObject::OffsetOfFunctionWithId(id))); 2457 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2458 } 2458 }
2459 2459
2460 2460
2461 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { 2461 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2462 ASSERT(!target.is(edi)); 2462 DCHECK(!target.is(edi));
2463 // Load the JavaScript builtin function from the builtins object. 2463 // Load the JavaScript builtin function from the builtins object.
2464 GetBuiltinFunction(edi, id); 2464 GetBuiltinFunction(edi, id);
2465 // Load the code entry point from the function into the target register. 2465 // Load the code entry point from the function into the target register.
2466 mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset)); 2466 mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
2467 } 2467 }
2468 2468
2469 2469
2470 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { 2470 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2471 if (context_chain_length > 0) { 2471 if (context_chain_length > 0) {
2472 // Move up the chain of contexts to the context containing the slot. 2472 // Move up the chain of contexts to the context containing the slot.
(...skipping 92 matching lines...)
2565 2565
2566 Operand MacroAssembler::SafepointRegisterSlot(Register reg) { 2566 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2567 return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); 2567 return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2568 } 2568 }
2569 2569
2570 2570
2571 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { 2571 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
2572 // The registers are pushed starting with the lowest encoding, 2572 // The registers are pushed starting with the lowest encoding,
2573 // which means that the lowest encodings are furthest away from 2573 // which means that the lowest encodings are furthest away from
2574 // the stack pointer. 2574 // the stack pointer.
2575 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); 2575 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
2576 return kNumSafepointRegisters - reg_code - 1; 2576 return kNumSafepointRegisters - reg_code - 1;
2577 } 2577 }
2578 2578
2579 2579
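Concretely: pushad pushes the lowest register codes first, so they end up deepest in the frame. A sketch assuming the usual ia32 set of eight safepoint registers with eax = 0 and edi = 7:

    #include <cassert>

    const int kNumSafepointRegisters = 8;  // assumed: the eight pushad registers

    int SafepointRegisterStackIndex(int reg_code) {
      return kNumSafepointRegisters - reg_code - 1;
    }

    int main() {
      assert(SafepointRegisterStackIndex(0) == 7);  // eax: pushed first, deepest
      assert(SafepointRegisterStackIndex(7) == 0);  // edi: pushed last, at esp
    }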
2580 void MacroAssembler::LoadHeapObject(Register result, 2580 void MacroAssembler::LoadHeapObject(Register result,
2581 Handle<HeapObject> object) { 2581 Handle<HeapObject> object) {
2582 AllowDeferredHandleDereference embedding_raw_address; 2582 AllowDeferredHandleDereference embedding_raw_address;
2583 if (isolate()->heap()->InNewSpace(*object)) { 2583 if (isolate()->heap()->InNewSpace(*object)) {
2584 Handle<Cell> cell = isolate()->factory()->NewCell(object); 2584 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2585 mov(result, Operand::ForCell(cell)); 2585 mov(result, Operand::ForCell(cell));
(...skipping 87 matching lines...)
2673 2673
2674 2674
2675 void MacroAssembler::SetCounter(StatsCounter* counter, int value) { 2675 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2676 if (FLAG_native_code_counters && counter->Enabled()) { 2676 if (FLAG_native_code_counters && counter->Enabled()) {
2677 mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value)); 2677 mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
2678 } 2678 }
2679 } 2679 }
2680 2680
2681 2681
2682 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) { 2682 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2683 ASSERT(value > 0); 2683 DCHECK(value > 0);
2684 if (FLAG_native_code_counters && counter->Enabled()) { 2684 if (FLAG_native_code_counters && counter->Enabled()) {
2685 Operand operand = Operand::StaticVariable(ExternalReference(counter)); 2685 Operand operand = Operand::StaticVariable(ExternalReference(counter));
2686 if (value == 1) { 2686 if (value == 1) {
2687 inc(operand); 2687 inc(operand);
2688 } else { 2688 } else {
2689 add(operand, Immediate(value)); 2689 add(operand, Immediate(value));
2690 } 2690 }
2691 } 2691 }
2692 } 2692 }
2693 2693
2694 2694
2695 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { 2695 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2696 ASSERT(value > 0); 2696 DCHECK(value > 0);
2697 if (FLAG_native_code_counters && counter->Enabled()) { 2697 if (FLAG_native_code_counters && counter->Enabled()) {
2698 Operand operand = Operand::StaticVariable(ExternalReference(counter)); 2698 Operand operand = Operand::StaticVariable(ExternalReference(counter));
2699 if (value == 1) { 2699 if (value == 1) {
2700 dec(operand); 2700 dec(operand);
2701 } else { 2701 } else {
2702 sub(operand, Immediate(value)); 2702 sub(operand, Immediate(value));
2703 } 2703 }
2704 } 2704 }
2705 } 2705 }
2706 2706
2707 2707
2708 void MacroAssembler::IncrementCounter(Condition cc, 2708 void MacroAssembler::IncrementCounter(Condition cc,
2709 StatsCounter* counter, 2709 StatsCounter* counter,
2710 int value) { 2710 int value) {
2711 ASSERT(value > 0); 2711 DCHECK(value > 0);
2712 if (FLAG_native_code_counters && counter->Enabled()) { 2712 if (FLAG_native_code_counters && counter->Enabled()) {
2713 Label skip; 2713 Label skip;
2714 j(NegateCondition(cc), &skip); 2714 j(NegateCondition(cc), &skip);
2715 pushfd(); 2715 pushfd();
2716 IncrementCounter(counter, value); 2716 IncrementCounter(counter, value);
2717 popfd(); 2717 popfd();
2718 bind(&skip); 2718 bind(&skip);
2719 } 2719 }
2720 } 2720 }
2721 2721
2722 2722
2723 void MacroAssembler::DecrementCounter(Condition cc, 2723 void MacroAssembler::DecrementCounter(Condition cc,
2724 StatsCounter* counter, 2724 StatsCounter* counter,
2725 int value) { 2725 int value) {
2726 ASSERT(value > 0); 2726 DCHECK(value > 0);
2727 if (FLAG_native_code_counters && counter->Enabled()) { 2727 if (FLAG_native_code_counters && counter->Enabled()) {
2728 Label skip; 2728 Label skip;
2729 j(NegateCondition(cc), &skip); 2729 j(NegateCondition(cc), &skip);
2730 pushfd(); 2730 pushfd();
2731 DecrementCounter(counter, value); 2731 DecrementCounter(counter, value);
2732 popfd(); 2732 popfd();
2733 bind(&skip); 2733 bind(&skip);
2734 } 2734 }
2735 } 2735 }
2736 2736
(...skipping 28 matching lines...)
2765 Abort(reason); 2765 Abort(reason);
2766 // will not return here 2766 // will not return here
2767 bind(&L); 2767 bind(&L);
2768 } 2768 }
2769 2769
2770 2770
2771 void MacroAssembler::CheckStackAlignment() { 2771 void MacroAssembler::CheckStackAlignment() {
2772 int frame_alignment = base::OS::ActivationFrameAlignment(); 2772 int frame_alignment = base::OS::ActivationFrameAlignment();
2773 int frame_alignment_mask = frame_alignment - 1; 2773 int frame_alignment_mask = frame_alignment - 1;
2774 if (frame_alignment > kPointerSize) { 2774 if (frame_alignment > kPointerSize) {
2775 ASSERT(IsPowerOf2(frame_alignment)); 2775 DCHECK(IsPowerOf2(frame_alignment));
2776 Label alignment_as_expected; 2776 Label alignment_as_expected;
2777 test(esp, Immediate(frame_alignment_mask)); 2777 test(esp, Immediate(frame_alignment_mask));
2778 j(zero, &alignment_as_expected); 2778 j(zero, &alignment_as_expected);
2779 // Abort if stack is not aligned. 2779 // Abort if stack is not aligned.
2780 int3(); 2780 int3();
2781 bind(&alignment_as_expected); 2781 bind(&alignment_as_expected);
2782 } 2782 }
2783 } 2783 }
2784 2784
2785 2785
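The test/j(zero) pair is the standard power-of-two alignment check: an aligned stack pointer has zero low bits. The same predicate in C++, as a sketch:

    #include <cassert>
    #include <cstdint>

    bool IsAligned(uintptr_t sp, int frame_alignment) {
      uintptr_t mask = frame_alignment - 1;  // e.g. 16 -> 0xF
      return (sp & mask) == 0;               // mirrors test(esp, mask)
    }

    int main() {
      assert(IsAligned(0x1000, 16));
      assert(!IsAligned(0x1004, 16));
    }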
(...skipping 34 matching lines...)
2820 2820
2821 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { 2821 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
2822 mov(dst, FieldOperand(map, Map::kBitField3Offset)); 2822 mov(dst, FieldOperand(map, Map::kBitField3Offset));
2823 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); 2823 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
2824 } 2824 }
2825 2825
2826 2826
2827 void MacroAssembler::LoadPowerOf2(XMMRegister dst, 2827 void MacroAssembler::LoadPowerOf2(XMMRegister dst,
2828 Register scratch, 2828 Register scratch,
2829 int power) { 2829 int power) {
2830 ASSERT(is_uintn(power + HeapNumber::kExponentBias, 2830 DCHECK(is_uintn(power + HeapNumber::kExponentBias,
2831 HeapNumber::kExponentBits)); 2831 HeapNumber::kExponentBits));
2832 mov(scratch, Immediate(power + HeapNumber::kExponentBias)); 2832 mov(scratch, Immediate(power + HeapNumber::kExponentBias));
2833 movd(dst, scratch); 2833 movd(dst, scratch);
2834 psllq(dst, HeapNumber::kMantissaBits); 2834 psllq(dst, HeapNumber::kMantissaBits);
2835 } 2835 }
2836 2836
2837 2837
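The movd/psllq pair builds the IEEE-754 bit pattern of 2^power directly: zero mantissa, biased exponent in bits 52..62. A sketch using the standard double-precision constants (bias 1023, 52 mantissa bits), which is what HeapNumber's constants denote:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    double PowerOf2(int power) {
      const int kExponentBias = 1023;
      const int kMantissaBits = 52;
      uint64_t bits = static_cast<uint64_t>(power + kExponentBias) << kMantissaBits;
      double result;
      std::memcpy(&result, &bits, sizeof result);  // same effect as movd + psllq
      return result;
    }

    int main() {
      assert(PowerOf2(0) == 1.0);
      assert(PowerOf2(10) == 1024.0);
      assert(PowerOf2(-1) == 0.5);
    }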
2838 void MacroAssembler::LookupNumberStringCache(Register object, 2838 void MacroAssembler::LookupNumberStringCache(Register object,
2839 Register result, 2839 Register result,
2840 Register scratch1, 2840 Register scratch1,
(...skipping 99 matching lines...)
2940 mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset)); 2940 mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
2941 movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset)); 2941 movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2942 movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset)); 2942 movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2943 2943
2944 // Check that both are flat ASCII strings. 2944 // Check that both are flat ASCII strings.
2945 const int kFlatAsciiStringMask = 2945 const int kFlatAsciiStringMask =
2946 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; 2946 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2947 const int kFlatAsciiStringTag = 2947 const int kFlatAsciiStringTag =
2948 kStringTag | kOneByteStringTag | kSeqStringTag; 2948 kStringTag | kOneByteStringTag | kSeqStringTag;
2949 // Interleave bits from both instance types and compare them in one check. 2949 // Interleave bits from both instance types and compare them in one check.
2950 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); 2950 DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2951 and_(scratch1, kFlatAsciiStringMask); 2951 and_(scratch1, kFlatAsciiStringMask);
2952 and_(scratch2, kFlatAsciiStringMask); 2952 and_(scratch2, kFlatAsciiStringMask);
2953 lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); 2953 lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2954 cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3)); 2954 cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
2955 j(not_equal, failure); 2955 j(not_equal, failure);
2956 } 2956 }
2957 2957
2958 2958
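The lea with times_8 packs both masked instance types into a single word so one cmp decides both strings; the DCHECK_EQ above guarantees the mask and its copy shifted left by 3 don't overlap. A sketch with mask and tag values assumed from the string-type constants (0x87 and 0x04 here are illustrative):

    #include <cassert>

    const int kMask = 0x87;  // assumed: not-string | representation | encoding
    const int kTag = 0x04;   // assumed: sequential one-byte string

    bool BothFlatAscii(int type1, int type2) {
      assert((kMask & (kMask << 3)) == 0);  // the DCHECK_EQ in the code above
      int combined = (type1 & kMask) + ((type2 & kMask) << 3);  // lea times_8
      return combined == (kTag | (kTag << 3));
    }

    int main() {
      assert(BothFlatAscii(kTag, kTag));
      assert(!BothFlatAscii(kTag, kTag | 0x80));  // second operand not a string
    }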
2959 void MacroAssembler::JumpIfNotUniqueName(Operand operand, 2959 void MacroAssembler::JumpIfNotUniqueName(Operand operand,
2960 Label* not_unique_name, 2960 Label* not_unique_name,
(...skipping 44 matching lines...)
3005 } 3005 }
3006 3006
3007 3007
3008 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { 3008 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
3009 int frame_alignment = base::OS::ActivationFrameAlignment(); 3009 int frame_alignment = base::OS::ActivationFrameAlignment();
3010 if (frame_alignment != 0) { 3010 if (frame_alignment != 0) {
3011 // Make stack end at alignment and make room for num_arguments words 3011 // Make stack end at alignment and make room for num_arguments words
3012 // and the original value of esp. 3012 // and the original value of esp.
3013 mov(scratch, esp); 3013 mov(scratch, esp);
3014 sub(esp, Immediate((num_arguments + 1) * kPointerSize)); 3014 sub(esp, Immediate((num_arguments + 1) * kPointerSize));
3015 ASSERT(IsPowerOf2(frame_alignment)); 3015 DCHECK(IsPowerOf2(frame_alignment));
3016 and_(esp, -frame_alignment); 3016 and_(esp, -frame_alignment);
3017 mov(Operand(esp, num_arguments * kPointerSize), scratch); 3017 mov(Operand(esp, num_arguments * kPointerSize), scratch);
3018 } else { 3018 } else {
3019 sub(esp, Immediate(num_arguments * kPointerSize)); 3019 sub(esp, Immediate(num_arguments * kPointerSize));
3020 } 3020 }
3021 } 3021 }
3022 3022
3023 3023
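On the aligned path the esp arithmetic reserves num_arguments + 1 slots, rounds down to the alignment, and keeps the caller's esp in the extra slot so CallCFunction can restore it. A sketch of the address math (kPointerSize is 4 on ia32):

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 4;

    uintptr_t AlignedStackTop(uintptr_t esp, int num_arguments, int alignment) {
      uintptr_t new_esp = esp - (num_arguments + 1) * kPointerSize;
      new_esp &= ~static_cast<uintptr_t>(alignment - 1);  // and_(esp, -alignment)
      // The original esp is stored at new_esp + num_arguments * kPointerSize.
      return new_esp;
    }

    int main() {
      uintptr_t top = AlignedStackTop(0x1003C, 2, 16);
      assert(top % 16 == 0);                      // aligned for the callee
      assert(top <= 0x1003C - 3 * kPointerSize);  // 2 args + saved esp fit
    }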
3024 void MacroAssembler::CallCFunction(ExternalReference function, 3024 void MacroAssembler::CallCFunction(ExternalReference function,
3025 int num_arguments) { 3025 int num_arguments) {
3026 // Trashing eax is ok as it will be the return value. 3026 // Trashing eax is ok as it will be the return value.
3027 mov(eax, Immediate(function)); 3027 mov(eax, Immediate(function));
3028 CallCFunction(eax, num_arguments); 3028 CallCFunction(eax, num_arguments);
3029 } 3029 }
3030 3030
3031 3031
3032 void MacroAssembler::CallCFunction(Register function, 3032 void MacroAssembler::CallCFunction(Register function,
3033 int num_arguments) { 3033 int num_arguments) {
3034 ASSERT(has_frame()); 3034 DCHECK(has_frame());
3035 // Check stack alignment. 3035 // Check stack alignment.
3036 if (emit_debug_code()) { 3036 if (emit_debug_code()) {
3037 CheckStackAlignment(); 3037 CheckStackAlignment();
3038 } 3038 }
3039 3039
3040 call(function); 3040 call(function);
3041 if (base::OS::ActivationFrameAlignment() != 0) { 3041 if (base::OS::ActivationFrameAlignment() != 0) {
3042 mov(esp, Operand(esp, num_arguments * kPointerSize)); 3042 mov(esp, Operand(esp, num_arguments * kPointerSize));
3043 } else { 3043 } else {
3044 add(esp, Immediate(num_arguments * kPointerSize)); 3044 add(esp, Immediate(num_arguments * kPointerSize));
(...skipping 30 matching lines...)
3075 #endif 3075 #endif
3076 3076
3077 3077
3078 CodePatcher::CodePatcher(byte* address, int size) 3078 CodePatcher::CodePatcher(byte* address, int size)
3079 : address_(address), 3079 : address_(address),
3080 size_(size), 3080 size_(size),
3081 masm_(NULL, address, size + Assembler::kGap) { 3081 masm_(NULL, address, size + Assembler::kGap) {
3082 // Create a new macro assembler pointing to the address of the code to patch. 3082 // Create a new macro assembler pointing to the address of the code to patch.
3083 // The size is adjusted with kGap in order for the assembler to generate size 3083 // The size is adjusted with kGap in order for the assembler to generate size
3084 // bytes of instructions without failing with buffer size constraints. 3084 // bytes of instructions without failing with buffer size constraints.
3085 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 3085 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3086 } 3086 }
3087 3087
3088 3088
3089 CodePatcher::~CodePatcher() { 3089 CodePatcher::~CodePatcher() {
3090 // Indicate that code has changed. 3090 // Indicate that code has changed.
3091 CpuFeatures::FlushICache(address_, size_); 3091 CpuFeatures::FlushICache(address_, size_);
3092 3092
3093 // Check that the code was patched as expected. 3093 // Check that the code was patched as expected.
3094 ASSERT(masm_.pc_ == address_ + size_); 3094 DCHECK(masm_.pc_ == address_ + size_);
3095 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 3095 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3096 } 3096 }
3097 3097
3098 3098
3099 void MacroAssembler::CheckPageFlag( 3099 void MacroAssembler::CheckPageFlag(
3100 Register object, 3100 Register object,
3101 Register scratch, 3101 Register scratch,
3102 int mask, 3102 int mask,
3103 Condition cc, 3103 Condition cc,
3104 Label* condition_met, 3104 Label* condition_met,
3105 Label::Distance condition_met_distance) { 3105 Label::Distance condition_met_distance) {
3106 ASSERT(cc == zero || cc == not_zero); 3106 DCHECK(cc == zero || cc == not_zero);
3107 if (scratch.is(object)) { 3107 if (scratch.is(object)) {
3108 and_(scratch, Immediate(~Page::kPageAlignmentMask)); 3108 and_(scratch, Immediate(~Page::kPageAlignmentMask));
3109 } else { 3109 } else {
3110 mov(scratch, Immediate(~Page::kPageAlignmentMask)); 3110 mov(scratch, Immediate(~Page::kPageAlignmentMask));
3111 and_(scratch, object); 3111 and_(scratch, object);
3112 } 3112 }
3113 if (mask < (1 << kBitsPerByte)) { 3113 if (mask < (1 << kBitsPerByte)) {
3114 test_b(Operand(scratch, MemoryChunk::kFlagsOffset), 3114 test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
3115 static_cast<uint8_t>(mask)); 3115 static_cast<uint8_t>(mask));
3116 } else { 3116 } else {
3117 test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); 3117 test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
3118 } 3118 }
3119 j(cc, condition_met, condition_met_distance); 3119 j(cc, condition_met, condition_met_distance);
3120 } 3120 }
3121 3121
3122 3122
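Masking an object address with ~kPageAlignmentMask yields the start of its page, where the flags word lives at a fixed offset. A sketch of that address computation, assuming 1 MB pages for illustration:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed 1 MB pages

    uintptr_t PageStart(uintptr_t object_address) {
      return object_address & ~kPageAlignmentMask;  // and_(scratch, ~mask)
    }

    int main() {
      uintptr_t page = 0x40000000;
      assert(PageStart(page + 0x1234) == page);
    }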
3123 void MacroAssembler::CheckPageFlagForMap( 3123 void MacroAssembler::CheckPageFlagForMap(
3124 Handle<Map> map, 3124 Handle<Map> map,
3125 int mask, 3125 int mask,
3126 Condition cc, 3126 Condition cc,
3127 Label* condition_met, 3127 Label* condition_met,
3128 Label::Distance condition_met_distance) { 3128 Label::Distance condition_met_distance) {
3129 ASSERT(cc == zero || cc == not_zero); 3129 DCHECK(cc == zero || cc == not_zero);
3130 Page* page = Page::FromAddress(map->address()); 3130 Page* page = Page::FromAddress(map->address());
3131 ExternalReference reference(ExternalReference::page_flags(page)); 3131 ExternalReference reference(ExternalReference::page_flags(page));
3132 // The inlined static address check of the page's flags relies 3132 // The inlined static address check of the page's flags relies
3133 // on maps never being compacted. 3133 // on maps never being compacted.
3134 ASSERT(!isolate()->heap()->mark_compact_collector()-> 3134 DCHECK(!isolate()->heap()->mark_compact_collector()->
3135 IsOnEvacuationCandidate(*map)); 3135 IsOnEvacuationCandidate(*map));
3136 if (mask < (1 << kBitsPerByte)) { 3136 if (mask < (1 << kBitsPerByte)) {
3137 test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask)); 3137 test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
3138 } else { 3138 } else {
3139 test(Operand::StaticVariable(reference), Immediate(mask)); 3139 test(Operand::StaticVariable(reference), Immediate(mask));
3140 } 3140 }
3141 j(cc, condition_met, condition_met_distance); 3141 j(cc, condition_met, condition_met_distance);
3142 } 3142 }
3143 3143
3144 3144
(...skipping 10 matching lines...)
3155 3155
3156 3156
3157 void MacroAssembler::JumpIfBlack(Register object, 3157 void MacroAssembler::JumpIfBlack(Register object,
3158 Register scratch0, 3158 Register scratch0,
3159 Register scratch1, 3159 Register scratch1,
3160 Label* on_black, 3160 Label* on_black,
3161 Label::Distance on_black_near) { 3161 Label::Distance on_black_near) {
3162 HasColor(object, scratch0, scratch1, 3162 HasColor(object, scratch0, scratch1,
3163 on_black, on_black_near, 3163 on_black, on_black_near,
3164 1, 0); // kBlackBitPattern. 3164 1, 0); // kBlackBitPattern.
3165 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 3165 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3166 } 3166 }
3167 3167
3168 3168
3169 void MacroAssembler::HasColor(Register object, 3169 void MacroAssembler::HasColor(Register object,
3170 Register bitmap_scratch, 3170 Register bitmap_scratch,
3171 Register mask_scratch, 3171 Register mask_scratch,
3172 Label* has_color, 3172 Label* has_color,
3173 Label::Distance has_color_distance, 3173 Label::Distance has_color_distance,
3174 int first_bit, 3174 int first_bit,
3175 int second_bit) { 3175 int second_bit) {
3176 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx)); 3176 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
3177 3177
3178 GetMarkBits(object, bitmap_scratch, mask_scratch); 3178 GetMarkBits(object, bitmap_scratch, mask_scratch);
3179 3179
3180 Label other_color, word_boundary; 3180 Label other_color, word_boundary;
3181 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); 3181 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3182 j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear); 3182 j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
3183 add(mask_scratch, mask_scratch); // Shift left 1 by adding. 3183 add(mask_scratch, mask_scratch); // Shift left 1 by adding.
3184 j(zero, &word_boundary, Label::kNear); 3184 j(zero, &word_boundary, Label::kNear);
3185 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); 3185 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3186 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); 3186 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
3187 jmp(&other_color, Label::kNear); 3187 jmp(&other_color, Label::kNear);
3188 3188
3189 bind(&word_boundary); 3189 bind(&word_boundary);
3190 test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1); 3190 test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
3191 3191
3192 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); 3192 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
3193 bind(&other_color); 3193 bind(&other_color);
3194 } 3194 }
3195 3195
3196 3196
3197 void MacroAssembler::GetMarkBits(Register addr_reg, 3197 void MacroAssembler::GetMarkBits(Register addr_reg,
3198 Register bitmap_reg, 3198 Register bitmap_reg,
3199 Register mask_reg) { 3199 Register mask_reg) {
3200 ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx)); 3200 DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
3201 mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); 3201 mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
3202 and_(bitmap_reg, addr_reg); 3202 and_(bitmap_reg, addr_reg);
3203 mov(ecx, addr_reg); 3203 mov(ecx, addr_reg);
3204 int shift = 3204 int shift =
3205 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; 3205 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
3206 shr(ecx, shift); 3206 shr(ecx, shift);
3207 and_(ecx, 3207 and_(ecx,
3208 (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1)); 3208 (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
3209 3209
3210 add(bitmap_reg, ecx); 3210 add(bitmap_reg, ecx);
3211 mov(ecx, addr_reg); 3211 mov(ecx, addr_reg);
3212 shr(ecx, kPointerSizeLog2); 3212 shr(ecx, kPointerSizeLog2);
3213 and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1); 3213 and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
3214 mov(mask_reg, Immediate(1)); 3214 mov(mask_reg, Immediate(1));
3215 shl_cl(mask_reg); 3215 shl_cl(mask_reg);
3216 } 3216 }
3217 3217
3218 3218
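The marking bitmap holds one bit per pointer-size word of the page, grouped into cells; the shifts above turn an address into a (cell offset, bit mask) pair, with the MemoryChunk::kHeaderSize displacement added at the use sites. A sketch with assumed ia32-flavored constants (4-byte pointers, 32-bit cells, 1 MB pages):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed 1 MB pages
    const int kPointerSizeLog2 = 2;   // 4-byte words
    const int kBitsPerCellLog2 = 5;   // 32 mark bits per cell
    const int kBytesPerCellLog2 = 2;  // 4-byte cells

    void GetMarkBits(uintptr_t addr, uintptr_t* cell, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;
      int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
      uintptr_t cell_offset = (addr >> shift) & (kPageAlignmentMask >> shift)
                                              & ~((1u << kBytesPerCellLog2) - 1);
      *cell = page + cell_offset;  // bitmap cell, before the kHeaderSize bias
      uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
      *mask = 1u << bit;           // mov(mask, 1); shl_cl(mask)
    }

    int main() {
      uintptr_t cell; uint32_t mask;
      GetMarkBits(0x40000100, &cell, &mask);
      assert(cell == 0x40000008u);  // 0x100 bytes in = word 64 = cell byte 8
      assert(mask == 1u);           // word 64 is bit 0 of that cell
    }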
3219 void MacroAssembler::EnsureNotWhite( 3219 void MacroAssembler::EnsureNotWhite(
3220 Register value, 3220 Register value,
3221 Register bitmap_scratch, 3221 Register bitmap_scratch,
3222 Register mask_scratch, 3222 Register mask_scratch,
3223 Label* value_is_white_and_not_data, 3223 Label* value_is_white_and_not_data,
3224 Label::Distance distance) { 3224 Label::Distance distance) {
3225 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx)); 3225 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
3226 GetMarkBits(value, bitmap_scratch, mask_scratch); 3226 GetMarkBits(value, bitmap_scratch, mask_scratch);
3227 3227
3228 // If the value is black or grey we don't need to do anything. 3228 // If the value is black or grey we don't need to do anything.
3229 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); 3229 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3230 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 3230 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3231 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); 3231 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
3232 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); 3232 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3233 3233
3234 Label done; 3234 Label done;
3235 3235
3236 // Since both black and grey have a 1 in the first position and white does 3236 // Since both black and grey have a 1 in the first position and white does
3237 // not have a 1 there, we only need to check one bit. 3237 // not have a 1 there, we only need to check one bit.
3238 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); 3238 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3239 j(not_zero, &done, Label::kNear); 3239 j(not_zero, &done, Label::kNear);
3240 3240
3241 if (emit_debug_code()) { 3241 if (emit_debug_code()) {
3242 // Check for impossible bit pattern. 3242 // Check for impossible bit pattern.
(...skipping 17 matching lines...)
3260 3260
3261 // Check for heap-number 3261 // Check for heap-number
3262 mov(map, FieldOperand(value, HeapObject::kMapOffset)); 3262 mov(map, FieldOperand(value, HeapObject::kMapOffset));
3263 cmp(map, isolate()->factory()->heap_number_map()); 3263 cmp(map, isolate()->factory()->heap_number_map());
3264 j(not_equal, &not_heap_number, Label::kNear); 3264 j(not_equal, &not_heap_number, Label::kNear);
3265 mov(length, Immediate(HeapNumber::kSize)); 3265 mov(length, Immediate(HeapNumber::kSize));
3266 jmp(&is_data_object, Label::kNear); 3266 jmp(&is_data_object, Label::kNear);
3267 3267
3268 bind(&not_heap_number); 3268 bind(&not_heap_number);
3269 // Check for strings. 3269 // Check for strings.
3270 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); 3270 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3271 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); 3271 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3272 // If it's a string and it's not a cons string then it's an object containing 3272 // If it's a string and it's not a cons string then it's an object containing
3273 // no GC pointers. 3273 // no GC pointers.
3274 Register instance_type = ecx; 3274 Register instance_type = ecx;
3275 movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); 3275 movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3276 test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask); 3276 test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
3277 j(not_zero, value_is_white_and_not_data); 3277 j(not_zero, value_is_white_and_not_data);
3278 // It's a non-indirect (non-cons and non-slice) string. 3278 // It's a non-indirect (non-cons and non-slice) string.
3279 // If it's external, the length is just ExternalString::kSize. 3279 // If it's external, the length is just ExternalString::kSize.
3280 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). 3280 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3281 Label not_external; 3281 Label not_external;
3282 // External strings are the only ones with the kExternalStringTag bit 3282 // External strings are the only ones with the kExternalStringTag bit
3283 // set. 3283 // set.
3284 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); 3284 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
3285 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); 3285 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
3286 test_b(instance_type, kExternalStringTag); 3286 test_b(instance_type, kExternalStringTag);
3287 j(zero, &not_external, Label::kNear); 3287 j(zero, &not_external, Label::kNear);
3288 mov(length, Immediate(ExternalString::kSize)); 3288 mov(length, Immediate(ExternalString::kSize));
3289 jmp(&is_data_object, Label::kNear); 3289 jmp(&is_data_object, Label::kNear);
3290 3290
3291 bind(&not_external); 3291 bind(&not_external);
3292 // Sequential string, either ASCII or UC16. 3292 // Sequential string, either ASCII or UC16.
3293 ASSERT(kOneByteStringTag == 0x04); 3293 DCHECK(kOneByteStringTag == 0x04);
3294 and_(length, Immediate(kStringEncodingMask)); 3294 and_(length, Immediate(kStringEncodingMask));
3295 xor_(length, Immediate(kStringEncodingMask)); 3295 xor_(length, Immediate(kStringEncodingMask));
3296 add(length, Immediate(0x04)); 3296 add(length, Immediate(0x04));
3297 // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted 3297 // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
3298 // by 2. If we multiply the string length as smi by this, it still 3298 // by 2. If we multiply the string length as smi by this, it still
3299 // won't overflow a 32-bit value. 3299 // won't overflow a 32-bit value.
3300 ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize); 3300 DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
3301 ASSERT(SeqOneByteString::kMaxSize <= 3301 DCHECK(SeqOneByteString::kMaxSize <=
3302 static_cast<int>(0xffffffffu >> (2 + kSmiTagSize))); 3302 static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
3303 imul(length, FieldOperand(value, String::kLengthOffset)); 3303 imul(length, FieldOperand(value, String::kLengthOffset));
3304 shr(length, 2 + kSmiTagSize + kSmiShiftSize); 3304 shr(length, 2 + kSmiTagSize + kSmiShiftSize);
3305 add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); 3305 add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
3306 and_(length, Immediate(~kObjectAlignmentMask)); 3306 and_(length, Immediate(~kObjectAlignmentMask));
3307 3307
3308 bind(&is_data_object); 3308 bind(&is_data_object);
3309 // Value is a data object, and it is white. Mark it black. Since we know 3309 // Value is a data object, and it is white. Mark it black. Since we know
3310 // that the object is white we can make it black by flipping one bit. 3310 // that the object is white we can make it black by flipping one bit.
3311 or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); 3311 or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
(...skipping 79 matching lines...)
3391 cmp(MemOperand(scratch_reg, -AllocationMemento::kSize), 3391 cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
3392 Immediate(isolate()->factory()->allocation_memento_map())); 3392 Immediate(isolate()->factory()->allocation_memento_map()));
3393 } 3393 }
3394 3394
3395 3395
3396 void MacroAssembler::JumpIfDictionaryInPrototypeChain( 3396 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3397 Register object, 3397 Register object,
3398 Register scratch0, 3398 Register scratch0,
3399 Register scratch1, 3399 Register scratch1,
3400 Label* found) { 3400 Label* found) {
3401 ASSERT(!scratch1.is(scratch0)); 3401 DCHECK(!scratch1.is(scratch0));
3402 Factory* factory = isolate()->factory(); 3402 Factory* factory = isolate()->factory();
3403 Register current = scratch0; 3403 Register current = scratch0;
3404 Label loop_again; 3404 Label loop_again;
3405 3405
3406 // Start the walk at the object itself. 3406 // Start the walk at the object itself.
3407 mov(current, object); 3407 mov(current, object);
3408 3408
3409 // Loop based on the map going up the prototype chain. 3409 // Loop based on the map going up the prototype chain.
3410 bind(&loop_again); 3410 bind(&loop_again);
3411 mov(current, FieldOperand(current, HeapObject::kMapOffset)); 3411 mov(current, FieldOperand(current, HeapObject::kMapOffset));
3412 mov(scratch1, FieldOperand(current, Map::kBitField2Offset)); 3412 mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
3413 DecodeField<Map::ElementsKindBits>(scratch1); 3413 DecodeField<Map::ElementsKindBits>(scratch1);
3414 cmp(scratch1, Immediate(DICTIONARY_ELEMENTS)); 3414 cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
3415 j(equal, found); 3415 j(equal, found);
3416 mov(current, FieldOperand(current, Map::kPrototypeOffset)); 3416 mov(current, FieldOperand(current, Map::kPrototypeOffset));
3417 cmp(current, Immediate(factory->null_value())); 3417 cmp(current, Immediate(factory->null_value()));
3418 j(not_equal, &loop_again); 3418 j(not_equal, &loop_again);
3419 } 3419 }
3420 3420
3421 3421
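The loop above, restated over a toy object model (the structs and fields here are illustrative stand-ins, not V8's real layout): walk map to map, test the decoded elements kind, and stop at the null prototype.

    #include <cassert>

    enum ElementsKind { FAST_ELEMENTS, DICTIONARY_ELEMENTS };

    struct Map;
    struct Object { Map* map; };
    struct Map { ElementsKind elements_kind; Object* prototype; };

    bool HasDictionaryElementsInChain(Object* object, Object* null_value) {
      // Loop based on the map, going up the prototype chain.
      for (Object* current = object; current != null_value;
           current = current->map->prototype) {
        if (current->map->elements_kind == DICTIONARY_ELEMENTS) return true;
      }
      return false;
    }

    int main() {
      Map null_map = { FAST_ELEMENTS, nullptr };
      Object null_value = { &null_map };
      Map proto_map = { DICTIONARY_ELEMENTS, &null_value };
      Object proto = { &proto_map };
      Map obj_map = { FAST_ELEMENTS, &proto };
      Object obj = { &obj_map };
      assert(HasDictionaryElementsInChain(&obj, &null_value));
    }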
3422 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { 3422 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
3423 ASSERT(!dividend.is(eax)); 3423 DCHECK(!dividend.is(eax));
3424 ASSERT(!dividend.is(edx)); 3424 DCHECK(!dividend.is(edx));
3425 MultiplierAndShift ms(divisor); 3425 MultiplierAndShift ms(divisor);
3426 mov(eax, Immediate(ms.multiplier())); 3426 mov(eax, Immediate(ms.multiplier()));
3427 imul(dividend); 3427 imul(dividend);
3428 if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend); 3428 if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
3429 if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend); 3429 if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
3430 if (ms.shift() > 0) sar(edx, ms.shift()); 3430 if (ms.shift() > 0) sar(edx, ms.shift());
3431 mov(eax, dividend); 3431 mov(eax, dividend);
3432 shr(eax, 31); 3432 shr(eax, 31);
3433 add(edx, eax); 3433 add(edx, eax);
3434 } 3434 }
3435 3435
3436 3436
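TruncatingDiv replaces idiv with a multiply-high, an optional shift, and a sign fix-up that truncates toward zero. A worked instance for divisor 3, whose well-known magic pair is multiplier 0x55555556 with shift 0; V8 derives such pairs via MultiplierAndShift:

    #include <cassert>
    #include <cstdint>

    int32_t DivideBy3(int32_t dividend) {
      int32_t multiplier = 0x55555556;  // magic multiplier for divisor 3
      // imul leaves the high 32 bits of the signed 64-bit product in edx.
      int32_t high = static_cast<int32_t>(
          (static_cast<int64_t>(multiplier) * dividend) >> 32);
      // shift is 0 for divisor 3, so no sar; add the dividend's sign bit
      // (shr eax, 31; add edx, eax) to truncate toward zero.
      return high + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
    }

    int main() {
      assert(DivideBy3(9) == 3);
      assert(DivideBy3(7) == 2);
      assert(DivideBy3(-7) == -2);
    }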
3437 } } // namespace v8::internal 3437 } } // namespace v8::internal
3438 3438
3439 #endif // V8_TARGET_ARCH_IA32 3439 #endif // V8_TARGET_ARCH_IA32