OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_X87 | 7 #if V8_TARGET_ARCH_X87 |
8 | 8 |
9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
(...skipping 15 matching lines...) |
26 has_frame_(false) { | 26 has_frame_(false) { |
27 if (isolate() != NULL) { | 27 if (isolate() != NULL) { |
28 // TODO(titzer): should we just use a null handle here instead? | 28 // TODO(titzer): should we just use a null handle here instead? |
29 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | 29 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
30 isolate()); | 30 isolate()); |
31 } | 31 } |
32 } | 32 } |
33 | 33 |
34 | 34 |
35 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { | 35 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { |
36 ASSERT(!r.IsDouble()); | 36 DCHECK(!r.IsDouble()); |
37 if (r.IsInteger8()) { | 37 if (r.IsInteger8()) { |
38 movsx_b(dst, src); | 38 movsx_b(dst, src); |
39 } else if (r.IsUInteger8()) { | 39 } else if (r.IsUInteger8()) { |
40 movzx_b(dst, src); | 40 movzx_b(dst, src); |
41 } else if (r.IsInteger16()) { | 41 } else if (r.IsInteger16()) { |
42 movsx_w(dst, src); | 42 movsx_w(dst, src); |
43 } else if (r.IsUInteger16()) { | 43 } else if (r.IsUInteger16()) { |
44 movzx_w(dst, src); | 44 movzx_w(dst, src); |
45 } else { | 45 } else { |
46 mov(dst, src); | 46 mov(dst, src); |
47 } | 47 } |
48 } | 48 } |
49 | 49 |
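The Load helper above dispatches on the value's Representation: byte and word loads are widened with movsx (sign-extend) or movzx (zero-extend), and everything else is a plain 32-bit mov. A minimal C++ sketch of the same widening semantics (illustrative only, not V8 API):

```cpp
#include <cstdint>
#include <cstring>

// Widening rules mirrored from MacroAssembler::Load.
int32_t LoadInteger8(const void* src) {    // movsx_b: sign-extend 8 -> 32
  int8_t v;
  std::memcpy(&v, src, sizeof(v));
  return v;
}
uint32_t LoadUInteger8(const void* src) {  // movzx_b: zero-extend 8 -> 32
  uint8_t v;
  std::memcpy(&v, src, sizeof(v));
  return v;
}
int32_t LoadInteger16(const void* src) {   // movsx_w: sign-extend 16 -> 32
  int16_t v;
  std::memcpy(&v, src, sizeof(v));
  return v;
}
uint32_t LoadUInteger16(const void* src) { // movzx_w: zero-extend 16 -> 32
  uint16_t v;
  std::memcpy(&v, src, sizeof(v));
  return v;
}
```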
50 | 50 |
51 void MacroAssembler::Store(Register src, const Operand& dst, Representation r) { | 51 void MacroAssembler::Store(Register src, const Operand& dst, Representation r) { |
52 ASSERT(!r.IsDouble()); | 52 DCHECK(!r.IsDouble()); |
53 if (r.IsInteger8() || r.IsUInteger8()) { | 53 if (r.IsInteger8() || r.IsUInteger8()) { |
54 mov_b(dst, src); | 54 mov_b(dst, src); |
55 } else if (r.IsInteger16() || r.IsUInteger16()) { | 55 } else if (r.IsInteger16() || r.IsUInteger16()) { |
56 mov_w(dst, src); | 56 mov_w(dst, src); |
57 } else { | 57 } else { |
58 if (r.IsHeapObject()) { | 58 if (r.IsHeapObject()) { |
59 AssertNotSmi(src); | 59 AssertNotSmi(src); |
60 } else if (r.IsSmi()) { | 60 } else if (r.IsSmi()) { |
61 AssertSmi(src); | 61 AssertSmi(src); |
62 } | 62 } |
(...skipping 13 matching lines...) |
76 mov(destination, Immediate(index)); | 76 mov(destination, Immediate(index)); |
77 mov(destination, Operand::StaticArray(destination, | 77 mov(destination, Operand::StaticArray(destination, |
78 times_pointer_size, | 78 times_pointer_size, |
79 roots_array_start)); | 79 roots_array_start)); |
80 } | 80 } |
81 | 81 |
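LoadRoot reuses the destination register for the index and then folds it into one scaled memory operand: Operand::StaticArray(reg, times_pointer_size, roots_array_start) addresses roots_array_start + reg * kPointerSize. A hedged sketch of that addressing (names illustrative):

```cpp
#include <cstdint>

// roots[index] addressing as encoded by Operand::StaticArray with a
// times_pointer_size scale. On ia32, kPointerSize == sizeof(intptr_t) == 4.
intptr_t LoadRootSlot(const intptr_t* roots_array_start, int index) {
  return roots_array_start[index];   // [base + index * kPointerSize]
}

void StoreRootSlot(intptr_t* roots_array_start, int index, intptr_t value) {
  roots_array_start[index] = value;  // StoreRoot's scratch plays 'index'
}
```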
82 | 82 |
83 void MacroAssembler::StoreRoot(Register source, | 83 void MacroAssembler::StoreRoot(Register source, |
84 Register scratch, | 84 Register scratch, |
85 Heap::RootListIndex index) { | 85 Heap::RootListIndex index) { |
86 ASSERT(Heap::RootCanBeWrittenAfterInitialization(index)); | 86 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); |
87 ExternalReference roots_array_start = | 87 ExternalReference roots_array_start = |
88 ExternalReference::roots_array_start(isolate()); | 88 ExternalReference::roots_array_start(isolate()); |
89 mov(scratch, Immediate(index)); | 89 mov(scratch, Immediate(index)); |
90 mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start), | 90 mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start), |
91 source); | 91 source); |
92 } | 92 } |
93 | 93 |
94 | 94 |
95 void MacroAssembler::CompareRoot(Register with, | 95 void MacroAssembler::CompareRoot(Register with, |
96 Register scratch, | 96 Register scratch, |
97 Heap::RootListIndex index) { | 97 Heap::RootListIndex index) { |
98 ExternalReference roots_array_start = | 98 ExternalReference roots_array_start = |
99 ExternalReference::roots_array_start(isolate()); | 99 ExternalReference::roots_array_start(isolate()); |
100 mov(scratch, Immediate(index)); | 100 mov(scratch, Immediate(index)); |
101 cmp(with, Operand::StaticArray(scratch, | 101 cmp(with, Operand::StaticArray(scratch, |
102 times_pointer_size, | 102 times_pointer_size, |
103 roots_array_start)); | 103 roots_array_start)); |
104 } | 104 } |
105 | 105 |
106 | 106 |
107 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { | 107 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { |
108 ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index)); | 108 DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index)); |
109 Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); | 109 Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); |
110 cmp(with, value); | 110 cmp(with, value); |
111 } | 111 } |
112 | 112 |
113 | 113 |
114 void MacroAssembler::CompareRoot(const Operand& with, | 114 void MacroAssembler::CompareRoot(const Operand& with, |
115 Heap::RootListIndex index) { | 115 Heap::RootListIndex index) { |
116 ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index)); | 116 DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index)); |
117 Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); | 117 Handle<Object> value(&isolate()->heap()->roots_array_start()[index]); |
118 cmp(with, value); | 118 cmp(with, value); |
119 } | 119 } |
120 | 120 |
121 | 121 |
122 void MacroAssembler::InNewSpace( | 122 void MacroAssembler::InNewSpace( |
123 Register object, | 123 Register object, |
124 Register scratch, | 124 Register scratch, |
125 Condition cc, | 125 Condition cc, |
126 Label* condition_met, | 126 Label* condition_met, |
127 Label::Distance condition_met_distance) { | 127 Label::Distance condition_met_distance) { |
128 ASSERT(cc == equal || cc == not_equal); | 128 DCHECK(cc == equal || cc == not_equal); |
129 if (scratch.is(object)) { | 129 if (scratch.is(object)) { |
130 and_(scratch, Immediate(~Page::kPageAlignmentMask)); | 130 and_(scratch, Immediate(~Page::kPageAlignmentMask)); |
131 } else { | 131 } else { |
132 mov(scratch, Immediate(~Page::kPageAlignmentMask)); | 132 mov(scratch, Immediate(~Page::kPageAlignmentMask)); |
133 and_(scratch, object); | 133 and_(scratch, object); |
134 } | 134 } |
135 // Check that we can use a test_b. | 135 // Check that we can use a test_b. |
136 ASSERT(MemoryChunk::IN_FROM_SPACE < 8); | 136 DCHECK(MemoryChunk::IN_FROM_SPACE < 8); |
137 ASSERT(MemoryChunk::IN_TO_SPACE < 8); | 137 DCHECK(MemoryChunk::IN_TO_SPACE < 8); |
138 int mask = (1 << MemoryChunk::IN_FROM_SPACE) | 138 int mask = (1 << MemoryChunk::IN_FROM_SPACE) |
139 | (1 << MemoryChunk::IN_TO_SPACE); | 139 | (1 << MemoryChunk::IN_TO_SPACE); |
140 // If non-zero, the page belongs to new-space. | 140 // If non-zero, the page belongs to new-space. |
141 test_b(Operand(scratch, MemoryChunk::kFlagsOffset), | 141 test_b(Operand(scratch, MemoryChunk::kFlagsOffset), |
142 static_cast<uint8_t>(mask)); | 142 static_cast<uint8_t>(mask)); |
143 j(cc, condition_met, condition_met_distance); | 143 j(cc, condition_met, condition_met_distance); |
144 } | 144 } |
145 | 145 |
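InNewSpace relies on heap pages being power-of-two aligned: masking any object address with ~Page::kPageAlignmentMask lands on the page's MemoryChunk header, whose flags byte is then probed with a single test_b (hence the DCHECKs that both flag indices fit in the low byte). A sketch under assumed constants (1 MB pages, illustrative bit positions):

```cpp
#include <cstdint>

// Illustrative constants; the real values live in Page/MemoryChunk.
constexpr uintptr_t kAssumedPageSize = 1u << 20;            // 1 MB pages
constexpr uintptr_t kPageAlignmentMask = kAssumedPageSize - 1;
constexpr int kInFromSpaceBit = 3;  // must be < 8 for test_b
constexpr int kInToSpaceBit = 4;    // must be < 8 for test_b

uintptr_t PageHeaderOf(uintptr_t object_address) {
  return object_address & ~kPageAlignmentMask;  // and_(scratch, ~mask)
}

bool PageIsInNewSpace(uint8_t flags_byte) {
  int mask = (1 << kInFromSpaceBit) | (1 << kInToSpaceBit);
  return (flags_byte & mask) != 0;  // test_b + j(not_equal, ...)
}
```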
146 | 146 |
147 void MacroAssembler::RememberedSetHelper( | 147 void MacroAssembler::RememberedSetHelper( |
(...skipping 20 matching lines...) |
168 mov(Operand::StaticVariable(store_buffer), scratch); | 168 mov(Operand::StaticVariable(store_buffer), scratch); |
169 // Call stub on end of buffer. | 169 // Call stub on end of buffer. |
170 // Check for end of buffer. | 170 // Check for end of buffer. |
171 test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); | 171 test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); |
172 if (and_then == kReturnAtEnd) { | 172 if (and_then == kReturnAtEnd) { |
173 Label buffer_overflowed; | 173 Label buffer_overflowed; |
174 j(not_equal, &buffer_overflowed, Label::kNear); | 174 j(not_equal, &buffer_overflowed, Label::kNear); |
175 ret(0); | 175 ret(0); |
176 bind(&buffer_overflowed); | 176 bind(&buffer_overflowed); |
177 } else { | 177 } else { |
178 ASSERT(and_then == kFallThroughAtEnd); | 178 DCHECK(and_then == kFallThroughAtEnd); |
179 j(equal, &done, Label::kNear); | 179 j(equal, &done, Label::kNear); |
180 } | 180 } |
181 StoreBufferOverflowStub store_buffer_overflow = | 181 StoreBufferOverflowStub store_buffer_overflow = |
182 StoreBufferOverflowStub(isolate()); | 182 StoreBufferOverflowStub(isolate()); |
183 CallStub(&store_buffer_overflow); | 183 CallStub(&store_buffer_overflow); |
184 if (and_then == kReturnAtEnd) { | 184 if (and_then == kReturnAtEnd) { |
185 ret(0); | 185 ret(0); |
186 } else { | 186 } else { |
187 ASSERT(and_then == kFallThroughAtEnd); | 187 DCHECK(and_then == kFallThroughAtEnd); |
188 bind(&done); | 188 bind(&done); |
189 } | 189 } |
190 } | 190 } |
191 | 191 |
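The visible tail of RememberedSetHelper shows the store-buffer protocol: the slot address is appended at the buffer's top pointer, the top is bumped, and an overflow bit folded into the top address decides whether the overflow stub runs now (kReturnAtEnd) or control falls through (kFallThroughAtEnd). A sketch with an assumed overflow-bit position:

```cpp
#include <cstdint>

// Placeholder value; the real bit is StoreBuffer::kStoreBufferOverflowBit.
constexpr uintptr_t kAssumedOverflowBit = 1u << 15;

// Returns true when the bumped top crosses the overflow boundary and
// the StoreBufferOverflowStub must be called.
bool RecordSlotAddress(uintptr_t*& store_buffer_top, uintptr_t slot_address) {
  *store_buffer_top++ = slot_address;  // store + add(scratch, kPointerSize)
  return (reinterpret_cast<uintptr_t>(store_buffer_top) &
          kAssumedOverflowBit) != 0;   // test(scratch, overflow bit)
}
```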
192 | 192 |
193 void MacroAssembler::ClampUint8(Register reg) { | 193 void MacroAssembler::ClampUint8(Register reg) { |
194 Label done; | 194 Label done; |
195 test(reg, Immediate(0xFFFFFF00)); | 195 test(reg, Immediate(0xFFFFFF00)); |
196 j(zero, &done, Label::kNear); | 196 j(zero, &done, Label::kNear); |
197 setcc(negative, reg); // 1 if negative, 0 if positive. | 197 setcc(negative, reg); // 1 if negative, 0 if positive. |
(...skipping 123 matching lines...) |
321 Register index, | 321 Register index, |
322 RememberedSetAction remembered_set_action, | 322 RememberedSetAction remembered_set_action, |
323 SmiCheck smi_check, | 323 SmiCheck smi_check, |
324 PointersToHereCheck pointers_to_here_check_for_value) { | 324 PointersToHereCheck pointers_to_here_check_for_value) { |
325 // First, check if a write barrier is even needed. The tests below | 325 // First, check if a write barrier is even needed. The tests below |
326 // catch stores of Smis. | 326 // catch stores of Smis. |
327 Label done; | 327 Label done; |
328 | 328 |
329 // Skip barrier if writing a smi. | 329 // Skip barrier if writing a smi. |
330 if (smi_check == INLINE_SMI_CHECK) { | 330 if (smi_check == INLINE_SMI_CHECK) { |
331 ASSERT_EQ(0, kSmiTag); | 331 DCHECK_EQ(0, kSmiTag); |
332 test(value, Immediate(kSmiTagMask)); | 332 test(value, Immediate(kSmiTagMask)); |
333 j(zero, &done); | 333 j(zero, &done); |
334 } | 334 } |
335 | 335 |
336 // Array access: calculate the destination address in the same manner as | 336 // Array access: calculate the destination address in the same manner as |
337 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset | 337 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset |
338 // into an array of words. | 338 // into an array of words. |
339 Register dst = index; | 339 Register dst = index; |
340 lea(dst, Operand(object, index, times_half_pointer_size, | 340 lea(dst, Operand(object, index, times_half_pointer_size, |
341 FixedArray::kHeaderSize - kHeapObjectTag)); | 341 FixedArray::kHeaderSize - kHeapObjectTag)); |
(...skipping 24 matching lines...) |
366 // catch stores of Smis. | 366 // catch stores of Smis. |
367 Label done; | 367 Label done; |
368 | 368 |
369 // Skip barrier if writing a smi. | 369 // Skip barrier if writing a smi. |
370 if (smi_check == INLINE_SMI_CHECK) { | 370 if (smi_check == INLINE_SMI_CHECK) { |
371 JumpIfSmi(value, &done, Label::kNear); | 371 JumpIfSmi(value, &done, Label::kNear); |
372 } | 372 } |
373 | 373 |
374 // Although the object register is tagged, the offset is relative to the start | 374 // Although the object register is tagged, the offset is relative to the start |
375 // of the object, so the offset must be a multiple of kPointerSize. | 375 // of the object, so the offset must be a multiple of kPointerSize. |
376 ASSERT(IsAligned(offset, kPointerSize)); | 376 DCHECK(IsAligned(offset, kPointerSize)); |
377 | 377 |
378 lea(dst, FieldOperand(object, offset)); | 378 lea(dst, FieldOperand(object, offset)); |
379 if (emit_debug_code()) { | 379 if (emit_debug_code()) { |
380 Label ok; | 380 Label ok; |
381 test_b(dst, (1 << kPointerSizeLog2) - 1); | 381 test_b(dst, (1 << kPointerSizeLog2) - 1); |
382 j(zero, &ok, Label::kNear); | 382 j(zero, &ok, Label::kNear); |
383 int3(); | 383 int3(); |
384 bind(&ok); | 384 bind(&ok); |
385 } | 385 } |
386 | 386 |
(...skipping 22 matching lines...) |
409 Register value = scratch2; | 409 Register value = scratch2; |
410 if (emit_debug_code()) { | 410 if (emit_debug_code()) { |
411 Label ok; | 411 Label ok; |
412 lea(address, FieldOperand(object, HeapObject::kMapOffset)); | 412 lea(address, FieldOperand(object, HeapObject::kMapOffset)); |
413 test_b(address, (1 << kPointerSizeLog2) - 1); | 413 test_b(address, (1 << kPointerSizeLog2) - 1); |
414 j(zero, &ok, Label::kNear); | 414 j(zero, &ok, Label::kNear); |
415 int3(); | 415 int3(); |
416 bind(&ok); | 416 bind(&ok); |
417 } | 417 } |
418 | 418 |
419 ASSERT(!object.is(value)); | 419 DCHECK(!object.is(value)); |
420 ASSERT(!object.is(address)); | 420 DCHECK(!object.is(address)); |
421 ASSERT(!value.is(address)); | 421 DCHECK(!value.is(address)); |
422 AssertNotSmi(object); | 422 AssertNotSmi(object); |
423 | 423 |
424 if (!FLAG_incremental_marking) { | 424 if (!FLAG_incremental_marking) { |
425 return; | 425 return; |
426 } | 426 } |
427 | 427 |
428 // Compute the address. | 428 // Compute the address. |
429 lea(address, FieldOperand(object, HeapObject::kMapOffset)); | 429 lea(address, FieldOperand(object, HeapObject::kMapOffset)); |
430 | 430 |
431 // A single check of the map's page's interesting flag suffices, since it is | 431 // A single check of the map's page's interesting flag suffices, since it is |
432 // only set during incremental collection, and then it's also guaranteed that | 432 // only set during incremental collection, and then it's also guaranteed that |
433 // the from object's page's interesting flag is also set. This optimization | 433 // the from object's page's interesting flag is also set. This optimization |
434 // relies on the fact that maps can never be in new space. | 434 // relies on the fact that maps can never be in new space. |
435 ASSERT(!isolate()->heap()->InNewSpace(*map)); | 435 DCHECK(!isolate()->heap()->InNewSpace(*map)); |
436 CheckPageFlagForMap(map, | 436 CheckPageFlagForMap(map, |
437 MemoryChunk::kPointersToHereAreInterestingMask, | 437 MemoryChunk::kPointersToHereAreInterestingMask, |
438 zero, | 438 zero, |
439 &done, | 439 &done, |
440 Label::kNear); | 440 Label::kNear); |
441 | 441 |
442 RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET); | 442 RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET); |
443 CallStub(&stub); | 443 CallStub(&stub); |
444 | 444 |
445 bind(&done); | 445 bind(&done); |
(...skipping 12 matching lines...) |
458 } | 458 } |
459 | 459 |
460 | 460 |
461 void MacroAssembler::RecordWrite( | 461 void MacroAssembler::RecordWrite( |
462 Register object, | 462 Register object, |
463 Register address, | 463 Register address, |
464 Register value, | 464 Register value, |
465 RememberedSetAction remembered_set_action, | 465 RememberedSetAction remembered_set_action, |
466 SmiCheck smi_check, | 466 SmiCheck smi_check, |
467 PointersToHereCheck pointers_to_here_check_for_value) { | 467 PointersToHereCheck pointers_to_here_check_for_value) { |
468 ASSERT(!object.is(value)); | 468 DCHECK(!object.is(value)); |
469 ASSERT(!object.is(address)); | 469 DCHECK(!object.is(address)); |
470 ASSERT(!value.is(address)); | 470 DCHECK(!value.is(address)); |
471 AssertNotSmi(object); | 471 AssertNotSmi(object); |
472 | 472 |
473 if (remembered_set_action == OMIT_REMEMBERED_SET && | 473 if (remembered_set_action == OMIT_REMEMBERED_SET && |
474 !FLAG_incremental_marking) { | 474 !FLAG_incremental_marking) { |
475 return; | 475 return; |
476 } | 476 } |
477 | 477 |
478 if (emit_debug_code()) { | 478 if (emit_debug_code()) { |
479 Label ok; | 479 Label ok; |
480 cmp(value, Operand(address, 0)); | 480 cmp(value, Operand(address, 0)); |
(...skipping 383 matching lines...) |
864 cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset), | 864 cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset), |
865 Immediate(Smi::FromInt(type))); | 865 Immediate(Smi::FromInt(type))); |
866 Check(equal, kStackFrameTypesMustMatch); | 866 Check(equal, kStackFrameTypesMustMatch); |
867 } | 867 } |
868 leave(); | 868 leave(); |
869 } | 869 } |
870 | 870 |
871 | 871 |
872 void MacroAssembler::EnterExitFramePrologue() { | 872 void MacroAssembler::EnterExitFramePrologue() { |
873 // Set up the frame structure on the stack. | 873 // Set up the frame structure on the stack. |
874 ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); | 874 DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); |
875 ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); | 875 DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); |
876 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); | 876 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); |
877 push(ebp); | 877 push(ebp); |
878 mov(ebp, esp); | 878 mov(ebp, esp); |
879 | 879 |
880 // Reserve room for entry stack pointer and push the code object. | 880 // Reserve room for entry stack pointer and push the code object. |
881 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); | 881 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize); |
882 push(Immediate(0)); // Saved entry sp, patched before call. | 882 push(Immediate(0)); // Saved entry sp, patched before call. |
883 push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot. | 883 push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot. |
884 | 884 |
885 // Save the frame pointer and the context in top. | 885 // Save the frame pointer and the context in top. |
886 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate()); | 886 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate()); |
887 ExternalReference context_address(Isolate::kContextAddress, isolate()); | 887 ExternalReference context_address(Isolate::kContextAddress, isolate()); |
888 mov(Operand::StaticVariable(c_entry_fp_address), ebp); | 888 mov(Operand::StaticVariable(c_entry_fp_address), ebp); |
889 mov(Operand::StaticVariable(context_address), esi); | 889 mov(Operand::StaticVariable(context_address), esi); |
890 } | 890 } |
891 | 891 |
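The three DCHECKs at the top of EnterExitFramePrologue pin down the frame layout that push(ebp); mov(ebp, esp) produces. Spelled out as compile-time checks (kPointerSize == 4 assumed for ia32/x87):

```cpp
constexpr int kPointerSize = 4;  // ia32/x87 assumption

// Offsets relative to ebp once the prologue has run.
constexpr int kCallerSPDisplacement = +2 * kPointerSize;  // caller's esp
constexpr int kCallerPCOffset       = +1 * kPointerSize;  // return address
constexpr int kCallerFPOffset       =  0 * kPointerSize;  // saved ebp
constexpr int kSPOffset             = -1 * kPointerSize;  // entry sp slot

static_assert(kCallerSPDisplacement == 8 && kCallerPCOffset == 4 &&
              kCallerFPOffset == 0 && kSPOffset == -4,
              "layout after push(ebp); mov(ebp, esp); push(0); push(code)");
```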
892 | 892 |
893 void MacroAssembler::EnterExitFrameEpilogue(int argc) { | 893 void MacroAssembler::EnterExitFrameEpilogue(int argc) { |
894 sub(esp, Immediate(argc * kPointerSize)); | 894 sub(esp, Immediate(argc * kPointerSize)); |
895 | 895 |
896 // Get the required frame alignment for the OS. | 896 // Get the required frame alignment for the OS. |
897 const int kFrameAlignment = base::OS::ActivationFrameAlignment(); | 897 const int kFrameAlignment = base::OS::ActivationFrameAlignment(); |
898 if (kFrameAlignment > 0) { | 898 if (kFrameAlignment > 0) { |
899 ASSERT(IsPowerOf2(kFrameAlignment)); | 899 DCHECK(IsPowerOf2(kFrameAlignment)); |
900 and_(esp, -kFrameAlignment); | 900 and_(esp, -kFrameAlignment); |
901 } | 901 } |
902 | 902 |
903 // Patch the saved entry sp. | 903 // Patch the saved entry sp. |
904 mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp); | 904 mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp); |
905 } | 905 } |
906 | 906 |
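and_(esp, -kFrameAlignment) works because, for a power-of-two alignment A, the two's-complement encoding of -A is ~(A - 1); the AND therefore rounds esp down to the next A-byte boundary, which is why the code DCHECKs IsPowerOf2 first. Sketch:

```cpp
#include <cassert>
#include <cstdint>

uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // IsPowerOf2(kFrameAlignment)
  // sp & -alignment clears the low log2(alignment) bits.
  return sp & ~(alignment - 1);
}
```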
907 | 907 |
908 void MacroAssembler::EnterExitFrame() { | 908 void MacroAssembler::EnterExitFrame() { |
909 EnterExitFramePrologue(); | 909 EnterExitFramePrologue(); |
(...skipping 195 matching lines...) |
1105 JumpToHandlerEntry(); | 1105 JumpToHandlerEntry(); |
1106 } | 1106 } |
1107 | 1107 |
1108 | 1108 |
1109 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 1109 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
1110 Register scratch1, | 1110 Register scratch1, |
1111 Register scratch2, | 1111 Register scratch2, |
1112 Label* miss) { | 1112 Label* miss) { |
1113 Label same_contexts; | 1113 Label same_contexts; |
1114 | 1114 |
1115 ASSERT(!holder_reg.is(scratch1)); | 1115 DCHECK(!holder_reg.is(scratch1)); |
1116 ASSERT(!holder_reg.is(scratch2)); | 1116 DCHECK(!holder_reg.is(scratch2)); |
1117 ASSERT(!scratch1.is(scratch2)); | 1117 DCHECK(!scratch1.is(scratch2)); |
1118 | 1118 |
1119 // Load current lexical context from the stack frame. | 1119 // Load current lexical context from the stack frame. |
1120 mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset)); | 1120 mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset)); |
1121 | 1121 |
1122 // When generating debug code, make sure the lexical context is set. | 1122 // When generating debug code, make sure the lexical context is set. |
1123 if (emit_debug_code()) { | 1123 if (emit_debug_code()) { |
1124 cmp(scratch1, Immediate(0)); | 1124 cmp(scratch1, Immediate(0)); |
1125 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext); | 1125 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext); |
1126 } | 1126 } |
1127 // Load the native context of the current context. | 1127 // Load the native context of the current context. |
(...skipping 125 matching lines...) |
1253 for (int i = 0; i < kNumberDictionaryProbes; i++) { | 1253 for (int i = 0; i < kNumberDictionaryProbes; i++) { |
1254 // Use r2 for index calculations and keep the hash intact in r0. | 1254 // Use r2 for index calculations and keep the hash intact in r0. |
1255 mov(r2, r0); | 1255 mov(r2, r0); |
1256 // Compute the masked index: (hash + i + i * i) & mask. | 1256 // Compute the masked index: (hash + i + i * i) & mask. |
1257 if (i > 0) { | 1257 if (i > 0) { |
1258 add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i))); | 1258 add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i))); |
1259 } | 1259 } |
1260 and_(r2, r1); | 1260 and_(r2, r1); |
1261 | 1261 |
1262 // Scale the index by multiplying by the entry size. | 1262 // Scale the index by multiplying by the entry size. |
1263 ASSERT(SeededNumberDictionary::kEntrySize == 3); | 1263 DCHECK(SeededNumberDictionary::kEntrySize == 3); |
1264 lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 | 1264 lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 |
1265 | 1265 |
1266 // Check if the key matches. | 1266 // Check if the key matches. |
1267 cmp(key, FieldOperand(elements, | 1267 cmp(key, FieldOperand(elements, |
1268 r2, | 1268 r2, |
1269 times_pointer_size, | 1269 times_pointer_size, |
1270 SeededNumberDictionary::kElementsStartOffset)); | 1270 SeededNumberDictionary::kElementsStartOffset)); |
1271 if (i != (kNumberDictionaryProbes - 1)) { | 1271 if (i != (kNumberDictionaryProbes - 1)) { |
1272 j(equal, &done); | 1272 j(equal, &done); |
1273 } else { | 1273 } else { |
1274 j(not_equal, miss); | 1274 j(not_equal, miss); |
1275 } | 1275 } |
1276 } | 1276 } |
1277 | 1277 |
1278 bind(&done); | 1278 bind(&done); |
1279 // Check that the value is a normal property. | 1279 // Check that the value is a normal property. |
1280 const int kDetailsOffset = | 1280 const int kDetailsOffset = |
1281 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; | 1281 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; |
1282 ASSERT_EQ(NORMAL, 0); | 1282 DCHECK_EQ(NORMAL, 0); |
1283 test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), | 1283 test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), |
1284 Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize)); | 1284 Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize)); |
1285 j(not_zero, miss); | 1285 j(not_zero, miss); |
1286 | 1286 |
1287 // Get the value at the masked, scaled index. | 1287 // Get the value at the masked, scaled index. |
1288 const int kValueOffset = | 1288 const int kValueOffset = |
1289 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | 1289 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
1290 mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); | 1290 mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); |
1291 } | 1291 } |
1292 | 1292 |
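The dictionary lookup above probes with offsets i + i*i (SeededNumberDictionary::GetProbeOffset), masked by capacity - 1, and scales each probe by the 3-word entry size using lea(r2, Operand(r2, r2, times_2, 0)). A sketch of the probe arithmetic:

```cpp
#include <cstdint>

constexpr int kEntrySize = 3;  // key, value, details (per the DCHECK)

// Masked quadratic probe: (hash + i + i*i) & (capacity - 1).
uint32_t ProbeSlot(uint32_t hash, uint32_t i, uint32_t capacity_mask) {
  return (hash + i + i * i) & capacity_mask;
}

// Index into the elements array, as computed by r2 = r2 * 3.
uint32_t ElementsIndex(uint32_t probe_slot) {
  return probe_slot * kEntrySize;
}
```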
1293 | 1293 |
1294 void MacroAssembler::LoadAllocationTopHelper(Register result, | 1294 void MacroAssembler::LoadAllocationTopHelper(Register result, |
1295 Register scratch, | 1295 Register scratch, |
1296 AllocationFlags flags) { | 1296 AllocationFlags flags) { |
1297 ExternalReference allocation_top = | 1297 ExternalReference allocation_top = |
1298 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 1298 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
1299 | 1299 |
1300 // Just return if allocation top is already known. | 1300 // Just return if allocation top is already known. |
1301 if ((flags & RESULT_CONTAINS_TOP) != 0) { | 1301 if ((flags & RESULT_CONTAINS_TOP) != 0) { |
1302 // No use of scratch if allocation top is provided. | 1302 // No use of scratch if allocation top is provided. |
1303 ASSERT(scratch.is(no_reg)); | 1303 DCHECK(scratch.is(no_reg)); |
1304 #ifdef DEBUG | 1304 #ifdef DEBUG |
1305 // Assert that result actually contains top on entry. | 1305 // Assert that result actually contains top on entry. |
1306 cmp(result, Operand::StaticVariable(allocation_top)); | 1306 cmp(result, Operand::StaticVariable(allocation_top)); |
1307 Check(equal, kUnexpectedAllocationTop); | 1307 Check(equal, kUnexpectedAllocationTop); |
1308 #endif | 1308 #endif |
1309 return; | 1309 return; |
1310 } | 1310 } |
1311 | 1311 |
1312 // Move address of new object to result. Use scratch register if available. | 1312 // Move address of new object to result. Use scratch register if available. |
1313 if (scratch.is(no_reg)) { | 1313 if (scratch.is(no_reg)) { |
(...skipping 24 matching lines...) |
1338 } | 1338 } |
1339 } | 1339 } |
1340 | 1340 |
1341 | 1341 |
1342 void MacroAssembler::Allocate(int object_size, | 1342 void MacroAssembler::Allocate(int object_size, |
1343 Register result, | 1343 Register result, |
1344 Register result_end, | 1344 Register result_end, |
1345 Register scratch, | 1345 Register scratch, |
1346 Label* gc_required, | 1346 Label* gc_required, |
1347 AllocationFlags flags) { | 1347 AllocationFlags flags) { |
1348 ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); | 1348 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); |
1349 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | 1349 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); |
1350 if (!FLAG_inline_new) { | 1350 if (!FLAG_inline_new) { |
1351 if (emit_debug_code()) { | 1351 if (emit_debug_code()) { |
1352 // Trash the registers to simulate an allocation failure. | 1352 // Trash the registers to simulate an allocation failure. |
1353 mov(result, Immediate(0x7091)); | 1353 mov(result, Immediate(0x7091)); |
1354 if (result_end.is_valid()) { | 1354 if (result_end.is_valid()) { |
1355 mov(result_end, Immediate(0x7191)); | 1355 mov(result_end, Immediate(0x7191)); |
1356 } | 1356 } |
1357 if (scratch.is_valid()) { | 1357 if (scratch.is_valid()) { |
1358 mov(scratch, Immediate(0x7291)); | 1358 mov(scratch, Immediate(0x7291)); |
1359 } | 1359 } |
1360 } | 1360 } |
1361 jmp(gc_required); | 1361 jmp(gc_required); |
1362 return; | 1362 return; |
1363 } | 1363 } |
1364 ASSERT(!result.is(result_end)); | 1364 DCHECK(!result.is(result_end)); |
1365 | 1365 |
1366 // Load address of new object into result. | 1366 // Load address of new object into result. |
1367 LoadAllocationTopHelper(result, scratch, flags); | 1367 LoadAllocationTopHelper(result, scratch, flags); |
1368 | 1368 |
1369 ExternalReference allocation_limit = | 1369 ExternalReference allocation_limit = |
1370 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1370 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
1371 | 1371 |
1372 // Align the next allocation. Storing the filler map without checking top is | 1372 // Align the next allocation. Storing the filler map without checking top is |
1373 // safe in new-space because the limit of the heap is aligned there. | 1373 // safe in new-space because the limit of the heap is aligned there. |
1374 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1374 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1375 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1375 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
1376 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1376 DCHECK(kPointerAlignment * 2 == kDoubleAlignment); |
1377 Label aligned; | 1377 Label aligned; |
1378 test(result, Immediate(kDoubleAlignmentMask)); | 1378 test(result, Immediate(kDoubleAlignmentMask)); |
1379 j(zero, &aligned, Label::kNear); | 1379 j(zero, &aligned, Label::kNear); |
1380 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 1380 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
1381 cmp(result, Operand::StaticVariable(allocation_limit)); | 1381 cmp(result, Operand::StaticVariable(allocation_limit)); |
1382 j(above_equal, gc_required); | 1382 j(above_equal, gc_required); |
1383 } | 1383 } |
1384 mov(Operand(result, 0), | 1384 mov(Operand(result, 0), |
1385 Immediate(isolate()->factory()->one_pointer_filler_map())); | 1385 Immediate(isolate()->factory()->one_pointer_filler_map())); |
1386 add(result, Immediate(kDoubleSize / 2)); | 1386 add(result, Immediate(kDoubleSize / 2)); |
(...skipping 15 matching lines...) |
1402 | 1402 |
1403 // Tag result if requested. | 1403 // Tag result if requested. |
1404 bool tag_result = (flags & TAG_OBJECT) != 0; | 1404 bool tag_result = (flags & TAG_OBJECT) != 0; |
1405 if (top_reg.is(result)) { | 1405 if (top_reg.is(result)) { |
1406 if (tag_result) { | 1406 if (tag_result) { |
1407 sub(result, Immediate(object_size - kHeapObjectTag)); | 1407 sub(result, Immediate(object_size - kHeapObjectTag)); |
1408 } else { | 1408 } else { |
1409 sub(result, Immediate(object_size)); | 1409 sub(result, Immediate(object_size)); |
1410 } | 1410 } |
1411 } else if (tag_result) { | 1411 } else if (tag_result) { |
1412 ASSERT(kHeapObjectTag == 1); | 1412 DCHECK(kHeapObjectTag == 1); |
1413 inc(result); | 1413 inc(result); |
1414 } | 1414 } |
1415 } | 1415 } |
1416 | 1416 |
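The DOUBLE_ALIGNMENT block in each Allocate variant handles ia32's 4-byte-aligned allocation top: when 8-byte alignment is requested and top sits on an odd word, a one-word filler map is stored at top and top advances by kDoubleSize / 2. A sketch (the filler-map value is a stand-in):

```cpp
#include <cstdint>

constexpr uintptr_t kDoubleSize = 8;
constexpr uintptr_t kDoubleAlignmentMask = kDoubleSize - 1;

uintptr_t AlignAllocationTop(uintptr_t top, uintptr_t one_pointer_filler_map) {
  if ((top & kDoubleAlignmentMask) != 0) {        // test + j(zero, &aligned)
    // Store the filler map so the heap stays iterable, then skip a word.
    *reinterpret_cast<uintptr_t*>(top) = one_pointer_filler_map;
    top += kDoubleSize / 2;                       // add(result, kDoubleSize / 2)
  }
  return top;
}
```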
1417 | 1417 |
1418 void MacroAssembler::Allocate(int header_size, | 1418 void MacroAssembler::Allocate(int header_size, |
1419 ScaleFactor element_size, | 1419 ScaleFactor element_size, |
1420 Register element_count, | 1420 Register element_count, |
1421 RegisterValueType element_count_type, | 1421 RegisterValueType element_count_type, |
1422 Register result, | 1422 Register result, |
1423 Register result_end, | 1423 Register result_end, |
1424 Register scratch, | 1424 Register scratch, |
1425 Label* gc_required, | 1425 Label* gc_required, |
1426 AllocationFlags flags) { | 1426 AllocationFlags flags) { |
1427 ASSERT((flags & SIZE_IN_WORDS) == 0); | 1427 DCHECK((flags & SIZE_IN_WORDS) == 0); |
1428 if (!FLAG_inline_new) { | 1428 if (!FLAG_inline_new) { |
1429 if (emit_debug_code()) { | 1429 if (emit_debug_code()) { |
1430 // Trash the registers to simulate an allocation failure. | 1430 // Trash the registers to simulate an allocation failure. |
1431 mov(result, Immediate(0x7091)); | 1431 mov(result, Immediate(0x7091)); |
1432 mov(result_end, Immediate(0x7191)); | 1432 mov(result_end, Immediate(0x7191)); |
1433 if (scratch.is_valid()) { | 1433 if (scratch.is_valid()) { |
1434 mov(scratch, Immediate(0x7291)); | 1434 mov(scratch, Immediate(0x7291)); |
1435 } | 1435 } |
1436 // Register element_count is not modified by the function. | 1436 // Register element_count is not modified by the function. |
1437 } | 1437 } |
1438 jmp(gc_required); | 1438 jmp(gc_required); |
1439 return; | 1439 return; |
1440 } | 1440 } |
1441 ASSERT(!result.is(result_end)); | 1441 DCHECK(!result.is(result_end)); |
1442 | 1442 |
1443 // Load address of new object into result. | 1443 // Load address of new object into result. |
1444 LoadAllocationTopHelper(result, scratch, flags); | 1444 LoadAllocationTopHelper(result, scratch, flags); |
1445 | 1445 |
1446 ExternalReference allocation_limit = | 1446 ExternalReference allocation_limit = |
1447 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1447 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
1448 | 1448 |
1449 // Align the next allocation. Storing the filler map without checking top is | 1449 // Align the next allocation. Storing the filler map without checking top is |
1450 // safe in new-space because the limit of the heap is aligned there. | 1450 // safe in new-space because the limit of the heap is aligned there. |
1451 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1451 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1452 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1452 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
1453 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1453 DCHECK(kPointerAlignment * 2 == kDoubleAlignment); |
1454 Label aligned; | 1454 Label aligned; |
1455 test(result, Immediate(kDoubleAlignmentMask)); | 1455 test(result, Immediate(kDoubleAlignmentMask)); |
1456 j(zero, &aligned, Label::kNear); | 1456 j(zero, &aligned, Label::kNear); |
1457 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 1457 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
1458 cmp(result, Operand::StaticVariable(allocation_limit)); | 1458 cmp(result, Operand::StaticVariable(allocation_limit)); |
1459 j(above_equal, gc_required); | 1459 j(above_equal, gc_required); |
1460 } | 1460 } |
1461 mov(Operand(result, 0), | 1461 mov(Operand(result, 0), |
1462 Immediate(isolate()->factory()->one_pointer_filler_map())); | 1462 Immediate(isolate()->factory()->one_pointer_filler_map())); |
1463 add(result, Immediate(kDoubleSize / 2)); | 1463 add(result, Immediate(kDoubleSize / 2)); |
1464 bind(&aligned); | 1464 bind(&aligned); |
1465 } | 1465 } |
1466 | 1466 |
1467 // Calculate new top and bail out if space is exhausted. | 1467 // Calculate new top and bail out if space is exhausted. |
1468 // We assume that element_count*element_size + header_size does not | 1468 // We assume that element_count*element_size + header_size does not |
1469 // overflow. | 1469 // overflow. |
1470 if (element_count_type == REGISTER_VALUE_IS_SMI) { | 1470 if (element_count_type == REGISTER_VALUE_IS_SMI) { |
1471 STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1); | 1471 STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1); |
1472 STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2); | 1472 STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2); |
1473 STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4); | 1473 STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4); |
1474 ASSERT(element_size >= times_2); | 1474 DCHECK(element_size >= times_2); |
1475 ASSERT(kSmiTagSize == 1); | 1475 DCHECK(kSmiTagSize == 1); |
1476 element_size = static_cast<ScaleFactor>(element_size - 1); | 1476 element_size = static_cast<ScaleFactor>(element_size - 1); |
1477 } else { | 1477 } else { |
1478 ASSERT(element_count_type == REGISTER_VALUE_IS_INT32); | 1478 DCHECK(element_count_type == REGISTER_VALUE_IS_INT32); |
1479 } | 1479 } |
1480 lea(result_end, Operand(element_count, element_size, header_size)); | 1480 lea(result_end, Operand(element_count, element_size, header_size)); |
1481 add(result_end, result); | 1481 add(result_end, result); |
1482 j(carry, gc_required); | 1482 j(carry, gc_required); |
1483 cmp(result_end, Operand::StaticVariable(allocation_limit)); | 1483 cmp(result_end, Operand::StaticVariable(allocation_limit)); |
1484 j(above, gc_required); | 1484 j(above, gc_required); |
1485 | 1485 |
1486 if ((flags & TAG_OBJECT) != 0) { | 1486 if ((flags & TAG_OBJECT) != 0) { |
1487 ASSERT(kHeapObjectTag == 1); | 1487 DCHECK(kHeapObjectTag == 1); |
1488 inc(result); | 1488 inc(result); |
1489 } | 1489 } |
1490 | 1490 |
1491 // Update allocation top. | 1491 // Update allocation top. |
1492 UpdateAllocationTopHelper(result_end, scratch, flags); | 1492 UpdateAllocationTopHelper(result_end, scratch, flags); |
1493 } | 1493 } |
1494 | 1494 |
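The REGISTER_VALUE_IS_SMI branch above exploits smi tagging: on ia32 a smi is the integer shifted left by one (kSmiTagSize == 1), so scaling the tagged count with the next-smaller ScaleFactor yields the same byte count as scaling the raw value, which is what the STATIC_ASSERTs about times_2/times_4/times_8 pin down. Sketch (the DCHECK guarantees log2_element_size >= 1):

```cpp
#include <cstdint>

// smi == raw << 1, so shifting by (log2_size - 1) compensates.
uint32_t BytesFromSmiCount(uint32_t smi_count, int log2_element_size,
                           uint32_t header_size) {
  return (smi_count << (log2_element_size - 1)) + header_size;
}

uint32_t BytesFromRawCount(uint32_t raw_count, int log2_element_size,
                           uint32_t header_size) {
  return (raw_count << log2_element_size) + header_size;
}
// BytesFromSmiCount(raw << 1, k, h) == BytesFromRawCount(raw, k, h).
```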
1495 | 1495 |
1496 void MacroAssembler::Allocate(Register object_size, | 1496 void MacroAssembler::Allocate(Register object_size, |
1497 Register result, | 1497 Register result, |
1498 Register result_end, | 1498 Register result_end, |
1499 Register scratch, | 1499 Register scratch, |
1500 Label* gc_required, | 1500 Label* gc_required, |
1501 AllocationFlags flags) { | 1501 AllocationFlags flags) { |
1502 ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); | 1502 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); |
1503 if (!FLAG_inline_new) { | 1503 if (!FLAG_inline_new) { |
1504 if (emit_debug_code()) { | 1504 if (emit_debug_code()) { |
1505 // Trash the registers to simulate an allocation failure. | 1505 // Trash the registers to simulate an allocation failure. |
1506 mov(result, Immediate(0x7091)); | 1506 mov(result, Immediate(0x7091)); |
1507 mov(result_end, Immediate(0x7191)); | 1507 mov(result_end, Immediate(0x7191)); |
1508 if (scratch.is_valid()) { | 1508 if (scratch.is_valid()) { |
1509 mov(scratch, Immediate(0x7291)); | 1509 mov(scratch, Immediate(0x7291)); |
1510 } | 1510 } |
1511 // object_size is left unchanged by this function. | 1511 // object_size is left unchanged by this function. |
1512 } | 1512 } |
1513 jmp(gc_required); | 1513 jmp(gc_required); |
1514 return; | 1514 return; |
1515 } | 1515 } |
1516 ASSERT(!result.is(result_end)); | 1516 DCHECK(!result.is(result_end)); |
1517 | 1517 |
1518 // Load address of new object into result. | 1518 // Load address of new object into result. |
1519 LoadAllocationTopHelper(result, scratch, flags); | 1519 LoadAllocationTopHelper(result, scratch, flags); |
1520 | 1520 |
1521 ExternalReference allocation_limit = | 1521 ExternalReference allocation_limit = |
1522 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1522 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
1523 | 1523 |
1524 // Align the next allocation. Storing the filler map without checking top is | 1524 // Align the next allocation. Storing the filler map without checking top is |
1525 // safe in new-space because the limit of the heap is aligned there. | 1525 // safe in new-space because the limit of the heap is aligned there. |
1526 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1526 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1527 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1527 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
1528 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1528 DCHECK(kPointerAlignment * 2 == kDoubleAlignment); |
1529 Label aligned; | 1529 Label aligned; |
1530 test(result, Immediate(kDoubleAlignmentMask)); | 1530 test(result, Immediate(kDoubleAlignmentMask)); |
1531 j(zero, &aligned, Label::kNear); | 1531 j(zero, &aligned, Label::kNear); |
1532 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 1532 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
1533 cmp(result, Operand::StaticVariable(allocation_limit)); | 1533 cmp(result, Operand::StaticVariable(allocation_limit)); |
1534 j(above_equal, gc_required); | 1534 j(above_equal, gc_required); |
1535 } | 1535 } |
1536 mov(Operand(result, 0), | 1536 mov(Operand(result, 0), |
1537 Immediate(isolate()->factory()->one_pointer_filler_map())); | 1537 Immediate(isolate()->factory()->one_pointer_filler_map())); |
1538 add(result, Immediate(kDoubleSize / 2)); | 1538 add(result, Immediate(kDoubleSize / 2)); |
1539 bind(&aligned); | 1539 bind(&aligned); |
1540 } | 1540 } |
1541 | 1541 |
1542 // Calculate new top and bail out if space is exhausted. | 1542 // Calculate new top and bail out if space is exhausted. |
1543 if (!object_size.is(result_end)) { | 1543 if (!object_size.is(result_end)) { |
1544 mov(result_end, object_size); | 1544 mov(result_end, object_size); |
1545 } | 1545 } |
1546 add(result_end, result); | 1546 add(result_end, result); |
1547 j(carry, gc_required); | 1547 j(carry, gc_required); |
1548 cmp(result_end, Operand::StaticVariable(allocation_limit)); | 1548 cmp(result_end, Operand::StaticVariable(allocation_limit)); |
1549 j(above, gc_required); | 1549 j(above, gc_required); |
1550 | 1550 |
1551 // Tag result if requested. | 1551 // Tag result if requested. |
1552 if ((flags & TAG_OBJECT) != 0) { | 1552 if ((flags & TAG_OBJECT) != 0) { |
1553 ASSERT(kHeapObjectTag == 1); | 1553 DCHECK(kHeapObjectTag == 1); |
1554 inc(result); | 1554 inc(result); |
1555 } | 1555 } |
1556 | 1556 |
1557 // Update allocation top. | 1557 // Update allocation top. |
1558 UpdateAllocationTopHelper(result_end, scratch, flags); | 1558 UpdateAllocationTopHelper(result_end, scratch, flags); |
1559 } | 1559 } |
1560 | 1560 |
1561 | 1561 |
1562 void MacroAssembler::UndoAllocationInNewSpace(Register object) { | 1562 void MacroAssembler::UndoAllocationInNewSpace(Register object) { |
1563 ExternalReference new_space_allocation_top = | 1563 ExternalReference new_space_allocation_top = |
(...skipping 28 matching lines...) |
1592 | 1592 |
1593 | 1593 |
1594 void MacroAssembler::AllocateTwoByteString(Register result, | 1594 void MacroAssembler::AllocateTwoByteString(Register result, |
1595 Register length, | 1595 Register length, |
1596 Register scratch1, | 1596 Register scratch1, |
1597 Register scratch2, | 1597 Register scratch2, |
1598 Register scratch3, | 1598 Register scratch3, |
1599 Label* gc_required) { | 1599 Label* gc_required) { |
1600 // Calculate the number of bytes needed for the characters in the string while | 1600 // Calculate the number of bytes needed for the characters in the string while |
1601 // observing object alignment. | 1601 // observing object alignment. |
1602 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1602 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
1603 ASSERT(kShortSize == 2); | 1603 DCHECK(kShortSize == 2); |
1604 // scratch1 = length * 2 + kObjectAlignmentMask. | 1604 // scratch1 = length * 2 + kObjectAlignmentMask. |
1605 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); | 1605 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); |
1606 and_(scratch1, Immediate(~kObjectAlignmentMask)); | 1606 and_(scratch1, Immediate(~kObjectAlignmentMask)); |
1607 | 1607 |
1608 // Allocate two byte string in new space. | 1608 // Allocate two byte string in new space. |
1609 Allocate(SeqTwoByteString::kHeaderSize, | 1609 Allocate(SeqTwoByteString::kHeaderSize, |
1610 times_1, | 1610 times_1, |
1611 scratch1, | 1611 scratch1, |
1612 REGISTER_VALUE_IS_INT32, | 1612 REGISTER_VALUE_IS_INT32, |
1613 result, | 1613 result, |
(...skipping 14 matching lines...) |
1628 | 1628 |
1629 | 1629 |
1630 void MacroAssembler::AllocateAsciiString(Register result, | 1630 void MacroAssembler::AllocateAsciiString(Register result, |
1631 Register length, | 1631 Register length, |
1632 Register scratch1, | 1632 Register scratch1, |
1633 Register scratch2, | 1633 Register scratch2, |
1634 Register scratch3, | 1634 Register scratch3, |
1635 Label* gc_required) { | 1635 Label* gc_required) { |
1636 // Calculate the number of bytes needed for the characters in the string while | 1636 // Calculate the number of bytes needed for the characters in the string while |
1637 // observing object alignment. | 1637 // observing object alignment. |
1638 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1638 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
1639 mov(scratch1, length); | 1639 mov(scratch1, length); |
1640 ASSERT(kCharSize == 1); | 1640 DCHECK(kCharSize == 1); |
1641 add(scratch1, Immediate(kObjectAlignmentMask)); | 1641 add(scratch1, Immediate(kObjectAlignmentMask)); |
1642 and_(scratch1, Immediate(~kObjectAlignmentMask)); | 1642 and_(scratch1, Immediate(~kObjectAlignmentMask)); |
1643 | 1643 |
1644 // Allocate ASCII string in new space. | 1644 // Allocate ASCII string in new space. |
1645 Allocate(SeqOneByteString::kHeaderSize, | 1645 Allocate(SeqOneByteString::kHeaderSize, |
1646 times_1, | 1646 times_1, |
1647 scratch1, | 1647 scratch1, |
1648 REGISTER_VALUE_IS_INT32, | 1648 REGISTER_VALUE_IS_INT32, |
1649 result, | 1649 result, |
1650 scratch2, | 1650 scratch2, |
(...skipping 10 matching lines...) |
1661 mov(FieldOperand(result, String::kHashFieldOffset), | 1661 mov(FieldOperand(result, String::kHashFieldOffset), |
1662 Immediate(String::kEmptyHashField)); | 1662 Immediate(String::kEmptyHashField)); |
1663 } | 1663 } |
1664 | 1664 |
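Both string allocators round the character area up to object alignment with the usual add-mask-then-clear idiom; the two-byte variant first doubles the length via lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)). Sketch with ia32's assumed 4-byte object alignment:

```cpp
#include <cstdint>

constexpr uint32_t kObjectAlignmentMask = 4 - 1;  // ia32 assumption

// One-byte strings: round length up to a multiple of 4.
uint32_t AsciiCharArea(uint32_t length) {
  return (length + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}

// Two-byte strings: length * 2 + mask in one lea, then the same AND.
uint32_t TwoByteCharArea(uint32_t length) {
  return (length * 2 + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}
```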
1665 | 1665 |
1666 void MacroAssembler::AllocateAsciiString(Register result, | 1666 void MacroAssembler::AllocateAsciiString(Register result, |
1667 int length, | 1667 int length, |
1668 Register scratch1, | 1668 Register scratch1, |
1669 Register scratch2, | 1669 Register scratch2, |
1670 Label* gc_required) { | 1670 Label* gc_required) { |
1671 ASSERT(length > 0); | 1671 DCHECK(length > 0); |
1672 | 1672 |
1673 // Allocate ASCII string in new space. | 1673 // Allocate ASCII string in new space. |
1674 Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2, | 1674 Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2, |
1675 gc_required, TAG_OBJECT); | 1675 gc_required, TAG_OBJECT); |
1676 | 1676 |
1677 // Set the map, length and hash field. | 1677 // Set the map, length and hash field. |
1678 mov(FieldOperand(result, HeapObject::kMapOffset), | 1678 mov(FieldOperand(result, HeapObject::kMapOffset), |
1679 Immediate(isolate()->factory()->ascii_string_map())); | 1679 Immediate(isolate()->factory()->ascii_string_map())); |
1680 mov(FieldOperand(result, String::kLengthOffset), | 1680 mov(FieldOperand(result, String::kLengthOffset), |
1681 Immediate(Smi::FromInt(length))); | 1681 Immediate(Smi::FromInt(length))); |
(...skipping 67 matching lines...) |
1749 // Many variants of movsb, loop unrolling, word moves, and indexed operands | 1749 // Many variants of movsb, loop unrolling, word moves, and indexed operands |
1750 // have been tried here already, and this is fastest. | 1750 // have been tried here already, and this is fastest. |
1751 // A simpler loop is faster on small copies, but 30% slower on large ones. | 1751 // A simpler loop is faster on small copies, but 30% slower on large ones. |
1752 // The cld() instruction must have been emitted, to set the direction flag, | 1752 // The cld() instruction must have been emitted, to set the direction flag, |
1753 // before calling this function. | 1753 // before calling this function. |
1754 void MacroAssembler::CopyBytes(Register source, | 1754 void MacroAssembler::CopyBytes(Register source, |
1755 Register destination, | 1755 Register destination, |
1756 Register length, | 1756 Register length, |
1757 Register scratch) { | 1757 Register scratch) { |
1758 Label short_loop, len4, len8, len12, done, short_string; | 1758 Label short_loop, len4, len8, len12, done, short_string; |
1759 ASSERT(source.is(esi)); | 1759 DCHECK(source.is(esi)); |
1760 ASSERT(destination.is(edi)); | 1760 DCHECK(destination.is(edi)); |
1761 ASSERT(length.is(ecx)); | 1761 DCHECK(length.is(ecx)); |
1762 cmp(length, Immediate(4)); | 1762 cmp(length, Immediate(4)); |
1763 j(below, &short_string, Label::kNear); | 1763 j(below, &short_string, Label::kNear); |
1764 | 1764 |
1765 // Because source is 4-byte aligned in our uses of this function, | 1765 // Because source is 4-byte aligned in our uses of this function, |
1766 // we keep source aligned for the rep_movs call by copying the odd bytes | 1766 // we keep source aligned for the rep_movs call by copying the odd bytes |
1767 // at the end of the ranges. | 1767 // at the end of the ranges. |
1768 mov(scratch, Operand(source, length, times_1, -4)); | 1768 mov(scratch, Operand(source, length, times_1, -4)); |
1769 mov(Operand(destination, length, times_1, -4), scratch); | 1769 mov(Operand(destination, length, times_1, -4), scratch); |
1770 | 1770 |
1771 cmp(length, Immediate(8)); | 1771 cmp(length, Immediate(8)); |
(...skipping 49 matching lines...) |
1821 bind(&entry); | 1821 bind(&entry); |
1822 cmp(start_offset, end_offset); | 1822 cmp(start_offset, end_offset); |
1823 j(less, &loop); | 1823 j(less, &loop); |
1824 } | 1824 } |
1825 | 1825 |
1826 | 1826 |
1827 void MacroAssembler::BooleanBitTest(Register object, | 1827 void MacroAssembler::BooleanBitTest(Register object, |
1828 int field_offset, | 1828 int field_offset, |
1829 int bit_index) { | 1829 int bit_index) { |
1830 bit_index += kSmiTagSize + kSmiShiftSize; | 1830 bit_index += kSmiTagSize + kSmiShiftSize; |
1831 ASSERT(IsPowerOf2(kBitsPerByte)); | 1831 DCHECK(IsPowerOf2(kBitsPerByte)); |
1832 int byte_index = bit_index / kBitsPerByte; | 1832 int byte_index = bit_index / kBitsPerByte; |
1833 int byte_bit_index = bit_index & (kBitsPerByte - 1); | 1833 int byte_bit_index = bit_index & (kBitsPerByte - 1); |
1834 test_b(FieldOperand(object, field_offset + byte_index), | 1834 test_b(FieldOperand(object, field_offset + byte_index), |
1835 static_cast<byte>(1 << byte_bit_index)); | 1835 static_cast<byte>(1 << byte_bit_index)); |
1836 } | 1836 } |
1837 | 1837 |
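BooleanBitTest turns a bit index into a byte offset plus a bit-within-byte so the whole test is one test_b; the initial adjustment skips the smi tag bits of the tagged field. Sketch:

```cpp
// kBitsPerByte == 8, which the DCHECK(IsPowerOf2(kBitsPerByte)) guards.
struct BitLocation {
  int byte_index;   // added to field_offset
  int bit_in_byte;  // tested with 1 << bit_in_byte
};

BitLocation LocateBooleanBit(int bit_index, int smi_tag_size,
                             int smi_shift_size) {
  bit_index += smi_tag_size + smi_shift_size;  // skip the smi tag bits
  return {bit_index / 8, bit_index & 7};
}
```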
1838 | 1838 |
1839 | 1839 |
1840 void MacroAssembler::NegativeZeroTest(Register result, | 1840 void MacroAssembler::NegativeZeroTest(Register result, |
1841 Register op, | 1841 Register op, |
(...skipping 75 matching lines...) |
1917 bind(&non_instance); | 1917 bind(&non_instance); |
1918 mov(result, FieldOperand(result, Map::kConstructorOffset)); | 1918 mov(result, FieldOperand(result, Map::kConstructorOffset)); |
1919 } | 1919 } |
1920 | 1920 |
1921 // All done. | 1921 // All done. |
1922 bind(&done); | 1922 bind(&done); |
1923 } | 1923 } |
1924 | 1924 |
1925 | 1925 |
1926 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { | 1926 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { |
1927 ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. | 1927 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. |
1928 call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); | 1928 call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); |
1929 } | 1929 } |
1930 | 1930 |
1931 | 1931 |
1932 void MacroAssembler::TailCallStub(CodeStub* stub) { | 1932 void MacroAssembler::TailCallStub(CodeStub* stub) { |
1933 jmp(stub->GetCode(), RelocInfo::CODE_TARGET); | 1933 jmp(stub->GetCode(), RelocInfo::CODE_TARGET); |
1934 } | 1934 } |
1935 | 1935 |
1936 | 1936 |
1937 void MacroAssembler::StubReturn(int argc) { | 1937 void MacroAssembler::StubReturn(int argc) { |
1938 ASSERT(argc >= 1 && generating_stub()); | 1938 DCHECK(argc >= 1 && generating_stub()); |
1939 ret((argc - 1) * kPointerSize); | 1939 ret((argc - 1) * kPointerSize); |
1940 } | 1940 } |
1941 | 1941 |
1942 | 1942 |
1943 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | 1943 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { |
1944 return has_frame_ || !stub->SometimesSetsUpAFrame(); | 1944 return has_frame_ || !stub->SometimesSetsUpAFrame(); |
1945 } | 1945 } |
1946 | 1946 |
1947 | 1947 |
1948 void MacroAssembler::IndexFromHash(Register hash, Register index) { | 1948 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
1949 // The assert checks that the constants for the maximum number of digits | 1949 // The assert checks that the constants for the maximum number of digits |
1950 // for an array index cached in the hash field and the number of bits | 1950 // for an array index cached in the hash field and the number of bits |
1951 // reserved for it do not conflict. | 1951 // reserved for it do not conflict. |
1952 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | 1952 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < |
1953 (1 << String::kArrayIndexValueBits)); | 1953 (1 << String::kArrayIndexValueBits)); |
1954 if (!index.is(hash)) { | 1954 if (!index.is(hash)) { |
1955 mov(index, hash); | 1955 mov(index, hash); |
1956 } | 1956 } |
1957 DecodeFieldToSmi<String::ArrayIndexValueBits>(index); | 1957 DecodeFieldToSmi<String::ArrayIndexValueBits>(index); |
1958 } | 1958 } |
1959 | 1959 |
1960 | 1960 |
1961 void MacroAssembler::CallRuntime(const Runtime::Function* f, | 1961 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
1962 int num_arguments) { | 1962 int num_arguments) { |
(...skipping 64 matching lines...) |
2027 int stack_space, | 2027 int stack_space, |
2028 Operand return_value_operand, | 2028 Operand return_value_operand, |
2029 Operand* context_restore_operand) { | 2029 Operand* context_restore_operand) { |
2030 ExternalReference next_address = | 2030 ExternalReference next_address = |
2031 ExternalReference::handle_scope_next_address(isolate()); | 2031 ExternalReference::handle_scope_next_address(isolate()); |
2032 ExternalReference limit_address = | 2032 ExternalReference limit_address = |
2033 ExternalReference::handle_scope_limit_address(isolate()); | 2033 ExternalReference::handle_scope_limit_address(isolate()); |
2034 ExternalReference level_address = | 2034 ExternalReference level_address = |
2035 ExternalReference::handle_scope_level_address(isolate()); | 2035 ExternalReference::handle_scope_level_address(isolate()); |
2036 | 2036 |
2037 ASSERT(edx.is(function_address)); | 2037 DCHECK(edx.is(function_address)); |
2038 // Allocate HandleScope in callee-save registers. | 2038 // Allocate HandleScope in callee-save registers. |
2039 mov(ebx, Operand::StaticVariable(next_address)); | 2039 mov(ebx, Operand::StaticVariable(next_address)); |
2040 mov(edi, Operand::StaticVariable(limit_address)); | 2040 mov(edi, Operand::StaticVariable(limit_address)); |
2041 add(Operand::StaticVariable(level_address), Immediate(1)); | 2041 add(Operand::StaticVariable(level_address), Immediate(1)); |
2042 | 2042 |
2043 if (FLAG_log_timer_events) { | 2043 if (FLAG_log_timer_events) { |
2044 FrameScope frame(this, StackFrame::MANUAL); | 2044 FrameScope frame(this, StackFrame::MANUAL); |
2045 PushSafepointRegisters(); | 2045 PushSafepointRegisters(); |
2046 PrepareCallCFunction(1, eax); | 2046 PrepareCallCFunction(1, eax); |
2047 mov(Operand(esp, 0), | 2047 mov(Operand(esp, 0), |
(...skipping 136 matching lines...) |
2184 const Operand& code_operand, | 2184 const Operand& code_operand, |
2185 Label* done, | 2185 Label* done, |
2186 bool* definitely_mismatches, | 2186 bool* definitely_mismatches, |
2187 InvokeFlag flag, | 2187 InvokeFlag flag, |
2188 Label::Distance done_near, | 2188 Label::Distance done_near, |
2189 const CallWrapper& call_wrapper) { | 2189 const CallWrapper& call_wrapper) { |
2190 bool definitely_matches = false; | 2190 bool definitely_matches = false; |
2191 *definitely_mismatches = false; | 2191 *definitely_mismatches = false; |
2192 Label invoke; | 2192 Label invoke; |
2193 if (expected.is_immediate()) { | 2193 if (expected.is_immediate()) { |
2194 ASSERT(actual.is_immediate()); | 2194 DCHECK(actual.is_immediate()); |
2195 if (expected.immediate() == actual.immediate()) { | 2195 if (expected.immediate() == actual.immediate()) { |
2196 definitely_matches = true; | 2196 definitely_matches = true; |
2197 } else { | 2197 } else { |
2198 mov(eax, actual.immediate()); | 2198 mov(eax, actual.immediate()); |
2199 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; | 2199 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
2200 if (expected.immediate() == sentinel) { | 2200 if (expected.immediate() == sentinel) { |
2201 // Don't worry about adapting arguments for builtins that | 2201 // Don't worry about adapting arguments for builtins that |
2202 // don't want that done. Skip adaptation code by making it look | 2202 // don't want that done. Skip adaptation code by making it look |
2203 // like we have a match between expected and actual number of | 2203 // like we have a match between expected and actual number of |
2204 // arguments. | 2204 // arguments. |
2205 definitely_matches = true; | 2205 definitely_matches = true; |
2206 } else { | 2206 } else { |
2207 *definitely_mismatches = true; | 2207 *definitely_mismatches = true; |
2208 mov(ebx, expected.immediate()); | 2208 mov(ebx, expected.immediate()); |
2209 } | 2209 } |
2210 } | 2210 } |
2211 } else { | 2211 } else { |
2212 if (actual.is_immediate()) { | 2212 if (actual.is_immediate()) { |
2213 // Expected is in register, actual is immediate. This is the | 2213 // Expected is in register, actual is immediate. This is the |
2214 // case when we invoke function values without going through the | 2214 // case when we invoke function values without going through the |
2215 // IC mechanism. | 2215 // IC mechanism. |
2216 cmp(expected.reg(), actual.immediate()); | 2216 cmp(expected.reg(), actual.immediate()); |
2217 j(equal, &invoke); | 2217 j(equal, &invoke); |
2218 ASSERT(expected.reg().is(ebx)); | 2218 DCHECK(expected.reg().is(ebx)); |
2219 mov(eax, actual.immediate()); | 2219 mov(eax, actual.immediate()); |
2220 } else if (!expected.reg().is(actual.reg())) { | 2220 } else if (!expected.reg().is(actual.reg())) { |
2221 // Both expected and actual are in (different) registers. This | 2221 // Both expected and actual are in (different) registers. This |
2222 // is the case when we invoke functions using call and apply. | 2222 // is the case when we invoke functions using call and apply. |
2223 cmp(expected.reg(), actual.reg()); | 2223 cmp(expected.reg(), actual.reg()); |
2224 j(equal, &invoke); | 2224 j(equal, &invoke); |
2225 ASSERT(actual.reg().is(eax)); | 2225 DCHECK(actual.reg().is(eax)); |
2226 ASSERT(expected.reg().is(ebx)); | 2226 DCHECK(expected.reg().is(ebx)); |
2227 } | 2227 } |
2228 } | 2228 } |
2229 | 2229 |
2230 if (!definitely_matches) { | 2230 if (!definitely_matches) { |
2231 Handle<Code> adaptor = | 2231 Handle<Code> adaptor = |
2232 isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 2232 isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
2233 if (!code_constant.is_null()) { | 2233 if (!code_constant.is_null()) { |
2234 mov(edx, Immediate(code_constant)); | 2234 mov(edx, Immediate(code_constant)); |
2235 add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); | 2235 add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); |
2236 } else if (!code_operand.is_reg(edx)) { | 2236 } else if (!code_operand.is_reg(edx)) { |
(...skipping 14 matching lines...)
2251 } | 2251 } |
2252 } | 2252 } |
2253 | 2253 |
2254 | 2254 |
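When both the expected and actual argument counts are immediates, the prologue's decision reduces to three static outcomes; only the register cases fall through to a runtime compare. A minimal C++ sketch of that immediate/immediate branch (names are hypothetical, not part of the macro assembler):

struct MatchDecision {
  bool definitely_matches;     // skip the adaptor entirely
  bool definitely_mismatches;  // the adaptor trampoline always runs
};

// Mirrors the immediate/immediate branch of InvokePrologue above.
MatchDecision DecideAdaptation(int expected, int actual, int sentinel) {
  if (expected == actual) return {true, false};
  if (expected == sentinel) return {true, false};  // builtin opts out
  return {false, true};  // counts differ statically
}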
2255 void MacroAssembler::InvokeCode(const Operand& code, | 2255 void MacroAssembler::InvokeCode(const Operand& code, |
2256 const ParameterCount& expected, | 2256 const ParameterCount& expected, |
2257 const ParameterCount& actual, | 2257 const ParameterCount& actual, |
2258 InvokeFlag flag, | 2258 InvokeFlag flag, |
2259 const CallWrapper& call_wrapper) { | 2259 const CallWrapper& call_wrapper) { |
2260 // You can't call a function without a valid frame. | 2260 // You can't call a function without a valid frame. |
2261 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 2261 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
2262 | 2262 |
2263 Label done; | 2263 Label done; |
2264 bool definitely_mismatches = false; | 2264 bool definitely_mismatches = false; |
2265 InvokePrologue(expected, actual, Handle<Code>::null(), code, | 2265 InvokePrologue(expected, actual, Handle<Code>::null(), code, |
2266 &done, &definitely_mismatches, flag, Label::kNear, | 2266 &done, &definitely_mismatches, flag, Label::kNear, |
2267 call_wrapper); | 2267 call_wrapper); |
2268 if (!definitely_mismatches) { | 2268 if (!definitely_mismatches) { |
2269 if (flag == CALL_FUNCTION) { | 2269 if (flag == CALL_FUNCTION) { |
2270 call_wrapper.BeforeCall(CallSize(code)); | 2270 call_wrapper.BeforeCall(CallSize(code)); |
2271 call(code); | 2271 call(code); |
2272 call_wrapper.AfterCall(); | 2272 call_wrapper.AfterCall(); |
2273 } else { | 2273 } else { |
2274 ASSERT(flag == JUMP_FUNCTION); | 2274 DCHECK(flag == JUMP_FUNCTION); |
2275 jmp(code); | 2275 jmp(code); |
2276 } | 2276 } |
2277 bind(&done); | 2277 bind(&done); |
2278 } | 2278 } |
2279 } | 2279 } |
2280 | 2280 |
2281 | 2281 |
2282 void MacroAssembler::InvokeFunction(Register fun, | 2282 void MacroAssembler::InvokeFunction(Register fun, |
2283 const ParameterCount& actual, | 2283 const ParameterCount& actual, |
2284 InvokeFlag flag, | 2284 InvokeFlag flag, |
2285 const CallWrapper& call_wrapper) { | 2285 const CallWrapper& call_wrapper) { |
2286 // You can't call a function without a valid frame. | 2286 // You can't call a function without a valid frame. |
2287 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 2287 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
2288 | 2288 |
2289 ASSERT(fun.is(edi)); | 2289 DCHECK(fun.is(edi)); |
2290 mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); | 2290 mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); |
2291 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); | 2291 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); |
2292 mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); | 2292 mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); |
2293 SmiUntag(ebx); | 2293 SmiUntag(ebx); |
2294 | 2294 |
2295 ParameterCount expected(ebx); | 2295 ParameterCount expected(ebx); |
2296 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), | 2296 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), |
2297 expected, actual, flag, call_wrapper); | 2297 expected, actual, flag, call_wrapper); |
2298 } | 2298 } |
2299 | 2299 |
2300 | 2300 |
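SmiUntag above converts the smi-tagged formal parameter count into a raw integer. On ia32 a smi stores its value in the upper 31 bits over a zero tag bit, so tagging and untagging are one-bit shifts. A sketch, assuming the standard 31-bit smi layout:

#include <cstdint>

int32_t SmiTag(int32_t value) { return value << 1; }  // tag bit is 0
int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // arithmetic shift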
2301 void MacroAssembler::InvokeFunction(Register fun, | 2301 void MacroAssembler::InvokeFunction(Register fun, |
2302 const ParameterCount& expected, | 2302 const ParameterCount& expected, |
2303 const ParameterCount& actual, | 2303 const ParameterCount& actual, |
2304 InvokeFlag flag, | 2304 InvokeFlag flag, |
2305 const CallWrapper& call_wrapper) { | 2305 const CallWrapper& call_wrapper) { |
2306 // You can't call a function without a valid frame. | 2306 // You can't call a function without a valid frame. |
2307 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 2307 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
2308 | 2308 |
2309 ASSERT(fun.is(edi)); | 2309 DCHECK(fun.is(edi)); |
2310 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); | 2310 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); |
2311 | 2311 |
2312 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), | 2312 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), |
2313 expected, actual, flag, call_wrapper); | 2313 expected, actual, flag, call_wrapper); |
2314 } | 2314 } |
2315 | 2315 |
2316 | 2316 |
2317 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | 2317 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
2318 const ParameterCount& expected, | 2318 const ParameterCount& expected, |
2319 const ParameterCount& actual, | 2319 const ParameterCount& actual, |
2320 InvokeFlag flag, | 2320 InvokeFlag flag, |
2321 const CallWrapper& call_wrapper) { | 2321 const CallWrapper& call_wrapper) { |
2322 LoadHeapObject(edi, function); | 2322 LoadHeapObject(edi, function); |
2323 InvokeFunction(edi, expected, actual, flag, call_wrapper); | 2323 InvokeFunction(edi, expected, actual, flag, call_wrapper); |
2324 } | 2324 } |
2325 | 2325 |
2326 | 2326 |
2327 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | 2327 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
2328 InvokeFlag flag, | 2328 InvokeFlag flag, |
2329 const CallWrapper& call_wrapper) { | 2329 const CallWrapper& call_wrapper) { |
2330 // You can't call a builtin without a valid frame. | 2330 // You can't call a builtin without a valid frame. |
2331 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 2331 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
2332 | 2332 |
2333 // Rely on the assertion to check that the number of provided | 2333 // Rely on the assertion to check that the number of provided |
2334 // arguments matches the expected number of arguments. Fake a | 2334 // arguments matches the expected number of arguments. Fake a |
2335 // parameter count to avoid emitting code to do the check. | 2335 // parameter count to avoid emitting code to do the check. |
2336 ParameterCount expected(0); | 2336 ParameterCount expected(0); |
2337 GetBuiltinFunction(edi, id); | 2337 GetBuiltinFunction(edi, id); |
2338 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), | 2338 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), |
2339 expected, expected, flag, call_wrapper); | 2339 expected, expected, flag, call_wrapper); |
2340 } | 2340 } |
2341 | 2341 |
2342 | 2342 |
2343 void MacroAssembler::GetBuiltinFunction(Register target, | 2343 void MacroAssembler::GetBuiltinFunction(Register target, |
2344 Builtins::JavaScript id) { | 2344 Builtins::JavaScript id) { |
2345 // Load the JavaScript builtin function from the builtins object. | 2345 // Load the JavaScript builtin function from the builtins object. |
2346 mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2346 mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2347 mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset)); | 2347 mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset)); |
2348 mov(target, FieldOperand(target, | 2348 mov(target, FieldOperand(target, |
2349 JSBuiltinsObject::OffsetOfFunctionWithId(id))); | 2349 JSBuiltinsObject::OffsetOfFunctionWithId(id))); |
2350 } | 2350 } |
2351 | 2351 |
2352 | 2352 |
2353 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { | 2353 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
2354 ASSERT(!target.is(edi)); | 2354 DCHECK(!target.is(edi)); |
2355 // Load the JavaScript builtin function from the builtins object. | 2355 // Load the JavaScript builtin function from the builtins object. |
2356 GetBuiltinFunction(edi, id); | 2356 GetBuiltinFunction(edi, id); |
2357 // Load the code entry point from the function into the target register. | 2357 // Load the code entry point from the function into the target register. |
2358 mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset)); | 2358 mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset)); |
2359 } | 2359 } |
2360 | 2360 |
2361 | 2361 |
2362 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 2362 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
2363 if (context_chain_length > 0) { | 2363 if (context_chain_length > 0) { |
2364 // Move up the chain of contexts to the context containing the slot. | 2364 // Move up the chain of contexts to the context containing the slot. |
(...skipping 92 matching lines...)
2457 | 2457 |
2458 Operand MacroAssembler::SafepointRegisterSlot(Register reg) { | 2458 Operand MacroAssembler::SafepointRegisterSlot(Register reg) { |
2459 return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); | 2459 return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); |
2460 } | 2460 } |
2461 | 2461 |
2462 | 2462 |
2463 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 2463 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
2464 // The registers are pushed starting with the lowest encoding, | 2464 // The registers are pushed starting with the lowest encoding, |
2465 // which means that lowest encodings are furthest away from | 2465 // which means that lowest encodings are furthest away from |
2466 // the stack pointer. | 2466 // the stack pointer. |
2467 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); | 2467 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters); |
2468 return kNumSafepointRegisters - reg_code - 1; | 2468 return kNumSafepointRegisters - reg_code - 1; |
2469 } | 2469 } |
2470 | 2470 |
2471 | 2471 |
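Since the safepoint registers are pushed starting with the lowest encoding, that register ends up deepest in the frame; the slot index just reverses the encoding. A worked sketch:

// With kNumSafepointRegisters == 8, eax (code 0) maps to slot 7, the slot
// farthest from esp, and edi (code 7) maps to slot 0, next to esp.
int SafepointSlotForCode(int reg_code, int num_registers) {
  return num_registers - reg_code - 1;
}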
2472 void MacroAssembler::LoadHeapObject(Register result, | 2472 void MacroAssembler::LoadHeapObject(Register result, |
2473 Handle<HeapObject> object) { | 2473 Handle<HeapObject> object) { |
2474 AllowDeferredHandleDereference embedding_raw_address; | 2474 AllowDeferredHandleDereference embedding_raw_address; |
2475 if (isolate()->heap()->InNewSpace(*object)) { | 2475 if (isolate()->heap()->InNewSpace(*object)) { |
2476 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 2476 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
2477 mov(result, Operand::ForCell(cell)); | 2477 mov(result, Operand::ForCell(cell)); |
(...skipping 37 matching lines...)
2515 pop(scratch); | 2515 pop(scratch); |
2516 add(esp, Immediate(bytes_dropped)); | 2516 add(esp, Immediate(bytes_dropped)); |
2517 push(scratch); | 2517 push(scratch); |
2518 ret(0); | 2518 ret(0); |
2519 } | 2519 } |
2520 } | 2520 } |
2521 | 2521 |
2522 | 2522 |
2523 void MacroAssembler::VerifyX87StackDepth(uint32_t depth) { | 2523 void MacroAssembler::VerifyX87StackDepth(uint32_t depth) { |
2524 // Make sure the floating point stack is either empty or has depth items. | 2524 // Make sure the floating point stack is either empty or has depth items. |
2525 ASSERT(depth <= 7); | 2525 DCHECK(depth <= 7); |
2526 // This is very expensive. | 2526 // This is very expensive. |
2527 ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts); | 2527 DCHECK(FLAG_debug_code && FLAG_enable_slow_asserts); |
2528 | 2528 |
2529 // The top-of-stack (tos) is 7 if there is one item pushed. | 2529 // The top-of-stack (tos) is 7 if there is one item pushed. |
2530 int tos = (8 - depth) % 8; | 2530 int tos = (8 - depth) % 8; |
2531 const int kTopMask = 0x3800; | 2531 const int kTopMask = 0x3800; |
2532 push(eax); | 2532 push(eax); |
2533 fwait(); | 2533 fwait(); |
2534 fnstsw_ax(); | 2534 fnstsw_ax(); |
2535 and_(eax, kTopMask); | 2535 and_(eax, kTopMask); |
2536 shr(eax, 11); | 2536 shr(eax, 11); |
2537 cmp(eax, Immediate(tos)); | 2537 cmp(eax, Immediate(tos)); |
(...skipping 32 matching lines...)
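The depth check decodes the TOP field (bits 11..13) of the x87 status word that fnstsw_ax leaves in eax. A sketch of the same arithmetic, assuming the status word has already been fetched:

#include <cstdint>

// TOP counts down from 8 as items are pushed: one item gives TOP == 7,
// and an empty stack gives TOP == 0, which maps back to depth 0.
int X87DepthFromStatusWord(uint16_t status_word) {
  const int kTopMask = 0x3800;  // bits 11..13
  int top = (status_word & kTopMask) >> 11;
  return (8 - top) % 8;
}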
2570 | 2570 |
2571 | 2571 |
2572 void MacroAssembler::SetCounter(StatsCounter* counter, int value) { | 2572 void MacroAssembler::SetCounter(StatsCounter* counter, int value) { |
2573 if (FLAG_native_code_counters && counter->Enabled()) { | 2573 if (FLAG_native_code_counters && counter->Enabled()) { |
2574 mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value)); | 2574 mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value)); |
2575 } | 2575 } |
2576 } | 2576 } |
2577 | 2577 |
2578 | 2578 |
2579 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) { | 2579 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) { |
2580 ASSERT(value > 0); | 2580 DCHECK(value > 0); |
2581 if (FLAG_native_code_counters && counter->Enabled()) { | 2581 if (FLAG_native_code_counters && counter->Enabled()) { |
2582 Operand operand = Operand::StaticVariable(ExternalReference(counter)); | 2582 Operand operand = Operand::StaticVariable(ExternalReference(counter)); |
2583 if (value == 1) { | 2583 if (value == 1) { |
2584 inc(operand); | 2584 inc(operand); |
2585 } else { | 2585 } else { |
2586 add(operand, Immediate(value)); | 2586 add(operand, Immediate(value)); |
2587 } | 2587 } |
2588 } | 2588 } |
2589 } | 2589 } |
2590 | 2590 |
2591 | 2591 |
2592 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { | 2592 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { |
2593 ASSERT(value > 0); | 2593 DCHECK(value > 0); |
2594 if (FLAG_native_code_counters && counter->Enabled()) { | 2594 if (FLAG_native_code_counters && counter->Enabled()) { |
2595 Operand operand = Operand::StaticVariable(ExternalReference(counter)); | 2595 Operand operand = Operand::StaticVariable(ExternalReference(counter)); |
2596 if (value == 1) { | 2596 if (value == 1) { |
2597 dec(operand); | 2597 dec(operand); |
2598 } else { | 2598 } else { |
2599 sub(operand, Immediate(value)); | 2599 sub(operand, Immediate(value)); |
2600 } | 2600 } |
2601 } | 2601 } |
2602 } | 2602 } |
2603 | 2603 |
2604 | 2604 |
2605 void MacroAssembler::IncrementCounter(Condition cc, | 2605 void MacroAssembler::IncrementCounter(Condition cc, |
2606 StatsCounter* counter, | 2606 StatsCounter* counter, |
2607 int value) { | 2607 int value) { |
2608 ASSERT(value > 0); | 2608 DCHECK(value > 0); |
2609 if (FLAG_native_code_counters && counter->Enabled()) { | 2609 if (FLAG_native_code_counters && counter->Enabled()) { |
2610 Label skip; | 2610 Label skip; |
2611 j(NegateCondition(cc), &skip); | 2611 j(NegateCondition(cc), &skip); |
2612 pushfd(); | 2612 pushfd(); |
2613 IncrementCounter(counter, value); | 2613 IncrementCounter(counter, value); |
2614 popfd(); | 2614 popfd(); |
2615 bind(&skip); | 2615 bind(&skip); |
2616 } | 2616 } |
2617 } | 2617 } |
2618 | 2618 |
2619 | 2619 |
2620 void MacroAssembler::DecrementCounter(Condition cc, | 2620 void MacroAssembler::DecrementCounter(Condition cc, |
2621 StatsCounter* counter, | 2621 StatsCounter* counter, |
2622 int value) { | 2622 int value) { |
2623 ASSERT(value > 0); | 2623 DCHECK(value > 0); |
2624 if (FLAG_native_code_counters && counter->Enabled()) { | 2624 if (FLAG_native_code_counters && counter->Enabled()) { |
2625 Label skip; | 2625 Label skip; |
2626 j(NegateCondition(cc), &skip); | 2626 j(NegateCondition(cc), &skip); |
2627 pushfd(); | 2627 pushfd(); |
2628 DecrementCounter(counter, value); | 2628 DecrementCounter(counter, value); |
2629 popfd(); | 2629 popfd(); |
2630 bind(&skip); | 2630 bind(&skip); |
2631 } | 2631 } |
2632 } | 2632 } |
2633 | 2633 |
(...skipping 28 matching lines...)
2662 Abort(reason); | 2662 Abort(reason); |
2663 // will not return here | 2663 // will not return here |
2664 bind(&L); | 2664 bind(&L); |
2665 } | 2665 } |
2666 | 2666 |
2667 | 2667 |
2668 void MacroAssembler::CheckStackAlignment() { | 2668 void MacroAssembler::CheckStackAlignment() { |
2669 int frame_alignment = base::OS::ActivationFrameAlignment(); | 2669 int frame_alignment = base::OS::ActivationFrameAlignment(); |
2670 int frame_alignment_mask = frame_alignment - 1; | 2670 int frame_alignment_mask = frame_alignment - 1; |
2671 if (frame_alignment > kPointerSize) { | 2671 if (frame_alignment > kPointerSize) { |
2672 ASSERT(IsPowerOf2(frame_alignment)); | 2672 DCHECK(IsPowerOf2(frame_alignment)); |
2673 Label alignment_as_expected; | 2673 Label alignment_as_expected; |
2674 test(esp, Immediate(frame_alignment_mask)); | 2674 test(esp, Immediate(frame_alignment_mask)); |
2675 j(zero, &alignment_as_expected); | 2675 j(zero, &alignment_as_expected); |
2676 // Abort if stack is not aligned. | 2676 // Abort if stack is not aligned. |
2677 int3(); | 2677 int3(); |
2678 bind(&alignment_as_expected); | 2678 bind(&alignment_as_expected); |
2679 } | 2679 } |
2680 } | 2680 } |
2681 | 2681 |
2682 | 2682 |
(...skipping 144 matching lines...)
2827 mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset)); | 2827 mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset)); |
2828 movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset)); | 2828 movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset)); |
2829 movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset)); | 2829 movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset)); |
2830 | 2830 |
2831 // Check that both are flat ASCII strings. | 2831 // Check that both are flat ASCII strings. |
2832 const int kFlatAsciiStringMask = | 2832 const int kFlatAsciiStringMask = |
2833 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; | 2833 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; |
2834 const int kFlatAsciiStringTag = | 2834 const int kFlatAsciiStringTag = |
2835 kStringTag | kOneByteStringTag | kSeqStringTag; | 2835 kStringTag | kOneByteStringTag | kSeqStringTag; |
2836 // Interleave bits from both instance types and compare them in one check. | 2836 // Interleave bits from both instance types and compare them in one check. |
2837 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); | 2837 DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); |
2838 and_(scratch1, kFlatAsciiStringMask); | 2838 and_(scratch1, kFlatAsciiStringMask); |
2839 and_(scratch2, kFlatAsciiStringMask); | 2839 and_(scratch2, kFlatAsciiStringMask); |
2840 lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); | 2840 lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); |
2841 cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3)); | 2841 cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3)); |
2842 j(not_equal, failure); | 2842 j(not_equal, failure); |
2843 } | 2843 } |
2844 | 2844 |
2845 | 2845 |
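The lea above packs both masked instance types into a single word as t1 + t2 * 8; the DCHECK_EQ guarantees the mask and its shifted copy share no bits, so one compare tests both strings. A sketch of the combined predicate (mask and tag stand in for kFlatAsciiStringMask and kFlatAsciiStringTag):

bool BothFlatOneByte(int type1, int type2, int mask, int tag) {
  int combined = (type1 & mask) + ((type2 & mask) << 3);  // the lea above
  return combined == (tag | (tag << 3));
}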
2846 void MacroAssembler::JumpIfNotUniqueName(Operand operand, | 2846 void MacroAssembler::JumpIfNotUniqueName(Operand operand, |
2847 Label* not_unique_name, | 2847 Label* not_unique_name, |
(...skipping 44 matching lines...)
2892 } | 2892 } |
2893 | 2893 |
2894 | 2894 |
2895 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { | 2895 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { |
2896 int frame_alignment = base::OS::ActivationFrameAlignment(); | 2896 int frame_alignment = base::OS::ActivationFrameAlignment(); |
2897 if (frame_alignment != 0) { | 2897 if (frame_alignment != 0) { |
2898 // Make stack end at alignment and make room for num_arguments words | 2898 // Make stack end at alignment and make room for num_arguments words |
2899 // and the original value of esp. | 2899 // and the original value of esp. |
2900 mov(scratch, esp); | 2900 mov(scratch, esp); |
2901 sub(esp, Immediate((num_arguments + 1) * kPointerSize)); | 2901 sub(esp, Immediate((num_arguments + 1) * kPointerSize)); |
2902 ASSERT(IsPowerOf2(frame_alignment)); | 2902 DCHECK(IsPowerOf2(frame_alignment)); |
2903 and_(esp, -frame_alignment); | 2903 and_(esp, -frame_alignment); |
2904 mov(Operand(esp, num_arguments * kPointerSize), scratch); | 2904 mov(Operand(esp, num_arguments * kPointerSize), scratch); |
2905 } else { | 2905 } else { |
2906 sub(esp, Immediate(num_arguments * kPointerSize)); | 2906 sub(esp, Immediate(num_arguments * kPointerSize)); |
2907 } | 2907 } |
2908 } | 2908 } |
2909 | 2909 |
2910 | 2910 |
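When the OS demands activation-frame alignment, PrepareCallCFunction reserves one extra slot and stashes the incoming esp there, which is why CallCFunction can restore it with a single load from num_arguments * kPointerSize. A sketch of the pointer arithmetic:

#include <cstdint>

// Returns the new stack pointer; the old one is saved in the slot just
// above the outgoing arguments. alignment must be a power of two.
uintptr_t PrepareCFrame(uintptr_t sp, int num_arguments, uintptr_t alignment) {
  uintptr_t old_sp = sp;
  sp -= (num_arguments + 1) * sizeof(void*);  // args + saved-esp slot
  sp &= ~(alignment - 1);                     // round down to alignment
  *reinterpret_cast<uintptr_t*>(sp + num_arguments * sizeof(void*)) = old_sp;
  return sp;
}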
2911 void MacroAssembler::CallCFunction(ExternalReference function, | 2911 void MacroAssembler::CallCFunction(ExternalReference function, |
2912 int num_arguments) { | 2912 int num_arguments) { |
2913 // Trashing eax is ok as it will be the return value. | 2913 // Trashing eax is ok as it will be the return value. |
2914 mov(eax, Immediate(function)); | 2914 mov(eax, Immediate(function)); |
2915 CallCFunction(eax, num_arguments); | 2915 CallCFunction(eax, num_arguments); |
2916 } | 2916 } |
2917 | 2917 |
2918 | 2918 |
2919 void MacroAssembler::CallCFunction(Register function, | 2919 void MacroAssembler::CallCFunction(Register function, |
2920 int num_arguments) { | 2920 int num_arguments) { |
2921 ASSERT(has_frame()); | 2921 DCHECK(has_frame()); |
2922 // Check stack alignment. | 2922 // Check stack alignment. |
2923 if (emit_debug_code()) { | 2923 if (emit_debug_code()) { |
2924 CheckStackAlignment(); | 2924 CheckStackAlignment(); |
2925 } | 2925 } |
2926 | 2926 |
2927 call(function); | 2927 call(function); |
2928 if (base::OS::ActivationFrameAlignment() != 0) { | 2928 if (base::OS::ActivationFrameAlignment() != 0) { |
2929 mov(esp, Operand(esp, num_arguments * kPointerSize)); | 2929 mov(esp, Operand(esp, num_arguments * kPointerSize)); |
2930 } else { | 2930 } else { |
2931 add(esp, Immediate(num_arguments * kPointerSize)); | 2931 add(esp, Immediate(num_arguments * kPointerSize)); |
(...skipping 30 matching lines...)
2962 #endif | 2962 #endif |
2963 | 2963 |
2964 | 2964 |
2965 CodePatcher::CodePatcher(byte* address, int size) | 2965 CodePatcher::CodePatcher(byte* address, int size) |
2966 : address_(address), | 2966 : address_(address), |
2967 size_(size), | 2967 size_(size), |
2968 masm_(NULL, address, size + Assembler::kGap) { | 2968 masm_(NULL, address, size + Assembler::kGap) { |
2969 // Create a new macro assembler pointing to the address of the code to patch. | 2969 // Create a new macro assembler pointing to the address of the code to patch. |
2970 // The size is adjusted with kGap in order for the assembler to generate size | 2970 // The size is adjusted with kGap in order for the assembler to generate size |
2971 // bytes of instructions without failing with buffer size constraints. | 2971 // bytes of instructions without failing with buffer size constraints. |
2972 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2972 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
2973 } | 2973 } |
2974 | 2974 |
2975 | 2975 |
2976 CodePatcher::~CodePatcher() { | 2976 CodePatcher::~CodePatcher() { |
2977 // Indicate that code has changed. | 2977 // Indicate that code has changed. |
2978 CpuFeatures::FlushICache(address_, size_); | 2978 CpuFeatures::FlushICache(address_, size_); |
2979 | 2979 |
2980 // Check that the code was patched as expected. | 2980 // Check that the code was patched as expected. |
2981 ASSERT(masm_.pc_ == address_ + size_); | 2981 DCHECK(masm_.pc_ == address_ + size_); |
2982 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2982 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
2983 } | 2983 } |
2984 | 2984 |
2985 | 2985 |
2986 void MacroAssembler::CheckPageFlag( | 2986 void MacroAssembler::CheckPageFlag( |
2987 Register object, | 2987 Register object, |
2988 Register scratch, | 2988 Register scratch, |
2989 int mask, | 2989 int mask, |
2990 Condition cc, | 2990 Condition cc, |
2991 Label* condition_met, | 2991 Label* condition_met, |
2992 Label::Distance condition_met_distance) { | 2992 Label::Distance condition_met_distance) { |
2993 ASSERT(cc == zero || cc == not_zero); | 2993 DCHECK(cc == zero || cc == not_zero); |
2994 if (scratch.is(object)) { | 2994 if (scratch.is(object)) { |
2995 and_(scratch, Immediate(~Page::kPageAlignmentMask)); | 2995 and_(scratch, Immediate(~Page::kPageAlignmentMask)); |
2996 } else { | 2996 } else { |
2997 mov(scratch, Immediate(~Page::kPageAlignmentMask)); | 2997 mov(scratch, Immediate(~Page::kPageAlignmentMask)); |
2998 and_(scratch, object); | 2998 and_(scratch, object); |
2999 } | 2999 } |
3000 if (mask < (1 << kBitsPerByte)) { | 3000 if (mask < (1 << kBitsPerByte)) { |
3001 test_b(Operand(scratch, MemoryChunk::kFlagsOffset), | 3001 test_b(Operand(scratch, MemoryChunk::kFlagsOffset), |
3002 static_cast<uint8_t>(mask)); | 3002 static_cast<uint8_t>(mask)); |
3003 } else { | 3003 } else { |
3004 test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); | 3004 test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); |
3005 } | 3005 } |
3006 j(cc, condition_met, condition_met_distance); | 3006 j(cc, condition_met, condition_met_distance); |
3007 } | 3007 } |
3008 | 3008 |
3009 | 3009 |
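Masking off the low page-alignment bits maps any heap address to its page header, where the flags word lives at a fixed offset. A hedged C++ sketch (the parameter names stand in for Page::kPageAlignmentMask and MemoryChunk::kFlagsOffset; a 32-bit flags word is assumed here):

#include <cstdint>

bool PageFlagSet(uintptr_t object, uintptr_t page_alignment_mask,
                 uintptr_t flags_offset, uint32_t mask) {
  uintptr_t page = object & ~page_alignment_mask;
  uint32_t flags = *reinterpret_cast<uint32_t*>(page + flags_offset);
  return (flags & mask) != 0;
}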
3010 void MacroAssembler::CheckPageFlagForMap( | 3010 void MacroAssembler::CheckPageFlagForMap( |
3011 Handle<Map> map, | 3011 Handle<Map> map, |
3012 int mask, | 3012 int mask, |
3013 Condition cc, | 3013 Condition cc, |
3014 Label* condition_met, | 3014 Label* condition_met, |
3015 Label::Distance condition_met_distance) { | 3015 Label::Distance condition_met_distance) { |
3016 ASSERT(cc == zero || cc == not_zero); | 3016 DCHECK(cc == zero || cc == not_zero); |
3017 Page* page = Page::FromAddress(map->address()); | 3017 Page* page = Page::FromAddress(map->address()); |
3018 ExternalReference reference(ExternalReference::page_flags(page)); | 3018 ExternalReference reference(ExternalReference::page_flags(page)); |
3019 // The inlined static address check of the page's flags relies | 3019 // The inlined static address check of the page's flags relies |
3020 // on maps never being compacted. | 3020 // on maps never being compacted. |
3021 ASSERT(!isolate()->heap()->mark_compact_collector()-> | 3021 DCHECK(!isolate()->heap()->mark_compact_collector()-> |
3022 IsOnEvacuationCandidate(*map)); | 3022 IsOnEvacuationCandidate(*map)); |
3023 if (mask < (1 << kBitsPerByte)) { | 3023 if (mask < (1 << kBitsPerByte)) { |
3024 test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask)); | 3024 test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask)); |
3025 } else { | 3025 } else { |
3026 test(Operand::StaticVariable(reference), Immediate(mask)); | 3026 test(Operand::StaticVariable(reference), Immediate(mask)); |
3027 } | 3027 } |
3028 j(cc, condition_met, condition_met_distance); | 3028 j(cc, condition_met, condition_met_distance); |
3029 } | 3029 } |
3030 | 3030 |
3031 | 3031 |
(...skipping 10 matching lines...)
3042 | 3042 |
3043 | 3043 |
3044 void MacroAssembler::JumpIfBlack(Register object, | 3044 void MacroAssembler::JumpIfBlack(Register object, |
3045 Register scratch0, | 3045 Register scratch0, |
3046 Register scratch1, | 3046 Register scratch1, |
3047 Label* on_black, | 3047 Label* on_black, |
3048 Label::Distance on_black_near) { | 3048 Label::Distance on_black_near) { |
3049 HasColor(object, scratch0, scratch1, | 3049 HasColor(object, scratch0, scratch1, |
3050 on_black, on_black_near, | 3050 on_black, on_black_near, |
3051 1, 0); // kBlackBitPattern. | 3051 1, 0); // kBlackBitPattern. |
3052 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | 3052 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
3053 } | 3053 } |
3054 | 3054 |
3055 | 3055 |
3056 void MacroAssembler::HasColor(Register object, | 3056 void MacroAssembler::HasColor(Register object, |
3057 Register bitmap_scratch, | 3057 Register bitmap_scratch, |
3058 Register mask_scratch, | 3058 Register mask_scratch, |
3059 Label* has_color, | 3059 Label* has_color, |
3060 Label::Distance has_color_distance, | 3060 Label::Distance has_color_distance, |
3061 int first_bit, | 3061 int first_bit, |
3062 int second_bit) { | 3062 int second_bit) { |
3063 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx)); | 3063 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx)); |
3064 | 3064 |
3065 GetMarkBits(object, bitmap_scratch, mask_scratch); | 3065 GetMarkBits(object, bitmap_scratch, mask_scratch); |
3066 | 3066 |
3067 Label other_color, word_boundary; | 3067 Label other_color, word_boundary; |
3068 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3068 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3069 j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear); | 3069 j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear); |
3070 add(mask_scratch, mask_scratch); // Shift left 1 by adding. | 3070 add(mask_scratch, mask_scratch); // Shift left 1 by adding. |
3071 j(zero, &word_boundary, Label::kNear); | 3071 j(zero, &word_boundary, Label::kNear); |
3072 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3072 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3073 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); | 3073 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); |
3074 jmp(&other_color, Label::kNear); | 3074 jmp(&other_color, Label::kNear); |
3075 | 3075 |
3076 bind(&word_boundary); | 3076 bind(&word_boundary); |
3077 test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1); | 3077 test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1); |
3078 | 3078 |
3079 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); | 3079 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); |
3080 bind(&other_color); | 3080 bind(&other_color); |
3081 } | 3081 } |
3082 | 3082 |
3083 | 3083 |
3084 void MacroAssembler::GetMarkBits(Register addr_reg, | 3084 void MacroAssembler::GetMarkBits(Register addr_reg, |
3085 Register bitmap_reg, | 3085 Register bitmap_reg, |
3086 Register mask_reg) { | 3086 Register mask_reg) { |
3087 ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx)); | 3087 DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx)); |
3088 mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); | 3088 mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); |
3089 and_(bitmap_reg, addr_reg); | 3089 and_(bitmap_reg, addr_reg); |
3090 mov(ecx, addr_reg); | 3090 mov(ecx, addr_reg); |
3091 int shift = | 3091 int shift = |
3092 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; | 3092 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; |
3093 shr(ecx, shift); | 3093 shr(ecx, shift); |
3094 and_(ecx, | 3094 and_(ecx, |
3095 (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1)); | 3095 (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1)); |
3096 | 3096 |
3097 add(bitmap_reg, ecx); | 3097 add(bitmap_reg, ecx); |
3098 mov(ecx, addr_reg); | 3098 mov(ecx, addr_reg); |
3099 shr(ecx, kPointerSizeLog2); | 3099 shr(ecx, kPointerSizeLog2); |
3100 and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1); | 3100 and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1); |
3101 mov(mask_reg, Immediate(1)); | 3101 mov(mask_reg, Immediate(1)); |
3102 shl_cl(mask_reg); | 3102 shl_cl(mask_reg); |
3103 } | 3103 } |
3104 | 3104 |
3105 | 3105 |
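The marking bitmap keeps one bit per pointer-sized word, grouped into 32-bit cells; the shift folds the word-index-to-cell-offset conversion into one operation. An equivalent sketch in C++ (constants assume 4-byte pointers and 32-bit cells, matching the Bitmap:: values on ia32):

#include <cstdint>

enum { kPtrLog2 = 2, kBitsPerCellLog2 = 5, kBytesPerCell = 4 };

void MarkBitsFor(uintptr_t addr, uintptr_t page_mask, uintptr_t bitmap_offset,
                 uintptr_t* cell, uint32_t* mask) {
  uintptr_t page = addr & ~page_mask;
  // Word index within the page, scaled down to a cell byte offset.
  uintptr_t cell_offset =
      ((addr & page_mask) >> (kBitsPerCellLog2 + kPtrLog2)) * kBytesPerCell;
  *cell = page + bitmap_offset + cell_offset;
  // Bit position of this word inside its cell.
  *mask = 1u << ((addr >> kPtrLog2) & ((1u << kBitsPerCellLog2) - 1));
}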
3106 void MacroAssembler::EnsureNotWhite( | 3106 void MacroAssembler::EnsureNotWhite( |
3107 Register value, | 3107 Register value, |
3108 Register bitmap_scratch, | 3108 Register bitmap_scratch, |
3109 Register mask_scratch, | 3109 Register mask_scratch, |
3110 Label* value_is_white_and_not_data, | 3110 Label* value_is_white_and_not_data, |
3111 Label::Distance distance) { | 3111 Label::Distance distance) { |
3112 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx)); | 3112 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx)); |
3113 GetMarkBits(value, bitmap_scratch, mask_scratch); | 3113 GetMarkBits(value, bitmap_scratch, mask_scratch); |
3114 | 3114 |
3115 // If the value is black or grey we don't need to do anything. | 3115 // If the value is black or grey we don't need to do anything. |
3116 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); | 3116 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
3117 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | 3117 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
3118 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); | 3118 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
3119 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | 3119 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
3120 | 3120 |
3121 Label done; | 3121 Label done; |
3122 | 3122 |
3123 // Since both black and grey have a 1 in the first position and white does | 3123 // Since both black and grey have a 1 in the first position and white does |
3124 // not have a 1 there, we only need to check one bit. | 3124 // not have a 1 there, we only need to check one bit. |
3125 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3125 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3126 j(not_zero, &done, Label::kNear); | 3126 j(not_zero, &done, Label::kNear); |
3127 | 3127 |
3128 if (emit_debug_code()) { | 3128 if (emit_debug_code()) { |
3129 // Check for impossible bit pattern. | 3129 // Check for impossible bit pattern. |
(...skipping 17 matching lines...) Expand all Loading... |
3147 | 3147 |
3148 // Check for heap-number | 3148 // Check for heap-number |
3149 mov(map, FieldOperand(value, HeapObject::kMapOffset)); | 3149 mov(map, FieldOperand(value, HeapObject::kMapOffset)); |
3150 cmp(map, isolate()->factory()->heap_number_map()); | 3150 cmp(map, isolate()->factory()->heap_number_map()); |
3151 j(not_equal, ¬_heap_number, Label::kNear); | 3151 j(not_equal, ¬_heap_number, Label::kNear); |
3152 mov(length, Immediate(HeapNumber::kSize)); | 3152 mov(length, Immediate(HeapNumber::kSize)); |
3153 jmp(&is_data_object, Label::kNear); | 3153 jmp(&is_data_object, Label::kNear); |
3154 | 3154 |
3155 bind(¬_heap_number); | 3155 bind(¬_heap_number); |
3156 // Check for strings. | 3156 // Check for strings. |
3157 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 3157 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
3158 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 3158 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
3159 // If it's a string and it's not a cons string then it's an object containing | 3159 // If it's a string and it's not a cons string then it's an object containing |
3160 // no GC pointers. | 3160 // no GC pointers. |
3161 Register instance_type = ecx; | 3161 Register instance_type = ecx; |
3162 movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); | 3162 movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); |
3163 test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask); | 3163 test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask); |
3164 j(not_zero, value_is_white_and_not_data); | 3164 j(not_zero, value_is_white_and_not_data); |
3165 // It's a non-indirect (non-cons and non-slice) string. | 3165 // It's a non-indirect (non-cons and non-slice) string. |
3166 // If it's external, the length is just ExternalString::kSize. | 3166 // If it's external, the length is just ExternalString::kSize. |
3167 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). | 3167 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). |
3168 Label not_external; | 3168 Label not_external; |
3169 // External strings are the only ones with the kExternalStringTag bit | 3169 // External strings are the only ones with the kExternalStringTag bit |
3170 // set. | 3170 // set. |
3171 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); | 3171 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); |
3172 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); | 3172 DCHECK_EQ(0, kConsStringTag & kExternalStringTag); |
3173 test_b(instance_type, kExternalStringTag); | 3173 test_b(instance_type, kExternalStringTag); |
3174 j(zero, ¬_external, Label::kNear); | 3174 j(zero, ¬_external, Label::kNear); |
3175 mov(length, Immediate(ExternalString::kSize)); | 3175 mov(length, Immediate(ExternalString::kSize)); |
3176 jmp(&is_data_object, Label::kNear); | 3176 jmp(&is_data_object, Label::kNear); |
3177 | 3177 |
3178 bind(¬_external); | 3178 bind(¬_external); |
3179 // Sequential string, either ASCII or UC16. | 3179 // Sequential string, either ASCII or UC16. |
3180 ASSERT(kOneByteStringTag == 0x04); | 3180 DCHECK(kOneByteStringTag == 0x04); |
3181 and_(length, Immediate(kStringEncodingMask)); | 3181 and_(length, Immediate(kStringEncodingMask)); |
3182 xor_(length, Immediate(kStringEncodingMask)); | 3182 xor_(length, Immediate(kStringEncodingMask)); |
3183 add(length, Immediate(0x04)); | 3183 add(length, Immediate(0x04)); |
3184 // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted | 3184 // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted |
3185 // by 2. If we multiply the string length as smi by this, it still | 3185 // by 2. If we multiply the string length as smi by this, it still |
3186 // won't overflow a 32-bit value. | 3186 // won't overflow a 32-bit value. |
3187 ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize); | 3187 DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize); |
3188 ASSERT(SeqOneByteString::kMaxSize <= | 3188 DCHECK(SeqOneByteString::kMaxSize <= |
3189 static_cast<int>(0xffffffffu >> (2 + kSmiTagSize))); | 3189 static_cast<int>(0xffffffffu >> (2 + kSmiTagSize))); |
3190 imul(length, FieldOperand(value, String::kLengthOffset)); | 3190 imul(length, FieldOperand(value, String::kLengthOffset)); |
3191 shr(length, 2 + kSmiTagSize + kSmiShiftSize); | 3191 shr(length, 2 + kSmiTagSize + kSmiShiftSize); |
3192 add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); | 3192 add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); |
3193 and_(length, Immediate(~kObjectAlignmentMask)); | 3193 and_(length, Immediate(~kObjectAlignmentMask)); |
3194 | 3194 |
3195 bind(&is_data_object); | 3195 bind(&is_data_object); |
3196 // Value is a data object, and it is white. Mark it black. Since we know | 3196 // Value is a data object, and it is white. Mark it black. Since we know |
3197 // that the object is white we can make it black by flipping one bit. | 3197 // that the object is white we can make it black by flipping one bit. |
3198 or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); | 3198 or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); |
(...skipping 79 matching lines...)
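For the sequential-string case above, the encoding bit is massaged into a char size scaled by 4, so multiplying by the smi-tagged length (the value shifted left by one) and shifting right by 3 yields the byte length directly. A sketch, assuming kStringEncodingMask == kOneByteStringTag == 0x04 and a one-bit smi tag, as the surrounding DCHECKs indicate:

// header_size and alignment_mask stand in for SeqString::kHeaderSize and
// kObjectAlignmentMask.
int SeqStringAllocationSize(int instance_type, int smi_length,
                            int header_size, int alignment_mask) {
  // 4 for one-byte strings, 8 for two-byte: char size shifted left by 2.
  int scaled = ((instance_type & 0x04) ^ 0x04) + 4;
  // (char_size << 2) * (length << 1) >> 3 == char_size * length.
  int byte_length = (scaled * smi_length) >> 3;
  return (byte_length + header_size + alignment_mask) & ~alignment_mask;
}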
3278 cmp(MemOperand(scratch_reg, -AllocationMemento::kSize), | 3278 cmp(MemOperand(scratch_reg, -AllocationMemento::kSize), |
3279 Immediate(isolate()->factory()->allocation_memento_map())); | 3279 Immediate(isolate()->factory()->allocation_memento_map())); |
3280 } | 3280 } |
3281 | 3281 |
3282 | 3282 |
3283 void MacroAssembler::JumpIfDictionaryInPrototypeChain( | 3283 void MacroAssembler::JumpIfDictionaryInPrototypeChain( |
3284 Register object, | 3284 Register object, |
3285 Register scratch0, | 3285 Register scratch0, |
3286 Register scratch1, | 3286 Register scratch1, |
3287 Label* found) { | 3287 Label* found) { |
3288 ASSERT(!scratch1.is(scratch0)); | 3288 DCHECK(!scratch1.is(scratch0)); |
3289 Factory* factory = isolate()->factory(); | 3289 Factory* factory = isolate()->factory(); |
3290 Register current = scratch0; | 3290 Register current = scratch0; |
3291 Label loop_again; | 3291 Label loop_again; |
3292 | 3292 |
3293 // Start the walk at the object itself. | 3293 // Start the walk at the object itself. |
3294 mov(current, object); | 3294 mov(current, object); |
3295 | 3295 |
3296 // Loop based on the map going up the prototype chain. | 3296 // Loop based on the map going up the prototype chain. |
3297 bind(&loop_again); | 3297 bind(&loop_again); |
3298 mov(current, FieldOperand(current, HeapObject::kMapOffset)); | 3298 mov(current, FieldOperand(current, HeapObject::kMapOffset)); |
3299 mov(scratch1, FieldOperand(current, Map::kBitField2Offset)); | 3299 mov(scratch1, FieldOperand(current, Map::kBitField2Offset)); |
3300 DecodeField<Map::ElementsKindBits>(scratch1); | 3300 DecodeField<Map::ElementsKindBits>(scratch1); |
3301 cmp(scratch1, Immediate(DICTIONARY_ELEMENTS)); | 3301 cmp(scratch1, Immediate(DICTIONARY_ELEMENTS)); |
3302 j(equal, found); | 3302 j(equal, found); |
3303 mov(current, FieldOperand(current, Map::kPrototypeOffset)); | 3303 mov(current, FieldOperand(current, Map::kPrototypeOffset)); |
3304 cmp(current, Immediate(factory->null_value())); | 3304 cmp(current, Immediate(factory->null_value())); |
3305 j(not_equal, &loop_again); | 3305 j(not_equal, &loop_again); |
3306 } | 3306 } |
3307 | 3307 |
3308 | 3308 |
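In object terms, the loop above loads each map, decodes its elements kind, and follows the prototype link until it reaches null. A self-contained pseudo-C++ rendering (the Object/Map structs are stand-ins for the real heap classes):

enum ElementsKind { DICTIONARY_ELEMENTS, OTHER_ELEMENTS };
struct Map;
struct Object { Map* map; };
struct Map { ElementsKind elements_kind; Object* prototype; };

bool HasDictionaryElementsInChain(Object* object, Object* null_value) {
  for (Object* current = object; current != null_value;
       current = current->map->prototype) {
    if (current->map->elements_kind == DICTIONARY_ELEMENTS) return true;
  }
  return false;
}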
3309 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { | 3309 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { |
3310 ASSERT(!dividend.is(eax)); | 3310 DCHECK(!dividend.is(eax)); |
3311 ASSERT(!dividend.is(edx)); | 3311 DCHECK(!dividend.is(edx)); |
3312 MultiplierAndShift ms(divisor); | 3312 MultiplierAndShift ms(divisor); |
3313 mov(eax, Immediate(ms.multiplier())); | 3313 mov(eax, Immediate(ms.multiplier())); |
3314 imul(dividend); | 3314 imul(dividend); |
3315 if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend); | 3315 if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend); |
3316 if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend); | 3316 if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend); |
3317 if (ms.shift() > 0) sar(edx, ms.shift()); | 3317 if (ms.shift() > 0) sar(edx, ms.shift()); |
3318 mov(eax, dividend); | 3318 mov(eax, dividend); |
3319 shr(eax, 31); | 3319 shr(eax, 31); |
3320 add(edx, eax); | 3320 add(edx, eax); |
3321 } | 3321 } |
3322 | 3322 |
3323 | 3323 |
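TruncatingDiv is the classic multiply-and-shift replacement for division by a compile-time constant: imul leaves the high half of the 64-bit product in edx, an optional add/sub corrects for the multiplier's sign, and adding the dividend's sign bit (the shr/add pair) turns the floor result into truncation toward zero. A worked sketch for divisor 3, whose well-known magic values are multiplier 0x55555556 and shift 0 (taken from the standard construction, not from MultiplierAndShift itself):

#include <cstdint>

int32_t DivideBy3(int32_t n) {
  // High 32 bits of the product, as imul leaves them in edx.
  int32_t hi = static_cast<int32_t>((INT64_C(0x55555556) * n) >> 32);
  return hi + (static_cast<uint32_t>(n) >> 31);  // e.g. 7 -> 2, -7 -> -2
}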
3324 } } // namespace v8::internal | 3324 } } // namespace v8::internal |
3325 | 3325 |
3326 #endif // V8_TARGET_ARCH_X87 | 3326 #endif // V8_TARGET_ARCH_X87 |