OLD | NEW |
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 67 matching lines...)
78 CompareRoot(rsp, Heap::kStackLimitRootIndex); | 78 CompareRoot(rsp, Heap::kStackLimitRootIndex); |
79 j(below, on_stack_overflow); | 79 j(below, on_stack_overflow); |
80 } | 80 } |
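For readers new to this guard: `below` is an unsigned comparison and the stack grows toward lower addresses on x64, so the branch fires once rsp has passed the limit held in the root array. A minimal sketch of the predicate (illustrative names, not the V8 API):

  #include <cstdint>

  // Models CompareRoot(rsp, Heap::kStackLimitRootIndex) + j(below, ...).
  // "below" is unsigned less-than; the stack grows downward.
  bool StackOverflowed(uintptr_t rsp, uintptr_t stack_limit) {
    return rsp < stack_limit;
  }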
81 | 81 |
82 | 82 |
83 void MacroAssembler::RecordWriteHelper(Register object, | 83 void MacroAssembler::RecordWriteHelper(Register object, |
84 Register addr, | 84 Register addr, |
85 Register scratch) { | 85 Register scratch) { |
86 if (FLAG_debug_code) { | 86 if (FLAG_debug_code) { |
87 // Check that the object is not in new space. | 87 // Check that the object is not in new space. |
88 Label not_in_new_space; | 88 NearLabel not_in_new_space; |
89 InNewSpace(object, scratch, not_equal, &not_in_new_space); | 89 InNewSpace(object, scratch, not_equal, &not_in_new_space); |
90 Abort("new-space object passed to RecordWriteHelper"); | 90 Abort("new-space object passed to RecordWriteHelper"); |
91 bind(&not_in_new_space); | 91 bind(&not_in_new_space); |
92 } | 92 } |
93 | 93 |
94 // Compute the page start address from the heap object pointer, and reuse | 94 // Compute the page start address from the heap object pointer, and reuse |
95 // the 'object' register for it. | 95 // the 'object' register for it. |
96 and_(object, Immediate(~Page::kPageAlignmentMask)); | 96 and_(object, Immediate(~Page::kPageAlignmentMask)); |
97 | 97 |
98 // Compute the number of the region covering addr. See Page::GetRegionNumberForAddress. | 98 // Compute the number of the region covering addr. See Page::GetRegionNumberForAddress. |
(...skipping 65 matching lines...)
164 } | 164 } |
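A note on the masking in RecordWriteHelper above: pages are power-of-two aligned, so clearing the low alignment bits of any interior pointer yields the page start. A hedged sketch of the effect of `and_(object, Immediate(~Page::kPageAlignmentMask))`:

  #include <cstdint>

  // Illustrative only: page_alignment_mask is assumed to be page_size - 1.
  uintptr_t PageStart(uintptr_t interior_pointer,
                      uintptr_t page_alignment_mask) {
    return interior_pointer & ~page_alignment_mask;
  }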
165 | 165 |
166 | 166 |
167 void MacroAssembler::RecordWriteNonSmi(Register object, | 167 void MacroAssembler::RecordWriteNonSmi(Register object, |
168 int offset, | 168 int offset, |
169 Register scratch, | 169 Register scratch, |
170 Register index) { | 170 Register index) { |
171 Label done; | 171 Label done; |
172 | 172 |
173 if (FLAG_debug_code) { | 173 if (FLAG_debug_code) { |
174 Label okay; | 174 NearLabel okay; |
175 JumpIfNotSmi(object, &okay); | 175 JumpIfNotSmi(object, &okay); |
176 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); | 176 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); |
177 bind(&okay); | 177 bind(&okay); |
178 | 178 |
179 if (offset == 0) { | 179 if (offset == 0) { |
180 // index must be int32. | 180 // index must be int32. |
181 Register tmp = index.is(rax) ? rbx : rax; | 181 Register tmp = index.is(rax) ? rbx : rax; |
182 push(tmp); | 182 push(tmp); |
183 movl(tmp, index); | 183 movl(tmp, index); |
184 cmpq(tmp, index); | 184 cmpq(tmp, index); |
(...skipping 29 matching lines...)
214 | 214 |
215 // Clobber all input registers when running with the debug-code flag | 215 // Clobber all input registers when running with the debug-code flag |
216 // turned on to provoke errors. | 216 // turned on to provoke errors. |
217 if (FLAG_debug_code) { | 217 if (FLAG_debug_code) { |
218 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 218 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
219 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 219 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
220 movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 220 movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
221 } | 221 } |
222 } | 222 } |
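The debug check near the top of RecordWriteNonSmi (`movl(tmp, index); cmpq(tmp, index);`) is a width test: movl zero-extends the low 32 bits into tmp, so the cmpq sets `equal` exactly when index had no high bits. Sketch of the predicate (illustrative):

  #include <cstdint>

  bool FitsInLow32Bits(uint64_t index) {
    uint64_t tmp = static_cast<uint32_t>(index);  // what movl leaves in tmp
    return tmp == index;                          // what cmpq tests
  }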
223 | 223 |
224 | |
225 void MacroAssembler::InNewSpace(Register object, | |
226 Register scratch, | |
227 Condition cc, | |
228 Label* branch) { | |
229 if (Serializer::enabled()) { | |
230 // Can't do arithmetic on external references if it might get serialized. | |
231 // The mask isn't really an address. We load it as an external reference in | |
232 // case the size of the new space is different between the snapshot maker | |
233 // and the running system. | |
234 if (scratch.is(object)) { | |
235 movq(kScratchRegister, ExternalReference::new_space_mask()); | |
236 and_(scratch, kScratchRegister); | |
237 } else { | |
238 movq(scratch, ExternalReference::new_space_mask()); | |
239 and_(scratch, object); | |
240 } | |
241 movq(kScratchRegister, ExternalReference::new_space_start()); | |
242 cmpq(scratch, kScratchRegister); | |
243 j(cc, branch); | |
244 } else { | |
245 ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask()))); | |
246 intptr_t new_space_start = | |
247 reinterpret_cast<intptr_t>(Heap::NewSpaceStart()); | |
248 movq(kScratchRegister, -new_space_start, RelocInfo::NONE); | |
249 if (scratch.is(object)) { | |
250 addq(scratch, kScratchRegister); | |
251 } else { | |
252 lea(scratch, Operand(object, kScratchRegister, times_1, 0)); | |
253 } | |
254 and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask()))); | |
255 j(cc, branch); | |
256 } | |
257 } | |
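Both paths of InNewSpace above are equivalent formulations of one predicate: an address lies in new space exactly when subtracting the space's start leaves no bits under the mask. A hedged sketch, assuming the mask keeps the bits above the power-of-two space size (roughly ~(size - 1)):

  #include <cstdint>

  // Illustrative only; names taken from the surrounding code.
  bool IsInNewSpace(uintptr_t addr, uintptr_t new_space_start,
                    uintptr_t new_space_mask) {
    return ((addr - new_space_start) & new_space_mask) == 0;
  }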
258 | |
259 | |
260 void MacroAssembler::Assert(Condition cc, const char* msg) { | 224 void MacroAssembler::Assert(Condition cc, const char* msg) { |
261 if (FLAG_debug_code) Check(cc, msg); | 225 if (FLAG_debug_code) Check(cc, msg); |
262 } | 226 } |
263 | 227 |
264 | 228 |
265 void MacroAssembler::AssertFastElements(Register elements) { | 229 void MacroAssembler::AssertFastElements(Register elements) { |
266 if (FLAG_debug_code) { | 230 if (FLAG_debug_code) { |
267 Label ok; | 231 NearLabel ok; |
268 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset), | 232 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset), |
269 Heap::kFixedArrayMapRootIndex); | 233 Heap::kFixedArrayMapRootIndex); |
270 j(equal, &ok); | 234 j(equal, &ok); |
271 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset), | 235 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset), |
272 Heap::kFixedCOWArrayMapRootIndex); | 236 Heap::kFixedCOWArrayMapRootIndex); |
273 j(equal, &ok); | 237 j(equal, &ok); |
274 Abort("JSObject with fast elements map has slow elements"); | 238 Abort("JSObject with fast elements map has slow elements"); |
275 bind(&ok); | 239 bind(&ok); |
276 } | 240 } |
277 } | 241 } |
278 | 242 |
279 | 243 |
280 void MacroAssembler::Check(Condition cc, const char* msg) { | 244 void MacroAssembler::Check(Condition cc, const char* msg) { |
281 Label L; | 245 NearLabel L; |
282 j(cc, &L); | 246 j(cc, &L); |
283 Abort(msg); | 247 Abort(msg); |
284 // will not return here | 248 // will not return here |
285 bind(&L); | 249 bind(&L); |
286 } | 250 } |
287 | 251 |
288 | 252 |
289 void MacroAssembler::CheckStackAlignment() { | 253 void MacroAssembler::CheckStackAlignment() { |
290 int frame_alignment = OS::ActivationFrameAlignment(); | 254 int frame_alignment = OS::ActivationFrameAlignment(); |
291 int frame_alignment_mask = frame_alignment - 1; | 255 int frame_alignment_mask = frame_alignment - 1; |
292 if (frame_alignment > kPointerSize) { | 256 if (frame_alignment > kPointerSize) { |
293 ASSERT(IsPowerOf2(frame_alignment)); | 257 ASSERT(IsPowerOf2(frame_alignment)); |
294 Label alignment_as_expected; | 258 NearLabel alignment_as_expected; |
295 testq(rsp, Immediate(frame_alignment_mask)); | 259 testq(rsp, Immediate(frame_alignment_mask)); |
296 j(zero, &alignment_as_expected); | 260 j(zero, &alignment_as_expected); |
297 // Abort if stack is not aligned. | 261 // Abort if stack is not aligned. |
298 int3(); | 262 int3(); |
299 bind(&alignment_as_expected); | 263 bind(&alignment_as_expected); |
300 } | 264 } |
301 } | 265 } |
302 | 266 |
303 | 267 |
304 void MacroAssembler::NegativeZeroTest(Register result, | 268 void MacroAssembler::NegativeZeroTest(Register result, |
305 Register op, | 269 Register op, |
306 Label* then_label) { | 270 Label* then_label) { |
307 Label ok; | 271 NearLabel ok; |
308 testl(result, result); | 272 testl(result, result); |
309 j(not_zero, &ok); | 273 j(not_zero, &ok); |
310 testl(op, op); | 274 testl(op, op); |
311 j(sign, then_label); | 275 j(sign, then_label); |
312 bind(&ok); | 276 bind(&ok); |
313 } | 277 } |
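NegativeZeroTest encodes the rule that an integer zero computed from a negative operand stands for an unrepresentable -0.0. A sketch of the condition it branches on (illustrative):

  #include <cstdint>

  // The macro jumps to then_label exactly when this predicate holds.
  bool WouldBeNegativeZero(int32_t result, int32_t other_operand) {
    return result == 0 && other_operand < 0;
  }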
314 | 278 |
315 | 279 |
316 void MacroAssembler::Abort(const char* msg) { | 280 void MacroAssembler::Abort(const char* msg) { |
317 // We want to pass the msg string like a smi to avoid GC | 281 // We want to pass the msg string like a smi to avoid GC |
(...skipping 317 matching lines...)
635 movq(dst, Immediate(static_cast<int32_t>(x))); | 599 movq(dst, Immediate(static_cast<int32_t>(x))); |
636 } else { | 600 } else { |
637 movq(kScratchRegister, x, RelocInfo::NONE); | 601 movq(kScratchRegister, x, RelocInfo::NONE); |
638 movq(dst, kScratchRegister); | 602 movq(dst, kScratchRegister); |
639 } | 603 } |
640 } | 604 } |
641 | 605 |
642 // ---------------------------------------------------------------------------- | 606 // ---------------------------------------------------------------------------- |
643 // Smi tagging, untagging and tag detection. | 607 // Smi tagging, untagging and tag detection. |
644 | 608 |
645 static int kSmiShift = kSmiTagSize + kSmiShiftSize; | |
646 | |
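The section below leans on the x64 long-smi encoding; the local kSmiShift removed above is presumably now defined in a shared header. As a hedged reference, assuming kSmiTag == 0, kSmiTagSize == 1 and kSmiShiftSize == 31:

  #include <cstdint>

  const int kSmiShift = 32;  // kSmiTagSize (1) + kSmiShiftSize (31)

  // Tagging puts the 32-bit payload in the upper word; the low 32 bits,
  // tag included, are zero -- Integer32ToSmi's shl(dst, kSmiShift).
  int64_t SmiTag(int32_t value) {
    return static_cast<int64_t>(value) << kSmiShift;
  }

  // Untagging is the arithmetic shift back down (SmiToInteger32's sar).
  int32_t SmiUntag(int64_t smi) {
    return static_cast<int32_t>(smi >> kSmiShift);
  }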
647 Register MacroAssembler::GetSmiConstant(Smi* source) { | 609 Register MacroAssembler::GetSmiConstant(Smi* source) { |
648 int value = source->value(); | 610 int value = source->value(); |
649 if (value == 0) { | 611 if (value == 0) { |
650 xorl(kScratchRegister, kScratchRegister); | 612 xorl(kScratchRegister, kScratchRegister); |
651 return kScratchRegister; | 613 return kScratchRegister; |
652 } | 614 } |
653 if (value == 1) { | 615 if (value == 1) { |
654 return kSmiConstantRegister; | 616 return kSmiConstantRegister; |
655 } | 617 } |
656 LoadSmiConstant(kScratchRegister, source); | 618 LoadSmiConstant(kScratchRegister, source); |
657 return kScratchRegister; | 619 return kScratchRegister; |
658 } | 620 } |
659 | 621 |
660 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) { | 622 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) { |
661 if (FLAG_debug_code) { | 623 if (FLAG_debug_code) { |
662 movq(dst, | 624 movq(dst, |
663 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), | 625 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), |
664 RelocInfo::NONE); | 626 RelocInfo::NONE); |
665 cmpq(dst, kSmiConstantRegister); | 627 cmpq(dst, kSmiConstantRegister); |
666 if (allow_stub_calls()) { | 628 if (allow_stub_calls()) { |
667 Assert(equal, "Uninitialized kSmiConstantRegister"); | 629 Assert(equal, "Uninitialized kSmiConstantRegister"); |
668 } else { | 630 } else { |
669 Label ok; | 631 NearLabel ok; |
670 j(equal, &ok); | 632 j(equal, &ok); |
671 int3(); | 633 int3(); |
672 bind(&ok); | 634 bind(&ok); |
673 } | 635 } |
674 } | 636 } |
675 if (source->value() == 0) { | 637 if (source->value() == 0) { |
676 xorl(dst, dst); | 638 xorl(dst, dst); |
677 return; | 639 return; |
678 } | 640 } |
679 int value = source->value(); | 641 int value = source->value(); |
(...skipping 38 matching lines...)
718 | 680 |
719 void MacroAssembler::Integer32ToSmi(Register dst, Register src) { | 681 void MacroAssembler::Integer32ToSmi(Register dst, Register src) { |
720 ASSERT_EQ(0, kSmiTag); | 682 ASSERT_EQ(0, kSmiTag); |
721 if (!dst.is(src)) { | 683 if (!dst.is(src)) { |
722 movl(dst, src); | 684 movl(dst, src); |
723 } | 685 } |
724 shl(dst, Immediate(kSmiShift)); | 686 shl(dst, Immediate(kSmiShift)); |
725 } | 687 } |
726 | 688 |
727 | 689 |
728 void MacroAssembler::Integer32ToSmi(Register dst, | |
729 Register src, | |
730 Label* on_overflow) { | |
731 ASSERT_EQ(0, kSmiTag); | |
732 // 32-bit integer always fits in a long smi. | |
733 if (!dst.is(src)) { | |
734 movl(dst, src); | |
735 } | |
736 shl(dst, Immediate(kSmiShift)); | |
737 } | |
738 | |
739 | |
740 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { | 690 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { |
741 if (FLAG_debug_code) { | 691 if (FLAG_debug_code) { |
742 testb(dst, Immediate(0x01)); | 692 testb(dst, Immediate(0x01)); |
743 Label ok; | 693 NearLabel ok; |
744 j(zero, &ok); | 694 j(zero, &ok); |
745 if (allow_stub_calls()) { | 695 if (allow_stub_calls()) { |
746 Abort("Integer32ToSmiField writing to non-smi location"); | 696 Abort("Integer32ToSmiField writing to non-smi location"); |
747 } else { | 697 } else { |
748 int3(); | 698 int3(); |
749 } | 699 } |
750 bind(&ok); | 700 bind(&ok); |
751 } | 701 } |
752 ASSERT(kSmiShift % kBitsPerByte == 0); | 702 ASSERT(kSmiShift % kBitsPerByte == 0); |
753 movl(Operand(dst, kSmiShift / kBitsPerByte), src); | 703 movl(Operand(dst, kSmiShift / kBitsPerByte), src); |
(...skipping 188 matching lines...)
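Integer32ToSmiField's `movl(Operand(dst, kSmiShift / kBitsPerByte), src)` stores the payload directly into the upper half of the slot (byte offset 4). That is only equivalent to a full tagged store on a little-endian target whose slot already has a zero low word, hence the debug check on the tag bit. A sketch of the assumption:

  #include <cstdint>
  #include <cstring>

  // Illustrative only: equals writing value << 32 into *slot, provided
  // the slot's low 32 bits are zero and the target is little-endian.
  void StoreSmiPayload(int64_t* slot, int32_t value) {
    std::memcpy(reinterpret_cast<char*>(slot) + 4, &value, sizeof(value));
  }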
942 | 892 |
943 | 893 |
944 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) { | 894 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) { |
945 // An unsigned 32-bit integer value is valid as long as the high bit | 895 // An unsigned 32-bit integer value is valid as long as the high bit |
946 // is not set. | 896 // is not set. |
947 testl(src, src); | 897 testl(src, src); |
948 return positive; | 898 return positive; |
949 } | 899 } |
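CheckUInteger32ValidSmiValue's `positive` condition works because the smi payload is a signed 32-bit value: any uint32 up to INT32_MAX fits, anything with the top bit set does not. Sketch:

  #include <cstdint>

  // Illustrative only: why testl(src, src) + "positive" suffices.
  bool UInt32IsValidSmiValue(uint32_t value) {
    return (value & 0x80000000u) == 0;  // value <= INT32_MAX
  }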
950 | 900 |
951 | 901 |
952 void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) { | |
953 if (dst.is(src)) { | |
954 ASSERT(!dst.is(kScratchRegister)); | |
955 movq(kScratchRegister, src); | |
956 neg(dst); // Low 32 bits are retained as zero by negation. | |
957 // Test if result is zero or Smi::kMinValue. | |
958 cmpq(dst, kScratchRegister); | |
959 j(not_equal, on_smi_result); | |
960 movq(src, kScratchRegister); | |
961 } else { | |
962 movq(dst, src); | |
963 neg(dst); | |
964 cmpq(dst, src); | |
965 // If the result is zero or Smi::kMinValue, negation failed to create a smi. | |
966 j(not_equal, on_smi_result); | |
967 } | |
968 } | |
969 | |
970 | |
971 void MacroAssembler::SmiAdd(Register dst, | |
972 Register src1, | |
973 Register src2, | |
974 Label* on_not_smi_result) { | |
975 ASSERT(!dst.is(src2)); | |
976 if (on_not_smi_result == NULL) { | |
977 // No overflow checking. Use only when it's known that | |
978 // overflowing is impossible. | |
979 if (dst.is(src1)) { | |
980 addq(dst, src2); | |
981 } else { | |
982 movq(dst, src1); | |
983 addq(dst, src2); | |
984 } | |
985 Assert(no_overflow, "Smi addition overflow"); | |
986 } else if (dst.is(src1)) { | |
987 movq(kScratchRegister, src1); | |
988 addq(kScratchRegister, src2); | |
989 j(overflow, on_not_smi_result); | |
990 movq(dst, kScratchRegister); | |
991 } else { | |
992 movq(dst, src1); | |
993 addq(dst, src2); | |
994 j(overflow, on_not_smi_result); | |
995 } | |
996 } | |
997 | |
998 | |
999 void MacroAssembler::SmiSub(Register dst, | |
1000 Register src1, | |
1001 Register src2, | |
1002 Label* on_not_smi_result) { | |
1003 ASSERT(!dst.is(src2)); | |
1004 if (on_not_smi_result == NULL) { | |
1005 // No overflow checking. Use only when it's known that | |
1006 // overflowing is impossible (e.g., subtracting two positive smis). | |
1007 if (dst.is(src1)) { | |
1008 subq(dst, src2); | |
1009 } else { | |
1010 movq(dst, src1); | |
1011 subq(dst, src2); | |
1012 } | |
1013 Assert(no_overflow, "Smi subtraction overflow"); | |
1014 } else if (dst.is(src1)) { | |
1015 cmpq(dst, src2); | |
1016 j(overflow, on_not_smi_result); | |
1017 subq(dst, src2); | |
1018 } else { | |
1019 movq(dst, src1); | |
1020 subq(dst, src2); | |
1021 j(overflow, on_not_smi_result); | |
1022 } | |
1023 } | |
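In the dst.is(src1) branch above, cmpq performs the subtraction for flags only, so overflow is detected before dst is clobbered and the following subq is then known to be safe. For reference, the condition the hardware overflow flag reports (portable sketch):

  #include <cstdint>

  // Illustrative only: computed in unsigned arithmetic to avoid UB.
  // Signed subtraction overflows iff the operands' signs differ and the
  // result's sign differs from the minuend's.
  bool SubWouldOverflow(int64_t a, int64_t b) {
    uint64_t ua = static_cast<uint64_t>(a);
    uint64_t ub = static_cast<uint64_t>(b);
    uint64_t ur = ua - ub;  // wraps, matching the machine subtraction
    return (((ua ^ ub) & (ua ^ ur)) >> 63) != 0;
  }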
1024 | |
1025 | |
1026 void MacroAssembler::SmiSub(Register dst, | |
1027 Register src1, | |
1028 const Operand& src2, | |
1029 Label* on_not_smi_result) { | |
1030 if (on_not_smi_result == NULL) { | |
1031 // No overflow checking. Use only when it's known that | |
1032 // overflowing is impossible (e.g., subtracting two positive smis). | |
1033 if (dst.is(src1)) { | |
1034 subq(dst, src2); | |
1035 } else { | |
1036 movq(dst, src1); | |
1037 subq(dst, src2); | |
1038 } | |
1039 Assert(no_overflow, "Smi subtraction overflow"); | |
1040 } else if (dst.is(src1)) { | |
1041 movq(kScratchRegister, src2); | |
1042 cmpq(src1, kScratchRegister); | |
1043 j(overflow, on_not_smi_result); | |
1044 subq(src1, kScratchRegister); | |
1045 } else { | |
1046 movq(dst, src1); | |
1047 subq(dst, src2); | |
1048 j(overflow, on_not_smi_result); | |
1049 } | |
1050 } | |
1051 | |
1052 void MacroAssembler::SmiMul(Register dst, | |
1053 Register src1, | |
1054 Register src2, | |
1055 Label* on_not_smi_result) { | |
1056 ASSERT(!dst.is(src2)); | |
1057 ASSERT(!dst.is(kScratchRegister)); | |
1058 ASSERT(!src1.is(kScratchRegister)); | |
1059 ASSERT(!src2.is(kScratchRegister)); | |
1060 | |
1061 if (dst.is(src1)) { | |
1062 Label failure, zero_correct_result; | |
1063 movq(kScratchRegister, src1); // Create backup for later testing. | |
1064 SmiToInteger64(dst, src1); | |
1065 imul(dst, src2); | |
1066 j(overflow, &failure); | |
1067 | |
1068 // Check for negative zero result. If product is zero, and one | |
1069 // argument is negative, go to slow case. | |
1070 Label correct_result; | |
1071 testq(dst, dst); | |
1072 j(not_zero, &correct_result); | |
1073 | |
1074 movq(dst, kScratchRegister); | |
1075 xor_(dst, src2); | |
1076 j(positive, &zero_correct_result); // Result was positive zero. | |
1077 | |
1078 bind(&failure); // Reused failure exit, restores src1. | |
1079 movq(src1, kScratchRegister); | |
1080 jmp(on_not_smi_result); | |
1081 | |
1082 bind(&zero_correct_result); | |
1083 xor_(dst, dst); | |
1084 | |
1085 bind(&correct_result); | |
1086 } else { | |
1087 SmiToInteger64(dst, src1); | |
1088 imul(dst, src2); | |
1089 j(overflow, on_not_smi_result); | |
1090 // Check for negative zero result. If product is zero, and one | |
1091 // argument is negative, go to slow case. | |
1092 Label correct_result; | |
1093 testq(dst, dst); | |
1094 j(not_zero, &correct_result); | |
1095 // One of src1 and src2 is zero, so check whether the other is |
1096 // negative. | |
1097 movq(kScratchRegister, src1); | |
1098 xor_(kScratchRegister, src2); | |
1099 j(negative, on_not_smi_result); | |
1100 bind(&correct_result); | |
1101 } | |
1102 } | |
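On the negative-zero logic in SmiMul: a zero product is only a valid smi when it is +0, and with one factor known to be zero the xor's sign bit says whether the other factor was negative. Sketch of the slow-case test (illustrative):

  #include <cstdint>

  // Consulted only when a * b == 0: one factor is zero (sign clear), so
  // the sign of a ^ b is set exactly when the other factor was negative,
  // i.e. the true result is -0, which no smi can represent.
  bool ZeroProductNeedsSlowCase(int64_t a, int64_t b) {
    return (a ^ b) < 0;
  }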
1103 | |
1104 | |
1105 void MacroAssembler::SmiTryAddConstant(Register dst, | |
1106 Register src, | |
1107 Smi* constant, | |
1108 Label* on_not_smi_result) { | |
1109 // Does not assume that src is a smi. | |
1110 ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask)); | |
1111 ASSERT_EQ(0, kSmiTag); | |
1112 ASSERT(!dst.is(kScratchRegister)); | |
1113 ASSERT(!src.is(kScratchRegister)); | |
1114 | |
1115 JumpIfNotSmi(src, on_not_smi_result); | |
1116 Register tmp = (dst.is(src) ? kScratchRegister : dst); | |
1117 LoadSmiConstant(tmp, constant); | |
1118 addq(tmp, src); | |
1119 j(overflow, on_not_smi_result); | |
1120 if (dst.is(src)) { | |
1121 movq(dst, tmp); | |
1122 } | |
1123 } | |
1124 | |
1125 | |
1126 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { | 902 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { |
1127 if (constant->value() == 0) { | 903 if (constant->value() == 0) { |
1128 if (!dst.is(src)) { | 904 if (!dst.is(src)) { |
1129 movq(dst, src); | 905 movq(dst, src); |
1130 } | 906 } |
1131 return; | 907 return; |
1132 } else if (dst.is(src)) { | 908 } else if (dst.is(src)) { |
1133 ASSERT(!dst.is(kScratchRegister)); | 909 ASSERT(!dst.is(kScratchRegister)); |
1134 switch (constant->value()) { | 910 switch (constant->value()) { |
1135 case 1: | 911 case 1: |
(...skipping 36 matching lines...)
1172 } | 948 } |
1173 | 949 |
1174 | 950 |
1175 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { | 951 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { |
1176 if (constant->value() != 0) { | 952 if (constant->value() != 0) { |
1177 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); | 953 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); |
1178 } | 954 } |
1179 } | 955 } |
1180 | 956 |
1181 | 957 |
1182 void MacroAssembler::SmiAddConstant(Register dst, | |
1183 Register src, | |
1184 Smi* constant, | |
1185 Label* on_not_smi_result) { | |
1186 if (constant->value() == 0) { | |
1187 if (!dst.is(src)) { | |
1188 movq(dst, src); | |
1189 } | |
1190 } else if (dst.is(src)) { | |
1191 ASSERT(!dst.is(kScratchRegister)); | |
1192 | |
1193 LoadSmiConstant(kScratchRegister, constant); | |
1194 addq(kScratchRegister, src); | |
1195 j(overflow, on_not_smi_result); | |
1196 movq(dst, kScratchRegister); | |
1197 } else { | |
1198 LoadSmiConstant(dst, constant); | |
1199 addq(dst, src); | |
1200 j(overflow, on_not_smi_result); | |
1201 } | |
1202 } | |
1203 | |
1204 | |
1205 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { | 958 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { |
1206 if (constant->value() == 0) { | 959 if (constant->value() == 0) { |
1207 if (!dst.is(src)) { | 960 if (!dst.is(src)) { |
1208 movq(dst, src); | 961 movq(dst, src); |
1209 } | 962 } |
1210 } else if (dst.is(src)) { | 963 } else if (dst.is(src)) { |
1211 ASSERT(!dst.is(kScratchRegister)); | 964 ASSERT(!dst.is(kScratchRegister)); |
1212 Register constant_reg = GetSmiConstant(constant); | 965 Register constant_reg = GetSmiConstant(constant); |
1213 subq(dst, constant_reg); | 966 subq(dst, constant_reg); |
1214 } else { | 967 } else { |
1215 if (constant->value() == Smi::kMinValue) { | 968 if (constant->value() == Smi::kMinValue) { |
1216 LoadSmiConstant(dst, constant); | 969 LoadSmiConstant(dst, constant); |
1217 // Adding and subtracting the min-value gives the same result, it only | 970 // Adding and subtracting the min-value gives the same result, it only |
1218 // differs on the overflow bit, which we don't check here. | 971 // differs on the overflow bit, which we don't check here. |
1219 addq(dst, src); | 972 addq(dst, src); |
1220 } else { | 973 } else { |
1221 // Subtract by adding the negation. | 974 // Subtract by adding the negation. |
1222 LoadSmiConstant(dst, Smi::FromInt(-constant->value())); | 975 LoadSmiConstant(dst, Smi::FromInt(-constant->value())); |
1223 addq(dst, src); | 976 addq(dst, src); |
1224 } | 977 } |
1225 } | 978 } |
1226 } | 979 } |
1227 | 980 |
1228 | 981 |
1229 void MacroAssembler::SmiSubConstant(Register dst, | 982 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { |
1230 Register src, | 983 // No overflow checking. Use only when it's known that |
1231 Smi* constant, | 984 // overflowing is impossible (e.g., subtracting two positive smis). |
1232 Label* on_not_smi_result) { | 985 ASSERT(!dst.is(src2)); |
1233 if (constant->value() == 0) { | 986 if (dst.is(src1)) { |
1234 if (!dst.is(src)) { | 987 subq(dst, src2); |
1235 movq(dst, src); | |
1236 } | |
1237 } else if (dst.is(src)) { | |
1238 ASSERT(!dst.is(kScratchRegister)); | |
1239 if (constant->value() == Smi::kMinValue) { | |
1240 // Subtracting min-value from any non-negative value will overflow. | |
1241 // We test for non-negativity before doing the subtraction. |
1242 testq(src, src); | |
1243 j(not_sign, on_not_smi_result); | |
1244 LoadSmiConstant(kScratchRegister, constant); | |
1245 subq(dst, kScratchRegister); | |
1246 } else { | |
1247 // Subtract by adding the negation. | |
1248 LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value())); | |
1249 addq(kScratchRegister, dst); | |
1250 j(overflow, on_not_smi_result); | |
1251 movq(dst, kScratchRegister); | |
1252 } | |
1253 } else { | 988 } else { |
1254 if (constant->value() == Smi::kMinValue) { | 989 movq(dst, src1); |
1255 // Subtracting min-value from any non-negative value will overflow. | 990 subq(dst, src2); |
1256 // We test for non-negativity before doing the subtraction. |
1257 testq(src, src); | |
1258 j(not_sign, on_not_smi_result); | |
1259 LoadSmiConstant(dst, constant); | |
1260 // Adding and subtracting the min-value gives the same result, it only | |
1261 // differs on the overflow bit, which we don't check here. | |
1262 addq(dst, src); | |
1263 } else { | |
1264 // Subtract by adding the negation. | |
1265 LoadSmiConstant(dst, Smi::FromInt(-(constant->value()))); | |
1266 addq(dst, src); | |
1267 j(overflow, on_not_smi_result); | |
1268 } | |
1269 } | 991 } |
| 992 Assert(no_overflow, "Smi subtraction overflow"); |
1270 } | 993 } |
1271 | 994 |
1272 | 995 |
1273 void MacroAssembler::SmiDiv(Register dst, | 996 void MacroAssembler::SmiAdd(Register dst, |
1274 Register src1, | 997 Register src1, |
1275 Register src2, | 998 Register src2) { |
1276 Label* on_not_smi_result) { | 999 ASSERT(!dst.is(src2)); |
1277 ASSERT(!src1.is(kScratchRegister)); | 1000 // No overflow checking. Use only when it's known that |
1278 ASSERT(!src2.is(kScratchRegister)); | 1001 // overflowing is impossible. |
1279 ASSERT(!dst.is(kScratchRegister)); | 1002 if (dst.is(src1)) { |
1280 ASSERT(!src2.is(rax)); | 1003 addq(dst, src2); |
1281 ASSERT(!src2.is(rdx)); | 1004 } else { |
1282 ASSERT(!src1.is(rdx)); | 1005 movq(dst, src1); |
1283 | 1006 addq(dst, src2); |
1284 // Check for 0 divisor (result is +/-Infinity). | |
1285 Label positive_divisor; | |
1286 testq(src2, src2); | |
1287 j(zero, on_not_smi_result); | |
1288 | |
1289 if (src1.is(rax)) { | |
1290 movq(kScratchRegister, src1); | |
1291 } | 1007 } |
1292 SmiToInteger32(rax, src1); | 1008 Assert(no_overflow, "Smi addition overflow"); |
1293 // We need to rule out dividing Smi::kMinValue by -1, since that would | |
1294 // overflow in idiv and raise an exception. | |
1295 // We combine this with negative zero test (negative zero only happens | |
1296 // when dividing zero by a negative number). | |
1297 | |
1298 // We overshoot a little and go to slow case if we divide min-value | |
1299 // by any negative value, not just -1. | |
1300 Label safe_div; | |
1301 testl(rax, Immediate(0x7fffffff)); | |
1302 j(not_zero, &safe_div); | |
1303 testq(src2, src2); | |
1304 if (src1.is(rax)) { | |
1305 j(positive, &safe_div); | |
1306 movq(src1, kScratchRegister); | |
1307 jmp(on_not_smi_result); | |
1308 } else { | |
1309 j(negative, on_not_smi_result); | |
1310 } | |
1311 bind(&safe_div); | |
1312 | |
1313 SmiToInteger32(src2, src2); | |
1314 // Sign extend src1 into edx:eax. | |
1315 cdq(); | |
1316 idivl(src2); | |
1317 Integer32ToSmi(src2, src2); | |
1318 // Check that the remainder is zero. | |
1319 testl(rdx, rdx); | |
1320 if (src1.is(rax)) { | |
1321 Label smi_result; | |
1322 j(zero, &smi_result); | |
1323 movq(src1, kScratchRegister); | |
1324 jmp(on_not_smi_result); | |
1325 bind(&smi_result); | |
1326 } else { | |
1327 j(not_zero, on_not_smi_result); | |
1328 } | |
1329 if (!dst.is(src1) && src1.is(rax)) { | |
1330 movq(src1, kScratchRegister); | |
1331 } | |
1332 Integer32ToSmi(dst, rax); | |
1333 } | 1009 } |
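SmiDiv funnels every case idivl cannot deliver as a smi to the slow path: zero divisors, the kMinValue / -1 overflow (deliberately overshot to min-value divided by any negative), the -0 result, and non-zero remainders. A hedged summary of the predicate:

  #include <cstdint>

  // Illustrative only.
  bool DivisionNeedsSlowCase(int32_t dividend, int32_t divisor) {
    if (divisor == 0) return true;                            // +/-Infinity
    if (dividend == INT32_MIN && divisor == -1) return true;  // idiv #DE
    if (dividend == 0 && divisor < 0) return true;            // result is -0
    return dividend % divisor != 0;  // fractional quotient is not a smi
  }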
1334 | 1010 |
1335 | 1011 |
1336 void MacroAssembler::SmiMod(Register dst, | 1012 void MacroAssembler::SmiSub(Register dst, |
1337 Register src1, | 1013 Register src1, |
1338 Register src2, | 1014 const Operand& src2) { |
1339 Label* on_not_smi_result) { | 1015 // No overflow checking. Use only when it's known that |
1340 ASSERT(!dst.is(kScratchRegister)); | 1016 // overflowing is impossible (e.g., subtracting two positive smis). |
1341 ASSERT(!src1.is(kScratchRegister)); | 1017 if (dst.is(src1)) { |
1342 ASSERT(!src2.is(kScratchRegister)); | 1018 subq(dst, src2); |
1343 ASSERT(!src2.is(rax)); | 1019 } else { |
1344 ASSERT(!src2.is(rdx)); | 1020 movq(dst, src1); |
1345 ASSERT(!src1.is(rdx)); | 1021 subq(dst, src2); |
1346 ASSERT(!src1.is(src2)); | |
1347 | |
1348 testq(src2, src2); | |
1349 j(zero, on_not_smi_result); | |
1350 | |
1351 if (src1.is(rax)) { | |
1352 movq(kScratchRegister, src1); | |
1353 } | 1022 } |
1354 SmiToInteger32(rax, src1); | 1023 Assert(no_overflow, "Smi subtraction overflow"); |
1355 SmiToInteger32(src2, src2); | |
1356 | |
1357 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow). | |
1358 Label safe_div; | |
1359 cmpl(rax, Immediate(Smi::kMinValue)); | |
1360 j(not_equal, &safe_div); | |
1361 cmpl(src2, Immediate(-1)); | |
1362 j(not_equal, &safe_div); | |
1363 // Retag inputs and go slow case. | |
1364 Integer32ToSmi(src2, src2); | |
1365 if (src1.is(rax)) { | |
1366 movq(src1, kScratchRegister); | |
1367 } | |
1368 jmp(on_not_smi_result); | |
1369 bind(&safe_div); | |
1370 | |
1371 // Sign extend eax into edx:eax. | |
1372 cdq(); | |
1373 idivl(src2); | |
1374 // Restore smi tags on inputs. | |
1375 Integer32ToSmi(src2, src2); | |
1376 if (src1.is(rax)) { | |
1377 movq(src1, kScratchRegister); | |
1378 } | |
1379 // Check for a negative zero result. If the result is zero, and the | |
1380 // dividend is negative, go slow to return a floating point negative zero. | |
1381 Label smi_result; | |
1382 testl(rdx, rdx); | |
1383 j(not_zero, &smi_result); | |
1384 testq(src1, src1); | |
1385 j(negative, on_not_smi_result); | |
1386 bind(&smi_result); | |
1387 Integer32ToSmi(dst, rdx); | |
1388 } | 1024 } |
1389 | 1025 |
1390 | 1026 |
1391 void MacroAssembler::SmiNot(Register dst, Register src) { | 1027 void MacroAssembler::SmiNot(Register dst, Register src) { |
1392 ASSERT(!dst.is(kScratchRegister)); | 1028 ASSERT(!dst.is(kScratchRegister)); |
1393 ASSERT(!src.is(kScratchRegister)); | 1029 ASSERT(!src.is(kScratchRegister)); |
1394 // Set tag and padding bits before negating, so that they are zero afterwards. | 1030 // Set tag and padding bits before negating, so that they are zero afterwards. |
1395 movl(kScratchRegister, Immediate(~0)); | 1031 movl(kScratchRegister, Immediate(~0)); |
1396 if (dst.is(src)) { | 1032 if (dst.is(src)) { |
1397 xor_(dst, kScratchRegister); | 1033 xor_(dst, kScratchRegister); |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1473 if (dst.is(src)) { | 1109 if (dst.is(src)) { |
1474 sar(dst, Immediate(shift_value + kSmiShift)); | 1110 sar(dst, Immediate(shift_value + kSmiShift)); |
1475 shl(dst, Immediate(kSmiShift)); | 1111 shl(dst, Immediate(kSmiShift)); |
1476 } else { | 1112 } else { |
1477 UNIMPLEMENTED(); // Not used. | 1113 UNIMPLEMENTED(); // Not used. |
1478 } | 1114 } |
1479 } | 1115 } |
1480 } | 1116 } |
1481 | 1117 |
1482 | 1118 |
1483 void MacroAssembler::SmiShiftLogicalRightConstant(Register dst, | |
1484 Register src, | |
1485 int shift_value, | |
1486 Label* on_not_smi_result) { | |
1487 // Logic right shift interprets its result as an *unsigned* number. | |
1488 if (dst.is(src)) { | |
1489 UNIMPLEMENTED(); // Not used. | |
1490 } else { | |
1491 movq(dst, src); | |
1492 if (shift_value == 0) { | |
1493 testq(dst, dst); | |
1494 j(negative, on_not_smi_result); | |
1495 } | |
1496 shr(dst, Immediate(shift_value + kSmiShift)); | |
1497 shl(dst, Immediate(kSmiShift)); | |
1498 } | |
1499 } | |
1500 | |
1501 | |
1502 void MacroAssembler::SmiShiftLeftConstant(Register dst, | 1119 void MacroAssembler::SmiShiftLeftConstant(Register dst, |
1503 Register src, | 1120 Register src, |
1504 int shift_value) { | 1121 int shift_value) { |
1505 if (!dst.is(src)) { | 1122 if (!dst.is(src)) { |
1506 movq(dst, src); | 1123 movq(dst, src); |
1507 } | 1124 } |
1508 if (shift_value > 0) { | 1125 if (shift_value > 0) { |
1509 shl(dst, Immediate(shift_value)); | 1126 shl(dst, Immediate(shift_value)); |
1510 } | 1127 } |
1511 } | 1128 } |
1512 | 1129 |
1513 | 1130 |
1514 void MacroAssembler::SmiShiftLeft(Register dst, | 1131 void MacroAssembler::SmiShiftLeft(Register dst, |
1515 Register src1, | 1132 Register src1, |
1516 Register src2) { | 1133 Register src2) { |
1517 ASSERT(!dst.is(rcx)); | 1134 ASSERT(!dst.is(rcx)); |
1518 Label result_ok; | 1135 NearLabel result_ok; |
1519 // Untag shift amount. | 1136 // Untag shift amount. |
1520 if (!dst.is(src1)) { | 1137 if (!dst.is(src1)) { |
1521 movq(dst, src1); | 1138 movq(dst, src1); |
1522 } | 1139 } |
1523 SmiToInteger32(rcx, src2); | 1140 SmiToInteger32(rcx, src2); |
1524 // The shift amount is given by the lower 5 bits, not six as in the 64-bit shl opcode. | 1141 // The shift amount is given by the lower 5 bits, not six as in the 64-bit shl opcode. |
1525 and_(rcx, Immediate(0x1f)); | 1142 and_(rcx, Immediate(0x1f)); |
1526 shl_cl(dst); | 1143 shl_cl(dst); |
1527 } | 1144 } |
1528 | 1145 |
1529 | 1146 |
1530 void MacroAssembler::SmiShiftLogicalRight(Register dst, | |
1531 Register src1, | |
1532 Register src2, | |
1533 Label* on_not_smi_result) { | |
1534 ASSERT(!dst.is(kScratchRegister)); | |
1535 ASSERT(!src1.is(kScratchRegister)); | |
1536 ASSERT(!src2.is(kScratchRegister)); | |
1537 ASSERT(!dst.is(rcx)); | |
1538 Label result_ok; | |
1539 if (src1.is(rcx) || src2.is(rcx)) { | |
1540 movq(kScratchRegister, rcx); | |
1541 } | |
1542 if (!dst.is(src1)) { | |
1543 movq(dst, src1); | |
1544 } | |
1545 SmiToInteger32(rcx, src2); | |
1546 orl(rcx, Immediate(kSmiShift)); | |
1547 shr_cl(dst); // Shift is (rcx & 0x1f) + 32. |
1548 shl(dst, Immediate(kSmiShift)); | |
1549 testq(dst, dst); | |
1550 if (src1.is(rcx) || src2.is(rcx)) { | |
1551 Label positive_result; | |
1552 j(positive, &positive_result); | |
1553 if (src1.is(rcx)) { | |
1554 movq(src1, kScratchRegister); | |
1555 } else { | |
1556 movq(src2, kScratchRegister); | |
1557 } | |
1558 jmp(on_not_smi_result); | |
1559 bind(&positive_result); | |
1560 } else { | |
1561 j(negative, on_not_smi_result); // src2 was zero and src1 negative. | |
1562 } | |
1563 } | |
1564 | |
1565 | |
1566 void MacroAssembler::SmiShiftArithmeticRight(Register dst, | 1147 void MacroAssembler::SmiShiftArithmeticRight(Register dst, |
1567 Register src1, | 1148 Register src1, |
1568 Register src2) { | 1149 Register src2) { |
1569 ASSERT(!dst.is(kScratchRegister)); | 1150 ASSERT(!dst.is(kScratchRegister)); |
1570 ASSERT(!src1.is(kScratchRegister)); | 1151 ASSERT(!src1.is(kScratchRegister)); |
1571 ASSERT(!src2.is(kScratchRegister)); | 1152 ASSERT(!src2.is(kScratchRegister)); |
1572 ASSERT(!dst.is(rcx)); | 1153 ASSERT(!dst.is(rcx)); |
1573 if (src1.is(rcx)) { | 1154 if (src1.is(rcx)) { |
1574 movq(kScratchRegister, src1); | 1155 movq(kScratchRegister, src1); |
1575 } else if (src2.is(rcx)) { | 1156 } else if (src2.is(rcx)) { |
1576 movq(kScratchRegister, src2); | 1157 movq(kScratchRegister, src2); |
1577 } | 1158 } |
1578 if (!dst.is(src1)) { | 1159 if (!dst.is(src1)) { |
1579 movq(dst, src1); | 1160 movq(dst, src1); |
1580 } | 1161 } |
1581 SmiToInteger32(rcx, src2); | 1162 SmiToInteger32(rcx, src2); |
1582 orl(rcx, Immediate(kSmiShift)); | 1163 orl(rcx, Immediate(kSmiShift)); |
1583 sar_cl(dst); // Shift is 32 + (original rcx & 0x1f). | 1164 sar_cl(dst); // Shift is 32 + (original rcx & 0x1f). |
1584 shl(dst, Immediate(kSmiShift)); | 1165 shl(dst, Immediate(kSmiShift)); |
1585 if (src1.is(rcx)) { | 1166 if (src1.is(rcx)) { |
1586 movq(src1, kScratchRegister); | 1167 movq(src1, kScratchRegister); |
1587 } else if (src2.is(rcx)) { | 1168 } else if (src2.is(rcx)) { |
1588 movq(src2, kScratchRegister); | 1169 movq(src2, kScratchRegister); |
1589 } | 1170 } |
1590 } | 1171 } |
1591 | 1172 |
1592 | 1173 |
1593 void MacroAssembler::SelectNonSmi(Register dst, | |
1594 Register src1, | |
1595 Register src2, | |
1596 Label* on_not_smis) { | |
1597 ASSERT(!dst.is(kScratchRegister)); | |
1598 ASSERT(!src1.is(kScratchRegister)); | |
1599 ASSERT(!src2.is(kScratchRegister)); | |
1600 ASSERT(!dst.is(src1)); | |
1601 ASSERT(!dst.is(src2)); | |
1602 // Both operands must not be smis. | |
1603 #ifdef DEBUG | |
1604 if (allow_stub_calls()) { // Check contains a stub call. | |
1605 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2)); | |
1606 Check(not_both_smis, "Both registers were smis in SelectNonSmi."); | |
1607 } | |
1608 #endif | |
1609 ASSERT_EQ(0, kSmiTag); | |
1610 ASSERT_EQ(0, Smi::FromInt(0)); | |
1611 movl(kScratchRegister, Immediate(kSmiTagMask)); | |
1612 and_(kScratchRegister, src1); | |
1613 testl(kScratchRegister, src2); | |
1614 // If non-zero then both are smis. | |
1615 j(not_zero, on_not_smis); | |
1616 | |
1617 // Exactly one operand is a smi. | |
1618 ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); | |
1619 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one. | |
1620 subq(kScratchRegister, Immediate(1)); | |
1621 // If src1 is a smi, then scratch register all 1s, else it is all 0s. | |
1622 movq(dst, src1); | |
1623 xor_(dst, src2); | |
1624 and_(dst, kScratchRegister); | |
1625 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. | |
1626 xor_(dst, src1); | |
1627 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. | |
1628 } | |
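SelectNonSmi's arithmetic deserves a gloss: with kSmiTag == 0 the low bit distinguishes smis, and `(src1 & kSmiTagMask) - 1` becomes an all-ones or all-zero mask that steers the xor toward the non-smi operand. Sketch:

  #include <cstdint>

  // Illustrative only, assuming kSmiTag == 0 (smi has a clear low bit):
  //   src1 is a smi:  mask = 0 - 1 = all ones, dst = src1^src2^src1 = src2
  //   src1 not a smi: mask = 1 - 1 = 0,        dst = 0 ^ src1       = src1
  uint64_t SelectNonSmiValue(uint64_t src1, uint64_t src2) {
    uint64_t mask = (src1 & 1) - 1;
    return ((src1 ^ src2) & mask) ^ src1;
  }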
1629 | |
1630 | |
1631 SmiIndex MacroAssembler::SmiToIndex(Register dst, | 1174 SmiIndex MacroAssembler::SmiToIndex(Register dst, |
1632 Register src, | 1175 Register src, |
1633 int shift) { | 1176 int shift) { |
1634 ASSERT(is_uint6(shift)); | 1177 ASSERT(is_uint6(shift)); |
1635 // There is a possible optimization if shift is in the range 60-63, but that | 1178 // There is a possible optimization if shift is in the range 60-63, but that |
1636 // will (and must) never happen. | 1179 // will (and must) never happen. |
1637 if (!dst.is(src)) { | 1180 if (!dst.is(src)) { |
1638 movq(dst, src); | 1181 movq(dst, src); |
1639 } | 1182 } |
1640 if (shift < kSmiShift) { | 1183 if (shift < kSmiShift) { |
(...skipping 15 matching lines...) Expand all Loading... |
1656 neg(dst); | 1199 neg(dst); |
1657 if (shift < kSmiShift) { | 1200 if (shift < kSmiShift) { |
1658 sar(dst, Immediate(kSmiShift - shift)); | 1201 sar(dst, Immediate(kSmiShift - shift)); |
1659 } else { | 1202 } else { |
1660 shl(dst, Immediate(shift - kSmiShift)); | 1203 shl(dst, Immediate(shift - kSmiShift)); |
1661 } | 1204 } |
1662 return SmiIndex(dst, times_1); | 1205 return SmiIndex(dst, times_1); |
1663 } | 1206 } |
1664 | 1207 |
1665 | 1208 |
1666 void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) { | |
1667 ASSERT_EQ(0, kSmiTag); | |
1668 Condition smi = CheckSmi(src); | |
1669 j(smi, on_smi); | |
1670 } | |
1671 | |
1672 | |
1673 void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) { | |
1674 Condition smi = CheckSmi(src); | |
1675 j(NegateCondition(smi), on_not_smi); | |
1676 } | |
1677 | |
1678 | |
1679 void MacroAssembler::JumpIfNotPositiveSmi(Register src, | |
1680 Label* on_not_positive_smi) { | |
1681 Condition positive_smi = CheckPositiveSmi(src); | |
1682 j(NegateCondition(positive_smi), on_not_positive_smi); | |
1683 } | |
1684 | |
1685 | |
1686 void MacroAssembler::JumpIfSmiEqualsConstant(Register src, | |
1687 Smi* constant, | |
1688 Label* on_equals) { | |
1689 SmiCompare(src, constant); | |
1690 j(equal, on_equals); | |
1691 } | |
1692 | |
1693 | |
1694 void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) { | |
1695 Condition is_valid = CheckInteger32ValidSmiValue(src); | |
1696 j(NegateCondition(is_valid), on_invalid); | |
1697 } | |
1698 | |
1699 | |
1700 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src, | |
1701 Label* on_invalid) { | |
1702 Condition is_valid = CheckUInteger32ValidSmiValue(src); | |
1703 j(NegateCondition(is_valid), on_invalid); | |
1704 } | |
1705 | |
1706 | |
1707 void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2, | |
1708 Label* on_not_both_smi) { | |
1709 Condition both_smi = CheckBothSmi(src1, src2); | |
1710 j(NegateCondition(both_smi), on_not_both_smi); | |
1711 } | |
1712 | |
1713 | |
1714 void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2, | |
1715 Label* on_not_both_smi) { | |
1716 Condition both_smi = CheckBothPositiveSmi(src1, src2); | |
1717 j(NegateCondition(both_smi), on_not_both_smi); | |
1718 } | |
1719 | |
1720 | |
1721 | |
1722 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object, | |
1723 Register second_object, | |
1724 Register scratch1, | |
1725 Register scratch2, | |
1726 Label* on_fail) { | |
1727 // Check that both objects are not smis. | |
1728 Condition either_smi = CheckEitherSmi(first_object, second_object); | |
1729 j(either_smi, on_fail); | |
1730 | |
1731 // Load instance type for both strings. | |
1732 movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset)); | |
1733 movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset)); | |
1734 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset)); | |
1735 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset)); | |
1736 | |
1737 // Check that both are flat ascii strings. | |
1738 ASSERT(kNotStringTag != 0); | |
1739 const int kFlatAsciiStringMask = | |
1740 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; | |
1741 const int kFlatAsciiStringTag = ASCII_STRING_TYPE; | |
1742 | |
1743 andl(scratch1, Immediate(kFlatAsciiStringMask)); | |
1744 andl(scratch2, Immediate(kFlatAsciiStringMask)); | |
1745 // Interleave the bits to check both scratch1 and scratch2 in one test. | |
1746 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); | |
1747 lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); | |
1748 cmpl(scratch1, | |
1749 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); | |
1750 j(not_equal, on_fail); | |
1751 } | |
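The lea in the check above packs both masked instance types into one word, which is legal because the ASSERT_EQ guarantees the mask does not collide with itself shifted left by 3; one compare then validates both strings at once. Sketch (illustrative names):

  #include <cstdint>

  // Illustrative only: since (mask << 3) & mask == 0, the two masked
  // fields occupy disjoint bits and the lea's add packs them losslessly.
  bool BothFlatAscii(uint32_t type1, uint32_t type2,
                     uint32_t mask, uint32_t tag) {
    uint32_t combined = (type1 & mask) + ((type2 & mask) << 3);
    return combined == tag + (tag << 3);
  }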
1752 | |
1753 | |
1754 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( | |
1755 Register instance_type, | |
1756 Register scratch, | |
1757 Label* failure) { |
1758 if (!scratch.is(instance_type)) { | |
1759 movl(scratch, instance_type); | |
1760 } | |
1761 | |
1762 const int kFlatAsciiStringMask = | |
1763 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; | |
1764 | |
1765 andl(scratch, Immediate(kFlatAsciiStringMask)); | |
1766 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag)); | |
1767 j(not_equal, failure); | |
1768 } | |
1769 | |
1770 | |
1771 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( | |
1772 Register first_object_instance_type, | |
1773 Register second_object_instance_type, | |
1774 Register scratch1, | |
1775 Register scratch2, | |
1776 Label* on_fail) { | |
1777 // Load instance type for both strings. | |
1778 movq(scratch1, first_object_instance_type); | |
1779 movq(scratch2, second_object_instance_type); | |
1780 | |
1781 // Check that both are flat ascii strings. | |
1782 ASSERT(kNotStringTag != 0); | |
1783 const int kFlatAsciiStringMask = | |
1784 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; | |
1785 const int kFlatAsciiStringTag = ASCII_STRING_TYPE; | |
1786 | |
1787 andl(scratch1, Immediate(kFlatAsciiStringMask)); | |
1788 andl(scratch2, Immediate(kFlatAsciiStringMask)); | |
1789 // Interleave the bits to check both scratch1 and scratch2 in one test. | |
1790 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); | |
1791 lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); | |
1792 cmpl(scratch1, | |
1793 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); | |
1794 j(not_equal, on_fail); | |
1795 } | |
1796 | |
1797 | |
1798 void MacroAssembler::Move(Register dst, Handle<Object> source) { | 1209 void MacroAssembler::Move(Register dst, Handle<Object> source) { |
1799 ASSERT(!source->IsFailure()); | 1210 ASSERT(!source->IsFailure()); |
1800 if (source->IsSmi()) { | 1211 if (source->IsSmi()) { |
1801 Move(dst, Smi::cast(*source)); | 1212 Move(dst, Smi::cast(*source)); |
1802 } else { | 1213 } else { |
1803 movq(dst, source, RelocInfo::EMBEDDED_OBJECT); | 1214 movq(dst, source, RelocInfo::EMBEDDED_OBJECT); |
1804 } | 1215 } |
1805 } | 1216 } |
1806 | 1217 |
1807 | 1218 |
(...skipping 179 matching lines...)
1987 bool is_heap_object) { | 1398 bool is_heap_object) { |
1988 if (!is_heap_object) { | 1399 if (!is_heap_object) { |
1989 JumpIfSmi(obj, fail); | 1400 JumpIfSmi(obj, fail); |
1990 } | 1401 } |
1991 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map); | 1402 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map); |
1992 j(not_equal, fail); | 1403 j(not_equal, fail); |
1993 } | 1404 } |
1994 | 1405 |
1995 | 1406 |
1996 void MacroAssembler::AbortIfNotNumber(Register object) { | 1407 void MacroAssembler::AbortIfNotNumber(Register object) { |
1997 Label ok; | 1408 NearLabel ok; |
1998 Condition is_smi = CheckSmi(object); | 1409 Condition is_smi = CheckSmi(object); |
1999 j(is_smi, &ok); | 1410 j(is_smi, &ok); |
2000 Cmp(FieldOperand(object, HeapObject::kMapOffset), | 1411 Cmp(FieldOperand(object, HeapObject::kMapOffset), |
2001 Factory::heap_number_map()); | 1412 Factory::heap_number_map()); |
2002 Assert(equal, "Operand not a number"); | 1413 Assert(equal, "Operand not a number"); |
2003 bind(&ok); | 1414 bind(&ok); |
2004 } | 1415 } |
2005 | 1416 |
2006 | 1417 |
2007 void MacroAssembler::AbortIfSmi(Register object) { | 1418 void MacroAssembler::AbortIfSmi(Register object) { |
2008 Label ok; | 1419 NearLabel ok; |
2009 Condition is_smi = CheckSmi(object); | 1420 Condition is_smi = CheckSmi(object); |
2010 Assert(NegateCondition(is_smi), "Operand is a smi"); | 1421 Assert(NegateCondition(is_smi), "Operand is a smi"); |
2011 } | 1422 } |
2012 | 1423 |
2013 | 1424 |
2014 void MacroAssembler::AbortIfNotSmi(Register object) { | 1425 void MacroAssembler::AbortIfNotSmi(Register object) { |
2015 Label ok; | 1426 NearLabel ok; |
2016 Condition is_smi = CheckSmi(object); | 1427 Condition is_smi = CheckSmi(object); |
2017 Assert(is_smi, "Operand is not a smi"); | 1428 Assert(is_smi, "Operand is not a smi"); |
2018 } | 1429 } |
2019 | 1430 |
2020 | 1431 |
2021 void MacroAssembler::AbortIfNotRootValue(Register src, | 1432 void MacroAssembler::AbortIfNotRootValue(Register src, |
2022 Heap::RootListIndex root_value_index, | 1433 Heap::RootListIndex root_value_index, |
2023 const char* message) { | 1434 const char* message) { |
2024 ASSERT(!src.is(kScratchRegister)); | 1435 ASSERT(!src.is(kScratchRegister)); |
2025 LoadRoot(kScratchRegister, root_value_index); | 1436 LoadRoot(kScratchRegister, root_value_index); |
(...skipping 19 matching lines...)
2045 Label* miss) { | 1456 Label* miss) { |
2046 // Check that the receiver isn't a smi. | 1457 // Check that the receiver isn't a smi. |
2047 testl(function, Immediate(kSmiTagMask)); | 1458 testl(function, Immediate(kSmiTagMask)); |
2048 j(zero, miss); | 1459 j(zero, miss); |
2049 | 1460 |
2050 // Check that the function really is a function. | 1461 // Check that the function really is a function. |
2051 CmpObjectType(function, JS_FUNCTION_TYPE, result); | 1462 CmpObjectType(function, JS_FUNCTION_TYPE, result); |
2052 j(not_equal, miss); | 1463 j(not_equal, miss); |
2053 | 1464 |
2054 // Make sure that the function has an instance prototype. | 1465 // Make sure that the function has an instance prototype. |
2055 Label non_instance; | 1466 NearLabel non_instance; |
2056 testb(FieldOperand(result, Map::kBitFieldOffset), | 1467 testb(FieldOperand(result, Map::kBitFieldOffset), |
2057 Immediate(1 << Map::kHasNonInstancePrototype)); | 1468 Immediate(1 << Map::kHasNonInstancePrototype)); |
2058 j(not_zero, &non_instance); | 1469 j(not_zero, &non_instance); |
2059 | 1470 |
2060 // Get the prototype or initial map from the function. | 1471 // Get the prototype or initial map from the function. |
2061 movq(result, | 1472 movq(result, |
2062 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 1473 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
2063 | 1474 |
2064 // If the prototype or initial map is the hole, don't return it and | 1475 // If the prototype or initial map is the hole, don't return it and |
2065 // simply miss the cache instead. This will allow us to allocate a | 1476 // simply miss the cache instead. This will allow us to allocate a |
2066 // prototype object on-demand in the runtime system. | 1477 // prototype object on-demand in the runtime system. |
2067 CompareRoot(result, Heap::kTheHoleValueRootIndex); | 1478 CompareRoot(result, Heap::kTheHoleValueRootIndex); |
2068 j(equal, miss); | 1479 j(equal, miss); |
2069 | 1480 |
2070 // If the function does not have an initial map, we're done. | 1481 // If the function does not have an initial map, we're done. |
2071 Label done; | 1482 NearLabel done; |
2072 CmpObjectType(result, MAP_TYPE, kScratchRegister); | 1483 CmpObjectType(result, MAP_TYPE, kScratchRegister); |
2073 j(not_equal, &done); | 1484 j(not_equal, &done); |
2074 | 1485 |
2075 // Get the prototype from the initial map. | 1486 // Get the prototype from the initial map. |
2076 movq(result, FieldOperand(result, Map::kPrototypeOffset)); | 1487 movq(result, FieldOperand(result, Map::kPrototypeOffset)); |
2077 jmp(&done); | 1488 jmp(&done); |
2078 | 1489 |
2079 // Non-instance prototype: Fetch prototype from constructor field | 1490 // Non-instance prototype: Fetch prototype from constructor field |
2080 // in initial map. | 1491 // in initial map. |
2081 bind(&non_instance); | 1492 bind(&non_instance); |
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2126 void MacroAssembler::DebugBreak() { | 1537 void MacroAssembler::DebugBreak() { |
2127 ASSERT(allow_stub_calls()); | 1538 ASSERT(allow_stub_calls()); |
2128 xor_(rax, rax); // no arguments | 1539 xor_(rax, rax); // no arguments |
2129 movq(rbx, ExternalReference(Runtime::kDebugBreak)); | 1540 movq(rbx, ExternalReference(Runtime::kDebugBreak)); |
2130 CEntryStub ces(1); | 1541 CEntryStub ces(1); |
2131 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); | 1542 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); |
2132 } | 1543 } |
2133 #endif // ENABLE_DEBUGGER_SUPPORT | 1544 #endif // ENABLE_DEBUGGER_SUPPORT |
2134 | 1545 |
2135 | 1546 |
2136 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | |
2137 const ParameterCount& actual, | |
2138 Handle<Code> code_constant, | |
2139 Register code_register, | |
2140 Label* done, | |
2141 InvokeFlag flag) { | |
2142 bool definitely_matches = false; | |
2143 Label invoke; | |
2144 if (expected.is_immediate()) { | |
2145 ASSERT(actual.is_immediate()); | |
2146 if (expected.immediate() == actual.immediate()) { | |
2147 definitely_matches = true; | |
2148 } else { | |
2149 Set(rax, actual.immediate()); | |
2150 if (expected.immediate() == | |
2151 SharedFunctionInfo::kDontAdaptArgumentsSentinel) { | |
2152 // Don't worry about adapting arguments for built-ins that | |
2153 // don't want that done. Skip adaption code by making it look | |
2154 // like we have a match between expected and actual number of | |
2155 // arguments. | |
2156 definitely_matches = true; | |
2157 } else { | |
2158 Set(rbx, expected.immediate()); | |
2159 } | |
2160 } | |
2161 } else { | |
2162 if (actual.is_immediate()) { | |
2163 // Expected is in register, actual is immediate. This is the | |
2164 // case when we invoke function values without going through the | |
2165 // IC mechanism. | |
2166 cmpq(expected.reg(), Immediate(actual.immediate())); | |
2167 j(equal, &invoke); | |
2168 ASSERT(expected.reg().is(rbx)); | |
2169 Set(rax, actual.immediate()); | |
2170 } else if (!expected.reg().is(actual.reg())) { | |
2171 // Both expected and actual are in (different) registers. This | |
2172 // is the case when we invoke functions using call and apply. | |
2173 cmpq(expected.reg(), actual.reg()); | |
2174 j(equal, &invoke); | |
2175 ASSERT(actual.reg().is(rax)); | |
2176 ASSERT(expected.reg().is(rbx)); | |
2177 } | |
2178 } | |
2179 | |
2180 if (!definitely_matches) { | |
2181 Handle<Code> adaptor = | |
2182 Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); | |
2183 if (!code_constant.is_null()) { | |
2184 movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT); | |
2185 addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag)); | |
2186 } else if (!code_register.is(rdx)) { | |
2187 movq(rdx, code_register); | |
2188 } | |
2189 | |
2190 if (flag == CALL_FUNCTION) { | |
2191 Call(adaptor, RelocInfo::CODE_TARGET); | |
2192 jmp(done); | |
2193 } else { | |
2194 Jump(adaptor, RelocInfo::CODE_TARGET); | |
2195 } | |
2196 bind(&invoke); | |
2197 } | |
2198 } | |
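InvokePrologue's control flow reduces to a three-way decision in the immediate/immediate case: matching counts or the don't-adapt sentinel invoke directly, anything else routes through the ArgumentsAdaptorTrampoline. A hedged sketch:

  // Illustrative only: "dont_adapt_sentinel" stands for
  // SharedFunctionInfo::kDontAdaptArgumentsSentinel.
  enum InvokePath { kDirectInvoke, kViaArgumentsAdaptor };

  InvokePath ChooseInvokePath(int expected, int actual,
                              int dont_adapt_sentinel) {
    if (expected == actual) return kDirectInvoke;
    if (expected == dont_adapt_sentinel) return kDirectInvoke;
    return kViaArgumentsAdaptor;
  }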
2199 | |
2200 | |
2201 void MacroAssembler::InvokeCode(Register code, | 1547 void MacroAssembler::InvokeCode(Register code, |
2202 const ParameterCount& expected, | 1548 const ParameterCount& expected, |
2203 const ParameterCount& actual, | 1549 const ParameterCount& actual, |
2204 InvokeFlag flag) { | 1550 InvokeFlag flag) { |
2205 Label done; | 1551 NearLabel done; |
2206 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag); | 1552 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag); |
2207 if (flag == CALL_FUNCTION) { | 1553 if (flag == CALL_FUNCTION) { |
2208 call(code); | 1554 call(code); |
2209 } else { | 1555 } else { |
2210 ASSERT(flag == JUMP_FUNCTION); | 1556 ASSERT(flag == JUMP_FUNCTION); |
2211 jmp(code); | 1557 jmp(code); |
2212 } | 1558 } |
2213 bind(&done); | 1559 bind(&done); |
2214 } | 1560 } |
2215 | 1561 |
2216 | 1562 |
2217 void MacroAssembler::InvokeCode(Handle<Code> code, | 1563 void MacroAssembler::InvokeCode(Handle<Code> code, |
2218 const ParameterCount& expected, | 1564 const ParameterCount& expected, |
2219 const ParameterCount& actual, | 1565 const ParameterCount& actual, |
2220 RelocInfo::Mode rmode, | 1566 RelocInfo::Mode rmode, |
2221 InvokeFlag flag) { | 1567 InvokeFlag flag) { |
2222 Label done; | 1568 NearLabel done; |
2223 Register dummy = rax; | 1569 Register dummy = rax; |
2224 InvokePrologue(expected, actual, code, dummy, &done, flag); | 1570 InvokePrologue(expected, actual, code, dummy, &done, flag); |
2225 if (flag == CALL_FUNCTION) { | 1571 if (flag == CALL_FUNCTION) { |
2226 Call(code, rmode); | 1572 Call(code, rmode); |
2227 } else { | 1573 } else { |
2228 ASSERT(flag == JUMP_FUNCTION); | 1574 ASSERT(flag == JUMP_FUNCTION); |
2229 Jump(code, rmode); | 1575 Jump(code, rmode); |
2230 } | 1576 } |
2231 bind(&done); | 1577 bind(&done); |
2232 } | 1578 } |
(...skipping 635 matching lines...)
2868 CPU::FlushICache(address_, size_); | 2214 CPU::FlushICache(address_, size_); |
2869 | 2215 |
2870 // Check that the code was patched as expected. | 2216 // Check that the code was patched as expected. |
2871 ASSERT(masm_.pc_ == address_ + size_); | 2217 ASSERT(masm_.pc_ == address_ + size_); |
2872 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2218 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
2873 } | 2219 } |
2874 | 2220 |
2875 } } // namespace v8::internal | 2221 } } // namespace v8::internal |
2876 | 2222 |
2877 #endif // V8_TARGET_ARCH_X64 | 2223 #endif // V8_TARGET_ARCH_X64 |