Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 205343013: Introduce andp, notp, orp and xorp for x64 port (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebased with bleeding_edge Created 6 years, 9 months ago
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
@@ -269,40 +269,40 @@
                                 Condition cc,
                                 Label* branch,
                                 Label::Distance distance) {
   if (Serializer::enabled()) {
     // Can't do arithmetic on external references if it might get serialized.
     // The mask isn't really an address. We load it as an external reference in
     // case the size of the new space is different between the snapshot maker
     // and the running system.
     if (scratch.is(object)) {
       Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
-      and_(scratch, kScratchRegister);
+      andp(scratch, kScratchRegister);
     } else {
       Move(scratch, ExternalReference::new_space_mask(isolate()));
-      and_(scratch, object);
+      andp(scratch, object);
     }
     Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
     cmpp(scratch, kScratchRegister);
     j(cc, branch, distance);
   } else {
     ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
     intptr_t new_space_start =
         reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
     Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
          Assembler::RelocInfoNone());
     if (scratch.is(object)) {
       addp(scratch, kScratchRegister);
     } else {
       leap(scratch, Operand(object, kScratchRegister, times_1, 0));
     }
-    and_(scratch,
+    andp(scratch,
          Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
     j(cc, branch, distance);
   }
 }


 void MacroAssembler::RecordWriteField(
     Register object,
     int offset,
     Register value,
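Note: the non-serializer path above relies on the new space being a power-of-two sized, alignment-restricted region, so the membership test reduces to an add (or lea) and a mask. A minimal standalone C++ sketch of the same check, with the start and size values assumed purely for illustration:

    #include <cstdint>

    // Assumed layout for illustration only: a 16 MB new space aligned to
    // its own size, so the mask clears exactly the in-space offset bits.
    constexpr uintptr_t kNewSpaceSize = 16u * 1024 * 1024;
    constexpr uintptr_t kNewSpaceStart = 4 * kNewSpaceSize;  // size-aligned
    constexpr uintptr_t kNewSpaceMask = ~(kNewSpaceSize - 1);

    // Mirrors the leap/andp sequence: (addr - start) has all mask bits
    // clear iff addr lies in [start, start + size).
    bool InNewSpace(uintptr_t addr) {
      return ((addr - kNewSpaceStart) & kNewSpaceMask) == 0;
    }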
@@ -569,21 +569,21 @@
 void MacroAssembler::IndexFromHash(Register hash, Register index) {
   // The assert checks that the constants for the maximum number of digits
   // for an array index cached in the hash field and the number of bits
   // reserved for it does not conflict.
   ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
   // We want the smi-tagged index in key. Even if we subsequently go to
   // the slow case, converting the key to a smi is always valid.
   // key: string key
   // hash: key's hash field, including its array index value.
-  and_(hash, Immediate(String::kArrayIndexValueMask));
+  andp(hash, Immediate(String::kArrayIndexValueMask));
   shr(hash, Immediate(String::kHashShift));
   // Here we actually clobber the key which will be used if calling into
   // runtime later. However as the new key is the numeric value of a string key
   // there is no difference in using either key.
   Integer32ToSmi(index, hash);
 }


 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments,
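Note: IndexFromHash is a plain mask-and-shift field extraction. A C++ sketch with illustrative (not V8's exact) field positions:

    #include <cstdint>

    // Assumed layout: array index cached in bits [2, 26) of the hash field.
    constexpr uint32_t kHashShift = 2;
    constexpr uint32_t kArrayIndexValueBits = 24;
    constexpr uint32_t kArrayIndexValueMask =
        ((1u << kArrayIndexValueBits) - 1) << kHashShift;

    uint32_t IndexFromHash(uint32_t hash) {
      return (hash & kArrayIndexValueMask) >> kHashShift;  // andp + shr
    }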
@@ -995,33 +995,33 @@
   return !is_intn(x, kMaxBits);
 }


 void MacroAssembler::SafeMove(Register dst, Smi* src) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(SmiValuesAre32Bits());  // JIT cookie can be converted to Smi.
   if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
     Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
     Move(kScratchRegister, Smi::FromInt(jit_cookie()));
-    xor_(dst, kScratchRegister);
+    xorq(dst, kScratchRegister);
   } else {
     Move(dst, src);
   }
 }


 void MacroAssembler::SafePush(Smi* src) {
   ASSERT(SmiValuesAre32Bits());  // JIT cookie can be converted to Smi.
   if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
     Push(Smi::FromInt(src->value() ^ jit_cookie()));
     Move(kScratchRegister, Smi::FromInt(jit_cookie()));
-    xor_(Operand(rsp, 0), kScratchRegister);
+    xorq(Operand(rsp, 0), kScratchRegister);
   } else {
     Push(src);
   }
 }


 Register MacroAssembler::GetSmiConstant(Smi* source) {
   int value = source->value();
   if (value == 0) {
     xorl(kScratchRegister, kScratchRegister);
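Note: SafeMove/SafePush exist to defeat JIT spraying: an attacker-controlled 32-bit constant is never embedded verbatim in generated code. The code stream carries value ^ cookie and the emitted xorq restores the value at run time. A sketch of the invariant, with an arbitrary cookie value assumed for illustration:

    #include <cstdint>

    constexpr int32_t kJitCookie = 0x5eed5eed;  // assumed; V8 picks its own

    int32_t SafeLoad(int32_t value) {
      int32_t embedded = value ^ kJitCookie;  // what lands in the code stream
      return embedded ^ kJitCookie;           // the runtime xorq undoes it
    }

    // SafeLoad(v) == v for every v, since x ^ c ^ c == x.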
@@ -1248,26 +1248,26 @@
 }


 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
   if (dst.is(src1) || dst.is(src2)) {
     ASSERT(!src1.is(kScratchRegister));
     ASSERT(!src2.is(kScratchRegister));
     movp(kScratchRegister, src1);
-    or_(kScratchRegister, src2);
+    orp(kScratchRegister, src2);
     JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
     movp(dst, kScratchRegister);
   } else {
     movp(dst, src1);
-    or_(dst, src2);
+    orp(dst, src2);
     JumpIfNotSmi(dst, on_not_smis, near_jump);
   }
 }


 Condition MacroAssembler::CheckSmi(Register src) {
   STATIC_ASSERT(kSmiTag == 0);
   testb(src, Immediate(kSmiTagMask));
   return zero;
 }
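Note: with kSmiTag == 0, a word is a smi iff its low tag bit is clear, so or-ing two words and testing the tag once (the orp + JumpIfNotSmi above) checks both operands in one go. Sketch:

    #include <cstdint>

    constexpr uintptr_t kSmiTagMask = 1;  // smis have tag bit 0 == 0

    bool IsSmi(uintptr_t word) { return (word & kSmiTagMask) == 0; }

    // Bit 0 of (a | b) is set iff at least one tag bit is set, so one
    // test covers both operands.
    bool BothSmi(uintptr_t a, uintptr_t b) { return IsSmi(a | b); }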
@@ -1300,21 +1300,21 @@
   return zero;
 }


 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                   Register second) {
   if (first.is(second)) {
     return CheckNonNegativeSmi(first);
   }
   movp(kScratchRegister, first);
-  or_(kScratchRegister, second);
+  orp(kScratchRegister, second);
   rol(kScratchRegister, Immediate(1));
   testl(kScratchRegister, Immediate(3));
   return zero;
 }


 Condition MacroAssembler::CheckEitherSmi(Register first,
                                          Register second,
                                          Register scratch) {
   if (first.is(second)) {
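Note: CheckBothNonNegativeSmi packs two predicates into one test. After or-ing, bit 63 is the combined sign and bit 0 the combined smi tag; a rotate-left by one moves the sign into bit 0 and the tag into bit 1, so a single testl against 3 decides both. C++ equivalent:

    #include <cstdint>

    bool BothNonNegativeSmi(uint64_t a, uint64_t b) {
      uint64_t x = a | b;
      x = (x << 1) | (x >> 63);  // rol x, 1
      // Bit 0 now holds the old sign bit, bit 1 the old smi tag bit;
      // both must be clear.
      return (x & 3) == 0;
    }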
@@ -1792,45 +1792,45 @@
     imulp(dst, src2);
     j(overflow, &failure, Label::kNear);

     // Check for negative zero result. If product is zero, and one
     // argument is negative, go to slow case.
     Label correct_result;
     testp(dst, dst);
     j(not_zero, &correct_result, Label::kNear);

     movp(dst, kScratchRegister);
-    xor_(dst, src2);
+    xorp(dst, src2);
     // Result was positive zero.
     j(positive, &zero_correct_result, Label::kNear);

     bind(&failure);  // Reused failure exit, restores src1.
     movp(src1, kScratchRegister);
     jmp(on_not_smi_result, near_jump);

     bind(&zero_correct_result);
     Set(dst, 0);

     bind(&correct_result);
   } else {
     SmiToInteger64(dst, src1);
     imulp(dst, src2);
     j(overflow, on_not_smi_result, near_jump);
     // Check for negative zero result. If product is zero, and one
     // argument is negative, go to slow case.
     Label correct_result;
     testp(dst, dst);
     j(not_zero, &correct_result, Label::kNear);
     // One of src1 and src2 is zero, the check whether the other is
     // negative.
     movp(kScratchRegister, src1);
-    xor_(kScratchRegister, src2);
+    xorp(kScratchRegister, src2);
     j(negative, on_not_smi_result, near_jump);
     bind(&correct_result);
   }
 }


 void MacroAssembler::SmiDiv(Register dst,
                             Register src1,
                             Register src2,
                             Label* on_not_smi_result,
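Note on the negative-zero checks in SmiMul: a smi cannot represent -0, but JavaScript requires it when a zero product has exactly one negative factor. The sign bit of a ^ b is set iff the factors' signs differ, which is what the xorp / j(negative) pair tests. Sketch:

    #include <cstdint>

    // Illustrative only; assumes the overflow case was already branched
    // out, as in the macro above.
    bool ProductNeedsMinusZero(int64_t a, int64_t b) {
      return a * b == 0 && (a ^ b) < 0;  // product zero, signs differ
    }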
@@ -1948,89 +1948,89 @@
   Integer32ToSmi(dst, rdx);
 }


 void MacroAssembler::SmiNot(Register dst, Register src) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
   // Set tag and padding bits before negating, so that they are zero afterwards.
   movl(kScratchRegister, Immediate(~0));
   if (dst.is(src)) {
-    xor_(dst, kScratchRegister);
+    xorp(dst, kScratchRegister);
   } else {
     leap(dst, Operand(src, kScratchRegister, times_1, 0));
   }
-  not_(dst);
+  notp(dst);
 }


 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
   ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
     movp(dst, src1);
   }
-  and_(dst, src2);
+  andp(dst, src2);
 }


 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     Set(dst, 0);
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
     Register constant_reg = GetSmiConstant(constant);
-    and_(dst, constant_reg);
+    andp(dst, constant_reg);
   } else {
     LoadSmiConstant(dst, constant);
-    and_(dst, src);
+    andp(dst, src);
   }
 }


 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
     ASSERT(!src1.is(src2));
     movp(dst, src1);
   }
-  or_(dst, src2);
+  orp(dst, src2);
 }


 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
     Register constant_reg = GetSmiConstant(constant);
-    or_(dst, constant_reg);
+    orp(dst, constant_reg);
   } else {
     LoadSmiConstant(dst, constant);
-    or_(dst, src);
+    orp(dst, src);
   }
 }


 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
     ASSERT(!src1.is(src2));
     movp(dst, src1);
   }
-  xor_(dst, src2);
+  xorp(dst, src2);
 }


 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
     Register constant_reg = GetSmiConstant(constant);
-    xor_(dst, constant_reg);
+    xorp(dst, constant_reg);
   } else {
     LoadSmiConstant(dst, constant);
-    xor_(dst, src);
+    xorp(dst, src);
   }
 }


 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                      Register src,
                                                      int shift_value) {
   ASSERT(is_uint5(shift_value));
   if (shift_value > 0) {
     if (dst.is(src)) {
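Note on SmiNot: with 32-bit smis the payload lives in the upper word and the low 32 tag/padding bits are zero. Filling those low bits with ones first (the leap, or the xorq with 0xffffffff) makes the subsequent notp clear them again, so the result is directly a valid smi encoding ~value. C++ model:

    #include <cstdint>

    // smi encoding assumed here: value << 32, low 32 bits zero.
    uint64_t SmiNot(uint64_t smi) {
      // Since the low 32 bits are zero, "| 0xffffffff" has the same
      // effect as the add/xor with ~0 used in the macro above.
      return ~(smi | 0xffffffffull);
    }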
@@ -2076,21 +2076,21 @@
 void MacroAssembler::SmiShiftLeft(Register dst,
                                   Register src1,
                                   Register src2) {
   ASSERT(!dst.is(rcx));
   // Untag shift amount.
   if (!dst.is(src1)) {
     movq(dst, src1);
   }
   SmiToInteger32(rcx, src2);
   // Shift amount specified by lower 5 bits, not six as the shl opcode.
-  and_(rcx, Immediate(0x1f));
+  andq(rcx, Immediate(0x1f));
   shl_cl(dst);
 }


 void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                           Register src1,
                                           Register src2,
                                           Label* on_not_smi_result,
                                           Label::Distance near_jump) {
   ASSERT(!dst.is(kScratchRegister));
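Note: the 0x1f mask matches JavaScript's shift semantics (the count is taken mod 32), whereas a 64-bit shl would take the count mod 64; hence the explicit andq before shl_cl. Sketch:

    #include <cstdint>

    uint64_t ShiftLeftJsStyle(uint64_t value, uint32_t count) {
      return value << (count & 0x1f);  // andq(rcx, 0x1f) + shl_cl
    }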
@@ -2165,35 +2165,35 @@
   ASSERT(!dst.is(src1));
   ASSERT(!dst.is(src2));
   // Both operands must not be smis.
 #ifdef DEBUG
   Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
   Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
 #endif
   STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(0, Smi::FromInt(0));
   movl(kScratchRegister, Immediate(kSmiTagMask));
-  and_(kScratchRegister, src1);
+  andp(kScratchRegister, src1);
   testl(kScratchRegister, src2);
   // If non-zero then both are smis.
   j(not_zero, on_not_smis, near_jump);

   // Exactly one operand is a smi.
   ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
   // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
   subp(kScratchRegister, Immediate(1));
   // If src1 is a smi, then scratch register all 1s, else it is all 0s.
   movp(dst, src1);
-  xor_(dst, src2);
-  and_(dst, kScratchRegister);
+  xorp(dst, src2);
+  andp(dst, kScratchRegister);
   // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
-  xor_(dst, src1);
+  xorp(dst, src1);
   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
 }


 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
   ASSERT(is_uint6(shift));
   // There is a possible optimization if shift is in the range 60-63, but that
   // will (and must) never happen.
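Note: SelectNonSmi is a branch-free select. Given that exactly one operand is a smi (tag bit clear), (src1 & 1) - 1 is all ones when src1 is the smi and all zeros otherwise, and the masked xor then swaps in the other operand. C++ equivalent:

    #include <cstdint>

    // Precondition: exactly one of src1, src2 has tag bit 0 clear (a smi).
    uint64_t SelectNonSmi(uint64_t src1, uint64_t src2) {
      uint64_t mask = (src1 & 1) - 1;        // src1 smi -> ~0, else 0
      return ((src1 ^ src2) & mask) ^ src1;  // src1 smi -> src2, else src1
    }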
@@ -2256,21 +2256,21 @@


 void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
   Pop(scratch);
   // Low bits.
   shr(scratch, Immediate(kSmiShift));
   Pop(dst);
   shr(dst, Immediate(kSmiShift));
   // High bits.
   shl(dst, Immediate(64 - kSmiShift));
-  or_(dst, scratch);
+  orp(dst, scratch);
 }


 void MacroAssembler::Test(const Operand& src, Smi* source) {
   testl(Operand(src, kIntSize), Immediate(source->value()));
 }


 // ----------------------------------------------------------------------------

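Note: Push/PopInt64AsTwoSmis smuggle a raw 64-bit word across a GC-scanned stack by storing each 32-bit half in the payload position of a smi, so the collector only ever sees validly tagged words. The reassembly in PopInt64AsTwoSmis corresponds to:

    #include <cstdint>

    constexpr int kSmiShift = 32;  // 32-bit smis assumed

    uint64_t FromTwoSmis(uint64_t high_smi, uint64_t low_smi) {
      uint64_t low = low_smi >> kSmiShift;     // shr
      uint64_t high = (high_smi >> kSmiShift)  // shr
                      << (64 - kSmiShift);     // shl
      return high | low;                       // orp
    }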
@@ -2302,45 +2302,45 @@
   Label is_smi;
   Label load_result_from_cache;
   JumpIfSmi(object, &is_smi);
   CheckMap(object,
            isolate()->factory()->heap_number_map(),
            not_found,
            DONT_DO_SMI_CHECK);

   STATIC_ASSERT(8 == kDoubleSize);
   movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
-  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
-  and_(scratch, mask);
+  xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+  andp(scratch, mask);
   // Each entry in string cache consists of two pointer sized fields,
   // but times_twice_pointer_size (multiplication by 16) scale factor
   // is not supported by addrmode on x64 platform.
   // So we have to premultiply entry index before lookup.
   shl(scratch, Immediate(kPointerSizeLog2 + 1));

   Register index = scratch;
   Register probe = mask;
   movp(probe,
        FieldOperand(number_string_cache,
                     index,
                     times_1,
                     FixedArray::kHeaderSize));
   JumpIfSmi(probe, not_found);
   movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
   ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
   j(parity_even, not_found);  // Bail out if NaN is involved.
   j(not_equal, not_found);  // The cache did not contain this value.
   jmp(&load_result_from_cache);

   bind(&is_smi);
   SmiToInteger32(scratch, object);
-  and_(scratch, mask);
+  andp(scratch, mask);
   // Each entry in string cache consists of two pointer sized fields,
   // but times_twice_pointer_size (multiplication by 16) scale factor
   // is not supported by addrmode on x64 platform.
   // So we have to premultiply entry index before lookup.
   shl(scratch, Immediate(kPointerSizeLog2 + 1));

   // Check if the entry is the smi we are looking for.
   cmpp(object,
        FieldOperand(number_string_cache,
                     index,
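Note: the cache index for a heap number is the xor of the two 32-bit halves of the IEEE double, masked to the power-of-two cache size; that is what the movl / xorp / andp sequence computes from the two FieldOperands. Standalone sketch:

    #include <cstdint>
    #include <cstring>

    uint32_t NumberCacheIndex(double value, uint32_t mask /* size - 1 */) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t hash = static_cast<uint32_t>(bits >> 32)  // high half
                      ^ static_cast<uint32_t>(bits);     // xor low half
      return hash & mask;
    }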
@@ -3334,21 +3334,21 @@
 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
   movp(dst, FieldOperand(map, Map::kBitField3Offset));
   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
 }


 void MacroAssembler::EnumLength(Register dst, Register map) {
   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
   movp(dst, FieldOperand(map, Map::kBitField3Offset));
   Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
-  and_(dst, kScratchRegister);
+  andp(dst, kScratchRegister);
 }


 void MacroAssembler::DispatchMap(Register obj,
                                  Register unused,
                                  Handle<Map> map,
                                  Handle<Code> success,
                                  SmiCheckType smi_check_type) {
   Label fail;
   if (smi_check_type == DO_SMI_CHECK) {
@@ -3835,21 +3835,21 @@
     }
   } else if (arg_stack_space > 0) {
     subp(rsp, Immediate(arg_stack_space * kRegisterSize));
   }

   // Get the required frame alignment for the OS.
   const int kFrameAlignment = OS::ActivationFrameAlignment();
   if (kFrameAlignment > 0) {
     ASSERT(IsPowerOf2(kFrameAlignment));
     ASSERT(is_int8(kFrameAlignment));
-    and_(rsp, Immediate(-kFrameAlignment));
+    andp(rsp, Immediate(-kFrameAlignment));
   }

   // Patch the saved entry sp.
   movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
 }


 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
   EnterExitFramePrologue(true);

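Note: for a power-of-two alignment, -alignment in two's complement is exactly the mask with the low log2(alignment) bits clear, so the single andp rounds rsp down (never up) to an aligned address. Sketch:

    #include <cstdint>

    // alignment must be a power of two (the ASSERT(IsPowerOf2(...)) above).
    uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
      return sp & ~(alignment - 1);  // identical to sp & -alignment
    }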
@@ -4061,21 +4061,21 @@
   decl(r1);

   // Generate an unrolled loop that performs a few probes before giving up.
   for (int i = 0; i < kNumberDictionaryProbes; i++) {
     // Use r2 for index calculations and keep the hash intact in r0.
     movp(r2, r0);
     // Compute the masked index: (hash + i + i * i) & mask.
     if (i > 0) {
       addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
     }
-    and_(r2, r1);
+    andp(r2, r1);

     // Scale the index by multiplying by the entry size.
     ASSERT(SeededNumberDictionary::kEntrySize == 3);
     leap(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3

     // Check if the key matches.
     cmpp(key, FieldOperand(elements,
                            r2,
                            times_pointer_size,
                            SeededNumberDictionary::kElementsStartOffset));
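Note: the dictionary lookup is quadratic probing over a power-of-two table: per the comment above, probe i inspects (hash + i + i*i) & mask, with the first iteration skipping the add. Sketch:

    #include <cstdint>

    // mask == capacity - 1, capacity a power of two (the decl(r1) above
    // turns the capacity into the mask).
    uint32_t ProbeSlot(uint32_t hash, uint32_t mask, int i) {
      return (hash + i + i * i) & mask;  // addl + andp in the loop body
    }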
@@ -4286,21 +4286,21 @@
     addp(result, Immediate(kHeapObjectTag));
   }
 }


 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());

   // Make sure the object has no tag before resetting top.
-  and_(object, Immediate(~kHeapObjectTagMask));
+  andp(object, Immediate(~kHeapObjectTagMask));
   Operand top_operand = ExternalOperand(new_space_allocation_top);
 #ifdef DEBUG
   cmpp(object, top_operand);
   Check(below, kUndoAllocationOfNonAllocatedMemory);
 #endif
   movp(top_operand, object);
 }


 void MacroAssembler::AllocateHeapNumber(Register result,
@@ -4322,21 +4322,21 @@
                                           Register scratch3,
                                           Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
   const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
                                kObjectAlignmentMask;
   ASSERT(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
   leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
                          kHeaderAlignment));
-  and_(scratch1, Immediate(~kObjectAlignmentMask));
+  andp(scratch1, Immediate(~kObjectAlignmentMask));
   if (kHeaderAlignment > 0) {
     subp(scratch1, Immediate(kHeaderAlignment));
   }

   // Allocate two byte string in new space.
   Allocate(SeqTwoByteString::kHeaderSize,
            times_1,
            scratch1,
            result,
            scratch2,
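Note: the leap/andp pair is the usual round-up-to-alignment idiom folded into one address computation: add (alignment - 1) while scaling the length, then clear the low bits. Sketch:

    #include <cstdint>

    constexpr uintptr_t kObjectAlignmentMask = 7;  // 8-byte alignment assumed

    uintptr_t RoundUpToObjectAlignment(uintptr_t bytes) {
      return (bytes + kObjectAlignmentMask) & ~kObjectAlignmentMask;
    }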
@@ -4360,21 +4360,21 @@
                                          Register scratch2,
                                          Register scratch3,
                                          Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
   const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
                                kObjectAlignmentMask;
   movl(scratch1, length);
   ASSERT(kCharSize == 1);
   addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
-  and_(scratch1, Immediate(~kObjectAlignmentMask));
+  andp(scratch1, Immediate(~kObjectAlignmentMask));
   if (kHeaderAlignment > 0) {
     subp(scratch1, Immediate(kHeaderAlignment));
   }

   // Allocate ASCII string in new space.
   Allocate(SeqOneByteString::kHeaderSize,
            times_1,
            scratch1,
            result,
            scratch2,
@@ -4713,21 +4713,21 @@
   int frame_alignment = OS::ActivationFrameAlignment();
   ASSERT(frame_alignment != 0);
   ASSERT(num_arguments >= 0);

   // Make stack end at alignment and allocate space for arguments and old rsp.
   movp(kScratchRegister, rsp);
   ASSERT(IsPowerOf2(frame_alignment));
   int argument_slots_on_stack =
       ArgumentStackSlotsForCFunctionCall(num_arguments);
   subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
-  and_(rsp, Immediate(-frame_alignment));
+  andp(rsp, Immediate(-frame_alignment));
   movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
 }


 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
   LoadAddress(rax, function);
   CallCFunction(rax, num_arguments);
 }

@@ -4782,65 +4782,65 @@

 void MacroAssembler::CheckPageFlag(
     Register object,
     Register scratch,
     int mask,
     Condition cc,
     Label* condition_met,
     Label::Distance condition_met_distance) {
   ASSERT(cc == zero || cc == not_zero);
   if (scratch.is(object)) {
-    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+    andp(scratch, Immediate(~Page::kPageAlignmentMask));
   } else {
     movp(scratch, Immediate(~Page::kPageAlignmentMask));
-    and_(scratch, object);
+    andp(scratch, object);
   }
   if (mask < (1 << kBitsPerByte)) {
     testb(Operand(scratch, MemoryChunk::kFlagsOffset),
           Immediate(static_cast<uint8_t>(mask)));
   } else {
     testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
   }
   j(cc, condition_met, condition_met_distance);
 }


 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                         Register scratch,
                                         Label* if_deprecated) {
   if (map->CanBeDeprecated()) {
     Move(scratch, map);
     movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
     SmiToInteger32(scratch, scratch);
-    and_(scratch, Immediate(Map::Deprecated::kMask));
+    andp(scratch, Immediate(Map::Deprecated::kMask));
     j(not_zero, if_deprecated);
   }
 }


 void MacroAssembler::JumpIfBlack(Register object,
                                  Register bitmap_scratch,
                                  Register mask_scratch,
                                  Label* on_black,
                                  Label::Distance on_black_distance) {
   ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
   GetMarkBits(object, bitmap_scratch, mask_scratch);

   ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
   // The mask_scratch register contains a 1 at the position of the first bit
   // and a 0 at all other positions, including the position of the second bit.
   movp(rcx, mask_scratch);
   // Make rcx into a mask that covers both marking bits using the operation
   // rcx = mask | (mask << 1).
   leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
   // Note that we are using a 4-byte aligned 8-byte load.
-  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
   cmpp(mask_scratch, rcx);
   j(equal, on_black, on_black_distance);
 }


 // Detect some, but not all, common pointer-free objects. This is used by the
 // incremental write barrier which doesn't care about oddballs (they are always
 // marked black immediately so this code is not hit).
 void MacroAssembler::JumpIfDataObject(
     Register value,
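Note on JumpIfBlack: a black object is marked "10" in two adjacent bitmap bits. leap(rcx, m + m*2) builds m | (m << 1), a mask covering both bits, and the object is black iff under that mask only the first bit is set. C++ equivalent:

    #include <cstdint>

    // first_bit_mask has a single bit set at the object's first mark bit.
    bool IsBlack(uint32_t bitmap_cell, uint32_t first_bit_mask) {
      uint32_t both_bits = first_bit_mask * 3;             // m | (m << 1)
      return (bitmap_cell & both_bits) == first_bit_mask;  // andp/cmpp/j(equal)
    }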
@@ -4861,33 +4861,33 @@
   bind(&is_data_object);
 }


 void MacroAssembler::GetMarkBits(Register addr_reg,
                                  Register bitmap_reg,
                                  Register mask_reg) {
   ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
   movp(bitmap_reg, addr_reg);
   // Sign extended 32 bit immediate.
-  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
   movp(rcx, addr_reg);
   int shift =
       Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
   shrl(rcx, Immediate(shift));
-  and_(rcx,
+  andp(rcx,
        Immediate((Page::kPageAlignmentMask >> shift) &
                  ~(Bitmap::kBytesPerCell - 1)));

   addp(bitmap_reg, rcx);
   movp(rcx, addr_reg);
   shrl(rcx, Immediate(kPointerSizeLog2));
-  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
   movl(mask_reg, Immediate(1));
   shl_cl(mask_reg);
 }


 void MacroAssembler::EnsureNotWhite(
     Register value,
     Register bitmap_scratch,
     Register mask_scratch,
     Label* value_is_white_and_not_data,
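Note: GetMarkBits splits an address into (page base, bitmap cell, bit-within-cell), one mark bit per pointer-sized word. A simplified C++ rendering, with page/cell sizes assumed for illustration and the bitmap header offset omitted:

    #include <cstdint>

    constexpr uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // 1 MB pages
    constexpr int kPointerSizeLog2 = 3;                       // 8-byte words
    constexpr int kBitsPerCellLog2 = 5;                       // 32-bit cells

    void GetMarkBits(uintptr_t addr,
                     uintptr_t* page, uintptr_t* cell_index, uint32_t* mask) {
      *page = addr & ~kPageAlignmentMask;                     // first andp
      uintptr_t word = (addr & kPageAlignmentMask) >> kPointerSizeLog2;
      *cell_index = word >> kBitsPerCellLog2;
      *mask = 1u << (word & ((1u << kBitsPerCellLog2) - 1));  // shl_cl
    }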
@@ -4954,35 +4954,35 @@
   ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
   ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
   testb(instance_type, Immediate(kExternalStringTag));
   j(zero, &not_external, Label::kNear);
   movp(length, Immediate(ExternalString::kSize));
   jmp(&is_data_object, Label::kNear);

   bind(&not_external);
   // Sequential string, either ASCII or UC16.
   ASSERT(kOneByteStringTag == 0x04);
-  and_(length, Immediate(kStringEncodingMask));
-  xor_(length, Immediate(kStringEncodingMask));
+  andp(length, Immediate(kStringEncodingMask));
+  xorp(length, Immediate(kStringEncodingMask));
   addp(length, Immediate(0x04));
   // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
   imulp(length, FieldOperand(value, String::kLengthOffset));
   shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
   addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
-  and_(length, Immediate(~kObjectAlignmentMask));
+  andp(length, Immediate(~kObjectAlignmentMask));

   bind(&is_data_object);
   // Value is a data object, and it is white. Mark it black. Since we know
   // that the object is white we can make it black by flipping one bit.
-  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+  orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

-  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
   addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

   bind(&done);
 }


 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
   Label next, start;
   Register empty_fixed_array_value = r8;
   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
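Note on the andp/xorp/addp triple in EnsureNotWhite: it maps the encoding bit (0x04 = one-byte) to the character size already shifted left by two, so after multiplying by the length, a shift by 2 plus smi-untagging yields the byte count. In C++:

    #include <cstdint>

    constexpr uint32_t kStringEncodingMask = 0x04;  // 0x04 = one-byte string

    uint32_t CharSizeShiftedBy2(uint32_t instance_type) {
      uint32_t x = instance_type & kStringEncodingMask;  // 4 one-byte, 0 two-byte
      x ^= kStringEncodingMask;                          // 0 one-byte, 4 two-byte
      return x + 0x04;                                   // 4 (1<<2) or 8 (2<<2)
    }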
@@ -5057,38 +5057,38 @@
   ASSERT(!scratch1.is(scratch0));
   Register current = scratch0;
   Label loop_again;

   movp(current, object);

   // Loop based on the map going up the prototype chain.
   bind(&loop_again);
   movp(current, FieldOperand(current, HeapObject::kMapOffset));
   movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
-  and_(scratch1, Immediate(Map::kElementsKindMask));
+  andp(scratch1, Immediate(Map::kElementsKindMask));
   shr(scratch1, Immediate(Map::kElementsKindShift));
   cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
   j(equal, found);
   movp(current, FieldOperand(current, Map::kPrototypeOffset));
   CompareRoot(current, Heap::kNullValueRootIndex);
   j(not_equal, &loop_again);
 }


 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
   ASSERT(!dividend.is(rax));
   ASSERT(!dividend.is(rdx));
   MultiplierAndShift ms(divisor);
   movl(rax, Immediate(ms.multiplier()));
   imull(dividend);
   if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
   if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
   if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
   movl(rax, dividend);
   shrl(rax, Immediate(31));
   addl(rdx, rax);
 }


 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64
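Note: TruncatingDiv is the standard multiply-and-shift replacement for division by a constant; the trailing shrl(31)/addl adds the dividend's sign bit to round the quotient toward zero. A concrete sketch for divisor 3 (magic constant taken from the classic tables, not from MultiplierAndShift):

    #include <cstdint>

    int32_t TruncatingDivBy3(int32_t n) {
      int64_t product = static_cast<int64_t>(n) * 0x55555556;  // imull
      int32_t q = static_cast<int32_t>(product >> 32);         // high half (rdx)
      q += static_cast<uint32_t>(n) >> 31;                     // shrl 31; addl
      return q;                                                // n / 3, truncated
    }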