OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 559 matching lines...)
570 // The assert checks that the constants for the maximum number of digits | 570 // The assert checks that the constants for the maximum number of digits |
571 // for an array index cached in the hash field and the number of bits | 571 // for an array index cached in the hash field and the number of bits |
572 // reserved for it do not conflict. | 572 // reserved for it do not conflict. |
573 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | 573 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < |
574 (1 << String::kArrayIndexValueBits)); | 574 (1 << String::kArrayIndexValueBits)); |
575 // We want the smi-tagged index in key. Even if we subsequently go to | 575 // We want the smi-tagged index in key. Even if we subsequently go to |
576 // the slow case, converting the key to a smi is always valid. | 576 // the slow case, converting the key to a smi is always valid. |
577 // key: string key | 577 // key: string key |
578 // hash: key's hash field, including its array index value. | 578 // hash: key's hash field, including its array index value. |
579 andp(hash, Immediate(String::kArrayIndexValueMask)); | 579 andp(hash, Immediate(String::kArrayIndexValueMask)); |
580 shr(hash, Immediate(String::kHashShift)); | 580 shrp(hash, Immediate(String::kHashShift)); |
581 // Here we actually clobber the key, which will be used if we call into | 581 // Here we actually clobber the key, which will be used if we call into |
582 // the runtime later. However, as the new key is the numeric value of a | 582 // the runtime later. However, as the new key is the numeric value of a |
583 // string key, there is no difference in using either key. | 583 // string key, there is no difference in using either key. |
584 Integer32ToSmi(index, hash); | 584 Integer32ToSmi(index, hash); |
585 } | 585 } |
586 | 586 |
587 | 587 |
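The mask-and-shift pair above extracts the array index that the hash field caches. A minimal C++ model of that extraction, with the constants written out as assumptions (the authoritative values live in objects.h and vary by version):

```cpp
#include <cstdint>

// Assumed field layout: two hash-flag bits sit below the cached index.
// kHashShift == 2 and kArrayIndexValueBits == 24 are illustrative here,
// not authoritative.
constexpr uint32_t kHashShift = 2;
constexpr uint32_t kArrayIndexValueBits = 24;
constexpr uint32_t kArrayIndexValueMask =
    ((1u << kArrayIndexValueBits) - 1) << kHashShift;

uint32_t ArrayIndexFromHashField(uint32_t hash_field) {
  // andp(hash, Immediate(kArrayIndexValueMask)); shrp(hash, kHashShift);
  return (hash_field & kArrayIndexValueMask) >> kHashShift;
}
```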
588 void MacroAssembler::CallRuntime(const Runtime::Function* f, | 588 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
589 int num_arguments, | 589 int num_arguments, |
590 SaveFPRegsMode save_doubles) { | 590 SaveFPRegsMode save_doubles) { |
(...skipping 498 matching lines...)
1089 negp(dst); | 1089 negp(dst); |
1090 } | 1090 } |
1091 } | 1091 } |
1092 | 1092 |
1093 | 1093 |
1094 void MacroAssembler::Integer32ToSmi(Register dst, Register src) { | 1094 void MacroAssembler::Integer32ToSmi(Register dst, Register src) { |
1095 STATIC_ASSERT(kSmiTag == 0); | 1095 STATIC_ASSERT(kSmiTag == 0); |
1096 if (!dst.is(src)) { | 1096 if (!dst.is(src)) { |
1097 movl(dst, src); | 1097 movl(dst, src); |
1098 } | 1098 } |
1099 shl(dst, Immediate(kSmiShift)); | 1099 shlp(dst, Immediate(kSmiShift)); |
1100 } | 1100 } |
1101 | 1101 |
1102 | 1102 |
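Since this diff touches nearly every smi shift, it helps to keep the x64 smi layout in mind: the 32-bit payload lives in the upper half of the 64-bit word, so the low bits, including the tag bit, are all zero. A minimal sketch of the tagging step above, assuming kSmiShift == 32 as on this port:

```cpp
#include <cstdint>

constexpr int kSmiShift = 32;  // assumed x64 value

int64_t Integer32ToSmi(int32_t value) {
  // movl zero-extends the payload; shlp then moves it into the upper half.
  return static_cast<int64_t>(
      static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShift);
}
```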
1103 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { | 1103 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { |
1104 if (emit_debug_code()) { | 1104 if (emit_debug_code()) { |
1105 testb(dst, Immediate(0x01)); | 1105 testb(dst, Immediate(0x01)); |
1106 Label ok; | 1106 Label ok; |
1107 j(zero, &ok, Label::kNear); | 1107 j(zero, &ok, Label::kNear); |
1108 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation); | 1108 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation); |
1109 bind(&ok); | 1109 bind(&ok); |
1110 } | 1110 } |
1111 ASSERT(kSmiShift % kBitsPerByte == 0); | 1111 ASSERT(kSmiShift % kBitsPerByte == 0); |
1112 movl(Operand(dst, kSmiShift / kBitsPerByte), src); | 1112 movl(Operand(dst, kSmiShift / kBitsPerByte), src); |
1113 } | 1113 } |
1114 | 1114 |
1115 | 1115 |
1116 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, | 1116 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, |
1117 Register src, | 1117 Register src, |
1118 int constant) { | 1118 int constant) { |
1119 if (dst.is(src)) { | 1119 if (dst.is(src)) { |
1120 addl(dst, Immediate(constant)); | 1120 addl(dst, Immediate(constant)); |
1121 } else { | 1121 } else { |
1122 leal(dst, Operand(src, constant)); | 1122 leal(dst, Operand(src, constant)); |
1123 } | 1123 } |
1124 shl(dst, Immediate(kSmiShift)); | 1124 shlp(dst, Immediate(kSmiShift)); |
1125 } | 1125 } |
1126 | 1126 |
1127 | 1127 |
1128 void MacroAssembler::SmiToInteger32(Register dst, Register src) { | 1128 void MacroAssembler::SmiToInteger32(Register dst, Register src) { |
1129 STATIC_ASSERT(kSmiTag == 0); | 1129 STATIC_ASSERT(kSmiTag == 0); |
1130 if (!dst.is(src)) { | 1130 if (!dst.is(src)) { |
1131 movp(dst, src); | 1131 movp(dst, src); |
1132 } | 1132 } |
1133 shr(dst, Immediate(kSmiShift)); | 1133 shrq(dst, Immediate(kSmiShift)); |
1134 } | 1134 } |
1135 | 1135 |
1136 | 1136 |
1137 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) { | 1137 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) { |
1138 movl(dst, Operand(src, kSmiShift / kBitsPerByte)); | 1138 movl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
1139 } | 1139 } |
1140 | 1140 |
1141 | 1141 |
1142 void MacroAssembler::SmiToInteger64(Register dst, Register src) { | 1142 void MacroAssembler::SmiToInteger64(Register dst, Register src) { |
1143 STATIC_ASSERT(kSmiTag == 0); | 1143 STATIC_ASSERT(kSmiTag == 0); |
1144 if (!dst.is(src)) { | 1144 if (!dst.is(src)) { |
1145 movp(dst, src); | 1145 movp(dst, src); |
1146 } | 1146 } |
1147 sar(dst, Immediate(kSmiShift)); | 1147 sarq(dst, Immediate(kSmiShift)); |
1148 } | 1148 } |
1149 | 1149 |
1150 | 1150 |
1151 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) { | 1151 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) { |
1152 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); | 1152 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); |
1153 } | 1153 } |
1154 | 1154 |
1155 | 1155 |
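Untagging runs the other way: the register forms shift the payload back down (sarq keeps the sign for a 64-bit result; shrq suffices in SmiToInteger32 since the high bits are discarded anyway), while the Operand forms skip the shift entirely. Because kSmiShift is a whole number of bytes, on a little-endian machine the payload is simply the aligned dword at byte offset kSmiShift / kBitsPerByte == 4, which movl or movsxlq can load directly. A sketch under those assumptions:

```cpp
#include <cstdint>
#include <cstring>

constexpr int kSmiShift = 32;    // assumed
constexpr int kBitsPerByte = 8;

int64_t SmiToInteger64(int64_t smi) {
  return smi >> kSmiShift;  // sarq: arithmetic shift on mainstream compilers
}

// Model of the Operand overloads: load the high dword of the slot.
int32_t SmiSlotToInteger32(const void* slot) {
  int32_t payload;
  std::memcpy(&payload,
              static_cast<const char*>(slot) + kSmiShift / kBitsPerByte,
              sizeof(payload));  // movl(dst, Operand(src, kSmiShift / 8))
  return payload;
}
```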
1156 void MacroAssembler::SmiTest(Register src) { | 1156 void MacroAssembler::SmiTest(Register src) { |
1157 AssertSmi(src); | 1157 AssertSmi(src); |
(...skipping 64 matching lines...)
1222 ASSERT(power >= 0); | 1222 ASSERT(power >= 0); |
1223 ASSERT(power < 64); | 1223 ASSERT(power < 64); |
1224 if (power == 0) { | 1224 if (power == 0) { |
1225 SmiToInteger64(dst, src); | 1225 SmiToInteger64(dst, src); |
1226 return; | 1226 return; |
1227 } | 1227 } |
1228 if (!dst.is(src)) { | 1228 if (!dst.is(src)) { |
1229 movp(dst, src); | 1229 movp(dst, src); |
1230 } | 1230 } |
1231 if (power < kSmiShift) { | 1231 if (power < kSmiShift) { |
1232 sar(dst, Immediate(kSmiShift - power)); | 1232 sarp(dst, Immediate(kSmiShift - power)); |
1233 } else if (power > kSmiShift) { | 1233 } else if (power > kSmiShift) { |
1234 shl(dst, Immediate(power - kSmiShift)); | 1234 shlp(dst, Immediate(power - kSmiShift)); |
1235 } | 1235 } |
1236 } | 1236 } |
1237 | 1237 |
1238 | 1238 |
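The power-of-two helper above (its signature falls in the elided lines) folds the untag (>> kSmiShift) and the multiply (<< power) into a single shift in whichever direction wins. Behavioral sketch, same assumed layout:

```cpp
#include <cstdint>

constexpr int kSmiShift = 32;  // assumed

int64_t PositiveSmiTimesPowerOfTwo(int64_t smi, int power /* 0..63 */) {
  if (power < kSmiShift) return smi >> (kSmiShift - power);  // sarp
  if (power > kSmiShift)                                     // shlp
    return static_cast<int64_t>(static_cast<uint64_t>(smi)
                                << (power - kSmiShift));
  return smi;  // power == kSmiShift: the tagged bits already equal value * 2^32
}
```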
1239 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst, | 1239 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst, |
1240 Register src, | 1240 Register src, |
1241 int power) { | 1241 int power) { |
1242 ASSERT((0 <= power) && (power < 32)); | 1242 ASSERT((0 <= power) && (power < 32)); |
1243 if (dst.is(src)) { | 1243 if (dst.is(src)) { |
1244 shr(dst, Immediate(power + kSmiShift)); | 1244 shrp(dst, Immediate(power + kSmiShift)); |
1245 } else { | 1245 } else { |
1246 UNIMPLEMENTED(); // Not used. | 1246 UNIMPLEMENTED(); // Not used. |
1247 } | 1247 } |
1248 } | 1248 } |
1249 | 1249 |
1250 | 1250 |
1251 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2, | 1251 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2, |
1252 Label* on_not_smis, | 1252 Label* on_not_smis, |
1253 Label::Distance near_jump) { | 1253 Label::Distance near_jump) { |
1254 if (dst.is(src1) || dst.is(src2)) { | 1254 if (dst.is(src1) || dst.is(src2)) { |
(...skipping 22 matching lines...)
1277 STATIC_ASSERT(kSmiTag == 0); | 1277 STATIC_ASSERT(kSmiTag == 0); |
1278 testb(src, Immediate(kSmiTagMask)); | 1278 testb(src, Immediate(kSmiTagMask)); |
1279 return zero; | 1279 return zero; |
1280 } | 1280 } |
1281 | 1281 |
1282 | 1282 |
1283 Condition MacroAssembler::CheckNonNegativeSmi(Register src) { | 1283 Condition MacroAssembler::CheckNonNegativeSmi(Register src) { |
1284 STATIC_ASSERT(kSmiTag == 0); | 1284 STATIC_ASSERT(kSmiTag == 0); |
1285 // Test that the two bits selected by the mask 0x8000000000000001 are zero. | 1285 // Test that the two bits selected by the mask 0x8000000000000001 are zero. |
1286 movp(kScratchRegister, src); | 1286 movp(kScratchRegister, src); |
1287 rol(kScratchRegister, Immediate(1)); | 1287 rolp(kScratchRegister, Immediate(1)); |
1288 testb(kScratchRegister, Immediate(3)); | 1288 testb(kScratchRegister, Immediate(3)); |
1289 return zero; | 1289 return zero; |
1290 } | 1290 } |
1291 | 1291 |
1292 | 1292 |
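The rotate above is a compact way to test that mask: rotating left by one moves the sign bit (bit 63) into bit 0 and the tag bit (bit 0) into bit 1, so a single byte-wide testb against 3 checks both ends of the word at once. Equivalent C++:

```cpp
#include <cstdint>

// A non-negative smi needs bit 63 (sign) and bit 0 (tag) both clear.
bool CheckNonNegativeSmi(uint64_t value) {
  uint64_t rotated = (value << 1) | (value >> 63);  // rolp(reg, Immediate(1))
  return (rotated & 3) == 0;                        // testb(reg, Immediate(3))
}
```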
1293 Condition MacroAssembler::CheckBothSmi(Register first, Register second) { | 1293 Condition MacroAssembler::CheckBothSmi(Register first, Register second) { |
1294 if (first.is(second)) { | 1294 if (first.is(second)) { |
1295 return CheckSmi(first); | 1295 return CheckSmi(first); |
1296 } | 1296 } |
1297 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3); | 1297 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3); |
1298 leal(kScratchRegister, Operand(first, second, times_1, 0)); | 1298 leal(kScratchRegister, Operand(first, second, times_1, 0)); |
1299 testb(kScratchRegister, Immediate(0x03)); | 1299 testb(kScratchRegister, Immediate(0x03)); |
1300 return zero; | 1300 return zero; |
1301 } | 1301 } |
1302 | 1302 |
1303 | 1303 |
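CheckBothSmi leans on the tag layout asserted just above it (smi tag 00 on this port, heap-object tag 01): the low two bits of a smi are zero, so the 32-bit sum of two smis is still 0 mod 4, while any heap object in the pair leaves 01 or 10 in the low bits. One leal plus one testb covers both registers. Sketch:

```cpp
#include <cstdint>

bool CheckBothSmi(uint64_t first, uint64_t second) {
  // leal(kScratchRegister, Operand(first, second, times_1, 0)): 32-bit add.
  uint32_t sum = static_cast<uint32_t>(first) + static_cast<uint32_t>(second);
  return (sum & 0x03) == 0;  // testb(kScratchRegister, Immediate(0x03))
}
```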
1304 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first, | 1304 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first, |
1305 Register second) { | 1305 Register second) { |
1306 if (first.is(second)) { | 1306 if (first.is(second)) { |
1307 return CheckNonNegativeSmi(first); | 1307 return CheckNonNegativeSmi(first); |
1308 } | 1308 } |
1309 movp(kScratchRegister, first); | 1309 movp(kScratchRegister, first); |
1310 orp(kScratchRegister, second); | 1310 orp(kScratchRegister, second); |
1311 rol(kScratchRegister, Immediate(1)); | 1311 rolp(kScratchRegister, Immediate(1)); |
1312 testl(kScratchRegister, Immediate(3)); | 1312 testl(kScratchRegister, Immediate(3)); |
1313 return zero; | 1313 return zero; |
1314 } | 1314 } |
1315 | 1315 |
1316 | 1316 |
1317 Condition MacroAssembler::CheckEitherSmi(Register first, | 1317 Condition MacroAssembler::CheckEitherSmi(Register first, |
1318 Register second, | 1318 Register second, |
1319 Register scratch) { | 1319 Register scratch) { |
1320 if (first.is(second)) { | 1320 if (first.is(second)) { |
1321 return CheckSmi(first); | 1321 return CheckSmi(first); |
(...skipping 705 matching lines...)
2027 } | 2027 } |
2028 } | 2028 } |
2029 | 2029 |
2030 | 2030 |
2031 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst, | 2031 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst, |
2032 Register src, | 2032 Register src, |
2033 int shift_value) { | 2033 int shift_value) { |
2034 ASSERT(is_uint5(shift_value)); | 2034 ASSERT(is_uint5(shift_value)); |
2035 if (shift_value > 0) { | 2035 if (shift_value > 0) { |
2036 if (dst.is(src)) { | 2036 if (dst.is(src)) { |
2037 sar(dst, Immediate(shift_value + kSmiShift)); | 2037 sarp(dst, Immediate(shift_value + kSmiShift)); |
2038 shl(dst, Immediate(kSmiShift)); | 2038 shlp(dst, Immediate(kSmiShift)); |
2039 } else { | 2039 } else { |
2040 UNIMPLEMENTED(); // Not used. | 2040 UNIMPLEMENTED(); // Not used. |
2041 } | 2041 } |
2042 } | 2042 } |
2043 } | 2043 } |
2044 | 2044 |
2045 | 2045 |
2046 void MacroAssembler::SmiShiftLeftConstant(Register dst, | 2046 void MacroAssembler::SmiShiftLeftConstant(Register dst, |
2047 Register src, | 2047 Register src, |
2048 int shift_value) { | 2048 int shift_value) { |
2049 if (!dst.is(src)) { | 2049 if (!dst.is(src)) { |
2050 movp(dst, src); | 2050 movp(dst, src); |
2051 } | 2051 } |
2052 if (shift_value > 0) { | 2052 if (shift_value > 0) { |
2053 shl(dst, Immediate(shift_value)); | 2053 shlp(dst, Immediate(shift_value)); |
2054 } | 2054 } |
2055 } | 2055 } |
2056 | 2056 |
2057 | 2057 |
2058 void MacroAssembler::SmiShiftLogicalRightConstant( | 2058 void MacroAssembler::SmiShiftLogicalRightConstant( |
2059 Register dst, Register src, int shift_value, | 2059 Register dst, Register src, int shift_value, |
2060 Label* on_not_smi_result, Label::Distance near_jump) { | 2060 Label* on_not_smi_result, Label::Distance near_jump) { |
2061 // Logical right shift interprets its result as an *unsigned* number. | 2061 // Logical right shift interprets its result as an *unsigned* number. |
2062 if (dst.is(src)) { | 2062 if (dst.is(src)) { |
2063 UNIMPLEMENTED(); // Not used. | 2063 UNIMPLEMENTED(); // Not used. |
2064 } else { | 2064 } else { |
2065 movp(dst, src); | 2065 movp(dst, src); |
2066 if (shift_value == 0) { | 2066 if (shift_value == 0) { |
2067 testp(dst, dst); | 2067 testp(dst, dst); |
2068 j(negative, on_not_smi_result, near_jump); | 2068 j(negative, on_not_smi_result, near_jump); |
2069 } | 2069 } |
2070 shr(dst, Immediate(shift_value + kSmiShift)); | 2070 shrq(dst, Immediate(shift_value + kSmiShift)); |
2071 shl(dst, Immediate(kSmiShift)); | 2071 shlq(dst, Immediate(kSmiShift)); |
2072 } | 2072 } |
2073 } | 2073 } |
2074 | 2074 |
2075 | 2075 |
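The shift-by-zero special case above exists because x >>> 0 must produce the *unsigned* view of x: a negative smi would become a 32-bit value above the positive smi range, so the code bails out instead of retagging it. For any positive shift amount the result always fits. Behavioral sketch, assuming kSmiShift == 32:

```cpp
#include <cstdint>
#include <optional>

constexpr int kSmiShift = 32;  // assumed

std::optional<int64_t> SmiShiftLogicalRightConstant(int64_t smi,
                                                    int shift /* 0..31 */) {
  if (shift == 0 && smi < 0) return std::nullopt;  // jump to on_not_smi_result
  uint64_t payload = static_cast<uint64_t>(smi) >> (shift + kSmiShift);  // shrq
  return static_cast<int64_t>(payload << kSmiShift);                     // shlq
}
```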
2076 void MacroAssembler::SmiShiftLeft(Register dst, | 2076 void MacroAssembler::SmiShiftLeft(Register dst, |
2077 Register src1, | 2077 Register src1, |
2078 Register src2) { | 2078 Register src2) { |
2079 ASSERT(!dst.is(rcx)); | 2079 ASSERT(!dst.is(rcx)); |
2080 // Untag shift amount. | 2080 // Untag shift amount. |
2081 if (!dst.is(src1)) { | 2081 if (!dst.is(src1)) { |
2082 movq(dst, src1); | 2082 movq(dst, src1); |
2083 } | 2083 } |
2084 SmiToInteger32(rcx, src2); | 2084 SmiToInteger32(rcx, src2); |
2085 // Shift amount is specified by the lower 5 bits, not six as in the shl opcode. | 2085 // Shift amount is specified by the lower 5 bits, not six as in the shl opcode. |
2086 andq(rcx, Immediate(0x1f)); | 2086 andq(rcx, Immediate(0x1f)); |
2087 shl_cl(dst); | 2087 shlq_cl(dst); |
2088 } | 2088 } |
2089 | 2089 |
2090 | 2090 |
2091 void MacroAssembler::SmiShiftLogicalRight(Register dst, | 2091 void MacroAssembler::SmiShiftLogicalRight(Register dst, |
2092 Register src1, | 2092 Register src1, |
2093 Register src2, | 2093 Register src2, |
2094 Label* on_not_smi_result, | 2094 Label* on_not_smi_result, |
2095 Label::Distance near_jump) { | 2095 Label::Distance near_jump) { |
2096 ASSERT(!dst.is(kScratchRegister)); | 2096 ASSERT(!dst.is(kScratchRegister)); |
2097 ASSERT(!src1.is(kScratchRegister)); | 2097 ASSERT(!src1.is(kScratchRegister)); |
2098 ASSERT(!src2.is(kScratchRegister)); | 2098 ASSERT(!src2.is(kScratchRegister)); |
2099 ASSERT(!dst.is(rcx)); | 2099 ASSERT(!dst.is(rcx)); |
2100 // dst and src1 can be the same, because the one case that bails out | 2100 // dst and src1 can be the same, because the one case that bails out |
2101 // is a shift by 0, which leaves dst, and therefore src1, unchanged. | 2101 // is a shift by 0, which leaves dst, and therefore src1, unchanged. |
2102 if (src1.is(rcx) || src2.is(rcx)) { | 2102 if (src1.is(rcx) || src2.is(rcx)) { |
2103 movq(kScratchRegister, rcx); | 2103 movq(kScratchRegister, rcx); |
2104 } | 2104 } |
2105 if (!dst.is(src1)) { | 2105 if (!dst.is(src1)) { |
2106 movq(dst, src1); | 2106 movq(dst, src1); |
2107 } | 2107 } |
2108 SmiToInteger32(rcx, src2); | 2108 SmiToInteger32(rcx, src2); |
2109 orl(rcx, Immediate(kSmiShift)); | 2109 orl(rcx, Immediate(kSmiShift)); |
2110 shr_cl(dst); // Shift amount is (rcx & 0x1f) + 32. | 2110 shrq_cl(dst); // Shift amount is (rcx & 0x1f) + 32. |
2111 shl(dst, Immediate(kSmiShift)); | 2111 shlq(dst, Immediate(kSmiShift)); |
2112 testq(dst, dst); | 2112 testq(dst, dst); |
2113 if (src1.is(rcx) || src2.is(rcx)) { | 2113 if (src1.is(rcx) || src2.is(rcx)) { |
2114 Label positive_result; | 2114 Label positive_result; |
2115 j(positive, &positive_result, Label::kNear); | 2115 j(positive, &positive_result, Label::kNear); |
2116 if (src1.is(rcx)) { | 2116 if (src1.is(rcx)) { |
2117 movq(src1, kScratchRegister); | 2117 movq(src1, kScratchRegister); |
2118 } else { | 2118 } else { |
2119 movq(src2, kScratchRegister); | 2119 movq(src2, kScratchRegister); |
2120 } | 2120 } |
2121 jmp(on_not_smi_result, near_jump); | 2121 jmp(on_not_smi_result, near_jump); |
(...skipping 15 matching lines...)
2137 if (src1.is(rcx)) { | 2137 if (src1.is(rcx)) { |
2138 movp(kScratchRegister, src1); | 2138 movp(kScratchRegister, src1); |
2139 } else if (src2.is(rcx)) { | 2139 } else if (src2.is(rcx)) { |
2140 movp(kScratchRegister, src2); | 2140 movp(kScratchRegister, src2); |
2141 } | 2141 } |
2142 if (!dst.is(src1)) { | 2142 if (!dst.is(src1)) { |
2143 movp(dst, src1); | 2143 movp(dst, src1); |
2144 } | 2144 } |
2145 SmiToInteger32(rcx, src2); | 2145 SmiToInteger32(rcx, src2); |
2146 orl(rcx, Immediate(kSmiShift)); | 2146 orl(rcx, Immediate(kSmiShift)); |
2147 sar_cl(dst); // Shift by 32 + (original rcx & 0x1f). | 2147 sarp_cl(dst); // Shift by 32 + (original rcx & 0x1f). |
2148 shl(dst, Immediate(kSmiShift)); | 2148 shlp(dst, Immediate(kSmiShift)); |
2149 if (src1.is(rcx)) { | 2149 if (src1.is(rcx)) { |
2150 movp(src1, kScratchRegister); | 2150 movp(src1, kScratchRegister); |
2151 } else if (src2.is(rcx)) { | 2151 } else if (src2.is(rcx)) { |
2152 movp(src2, kScratchRegister); | 2152 movp(src2, kScratchRegister); |
2153 } | 2153 } |
2154 } | 2154 } |
2155 | 2155 |
2156 | 2156 |
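SmiShiftLogicalRight above and its arithmetic sibling share the same trick: after SmiToInteger32 the count in rcx is at most 31, so orl(rcx, Immediate(kSmiShift)) sets bit 5 and turns it into 32 + count. A single 64-bit shift by that amount then untags the operand and applies the JavaScript shift in one instruction, and the left shift afterwards retags the result. In C++ terms, under the same assumed layout:

```cpp
#include <cstdint>

constexpr int kSmiShift = 32;  // assumed

int64_t SmiShiftArithmeticRight(int64_t smi, int count /* 0..31 */) {
  int combined = count | kSmiShift;   // orl(rcx, Immediate(kSmiShift))
  int64_t payload = smi >> combined;  // sarp_cl: untag and shift together
  return static_cast<int64_t>(static_cast<uint64_t>(payload) << kSmiShift);
}
```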
2157 void MacroAssembler::SelectNonSmi(Register dst, | 2157 void MacroAssembler::SelectNonSmi(Register dst, |
2158 Register src1, | 2158 Register src1, |
(...skipping 35 matching lines...)
2194 SmiIndex MacroAssembler::SmiToIndex(Register dst, | 2194 SmiIndex MacroAssembler::SmiToIndex(Register dst, |
2195 Register src, | 2195 Register src, |
2196 int shift) { | 2196 int shift) { |
2197 ASSERT(is_uint6(shift)); | 2197 ASSERT(is_uint6(shift)); |
2198 // There is a possible optimization if shift is in the range 60-63, but that | 2198 // There is a possible optimization if shift is in the range 60-63, but that |
2199 // will (and must) never happen. | 2199 // will (and must) never happen. |
2200 if (!dst.is(src)) { | 2200 if (!dst.is(src)) { |
2201 movq(dst, src); | 2201 movq(dst, src); |
2202 } | 2202 } |
2203 if (shift < kSmiShift) { | 2203 if (shift < kSmiShift) { |
2204 sar(dst, Immediate(kSmiShift - shift)); | 2204 sarq(dst, Immediate(kSmiShift - shift)); |
2205 } else { | 2205 } else { |
2206 shl(dst, Immediate(shift - kSmiShift)); | 2206 shlq(dst, Immediate(shift - kSmiShift)); |
2207 } | 2207 } |
2208 return SmiIndex(dst, times_1); | 2208 return SmiIndex(dst, times_1); |
2209 } | 2209 } |
2210 | 2210 |
2211 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, | 2211 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, |
2212 Register src, | 2212 Register src, |
2213 int shift) { | 2213 int shift) { |
2214 // Register src holds a positive smi. | 2214 // Register src holds a positive smi. |
2215 ASSERT(is_uint6(shift)); | 2215 ASSERT(is_uint6(shift)); |
2216 if (!dst.is(src)) { | 2216 if (!dst.is(src)) { |
2217 movq(dst, src); | 2217 movq(dst, src); |
2218 } | 2218 } |
2219 negq(dst); | 2219 negq(dst); |
2220 if (shift < kSmiShift) { | 2220 if (shift < kSmiShift) { |
2221 sar(dst, Immediate(kSmiShift - shift)); | 2221 sarq(dst, Immediate(kSmiShift - shift)); |
2222 } else { | 2222 } else { |
2223 shl(dst, Immediate(shift - kSmiShift)); | 2223 shlq(dst, Immediate(shift - kSmiShift)); |
2224 } | 2224 } |
2225 return SmiIndex(dst, times_1); | 2225 return SmiIndex(dst, times_1); |
2226 } | 2226 } |
2227 | 2227 |
2228 | 2228 |
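SmiToIndex is the same fold applied to addressing: to index memory with a smi scaled by 2^shift, the untag and the scale collapse into one net shift, and the returned SmiIndex can always use times_1 because the scaling is already baked into the register. Sketch:

```cpp
#include <cstdint>

constexpr int kSmiShift = 32;  // assumed

int64_t SmiToScaledIndex(int64_t smi, int shift /* 0..63 */) {
  if (shift < kSmiShift) return smi >> (kSmiShift - shift);  // sarq
  return static_cast<int64_t>(
      static_cast<uint64_t>(smi) << (shift - kSmiShift));    // shlq
}
```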
2229 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { | 2229 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { |
2230 ASSERT_EQ(0, kSmiShift % kBitsPerByte); | 2230 ASSERT_EQ(0, kSmiShift % kBitsPerByte); |
2231 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); | 2231 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
2232 } | 2232 } |
2233 | 2233 |
2234 | 2234 |
2235 void MacroAssembler::Push(Smi* source) { | 2235 void MacroAssembler::Push(Smi* source) { |
2236 intptr_t smi = reinterpret_cast<intptr_t>(source); | 2236 intptr_t smi = reinterpret_cast<intptr_t>(source); |
2237 if (is_int32(smi)) { | 2237 if (is_int32(smi)) { |
2238 Push(Immediate(static_cast<int32_t>(smi))); | 2238 Push(Immediate(static_cast<int32_t>(smi))); |
2239 } else { | 2239 } else { |
2240 Register constant = GetSmiConstant(source); | 2240 Register constant = GetSmiConstant(source); |
2241 Push(constant); | 2241 Push(constant); |
2242 } | 2242 } |
2243 } | 2243 } |
2244 | 2244 |
2245 | 2245 |
2246 void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) { | 2246 void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) { |
2247 movp(scratch, src); | 2247 movp(scratch, src); |
2248 // High bits. | 2248 // High bits. |
2249 shr(src, Immediate(64 - kSmiShift)); | 2249 shrp(src, Immediate(64 - kSmiShift)); |
2250 shl(src, Immediate(kSmiShift)); | 2250 shlp(src, Immediate(kSmiShift)); |
2251 Push(src); | 2251 Push(src); |
2252 // Low bits. | 2252 // Low bits. |
2253 shl(scratch, Immediate(kSmiShift)); | 2253 shlp(scratch, Immediate(kSmiShift)); |
2254 Push(scratch); | 2254 Push(scratch); |
2255 } | 2255 } |
2256 | 2256 |
2257 | 2257 |
2258 void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) { | 2258 void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) { |
2259 Pop(scratch); | 2259 Pop(scratch); |
2260 // Low bits. | 2260 // Low bits. |
2261 shr(scratch, Immediate(kSmiShift)); | 2261 shrp(scratch, Immediate(kSmiShift)); |
2262 Pop(dst); | 2262 Pop(dst); |
2263 shr(dst, Immediate(kSmiShift)); | 2263 shrp(dst, Immediate(kSmiShift)); |
2264 // High bits. | 2264 // High bits. |
2265 shl(dst, Immediate(64 - kSmiShift)); | 2265 shlp(dst, Immediate(64 - kSmiShift)); |
2266 orp(dst, scratch); | 2266 orp(dst, scratch); |
2267 } | 2267 } |
2268 | 2268 |
2269 | 2269 |
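The push/pop pair above smuggles a raw 64-bit word through GC-visible stack slots by parking each half in the payload bits of its own smi, high half first. Round-trip model, assuming kSmiShift == 32 (note 64 - kSmiShift is also 32 here, which is why the shifts look symmetric):

```cpp
#include <cstdint>

constexpr int kSmiShift = 32;  // assumed

void SplitInt64(uint64_t value, uint64_t* high_smi, uint64_t* low_smi) {
  *high_smi = (value >> (64 - kSmiShift)) << kSmiShift;  // shrp; shlp; Push
  *low_smi = value << kSmiShift;                         // shlp; Push
}

uint64_t JoinInt64(uint64_t high_smi, uint64_t low_smi) {
  uint64_t low = low_smi >> kSmiShift;                   // Pop; shrp
  uint64_t high = (high_smi >> kSmiShift) << (64 - kSmiShift);
  return high | low;                                     // orp
}
```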
2270 void MacroAssembler::Test(const Operand& src, Smi* source) { | 2270 void MacroAssembler::Test(const Operand& src, Smi* source) { |
2271 testl(Operand(src, kIntSize), Immediate(source->value())); | 2271 testl(Operand(src, kIntSize), Immediate(source->value())); |
2272 } | 2272 } |
2273 | 2273 |
2274 | 2274 |
2275 // ---------------------------------------------------------------------------- | 2275 // ---------------------------------------------------------------------------- |
(...skipping 32 matching lines...)
2308 DONT_DO_SMI_CHECK); | 2308 DONT_DO_SMI_CHECK); |
2309 | 2309 |
2310 STATIC_ASSERT(8 == kDoubleSize); | 2310 STATIC_ASSERT(8 == kDoubleSize); |
2311 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); | 2311 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); |
2312 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset)); | 2312 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset)); |
2313 andp(scratch, mask); | 2313 andp(scratch, mask); |
2314 // Each entry in the string cache consists of two pointer-sized fields, | 2314 // Each entry in the string cache consists of two pointer-sized fields, |
2315 // but the times_twice_pointer_size (multiplication by 16) scale factor | 2315 // but the times_twice_pointer_size (multiplication by 16) scale factor |
2316 // is not supported by the addressing modes on the x64 platform, so we | 2316 // is not supported by the addressing modes on the x64 platform, so we |
2317 // have to premultiply the entry index before the lookup. | 2317 // have to premultiply the entry index before the lookup. |
2318 shl(scratch, Immediate(kPointerSizeLog2 + 1)); | 2318 shlp(scratch, Immediate(kPointerSizeLog2 + 1)); |
2319 | 2319 |
2320 Register index = scratch; | 2320 Register index = scratch; |
2321 Register probe = mask; | 2321 Register probe = mask; |
2322 movp(probe, | 2322 movp(probe, |
2323 FieldOperand(number_string_cache, | 2323 FieldOperand(number_string_cache, |
2324 index, | 2324 index, |
2325 times_1, | 2325 times_1, |
2326 FixedArray::kHeaderSize)); | 2326 FixedArray::kHeaderSize)); |
2327 JumpIfSmi(probe, not_found); | 2327 JumpIfSmi(probe, not_found); |
2328 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); | 2328 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); |
2329 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset)); | 2329 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset)); |
2330 j(parity_even, not_found); // Bail out if NaN is involved. | 2330 j(parity_even, not_found); // Bail out if NaN is involved. |
2331 j(not_equal, not_found); // The cache did not contain this value. | 2331 j(not_equal, not_found); // The cache did not contain this value. |
2332 jmp(&load_result_from_cache); | 2332 jmp(&load_result_from_cache); |
2333 | 2333 |
2334 bind(&is_smi); | 2334 bind(&is_smi); |
2335 SmiToInteger32(scratch, object); | 2335 SmiToInteger32(scratch, object); |
2336 andp(scratch, mask); | 2336 andp(scratch, mask); |
2337 // Each entry in the string cache consists of two pointer-sized fields, | 2337 // Each entry in the string cache consists of two pointer-sized fields, |
2338 // but the times_twice_pointer_size (multiplication by 16) scale factor | 2338 // but the times_twice_pointer_size (multiplication by 16) scale factor |
2339 // is not supported by the addressing modes on the x64 platform, so we | 2339 // is not supported by the addressing modes on the x64 platform, so we |
2340 // have to premultiply the entry index before the lookup. | 2340 // have to premultiply the entry index before the lookup. |
2341 shl(scratch, Immediate(kPointerSizeLog2 + 1)); | 2341 shlp(scratch, Immediate(kPointerSizeLog2 + 1)); |
2342 | 2342 |
2343 // Check if the entry is the smi we are looking for. | 2343 // Check if the entry is the smi we are looking for. |
2344 cmpp(object, | 2344 cmpp(object, |
2345 FieldOperand(number_string_cache, | 2345 FieldOperand(number_string_cache, |
2346 index, | 2346 index, |
2347 times_1, | 2347 times_1, |
2348 FixedArray::kHeaderSize)); | 2348 FixedArray::kHeaderSize)); |
2349 j(not_equal, not_found); | 2349 j(not_equal, not_found); |
2350 | 2350 |
2351 // Get the result from the cache. | 2351 // Get the result from the cache. |
(...skipping 534 matching lines...)
2886 Pop(ExternalOperand(handler_address)); | 2886 Pop(ExternalOperand(handler_address)); |
2887 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); | 2887 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); |
2888 } | 2888 } |
2889 | 2889 |
2890 | 2890 |
2891 void MacroAssembler::JumpToHandlerEntry() { | 2891 void MacroAssembler::JumpToHandlerEntry() { |
2892 // Compute the handler entry address and jump to it. The handler table is | 2892 // Compute the handler entry address and jump to it. The handler table is |
2893 // a fixed array of (smi-tagged) code offsets. | 2893 // a fixed array of (smi-tagged) code offsets. |
2894 // rax = exception, rdi = code object, rdx = state. | 2894 // rax = exception, rdi = code object, rdx = state. |
2895 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset)); | 2895 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset)); |
2896 shr(rdx, Immediate(StackHandler::kKindWidth)); | 2896 shrp(rdx, Immediate(StackHandler::kKindWidth)); |
2897 movp(rdx, | 2897 movp(rdx, |
2898 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize)); | 2898 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize)); |
2899 SmiToInteger64(rdx, rdx); | 2899 SmiToInteger64(rdx, rdx); |
2900 leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize)); | 2900 leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize)); |
2901 jmp(rdi); | 2901 jmp(rdi); |
2902 } | 2902 } |
2903 | 2903 |
2904 | 2904 |
2905 void MacroAssembler::Throw(Register value) { | 2905 void MacroAssembler::Throw(Register value) { |
2906 // Adjust this code if not the case. | 2906 // Adjust this code if not the case. |
(...skipping 1968 matching lines...)
4875 shrl(rcx, Immediate(shift)); | 4875 shrl(rcx, Immediate(shift)); |
4876 andp(rcx, | 4876 andp(rcx, |
4877 Immediate((Page::kPageAlignmentMask >> shift) & | 4877 Immediate((Page::kPageAlignmentMask >> shift) & |
4878 ~(Bitmap::kBytesPerCell - 1))); | 4878 ~(Bitmap::kBytesPerCell - 1))); |
4879 | 4879 |
4880 addp(bitmap_reg, rcx); | 4880 addp(bitmap_reg, rcx); |
4881 movp(rcx, addr_reg); | 4881 movp(rcx, addr_reg); |
4882 shrl(rcx, Immediate(kPointerSizeLog2)); | 4882 shrl(rcx, Immediate(kPointerSizeLog2)); |
4883 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); | 4883 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); |
4884 movl(mask_reg, Immediate(1)); | 4884 movl(mask_reg, Immediate(1)); |
4885 shl_cl(mask_reg); | 4885 shlp_cl(mask_reg); |
4886 } | 4886 } |
4887 | 4887 |
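The bitmap helper whose tail appears above maps a heap address to its mark bit: every pointer-aligned word on the page owns one bit, so the bit index within a bitmap cell is the word index modulo the cell width, and the mask is a single shifted bit. Loose model with assumed constants (kPointerSizeLog2 == 3 on x64; 32-bit bitmap cells):

```cpp
#include <cstdint>

constexpr int kPointerSizeLog2 = 3;  // assumed
constexpr int kBitsPerCellLog2 = 5;  // assumed: 32 bits per cell

uint32_t MarkBitMask(uintptr_t addr) {
  uintptr_t word_index = addr >> kPointerSizeLog2;             // shrl
  uint32_t bit = word_index & ((1u << kBitsPerCellLog2) - 1);  // andp
  return 1u << bit;                                            // shlp_cl
}
```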
4888 | 4888 |
4889 void MacroAssembler::EnsureNotWhite( | 4889 void MacroAssembler::EnsureNotWhite( |
4890 Register value, | 4890 Register value, |
4891 Register bitmap_scratch, | 4891 Register bitmap_scratch, |
4892 Register mask_scratch, | 4892 Register mask_scratch, |
4893 Label* value_is_white_and_not_data, | 4893 Label* value_is_white_and_not_data, |
4894 Label::Distance distance) { | 4894 Label::Distance distance) { |
4895 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx)); | 4895 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx)); |
(...skipping 63 matching lines...)
4959 jmp(&is_data_object, Label::kNear); | 4959 jmp(&is_data_object, Label::kNear); |
4960 | 4960 |
4961 bind(¬_external); | 4961 bind(¬_external); |
4962 // Sequential string, either ASCII or UC16. | 4962 // Sequential string, either ASCII or UC16. |
4963 ASSERT(kOneByteStringTag == 0x04); | 4963 ASSERT(kOneByteStringTag == 0x04); |
4964 andp(length, Immediate(kStringEncodingMask)); | 4964 andp(length, Immediate(kStringEncodingMask)); |
4965 xorp(length, Immediate(kStringEncodingMask)); | 4965 xorp(length, Immediate(kStringEncodingMask)); |
4966 addp(length, Immediate(0x04)); | 4966 addp(length, Immediate(0x04)); |
4967 // Value is now either 4 (if ASCII) or 8 (if UC16), i.e. the char size shifted left by 2. | 4967 // Value is now either 4 (if ASCII) or 8 (if UC16), i.e. the char size shifted left by 2. |
4968 imulp(length, FieldOperand(value, String::kLengthOffset)); | 4968 imulp(length, FieldOperand(value, String::kLengthOffset)); |
4969 shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); | 4969 shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); |
4970 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); | 4970 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); |
4971 andp(length, Immediate(~kObjectAlignmentMask)); | 4971 andp(length, Immediate(~kObjectAlignmentMask)); |
4972 | 4972 |
4973 bind(&is_data_object); | 4973 bind(&is_data_object); |
4974 // Value is a data object, and it is white. Mark it black. Since we know | 4974 // Value is a data object, and it is white. Mark it black. Since we know |
4975 // that the object is white we can make it black by flipping one bit. | 4975 // that the object is white we can make it black by flipping one bit. |
4976 orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); | 4976 orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); |
4977 | 4977 |
4978 andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); | 4978 andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); |
4979 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length); | 4979 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length); |
(...skipping 78 matching lines...)
5058 Register current = scratch0; | 5058 Register current = scratch0; |
5059 Label loop_again; | 5059 Label loop_again; |
5060 | 5060 |
5061 movp(current, object); | 5061 movp(current, object); |
5062 | 5062 |
5063 // Loop based on the map going up the prototype chain. | 5063 // Loop based on the map going up the prototype chain. |
5064 bind(&loop_again); | 5064 bind(&loop_again); |
5065 movp(current, FieldOperand(current, HeapObject::kMapOffset)); | 5065 movp(current, FieldOperand(current, HeapObject::kMapOffset)); |
5066 movp(scratch1, FieldOperand(current, Map::kBitField2Offset)); | 5066 movp(scratch1, FieldOperand(current, Map::kBitField2Offset)); |
5067 andp(scratch1, Immediate(Map::kElementsKindMask)); | 5067 andp(scratch1, Immediate(Map::kElementsKindMask)); |
5068 shr(scratch1, Immediate(Map::kElementsKindShift)); | 5068 shrp(scratch1, Immediate(Map::kElementsKindShift)); |
5069 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS)); | 5069 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS)); |
5070 j(equal, found); | 5070 j(equal, found); |
5071 movp(current, FieldOperand(current, Map::kPrototypeOffset)); | 5071 movp(current, FieldOperand(current, Map::kPrototypeOffset)); |
5072 CompareRoot(current, Heap::kNullValueRootIndex); | 5072 CompareRoot(current, Heap::kNullValueRootIndex); |
5073 j(not_equal, &loop_again); | 5073 j(not_equal, &loop_again); |
5074 } | 5074 } |
5075 | 5075 |
5076 | 5076 |
5077 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { | 5077 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { |
5078 ASSERT(!dividend.is(rax)); | 5078 ASSERT(!dividend.is(rax)); |
5079 ASSERT(!dividend.is(rdx)); | 5079 ASSERT(!dividend.is(rdx)); |
5080 MultiplierAndShift ms(divisor); | 5080 MultiplierAndShift ms(divisor); |
5081 movl(rax, Immediate(ms.multiplier())); | 5081 movl(rax, Immediate(ms.multiplier())); |
5082 imull(dividend); | 5082 imull(dividend); |
5083 if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend); | 5083 if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend); |
5084 if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend); | 5084 if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend); |
5085 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift())); | 5085 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift())); |
5086 movl(rax, dividend); | 5086 movl(rax, dividend); |
5087 shrl(rax, Immediate(31)); | 5087 shrl(rax, Immediate(31)); |
5088 addl(rdx, rax); | 5088 addl(rdx, rax); |
5089 } | 5089 } |
5090 | 5090 |
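TruncatingDiv is the classic multiply-by-magic-number division: imull leaves the high half of the 64-bit product in rdx, the optional add/sub corrects for a multiplier whose sign differs from the divisor's, sarl applies the post-shift, and adding the dividend's sign bit turns the floored quotient into a truncated one. A worked instance for one concrete divisor (the multiplier 0x55555556 for dividing by 3 is illustrative; V8 derives the pair in MultiplierAndShift, and the sign corrections drop out because this multiplier is positive):

```cpp
#include <cstdint>

int32_t TruncatingDivBy3(int32_t dividend) {
  int64_t product = INT64_C(0x55555556) * dividend;        // imull
  int32_t quotient = static_cast<int32_t>(product >> 32);  // high half in rdx
  // The post-shift is 0 for this divisor, so no sarl step is needed.
  quotient += static_cast<uint32_t>(dividend) >> 31;       // shrl(rax, 31); addl
  return quotient;                                         // e.g. 7 -> 2, -7 -> -2
}
```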
5091 | 5091 |
5092 } } // namespace v8::internal | 5092 } } // namespace v8::internal |
5093 | 5093 |
5094 #endif // V8_TARGET_ARCH_X64 | 5094 #endif // V8_TARGET_ARCH_X64 |