Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 442 matching lines...) | |
| 453 Label* not_numbers); | 453 Label* not_numbers); |
| 454 | 454 |
| 455 // Takes the operands in rdx and rax and loads them as integers in rax | 455 // Takes the operands in rdx and rax and loads them as integers in rax |
| 456 // and rcx. | 456 // and rcx. |
| 457 static void LoadAsIntegers(MacroAssembler* masm, | 457 static void LoadAsIntegers(MacroAssembler* masm, |
| 458 Label* operand_conversion_failure, | 458 Label* operand_conversion_failure, |
| 459 Register heap_number_map); | 459 Register heap_number_map); |
| 460 // As above, but we know the operands to be numbers. In that case, | 460 // As above, but we know the operands to be numbers. In that case, |
| 461 // conversion can't fail. | 461 // conversion can't fail. |
| 462 static void LoadNumbersAsIntegers(MacroAssembler* masm); | 462 static void LoadNumbersAsIntegers(MacroAssembler* masm); |
| 463 | |
| 464 // Tries to convert two values to smis losslessly. | |
| 465 // This fails if either argument is neither a Smi nor a HeapNumber, | |
| 466 // or if it's a HeapNumber with a value that can't be converted | |
| 467 // losslessly to a Smi. In that case, control transitions to the | |
| 468 // on_not_smis label. | |
| 469 // On success, either control goes to the on_success label, | |
| 470 // or it falls through at the end of the code. | |
| 471 // (If you just want to fall through in all cases, put the success | |
|
William Hesse (2011/04/08 12:01:27): Why let the user of this function make a mistake.
Lasse Reichstein (2011/04/08 12:07:46): Done.
| |
| 472 // label at the fall-through point). | |
| 473 // At this point, both first and second hold Smi tagged values. | |
| 474 // One of first or second must be non-Smi when entering. | |
| 475 static void NumbersToSmis(MacroAssembler* masm, | |
| 476 Register first, | |
| 477 Register second, | |
| 478 Register scratch1, | |
| 479 Register scratch2, | |
| 480 Register scratch3, | |
| 481 Label* on_success, | |
| 482 Label* on_not_smis); | |
| 463 }; | 483 }; |
| 464 | 484 |
| 465 | 485 |
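A note on the "Smi tagged values" wording in the NumbersToSmis contract above (background, not part of the patch): on x64 at the time of this review, a Smi stored its 32-bit signed payload in the upper half of a 64-bit word, leaving the low tag bit clear, while heap object pointers carry a set tag bit. A minimal sketch of that representation, with hypothetical names standing in for the Integer32ToSmi and SmiToInteger32 macro instructions and the smi check:

```cpp
// Illustration only (not V8 code): models the x64 Smi representation,
// assuming a 32-bit payload shifted into the high half of the word.
#include <cstdint>
#include <cstdio>

static const int kSmiShift = 32;  // Assumption: payload lives in bits 32..63.

inline int64_t TagSmi(int32_t value) {
  // Models Integer32ToSmi: shift the payload into the high 32 bits (via
  // uint64_t to keep the shift well-defined for negative payloads). The
  // low bits, including the tag bit, become zero.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShift);
}

inline int32_t UntagSmi(int64_t smi) {
  // Models SmiToInteger32: an arithmetic shift recovers the signed payload.
  return static_cast<int32_t>(smi >> kSmiShift);
}

inline bool IsSmi(int64_t word) {
  // Heap object pointers have the low tag bit set; Smis have it clear.
  return (word & 1) == 0;
}

int main() {
  int64_t smi = TagSmi(-42);
  std::printf("tagged=0x%016llx untagged=%d is_smi=%d\n",
              static_cast<unsigned long long>(smi), UntagSmi(smi),
              static_cast<int>(IsSmi(smi)));
  return 0;
}
```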
| 466 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | 486 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
| 467 // 1. Move arguments into rdx, rax except for DIV and MOD, which need the | 487 // 1. Move arguments into rdx, rax except for DIV and MOD, which need the |
| 468 // dividend in rax and rdx free for the division. Use rax, rbx for those. | 488 // dividend in rax and rdx free for the division. Use rax, rbx for those. |
| 469 Comment load_comment(masm, "-- Load arguments"); | 489 Comment load_comment(masm, "-- Load arguments"); |
| 470 Register left = rdx; | 490 Register left = rdx; |
| 471 Register right = rax; | 491 Register right = rax; |
| 472 if (op_ == Token::DIV || op_ == Token::MOD) { | 492 if (op_ == Token::DIV || op_ == Token::MOD) { |
| (...skipping 625 matching lines...) | |
| 1098 overwrite_name, | 1118 overwrite_name, |
| 1099 TRBinaryOpIC::GetName(operands_type_)); | 1119 TRBinaryOpIC::GetName(operands_type_)); |
| 1100 return name_; | 1120 return name_; |
| 1101 } | 1121 } |
| 1102 | 1122 |
| 1103 | 1123 |
| 1104 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 1124 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
| 1105 Label* slow, | 1125 Label* slow, |
| 1106 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 1126 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
| 1107 | 1127 |
| 1128 // Arguments to TypeRecordingBinaryOpStub are in rdx and rax. | |
| 1129 Register left = rdx; | |
| 1130 Register right = rax; | |
| 1131 | |
| 1108 // We only generate heapnumber answers for overflowing calculations | 1132 // We only generate heapnumber answers for overflowing calculations |
| 1109 // for the four basic arithmetic operations. | 1133 // for the four basic arithmetic operations and logical right shift by 0. |
| 1110 bool generate_inline_heapnumber_results = | 1134 bool generate_inline_heapnumber_results = |
| 1111 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && | 1135 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && |
| 1112 (op_ == Token::ADD || op_ == Token::SUB || | 1136 (op_ == Token::ADD || op_ == Token::SUB || |
| 1113 op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR); | 1137 op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR); |
| 1114 | 1138 |
| 1115 // Arguments to TypeRecordingBinaryOpStub are in rdx and rax. | |
| 1116 Register left = rdx; | |
| 1117 Register right = rax; | |
| 1118 | |
| 1119 | |
| 1120 // Smi check of both operands. If op is BIT_OR, the check is delayed | 1139 // Smi check of both operands. If op is BIT_OR, the check is delayed |
| 1121 // until after the OR operation. | 1140 // until after the OR operation. |
| 1122 Label not_smis; | 1141 Label not_smis; |
| 1123 Label use_fp_on_smis; | 1142 Label use_fp_on_smis; |
| 1124 Label restore_MOD_registers; // Only used if op_ == Token::MOD. | 1143 Label fail; |
| 1125 | 1144 |
| 1126 if (op_ != Token::BIT_OR) { | 1145 if (op_ != Token::BIT_OR) { |
| 1127 Comment smi_check_comment(masm, "-- Smi check arguments"); | 1146 Comment smi_check_comment(masm, "-- Smi check arguments"); |
| 1128 __ JumpIfNotBothSmi(left, right, ¬_smis); | 1147 __ JumpIfNotBothSmi(left, right, ¬_smis); |
| 1129 } | 1148 } |
| 1130 | 1149 |
| 1150 Label smi_values; | |
| 1151 __ bind(&smi_values); | |
| 1131 // Perform the operation. | 1152 // Perform the operation. |
| 1132 Comment perform_smi(masm, "-- Perform smi operation"); | 1153 Comment perform_smi(masm, "-- Perform smi operation"); |
| 1133 switch (op_) { | 1154 switch (op_) { |
| 1134 case Token::ADD: | 1155 case Token::ADD: |
| 1135 ASSERT(right.is(rax)); | 1156 ASSERT(right.is(rax)); |
| 1136 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. | 1157 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. |
| 1137 break; | 1158 break; |
| 1138 | 1159 |
| 1139 case Token::SUB: | 1160 case Token::SUB: |
| 1140 __ SmiSub(left, left, right, &use_fp_on_smis); | 1161 __ SmiSub(left, left, right, &use_fp_on_smis); |
| (...skipping 18 matching lines...) | |
| 1159 // SmiMod will not accept left in rdx or right in rax. | 1180 // SmiMod will not accept left in rdx or right in rax. |
| 1160 left = rcx; | 1181 left = rcx; |
| 1161 right = rbx; | 1182 right = rbx; |
| 1162 __ movq(rbx, rax); | 1183 __ movq(rbx, rax); |
| 1163 __ movq(rcx, rdx); | 1184 __ movq(rcx, rdx); |
| 1164 __ SmiMod(rax, left, right, &use_fp_on_smis); | 1185 __ SmiMod(rax, left, right, &use_fp_on_smis); |
| 1165 break; | 1186 break; |
| 1166 | 1187 |
| 1167 case Token::BIT_OR: { | 1188 case Token::BIT_OR: { |
| 1168 ASSERT(right.is(rax)); | 1189 ASSERT(right.is(rax)); |
| 1169 __ movq(rcx, right); // Save the right operand. | 1190 __ SmiOrIfSmis(right, right, left, ¬_smis); // BIT_OR is commutative. |
| 1170 __ SmiOr(right, right, left); // BIT_OR is commutative. | |
| 1171 __ JumpIfNotSmi(right, ¬_smis); // Test delayed until after BIT_OR. | |
| 1172 break; | 1191 break; |
| 1173 } | 1192 } |
| 1174 case Token::BIT_XOR: | 1193 case Token::BIT_XOR: |
| 1175 ASSERT(right.is(rax)); | 1194 ASSERT(right.is(rax)); |
| 1176 __ SmiXor(right, right, left); // BIT_XOR is commutative. | 1195 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
| 1177 break; | 1196 break; |
| 1178 | 1197 |
| 1179 case Token::BIT_AND: | 1198 case Token::BIT_AND: |
| 1180 ASSERT(right.is(rax)); | 1199 ASSERT(right.is(rax)); |
| 1181 __ SmiAnd(right, right, left); // BIT_AND is commutative. | 1200 __ SmiAnd(right, right, left); // BIT_AND is commutative. |
| (...skipping 44 matching lines...) | |
| 1226 case Token::ADD: __ addsd(xmm0, xmm1); break; | 1245 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 1227 case Token::SUB: __ subsd(xmm0, xmm1); break; | 1246 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 1228 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 1247 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 1229 case Token::DIV: __ divsd(xmm0, xmm1); break; | 1248 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 1230 default: UNREACHABLE(); | 1249 default: UNREACHABLE(); |
| 1231 } | 1250 } |
| 1232 } | 1251 } |
| 1233 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); | 1252 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
| 1234 __ movq(rax, rcx); | 1253 __ movq(rax, rcx); |
| 1235 __ ret(0); | 1254 __ ret(0); |
| 1255 } else { | |
| 1256 __ jmp(&fail); | |
| 1236 } | 1257 } |
| 1237 } | 1258 } |
| 1238 | 1259 |
| 1239 // 7. Non-smi operands reach the end of the code generated by | 1260 // 7. Non-smi operands reach the end of the code generated by |
| 1240 // GenerateSmiCode, and fall through to subsequent code, | 1261 // GenerateSmiCode, and fall through to subsequent code, |
| 1241 // with the operands in rdx and rax. | 1262 // with the operands in rdx and rax. |
| 1263 // But first we check if non-smi values are HeapNumbers holding | |
| 1264 // values that could be smi. | |
| 1265 __ bind(¬_smis); | |
| 1242 Comment done_comment(masm, "-- Enter non-smi code"); | 1266 Comment done_comment(masm, "-- Enter non-smi code"); |
| 1243 __ bind(¬_smis); | 1267 FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx, |
| 1244 if (op_ == Token::BIT_OR) { | 1268 &smi_values, &fail); |
| 1245 __ movq(right, rcx); | 1269 __ jmp(&smi_values); |
| 1246 } | 1270 __ bind(&fail); |
| 1247 } | 1271 } |
| 1248 | 1272 |
| 1249 | 1273 |
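Two notes on the control flow above, stripped of assembler detail. First, the comment change at NEW line 1133 reflects that JavaScript's logical right shift can leave the smi range even for a shift count of zero: `(-1) >>> 0` is 4294967295, which only fits in a HeapNumber. Second, the new `smi_values`/`fail` labels turn the stub into a "try smis, convert, retry once" loop, so HeapNumbers holding values like 7.0 still take the fast smi path instead of the type-feedback slow path. A self-contained C++ model of that flow for Token::ADD (illustration only; every name here is a hypothetical stand-in for an emitted assembly path, not a real V8 function):

```cpp
#include <cstdint>
#include <cstdio>
#include <initializer_list>

struct Value {
  bool is_smi;
  int32_t smi;    // Payload when is_smi.
  double number;  // Models a HeapNumber payload when !is_smi.
};

// Compact stand-in for FloatingPointHelper::NumbersToSmis (a bit-level
// sketch of the lossless test follows its implementation below; -0.0
// handling is elided here). Like the stub, it may leave the first
// operand converted even when it fails on the second.
static bool ModelNumbersToSmis(Value* first, Value* second) {
  for (Value* v : {first, second}) {
    if (v->is_smi) continue;
    int32_t as_int = static_cast<int32_t>(v->number);
    if (static_cast<double>(as_int) != v->number) return false;  // lossy
    *v = Value{true, as_int, 0.0};
  }
  return true;
}

// Mirrors SmiAdd's overflow check: success means a smi result exists.
static bool TrySmiAdd(int32_t a, int32_t b, int32_t* result) {
  int64_t wide = static_cast<int64_t>(a) + b;
  if (wide < INT32_MIN || wide > INT32_MAX) return false;  // use_fp_on_smis
  *result = static_cast<int32_t>(wide);
  return true;
}

static Value GenerateSmiCodeFlow(Value left, Value right,
                                 bool allow_heapnumber_results) {
  for (int attempt = 0; attempt < 2; ++attempt) {  // the smi_values label
    if (left.is_smi && right.is_smi) {
      int32_t result;
      if (TrySmiAdd(left.smi, right.smi, &result))
        return Value{true, result, 0.0};
      if (allow_heapnumber_results)  // use_fp_on_smis: addsd + box result
        return Value{false, 0, static_cast<double>(left.smi) + right.smi};
      break;  // jmp(&fail)
    }
    // not_smis: demote HeapNumbers if lossless, then retry the smi path.
    if (attempt == 0 && ModelNumbersToSmis(&left, &right)) continue;
    break;  // fail: fall through to the non-smi code after the stub.
  }
  return Value{false, 0, 0.0};  // Placeholder for the generic slow path.
}

int main() {
  Value a{false, 0, 7.0};  // HeapNumber holding a smi-representable 7.0.
  Value b{true, 35, 0.0};  // Smi 35.
  Value r = GenerateSmiCodeFlow(a, b, true);
  std::printf("is_smi=%d smi=%d\n", r.is_smi, r.smi);  // is_smi=1 smi=42
  return 0;
}
```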
| 1250 void TypeRecordingBinaryOpStub::GenerateFloatingPointCode( | 1274 void TypeRecordingBinaryOpStub::GenerateFloatingPointCode( |
| 1251 MacroAssembler* masm, | 1275 MacroAssembler* masm, |
| 1252 Label* allocation_failure, | 1276 Label* allocation_failure, |
| 1253 Label* non_numeric_failure) { | 1277 Label* non_numeric_failure) { |
| 1254 switch (op_) { | 1278 switch (op_) { |
| 1255 case Token::ADD: | 1279 case Token::ADD: |
| 1256 case Token::SUB: | 1280 case Token::SUB: |
| (...skipping 801 matching lines...) | |
| 2058 __ cvtlsi2sd(xmm0, kScratchRegister); | 2082 __ cvtlsi2sd(xmm0, kScratchRegister); |
| 2059 __ JumpIfNotSmi(rax, &load_nonsmi_rax); | 2083 __ JumpIfNotSmi(rax, &load_nonsmi_rax); |
| 2060 | 2084 |
| 2061 __ bind(&load_smi_rax); | 2085 __ bind(&load_smi_rax); |
| 2062 __ SmiToInteger32(kScratchRegister, rax); | 2086 __ SmiToInteger32(kScratchRegister, rax); |
| 2063 __ cvtlsi2sd(xmm1, kScratchRegister); | 2087 __ cvtlsi2sd(xmm1, kScratchRegister); |
| 2064 __ bind(&done); | 2088 __ bind(&done); |
| 2065 } | 2089 } |
| 2066 | 2090 |
| 2067 | 2091 |
| 2092 void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, | |
| 2093 Register first, | |
| 2094 Register second, | |
| 2095 Register scratch1, | |
| 2096 Register scratch2, | |
| 2097 Register scratch3, | |
| 2098 Label* on_success, | |
| 2099 Label* on_not_smis) { | |
| 2100 Register heap_number_root = scratch3; | |
| 2101 Register smi_result = scratch1; | |
| 2102 __ LoadRoot(heap_number_root, Heap::kHeapNumberMapRootIndex); | |
|
William Hesse (2011/04/08 12:01:27): Shouldn't this be called heap_number_map?
Lasse Reichstein (2011/04/08 12:07:46): Absolutely.
| |
| 2103 | |
| 2104 NearLabel first_smi, check_second; | |
| 2105 __ JumpIfSmi(first, &first_smi); | |
| 2106 __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_root); | |
| 2107 __ j(not_equal, on_not_smis); | |
| 2108 // Convert HeapNumber to smi if possible. | |
| 2109 __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); | |
| 2110 __ movq(scratch2, xmm0); | |
| 2111 __ cvttsd2siq(smi_result, xmm0); | |
| 2112 // Check if conversion was successful by converting back and | |
| 2113 // comparing to the original double's bits. | |
| 2114 __ cvtlsi2sd(xmm1, smi_result); | |
| 2115 __ movq(kScratchRegister, xmm1); | |
| 2116 __ cmpq(scratch2, kScratchRegister); | |
| 2117 __ j(not_equal, on_not_smis); | |
| 2118 __ Integer32ToSmi(first, smi_result); | |
| 2119 | |
| 2120 __ bind(&check_second); | |
| 2121 __ JumpIfSmi(second, on_success); | |
| 2122 __ bind(&first_smi); | |
| 2123 if (FLAG_debug_code) { | |
| 2124 // One of first or second should be non-smi if we get here. | |
|
William Hesse (2011/04/08 12:01:27): Second should be non-smi if we get here.
Lasse Reichstein (2011/04/08 12:07:46): Done.
| |
| 2125 __ AbortIfSmi(second); | |
| 2126 } | |
| 2127 __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_root); | |
| 2128 __ j(not_equal, on_not_smis); | |
| 2129 // Convert second to smi, if possible. | |
| 2130 __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); | |
| 2131 __ movq(scratch2, xmm0); | |
| 2132 __ cvttsd2siq(smi_result, xmm0); | |
| 2133 __ cvtlsi2sd(xmm1, smi_result); | |
| 2134 __ movq(kScratchRegister, xmm1); | |
| 2135 __ cmpq(scratch2, kScratchRegister); | |
| 2136 __ j(not_equal, on_not_smis); | |
| 2137 __ Integer32ToSmi(second, smi_result); | |
| 2138 } | |
| 2139 | |
| 2140 | |
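The conversion above relies on an instruction-level round trip: cvttsd2siq truncates the double to an integer, cvtlsi2sd (whose source operand is 32-bit) converts it back, and comparing the bit patterns of the two doubles rejects anything that did not survive — fractions, values outside the 32-bit payload range, and -0.0, which round-trips to a value numerically equal to +0.0 but differing in its sign bit, so the movq/cmpq bit compare catches it. A minimal C++ sketch of the same test, under the assumption that cvtlsi2sd reads only the low 32 bits of smi_result:

```cpp
// Illustration only (not V8 code): models the lossless double-to-smi
// test in NumbersToSmis, assuming a 32-bit smi payload.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static bool DoubleToSmi(double value, int32_t* out) {
  // Keep the C++ cast defined; the hardware cvttsd2siq instead yields a
  // sentinel for NaN/overflow that the bit compare below would reject.
  if (!std::isfinite(value) || std::fabs(value) >= 9.2e18) return false;
  int64_t truncated = static_cast<int64_t>(value);  // cvttsd2siq
  int32_t low32 = static_cast<int32_t>(truncated);  // 32-bit smi payload
  double back = static_cast<double>(low32);         // cvtlsi2sd (32-bit src)
  uint64_t value_bits, back_bits;
  std::memcpy(&value_bits, &value, sizeof(double));  // movq scratch2, xmm0
  std::memcpy(&back_bits, &back, sizeof(double));    // movq kScratch, xmm1
  if (value_bits != back_bits) return false;         // cmpq: not lossless
  *out = low32;
  return true;
}

int main() {
  const double tests[] = {5.0, 5.5, -0.0, 2147483648.0};
  for (double d : tests) {
    int32_t smi;
    std::printf("%.1f -> %s\n", d, DoubleToSmi(d, &smi) ? "smi" : "not smi");
  }
  return 0;  // Only 5.0 converts; the others fail the round-trip compare.
}
```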
| 2068 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { | 2141 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { |
| 2069 Label slow, done; | 2142 Label slow, done; |
| 2070 | 2143 |
| 2071 if (op_ == Token::SUB) { | 2144 if (op_ == Token::SUB) { |
| 2072 if (include_smi_code_) { | 2145 if (include_smi_code_) { |
| 2073 // Check whether the value is a smi. | 2146 // Check whether the value is a smi. |
| 2074 Label try_float; | 2147 Label try_float; |
| 2075 __ JumpIfNotSmi(rax, &try_float); | 2148 __ JumpIfNotSmi(rax, &try_float); |
| 2076 if (negative_zero_ == kIgnoreNegativeZero) { | 2149 if (negative_zero_ == kIgnoreNegativeZero) { |
| 2077 __ SmiCompare(rax, Smi::FromInt(0)); | 2150 __ SmiCompare(rax, Smi::FromInt(0)); |
| (...skipping 3064 matching lines...) | |
| 5142 // Do a tail call to the rewritten stub. | 5215 // Do a tail call to the rewritten stub. |
| 5143 __ jmp(rdi); | 5216 __ jmp(rdi); |
| 5144 } | 5217 } |
| 5145 | 5218 |
| 5146 | 5219 |
| 5147 #undef __ | 5220 #undef __ |
| 5148 | 5221 |
| 5149 } } // namespace v8::internal | 5222 } } // namespace v8::internal |
| 5150 | 5223 |
| 5151 #endif // V8_TARGET_ARCH_X64 | 5224 #endif // V8_TARGET_ARCH_X64 |