Chromium Code Reviews

Side by Side Diff: src/x64/codegen-x64.cc

Issue 146022: X64: Addition binary operation. (Closed)
Patch Set: Addressed review comments (and updated from svn) Created 11 years, 6 months ago
1 // Copyright 2009 the V8 project authors. All rights reserved. 1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 125 matching lines...)
136 FLAG_print_builtin_ast = true; 136 FLAG_print_builtin_ast = true;
137 #endif 137 #endif
138 FLAG_use_ic = false; 138 FLAG_use_ic = false;
139 139
140 Handle<JSFunction> test_function = Compiler::Compile( 140 Handle<JSFunction> test_function = Compiler::Compile(
141 Factory::NewStringFromAscii(CStrVector( 141 Factory::NewStringFromAscii(CStrVector(
142 "// Put all code in anonymous function to avoid global scope.\n" 142 "// Put all code in anonymous function to avoid global scope.\n"
143 "(function(){" 143 "(function(){"
144 " function test_if_then_else(x, y, z){" 144 " function test_if_then_else(x, y, z){"
145 " if (x) {" 145 " if (x) {"
146 " x = y;" 146 " x = y + 2;"
147 " } else {" 147 " } else {"
148 " x = z;" 148 " x = z + 2;"
149 " }" 149 " }"
150 " return x;" 150 " return x;"
151 " }" 151 " }"
152 "\n" 152 "\n"
153 " function test_recursion_with_base(x, y, z, w) {" 153 " function test_recursion_with_base(x, y, z, w) {"
154 " if (x) {" 154 " if (x) {"
155 " x = x;" 155 " x = x;"
156 " } else {" 156 " } else {"
157 " x = test_recursion_with_base(y, z, w, 0);" 157 " x = test_recursion_with_base(y, z, w, 0);"
158 " }" 158 " }"
(...skipping 45 matching lines...)
204 204
205 bool pending_exceptions; 205 bool pending_exceptions;
206 Handle<Object> result = 206 Handle<Object> result =
207 Execution::Call(test_function, 207 Execution::Call(test_function,
208 Handle<Object>::cast(test_function), 208 Handle<Object>::cast(test_function),
209 0, 209 0,
210 NULL, 210 NULL,
211 &pending_exceptions); 211 &pending_exceptions);
212 // Function compiles and runs, but returns a JSFunction object. 212 // Function compiles and runs, but returns a JSFunction object.
213 CHECK(result->IsSmi()); 213 CHECK(result->IsSmi());
214 CHECK_EQ(47, Smi::cast(*result)->value()); 214 CHECK_EQ(49, Smi::cast(*result)->value());
215 } 215 }
216 216
217 217
218 void CodeGenerator::GenCode(FunctionLiteral* function) { 218 void CodeGenerator::GenCode(FunctionLiteral* function) {
219 // Record the position for debugging purposes. 219 // Record the position for debugging purposes.
220 CodeForFunctionPosition(function); 220 CodeForFunctionPosition(function);
221 ZoneList<Statement*>* body = function->body(); 221 ZoneList<Statement*>* body = function->body();
222 222
223 // Initialize state. 223 // Initialize state.
224 ASSERT(scope_ == NULL); 224 ASSERT(scope_ == NULL);
(...skipping 1120 matching lines...)
1345 1345
1346 1346
1347 void CodeGenerator::VisitUnaryOperation(UnaryOperation* a) { 1347 void CodeGenerator::VisitUnaryOperation(UnaryOperation* a) {
1348 UNIMPLEMENTED(); 1348 UNIMPLEMENTED();
1349 } 1349 }
1350 1350
1351 void CodeGenerator::VisitCountOperation(CountOperation* a) { 1351 void CodeGenerator::VisitCountOperation(CountOperation* a) {
1352 UNIMPLEMENTED(); 1352 UNIMPLEMENTED();
1353 } 1353 }
1354 1354
1355 void CodeGenerator::VisitBinaryOperation(BinaryOperation* a) { 1355 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
1356 UNIMPLEMENTED(); 1356 // TODO(X64): This code was copied verbatim from codegen-ia32.
1357 // Either find a reason to change it or move it to a shared location.
1358
1359 // Note that due to an optimization in comparison operations (typeof
1360 // compared to a string literal), we can evaluate a binary expression such
1361 // as AND or OR and not leave a value on the frame or in the cc register.
1362 Comment cmnt(masm_, "[ BinaryOperation");
1363 Token::Value op = node->op();
1364
1365 // According to ECMA-262 section 11.11, page 58, the binary logical
1366 // operators must yield the result of one of the two expressions
1367 // before any ToBoolean() conversions. This means that the value
1368 // produced by a && or || operator is not necessarily a boolean.
1369
1370 // NOTE: If the left hand side produces a materialized value (not
1371 // control flow), we force the right hand side to do the same. This
1372 // is necessary because we assume that if we get control flow on the
1373 // last path out of an expression we got it on all paths.
1374 if (op == Token::AND) {
1375 JumpTarget is_true;
1376 ControlDestination dest(&is_true, destination()->false_target(), true);
1377 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
1378
1379 if (dest.false_was_fall_through()) {
1380 // The current false target was used as the fall-through. If
1381 // there are no dangling jumps to is_true then the left
1382 // subexpression was unconditionally false. Otherwise we have
1383 // paths where we do have to evaluate the right subexpression.
1384 if (is_true.is_linked()) {
1385 // We need to compile the right subexpression. If the jump to
1386 // the current false target was a forward jump then we have a
1387 // valid frame, we have just bound the false target, and we
1388 // have to jump around the code for the right subexpression.
1389 if (has_valid_frame()) {
1390 destination()->false_target()->Unuse();
1391 destination()->false_target()->Jump();
1392 }
1393 is_true.Bind();
1394 // The left subexpression compiled to control flow, so the
1395 // right one is free to do so as well.
1396 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
1397 } else {
1398 // We have actually just jumped to or bound the current false
1399 // target but the current control destination is not marked as
1400 // used.
1401 destination()->Use(false);
1402 }
1403
1404 } else if (dest.is_used()) {
1405 // The left subexpression compiled to control flow (and is_true
1406 // was just bound), so the right is free to do so as well.
1407 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
1408
1409 } else {
1410 // We have a materialized value on the frame, so we exit with
1411 // one on all paths. There are possibly also jumps to is_true
1412 // from nested subexpressions.
1413 JumpTarget pop_and_continue;
1414 JumpTarget exit;
1415
1416 // Avoid popping the result if it converts to 'false' using the
1417 // standard ToBoolean() conversion as described in ECMA-262,
1418 // section 9.2, page 30.
1419 //
1420 // Duplicate the TOS value. The duplicate will be popped by
1421 // ToBoolean.
1422 frame_->Dup();
1423 ControlDestination dest(&pop_and_continue, &exit, true);
1424 ToBoolean(&dest);
1425
1426 // Pop the result of evaluating the first part.
1427 frame_->Drop();
1428
1429 // Compile right side expression.
1430 is_true.Bind();
1431 Load(node->right());
1432
1433 // Exit (always with a materialized value).
1434 exit.Bind();
1435 }
1436
1437 } else if (op == Token::OR) {
1438 JumpTarget is_false;
1439 ControlDestination dest(destination()->true_target(), &is_false, false);
1440 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
1441
1442 if (dest.true_was_fall_through()) {
1443 // The current true target was used as the fall-through. If
1444 // there are no dangling jumps to is_false then the left
1445 // subexpression was unconditionally true. Otherwise we have
1446 // paths where we do have to evaluate the right subexpression.
1447 if (is_false.is_linked()) {
1448 // We need to compile the right subexpression. If the jump to
1449 // the current true target was a forward jump then we have a
1450 // valid frame, we have just bound the true target, and we
1451 // have to jump around the code for the right subexpression.
1452 if (has_valid_frame()) {
1453 destination()->true_target()->Unuse();
1454 destination()->true_target()->Jump();
1455 }
1456 is_false.Bind();
1457 // The left subexpression compiled to control flow, so the
1458 // right one is free to do so as well.
1459 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
1460 } else {
1461 // We have just jumped to or bound the current true target but
1462 // the current control destination is not marked as used.
1463 destination()->Use(true);
1464 }
1465
1466 } else if (dest.is_used()) {
1467 // The left subexpression compiled to control flow (and is_false
1468 // was just bound), so the right is free to do so as well.
1469 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
1470
1471 } else {
1472 // We have a materialized value on the frame, so we exit with
1473 // one on all paths. There are possibly also jumps to is_false
1474 // from nested subexpressions.
1475 JumpTarget pop_and_continue;
1476 JumpTarget exit;
1477
1478 // Avoid popping the result if it converts to 'true' using the
1479 // standard ToBoolean() conversion as described in ECMA-262,
1480 // section 9.2, page 30.
1481 //
1482 // Duplicate the TOS value. The duplicate will be popped by
1483 // ToBoolean.
1484 frame_->Dup();
1485 ControlDestination dest(&exit, &pop_and_continue, false);
1486 ToBoolean(&dest);
1487
1488 // Pop the result of evaluating the first part.
1489 frame_->Drop();
1490
1491 // Compile right side expression.
1492 is_false.Bind();
1493 Load(node->right());
1494
1495 // Exit (always with a materialized value).
1496 exit.Bind();
1497 }
1498
1499 } else {
1500 // NOTE: The code below assumes that the slow cases (calls to runtime)
1501 // never return a constant/immutable object.
1502 OverwriteMode overwrite_mode = NO_OVERWRITE;
1503 if (node->left()->AsBinaryOperation() != NULL &&
1504 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
1505 overwrite_mode = OVERWRITE_LEFT;
1506 } else if (node->right()->AsBinaryOperation() != NULL &&
1507 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
1508 overwrite_mode = OVERWRITE_RIGHT;
1509 }
1510
1511 Load(node->left());
1512 Load(node->right());
1513 GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
1514 }
1357 } 1515 }
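The comments in the AND/OR paths above stress that a JavaScript '&&' or '||' yields one of its operand values rather than a coerced boolean, and that the right operand is only evaluated when needed. A minimal C++ sketch of that semantics, using hypothetical Value/ToBoolean stand-ins (none of this is V8 API):

#include <string>
#include <variant>

// Stand-in for a JS value: either a number or a string (illustrative only).
using Value = std::variant<double, std::string>;

// Stand-in for the ECMA-262 section 9.2 ToBoolean conversion.
static bool ToBoolean(const Value& v) {
  if (const double* d = std::get_if<double>(&v)) return *d != 0.0;
  return !std::get_if<std::string>(&v)->empty();
}

// ECMA-262 section 11.11: 'left && right' yields left itself when ToBoolean(left)
// is false; otherwise it evaluates the right operand and yields that value.
template <typename Thunk>
Value EvaluateAnd(const Value& left, Thunk evaluate_right) {
  if (!ToBoolean(left)) return left;   // falsy left: keep it, skip the right side
  return evaluate_right();             // the result is the right operand's value
}

// EvaluateAnd(Value(0.0), [] { return Value(std::string("x")); }) yields 0.0, not false.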
1358 1516
1517
1518
1359 void CodeGenerator::VisitCompareOperation(CompareOperation* a) { 1519 void CodeGenerator::VisitCompareOperation(CompareOperation* a) {
1360 UNIMPLEMENTED(); 1520 UNIMPLEMENTED();
1361 } 1521 }
1362 1522
1363 void CodeGenerator::VisitThisFunction(ThisFunction* a) { 1523 void CodeGenerator::VisitThisFunction(ThisFunction* a) {
1364 UNIMPLEMENTED(); 1524 UNIMPLEMENTED();
1365 } 1525 }
1366 1526
1367 void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) { 1527 void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
1368 UNIMPLEMENTED(); 1528 UNIMPLEMENTED();
(...skipping 571 matching lines...)
1940 2100
1941 2101
1942 void CodeGenerator::LoadGlobalReceiver() { 2102 void CodeGenerator::LoadGlobalReceiver() {
1943 Result temp = allocator_->Allocate(); 2103 Result temp = allocator_->Allocate();
1944 Register reg = temp.reg(); 2104 Register reg = temp.reg();
1945 __ movq(reg, GlobalObject()); 2105 __ movq(reg, GlobalObject());
1946 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset)); 2106 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
1947 frame_->Push(&temp); 2107 frame_->Push(&temp);
1948 } 2108 }
1949 2109
2110
2111 // Flag that indicates whether or not the code that handles smi arguments
2112 // should be placed in the stub, inlined, or omitted entirely.
2113 enum GenericBinaryFlags {
2114 SMI_CODE_IN_STUB,
2115 SMI_CODE_INLINED
2116 };
2117
2118
2119 class FloatingPointHelper : public AllStatic {
2120 public:
2121 // Code pattern for loading a floating point value. Input value must
2122 // be either a smi or a heap number object (fp value). Requirements:
2123 // operand in src register. Returns operand as floating point number
2124 // in XMM register
2125 static void LoadFloatOperand(MacroAssembler* masm,
2126 Register src,
2127 XMMRegister dst);
2128 // Code pattern for loading floating point values. Input values must
2129 // be either smi or heap number objects (fp values). Requirements:
2130 // operand_1 on TOS+1, operand_2 on TOS+2; Returns operands as
2131 // floating point numbers in XMM registers.
2132 static void LoadFloatOperands(MacroAssembler* masm,
2133 XMMRegister dst1,
2134 XMMRegister dst2);
2135
2136 // Code pattern for loading floating point values onto the fp stack.
2137 // Input values must be either smi or heap number objects (fp values).
2138 // Requirements:
2139 // operand_1 on TOS+1, operand_2 on TOS+2; Returns operands as
2140 // floating point numbers on fp stack.
2141 static void LoadFloatOperands(MacroAssembler* masm);
2142
2143 // Code pattern for loading a floating point value and converting it
2144 // to a 32 bit integer. Input value must be either a smi or a heap number
2145 // object.
2146 // Returns the operand as a 32-bit sign-extended integer in a general
2147 // purpose register.
2148 static void LoadInt32Operand(MacroAssembler* masm,
2149 const Operand& src,
2150 Register dst);
2151
2152 // Test if the operands are smis or heap number objects (fp). Requirements:
2153 // operand_1 in rax, operand_2 in rdx; falls through on float
2154 // operands, jumps to the non_float label otherwise.
2155 static void CheckFloatOperands(MacroAssembler* masm,
2156 Label* non_float);
2157 // Allocate a heap number in new space with undefined value.
2158 // Returns tagged pointer in result, or jumps to need_gc if new space is full.
2159 static void AllocateHeapNumber(MacroAssembler* masm,
2160 Label* need_gc,
2161 Register scratch,
2162 Register result);
2163 };
2164
2165
2166 class GenericBinaryOpStub: public CodeStub {
2167 public:
2168 GenericBinaryOpStub(Token::Value op,
2169 OverwriteMode mode,
2170 GenericBinaryFlags flags)
2171 : op_(op), mode_(mode), flags_(flags) {
2172 ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
2173 }
2174
2175 void GenerateSmiCode(MacroAssembler* masm, Label* slow);
2176
2177 private:
2178 Token::Value op_;
2179 OverwriteMode mode_;
2180 GenericBinaryFlags flags_;
2181
2182 const char* GetName();
2183
2184 #ifdef DEBUG
2185 void Print() {
2186 PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
2187 Token::String(op_),
2188 static_cast<int>(mode_),
2189 static_cast<int>(flags_));
2190 }
2191 #endif
2192
2193 // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
2194 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
2195 class OpBits: public BitField<Token::Value, 2, 13> {};
2196 class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
2197
2198 Major MajorKey() { return GenericBinaryOp; }
2199 int MinorKey() {
2200 // Encode the parameters in a unique 16 bit value.
2201 return OpBits::encode(op_)
2202 | ModeBits::encode(mode_)
2203 | FlagBits::encode(flags_);
2204 }
2205 void Generate(MacroAssembler* masm);
2206 };
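The MinorKey comment above describes the 16-bit layout 'FOOOOOOOOOOOOOMM'. A hand-rolled sketch of that encoding (V8 uses its BitField templates; this only illustrates the bit positions declared above: mode in bits 0-1, token in bits 2-14, flag in bit 15):

#include <cstdint>

static inline uint16_t EncodeMinorKey(unsigned op, unsigned mode, unsigned flags) {
  // flags: 1 bit, op: 13 bits, mode: 2 bits.
  return static_cast<uint16_t>((flags << 15) | (op << 2) | mode);
}
static inline unsigned DecodeMode(uint16_t key)  { return key & 0x3; }
static inline unsigned DecodeOp(uint16_t key)    { return (key >> 2) & 0x1fff; }
static inline unsigned DecodeFlags(uint16_t key) { return (key >> 15) & 0x1; }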
2207
2208
2209 void CodeGenerator::GenericBinaryOperation(Token::Value op,
2210 SmiAnalysis* type,
2211 OverwriteMode overwrite_mode) {
2212 Comment cmnt(masm_, "[ BinaryOperation");
2213 Comment cmnt_token(masm_, Token::String(op));
2214
2215 if (op == Token::COMMA) {
2216 // Simply discard left value.
2217 frame_->Nip(1);
2218 return;
2219 }
2220
2221 // Set the flags based on the operation, type and loop nesting level.
2222 GenericBinaryFlags flags;
2223 switch (op) {
2224 case Token::BIT_OR:
2225 case Token::BIT_AND:
2226 case Token::BIT_XOR:
2227 case Token::SHL:
2228 case Token::SHR:
2229 case Token::SAR:
2230 // Bit operations always assume they likely operate on Smis. Still only
2231 // generate the inline Smi check code if this operation is part of a loop.
2232 flags = (loop_nesting() > 0)
2233 ? SMI_CODE_INLINED
2234 : SMI_CODE_IN_STUB;
2235 break;
2236
2237 default:
2238 // By default only inline the Smi check code for likely smis if this
2239 // operation is part of a loop.
2240 flags = ((loop_nesting() > 0) && type->IsLikelySmi())
2241 ? SMI_CODE_INLINED
2242 : SMI_CODE_IN_STUB;
2243 break;
2244 }
2245
2246 Result right = frame_->Pop();
2247 Result left = frame_->Pop();
2248
2249 if (op == Token::ADD) {
2250 bool left_is_string = left.static_type().is_jsstring();
2251 bool right_is_string = right.static_type().is_jsstring();
2252 if (left_is_string || right_is_string) {
2253 frame_->Push(&left);
2254 frame_->Push(&right);
2255 Result answer;
2256 if (left_is_string) {
2257 if (right_is_string) {
2258 // TODO(lrn): if (left.is_constant() && right.is_constant())
2259 // -- do a compile time cons, if allocation during codegen is allowed.
2260 answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
2261 } else {
2262 answer =
2263 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
2264 }
2265 } else if (right_is_string) {
2266 answer =
2267 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
2268 }
2269 answer.set_static_type(StaticType::jsstring());
2270 frame_->Push(&answer);
2271 return;
2272 }
2273 // Neither operand is known to be a string.
2274 }
2275
2276 bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
2277 bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
2278 bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
2279 bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
2280 bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
2281
2282 if (left_is_smi && right_is_smi) {
2283 // Compute the constant result at compile time, and leave it on the frame.
2284 int left_int = Smi::cast(*left.handle())->value();
2285 int right_int = Smi::cast(*right.handle())->value();
2286 if (FoldConstantSmis(op, left_int, right_int)) return;
2287 }
2288
2289 if (left_is_non_smi || right_is_non_smi) {
2290 // Set flag so that we go straight to the slow case, with no smi code.
2291 generate_no_smi_code = true;
2292 } else if (right_is_smi) {
2293 ConstantSmiBinaryOperation(op, &left, right.handle(),
2294 type, false, overwrite_mode);
2295 return;
2296 } else if (left_is_smi) {
2297 ConstantSmiBinaryOperation(op, &right, left.handle(),
2298 type, true, overwrite_mode);
2299 return;
2300 }
2301
2302 if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
2303 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
2304 } else {
2305 frame_->Push(&left);
2306 frame_->Push(&right);
2307 // If we know the arguments aren't smis, use the binary operation stub
2308 // that does not check for the fast smi case.
2309 // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
2310 if (generate_no_smi_code) {
2311 flags = SMI_CODE_INLINED;
2312 }
2313 GenericBinaryOpStub stub(op, overwrite_mode, flags);
2314 Result answer = frame_->CallStub(&stub, 2);
2315 frame_->Push(&answer);
2316 }
2317 }
2318
2319
1950 // Emit a LoadIC call to get the value from receiver and leave it in 2320 // Emit a LoadIC call to get the value from receiver and leave it in
1951 // dst. The receiver register is restored after the call. 2321 // dst. The receiver register is restored after the call.
1952 class DeferredReferenceGetNamedValue: public DeferredCode { 2322 class DeferredReferenceGetNamedValue: public DeferredCode {
1953 public: 2323 public:
1954 DeferredReferenceGetNamedValue(Register dst, 2324 DeferredReferenceGetNamedValue(Register dst,
1955 Register receiver, 2325 Register receiver,
1956 Handle<String> name) 2326 Handle<String> name)
1957 : dst_(dst), receiver_(receiver), name_(name) { 2327 : dst_(dst), receiver_(receiver), name_(name) {
1958 set_comment("[ DeferredReferenceGetNamedValue"); 2328 set_comment("[ DeferredReferenceGetNamedValue");
1959 } 2329 }
(...skipping 25 matching lines...)
1985 // Here we use masm_-> instead of the __ macro because this is the 2355 // Here we use masm_-> instead of the __ macro because this is the
1986 // instruction that gets patched and coverage code gets in the way. 2356 // instruction that gets patched and coverage code gets in the way.
1987 masm_->testq(rax, Immediate(-delta_to_patch_site)); 2357 masm_->testq(rax, Immediate(-delta_to_patch_site));
1988 __ IncrementCounter(&Counters::named_load_inline_miss, 1); 2358 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
1989 2359
1990 if (!dst_.is(rax)) __ movq(dst_, rax); 2360 if (!dst_.is(rax)) __ movq(dst_, rax);
1991 __ pop(receiver_); 2361 __ pop(receiver_);
1992 } 2362 }
1993 2363
1994 2364
2365
2366
2367 // The result of src + value is in dst. It either overflowed or was not
2368 // smi tagged. Undo the speculative addition and call the appropriate
2369 // specialized stub for add. The result is left in dst.
2370 class DeferredInlineSmiAdd: public DeferredCode {
2371 public:
2372 DeferredInlineSmiAdd(Register dst,
2373 Smi* value,
2374 OverwriteMode overwrite_mode)
2375 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
2376 set_comment("[ DeferredInlineSmiAdd");
2377 }
2378
2379 virtual void Generate();
2380
2381 private:
2382 Register dst_;
2383 Smi* value_;
2384 OverwriteMode overwrite_mode_;
2385 };
2386
2387
2388 void DeferredInlineSmiAdd::Generate() {
2389 // Undo the optimistic add operation and call the shared stub.
2390 __ subq(dst_, Immediate(value_));
2391 __ push(dst_);
2392 __ push(Immediate(value_));
2393 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
2394 __ CallStub(&igostub);
2395 if (!dst_.is(rax)) __ movq(dst_, rax);
2396 }
2397
2398
2399 // The result of value + src is in dst. It either overflowed or was not
2400 // smi tagged. Undo the speculative addition and call the appropriate
2401 // specialized stub for add. The result is left in dst.
2402 class DeferredInlineSmiAddReversed: public DeferredCode {
2403 public:
2404 DeferredInlineSmiAddReversed(Register dst,
2405 Smi* value,
2406 OverwriteMode overwrite_mode)
2407 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
2408 set_comment("[ DeferredInlineSmiAddReversed");
2409 }
2410
2411 virtual void Generate();
2412
2413 private:
2414 Register dst_;
2415 Smi* value_;
2416 OverwriteMode overwrite_mode_;
2417 };
2418
2419
2420 void DeferredInlineSmiAddReversed::Generate() {
2421 // Undo the optimistic add operation and call the shared stub.
2422 __ subq(dst_, Immediate(value_));
2423 __ push(Immediate(value_));
2424 __ push(dst_);
2425 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
2426 __ CallStub(&igostub);
2427 if (!dst_.is(rax)) __ movq(dst_, rax);
2428 }
2429
2430
2431 // The result of src - value is in dst. It either overflowed or was not
2432 // smi tagged. Undo the speculative subtraction and call the
2433 // appropriate specialized stub for subtract. The result is left in
2434 // dst.
2435 class DeferredInlineSmiSub: public DeferredCode {
2436 public:
2437 DeferredInlineSmiSub(Register dst,
2438 Smi* value,
2439 OverwriteMode overwrite_mode)
2440 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
2441 set_comment("[ DeferredInlineSmiSub");
2442 }
2443
2444 virtual void Generate();
2445
2446 private:
2447 Register dst_;
2448 Smi* value_;
2449 OverwriteMode overwrite_mode_;
2450 };
2451
2452
2453 void DeferredInlineSmiSub::Generate() {
2454 // Undo the optimistic sub operation and call the shared stub.
2455 __ addq(dst_, Immediate(value_));
2456 __ push(dst_);
2457 __ push(Immediate(value_));
2458 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
2459 __ CallStub(&igostub);
2460 if (!dst_.is(rax)) __ movq(dst_, rax);
2461 }
2462
2463
2464 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
2465 Result* operand,
2466 Handle<Object> value,
2467 SmiAnalysis* type,
2468 bool reversed,
2469 OverwriteMode overwrite_mode) {
2470 // NOTE: This is an attempt to inline (a bit) more of the code for
2471 // some possible smi operations (like + and -) when (at least) one
2472 // of the operands is a constant smi.
2473 // Consumes the argument "operand".
2474
2475 // TODO(199): Optimize some special cases of operations involving a
2476 // smi literal (multiply by 2, shift by 0, etc.).
2477 if (IsUnsafeSmi(value)) {
2478 Result unsafe_operand(value);
2479 if (reversed) {
2480 LikelySmiBinaryOperation(op, &unsafe_operand, operand,
2481 overwrite_mode);
2482 } else {
2483 LikelySmiBinaryOperation(op, operand, &unsafe_operand,
2484 overwrite_mode);
2485 }
2486 ASSERT(!operand->is_valid());
2487 return;
2488 }
2489
2490 // Get the literal value.
2491 Smi* smi_value = Smi::cast(*value);
2492
2493 switch (op) {
2494 case Token::ADD: {
2495 operand->ToRegister();
2496 frame_->Spill(operand->reg());
2497
2498 // Optimistically add. Call the specialized add stub if the
2499 // result is not a smi or overflows.
2500 DeferredCode* deferred = NULL;
2501 if (reversed) {
2502 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
2503 smi_value,
2504 overwrite_mode);
2505 } else {
2506 deferred = new DeferredInlineSmiAdd(operand->reg(),
2507 smi_value,
2508 overwrite_mode);
2509 }
2510 __ movq(kScratchRegister, value, RelocInfo::NONE);
2511 __ addl(operand->reg(), kScratchRegister);
2512 deferred->Branch(overflow);
2513 __ testl(operand->reg(), Immediate(kSmiTagMask));
2514 deferred->Branch(not_zero);
2515 deferred->BindExit();
2516 frame_->Push(operand);
2517 break;
2518 }
2519 // TODO(X64): Move other implementations from ia32 to here.
2520 default: {
2521 Result constant_operand(value);
2522 if (reversed) {
2523 LikelySmiBinaryOperation(op, &constant_operand, operand,
2524 overwrite_mode);
2525 } else {
2526 LikelySmiBinaryOperation(op, operand, &constant_operand,
2527 overwrite_mode);
2528 }
2529 break;
2530 }
2531 }
2532 ASSERT(!operand->is_valid());
2533 }
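For the Token::ADD case above, the generated code adds the tagged constant optimistically and only takes the deferred path when the addition overflows or the result's tag bit shows that the operand was not a smi (the deferred code then undoes the addition and calls the stub). A rough C++ sketch of that fast path, assuming the 32-bit smi layout with kSmiTag == 0 and kSmiTagSize == 1 (not V8 code; __builtin_add_overflow is a GCC/Clang builtin):

#include <cstdint>

// Returns true and updates *tagged_operand when the inline fast path succeeds;
// returns false where the emitted code would branch to the deferred slow case.
bool InlineSmiAddConstant(int32_t* tagged_operand, int32_t tagged_constant) {
  int32_t result;
  if (__builtin_add_overflow(*tagged_operand, tagged_constant, &result)) {
    return false;            // deferred->Branch(overflow)
  }
  if ((result & 1) != 0) {   // kSmiTagMask: a set low bit means "not a smi"
    return false;            // deferred->Branch(not_zero)
  }
  *tagged_operand = result;
  return true;
}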
2534
1995 #undef __ 2535 #undef __
1996 #define __ ACCESS_MASM(masm) 2536 #define __ ACCESS_MASM(masm)
1997 2537
1998 2538
1999 Handle<String> Reference::GetName() { 2539 Handle<String> Reference::GetName() {
2000 ASSERT(type_ == NAMED); 2540 ASSERT(type_ == NAMED);
2001 Property* property = expression_->AsProperty(); 2541 Property* property = expression_->AsProperty();
2002 if (property == NULL) { 2542 if (property == NULL) {
2003 // Global variable reference treated as a named property reference. 2543 // Global variable reference treated as a named property reference.
2004 VariableProxy* proxy = expression_->AsVariableProxy(); 2544 VariableProxy* proxy = expression_->AsVariableProxy();
(...skipping 226 matching lines...)
2231 // Return 1/0 for true/false in rax. 2771 // Return 1/0 for true/false in rax.
2232 __ bind(&true_result); 2772 __ bind(&true_result);
2233 __ movq(rax, Immediate(1)); 2773 __ movq(rax, Immediate(1));
2234 __ ret(1 * kPointerSize); 2774 __ ret(1 * kPointerSize);
2235 __ bind(&false_result); 2775 __ bind(&false_result);
2236 __ xor_(rax, rax); 2776 __ xor_(rax, rax);
2237 __ ret(1 * kPointerSize); 2777 __ ret(1 * kPointerSize);
2238 } 2778 }
2239 2779
2240 2780
2241 // Flag that indicates whether or not the code that handles smi arguments
2242 // should be placed in the stub, inlined, or omitted entirely.
2243 enum GenericBinaryFlags {
2244 SMI_CODE_IN_STUB,
2245 SMI_CODE_INLINED
2246 };
2247 2781
2782 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
2783 return false; // UNIMPLEMENTED.
2784 }
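FoldConstantSmis is only a stub in this patch (it always returns false), so GenericBinaryOperation never folds two constant smi operands at compile time yet. A hedged sketch of the idea it will eventually implement, modeled on what the ia32 port does (the operator set and range check here are assumptions, not V8 code): do the arithmetic at compile time and fold only if the result is still a valid smi.

#include <cstdint>

bool TryFoldConstantSmis(char op, int32_t left, int32_t right, int32_t* folded) {
  // Compute in 64 bits so overflow of the 32-bit inputs is visible.
  int64_t value;
  switch (op) {
    case '+': value = static_cast<int64_t>(left) + right; break;
    case '-': value = static_cast<int64_t>(left) - right; break;
    case '|': value = left | right; break;
    case '&': value = left & right; break;
    case '^': value = left ^ right; break;
    default:  return false;  // everything else goes through the generated code
  }
  // Fold only if the result still fits in the 31-bit signed smi range.
  const int64_t kSmiMax = (static_cast<int64_t>(1) << 30) - 1;
  const int64_t kSmiMin = -(static_cast<int64_t>(1) << 30);
  if (value < kSmiMin || value > kSmiMax) return false;
  *folded = static_cast<int32_t>(value);
  return true;
}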
2248 2785
2249 class GenericBinaryOpStub: public CodeStub { 2786 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
2250 public: 2787 Result* left,
2251 GenericBinaryOpStub(Token::Value op, 2788 Result* right,
2252 OverwriteMode mode, 2789 OverwriteMode overwrite_mode) {
2253 GenericBinaryFlags flags) 2790 UNIMPLEMENTED();
2254 : op_(op), mode_(mode), flags_(flags) {
2255 ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
2256 }
2257
2258 void GenerateSmiCode(MacroAssembler* masm, Label* slow);
2259
2260 private:
2261 Token::Value op_;
2262 OverwriteMode mode_;
2263 GenericBinaryFlags flags_;
2264
2265 const char* GetName();
2266
2267 #ifdef DEBUG
2268 void Print() {
2269 PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
2270 Token::String(op_),
2271 static_cast<int>(mode_),
2272 static_cast<int>(flags_));
2273 }
2274 #endif
2275
2276 // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
2277 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
2278 class OpBits: public BitField<Token::Value, 2, 13> {};
2279 class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
2280
2281 Major MajorKey() { return GenericBinaryOp; }
2282 int MinorKey() {
2283 // Encode the parameters in a unique 16 bit value.
2284 return OpBits::encode(op_)
2285 | ModeBits::encode(mode_)
2286 | FlagBits::encode(flags_);
2287 }
2288 void Generate(MacroAssembler* masm);
2289 };
2290
2291
2292 const char* GenericBinaryOpStub::GetName() {
2293 switch (op_) {
2294 case Token::ADD: return "GenericBinaryOpStub_ADD";
2295 case Token::SUB: return "GenericBinaryOpStub_SUB";
2296 case Token::MUL: return "GenericBinaryOpStub_MUL";
2297 case Token::DIV: return "GenericBinaryOpStub_DIV";
2298 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
2299 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
2300 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
2301 case Token::SAR: return "GenericBinaryOpStub_SAR";
2302 case Token::SHL: return "GenericBinaryOpStub_SHL";
2303 case Token::SHR: return "GenericBinaryOpStub_SHR";
2304 default: return "GenericBinaryOpStub";
2305 }
2306 } 2791 }
2307 2792
2308 2793
2309 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { 2794 // End of CodeGenerator implementation.
2310 // Perform fast-case smi code for the operation (rax <op> rbx) and
2311 // leave result in register rax.
2312
2313 // Prepare the smi check of both operands by or'ing them together
2314 // before checking against the smi mask.
2315 __ movq(rcx, rbx);
2316 __ or_(rcx, rax);
2317
2318 switch (op_) {
2319 case Token::ADD:
2320 __ addl(rax, rbx); // add optimistically
2321 __ j(overflow, slow);
2322 __ movsxlq(rax, rax); // Sign extend eax into rax.
2323 break;
2324
2325 case Token::SUB:
2326 __ subl(rax, rbx); // subtract optimistically
2327 __ j(overflow, slow);
2328 __ movsxlq(rax, rax); // Sign extend eax into rax.
2329 break;
2330
2331 case Token::DIV:
2332 case Token::MOD:
2333 // Sign extend rax into rdx:rax
2334 // (also sign extends eax into edx if eax is Smi).
2335 __ cqo();
2336 // Check for 0 divisor.
2337 __ testq(rbx, rbx);
2338 __ j(zero, slow);
2339 break;
2340
2341 default:
2342 // Fall-through to smi check.
2343 break;
2344 }
2345
2346 // Perform the actual smi check.
2347 ASSERT(kSmiTag == 0); // adjust zero check if not the case
2348 __ testl(rcx, Immediate(kSmiTagMask));
2349 __ j(not_zero, slow);
2350
2351 switch (op_) {
2352 case Token::ADD:
2353 case Token::SUB:
2354 // Do nothing here.
2355 break;
2356
2357 case Token::MUL:
2358 // If the smi tag is 0 we can just leave the tag on one operand.
2359 ASSERT(kSmiTag == 0); // adjust code below if not the case
2360 // Remove tag from one of the operands (but keep sign).
2361 __ sar(rax, Immediate(kSmiTagSize));
2362 // Do multiplication.
2363 __ imull(rax, rbx); // multiplication of smis; result in eax
2364 // Go slow on overflows.
2365 __ j(overflow, slow);
2366 // Check for negative zero result.
2367 __ movsxlq(rax, rax); // Sign extend eax into rax.
2368 __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y
2369 break;
2370
2371 case Token::DIV:
2372 // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
2373 __ idiv(rbx);
2374 // Check that the remainder is zero.
2375 __ testq(rdx, rdx);
2376 __ j(not_zero, slow);
2377 // Check for the corner case of dividing the most negative smi
2378 // by -1. We cannot use the overflow flag, since it is not set
2379 // by idiv instruction.
2380 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
2381 // TODO(X64): TODO(Smi): Smi implementation dependent constant.
2382 // Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1)
2383 __ cmpq(rax, Immediate(0x40000000));
2384 __ j(equal, slow);
2385 // Check for negative zero result.
2386 __ NegativeZeroTest(rax, rcx, slow); // use ecx = x | y
2387 // Tag the result and store it in register rax.
2388 ASSERT(kSmiTagSize == kTimes2); // adjust code if not the case
2389 __ lea(rax, Operand(rax, rax, kTimes1, kSmiTag));
2390 break;
2391
2392 case Token::MOD:
2393 // Divide rdx:rax by rbx.
2394 __ idiv(rbx);
2395 // Check for negative zero result.
2396 __ NegativeZeroTest(rdx, rcx, slow); // use ecx = x | y
2397 // Move remainder to register rax.
2398 __ movq(rax, rdx);
2399 break;
2400
2401 case Token::BIT_OR:
2402 __ or_(rax, rbx);
2403 break;
2404
2405 case Token::BIT_AND:
2406 __ and_(rax, rbx);
2407 break;
2408
2409 case Token::BIT_XOR:
2410 __ xor_(rax, rbx);
2411 break;
2412
2413 case Token::SHL:
2414 case Token::SHR:
2415 case Token::SAR:
2416 // Move the second operand into register ecx.
2417 __ movq(rcx, rbx);
2418 // Remove tags from operands (but keep sign).
2419 __ sar(rax, Immediate(kSmiTagSize));
2420 __ sar(rcx, Immediate(kSmiTagSize));
2421 // Perform the operation.
2422 switch (op_) {
2423 case Token::SAR:
2424 __ sar(rax);
2425 // No checks of result necessary
2426 break;
2427 case Token::SHR:
2428 __ shrl(rax); // ecx is implicit shift register
2429 // Check that the *unsigned* result fits in a smi.
2430 // Neither of the two high-order bits can be set:
2431 // - 0x80000000: high bit would be lost when smi tagging.
2432 // - 0x40000000: this number would convert to negative when
2433 // Smi tagging these two cases can only happen with shifts
2434 // by 0 or 1 when handed a valid smi.
2435 __ testq(rax, Immediate(0xc0000000));
2436 __ j(not_zero, slow);
2437 break;
2438 case Token::SHL:
2439 __ shll(rax);
2440 // TODO(Smi): Significant change if Smi changes.
2441 // Check that the *signed* result fits in a smi.
2442 // It does, if the 30th and 31st bits are equal, since then
2443 // shifting the SmiTag in at the bottom doesn't change the sign.
2444 ASSERT(kSmiTagSize == 1);
2445 __ cmpl(rax, Immediate(0xc0000000));
2446 __ j(sign, slow);
2447 __ movsxlq(rax, rax); // Extend new sign of eax into rax.
2448 break;
2449 default:
2450 UNREACHABLE();
2451 }
2452 // Tag the result and store it in register eax.
2453 ASSERT(kSmiTagSize == kTimes2); // adjust code if not the case
2454 __ lea(rax, Operand(rax, rax, kTimes1, kSmiTag));
2455 break;
2456
2457 default:
2458 UNREACHABLE();
2459 break;
2460 }
2461 }
2462
2463
2464 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
2465 }
2466
2467 2795
2468 void UnarySubStub::Generate(MacroAssembler* masm) { 2796 void UnarySubStub::Generate(MacroAssembler* masm) {
2797 UNIMPLEMENTED();
2469 } 2798 }
2470 2799
2471 class CompareStub: public CodeStub { 2800 class CompareStub: public CodeStub {
2472 public: 2801 public:
2473 CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { } 2802 CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
2474 2803
2475 void Generate(MacroAssembler* masm); 2804 void Generate(MacroAssembler* masm);
2476 2805
2477 private: 2806 private:
2478 Condition cc_; 2807 Condition cc_;
(...skipping 547 matching lines...)
3026 __ pop(r14); 3355 __ pop(r14);
3027 __ pop(r13); 3356 __ pop(r13);
3028 __ pop(r12); 3357 __ pop(r12);
3029 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers 3358 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
3030 3359
3031 // Restore frame pointer and return. 3360 // Restore frame pointer and return.
3032 __ pop(rbp); 3361 __ pop(rbp);
3033 __ ret(0); 3362 __ ret(0);
3034 } 3363 }
3035 3364
3365
3366 // -----------------------------------------------------------------------------
3367 // Implementation of stubs.
3368
3369 // Stub classes have a public member named masm, not masm_.
3370
3371
3372 void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
3373 Label* need_gc,
3374 Register scratch,
3375 Register result) {
3376 ExternalReference allocation_top =
3377 ExternalReference::new_space_allocation_top_address();
3378 ExternalReference allocation_limit =
3379 ExternalReference::new_space_allocation_limit_address();
3380 __ movq(scratch, allocation_top); // scratch: address of allocation top.
3381 __ movq(result, Operand(scratch, 0));
3382 __ addq(result, Immediate(HeapNumber::kSize)); // New top.
3383 __ movq(kScratchRegister, allocation_limit);
3384 __ cmpq(result, Operand(kScratchRegister, 0));
3385 __ j(above, need_gc);
3386
3387 __ movq(Operand(scratch, 0), result); // store new top
3388 __ addq(result, Immediate(kHeapObjectTag - HeapNumber::kSize));
3389 __ movq(kScratchRegister,
3390 Factory::heap_number_map(),
3391 RelocInfo::EMBEDDED_OBJECT);
3392 __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3393 // Tag old top and use as result.
3394 }
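AllocateHeapNumber above is a bump-pointer allocation against the new-space top and limit, followed by tagging the pointer and storing the heap number map. Roughly, in C++ terms (field names and the GC hand-off are simplified assumptions):

#include <cstddef>
#include <cstdint>

struct NewSpace {
  uintptr_t top;    // address of the next free byte
  uintptr_t limit;  // end of the currently allocatable region
};

// Returns the untagged address of the reserved object, or 0 when a GC is needed
// (the stub jumps to need_gc in that case). The real code then adds kHeapObjectTag
// and writes the heap number map into the new object's map slot.
uintptr_t AllocateRaw(NewSpace* space, size_t size_in_bytes) {
  uintptr_t result = space->top;
  uintptr_t new_top = result + size_in_bytes;
  if (new_top > space->limit) return 0;
  space->top = new_top;
  return result;
}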
3395
3396
3397
3398 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
3399 Register src,
3400 XMMRegister dst) {
3401 Label load_smi, done;
3402
3403 __ testl(src, Immediate(kSmiTagMask));
3404 __ j(zero, &load_smi);
3405 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
3406 __ jmp(&done);
3407
3408 __ bind(&load_smi);
3409 __ sar(src, Immediate(kSmiTagSize));
3410 __ cvtlsi2sd(dst, src);
3411
3412 __ bind(&done);
3413 }
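LoadFloatOperand above accepts either representation of a number: a smi is untagged and converted with cvtlsi2sd, while a heap number already stores an IEEE double that is loaded directly. The same logic in C-like terms (the tag and offset constants here are illustrative stand-ins, not the real layout constants):

#include <cstdint>

double TaggedToDouble(intptr_t tagged) {
  if ((tagged & 1) == 0) {
    // Smi: shift out the tag bit and convert the integer to a double.
    return static_cast<double>(tagged >> 1);
  }
  // Heap number: strip the heap object tag and read the stored double.
  const intptr_t kHeapObjectTag = 1;  // assumed value
  const intptr_t kValueOffset = 8;    // assumed offset of the HeapNumber payload
  return *reinterpret_cast<const double*>(tagged - kHeapObjectTag + kValueOffset);
}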
3414
3415
3416 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
3417 XMMRegister dst1,
3418 XMMRegister dst2) {
3419 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
3420 LoadFloatOperand(masm, kScratchRegister, dst1);
3421 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
3422 LoadFloatOperand(masm, kScratchRegister, dst2);
3423 }
3424
3425
3426 void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
3427 const Operand& src,
3428 Register dst) {
3429 // TODO(X64): Convert number operands to int32 values.
3430 // Don't convert a Smi to a double first.
3431 UNIMPLEMENTED();
3432 }
3433
3434
3435 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
3436 Label load_smi_1, load_smi_2, done_load_1, done;
3437 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
3438 __ testl(kScratchRegister, Immediate(kSmiTagMask));
3439 __ j(zero, &load_smi_1);
3440 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
3441 __ bind(&done_load_1);
3442
3443 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
3444 __ testl(kScratchRegister, Immediate(kSmiTagMask));
3445 __ j(zero, &load_smi_2);
3446 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
3447 __ jmp(&done);
3448
3449 __ bind(&load_smi_1);
3450 __ sar(kScratchRegister, Immediate(kSmiTagSize));
3451 __ push(kScratchRegister);
3452 __ fild_s(Operand(rsp, 0));
3453 __ pop(kScratchRegister);
3454 __ jmp(&done_load_1);
3455
3456 __ bind(&load_smi_2);
3457 __ sar(kScratchRegister, Immediate(kSmiTagSize));
3458 __ push(kScratchRegister);
3459 __ fild_s(Operand(rsp, 0));
3460 __ pop(kScratchRegister);
3461
3462 __ bind(&done);
3463 }
3464
3465
3466 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
3467 Label* non_float) {
3468 Label test_other, done;
3469 // Test if both operands are numbers (smis or heap number objects);
3470 // jump to the non_float label if either of them is not.
3471 __ testl(rdx, Immediate(kSmiTagMask));
3472 __ j(zero, &test_other); // argument in rdx is OK
3473 __ movq(kScratchRegister,
3474 Factory::heap_number_map(),
3475 RelocInfo::EMBEDDED_OBJECT);
3476 __ cmpq(kScratchRegister, FieldOperand(rdx, HeapObject::kMapOffset));
3477 __ j(not_equal, non_float); // argument in rdx is not a number -> NaN
3478
3479 __ bind(&test_other);
3480 __ testl(rax, Immediate(kSmiTagMask));
3481 __ j(zero, &done); // argument in eax is OK
3482 __ movq(kScratchRegister,
3483 Factory::heap_number_map(),
3484 RelocInfo::EMBEDDED_OBJECT);
3485 __ cmpq(kScratchRegister, FieldOperand(rax, HeapObject::kMapOffset));
3486 __ j(not_equal, non_float); // argument in rax is not a number -> NaN
3487
3488 // Fall-through: Both operands are numbers.
3489 __ bind(&done);
3490 }
3491
3492
3493 const char* GenericBinaryOpStub::GetName() {
3494 switch (op_) {
3495 case Token::ADD: return "GenericBinaryOpStub_ADD";
3496 case Token::SUB: return "GenericBinaryOpStub_SUB";
3497 case Token::MUL: return "GenericBinaryOpStub_MUL";
3498 case Token::DIV: return "GenericBinaryOpStub_DIV";
3499 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
3500 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
3501 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
3502 case Token::SAR: return "GenericBinaryOpStub_SAR";
3503 case Token::SHL: return "GenericBinaryOpStub_SHL";
3504 case Token::SHR: return "GenericBinaryOpStub_SHR";
3505 default: return "GenericBinaryOpStub";
3506 }
3507 }
3508
3509 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
3510 // Perform fast-case smi code for the operation (rax <op> rbx) and
3511 // leave result in register rax.
3512
3513 // Prepare the smi check of both operands by or'ing them together
3514 // before checking against the smi mask.
3515 __ movq(rcx, rbx);
3516 __ or_(rcx, rax);
3517
3518 switch (op_) {
3519 case Token::ADD:
3520 __ addl(rax, rbx); // add optimistically
3521 __ j(overflow, slow);
3522 __ movsxlq(rax, rax); // Sign extend eax into rax.
3523 break;
3524
3525 case Token::SUB:
3526 __ subl(rax, rbx); // subtract optimistically
3527 __ j(overflow, slow);
3528 __ movsxlq(rax, rax); // Sign extend eax into rax.
3529 break;
3530
3531 case Token::DIV:
3532 case Token::MOD:
3533 // Sign extend rax into rdx:rax
3534 // (also sign extends eax into edx if eax is Smi).
3535 __ cqo();
3536 // Check for 0 divisor.
3537 __ testq(rbx, rbx);
3538 __ j(zero, slow);
3539 break;
3540
3541 default:
3542 // Fall-through to smi check.
3543 break;
3544 }
3545
3546 // Perform the actual smi check.
3547 ASSERT(kSmiTag == 0); // adjust zero check if not the case
3548 __ testl(rcx, Immediate(kSmiTagMask));
3549 __ j(not_zero, slow);
3550
3551 switch (op_) {
3552 case Token::ADD:
3553 case Token::SUB:
3554 // Do nothing here.
3555 break;
3556
3557 case Token::MUL:
3558 // If the smi tag is 0 we can just leave the tag on one operand.
3559 ASSERT(kSmiTag == 0); // adjust code below if not the case
3560 // Remove tag from one of the operands (but keep sign).
3561 __ sar(rax, Immediate(kSmiTagSize));
3562 // Do multiplication.
3563 __ imull(rax, rbx); // multiplication of smis; result in eax
3564 // Go slow on overflows.
3565 __ j(overflow, slow);
3566 // Check for negative zero result.
3567 __ movsxlq(rax, rax); // Sign extend eax into rax.
3568 __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y
3569 break;
3570
3571 case Token::DIV:
3572 // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
3573 __ idiv(rbx);
3574 // Check that the remainder is zero.
3575 __ testq(rdx, rdx);
3576 __ j(not_zero, slow);
3577 // Check for the corner case of dividing the most negative smi
3578 // by -1. We cannot use the overflow flag, since it is not set
3579 // by idiv instruction.
3580 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3581 // TODO(X64): TODO(Smi): Smi implementation dependent constant.
3582 // Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1)
3583 __ cmpq(rax, Immediate(0x40000000));
3584 __ j(equal, slow);
3585 // Check for negative zero result.
3586 __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y
3587 // Tag the result and store it in register rax.
3588 ASSERT(kSmiTagSize == kTimes2); // adjust code if not the case
3589 __ lea(rax, Operand(rax, rax, kTimes1, kSmiTag));
3590 break;
3591
3592 case Token::MOD:
3593 // Divide rdx:rax by rbx.
3594 __ idiv(rbx);
3595 // Check for negative zero result.
3596 __ NegativeZeroTest(rdx, rcx, slow); // use rcx = x | y
3597 // Move remainder to register rax.
3598 __ movq(rax, rdx);
3599 break;
3600
3601 case Token::BIT_OR:
3602 __ or_(rax, rbx);
3603 break;
3604
3605 case Token::BIT_AND:
3606 __ and_(rax, rbx);
3607 break;
3608
3609 case Token::BIT_XOR:
3610 ASSERT_EQ(0, kSmiTag);
3611 __ xor_(rax, rbx);
3612 break;
3613
3614 case Token::SHL:
3615 case Token::SHR:
3616 case Token::SAR:
3617 // Move the second operand into register ecx.
3618 __ movq(rcx, rbx);
3619 // Remove tags from operands (but keep sign).
3620 __ sar(rax, Immediate(kSmiTagSize));
3621 __ sar(rcx, Immediate(kSmiTagSize));
3622 // Perform the operation.
3623 switch (op_) {
3624 case Token::SAR:
3625 __ sar(rax);
3626 // No checks of result necessary
3627 break;
3628 case Token::SHR:
3629 __ shrl(rax); // rcx is implicit shift register
3630 // Check that the *unsigned* result fits in a smi.
3631 // Neither of the two high-order bits can be set:
3632 // - 0x80000000: high bit would be lost when smi tagging.
3633 // - 0x40000000: this number would convert to negative when
3634 // Smi tagging. These two cases can only happen with shifts
3635 // by 0 or 1 when handed a valid smi.
3636 __ testq(rax, Immediate(0xc0000000));
3637 __ j(not_zero, slow);
3638 break;
3639 case Token::SHL:
3640 __ shll(rax);
3641 // TODO(Smi): Significant change if Smi changes.
3642 // Check that the *signed* result fits in a smi.
3643 // It does, if the 30th and 31st bits are equal, since then
3644 // shifting the SmiTag in at the bottom doesn't change the sign.
3645 ASSERT(kSmiTagSize == 1);
3646 __ cmpl(rax, Immediate(0xc0000000));
3647 __ j(sign, slow);
3648 __ movsxlq(rax, rax); // Extend new sign of eax into rax.
3649 break;
3650 default:
3651 UNREACHABLE();
3652 }
3653 // Tag the result and store it in register eax.
3654 ASSERT(kSmiTagSize == kTimes2); // adjust code if not the case
3655 __ lea(rax, Operand(rax, rax, kTimes1, kSmiTag));
3656 break;
3657
3658 default:
3659 UNREACHABLE();
3660 break;
3661 }
3662 }
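The fast path above leans on the smi encoding assumed throughout this file: with kSmiTag == 0 and kSmiTagSize == 1, a small integer n is stored as n << 1, so the low bit distinguishes smis from heap object pointers and tagging is just n + n (hence the lea(rax, Operand(rax, rax, kTimes1, kSmiTag)) idiom). A compact sketch of those invariants and of the 0xc0000000 range checks used after the shifts (illustrative, not V8 code):

#include <cstdint>

static inline int32_t SmiTag(int32_t n)     { return n << 1; }    // n + n; assumes FitsInSmi(n)
static inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }  // arithmetic shift keeps the sign
static inline bool    HasSmiTag(int32_t v)  { return (v & 1) == 0; }  // the kSmiTagMask test

// A signed value survives tagging only if bits 30 and 31 agree, i.e. it lies in
// [-2^30, 2^30); that is what the cmpl(rax, Immediate(0xc0000000)) check enforces.
// The unsigned SHR result must additionally be non-negative, so it tests both
// high bits directly with testq(rax, Immediate(0xc0000000)).
static inline bool FitsInSmi(int32_t n) { return n >= -(1 << 30) && n < (1 << 30); }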
3663
3664
3665 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
3666 Label call_runtime;
3667
3668 if (flags_ == SMI_CODE_IN_STUB) {
3669 // The fast case smi code wasn't inlined in the stub caller
3670 // code. Generate it here to speed up common operations.
3671 Label slow;
3672 __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
3673 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
3674 GenerateSmiCode(masm, &slow);
3675 __ ret(2 * kPointerSize); // remove both operands
3676
3677 // Too bad. The fast case smi code didn't succeed.
3678 __ bind(&slow);
3679 }
3680
3681 // Setup registers.
3682 __ movq(rax, Operand(rsp, 1 * kPointerSize)); // get y
3683 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // get x
3684
3685 // Floating point case.
3686 switch (op_) {
3687 case Token::ADD:
3688 case Token::SUB:
3689 case Token::MUL:
3690 case Token::DIV: {
3691 // rax: y
3692 // rdx: x
3693 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
3694 // Fast-case: Both operands are numbers.
3695 // Allocate a heap number, if needed.
3696 Label skip_allocation;
3697 switch (mode_) {
3698 case OVERWRITE_LEFT:
3699 __ movq(rax, rdx);
3700 // Fall through!
3701 case OVERWRITE_RIGHT:
3702 // If the argument in rax is already an object, we skip the
3703 // allocation of a heap number.
3704 __ testl(rax, Immediate(kSmiTagMask));
3705 __ j(not_zero, &skip_allocation);
3706 // Fall through!
3707 case NO_OVERWRITE:
3708 FloatingPointHelper::AllocateHeapNumber(masm,
3709 &call_runtime,
3710 rcx,
3711 rax);
3712 __ bind(&skip_allocation);
3713 break;
3714 default: UNREACHABLE();
3715 }
3716 // xmm4 and xmm5 are volatile XMM registers.
3717 FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
3718
3719 switch (op_) {
3720 case Token::ADD: __ addsd(xmm4, xmm5); break;
3721 case Token::SUB: __ subsd(xmm4, xmm5); break;
3722 case Token::MUL: __ mulsd(xmm4, xmm5); break;
3723 case Token::DIV: __ divsd(xmm4, xmm5); break;
3724 default: UNREACHABLE();
3725 }
3726 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
3727 __ ret(2 * kPointerSize);
3728 }
3729 case Token::MOD: {
3730 // For MOD we go directly to runtime in the non-smi case.
3731 break;
3732 }
3733 case Token::BIT_OR:
3734 case Token::BIT_AND:
3735 case Token::BIT_XOR:
3736 case Token::SAR:
3737 case Token::SHL:
3738 case Token::SHR: {
3739 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
3740 // TODO(X64): Don't convert a Smi to float and then back to int32
3741 // afterwards.
3742 FloatingPointHelper::LoadFloatOperands(masm);
3743
3744 Label skip_allocation, non_smi_result, operand_conversion_failure;
3745
3746 // Reserve space for converted numbers.
3747 __ subq(rsp, Immediate(2 * kPointerSize));
3748
3749 bool use_sse3 = CpuFeatures::IsSupported(CpuFeatures::SSE3);
3750 if (use_sse3) {
3751 // Truncate the operands to 32-bit integers and check for
3752 // exceptions in doing so.
3753 CpuFeatures::Scope scope(CpuFeatures::SSE3);
3754 __ fisttp_s(Operand(rsp, 0 * kPointerSize));
3755 __ fisttp_s(Operand(rsp, 1 * kPointerSize));
3756 __ fnstsw_ax();
3757 __ testl(rax, Immediate(1));
3758 __ j(not_zero, &operand_conversion_failure);
3759 } else {
3760 // Check if right operand is int32.
3761 __ fist_s(Operand(rsp, 0 * kPointerSize));
3762 __ fild_s(Operand(rsp, 0 * kPointerSize));
3763 __ fucompp();
3764 __ fnstsw_ax();
3765 __ sahf(); // TODO(X64): Not available.
3766 __ j(not_zero, &operand_conversion_failure);
3767 __ j(parity_even, &operand_conversion_failure);
3768
3769 // Check if left operand is int32.
3770 __ fist_s(Operand(rsp, 1 * kPointerSize));
3771 __ fild_s(Operand(rsp, 1 * kPointerSize));
3772 __ fucompp();
3773 __ fnstsw_ax();
3774 __ sahf(); // TODO(X64): Not available. Test bits in ax directly
3775 __ j(not_zero, &operand_conversion_failure);
3776 __ j(parity_even, &operand_conversion_failure);
3777 }
3778
3779 // Get int32 operands and perform bitop.
3780 __ pop(rcx);
3781 __ pop(rax);
3782 switch (op_) {
3783 case Token::BIT_OR: __ or_(rax, rcx); break;
3784 case Token::BIT_AND: __ and_(rax, rcx); break;
3785 case Token::BIT_XOR: __ xor_(rax, rcx); break;
3786 case Token::SAR: __ sar(rax); break;
3787 case Token::SHL: __ shl(rax); break;
3788 case Token::SHR: __ shr(rax); break;
3789 default: UNREACHABLE();
3790 }
3791 if (op_ == Token::SHR) {
3792 // Check if result is non-negative and fits in a smi.
3793 __ testl(rax, Immediate(0xc0000000));
3794 __ j(not_zero, &non_smi_result);
3795 } else {
3796 // Check if result fits in a smi.
3797 __ cmpl(rax, Immediate(0xc0000000));
3798 __ j(negative, &non_smi_result);
3799 }
3800 // Tag smi result and return.
3801 ASSERT(kSmiTagSize == kTimes2); // adjust code if not the case
3802 __ lea(rax, Operand(rax, rax, kTimes1, kSmiTag));
3803 __ ret(2 * kPointerSize);
3804
3805 // All ops except SHR return a signed int32 that we load in a HeapNumber.
3806 if (op_ != Token::SHR) {
3807 __ bind(&non_smi_result);
3808 // Allocate a heap number if needed.
3809 __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
3810 switch (mode_) {
3811 case OVERWRITE_LEFT:
3812 case OVERWRITE_RIGHT:
3813 // If the operand was an object, we skip the
3814 // allocation of a heap number.
3815 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
3816 1 * kPointerSize : 2 * kPointerSize));
3817 __ testl(rax, Immediate(kSmiTagMask));
3818 __ j(not_zero, &skip_allocation);
3819 // Fall through!
3820 case NO_OVERWRITE:
3821 FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
3822 rcx, rax);
3823 __ bind(&skip_allocation);
3824 break;
3825 default: UNREACHABLE();
3826 }
3827 // Store the result in the HeapNumber and return.
3828 __ movq(Operand(rsp, 1 * kPointerSize), rbx);
3829 __ fild_s(Operand(rsp, 1 * kPointerSize));
3830 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
3831 __ ret(2 * kPointerSize);
3832 }
3833
3834 // Clear the FPU exception flag and reset the stack before calling
3835 // the runtime system.
3836 __ bind(&operand_conversion_failure);
3837 __ addq(rsp, Immediate(2 * kPointerSize));
3838 if (use_sse3) {
3839 // If we've used the SSE3 instructions for truncating the
3840 // floating point values to integers and it failed, we have a
3841 // pending #IA exception. Clear it.
3842 __ fnclex();
3843 } else {
3844 // The non-SSE3 variant does early bailout if the right
3845 // operand isn't a 32-bit integer, so we may have a single
3846 // value on the FPU stack we need to get rid of.
3847 __ ffree(0);
3848 }
3849
3850 // SHR should return uint32 - go to runtime for non-smi/negative result.
3851 if (op_ == Token::SHR) {
3852 __ bind(&non_smi_result);
3853 }
3854 __ movq(rax, Operand(rsp, 1 * kPointerSize));
3855 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
3856 break;
3857 }
3858 default: UNREACHABLE(); break;
3859 }
3860
3861 // If all else fails, use the runtime system to get the correct
3862 // result.
3863 __ bind(&call_runtime);
3864 // Disable builtin-calls until JS builtins can compile and run.
3865 __ Abort("Disabled until builtins compile and run.");
3866 switch (op_) {
3867 case Token::ADD:
3868 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3869 break;
3870 case Token::SUB:
3871 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3872 break;
3873 case Token::MUL:
3874 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3875 break;
3876 case Token::DIV:
3877 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3878 break;
3879 case Token::MOD:
3880 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3881 break;
3882 case Token::BIT_OR:
3883 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3884 break;
3885 case Token::BIT_AND:
3886 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3887 break;
3888 case Token::BIT_XOR:
3889 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3890 break;
3891 case Token::SAR:
3892 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3893 break;
3894 case Token::SHL:
3895 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3896 break;
3897 case Token::SHR:
3898 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3899 break;
3900 default:
3901 UNREACHABLE();
3902 }
3903 }
3904
3905
3036 #undef __ 3906 #undef __
3037 3907
3038 } } // namespace v8::internal 3908 } } // namespace v8::internal