Chromium Code Reviews

Side by Side Diff: src/codegen-ia32.cc

Issue 6075: Move code for code generator static member functions, code generation... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 12 years, 2 months ago
1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
(...skipping 714 matching lines...)
725 }
726 }
727
728 // Code generation state must be reset.
729 scope_ = NULL;
730 ASSERT(!has_cc());
731 ASSERT(state_ == NULL);
732 }
733
734
735 #undef __
736 #define __ masm->
737
738 Operand Ia32CodeGenerator::SlotOperand(CodeGenerator* cgen,
739 Slot* slot,
740 Register tmp) {
741 // Currently, this assertion will fail if we try to assign to
742 // a constant variable that is constant because it is read-only
743 // (such as the variable referring to a named function expression).
744 // We need to implement assignments to read-only variables.
745 // Ideally, we should do this during AST generation (by converting
746 // such assignments into expression statements); however, in general
747 // we may not be able to make the decision until past AST generation,
748 // that is, when the entire program is known.
749 ASSERT(slot != NULL);
750 int index = slot->index();
751 switch (slot->type()) {
752 case Slot::PARAMETER: return ParameterOperand(cgen, index);
753
754 case Slot::LOCAL: {
755 ASSERT(0 <= index && index < cgen->scope()->num_stack_slots());
756 const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
757 return Operand(ebp, kLocal0Offset - index * kPointerSize);
758 }
759
760 case Slot::CONTEXT: {
761 MacroAssembler* masm = cgen->masm();
762 // Follow the context chain if necessary.
763 ASSERT(!tmp.is(esi)); // do not overwrite context register
764 Register context = esi;
765 int chain_length =
766 cgen->scope()->ContextChainLength(slot->var()->scope());
767 for (int i = chain_length; i-- > 0;) {
768 // Load the closure.
769 // (All contexts, even 'with' contexts, have a closure,
770 // and it is the same for all contexts inside a function.
771 // There is no need to go to the function context first.)
772 __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
773 // Load the function context (which is the incoming, outer context).
774 __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
775 context = tmp;
776 }
777 // We may have a 'with' context now. Get the function context.
778 // (In fact this mov may never be needed, since the scope analysis
779 // may not permit a direct context access in this case and thus we are
780 // always at a function context. However it is safe to dereference be-
781 // cause the function context of a function context is itself. Before
782 // deleting this mov we should try to create a counter-example first,
783 // though...)
784 __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
785 return ContextOperand(tmp, index);
786 }
787
788 default:
789 UNREACHABLE();
790 return Operand(eax);
791 }
792 }
793
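The LOCAL case is pure frame arithmetic: local i lives at a fixed negative offset from ebp. A minimal standalone C++ sketch of that computation (kLocal0Offset is made up here; the real value comes from JavaScriptFrameConstants):

#include <cstdio>

int main() {
  const int kPointerSize = 4;    // ia32
  const int kLocal0Offset = -8;  // hypothetical frame layout constant
  for (int index = 0; index < 3; index++) {
    printf("local %d -> Operand(ebp, %d)\n",
           index, kLocal0Offset - index * kPointerSize);
  }
  return 0;
}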
794
795 #undef __
796 #define __ masm_->
797
798 // Loads a value on TOS. If it is a boolean value, the result may have been
799 // (partially) translated into branches, or it may have set the condition code
800 // register. If force_cc is set, the value is forced to set the condition code
801 // register and no value is pushed. If the condition code register was set,
802 // has_cc() is true and cc_reg_ contains the condition to test for 'true'.
803 void Ia32CodeGenerator::LoadCondition(Expression* x,
804 CodeGenState::AccessType access,
805 Label* true_target,
806 Label* false_target,
807 bool force_cc) {
(...skipping 144 matching lines...)
952 __ pop(eax);
953 __ mov(TOS, eax);
954 } else {
955 __ pop(eax);
956 __ add(Operand(esp), Immediate(size * kPointerSize));
957 __ push(eax);
958 }
959 }
960
961
962 #undef __
963 #define __ masm->
964
965 void Property::GenerateStoreCode(CodeGenerator* cgen,
966 Reference* ref,
967 InitState init_state) {
968 MacroAssembler* masm = cgen->masm();
969 Comment cmnt(masm, "[ Store to Property");
970 __ RecordPosition(position());
971 Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key());
972 }
973
974
975 void VariableProxy::GenerateStoreCode(CodeGenerator* cgen,
976 Reference* ref,
977 InitState init_state) {
978 MacroAssembler* masm = cgen->masm();
979 Comment cmnt(masm, "[ Store to VariableProxy");
980 Variable* node = var();
981
982 Expression* expr = node->rewrite();
983 if (expr != NULL) {
984 expr->GenerateStoreCode(cgen, ref, init_state);
985 } else {
986 ASSERT(node->is_global());
987 if (node->AsProperty() != NULL) {
988 __ RecordPosition(node->AsProperty()->position());
989 }
990 Expression* key = new Literal(node->name());
991 Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key);
992 }
993 }
994
995
996 void Slot::GenerateStoreCode(CodeGenerator* cgen,
997 Reference* ref,
998 InitState init_state) {
999 MacroAssembler* masm = cgen->masm();
1000 Comment cmnt(masm, "[ Store to Slot");
1001
1002 if (type() == Slot::LOOKUP) {
1003 ASSERT(var()->mode() == Variable::DYNAMIC);
1004
1005 // For now, just do a runtime call.
1006 __ push(esi);
1007 __ push(Immediate(var()->name()));
1008
1009 if (init_state == CONST_INIT) {
1010 // Same as the case for a normal store, but ignores attribute
1011 // (e.g. READ_ONLY) of context slot so that we can initialize const
1012 // properties (introduced via eval("const foo = (some expr);")). Also,
1013 // uses the current function context instead of the top context.
1014 //
1015 // Note that we must declare the foo upon entry of eval(), via a
1016 // context slot declaration, but we cannot initialize it at the same
1017 // time, because the const declaration may be at the end of the eval
1018 // code (sigh...) and the const variable may have been used before
1019 // (where its value is 'undefined'). Thus, we can only do the
1020 // initialization when we actually encounter the expression and when
1021 // the expression operands are defined and valid, and thus we need the
1022 // split into 2 operations: declaration of the context slot followed
1023 // by initialization.
1024 __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
1025 } else {
1026 __ CallRuntime(Runtime::kStoreContextSlot, 3);
1027 }
1028 // Storing a variable must keep the (new) value on the expression
1029 // stack. This is necessary for compiling assignment expressions.
1030 __ push(eax);
1031
1032 } else {
1033 ASSERT(var()->mode() != Variable::DYNAMIC);
1034
1035 Label exit;
1036 if (init_state == CONST_INIT) {
1037 ASSERT(var()->mode() == Variable::CONST);
1038 // Only the first const initialization must be executed (the slot
1039 // still contains 'the hole' value). When the assignment is executed,
1040 // the code is identical to a normal store (see below).
1041 Comment cmnt(masm, "[ Init const");
1042 __ mov(eax, Ia32CodeGenerator::SlotOperand(cgen, this, ecx));
1043 __ cmp(eax, Factory::the_hole_value());
1044 __ j(not_equal, &exit);
1045 }
1046
1047 // We must execute the store.
1048 // Storing a variable must keep the (new) value on the stack. This is
1049 // necessary for compiling assignment expressions. ecx may be loaded
1050 // with context; used below in RecordWrite.
1051 //
1052 // Note: We will reach here even with node->var()->mode() ==
1053 // Variable::CONST because of const declarations which will initialize
1054 // consts to 'the hole' value and by doing so, end up calling this
1055 // code.
1056 __ pop(eax);
1057 __ mov(Ia32CodeGenerator::SlotOperand(cgen, this, ecx), eax);
1058 __ push(eax); // RecordWrite may destroy the value in eax.
1059 if (type() == Slot::CONTEXT) {
1060 // ecx is loaded with context when calling SlotOperand above.
1061 int offset = FixedArray::kHeaderSize + index() * kPointerSize;
1062 __ RecordWrite(ecx, offset, eax, ebx);
1063 }
1064 // If we definitely did not jump over the assignment, we do not need to
1065 // bind the exit label. Doing so can defeat peephole optimization.
1066 if (init_state == CONST_INIT) __ bind(&exit);
1067 }
1068 }
1069
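The declare-then-initialize split for const context slots can be pictured in plain C++ (not V8 code; kTheHole stands in for Factory::the_hole_value()):

#include <cassert>

const int kTheHole = -1;  // stand-in for the hole sentinel

struct ContextSlot {
  int value = kTheHole;   // declared on entry, not yet initialized
};

// Mirrors the j(not_equal, &exit) above: only the first initialization
// stores; a slot that no longer holds the hole is left untouched.
void InitConst(ContextSlot* slot, int v) {
  if (slot->value == kTheHole) slot->value = v;
}

int main() {
  ContextSlot foo;
  InitConst(&foo, 42);
  InitConst(&foo, 99);   // ignored; const already initialized
  assert(foo.value == 42);
  return 0;
}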
1070
1071 class ToBooleanStub: public CodeStub {
1072 public:
1073 ToBooleanStub() { }
1074
1075 void Generate(MacroAssembler* masm);
1076
1077 private:
1078
1079 Major MajorKey() { return ToBoolean; }
1080
1081 int MinorKey() { return 0; }
1082
1083 const char* GetName() { return "ToBooleanStub"; }
1084
1085 #ifdef DEBUG
1086 void Print() {
1087 PrintF("ToBooleanStub\n");
1088 }
1089 #endif
1090 };
1091
1092
1093 // NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
1094 void ToBooleanStub::Generate(MacroAssembler* masm) {
1095 Label false_result, true_result, not_string;
1096 __ mov(eax, Operand(esp, 1 * kPointerSize));
1097
1098 // 'null' => false.
1099 __ cmp(eax, Factory::null_value());
1100 __ j(equal, &false_result);
1101
1102 // Get the map and type of the heap object.
1103 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
1104 __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
1105
1106 // Undetectable => false.
1107 __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
1108 __ and_(ebx, 1 << Map::kIsUndetectable);
1109 __ j(not_zero, &false_result);
1110
1111 // JavaScript object => true.
1112 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
1113 __ j(above_equal, &true_result);
1114
1115 // String value => false iff empty.
1116 __ cmp(ecx, FIRST_NONSTRING_TYPE);
1117 __ j(above_equal, &not_string);
1118 __ and_(ecx, kStringSizeMask);
1119 __ cmp(ecx, kShortStringTag);
1120 __ j(not_equal, &true_result); // Empty string is always short.
1121 __ mov(edx, FieldOperand(eax, String::kLengthOffset));
1122 __ shr(edx, String::kShortLengthShift);
1123 __ j(zero, &false_result);
1124 __ jmp(&true_result);
1125
1126 __ bind(&not_string);
1127 // HeapNumber => false iff +0, -0, or NaN.
1128 __ cmp(edx, Factory::heap_number_map());
1129 __ j(not_equal, &true_result);
1130 __ fldz();
1131 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
1132 __ fucompp();
1133 __ push(eax);
1134 __ fnstsw_ax();
1135 __ sahf();
1136 __ pop(eax);
1137 __ j(zero, &false_result);
1138 // Fall through to |true_result|.
1139
1140 // Return 1/0 for true/false in eax.
1141 __ bind(&true_result);
1142 __ mov(eax, 1);
1143 __ ret(1 * kPointerSize);
1144 __ bind(&false_result);
1145 __ mov(eax, 0);
1146 __ ret(1 * kPointerSize);
1147 }
1148
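The HeapNumber arm ("false iff +0, -0, or NaN") collapses to a one-line predicate; a hedged C++ restatement of just that case, not the stub itself:

#include <cmath>

// The fldz/fucompp sequence above compares the value against zero and
// treats the unordered (NaN) outcome the same as equality with zero.
bool HeapNumberToBoolean(double value) {
  return value != 0.0 && !std::isnan(value);
}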
1149
1150 #undef __
1151 #define __ masm_->
1152
1153 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
1154 // convert it to a boolean in the condition code register or jump to
1155 // 'false_target'/'true_target' as appropriate.
1156 void Ia32CodeGenerator::ToBoolean(Label* true_target, Label* false_target) {
1157 Comment cmnt(masm_, "[ ToBoolean");
1158
1159 // The value to convert should be popped from the stack.
1160 __ pop(eax);
1161
1162 // Fast case checks.
(...skipping 64 matching lines...)
1227 ASSERT(var->is_global());
1228 __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
1229 } else {
1230 __ call(ic, RelocInfo::CODE_TARGET);
1231 }
1232 }
1233 __ push(eax); // IC call leaves result in eax, push it out
1234 }
1235
1236
1237 #undef __
1238 #define __ masm->
1239
1240 void Ia32CodeGenerator::SetReferenceProperty(CodeGenerator* cgen,
1241 Reference* ref,
1242 Expression* key) {
1243 ASSERT(!ref->is_illegal());
1244 MacroAssembler* masm = cgen->masm();
1245
1246 if (ref->type() == Reference::NAMED) {
1247 // Compute the name of the property.
1248 Literal* literal = key->AsLiteral();
1249 Handle<String> name(String::cast(*literal->handle()));
1250
1251 // Call the appropriate IC code.
1252 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
1253 // TODO(1222589): Make the IC grab the values from the stack.
1254 __ pop(eax);
1255 // Set up the name register.
1256 __ Set(ecx, Immediate(name));
1257 __ call(ic, RelocInfo::CODE_TARGET);
1258 } else {
1259 // Access keyed property.
1260 ASSERT(ref->type() == Reference::KEYED);
1261
1262 // Call IC code.
1263 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
1264 // TODO(1222589): Make the IC grab the values from the stack.
1265 __ pop(eax);
1266 __ call(ic, RelocInfo::CODE_TARGET);
1267 }
1268 __ push(eax); // IC call leaves result in eax, push it out
1269 }
1270
1271
1272 class FloatingPointHelper : public AllStatic {
1273 public:
1274 // Code pattern for loading floating point values. Input values must
1275 // be either smi or heap number objects (fp values). Requirements:
1276 // operand_1 on TOS+1, operand_2 on TOS+2; Returns operands as
1277 // floating point numbers on FPU stack.
1278 static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
1279 // Test if operands are smi or number objects (fp). Requirements:
1280 // operand_1 in eax, operand_2 in edx; falls through on float
1281 // operands, jumps to the non_float label otherwise.
(...skipping 52 matching lines...)
1334 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
1335 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
1336 case Token::SAR: return "GenericBinaryOpStub_SAR";
1337 case Token::SHL: return "GenericBinaryOpStub_SHL";
1338 case Token::SHR: return "GenericBinaryOpStub_SHR";
1339 default: return "GenericBinaryOpStub";
1340 }
1341 }
1342
1343
1344 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
1345 Label call_runtime;
1346 __ mov(eax, Operand(esp, 1 * kPointerSize)); // Get y.
1347 __ mov(edx, Operand(esp, 2 * kPointerSize)); // Get x.
1348
1349 // 1. Smi case.
1350 switch (op_) {
1351 case Token::ADD: {
1352 // eax: y.
1353 // edx: x.
1354 Label revert;
1355 __ mov(ecx, Operand(eax));
1356 __ or_(ecx, Operand(edx)); // ecx = x | y.
1357 __ add(eax, Operand(edx)); // Add y optimistically.
1358 // Go slow-path in case of overflow.
1359 __ j(overflow, &revert, not_taken);
1360 // Go slow-path in case of non-smi operands.
1361 ASSERT(kSmiTag == 0); // adjust code below
1362 __ test(ecx, Immediate(kSmiTagMask));
1363 __ j(not_zero, &revert, not_taken);
1364 __ ret(2 * kPointerSize); // Remove all operands.
1365
1366 // Revert optimistic add.
1367 __ bind(&revert);
1368 __ sub(eax, Operand(edx));
1369 break;
1370 }
1371 case Token::SUB: {
1372 // eax: y.
1373 // edx: x.
1374 Label revert;
1375 __ mov(ecx, Operand(edx));
1376 __ or_(ecx, Operand(eax)); // ecx = x | y.
1377 __ sub(edx, Operand(eax)); // Subtract y optimistically.
1378 // Go slow-path in case of overflow.
1379 __ j(overflow, &revert, not_taken);
1380 // Go slow-path in case of non-smi operands.
1381 ASSERT(kSmiTag == 0); // adjust code below
1382 __ test(ecx, Immediate(kSmiTagMask));
1383 __ j(not_zero, &revert, not_taken);
1384 __ mov(eax, Operand(edx));
1385 __ ret(2 * kPointerSize); // Remove all operands.
1386
1387 // Revert optimistic sub.
1388 __ bind(&revert);
1389 __ add(edx, Operand(eax));
1390 break;
1391 }
1392 case Token::MUL: {
1393 // eax: y
1394 // edx: x
1395 // a) both operands smi and result fits into a smi -> return.
1396 // b) at least one of operands non-smi -> non_smi_operands.
1397 // c) result does not fit in a smi -> non_smi_result.
1398 Label non_smi_operands, non_smi_result;
1399 // Tag check.
1400 __ mov(ecx, Operand(edx));
1401 __ or_(ecx, Operand(eax)); // ecx = x | y.
1402 ASSERT(kSmiTag == 0); // Adjust code below.
1403 __ test(ecx, Immediate(kSmiTagMask));
1404 // Jump if not both smi; check if float numbers.
1405 __ j(not_zero, &non_smi_operands, not_taken);
1406
1407 // Get copies of operands.
1408 __ mov(ebx, Operand(eax));
1409 __ mov(ecx, Operand(edx));
1410 // If the smi tag is 0 we can just leave the tag on one operand.
1411 ASSERT(kSmiTag == 0); // adjust code below
1412 // Remove tag from one of the operands (but keep sign).
1413 __ sar(ecx, kSmiTagSize);
1414 // Do multiplication.
1415 __ imul(eax, Operand(ecx)); // Multiplication of Smis; result in eax.
1416 // Go slow on overflows.
1417 __ j(overflow, &non_smi_result, not_taken);
1418 // ...but operands OK for float arithmetic.
1419
1420 // If the result is +0 we may need to check if the result should
1421 // really be -0. Welcome to the -0 fan club.
1422 __ NegativeZeroTest(eax, ebx, edx, ecx, &non_smi_result);
1423
1424 __ ret(2 * kPointerSize);
1425
1426 __ bind(&non_smi_result);
1427 // TODO(1243132): Do not check float operands here.
1428 __ bind(&non_smi_operands);
1429 __ mov(eax, Operand(esp, 1 * kPointerSize));
1430 __ mov(edx, Operand(esp, 2 * kPointerSize));
1431 break;
1432 }
1433 case Token::DIV: {
1434 // eax: y
1435 // edx: x
1436 Label non_smi_operands, non_smi_result, division_by_zero;
1437 __ mov(ebx, Operand(eax)); // Get y
1438 __ mov(eax, Operand(edx)); // Get x
1439
1440 __ cdq(); // Sign extend eax into edx:eax.
1441 // Tag check.
1442 __ mov(ecx, Operand(ebx));
1443 __ or_(ecx, Operand(eax)); // ecx = x | y.
1444 ASSERT(kSmiTag == 0); // Adjust code below.
1445 __ test(ecx, Immediate(kSmiTagMask));
1446 // Jump if not both smi; check if float numbers.
1447 __ j(not_zero, &non_smi_operands, not_taken);
1448 __ test(ebx, Operand(ebx)); // Check for 0 divisor.
1449 __ j(zero, &division_by_zero, not_taken);
1450
1451 __ idiv(ebx);
1452 // Check for the corner case of dividing the most negative smi by -1.
1453 // (We cannot use the overflow flag, since it is not set by idiv.)
1454 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1455 __ cmp(eax, 0x40000000);
1456 __ j(equal, &non_smi_result);
1457 // If the result is +0 we may need to check if the result should
1458 // really be -0. Welcome to the -0 fan club.
1459 __ NegativeZeroTest(eax, ecx, &non_smi_result); // Use ecx = x | y.
1460 __ test(edx, Operand(edx));
1461 // Use floats if there's a remainder.
1462 __ j(not_zero, &non_smi_result, not_taken);
1463 __ shl(eax, kSmiTagSize);
1464 __ ret(2 * kPointerSize); // Remove all operands.
1465
1466 __ bind(&division_by_zero);
1467 __ mov(eax, Operand(esp, 1 * kPointerSize));
1468 __ mov(edx, Operand(esp, 2 * kPointerSize));
1469 __ jmp(&call_runtime); // Division by zero must go through runtime.
1470
1471 __ bind(&non_smi_result);
1472 // TODO(1243132): Do not check float operands here.
1473 __ bind(&non_smi_operands);
1474 __ mov(eax, Operand(esp, 1 * kPointerSize));
1475 __ mov(edx, Operand(esp, 2 * kPointerSize));
1476 break;
1477 }
1478 case Token::MOD: {
1479 Label slow;
1480 __ mov(ebx, Operand(eax)); // get y
1481 __ mov(eax, Operand(edx)); // get x
1482 __ cdq(); // sign extend eax into edx:eax
1483 // tag check
1484 __ mov(ecx, Operand(ebx));
1485 __ or_(ecx, Operand(eax)); // ecx = x | y;
1486 ASSERT(kSmiTag == 0); // adjust code below
1487 __ test(ecx, Immediate(kSmiTagMask));
1488 __ j(not_zero, &slow, not_taken);
1489 __ test(ebx, Operand(ebx)); // test for y == 0
1490 __ j(zero, &slow);
1491
1492 // Fast case: Do integer division and use remainder.
1493 __ idiv(ebx);
1494 __ NegativeZeroTest(edx, ecx, &slow); // use ecx = x | y
1495 __ mov(eax, Operand(edx));
1496 __ ret(2 * kPointerSize);
1497
1498 // Slow case: Call runtime operator implementation.
1499 __ bind(&slow);
1500 __ mov(eax, Operand(esp, 1 * kPointerSize));
1501 __ mov(edx, Operand(esp, 2 * kPointerSize));
1502 // Fall through to |call_runtime|.
1503 break;
1504 }
1505 case Token::BIT_OR:
1506 case Token::BIT_AND:
1507 case Token::BIT_XOR:
1508 case Token::SAR:
1509 case Token::SHL:
1510 case Token::SHR: {
1511 // Smi-case for bitops should already have been inlined.
1512 break;
1513 }
1514 default: {
1515 UNREACHABLE();
1516 }
1517 }
1518
1519 // 2. Floating point case.
1520 switch (op_) {
1521 case Token::ADD:
1522 case Token::SUB:
1523 case Token::MUL:
1524 case Token::DIV: {
1525 // eax: y
1526 // edx: x
1527 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
1528 // Fast-case: Both operands are numbers.
1529 // Allocate a heap number, if needed.
1530 Label skip_allocation;
1531 switch (mode_) {
1532 case OVERWRITE_LEFT:
1533 __ mov(eax, Operand(edx));
1534 // Fall through!
1535 case OVERWRITE_RIGHT:
1536 // If the argument in eax is already an object, we skip the
1537 // allocation of a heap number.
1538 __ test(eax, Immediate(kSmiTagMask));
1539 __ j(not_zero, &skip_allocation, not_taken);
1540 // Fall through!
1541 case NO_OVERWRITE:
1542 FloatingPointHelper::AllocateHeapNumber(masm,
1543 &call_runtime,
1544 ecx,
1545 edx);
1546 __ bind(&skip_allocation);
1547 break;
1548 default: UNREACHABLE();
1549 }
1550 FloatingPointHelper::LoadFloatOperands(masm, ecx);
1551
1552 switch (op_) {
1553 case Token::ADD: __ faddp(1); break;
1554 case Token::SUB: __ fsubp(1); break;
1555 case Token::MUL: __ fmulp(1); break;
1556 case Token::DIV: __ fdivp(1); break;
1557 default: UNREACHABLE();
1558 }
1559 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1560 __ ret(2 * kPointerSize);
1561 }
1562 case Token::MOD: {
1563 // For MOD we go directly to runtime in the non-smi case.
1564 break;
1565 }
1566 case Token::BIT_OR:
1567 case Token::BIT_AND:
1568 case Token::BIT_XOR:
1569 case Token::SAR:
1570 case Token::SHL:
1571 case Token::SHR: {
1572 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
1573 FloatingPointHelper::LoadFloatOperands(masm, ecx);
1574
1575 Label non_int32_operands, non_smi_result, skip_allocation;
1576 // Reserve space for converted numbers.
1577 __ sub(Operand(esp), Immediate(2 * kPointerSize));
1578
1579 // Check if right operand is int32.
1580 __ fist_s(Operand(esp, 1 * kPointerSize));
1581 __ fild_s(Operand(esp, 1 * kPointerSize));
1582 __ fucompp();
1583 __ fnstsw_ax();
1584 __ sahf();
1585 __ j(not_zero, &non_int32_operands);
1586 __ j(parity_even, &non_int32_operands);
1587
1588 // Check if left operand is int32.
1589 __ fist_s(Operand(esp, 0 * kPointerSize));
1590 __ fild_s(Operand(esp, 0 * kPointerSize));
1591 __ fucompp();
1592 __ fnstsw_ax();
1593 __ sahf();
1594 __ j(not_zero, &non_int32_operands);
1595 __ j(parity_even, &non_int32_operands);
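// (The fist_s/fild_s pair is an exactness test: store the operand as an
// int32, load it back, and compare with the original still on the FPU
// stack. Only a double that was an exact int32 survives the round trip
// with equality; parity_even additionally catches the unordered NaN case.)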
1596
1597 // Get int32 operands and perform bitop.
1598 __ pop(eax);
1599 __ pop(ecx);
1600 switch (op_) {
1601 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
1602 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
1603 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
1604 case Token::SAR: __ sar(eax); break;
1605 case Token::SHL: __ shl(eax); break;
1606 case Token::SHR: __ shr(eax); break;
1607 default: UNREACHABLE();
1608 }
1609
1610 // Check if result is non-negative and fits in a smi.
1611 __ test(eax, Immediate(0xc0000000));
1612 __ j(not_zero, &non_smi_result);
1613
1614 // Tag smi result and return.
1615 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
1616 __ lea(eax, Operand(eax, times_2, kSmiTag));
1617 __ ret(2 * kPointerSize);
1618
1619 // All ops except SHR return a signed int32 that we load in a HeapNumber.
1620 if (op_ != Token::SHR) {
1621 __ bind(&non_smi_result);
1622 // Allocate a heap number if needed.
1623 __ mov(ebx, Operand(eax)); // ebx: result
1624 switch (mode_) {
1625 case OVERWRITE_LEFT:
1626 case OVERWRITE_RIGHT:
1627 // If the operand was an object, we skip the
1628 // allocation of a heap number.
1629 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1630 1 * kPointerSize : 2 * kPointerSize));
1631 __ test(eax, Immediate(kSmiTagMask));
1632 __ j(not_zero, &skip_allocation, not_taken);
1633 // Fall through!
1634 case NO_OVERWRITE:
1635 FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
1636 ecx, edx);
1637 __ bind(&skip_allocation);
1638 break;
1639 default: UNREACHABLE();
1640 }
1641 // Store the result in the HeapNumber and return.
1642 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1643 __ fild_s(Operand(esp, 1 * kPointerSize));
1644 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1645 __ ret(2 * kPointerSize);
1646 }
1647 __ bind(&non_int32_operands);
1648 // Restore stacks and operands before calling runtime.
1649 __ ffree(0);
1650 __ add(Operand(esp), Immediate(2 * kPointerSize));
1651
1652 // SHR should return uint32 - go to runtime for non-smi/negative result.
1653 if (op_ == Token::SHR) __ bind(&non_smi_result);
1654 __ mov(eax, Operand(esp, 1 * kPointerSize));
1655 __ mov(edx, Operand(esp, 2 * kPointerSize));
1656 break;
1657 }
1658 default: UNREACHABLE(); break;
1659 }
1660
1661 // 3. If all else fails, use the runtime system to get the correct result.
1662 __ bind(&call_runtime);
1663 switch (op_) {
1664 case Token::ADD:
1665 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1666 break;
1667 case Token::SUB:
1668 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1669 break;
1670 case Token::MUL:
1671 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1672 break;
1673 case Token::DIV:
1674 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1675 break;
1676 case Token::MOD:
1677 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1678 break;
1679 case Token::BIT_OR:
1680 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1681 break;
1682 case Token::BIT_AND:
1683 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1684 break;
1685 case Token::BIT_XOR:
1686 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1687 break;
1688 case Token::SAR:
1689 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1690 break;
1691 case Token::SHL:
1692 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1693 break;
1694 case Token::SHR:
1695 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1696 break;
1697 default:
1698 UNREACHABLE();
1699 }
1700 }
1701
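The smi fast paths above all lean on two facts: with kSmiTag == 0 a smi is the integer shifted left by one, so tagged addition is ordinary machine addition, and or-ing the operands tests both tags with a single AND. A standalone sketch (not V8 code; __builtin_add_overflow is a GCC/Clang intrinsic standing in for the overflow flag):

#include <cstdint>

// Returns true and writes the tagged sum on the fast path; returns false
// (caller falls back to the runtime) for a non-smi operand or overflow.
bool SmiAdd(int32_t x_tagged, int32_t y_tagged, int32_t* result) {
  const int32_t kSmiTagMask = 1;  // kSmiTag == 0, one-bit tag
  if ((x_tagged | y_tagged) & kSmiTagMask) return false;  // non-smi operand
  int32_t sum;
  if (__builtin_add_overflow(x_tagged, y_tagged, &sum)) return false;
  *result = sum;  // sum of two even (tagged) values is again tagged
  return true;
}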
1702
1703 void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
1704 Label* need_gc,
1705 Register scratch1,
1706 Register scratch2) {
1707 ExternalReference allocation_top =
1708 ExternalReference::new_space_allocation_top_address();
1709 ExternalReference allocation_limit =
1710 ExternalReference::new_space_allocation_limit_address();
1711 __ mov(Operand(scratch1), Immediate(allocation_top));
1712 __ mov(eax, Operand(scratch1, 0));
1713 __ lea(scratch2, Operand(eax, HeapNumber::kSize)); // scratch2: new top
1714 __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
1715 __ j(above, need_gc, not_taken);
1716
1717 __ mov(Operand(scratch1, 0), scratch2); // store new top
1718 __ mov(Operand(eax, HeapObject::kMapOffset),
1719 Immediate(Factory::heap_number_map()));
1720 // Tag old top and use as result.
1721 __ add(Operand(eax), Immediate(kHeapObjectTag));
1722 }
1723
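AllocateHeapNumber is the classic bump-pointer pattern: read the new-space allocation top, bump it by the object size, bail to need_gc past the limit, otherwise commit the new top. A minimal sketch with illustrative names, not the real V8 interfaces:

#include <cstddef>

// top/limit play the roles of the new-space allocation top and limit
// external references; a null return corresponds to jumping to need_gc.
char* AllocateRaw(char** top, char* limit, size_t size) {
  char* result = *top;
  if (result + size > limit) return nullptr;  // caller must GC and retry
  *top = result + size;                       // commit the new top
  return result;  // untagged address; the stub then adds kHeapObjectTag
}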
1724
1725 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
1726 Register scratch) {
1727 Label load_smi_1, load_smi_2, done_load_1, done;
1728 __ mov(scratch, Operand(esp, 2 * kPointerSize));
1729 __ test(scratch, Immediate(kSmiTagMask));
1730 __ j(zero, &load_smi_1, not_taken);
1731 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
1732 __ bind(&done_load_1);
1733
1734 __ mov(scratch, Operand(esp, 1 * kPointerSize));
1735 __ test(scratch, Immediate(kSmiTagMask));
1736 __ j(zero, &load_smi_2, not_taken);
1737 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
1738 __ jmp(&done);
1739
1740 __ bind(&load_smi_1);
1741 __ sar(scratch, kSmiTagSize);
1742 __ push(scratch);
1743 __ fild_s(Operand(esp, 0));
1744 __ pop(scratch);
1745 __ jmp(&done_load_1);
1746
1747 __ bind(&load_smi_2);
1748 __ sar(scratch, kSmiTagSize);
1749 __ push(scratch);
1750 __ fild_s(Operand(esp, 0));
1751 __ pop(scratch);
1752
1753 __ bind(&done);
1754 }
1755
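The load_smi paths convert a smi by untagging with an arithmetic shift (keeping the sign) and letting fild_s widen the integer to a double; the equivalent C++ is just:

#include <cstdint>

double SmiToDouble(int32_t tagged) {
  const int kSmiTagSize = 1;  // one-bit tag, kSmiTag == 0
  return static_cast<double>(tagged >> kSmiTagSize);
}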
1756
1757 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
1758 Label* non_float,
1759 Register scratch) {
1760 Label test_other, done;
1761 // Test if both operands are floats or smi -> scratch=k_is_float;
1762 // Otherwise scratch = k_not_float.
1763 __ test(edx, Immediate(kSmiTagMask));
1764 __ j(zero, &test_other, not_taken); // argument in edx is OK
1765 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
1766 __ cmp(scratch, Factory::heap_number_map());
1767 __ j(not_equal, non_float); // argument in edx is not a number -> NaN
1768
1769 __ bind(&test_other);
1770 __ test(eax, Immediate(kSmiTagMask));
1771 __ j(zero, &done); // argument in eax is OK
1772 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
1773 __ cmp(scratch, Factory::heap_number_map());
1774 __ j(not_equal, non_float); // argument in eax is not a number -> NaN
1775
1776 // Fall-through: Both operands are numbers.
1777 __ bind(&done);
1778 }
1779
1780
1781 void UnarySubStub::Generate(MacroAssembler* masm) {
1782 Label undo;
1783 Label slow;
1784 Label done;
1785 Label try_float;
1786
1787 // Check whether the value is a smi.
1788 __ test(eax, Immediate(kSmiTagMask));
1789 __ j(not_zero, &try_float, not_taken);
1790
1791 // Enter runtime system if the value of the expression is zero
1792 // to make sure that we switch between 0 and -0.
1793 __ test(eax, Operand(eax));
1794 __ j(zero, &slow, not_taken);
1795
1796 // The value of the expression is a smi that is not zero. Try
1797 // optimistic subtraction '0 - value'.
1798 __ mov(edx, Operand(eax));
1799 __ Set(eax, Immediate(0));
1800 __ sub(eax, Operand(edx));
1801 __ j(overflow, &undo, not_taken);
1802
1803 // If result is a smi we are done.
1804 __ test(eax, Immediate(kSmiTagMask));
1805 __ j(zero, &done, taken);
1806
1807 // Restore eax and enter runtime system.
1808 __ bind(&undo);
1809 __ mov(eax, Operand(edx));
1810
1811 // Enter runtime system.
1812 __ bind(&slow);
1813 __ pop(ecx); // pop return address
1814 __ push(eax);
1815 __ push(ecx); // push return address
1816 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
1817
1818 // Try floating point case.
1819 __ bind(&try_float);
1820 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
1821 __ cmp(edx, Factory::heap_number_map());
1822 __ j(not_equal, &slow);
1823 __ mov(edx, Operand(eax));
1824 // edx: operand
1825 FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
1826 // eax: allocated 'empty' number
1827 __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
1828 __ fchs();
1829 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1830
1831 __ bind(&done);
1832
1833 __ StubReturn(1);
1834 }
1835
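The early zero test exists because a smi cannot represent -0: negating the smi 0 must produce the HeapNumber -0, so that case goes to the runtime. A two-line illustration of why -0 requires a double:

#include <cmath>
#include <cstdio>

int main() {
  double plus_zero = 0.0;
  double minus_zero = -plus_zero;
  // Equal under ==, distinguishable only via the sign bit.
  printf("%d %d\n", plus_zero == minus_zero, std::signbit(minus_zero));
  return 0;
}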
1836
1837 class ArgumentsAccessStub: public CodeStub { 1077 class ArgumentsAccessStub: public CodeStub {
1838 public: 1078 public:
1839 explicit ArgumentsAccessStub(bool is_length) : is_length_(is_length) { } 1079 explicit ArgumentsAccessStub(bool is_length) : is_length_(is_length) { }
1840 1080
1841 private: 1081 private:
1842 bool is_length_; 1082 bool is_length_;
1843 1083
1844 Major MajorKey() { return ArgumentsAccess; } 1084 Major MajorKey() { return ArgumentsAccess; }
1845 int MinorKey() { return is_length_ ? 1 : 0; } 1085 int MinorKey() { return is_length_ ? 1 : 0; }
1846 void Generate(MacroAssembler* masm); 1086 void Generate(MacroAssembler* masm);
1847 1087
1848 const char* GetName() { return "ArgumentsAccessStub"; } 1088 const char* GetName() { return "ArgumentsAccessStub"; }
1849 1089
1850 #ifdef DEBUG 1090 #ifdef DEBUG
1851 void Print() { 1091 void Print() {
1852 PrintF("ArgumentsAccessStub (is_length %s)\n", 1092 PrintF("ArgumentsAccessStub (is_length %s)\n",
1853 is_length_ ? "true" : "false"); 1093 is_length_ ? "true" : "false");
1854 } 1094 }
1855 #endif 1095 #endif
1856 }; 1096 };
1857 1097
1858 1098
1859 void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
1860 // Check that the key is a smi for non-length access.
1861 Label slow;
1862 if (!is_length_) {
1863 __ mov(ebx, Operand(esp, 1 * kPointerSize)); // skip return address
1864 __ test(ebx, Immediate(kSmiTagMask));
1865 __ j(not_zero, &slow, not_taken);
1866 }
1867
1868 // Check if the calling frame is an arguments adaptor frame.
1869 Label adaptor;
1870 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
1871 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
1872 __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
1873 __ j(equal, &adaptor);
1874
1875 // The displacement is used for skipping the return address on the
1876 // stack. It is the offset of the last parameter (if any) relative
1877 // to the frame pointer.
1878 static const int kDisplacement = 1 * kPointerSize;
1879 ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
1880
1881 if (is_length_) {
1882 // Do nothing. The length is already in register eax.
1883 } else {
1884 // Check index against formal parameters count limit passed in
1885 // through register eax. Use unsigned comparison to get negative
1886 // check for free.
1887 __ cmp(ebx, Operand(eax));
1888 __ j(above_equal, &slow, not_taken);
1889
1890 // Read the argument from the stack.
1891 __ lea(edx, Operand(ebp, eax, times_2, 0));
1892 __ neg(ebx);
1893 __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
1894 }
1895
1896 // Return the length or the argument.
1897 __ ret(0);
1898
1899 // Arguments adaptor case: Find the length or the actual argument in
1900 // the calling frame.
1901 __ bind(&adaptor);
1902 if (is_length_) {
1903 // Read the arguments length from the adaptor frame.
1904 __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
1905 } else {
1906 // Check index against actual arguments limit found in the
1907 // arguments adaptor frame. Use unsigned comparison to get
1908 // negative check for free.
1909 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
1910 __ cmp(ebx, Operand(ecx));
1911 __ j(above_equal, &slow, not_taken);
1912
1913 // Read the argument from the stack.
1914 __ lea(edx, Operand(edx, ecx, times_2, 0));
1915 __ neg(ebx);
1916 __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
1917 }
1918
1919 // Return the length or the argument.
1920 __ ret(0);
1921
1922 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1923 // by calling the runtime system.
1924 if (!is_length_) {
1925 __ bind(&slow);
1926 __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
1927 }
1928 }
1929
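Both argument loads scale the smi key with times_2 rather than times_4; this works because the smi tag already contributes a factor of two, which is exactly what the ASSERT on kSmiTagSize pins down. A one-assert sketch:

#include <cassert>

int main() {
  const int kPointerSize = 4;  // ia32
  int index = 3;
  int smi = index << 1;        // tagged smi key (kSmiTag == 0, one-bit tag)
  // times_2 on an already-doubled smi gives index * kPointerSize.
  assert(smi * 2 == index * kPointerSize);
  return 0;
}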
1930
1931 #undef __
1932 #define __ masm_->
1933
1934 void Ia32CodeGenerator::GenericBinaryOperation(Token::Value op,
1935 OverwriteMode overwrite_mode) {
1936 Comment cmnt(masm_, "[ BinaryOperation");
1937 Comment cmnt_token(masm_, Token::String(op));
1938 switch (op) {
1939 case Token::ADD:
1940 case Token::SUB:
1941 case Token::MUL:
1942 case Token::DIV:
1943 case Token::MOD: {
(...skipping 415 matching lines...)
2359 __ push(Immediate(value));
2360 __ push(eax);
2361 }
2362 GenericBinaryOperation(op, overwrite_mode);
2363 break;
2364 }
2365 }
2366 }
2367
2368
2369 #undef __
2370 #define __ masm->
2371
2372 class CompareStub: public CodeStub {
2373 public:
2374 CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
2375
2376 void Generate(MacroAssembler* masm);
2377
2378 private:
2379 Condition cc_;
2380 bool strict_;
2381
(...skipping 10 matching lines...)
2392 #ifdef DEBUG
2393 void Print() {
2394 PrintF("CompareStub (cc %d), (strict %s)\n",
2395 static_cast<int>(cc_),
2396 strict_ ? "true" : "false");
2397 }
2398 #endif
2399 };
2400
2401
2402 void CompareStub::Generate(MacroAssembler* masm) {
2403 Label call_builtin, done;
2404 // Save the return address (and get it off the stack).
2405 __ pop(ecx);
2406
2407 // Push arguments.
2408 __ push(eax);
2409 __ push(edx);
2410 __ push(ecx);
2411
2412 // Inlined floating point compare.
2413 // Call builtin if operands are not floating point or smi.
2414 FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx);
2415 FloatingPointHelper::LoadFloatOperands(masm, ecx);
2416 __ FCmp();
2417
2418 // Jump to builtin for NaN.
2419 __ j(parity_even, &call_builtin, not_taken);
2420
2421 // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
2422 Label below_lbl, above_lbl;
2423 // use edx, eax to convert unsigned to signed comparison
2424 __ j(below, &below_lbl, not_taken);
2425 __ j(above, &above_lbl, not_taken);
2426
2427 __ xor_(eax, Operand(eax)); // equal
2428 __ ret(2 * kPointerSize);
2429
2430 __ bind(&below_lbl);
2431 __ mov(eax, -1);
2432 __ ret(2 * kPointerSize);
2433
2434 __ bind(&above_lbl);
2435 __ mov(eax, 1);
2436 __ ret(2 * kPointerSize); // eax, edx were pushed
2437
2438 __ bind(&call_builtin);
2439 // must swap argument order
2440 __ pop(ecx);
2441 __ pop(edx);
2442 __ pop(eax);
2443 __ push(edx);
2444 __ push(eax);
2445
2446 // Figure out which native to call and setup the arguments.
2447 Builtins::JavaScript builtin;
2448 if (cc_ == equal) {
2449 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
2450 } else {
2451 builtin = Builtins::COMPARE;
2452 int ncr; // NaN compare result
2453 if (cc_ == less || cc_ == less_equal) {
2454 ncr = GREATER;
2455 } else {
2456 ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
2457 ncr = LESS;
2458 }
2459 __ push(Immediate(Smi::FromInt(ncr)));
2460 }
2461
2462 // Restore return address on the stack.
2463 __ push(ecx);
2464
2465 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
2466 // tagged as a small integer.
2467 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
2468 }
2469
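The 'ncr' push is worth spelling out: every comparison involving NaN must evaluate to false, so the builtin is handed whichever answer makes the final condition test fail. A hedged sketch of the builtin's contract (illustrative, not the actual COMPARE implementation):

#include <cmath>

// Returns -1 (less), 0 (equal), or 1 (greater); nan_result is the pushed
// 'ncr' value. Handing GREATER to a '<' or '<=' comparison (and LESS to
// '>' or '>=') guarantees the final condition test fails on NaN.
int Compare(double x, double y, int nan_result) {
  if (std::isnan(x) || std::isnan(y)) return nan_result;
  return x < y ? -1 : (x > y ? 1 : 0);
}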
2470
2471 void StackCheckStub::Generate(MacroAssembler* masm) {
2472 // Because builtins always remove the receiver from the stack, we
2473 // have to fake one to avoid underflowing the stack. The receiver
2474 // must be inserted below the return address on the stack so we
2475 // temporarily store that in a register.
2476 __ pop(eax);
2477 __ push(Immediate(Smi::FromInt(0)));
2478 __ push(eax);
2479
2480 // Do tail-call to runtime routine.
2481 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
2482 }
2483
2484
2485 #undef __
2486 #define __ masm_->
2487
2488 void Ia32CodeGenerator::Comparison(Condition cc, bool strict) {
2489 // Strict only makes sense for equality comparisons.
2490 ASSERT(!strict || cc == equal);
2491
2492 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
2493 if (cc == greater || cc == less_equal) {
2494 cc = ReverseCondition(cc);
2495 __ pop(edx);
2496 __ pop(eax);
2497 } else {
(...skipping 88 matching lines...)
2586
2587 #ifdef DEBUG
2588 void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
2589 #endif
2590
2591 Major MajorKey() { return CallFunction; }
2592 int MinorKey() { return argc_; }
2593 };
2594
2595
2596 #undef __
2597 #define __ masm->
2598
2599 void CallFunctionStub::Generate(MacroAssembler* masm) {
2600 Label slow;
2601
2602 // Get the function to call from the stack.
2603 // +2 ~ receiver, return address
2604 __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
2605
2606 // Check that the function really is a JavaScript function.
2607 __ test(edi, Immediate(kSmiTagMask));
2608 __ j(zero, &slow, not_taken);
2609 // Get the map.
2610 __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
2611 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
2612 __ cmp(ecx, JS_FUNCTION_TYPE);
2613 __ j(not_equal, &slow, not_taken);
2614
2615 // Fast-case: Just invoke the function.
2616 ParameterCount actual(argc_);
2617 __ InvokeFunction(edi, actual, JUMP_FUNCTION);
2618
2619 // Slow-case: Non-function called.
2620 __ bind(&slow);
2621 __ Set(eax, Immediate(argc_));
2622 __ Set(ebx, Immediate(0));
2623 __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
2624 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
2625 __ jmp(adaptor, RelocInfo::CODE_TARGET);
2626 }
2627
2628
2629 #undef __
2630 #define __ masm_->
2631
2632 // Call the function just below TOS on the stack with the given
2633 // arguments. The receiver is the TOS.
2634 void Ia32CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
2635 int position) {
2636 // Push the arguments ("left-to-right") on the stack.
2637 for (int i = 0; i < args->length(); i++) Load(args->at(i));
2638
2639 // Record the position for debugging purposes.
2640 __ RecordPosition(position);
2641
(...skipping 1985 matching lines...)
4627
4628 virtual void Generate();
4629
4630 private:
4631 bool is_postfix_;
4632 bool is_increment_;
4633 int result_offset_;
4634 };
4635
4636
4637 #undef __
4638 #define __ masm->
4639
4640 class RevertToNumberStub: public CodeStub {
4641 public:
4642 explicit RevertToNumberStub(bool is_increment)
4643 : is_increment_(is_increment) { }
4644
4645 private:
4646 bool is_increment_;
4647
4648 Major MajorKey() { return RevertToNumber; }
4649 int MinorKey() { return is_increment_ ? 1 : 0; }
4650 void Generate(MacroAssembler* masm);
4651
4652 const char* GetName() { return "RevertToNumberStub"; }
4653
4654 #ifdef DEBUG
4655 void Print() {
4656 PrintF("RevertToNumberStub (is_increment %s)\n",
4657 is_increment_ ? "true" : "false");
4658 }
4659 #endif
4660 };
4661
4662
4663 void RevertToNumberStub::Generate(MacroAssembler* masm) {
4664 // Revert optimistic increment/decrement.
4665 if (is_increment_) {
4666 __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
4667 } else {
4668 __ add(Operand(eax), Immediate(Smi::FromInt(1)));
4669 }
4670
4671 __ pop(ecx);
4672 __ push(eax);
4673 __ push(ecx);
4674 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
4675 // Code never returns due to JUMP_FUNCTION.
4676 }
4677
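The revert works directly on tagged values, since Smi::FromInt(1) is itself a tagged constant; roughly, assuming a one-bit smi tag:

#include <cstdint>

int32_t RevertOptimisticCount(int32_t tagged, bool is_increment) {
  const int32_t kTaggedOne = 1 << 1;  // Smi::FromInt(1)
  return is_increment ? tagged - kTaggedOne : tagged + kTaggedOne;
}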
4678
4679 class CounterOpStub: public CodeStub {
4680 public:
4681 CounterOpStub(int result_offset, bool is_postfix, bool is_increment)
4682 : result_offset_(result_offset),
4683 is_postfix_(is_postfix),
4684 is_increment_(is_increment) { }
4685
4686 private:
4687 int result_offset_;
4688 bool is_postfix_;
(...skipping 14 matching lines...)
4703 PrintF("CounterOpStub (result_offset %d), (is_postfix %s)," 3724 PrintF("CounterOpStub (result_offset %d), (is_postfix %s),"
4704 " (is_increment %s)\n", 3725 " (is_increment %s)\n",
4705 result_offset_, 3726 result_offset_,
4706 is_postfix_ ? "true" : "false", 3727 is_postfix_ ? "true" : "false",
4707 is_increment_ ? "true" : "false"); 3728 is_increment_ ? "true" : "false");
4708 } 3729 }
4709 #endif 3730 #endif
4710 }; 3731 };
4711 3732
4712 3733
4713 void CounterOpStub::Generate(MacroAssembler* masm) {
4714 // Store to the result on the stack (skip return address) before
4715 // performing the count operation.
4716 if (is_postfix_) {
4717 __ mov(Operand(esp, result_offset_ + kPointerSize), eax);
4718 }
4719
4720 // Revert optimistic increment/decrement but only for prefix
4721 // counts. For postfix counts it has already been reverted before
4722 // the conversion to numbers.
4723 if (!is_postfix_) {
4724 if (is_increment_) {
4725 __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
4726 } else {
4727 __ add(Operand(eax), Immediate(Smi::FromInt(1)));
4728 }
4729 }
4730
4731 // Compute the new value by calling the right JavaScript native.
4732 __ pop(ecx);
4733 __ push(eax);
4734 __ push(ecx);
4735 Builtins::JavaScript builtin = is_increment_ ? Builtins::INC : Builtins::DEC;
4736 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
4737 // Code never returns due to JUMP_FUNCTION.
4738 }
4739
4740
4741 #undef __
4742 #define __ masm_->
4743
4744 void CountOperationDeferred::Generate() {
4745 if (is_postfix_) {
4746 RevertToNumberStub to_number_stub(is_increment_);
4747 __ CallStub(&to_number_stub);
4748 }
4749 CounterOpStub stub(result_offset_, is_postfix_, is_increment_);
4750 __ CallStub(&stub);
4751 }
4752
4753
(...skipping 417 matching lines...)
5171 // call instruction to support patching the exit code in the
5172 // debugger. See VisitReturnStatement for the full return sequence.
5173 __ mov(esp, Operand(ebp));
5174 __ pop(ebp);
5175 }
5176
5177
5178 #undef __
5179 #define __ masm->
5180
4171 Operand Ia32CodeGenerator::SlotOperand(CodeGenerator* cgen,
4172 Slot* slot,
4173 Register tmp) {
4174 // Currently, this assertion will fail if we try to assign to
4175 // a constant variable that is constant because it is read-only
4176 // (such as the variable referring to a named function expression).
4177 // We need to implement assignments to read-only variables.
4178 // Ideally, we should do this during AST generation (by converting
4179 // such assignments into expression statements); however, in general
4180 // we may not be able to make the decision until past AST generation,
4181 // that is, when the entire program is known.
4182 ASSERT(slot != NULL);
4183 int index = slot->index();
4184 switch (slot->type()) {
4185 case Slot::PARAMETER: return ParameterOperand(cgen, index);
4186
4187 case Slot::LOCAL: {
4188 ASSERT(0 <= index && index < cgen->scope()->num_stack_slots());
4189 const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
4190 return Operand(ebp, kLocal0Offset - index * kPointerSize);
4191 }
4192
4193 case Slot::CONTEXT: {
4194 MacroAssembler* masm = cgen->masm();
4195 // Follow the context chain if necessary.
4196 ASSERT(!tmp.is(esi)); // do not overwrite context register
4197 Register context = esi;
4198 int chain_length =
4199 cgen->scope()->ContextChainLength(slot->var()->scope());
4200 for (int i = chain_length; i-- > 0;) {
4201 // Load the closure.
4202 // (All contexts, even 'with' contexts, have a closure,
4203 // and it is the same for all contexts inside a function.
4204 // There is no need to go to the function context first.)
4205 __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
4206 // Load the function context (which is the incoming, outer context).
4207 __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
4208 context = tmp;
4209 }
4210 // We may have a 'with' context now. Get the function context.
4211 // (In fact this mov may never be needed, since the scope analysis
4212 // may not permit a direct context access in this case and thus we are
4213 // always at a function context. However it is safe to dereference be-
4214 // cause the function context of a function context is itself. Before
4215 // deleting this mov we should try to create a counter-example first,
4216 // though...)
4217 __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
4218 return ContextOperand(tmp, index);
4219 }
4220
4221 default:
4222 UNREACHABLE();
4223 return Operand(eax);
4224 }
4225 }
4226
4227
4228 void Property::GenerateStoreCode(CodeGenerator* cgen,
4229 Reference* ref,
4230 InitState init_state) {
4231 MacroAssembler* masm = cgen->masm();
4232 Comment cmnt(masm, "[ Store to Property");
4233 __ RecordPosition(position());
4234 Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key());
4235 }
4236
4237
4238 void VariableProxy::GenerateStoreCode(CodeGenerator* cgen,
4239 Reference* ref,
4240 InitState init_state) {
4241 MacroAssembler* masm = cgen->masm();
4242 Comment cmnt(masm, "[ Store to VariableProxy");
4243 Variable* node = var();
4244
4245 Expression* expr = node->rewrite();
4246 if (expr != NULL) {
4247 expr->GenerateStoreCode(cgen, ref, init_state);
4248 } else {
4249 ASSERT(node->is_global());
4250 if (node->AsProperty() != NULL) {
4251 __ RecordPosition(node->AsProperty()->position());
4252 }
4253 Expression* key = new Literal(node->name());
4254 Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key);
4255 }
4256 }
4257
4258
4259 void Slot::GenerateStoreCode(CodeGenerator* cgen,
4260 Reference* ref,
4261 InitState init_state) {
4262 MacroAssembler* masm = cgen->masm();
4263 Comment cmnt(masm, "[ Store to Slot");
4264
4265 if (type() == Slot::LOOKUP) {
4266 ASSERT(var()->mode() == Variable::DYNAMIC);
4267
4268 // For now, just do a runtime call.
4269 __ push(esi);
4270 __ push(Immediate(var()->name()));
4271
4272 if (init_state == CONST_INIT) {
4273 // Same as the case for a normal store, but ignores attribute
4274 // (e.g. READ_ONLY) of context slot so that we can initialize const
4275 // properties (introduced via eval("const foo = (some expr);")). Also,
4276 // uses the current function context instead of the top context.
4277 //
4278 // Note that we must declare the foo upon entry of eval(), via a
4279 // context slot declaration, but we cannot initialize it at the same
4280 // time, because the const declaration may be at the end of the eval
4281 // code (sigh...) and the const variable may have been used before
4282 // (where its value is 'undefined'). Thus, we can only do the
4283 // initialization when we actually encounter the expression and when
4284 // the expression operands are defined and valid, and thus we need the
4285 // split into 2 operations: declaration of the context slot followed
4286 // by initialization.
4287 __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4288 } else {
4289 __ CallRuntime(Runtime::kStoreContextSlot, 3);
4290 }
4291 // Storing a variable must keep the (new) value on the expression
4292 // stack. This is necessary for compiling assignment expressions.
4293 __ push(eax);
4294
4295 } else {
4296 ASSERT(var()->mode() != Variable::DYNAMIC);
4297
4298 Label exit;
4299 if (init_state == CONST_INIT) {
4300 ASSERT(var()->mode() == Variable::CONST);
4301 // Only the first const initialization must be executed (the slot
4302 // still contains 'the hole' value). When the assignment is executed,
4303 // the code is identical to a normal store (see below).
4304 Comment cmnt(masm, "[ Init const");
4305 __ mov(eax, Ia32CodeGenerator::SlotOperand(cgen, this, ecx));
4306 __ cmp(eax, Factory::the_hole_value());
4307 __ j(not_equal, &exit);
4308 }
4309
4310 // We must execute the store.
4311 // Storing a variable must keep the (new) value on the stack. This is
4312 // necessary for compiling assignment expressions. ecx may be loaded
4313 // with the context; it is used below by RecordWrite.
4314 //
4315 // Note: We will reach here even with node->var()->mode() ==
4316 // Variable::CONST, because const declarations initialize consts
4317 // to 'the hole' value and, by doing so, end up calling this
4318 // code.
4319 __ pop(eax);
4320 __ mov(Ia32CodeGenerator::SlotOperand(cgen, this, ecx), eax);
4321 __ push(eax); // RecordWrite may destroy the value in eax.
4322 if (type() == Slot::CONTEXT) {
4323 // ecx is loaded with context when calling SlotOperand above.
4324 int offset = FixedArray::kHeaderSize + index() * kPointerSize;
4325 __ RecordWrite(ecx, offset, eax, ebx);
4326 }
4327 // Bind the exit label only if we could have jumped over the assignment
4328 // (the CONST_INIT case); binding an unused label can defeat peephole optimization.
4329 if (init_state == CONST_INIT) __ bind(&exit);
4330 }
4331 }
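// The RecordWrite call above is the generational write barrier: storing a
// pointer into a context (a heap object) may create a reference from old
// space into new space, which the collector must track. Schematically
// (hypothetical helper names, not the V8 API):
//
//   void RecordWrite(Object* host, int offset, Object* value) {
//     if (InNewSpace(value) && !InNewSpace(host)) {
//       remembered_set.Add(host->address() + offset);  // remember the slot
//     }
//   }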
4332
4333
4334 // NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
4335 void ToBooleanStub::Generate(MacroAssembler* masm) {
4336 Label false_result, true_result, not_string;
4337 __ mov(eax, Operand(esp, 1 * kPointerSize));
4338
4339 // 'null' => false.
4340 __ cmp(eax, Factory::null_value());
4341 __ j(equal, &false_result);
4342
4343 // Get the map and type of the heap object.
4344 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
4345 __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
4346
4347 // Undetectable => false.
4348 __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
4349 __ and_(ebx, 1 << Map::kIsUndetectable);
4350 __ j(not_zero, &false_result);
4351
4352 // JavaScript object => true.
4353 __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
4354 __ j(above_equal, &true_result);
4355
4356 // String value => false iff empty.
4357 __ cmp(ecx, FIRST_NONSTRING_TYPE);
4358 __ j(above_equal, &not_string);
4359 __ and_(ecx, kStringSizeMask);
4360 __ cmp(ecx, kShortStringTag);
4361 __ j(not_equal, &true_result); // Empty string is always short.
4362 __ mov(edx, FieldOperand(eax, String::kLengthOffset));
4363 __ shr(edx, String::kShortLengthShift);
4364 __ j(zero, &false_result);
4365 __ jmp(&true_result);
4366
4367 __ bind(&not_string);
4368 // HeapNumber => false iff +0, -0, or NaN.
4369 __ cmp(edx, Factory::heap_number_map());
4370 __ j(not_equal, &true_result);
4371 __ fldz();
4372 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
4373 __ fucompp();
4374 __ push(eax);
4375 __ fnstsw_ax();
4376 __ sahf();
4377 __ pop(eax);
4378 __ j(zero, &false_result);
4379 // Fall through to |true_result|.
4380
4381 // Return 1/0 for true/false in eax.
4382 __ bind(&true_result);
4383 __ mov(eax, 1);
4384 __ ret(1 * kPointerSize);
4385 __ bind(&false_result);
4386 __ mov(eax, 0);
4387 __ ret(1 * kPointerSize);
4388 }
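// Summary of the mapping implemented above (ECMAScript ToBoolean for the
// cases this stub handles; smis, booleans, and undefined are inlined at
// the call sites):
//
//   null                 -> false
//   undetectable object  -> false
//   JavaScript object    -> true
//   string               -> (length != 0)
//   heap number          -> false iff +0, -0, or NaN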
4389
4390
4391 void Ia32CodeGenerator::SetReferenceProperty(CodeGenerator* cgen,
4392 Reference* ref,
4393 Expression* key) {
4394 ASSERT(!ref->is_illegal());
4395 MacroAssembler* masm = cgen->masm();
4396
4397 if (ref->type() == Reference::NAMED) {
4398 // Compute the name of the property.
4399 Literal* literal = key->AsLiteral();
4400 Handle<String> name(String::cast(*literal->handle()));
4401
4402 // Call the appropriate IC code.
4403 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
4404 // TODO(1222589): Make the IC grab the values from the stack.
4405 __ pop(eax);
4406 // Set up the name register.
4407 __ Set(ecx, Immediate(name));
4408 __ call(ic, RelocInfo::CODE_TARGET);
4409 } else {
4410 // Access keyed property.
4411 ASSERT(ref->type() == Reference::KEYED);
4412
4413 // Call IC code.
4414 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
4415 // TODO(1222589): Make the IC grab the values from the stack.
4416 __ pop(eax);
4417 __ call(ic, RelocInfo::CODE_TARGET);
4418 }
4419 __ push(eax); // The IC call leaves the result in eax; push it back on the stack.
4420 }
4421
4422
4423 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
4424 Label call_runtime;
4425 __ mov(eax, Operand(esp, 1 * kPointerSize)); // Get y.
4426 __ mov(edx, Operand(esp, 2 * kPointerSize)); // Get x.
4427
4428 // 1. Smi case.
4429 switch (op_) {
4430 case Token::ADD: {
4431 // eax: y.
4432 // edx: x.
4433 Label revert;
4434 __ mov(ecx, Operand(eax));
4435 __ or_(ecx, Operand(edx)); // ecx = x | y.
4436 __ add(eax, Operand(edx)); // Add y optimistically.
4437 // Go slow-path in case of overflow.
4438 __ j(overflow, &revert, not_taken);
4439 // Go slow-path in case of non-smi operands.
4440 ASSERT(kSmiTag == 0); // adjust code below
4441 __ test(ecx, Immediate(kSmiTagMask));
4442 __ j(not_zero, &revert, not_taken);
4443 __ ret(2 * kPointerSize); // Remove all operands.
4444
4445 // Revert optimistic add.
4446 __ bind(&revert);
4447 __ sub(eax, Operand(edx));
4448 break;
4449 }
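// The tag trick above, in C++ terms (a sketch assuming kSmiTag == 0 and
// kSmiTagSize == 1, as asserted, so kSmiTagMask == 1): a word is a smi
// iff its low bit is clear, and
//
//   bool both_smis = ((x | y) & kSmiTagMask) == 0;
//
// tests both operands with a single branch. The add can be done first,
// optimistically, because tagged smi addition is exact: (2a) + (2b) ==
// 2(a + b) unless the add overflows, which the overflow flag catches.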
4450 case Token::SUB: {
4451 // eax: y.
4452 // edx: x.
4453 Label revert;
4454 __ mov(ecx, Operand(edx));
4455 __ or_(ecx, Operand(eax)); // ecx = x | y.
4456 __ sub(edx, Operand(eax)); // Subtract y optimistically.
4457 // Go slow-path in case of overflow.
4458 __ j(overflow, &revert, not_taken);
4459 // Go slow-path in case of non-smi operands.
4460 ASSERT(kSmiTag == 0); // adjust code below
4461 __ test(ecx, Immediate(kSmiTagMask));
4462 __ j(not_zero, &revert, not_taken);
4463 __ mov(eax, Operand(edx));
4464 __ ret(2 * kPointerSize); // Remove all operands.
4465
4466 // Revert optimistic sub.
4467 __ bind(&revert);
4468 __ add(edx, Operand(eax));
4469 break;
4470 }
4471 case Token::MUL: {
4472 // eax: y
4473 // edx: x
4474 // a) both operands smi and result fits into a smi -> return.
4475 // b) at least one of operands non-smi -> non_smi_operands.
4476 // c) result does not fit in a smi -> non_smi_result.
4477 Label non_smi_operands, non_smi_result;
4478 // Tag check.
4479 __ mov(ecx, Operand(edx));
4480 __ or_(ecx, Operand(eax)); // ecx = x | y.
4481 ASSERT(kSmiTag == 0); // Adjust code below.
4482 __ test(ecx, Immediate(kSmiTagMask));
4483 // Jump if not both smi; check if float numbers.
4484 __ j(not_zero, &non_smi_operands, not_taken);
4485
4486 // Get copies of operands.
4487 __ mov(ebx, Operand(eax));
4488 __ mov(ecx, Operand(edx));
4489 // If the smi tag is 0 we can just leave the tag on one operand.
4490 ASSERT(kSmiTag == 0); // adjust code below
4491 // Remove tag from one of the operands (but keep sign).
4492 __ sar(ecx, kSmiTagSize);
4493 // Do multiplication.
4494 __ imul(eax, Operand(ecx)); // Multiplication of Smis; result in eax.
4495 // Go slow on overflows.
4496 __ j(overflow, &non_smi_result, not_taken);
4497 // ...where the operands, being smis, are still fine for float arithmetic.
4498
4499 // If the result is +0 we may need to check if the result should
4500 // really be -0. Welcome to the -0 fan club.
4501 __ NegativeZeroTest(eax, ebx, edx, ecx, &non_smi_result);
4502
4503 __ ret(2 * kPointerSize);
4504
4505 __ bind(&non_smi_result);
4506 // TODO(1243132): Do not check float operands here.
4507 __ bind(&non_smi_operands);
4508 __ mov(eax, Operand(esp, 1 * kPointerSize));
4509 __ mov(edx, Operand(esp, 2 * kPointerSize));
4510 break;
4511 }
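// Why the -0 check after an integer multiply (illustrative): in
// JavaScript -1 * 0 == -0, but the integer imul above produces +0.
// NegativeZeroTest therefore bails out to the non-smi path whenever the
// result is 0 and an operand was negative (detected via the saved
// operand copies), so the float path can produce a proper -0.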
4512 case Token::DIV: {
4513 // eax: y
4514 // edx: x
4515 Label non_smi_operands, non_smi_result, division_by_zero;
4516 __ mov(ebx, Operand(eax)); // Get y
4517 __ mov(eax, Operand(edx)); // Get x
4518
4519 __ cdq(); // Sign extend eax into edx:eax.
4520 // Tag check.
4521 __ mov(ecx, Operand(ebx));
4522 __ or_(ecx, Operand(eax)); // ecx = x | y.
4523 ASSERT(kSmiTag == 0); // Adjust code below.
4524 __ test(ecx, Immediate(kSmiTagMask));
4525 // Jump if not both smi; check if float numbers.
4526 __ j(not_zero, &non_smi_operands, not_taken);
4527 __ test(ebx, Operand(ebx)); // Check for 0 divisor.
4528 __ j(zero, &division_by_zero, not_taken);
4529
4530 __ idiv(ebx);
4531 // Check for the corner case of dividing the most negative smi by -1.
4532 // (We cannot use the overflow flag, since it is not set by idiv.)
4533 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
4534 __ cmp(eax, 0x40000000);
4535 __ j(equal, &non_smi_result);
4536 // If the result is +0 we may need to check if the result should
4537 // really be -0. Welcome to the -0 fan club.
4538 __ NegativeZeroTest(eax, ecx, &non_smi_result); // Use ecx = x | y.
4539 __ test(edx, Operand(edx));
4540 // Use floats if there's a remainder.
4541 __ j(not_zero, &non_smi_result, not_taken);
4542 __ shl(eax, kSmiTagSize);
4543 __ ret(2 * kPointerSize); // Remove all operands.
4544
4545 __ bind(&division_by_zero);
4546 __ mov(eax, Operand(esp, 1 * kPointerSize));
4547 __ mov(edx, Operand(esp, 2 * kPointerSize));
4548 __ jmp(&call_runtime); // Division by zero must go through runtime.
4549
4550 __ bind(&non_smi_result);
4551 // TODO(1243132): Do not check float operands here.
4552 __ bind(&non_smi_operands);
4553 __ mov(eax, Operand(esp, 1 * kPointerSize));
4554 __ mov(edx, Operand(esp, 2 * kPointerSize));
4555 break;
4556 }
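// The 0x40000000 check above, worked out: smis cover [-2^30, 2^30 - 1].
// Dividing the most negative smi by -1 yields
//
//   (-2^30) / (-1) == 2^30 == 0x40000000,
//
// one past the largest representable smi, and idiv does not set the
// overflow flag for it, so the quotient must be tested explicitly.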
4557 case Token::MOD: {
4558 Label slow;
4559 __ mov(ebx, Operand(eax)); // get y
4560 __ mov(eax, Operand(edx)); // get x
4561 __ cdq(); // sign extend eax into edx:eax
4562 // tag check
4563 __ mov(ecx, Operand(ebx));
4564 __ or_(ecx, Operand(eax)); // ecx = x | y;
4565 ASSERT(kSmiTag == 0); // adjust code below
4566 __ test(ecx, Immediate(kSmiTagMask));
4567 __ j(not_zero, &slow, not_taken);
4568 __ test(ebx, Operand(ebx)); // test for y == 0
4569 __ j(zero, &slow);
4570
4571 // Fast case: Do integer division and use remainder.
4572 __ idiv(ebx);
4573 __ NegativeZeroTest(edx, ecx, &slow); // use ecx = x | y
4574 __ mov(eax, Operand(edx));
4575 __ ret(2 * kPointerSize);
4576
4577 // Slow case: Call runtime operator implementation.
4578 __ bind(&slow);
4579 __ mov(eax, Operand(esp, 1 * kPointerSize));
4580 __ mov(edx, Operand(esp, 2 * kPointerSize));
4581 // Fall through to |call_runtime|.
4582 break;
4583 }
4584 case Token::BIT_OR:
4585 case Token::BIT_AND:
4586 case Token::BIT_XOR:
4587 case Token::SAR:
4588 case Token::SHL:
4589 case Token::SHR: {
4590 // Smi-case for bitops should already have been inlined.
4591 break;
4592 }
4593 default: {
4594 UNREACHABLE();
4595 }
4596 }
4597
4598 // 2. Floating point case.
4599 switch (op_) {
4600 case Token::ADD:
4601 case Token::SUB:
4602 case Token::MUL:
4603 case Token::DIV: {
4604 // eax: y
4605 // edx: x
4606 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
4607 // Fast-case: Both operands are numbers.
4608 // Allocate a heap number, if needed.
4609 Label skip_allocation;
4610 switch (mode_) {
4611 case OVERWRITE_LEFT:
4612 __ mov(eax, Operand(edx));
4613 // Fall through!
4614 case OVERWRITE_RIGHT:
4615 // If the argument in eax is already an object, we skip the
4616 // allocation of a heap number.
4617 __ test(eax, Immediate(kSmiTagMask));
4618 __ j(not_zero, &skip_allocation, not_taken);
4619 // Fall through!
4620 case NO_OVERWRITE:
4621 FloatingPointHelper::AllocateHeapNumber(masm,
4622 &call_runtime,
4623 ecx,
4624 edx);
4625 __ bind(&skip_allocation);
4626 break;
4627 default: UNREACHABLE();
4628 }
4629 FloatingPointHelper::LoadFloatOperands(masm, ecx);
4630
4631 switch (op_) {
4632 case Token::ADD: __ faddp(1); break;
4633 case Token::SUB: __ fsubp(1); break;
4634 case Token::MUL: __ fmulp(1); break;
4635 case Token::DIV: __ fdivp(1); break;
4636 default: UNREACHABLE();
4637 }
4638 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
4639 __ ret(2 * kPointerSize);
4640 }
4641 case Token::MOD: {
4642 // For MOD we go directly to runtime in the non-smi case.
4643 break;
4644 }
4645 case Token::BIT_OR:
4646 case Token::BIT_AND:
4647 case Token::BIT_XOR:
4648 case Token::SAR:
4649 case Token::SHL:
4650 case Token::SHR: {
4651 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
4652 FloatingPointHelper::LoadFloatOperands(masm, ecx);
4653
4654 Label non_int32_operands, non_smi_result, skip_allocation;
4655 // Reserve space for converted numbers.
4656 __ sub(Operand(esp), Immediate(2 * kPointerSize));
4657
4658 // Check if right operand is int32.
4659 __ fist_s(Operand(esp, 1 * kPointerSize));
4660 __ fild_s(Operand(esp, 1 * kPointerSize));
4661 __ fucompp();
4662 __ fnstsw_ax();
4663 __ sahf();
4664 __ j(not_zero, &non_int32_operands);
4665 __ j(parity_even, &non_int32_operands);
4666
4667 // Check if left operand is int32.
4668 __ fist_s(Operand(esp, 0 * kPointerSize));
4669 __ fild_s(Operand(esp, 0 * kPointerSize));
4670 __ fucompp();
4671 __ fnstsw_ax();
4672 __ sahf();
4673 __ j(not_zero, &non_int32_operands);
4674 __ j(parity_even, &non_int32_operands);
4675
4676 // Get int32 operands and perform bitop.
4677 __ pop(eax);
4678 __ pop(ecx);
4679 switch (op_) {
4680 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
4681 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
4682 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
4683 case Token::SAR: __ sar(eax); break;
4684 case Token::SHL: __ shl(eax); break;
4685 case Token::SHR: __ shr(eax); break;
4686 default: UNREACHABLE();
4687 }
4688
4689 // Check if result is non-negative and fits in a smi.
4690 __ test(eax, Immediate(0xc0000000));
4691 __ j(not_zero, &non_smi_result);
4692
4693 // Tag smi result and return.
4694 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
4695 __ lea(eax, Operand(eax, times_2, kSmiTag));
4696 __ ret(2 * kPointerSize);
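// The 0xc0000000 test above checks both conditions at once: the result
// takes this fast path iff it lies in [0, 2^30 - 1], i.e. iff its top
// two bits are clear. The lea then tags it: with kSmiTag == 0, scaling
// by times_2 turns the value v into the tagged word 2v. Negative
// results, though some would fit in a smi, take the HeapNumber path
// below.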
4697
4698 // All ops except SHR return a signed int32 that we box in a HeapNumber.
4699 if (op_ != Token::SHR) {
4700 __ bind(&non_smi_result);
4701 // Allocate a heap number if needed.
4702 __ mov(ebx, Operand(eax)); // ebx: result
4703 switch (mode_) {
4704 case OVERWRITE_LEFT:
4705 case OVERWRITE_RIGHT:
4706 // If the operand was an object, we skip the
4707 // allocation of a heap number.
4708 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
4709 1 * kPointerSize : 2 * kPointerSize));
4710 __ test(eax, Immediate(kSmiTagMask));
4711 __ j(not_zero, &skip_allocation, not_taken);
4712 // Fall through!
4713 case NO_OVERWRITE:
4714 FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
4715 ecx, edx);
4716 __ bind(&skip_allocation);
4717 break;
4718 default: UNREACHABLE();
4719 }
4720 // Store the result in the HeapNumber and return.
4721 __ mov(Operand(esp, 1 * kPointerSize), ebx);
4722 __ fild_s(Operand(esp, 1 * kPointerSize));
4723 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
4724 __ ret(2 * kPointerSize);
4725 }
4726 __ bind(&non_int32_operands);
4727 // Restore stacks and operands before calling runtime.
4728 __ ffree(0);
4729 __ add(Operand(esp), Immediate(2 * kPointerSize));
4730
4731 // SHR should return uint32 - go to runtime for non-smi/negative result.
4732 if (op_ == Token::SHR) __ bind(&non_smi_result);
4733 __ mov(eax, Operand(esp, 1 * kPointerSize));
4734 __ mov(edx, Operand(esp, 2 * kPointerSize));
4735 break;
4736 }
4737 default: UNREACHABLE(); break;
4738 }
4739
4740 // 3. If all else fails, use the runtime system to get the correct result.
4741 __ bind(&call_runtime);
4742 switch (op_) {
4743 case Token::ADD:
4744 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
4745 break;
4746 case Token::SUB:
4747 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
4748 break;
4749 case Token::MUL:
4750 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
4751 break;
4752 case Token::DIV:
4753 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
4754 break;
4755 case Token::MOD:
4756 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
4757 break;
4758 case Token::BIT_OR:
4759 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
4760 break;
4761 case Token::BIT_AND:
4762 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
4763 break;
4764 case Token::BIT_XOR:
4765 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
4766 break;
4767 case Token::SAR:
4768 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
4769 break;
4770 case Token::SHL:
4771 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
4772 break;
4773 case Token::SHR:
4774 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
4775 break;
4776 default:
4777 UNREACHABLE();
4778 }
4779 }
4780
4781
4782 void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
4783 Label* need_gc,
4784 Register scratch1,
4785 Register scratch2) {
4786 ExternalReference allocation_top =
4787 ExternalReference::new_space_allocation_top_address();
4788 ExternalReference allocation_limit =
4789 ExternalReference::new_space_allocation_limit_address();
4790 __ mov(Operand(scratch1), Immediate(allocation_top));
4791 __ mov(eax, Operand(scratch1, 0));
4792 __ lea(scratch2, Operand(eax, HeapNumber::kSize)); // scratch2: new top
4793 __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
4794 __ j(above, need_gc, not_taken);
4795
4796 __ mov(Operand(scratch1, 0), scratch2); // store new top
4797 __ mov(Operand(eax, HeapObject::kMapOffset),
4798 Immediate(Factory::heap_number_map()));
4799 // Tag old top and use as result.
4800 __ add(Operand(eax), Immediate(kHeapObjectTag));
4801 }
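// Bump-pointer allocation, as emitted above, in C++ terms (a sketch;
// 'top' and 'limit' stand for the two external references):
//
//   byte* result = *top;
//   byte* new_top = result + HeapNumber::kSize;
//   if (new_top > *limit) goto need_gc;   // new space exhausted
//   *top = new_top;                       // commit the bump
//   set_map(result, heap_number_map);     // result still untagged here
//   return result + kHeapObjectTag;       // tag old top and use as result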
4802
4803
4804 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
4805 Register scratch) {
4806 Label load_smi_1, load_smi_2, done_load_1, done;
4807 __ mov(scratch, Operand(esp, 2 * kPointerSize));
4808 __ test(scratch, Immediate(kSmiTagMask));
4809 __ j(zero, &load_smi_1, not_taken);
4810 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
4811 __ bind(&done_load_1);
4812
4813 __ mov(scratch, Operand(esp, 1 * kPointerSize));
4814 __ test(scratch, Immediate(kSmiTagMask));
4815 __ j(zero, &load_smi_2, not_taken);
4816 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
4817 __ jmp(&done);
4818
4819 __ bind(&load_smi_1);
4820 __ sar(scratch, kSmiTagSize);
4821 __ push(scratch);
4822 __ fild_s(Operand(esp, 0));
4823 __ pop(scratch);
4824 __ jmp(&done_load_1);
4825
4826 __ bind(&load_smi_2);
4827 __ sar(scratch, kSmiTagSize);
4828 __ push(scratch);
4829 __ fild_s(Operand(esp, 0));
4830 __ pop(scratch);
4831
4832 __ bind(&done);
4833 }
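// What LoadFloatOperands leaves on the FPU stack, schematically (IsSmi,
// SmiValue, and Value are hypothetical helpers):
//
//   double Load(Object* obj) {
//     if (IsSmi(obj)) return static_cast<double>(SmiValue(obj));  // fild_s
//     return HeapNumber::cast(obj)->Value();                      // fld_d
//   }
//
// The deeper operand x (esp + 8) is loaded first and y (esp + 4) second,
// so y ends up in st(0) and x in st(1).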
4834
4835
4836 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
4837 Label* non_float,
4838 Register scratch) {
4839 Label test_other, done;
4840 // Test that both operands are numbers (smis or heap numbers) and
4841 // jump to non_float if either is not; scratch is used as a temporary.
4842 __ test(edx, Immediate(kSmiTagMask));
4843 __ j(zero, &test_other, not_taken); // argument in edx is OK
4844 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
4845 __ cmp(scratch, Factory::heap_number_map());
4846 __ j(not_equal, non_float); // argument in edx is not a number -> NaN
4847
4848 __ bind(&test_other);
4849 __ test(eax, Immediate(kSmiTagMask));
4850 __ j(zero, &done); // argument in eax is OK
4851 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
4852 __ cmp(scratch, Factory::heap_number_map());
4853 __ j(not_equal, non_float); // argument in eax is not a number -> NaN
4854
4855 // Fall-through: Both operands are numbers.
4856 __ bind(&done);
4857 }
4858
4859
4860 void UnarySubStub::Generate(MacroAssembler* masm) {
4861 Label undo;
4862 Label slow;
4863 Label done;
4864 Label try_float;
4865
4866 // Check whether the value is a smi.
4867 __ test(eax, Immediate(kSmiTagMask));
4868 __ j(not_zero, &try_float, not_taken);
4869
4870 // Enter runtime system if the value of the expression is zero
4871 // to make sure that we switch between 0 and -0.
4872 __ test(eax, Operand(eax));
4873 __ j(zero, &slow, not_taken);
4874
4875 // The value of the expression is a smi that is not zero. Try
4876 // optimistic subtraction '0 - value'.
4877 __ mov(edx, Operand(eax));
4878 __ Set(eax, Immediate(0));
4879 __ sub(eax, Operand(edx));
4880 __ j(overflow, &undo, not_taken);
4881
4882 // If result is a smi we are done.
4883 __ test(eax, Immediate(kSmiTagMask));
4884 __ j(zero, &done, taken);
4885
4886 // Restore eax and enter runtime system.
4887 __ bind(&undo);
4888 __ mov(eax, Operand(edx));
4889
4890 // Enter runtime system.
4891 __ bind(&slow);
4892 __ pop(ecx); // pop return address
4893 __ push(eax);
4894 __ push(ecx); // push return address
4895 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
4896
4897 // Try floating point case.
4898 __ bind(&try_float);
4899 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
4900 __ cmp(edx, Factory::heap_number_map());
4901 __ j(not_equal, &slow);
4902 __ mov(edx, Operand(eax));
4903 // edx: operand
4904 FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
4905 // eax: allocated 'empty' number
4906 __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
4907 __ fchs();
4908 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
4909
4910 __ bind(&done);
4911
4912 __ StubReturn(1);
4913 }
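// Why '0 - value' can overflow even for a valid smi operand: the smi
// range is asymmetric, [-2^30, 2^30 - 1], so negating the most negative
// smi gives -(-2^30) == 2^30, which does not fit; the overflow check
// routes exactly that case through 'undo' to the runtime.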
4914
4915
4916 void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
4917 // Check that the key is a smi for non-length access.
4918 Label slow;
4919 if (!is_length_) {
4920 __ mov(ebx, Operand(esp, 1 * kPointerSize)); // skip return address
4921 __ test(ebx, Immediate(kSmiTagMask));
4922 __ j(not_zero, &slow, not_taken);
4923 }
4924
4925 // Check if the calling frame is an arguments adaptor frame.
4926 Label adaptor;
4927 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
4928 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
4929 __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
4930 __ j(equal, &adaptor);
4931
4932 // The displacement is used for skipping the return address on the
4933 // stack. It is the offset of the last parameter (if any) relative
4934 // to the frame pointer.
4935 static const int kDisplacement = 1 * kPointerSize;
4936 ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
4937
4938 if (is_length_) {
4939 // Do nothing. The length is already in register eax.
4940 } else {
4941 // Check index against formal parameters count limit passed in
4942 // through register eax. Use unsigned comparison to get negative
4943 // check for free.
4944 __ cmp(ebx, Operand(eax));
4945 __ j(above_equal, &slow, not_taken);
4946
4947 // Read the argument from the stack.
4948 __ lea(edx, Operand(ebp, eax, times_2, 0));
4949 __ neg(ebx);
4950 __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
4951 }
4952
4953 // Return the length or the argument.
4954 __ ret(0);
4955
4956 // Arguments adaptor case: Find the length or the actual argument in
4957 // the calling frame.
4958 __ bind(&adaptor);
4959 if (is_length_) {
4960 // Read the arguments length from the adaptor frame.
4961 __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
4962 } else {
4963 // Check index against actual arguments limit found in the
4964 // arguments adaptor frame. Use unsigned comparison to get
4965 // negative check for free.
4966 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
4967 __ cmp(ebx, Operand(ecx));
4968 __ j(above_equal, &slow, not_taken);
4969
4970 // Read the argument from the stack.
4971 __ lea(edx, Operand(edx, ecx, times_2, 0));
4972 __ neg(ebx);
4973 __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
4974 }
4975
4976 // Return the length or the argument.
4977 __ ret(0);
4978
4979 // Slow-case: Handle non-smi or out-of-bounds access to arguments
4980 // by calling the runtime system.
4981 if (!is_length_) {
4982 __ bind(&slow);
4983 __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
4984 }
4985 }
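// The address arithmetic above exploits the smi encoding: a smi-tagged
// value is the untagged value shifted left by one, so scaling a tagged
// count or index by times_2 yields value * kPointerSize directly.
// Schematically (32-bit, kPointerSize == 4):
//
//   edx = ebp + count * kPointerSize;                    // lea
//   arg = *(edx - index * kPointerSize + kDisplacement);
//
// i.e. the argument is fetched without ever untagging ebx or eax.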
4986
4987
4988 void CompareStub::Generate(MacroAssembler* masm) {
4989 Label call_builtin, done;
4990 // Save the return address (and get it off the stack).
4991 __ pop(ecx);
4992
4993 // Push arguments.
4994 __ push(eax);
4995 __ push(edx);
4996 __ push(ecx);
4997
4998 // Inlined floating point compare.
4999 // Call builtin if operands are not floating point or smi.
5000 FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx);
5001 FloatingPointHelper::LoadFloatOperands(masm, ecx);
5002 __ FCmp();
5003
5004 // Jump to builtin for NaN.
5005 __ j(parity_even, &call_builtin, not_taken);
5006
5007 // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
5008 Label below_lbl, above_lbl;
5009 // The FPU compare sets unsigned-style flags (below/above); map them to the signed results -1/0/1.
5010 __ j(below, &below_lbl, not_taken);
5011 __ j(above, &above_lbl, not_taken);
5012
5013 __ xor_(eax, Operand(eax)); // equal
5014 __ ret(2 * kPointerSize);
5015
5016 __ bind(&below_lbl);
5017 __ mov(eax, -1);
5018 __ ret(2 * kPointerSize);
5019
5020 __ bind(&above_lbl);
5021 __ mov(eax, 1);
5022 __ ret(2 * kPointerSize); // eax, edx were pushed
5023
5024 __ bind(&call_builtin);
5025 // must swap argument order
5026 __ pop(ecx);
5027 __ pop(edx);
5028 __ pop(eax);
5029 __ push(edx);
5030 __ push(eax);
5031
5032 // Figure out which native to call and setup the arguments.
5033 Builtins::JavaScript builtin;
5034 if (cc_ == equal) {
5035 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
5036 } else {
5037 builtin = Builtins::COMPARE;
5038 int ncr; // NaN compare result
5039 if (cc_ == less || cc_ == less_equal) {
5040 ncr = GREATER;
5041 } else {
5042 ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
5043 ncr = LESS;
5044 }
5045 __ push(Immediate(Smi::FromInt(ncr)));
5046 }
5047
5048 // Restore return address on the stack.
5049 __ push(ecx);
5050
5051 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
5052 // tagged as a small integer.
5053 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
5054 }
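// The 'ncr' value above encodes the required NaN semantics: every
// ordered comparison involving NaN must evaluate to false. The COMPARE
// builtin's result is later compared against 0 using cc_, so for '<'
// and '<=' a NaN is made to report GREATER, and for '>' and '>=' LESS;
// either way the final test fails, as required.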
5055
5056
5057 void StackCheckStub::Generate(MacroAssembler* masm) {
5058 // Because builtins always remove the receiver from the stack, we
5059 // have to fake one to avoid underflowing the stack. The receiver
5060 // must be inserted below the return address on the stack so we
5061 // temporarily store that in a register.
5062 __ pop(eax);
5063 __ push(Immediate(Smi::FromInt(0)));
5064 __ push(eax);
5065
5066 // Do tail-call to runtime routine.
5067 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
5068 }
5069
5070
5071 void CallFunctionStub::Generate(MacroAssembler* masm) {
5072 Label slow;
5073
5074 // Get the function to call from the stack.
5075 // +2 ~ receiver, return address
5076 __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
5077
5078 // Check that the function really is a JavaScript function.
5079 __ test(edi, Immediate(kSmiTagMask));
5080 __ j(zero, &slow, not_taken);
5081 // Get the map.
5082 __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
5083 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5084 __ cmp(ecx, JS_FUNCTION_TYPE);
5085 __ j(not_equal, &slow, not_taken);
5086
5087 // Fast-case: Just invoke the function.
5088 ParameterCount actual(argc_);
5089 __ InvokeFunction(edi, actual, JUMP_FUNCTION);
5090
5091 // Slow-case: Non-function called.
5092 __ bind(&slow);
5093 __ Set(eax, Immediate(argc_));
5094 __ Set(ebx, Immediate(0));
5095 __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
5096 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
5097 __ jmp(adaptor, RelocInfo::CODE_TARGET);
5098 }
5099
5100
5101 void RevertToNumberStub::Generate(MacroAssembler* masm) {
5102 // Revert optimistic increment/decrement.
5103 if (is_increment_) {
5104 __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
5105 } else {
5106 __ add(Operand(eax), Immediate(Smi::FromInt(1)));
5107 }
5108
5109 __ pop(ecx);
5110 __ push(eax);
5111 __ push(ecx);
5112 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
5113 // Code never returns due to JUMP_FUNCTION.
5114 }
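// The revert above works directly on tagged values: with kSmiTag == 0
// and kSmiTagSize == 1, Smi::FromInt(1) is the machine word 2, so adding
// or subtracting it adjusts the untagged value by exactly 1. This stub
// runs only when the optimistic count failed (non-smi operand or
// overflow), so the adjustment is undone here before falling back to
// Builtins::TO_NUMBER.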
5115
5116
5117 void CounterOpStub::Generate(MacroAssembler* masm) {
5118 // Store the value to the result slot on the stack (skipping the
5119 // return address) before performing the count operation.
5120 if (is_postfix_) {
5121 __ mov(Operand(esp, result_offset_ + kPointerSize), eax);
5122 }
5123
5124 // Revert optimistic increment/decrement but only for prefix
5125 // counts. For postfix counts it has already been reverted before
5126 // the conversion to numbers.
5127 if (!is_postfix_) {
5128 if (is_increment_) {
5129 __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
5130 } else {
5131 __ add(Operand(eax), Immediate(Smi::FromInt(1)));
5132 }
5133 }
5134
5135 // Compute the new value by calling the right JavaScript native.
5136 __ pop(ecx);
5137 __ push(eax);
5138 __ push(ecx);
5139 Builtins::JavaScript builtin = is_increment_ ? Builtins::INC : Builtins::DEC;
5140 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
5141 // Code never returns due to JUMP_FUNCTION.
5142 }
5143
5144
5181 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
5182 ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
5183 ExternalReference handler_address(Top::k_handler_address);
5184 __ mov(edx, Operand::StaticVariable(handler_address));
5185 __ mov(ecx, Operand(edx, -1 * kPointerSize)); // get next in chain
5186 __ mov(Operand::StaticVariable(handler_address), ecx);
5187 __ mov(esp, Operand(edx));
5188 __ pop(edi);
5189 __ pop(ebp);
5190 __ pop(edx); // remove code pointer
(...skipping 267 matching lines...)
5458 __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
5459
5460 // Restore frame pointer and return.
5461 __ pop(ebp);
5462 __ ret(0);
5463 }
5464
5465
5466 #undef __
5467
5468
5469 // -----------------------------------------------------------------------------
5470 // CodeGenerator interfaces
5471
5472 // MakeCode() is just a wrapper for CodeGenerator::MakeCode()
5473 // so we don't have to expose the entire CodeGenerator class in
5474 // the .h file.
5475 Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
5476                                      Handle<Script> script,
5477                                      bool is_eval) {
5478 Handle<Code> code = Ia32CodeGenerator::MakeCode(fun, script, is_eval);
5479 if (!code.is_null()) {
5480 Counters::total_compiled_code_size.Increment(code->instruction_size());
5481 }
5482 return code;
5483 }
5484
5485
5486 } }  // namespace v8::internal