Chromium Code Reviews

Unified Diff: src/x64/code-stubs-x64.cc

Issue 196893003: Introduce addp, idivp, imulp and subp for x64 port (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 9 months ago
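The mechanical change throughout this file is addq/subq (and friends) becoming addp/subp wherever the operand is a pointer, an index, or a stack address. As a rough illustration of the idea, here is a minimal sketch with stub types; it is not V8's actual declaration (that lives in src/x64/assembler-x64.h). A pointer-size instruction picks the 32-bit or 64-bit encoding from the pointer width, so stock x64 still emits the same code, while a build with 32-bit pointers could emit the shorter form:

#include <cstdint>

struct Register { int code; };        // stand-in for V8's Register
struct Immediate { int32_t value; };  // stand-in for V8's Immediate

constexpr int kInt64Size = 8;
constexpr int kPointerSize = static_cast<int>(sizeof(void*));  // 8 on x64

struct Assembler {
  void addl(Register dst, Immediate src) { /* emit 32-bit add */ }
  void addq(Register dst, Immediate src) { /* emit REX.W 64-bit add */ }

  // Pointer-size add: on stock x64 this behaves exactly like addq, but
  // call sites doing pointer arithmetic no longer hard-code the 64-bit
  // form, so a configuration with 32-bit pointers could emit addl.
  void addp(Register dst, Immediate src) {
    if (kPointerSize == kInt64Size) {
      addq(dst, src);
    } else {
      addl(dst, src);
    }
  }
};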
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 585 matching lines...)
   if (stash_exponent_copy) {
     __ cmpl(MemOperand(rsp, 0), Immediate(0));
   } else {
     __ cmpl(exponent_operand, Immediate(0));
   }
   __ cmovl(greater, result_reg, scratch1);

   // Restore registers
   __ bind(&done);
   if (stash_exponent_copy) {
-    __ addq(rsp, Immediate(kDoubleSize));
+    __ addp(rsp, Immediate(kDoubleSize));
   }
   if (!final_result_reg.is(result_reg)) {
     ASSERT(final_result_reg.is(rcx));
     __ movl(final_result_reg, result_reg);
   }
   __ popq(save_reg);
   __ popq(scratch1);
   __ ret(0);
 }

(...skipping 163 matching lines...)
       __ sqrtsd(double_exponent, double_exponent);
       __ divsd(double_result, double_exponent);
       __ jmp(&done);
     }

     // Using FPU instructions to calculate power.
     Label fast_power_failed;
     __ bind(&fast_power);
     __ fnclex();  // Clear flags to catch exceptions later.
     // Transfer (B)ase and (E)xponent onto the FPU register stack.
-    __ subq(rsp, Immediate(kDoubleSize));
+    __ subp(rsp, Immediate(kDoubleSize));
     __ movsd(Operand(rsp, 0), double_exponent);
     __ fld_d(Operand(rsp, 0));  // E
     __ movsd(Operand(rsp, 0), double_base);
     __ fld_d(Operand(rsp, 0));  // B, E

     // Exponent is in st(1) and base is in st(0)
     // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
     // FYL2X calculates st(1) * log2(st(0))
     __ fyl2x();    // X
     __ fld(0);     // X, X
     __ frndint();  // rnd(X), X
     __ fsub(1);    // rnd(X), X-rnd(X)
     __ fxch(1);    // X - rnd(X), rnd(X)
     // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
     __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
     __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
     __ faddp(1);   // 2^(X-rnd(X)), rnd(X)
     // FSCALE calculates st(0) * 2^st(1)
     __ fscale();   // 2^X, rnd(X)
     __ fstp(1);
     // Bail out to runtime in case of exceptions in the status word.
     __ fnstsw_ax();
     __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
     __ j(not_zero, &fast_power_failed, Label::kNear);
     __ fstp_d(Operand(rsp, 0));
     __ movsd(double_result, Operand(rsp, 0));
-    __ addq(rsp, Immediate(kDoubleSize));
+    __ addp(rsp, Immediate(kDoubleSize));
     __ jmp(&done);

     __ bind(&fast_power_failed);
     __ fninit();
-    __ addq(rsp, Immediate(kDoubleSize));
+    __ addp(rsp, Immediate(kDoubleSize));
     __ jmp(&call_runtime);
   }

   // Calculate power with integer exponent.
   __ bind(&int_exponent);
   const XMMRegister double_scratch2 = double_exponent;
   // Back up exponent as we need to check if exponent is negative later.
   __ movp(scratch, exponent);  // Back up exponent.
   __ movsd(double_scratch, double_base);  // Back up base.
   __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
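For reference, the x87 sequence above can be written in plain C++ (illustrative only, not part of the patch). F2XM1 only accepts arguments in (-1, 1), so X = E * log2(B) is split into a rounded part and a fraction, and FSCALE multiplies by 2^rnd(X) at the end:

#include <cmath>
#include <cstdio>

// B^E = 2^(E * log2(B)); split X into rnd(X) + f with |f| < 1 so that
// the 2^f part stays inside F2XM1's domain.
double pow_via_exp2(double base, double exponent) {
  double x = exponent * std::log2(base);       // FYL2X
  double r = std::nearbyint(x);                // FRNDINT
  double f = x - r;                            // FSUB: X - rnd(X)
  double m = (std::exp2(f) - 1.0) + 1.0;       // F2XM1, FLD1, FADDP
  return std::scalbn(m, static_cast<int>(r));  // FSCALE: m * 2^rnd(X)
}

int main() { std::printf("%f\n", pow_via_exp2(2.0, 10.0)); }  // 1024.000000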
(...skipping 210 matching lines...)
   __ xor_(r8, r8);
   __ testq(rbx, rbx);
   __ j(zero, &no_parameter_map, Label::kNear);
   __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
   __ bind(&no_parameter_map);

   // 2. Backing store.
   __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));

   // 3. Arguments object.
-  __ addq(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
+  __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));

   // Do the allocation of all three objects in one go.
   __ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);

   // rax = address of new object(s) (tagged)
   // rcx = argument count (untagged)
   // Get the arguments boilerplate from the current native context into rdi.
   Label has_mapped_parameters, copy;
   __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
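The lea/addp steps above accumulate one allocation size covering the optional parameter map, the backing-store FixedArray, and the arguments object itself. The same computation in plain arithmetic, as a sketch with illustrative constants (the real values come from Heap and FixedArray):

// Total bytes to allocate for a sloppy arguments object (sketch).
int ArgumentsAllocationSize(int mapped_count, int argument_count) {
  const int kPointerSize = 8;                                 // x64
  const int kParameterMapHeaderSize = 2 * kPointerSize + 16;  // illustrative
  const int kFixedArrayHeaderSize = 16;                       // illustrative
  const int kSloppyArgumentsObjectSize = 40;                  // illustrative
  int parameter_map = mapped_count == 0
      ? 0
      : kParameterMapHeaderSize + mapped_count * kPointerSize;
  int backing_store = kFixedArrayHeaderSize + argument_count * kPointerSize;
  return parameter_map + backing_store + kSloppyArgumentsObjectSize;
}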
(...skipping 65 matching lines...)
   // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
   // The mapped parameter thus need to get indices
   // MIN_CONTEXT_SLOTS+parameter_count-1 ..
   // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
   // We loop from right to left.
   Label parameters_loop, parameters_test;

   // Load tagged parameter count into r9.
   __ Integer32ToSmi(r9, rbx);
   __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
-  __ addq(r8, args.GetArgumentOperand(2));
-  __ subq(r8, r9);
+  __ addp(r8, args.GetArgumentOperand(2));
+  __ subp(r8, r9);
   __ Move(r11, factory->the_hole_value());
   __ movp(rdx, rdi);
   __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
   // r9 = loop variable (tagged)
   // r8 = mapping index (tagged)
   // r11 = the hole value
   // rdx = address of parameter map (tagged)
   // rdi = address of backing store (tagged)
   __ jmp(&parameters_test, Label::kNear);

(...skipping 21 matching lines...)
   __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
           factory->fixed_array_map());
   __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);

   Label arguments_loop, arguments_test;
   __ movp(r8, rbx);
   __ movp(rdx, args.GetArgumentOperand(1));
   // Untag rcx for the loop below.
   __ SmiToInteger64(rcx, rcx);
   __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
-  __ subq(rdx, kScratchRegister);
+  __ subp(rdx, kScratchRegister);
   __ jmp(&arguments_test, Label::kNear);

   __ bind(&arguments_loop);
-  __ subq(rdx, Immediate(kPointerSize));
+  __ subp(rdx, Immediate(kPointerSize));
   __ movp(r9, Operand(rdx, 0));
   __ movp(FieldOperand(rdi, r8,
                        times_pointer_size,
                        FixedArray::kHeaderSize),
           r9);
-  __ addq(r8, Immediate(1));
+  __ addp(r8, Immediate(1));

   __ bind(&arguments_test);
   __ cmpq(r8, rcx);
   __ j(less, &arguments_loop, Label::kNear);

   // Return and remove the on-stack parameters.
   __ ret(3 * kPointerSize);

   // Do the runtime call to allocate the arguments object.
   // rcx = argument count (untagged)
(...skipping 60 matching lines...)
   __ movp(args.GetArgumentOperand(1), rdx);

   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
   __ testq(rcx, rcx);
   __ j(zero, &add_arguments_object, Label::kNear);
   __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ addq(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
+  __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));

   // Do the allocation of both objects in one go.
   __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);

   // Get the arguments boilerplate from the current native context.
   __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
   const int offset =
       Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
   __ movp(rdi, Operand(rdi, offset));
(...skipping 29 matching lines...)

   __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
   // Untag the length for the loop below.
   __ SmiToInteger64(rcx, rcx);

   // Copy the fixed array slots.
   Label loop;
   __ bind(&loop);
   __ movp(rbx, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
   __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
-  __ addq(rdi, Immediate(kPointerSize));
-  __ subq(rdx, Immediate(kPointerSize));
+  __ addp(rdi, Immediate(kPointerSize));
+  __ subp(rdx, Immediate(kPointerSize));
   __ decq(rcx);
   __ j(not_zero, &loop);

   // Return and remove the on-stack parameters.
   __ bind(&done);
   __ ret(3 * kPointerSize);

   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
(...skipping 199 matching lines...)
           kScratchRegister);

   // Argument 8: Indicate that this is a direct call from JavaScript.
   __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kRegisterSize),
           Immediate(1));

   // Argument 7: Start (high end) of backtracking stack memory area.
   __ Move(kScratchRegister, address_of_regexp_stack_memory_address);
   __ movp(r9, Operand(kScratchRegister, 0));
   __ Move(kScratchRegister, address_of_regexp_stack_memory_size);
-  __ addq(r9, Operand(kScratchRegister, 0));
+  __ addp(r9, Operand(kScratchRegister, 0));
   __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);

   // Argument 6: Set the number of capture registers to zero to force global
   // regexps to behave as non-global. This does not affect non-global regexps.
   // Argument 6 is passed in r9 on Linux and on the stack on Windows.
 #ifdef _WIN64
   __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kRegisterSize),
           Immediate(0));
 #else
   __ Set(r9, 0);
(...skipping 15 matching lines...)
   // r15: original subject string

   // Argument 2: Previous index.
   __ movp(arg_reg_2, rbx);

   // Argument 4: End of string data
   // Argument 3: Start of string data
   Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
   // Prepare start and end index of the input.
   // Load the length from the original sliced string if that is the case.
-  __ addq(rbx, r14);
+  __ addp(rbx, r14);
   __ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
-  __ addq(r14, arg_reg_3);  // Using arg3 as scratch.
+  __ addp(r14, arg_reg_3);  // Using arg3 as scratch.

   // rbx: start index of the input
   // r14: end index of the input
   // r15: original subject string
   __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
   __ j(zero, &setup_two_byte, Label::kNear);
   __ lea(arg_reg_4,
          FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
   __ lea(arg_reg_3,
          FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
   __ jmp(&setup_rest, Label::kNear);
   __ bind(&setup_two_byte);
   __ lea(arg_reg_4,
          FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
   __ lea(arg_reg_3,
          FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
   __ bind(&setup_rest);

   // Argument 1: Original subject string.
   // The original subject is in the previous stack frame. Therefore we have to
   // use rbp, which points exactly to one pointer size below the previous rsp.
   // (Because creating a new stack frame pushes the previous rbp onto the stack
   // and thereby moves up rsp by one kPointerSize.)
   __ movp(arg_reg_1, r15);

   // Locate the code entry and call it.
-  __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(r11);

   __ LeaveApiExitFrame(true);

   // Check the result.
   Label success;
   Label exception;
   __ cmpl(rax, Immediate(1));
   // We expect exactly one result since we force the called regexp to behave
   // as non-global.
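The lea pairs above compute raw character addresses for the native regexp code: the index scale is one byte or two depending on the encoding, and the header offset is applied minus the heap-object tag. A sketch of the same computation (the tag value and header size are assumptions here):

#include <cstdint>

// Raw address of character `index` in a flat string whose tagged pointer
// is `subject_tagged`. The tag is subtracted because tagged pointers
// point one byte past the real object start.
const void* CharAddress(uintptr_t subject_tagged, int index, bool one_byte) {
  const int kHeapObjectTag = 1;  // assumed
  const int kHeaderSize = 24;    // illustrative string header size
  int scale = one_byte ? 1 : 2;  // times_1 vs times_2 above
  return reinterpret_cast<const void*>(
      subject_tagged - kHeapObjectTag + kHeaderSize + index * scale);
}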
(...skipping 64 matching lines...)
   __ LoadAddress(rcx,
                  ExternalReference::address_of_static_offsets_vector(isolate));

   // rbx: last_match_info backing store (FixedArray)
   // rcx: offsets vector
   // rdx: number of capture registers
   Label next_capture, done;
   // Capture register counter starts from number of capture registers and
   // counts down until wraping after zero.
   __ bind(&next_capture);
-  __ subq(rdx, Immediate(1));
+  __ subp(rdx, Immediate(1));
   __ j(negative, &done, Label::kNear);
   // Read the value from the static offsets vector buffer and make it a smi.
   __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
   __ Integer32ToSmi(rdi, rdi);
   // Store the smi value in the last match info.
   __ movp(FieldOperand(rbx,
                        rdx,
                        times_pointer_size,
                        RegExpImpl::kFirstCaptureOffset),
           rdi);
(...skipping 43 matching lines...)
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   if (FLAG_debug_code) {
     // Assert that we do not have a cons or slice (indirect strings) here.
     // Sequential strings have already been ruled out.
     __ testb(rbx, Immediate(kIsIndirectStringMask));
     __ Assert(zero, kExternalStringExpectedButNotFound);
   }
   __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
   // Move the pointer so that offset-wise, it looks like a sequential string.
   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   STATIC_ASSERT(kTwoByteStringTag == 0);
   // (8a) Is the external string one byte? If yes, go to (6).
   __ testb(rbx, Immediate(kStringEncodingMask));
   __ j(not_zero, &seq_one_byte_string);  // Goto (6).

   // rdi: subject string (flat two-byte)
   // rax: RegExp data (FixedArray)
   // (9) Two byte sequential. Load regexp code for one byte. Go to (E).
   __ bind(&seq_two_byte_string);
   __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
(...skipping 61 matching lines...)
   Condition cc = GetCondition();
   Factory* factory = masm->isolate()->factory();

   Label miss;
   CheckInputType(masm, rdx, left_, &miss);
   CheckInputType(masm, rax, right_, &miss);

   // Compare two smis.
   Label non_smi, smi_done;
   __ JumpIfNotBothSmi(rax, rdx, &non_smi);
-  __ subq(rdx, rax);
+  __ subp(rdx, rax);
   __ j(no_overflow, &smi_done);
   __ not_(rdx);  // Correct sign in case of overflow. rdx cannot be 0 here.
   __ bind(&smi_done);
   __ movp(rax, rdx);
   __ ret(0);
   __ bind(&non_smi);

   // The compare stub returns a positive, negative, or zero 64-bit integer
   // value in rax, corresponding to result of comparing the two inputs.
   // NOTICE! This code is only reached after a smi-fast-case check, so
(...skipping 113 matching lines...)
   FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
   __ xorl(rax, rax);
   __ xorl(rcx, rcx);
   __ ucomisd(xmm0, xmm1);

   // Don't base result on EFLAGS when a NaN is involved.
   __ j(parity_even, &unordered, Label::kNear);
   // Return a result of -1, 0, or 1, based on EFLAGS.
   __ setcc(above, rax);
   __ setcc(below, rcx);
-  __ subq(rax, rcx);
+  __ subp(rax, rcx);
   __ ret(0);

   // If one of the numbers was NaN, then the result is always false.
   // The cc is never not-equal.
   __ bind(&unordered);
   ASSERT(cc != not_equal);
   if (cc == less || cc == less_equal) {
     __ Set(rax, 1);
   } else {
     __ Set(rax, -1);
(...skipping 693 matching lines...)
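The setcc(above)/setcc(below)/subp sequence in the previous chunk is the branchless three-way compare; a C++ equivalent of the fast path (NaN is handled separately there by the parity check):

// Returns 1, 0, or -1 without branches, mirroring setcc/setcc/subp.
int ThreeWayCompare(double a, double b) {
  return (a > b) - (a < b);
}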
   __ pushq(r14);
   __ pushq(r15);
 #ifdef _WIN64
   __ pushq(rdi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
   __ pushq(rsi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
 #endif
   __ pushq(rbx);

 #ifdef _WIN64
   // On Win64 XMM6-XMM15 are callee-save
-  __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+  __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
   __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
(...skipping 92 matching lines...)
   __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
   __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
   __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
   __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
   __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
   __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
   __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
   __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
   __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
   __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
-  __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+  __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
 #endif

   __ popq(rbx);
 #ifdef _WIN64
   // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI.
   __ popq(rsi);
   __ popq(rdi);
 #endif
   __ popq(r15);
   __ popq(r14);
   __ popq(r13);
   __ popq(r12);
-  __ addq(rsp, Immediate(2 * kPointerSize));  // remove markers
+  __ addp(rsp, Immediate(2 * kPointerSize));  // remove markers

   // Restore frame pointer and return.
   __ popq(rbp);
   __ ret(0);
 }


 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Implements "value instanceof function" operator.
   // Expected input state with no inline cache:
(...skipping 68 matching lines...)
   // Register mapping:
   // rax is object map.
   // rdx is function.
   // rbx is function prototype.
   if (!HasCallSiteInlineCheck()) {
     __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
   } else {
     // Get return address and delta to inlined map check.
     __ movq(kScratchRegister, StackOperandForReturnAddress(0));
-    __ subq(kScratchRegister, args.GetArgumentOperand(2));
+    __ subp(kScratchRegister, args.GetArgumentOperand(2));
     if (FLAG_debug_code) {
       __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
       __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
     }
     __ movp(kScratchRegister,
             Operand(kScratchRegister, kOffsetToMapCheckValue));
     __ movp(Operand(kScratchRegister, 0), rax);
   }

(...skipping 20 matching lines...)
     STATIC_ASSERT(kSmiTag == 0);
     __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of true in the root array at the inline check site.
     int true_offset = 0x100 +
         (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
     // Assert it is a 1-byte signed value.
     ASSERT(true_offset >= 0 && true_offset < 0x100);
     __ movl(rax, Immediate(true_offset));
     __ movq(kScratchRegister, StackOperandForReturnAddress(0));
-    __ subq(kScratchRegister, args.GetArgumentOperand(2));
+    __ subp(kScratchRegister, args.GetArgumentOperand(2));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
     if (FLAG_debug_code) {
       __ movl(rax, Immediate(kWordBeforeResultValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
       __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
     }
     __ Set(rax, 0);
   }
   __ ret((2 + extra_argument_offset) * kPointerSize);

   __ bind(&is_not_instance);
   if (!HasCallSiteInlineCheck()) {
     // We have to store a non-zero value in the cache.
     __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of false in the root array at the inline check site.
     int false_offset = 0x100 +
         (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
     // Assert it is a 1-byte signed value.
     ASSERT(false_offset >= 0 && false_offset < 0x100);
     __ movl(rax, Immediate(false_offset));
     __ movq(kScratchRegister, StackOperandForReturnAddress(0));
-    __ subq(kScratchRegister, args.GetArgumentOperand(2));
+    __ subp(kScratchRegister, args.GetArgumentOperand(2));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
     if (FLAG_debug_code) {
       __ movl(rax, Immediate(kWordBeforeResultValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
       __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
     }
   }
   __ ret((2 + extra_argument_offset) * kPointerSize);

   // Slow-case: Go through the JavaScript implementation.
(...skipping 337 matching lines...)
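A note on the 0x100 arithmetic in the chunk above: the root's offset relative to the biased root register is a small negative number, adding 0x100 makes it pass the range check, and because movb patches only the low byte into the code, the extra 0x100 vanishes when the byte is read back as a signed displacement. A minimal model, with the bias and pointer size assumed:

#include <cassert>
#include <cstdint>

// Encode a root-array offset as the single byte patched into the code.
uint8_t EncodeRootOffsetByte(int root_index) {
  const int kPointerSizeLog2 = 3;     // assumed: 8-byte pointers
  const int kRootRegisterBias = 128;  // assumed bias of the root register
  int offset = 0x100 + (root_index << kPointerSizeLog2) - kRootRegisterBias;
  assert(offset >= 0 && offset < 0x100);  // fits one byte, like the ASSERT
  return static_cast<uint8_t>(offset);    // low byte only, like movb
}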
                  Heap::kempty_stringRootIndex);
   __ j(not_equal, &runtime);
   __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
   // Update instance type.
   __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   __ jmp(&underlying_unpacked, Label::kNear);

   __ bind(&sliced_string);
   // Sliced string. Fetch parent and correct start index by offset.
-  __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+  __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
   __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
   // Update instance type.
   __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   __ jmp(&underlying_unpacked, Label::kNear);

   __ bind(&seq_or_external_string);
   // Sequential or external string. Just move string to the correct register.
   __ movp(rdi, rax);

(...skipping 50 matching lines...)
   __ j(zero, &sequential_string);

   // Handle external string.
   // Rule out short external strings.
   STATIC_CHECK(kShortExternalStringTag != 0);
   __ testb(rbx, Immediate(kShortExternalStringMask));
   __ j(not_zero, &runtime);
   __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
   // Move the pointer so that offset-wise, it looks like a sequential string.
   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

   __ bind(&sequential_string);
   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
   __ testb(rbx, Immediate(kStringEncodingMask));
   __ j(zero, &two_byte_sequential);

   // Allocate the result.
   __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);

   // rax: result string
(...skipping 227 matching lines...)

   __ bind(&not_same);

   // Check that both are sequential ASCII strings.
   __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);

   // Inline comparison of ASCII strings.
   __ IncrementCounter(counters->string_compare_native(), 1);
   // Drop arguments from the stack
   __ PopReturnAddressTo(rcx);
-  __ addq(rsp, Immediate(2 * kPointerSize));
+  __ addp(rsp, Immediate(2 * kPointerSize));
   __ PushReturnAddressFrom(rcx);
   GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);

   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }


(...skipping 142 matching lines...)
   ASSERT(kAllocationDelta >= argc);
   // Load top.
   __ Load(rcx, new_space_allocation_top);

   // Check if it's the end of elements.
   __ lea(rdx, FieldOperand(rdi,
                            rax, times_pointer_size,
                            FixedArray::kHeaderSize - argc * kPointerSize));
   __ cmpq(rdx, rcx);
   __ j(not_equal, &call_builtin);
-  __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
+  __ addp(rcx, Immediate(kAllocationDelta * kPointerSize));
   Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
   __ cmpq(rcx, limit_operand);
   __ j(above, &call_builtin);

   // We fit and could grow elements.
   __ Store(new_space_allocation_top, rcx);

   // Push the argument...
   __ movp(Operand(rdx, 0), rbx);
   // ... and fill the rest with holes.
(...skipping 59 matching lines...)
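The fast path in the previous chunk grows the elements array in place, which works because new space is bump-allocated: if the backing store ends exactly at the allocation top and the limit permits, advancing the top claims the extra slots without copying anything. A sketch under those assumptions:

#include <cstddef>
#include <cstdint>

// Try to extend a backing store that ends exactly at the allocation top.
bool TryGrowInPlace(uintptr_t elements_end, uintptr_t* top,
                    uintptr_t limit, size_t delta_bytes) {
  if (elements_end != *top) return false;        // someone allocated after us
  if (*top + delta_bytes > limit) return false;  // would overflow new space
  *top += delta_bytes;                           // claim the space
  return true;
}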
 }


 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMI);
   Label miss;
   __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);

   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
-    __ subq(rax, rdx);
+    __ subp(rax, rdx);
   } else {
     Label done;
-    __ subq(rdx, rax);
+    __ subp(rdx, rax);
     __ j(no_overflow, &done, Label::kNear);
     // Correct sign of result in case of overflow.
     __ not_(rdx);
     __ bind(&done);
     __ movp(rax, rdx);
   }
   __ ret(0);

   __ bind(&miss);
   GenerateMiss(masm);
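The overflow fix-up above (subp, then not_ when overflow is flagged) rests on two facts: a difference that overflows has the wrong sign bit, and flipping all bits (~v equals -v - 1) restores the correct sign without ever producing zero, which is all the caller inspects. A standalone check using GCC/Clang builtins, illustrative only:

#include <cstdint>
#include <cstdio>

// Sign-correct comparison result, mirroring subp + j(no_overflow) + not_.
int64_t CompareViaSub(int64_t x, int64_t y) {
  int64_t diff;
  if (__builtin_sub_overflow(x, y, &diff)) {
    diff = ~diff;  // wrong-signed on overflow; NOT flips the sign bit back
  }
  return diff;  // caller only looks at the sign (and zero for equality)
}

int main() {
  std::printf("%d\n", CompareViaSub(INT64_MIN, 1) < 0);  // 1: MIN < 1
}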
(...skipping 255 matching lines...)
   Label miss;
   Condition either_smi = masm->CheckEitherSmi(rdx, rax);
   __ j(either_smi, &miss, Label::kNear);

   __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
   __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
   __ j(not_equal, &miss, Label::kNear);

   ASSERT(GetCondition() == equal);
-  __ subq(rax, rdx);
+  __ subp(rax, rdx);
   __ ret(0);

   __ bind(&miss);
   GenerateMiss(masm);
 }


 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
   Label miss;
   Condition either_smi = masm->CheckEitherSmi(rdx, rax);
   __ j(either_smi, &miss, Label::kNear);

   __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
   __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   __ Cmp(rcx, known_map_);
   __ j(not_equal, &miss, Label::kNear);
   __ Cmp(rbx, known_map_);
   __ j(not_equal, &miss, Label::kNear);

-  __ subq(rax, rdx);
+  __ subp(rax, rdx);
   __ ret(0);

   __ bind(&miss);
   GenerateMiss(masm);
 }


 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
   {
     // Call the runtime system in a fresh internal frame.
(...skipping 346 matching lines...)
                                                 Mode mode) {
   Label on_black;
   Label need_incremental;
   Label need_incremental_pop_object;

   __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
   __ and_(regs_.scratch0(), regs_.object());
   __ movp(regs_.scratch1(),
           Operand(regs_.scratch0(),
                   MemoryChunk::kWriteBarrierCounterOffset));
-  __ subq(regs_.scratch1(), Immediate(1));
+  __ subp(regs_.scratch1(), Immediate(1));
   __ movp(Operand(regs_.scratch0(),
                   MemoryChunk::kWriteBarrierCounterOffset),
           regs_.scratch1());
   __ j(negative, &need_incremental);

   // Let's look at the color of the object: If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(),
                  regs_.scratch0(),
                  regs_.scratch1(),
(...skipping 174 matching lines...)
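Two idioms from the previous chunk, restated: masking an object's address with ~kPageAlignmentMask yields the start of its page, where the MemoryChunk header lives, and the write-barrier counter is a per-page countdown that sends the code to the incremental-marking slow path once it goes negative. A sketch (the page size is an assumption):

#include <cstdint>

// Page start for any address inside the page (1 MB pages assumed).
uintptr_t PageOf(uintptr_t addr) {
  const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 20) - 1;  // assumed
  return addr & ~kPageAlignmentMask;
}

// Per-page countdown; true means "inform the incremental marker now".
bool DecrementWriteBarrierCounter(intptr_t* counter) {
  *counter -= 1;
  return *counter < 0;
}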
   const size_t kNumSavedRegisters = 2;
   __ pushq(arg_reg_1);
   __ pushq(arg_reg_2);

   // Calculate the original stack pointer and store it in the second arg.
   __ lea(arg_reg_2,
          Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));

   // Calculate the function address to the first arg.
   __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
-  __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+  __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));

   // Save the remainder of the volatile registers.
   masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);

   // Call the entry hook function.
   __ Move(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
           Assembler::RelocInfoNone());

   AllowExternalCallThatCantCauseGC scope(masm);

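The subp above recovers the address of the call instruction that invoked the hook: the return address on the stack points just past a fixed-length short call, so backing up by kShortCallInstructionLength lands on the call site. A sketch, assuming the 5-byte e8 rel32 form:

#include <cstdint>

// Address of the call instruction that pushed `return_address`.
uintptr_t CallSiteFromReturnAddress(uintptr_t return_address) {
  const int kShortCallInstructionLength = 5;  // assumed: e8 + rel32
  return return_address - kShortCallInstructionLength;
}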
(...skipping 391 matching lines...)
   __ PushReturnAddressFrom(return_address);

   // Allocate the v8::Arguments structure in the arguments' space since
   // it's not controlled by GC.
   const int kApiStackSpace = 4;

   __ PrepareCallApiFunction(kApiStackSpace);

   // FunctionCallbackInfo::implicit_args_.
   __ movp(StackSpaceOperand(0), scratch);
-  __ addq(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+  __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
   __ movp(StackSpaceOperand(1), scratch);  // FunctionCallbackInfo::values_.
   __ Set(StackSpaceOperand(2), argc);  // FunctionCallbackInfo::length_.
   // FunctionCallbackInfo::is_construct_call_.
   __ Set(StackSpaceOperand(3), 0);

 #if defined(__MINGW64__) || defined(_WIN64)
   Register arguments_arg = rcx;
   Register callback_arg = rdx;
 #else
   Register arguments_arg = rdi;
(...skipping 85 matching lines...)
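The four stores into StackSpaceOperand(0..3) in the previous chunk populate the FunctionCallbackInfo fields that the C++ API callback later reads. A rough model of that layout; the field names mirror the comments, while the offsets and kArgsLength are assumptions:

// What the four stack stores above populate, conceptually.
struct FunctionCallbackInfoModel {
  void** implicit_args;   // StackSpaceOperand(0): start of implicit args
  void** values;          // StackSpaceOperand(1): last explicit argument,
                          //   implicit_args + (argc + kArgsLength - 1) slots
  int length;             // StackSpaceOperand(2): argc
  int is_construct_call;  // StackSpaceOperand(3): 0 for a normal call
};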
                            return_value_operand,
                            NULL);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64