Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(778)

Side by Side Diff: src/ia32/macro-assembler-ia32.cc

Issue 27197013: Tweak Math.log on ia32/x64 (Closed) Base URL: git://github.com/v8/v8.git@master
Patch Set: addressed comments Created 7 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/ia32/lithium-gap-resolver-ia32.cc ('k') | src/ia32/stub-cache-ia32.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 214 matching lines...) Expand 10 before | Expand all | Expand 10 after
225 225
226 226
227 void MacroAssembler::TruncateDoubleToI(Register result_reg, 227 void MacroAssembler::TruncateDoubleToI(Register result_reg,
228 XMMRegister input_reg) { 228 XMMRegister input_reg) {
229 Label done; 229 Label done;
230 cvttsd2si(result_reg, Operand(input_reg)); 230 cvttsd2si(result_reg, Operand(input_reg));
231 cmp(result_reg, 0x80000000u); 231 cmp(result_reg, 0x80000000u);
232 j(not_equal, &done, Label::kNear); 232 j(not_equal, &done, Label::kNear);
233 233
234 sub(esp, Immediate(kDoubleSize)); 234 sub(esp, Immediate(kDoubleSize));
235 movdbl(MemOperand(esp, 0), input_reg); 235 movsd(MemOperand(esp, 0), input_reg);
236 SlowTruncateToI(result_reg, esp, 0); 236 SlowTruncateToI(result_reg, esp, 0);
237 add(esp, Immediate(kDoubleSize)); 237 add(esp, Immediate(kDoubleSize));
238 bind(&done); 238 bind(&done);
239 } 239 }
240 240
241 241
242 void MacroAssembler::TruncateX87TOSToI(Register result_reg) { 242 void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
243 sub(esp, Immediate(kDoubleSize)); 243 sub(esp, Immediate(kDoubleSize));
244 fst_d(MemOperand(esp, 0)); 244 fst_d(MemOperand(esp, 0));
245 SlowTruncateToI(result_reg, esp, 0); 245 SlowTruncateToI(result_reg, esp, 0);
(...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after
337 sub(Operand(esp), Immediate(kDoubleSize)); 337 sub(Operand(esp), Immediate(kDoubleSize));
338 fstp_d(Operand(esp, 0)); 338 fstp_d(Operand(esp, 0));
339 SlowTruncateToI(result_reg, esp, 0); 339 SlowTruncateToI(result_reg, esp, 0);
340 add(esp, Immediate(kDoubleSize)); 340 add(esp, Immediate(kDoubleSize));
341 } else { 341 } else {
342 fstp(0); 342 fstp(0);
343 SlowTruncateToI(result_reg, input_reg); 343 SlowTruncateToI(result_reg, input_reg);
344 } 344 }
345 } else if (CpuFeatures::IsSupported(SSE2)) { 345 } else if (CpuFeatures::IsSupported(SSE2)) {
346 CpuFeatureScope scope(this, SSE2); 346 CpuFeatureScope scope(this, SSE2);
347 movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); 347 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
348 cvttsd2si(result_reg, Operand(xmm0)); 348 cvttsd2si(result_reg, Operand(xmm0));
349 cmp(result_reg, 0x80000000u); 349 cmp(result_reg, 0x80000000u);
350 j(not_equal, &done, Label::kNear); 350 j(not_equal, &done, Label::kNear);
352 // Check if the input was 0x80000000 (kMinInt). 352 // Check if the input was 0x80000000 (kMinInt).
352 // If no, then we got an overflow and we deoptimize. 352 // If no, then we got an overflow and we deoptimize.
353 ExternalReference min_int = ExternalReference::address_of_min_int(); 353 ExternalReference min_int = ExternalReference::address_of_min_int();
354 ucomisd(xmm0, Operand::StaticVariable(min_int)); 354 ucomisd(xmm0, Operand::StaticVariable(min_int));
355 j(not_equal, &slow_case, Label::kNear); 355 j(not_equal, &slow_case, Label::kNear);
356 j(parity_even, &slow_case, Label::kNear); // NaN. 356 j(parity_even, &slow_case, Label::kNear); // NaN.
357 jmp(&done, Label::kNear); 357 jmp(&done, Label::kNear);
358 358
359 // Slow case. 359 // Slow case.
360 bind(&slow_case); 360 bind(&slow_case);
361 if (input_reg.is(result_reg)) { 361 if (input_reg.is(result_reg)) {
362 // Input is clobbered. Restore number from double scratch. 362 // Input is clobbered. Restore number from double scratch.
363 sub(esp, Immediate(kDoubleSize)); 363 sub(esp, Immediate(kDoubleSize));
364 movdbl(MemOperand(esp, 0), xmm0); 364 movsd(MemOperand(esp, 0), xmm0);
365 SlowTruncateToI(result_reg, esp, 0); 365 SlowTruncateToI(result_reg, esp, 0);
366 add(esp, Immediate(kDoubleSize)); 366 add(esp, Immediate(kDoubleSize));
367 } else { 367 } else {
368 SlowTruncateToI(result_reg, input_reg); 368 SlowTruncateToI(result_reg, input_reg);
369 } 369 }
370 } else { 370 } else {
371 SlowTruncateToI(result_reg, input_reg); 371 SlowTruncateToI(result_reg, input_reg);
372 } 372 }
373 bind(&done); 373 bind(&done);
374 } 374 }
375 375
376 376
377 void MacroAssembler::TaggedToI(Register result_reg, 377 void MacroAssembler::TaggedToI(Register result_reg,
378 Register input_reg, 378 Register input_reg,
379 XMMRegister temp, 379 XMMRegister temp,
380 MinusZeroMode minus_zero_mode, 380 MinusZeroMode minus_zero_mode,
381 Label* lost_precision) { 381 Label* lost_precision) {
382 Label done; 382 Label done;
383 ASSERT(!temp.is(xmm0)); 383 ASSERT(!temp.is(xmm0));
384 384
385 cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 385 cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
386 isolate()->factory()->heap_number_map()); 386 isolate()->factory()->heap_number_map());
387 j(not_equal, lost_precision, Label::kNear); 387 j(not_equal, lost_precision, Label::kNear);
388 388
389 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { 389 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
390 ASSERT(!temp.is(no_xmm_reg)); 390 ASSERT(!temp.is(no_xmm_reg));
391 CpuFeatureScope scope(this, SSE2); 391 CpuFeatureScope scope(this, SSE2);
392 392
393 movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); 393 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
394 cvttsd2si(result_reg, Operand(xmm0)); 394 cvttsd2si(result_reg, Operand(xmm0));
395 Cvtsi2sd(temp, Operand(result_reg)); 395 Cvtsi2sd(temp, Operand(result_reg));
396 ucomisd(xmm0, temp); 396 ucomisd(xmm0, temp);
397 RecordComment("Deferred TaggedToI: lost precision"); 397 RecordComment("Deferred TaggedToI: lost precision");
398 j(not_equal, lost_precision, Label::kNear); 398 j(not_equal, lost_precision, Label::kNear);
399 RecordComment("Deferred TaggedToI: NaN"); 399 RecordComment("Deferred TaggedToI: NaN");
400 j(parity_even, lost_precision, Label::kNear); 400 j(parity_even, lost_precision, Label::kNear);
401 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { 401 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
402 test(result_reg, Operand(result_reg)); 402 test(result_reg, Operand(result_reg));
403 j(not_zero, &done, Label::kNear); 403 j(not_zero, &done, Label::kNear);
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
445 } 445 }
446 446
447 447
448 void MacroAssembler::LoadUint32(XMMRegister dst, 448 void MacroAssembler::LoadUint32(XMMRegister dst,
449 Register src, 449 Register src,
450 XMMRegister scratch) { 450 XMMRegister scratch) {
451 Label done; 451 Label done;
452 cmp(src, Immediate(0)); 452 cmp(src, Immediate(0));
453 ExternalReference uint32_bias = 453 ExternalReference uint32_bias =
454 ExternalReference::address_of_uint32_bias(); 454 ExternalReference::address_of_uint32_bias();
455 movdbl(scratch, Operand::StaticVariable(uint32_bias)); 455 movsd(scratch, Operand::StaticVariable(uint32_bias));
456 Cvtsi2sd(dst, src); 456 Cvtsi2sd(dst, src);
457 j(not_sign, &done, Label::kNear); 457 j(not_sign, &done, Label::kNear);
458 addsd(dst, scratch); 458 addsd(dst, scratch);
459 bind(&done); 459 bind(&done);
460 } 460 }
461 461
462 462
463 void MacroAssembler::LoadUint32NoSSE2(Register src) { 463 void MacroAssembler::LoadUint32NoSSE2(Register src) {
464 Label done; 464 Label done;
465 push(src); 465 push(src);
(...skipping 343 matching lines...) Expand 10 before | Expand all | Expand 10 after
809 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); 809 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
810 cmp(FieldOperand(maybe_number, offset), 810 cmp(FieldOperand(maybe_number, offset),
811 Immediate(kNaNOrInfinityLowerBoundUpper32)); 811 Immediate(kNaNOrInfinityLowerBoundUpper32));
812 j(greater_equal, &maybe_nan, Label::kNear); 812 j(greater_equal, &maybe_nan, Label::kNear);
813 813
814 bind(&not_nan); 814 bind(&not_nan);
815 ExternalReference canonical_nan_reference = 815 ExternalReference canonical_nan_reference =
816 ExternalReference::address_of_canonical_non_hole_nan(); 816 ExternalReference::address_of_canonical_non_hole_nan();
817 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { 817 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
818 CpuFeatureScope use_sse2(this, SSE2); 818 CpuFeatureScope use_sse2(this, SSE2);
819 movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); 819 movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
820 bind(&have_double_value); 820 bind(&have_double_value);
821 movdbl(FieldOperand(elements, key, times_4, 821 movsd(FieldOperand(elements, key, times_4,
822 FixedDoubleArray::kHeaderSize - elements_offset), 822 FixedDoubleArray::kHeaderSize - elements_offset),
823 scratch2); 823 scratch2);
824 } else { 824 } else {
825 fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); 825 fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
826 bind(&have_double_value); 826 bind(&have_double_value);
827 fstp_d(FieldOperand(elements, key, times_4, 827 fstp_d(FieldOperand(elements, key, times_4,
828 FixedDoubleArray::kHeaderSize - elements_offset)); 828 FixedDoubleArray::kHeaderSize - elements_offset));
829 } 829 }
830 jmp(&done); 830 jmp(&done);
831 831
832 bind(&maybe_nan); 832 bind(&maybe_nan);
833 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise 833 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
834 // it's an Infinity, and the non-NaN code path applies. 834 // it's an Infinity, and the non-NaN code path applies.
835 j(greater, &is_nan, Label::kNear); 835 j(greater, &is_nan, Label::kNear);
836 cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); 836 cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
837 j(zero, &not_nan); 837 j(zero, &not_nan);
838 bind(&is_nan); 838 bind(&is_nan);
839 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { 839 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
840 CpuFeatureScope use_sse2(this, SSE2); 840 CpuFeatureScope use_sse2(this, SSE2);
841 movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); 841 movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
842 } else { 842 } else {
843 fld_d(Operand::StaticVariable(canonical_nan_reference)); 843 fld_d(Operand::StaticVariable(canonical_nan_reference));
844 } 844 }
845 jmp(&have_double_value, Label::kNear); 845 jmp(&have_double_value, Label::kNear);
846 846
847 bind(&smi_value); 847 bind(&smi_value);
848 // Value is a smi. Convert to a double and store. 848 // Value is a smi. Convert to a double and store.
849 // Preserve original value. 849 // Preserve original value.
850 mov(scratch1, maybe_number); 850 mov(scratch1, maybe_number);
851 SmiUntag(scratch1); 851 SmiUntag(scratch1);
852 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { 852 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
853 CpuFeatureScope fscope(this, SSE2); 853 CpuFeatureScope fscope(this, SSE2);
854 Cvtsi2sd(scratch2, scratch1); 854 Cvtsi2sd(scratch2, scratch1);
855 movdbl(FieldOperand(elements, key, times_4, 855 movsd(FieldOperand(elements, key, times_4,
856 FixedDoubleArray::kHeaderSize - elements_offset), 856 FixedDoubleArray::kHeaderSize - elements_offset),
857 scratch2); 857 scratch2);
858 } else { 858 } else {
859 push(scratch1); 859 push(scratch1);
860 fild_s(Operand(esp, 0)); 860 fild_s(Operand(esp, 0));
861 pop(scratch1); 861 pop(scratch1);
862 fstp_d(FieldOperand(elements, key, times_4, 862 fstp_d(FieldOperand(elements, key, times_4,
863 FixedDoubleArray::kHeaderSize - elements_offset)); 863 FixedDoubleArray::kHeaderSize - elements_offset));
864 } 864 }
865 bind(&done); 865 bind(&done);
(...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after
1061 1061
1062 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { 1062 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
1063 // Optionally save all XMM registers. 1063 // Optionally save all XMM registers.
1064 if (save_doubles) { 1064 if (save_doubles) {
1065 CpuFeatureScope scope(this, SSE2); 1065 CpuFeatureScope scope(this, SSE2);
1066 int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; 1066 int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
1067 sub(esp, Immediate(space)); 1067 sub(esp, Immediate(space));
1068 const int offset = -2 * kPointerSize; 1068 const int offset = -2 * kPointerSize;
1069 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { 1069 for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
1070 XMMRegister reg = XMMRegister::from_code(i); 1070 XMMRegister reg = XMMRegister::from_code(i);
1071 movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); 1071 movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
1072 } 1072 }
1073 } else { 1073 } else {
1074 sub(esp, Immediate(argc * kPointerSize)); 1074 sub(esp, Immediate(argc * kPointerSize));
1075 } 1075 }
1076 1076
1077 // Get the required frame alignment for the OS. 1077 // Get the required frame alignment for the OS.
1078 const int kFrameAlignment = OS::ActivationFrameAlignment(); 1078 const int kFrameAlignment = OS::ActivationFrameAlignment();
1079 if (kFrameAlignment > 0) { 1079 if (kFrameAlignment > 0) {
1080 ASSERT(IsPowerOf2(kFrameAlignment)); 1080 ASSERT(IsPowerOf2(kFrameAlignment));
1081 and_(esp, -kFrameAlignment); 1081 and_(esp, -kFrameAlignment);
(...skipping 23 matching lines...) Expand all
1105 } 1105 }
1106 1106
1107 1107
1108 void MacroAssembler::LeaveExitFrame(bool save_doubles) { 1108 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
1109 // Optionally restore all XMM registers. 1109 // Optionally restore all XMM registers.
1110 if (save_doubles) { 1110 if (save_doubles) {
1111 CpuFeatureScope scope(this, SSE2); 1111 CpuFeatureScope scope(this, SSE2);
1112 const int offset = -2 * kPointerSize; 1112 const int offset = -2 * kPointerSize;
1113 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { 1113 for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
1114 XMMRegister reg = XMMRegister::from_code(i); 1114 XMMRegister reg = XMMRegister::from_code(i);
1115 movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); 1115 movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
1116 } 1116 }
1117 } 1117 }
1118 1118
1119 // Get the return address from the stack and restore the frame pointer. 1119 // Get the return address from the stack and restore the frame pointer.
1120 mov(ecx, Operand(ebp, 1 * kPointerSize)); 1120 mov(ecx, Operand(ebp, 1 * kPointerSize));
1121 mov(ebp, Operand(ebp, 0 * kPointerSize)); 1121 mov(ebp, Operand(ebp, 0 * kPointerSize));
1122 1122
1123 // Pop the arguments and the receiver from the caller stack. 1123 // Pop the arguments and the receiver from the caller stack.
1124 lea(esp, Operand(esi, 1 * kPointerSize)); 1124 lea(esp, Operand(esi, 1 * kPointerSize));
1125 1125
(...skipping 1937 matching lines...) Expand 10 before | Expand all | Expand 10 after
3063 Register index = scratch; 3063 Register index = scratch;
3064 Register probe = mask; 3064 Register probe = mask;
3065 mov(probe, 3065 mov(probe,
3066 FieldOperand(number_string_cache, 3066 FieldOperand(number_string_cache,
3067 index, 3067 index,
3068 times_twice_pointer_size, 3068 times_twice_pointer_size,
3069 FixedArray::kHeaderSize)); 3069 FixedArray::kHeaderSize));
3070 JumpIfSmi(probe, not_found); 3070 JumpIfSmi(probe, not_found);
3071 if (CpuFeatures::IsSupported(SSE2)) { 3071 if (CpuFeatures::IsSupported(SSE2)) {
3072 CpuFeatureScope fscope(this, SSE2); 3072 CpuFeatureScope fscope(this, SSE2);
3073 movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); 3073 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
3074 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset)); 3074 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
3075 } else { 3075 } else {
3076 fld_d(FieldOperand(object, HeapNumber::kValueOffset)); 3076 fld_d(FieldOperand(object, HeapNumber::kValueOffset));
3077 fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); 3077 fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
3078 FCmp(); 3078 FCmp();
3079 } 3079 }
3080 j(parity_even, not_found); // Bail out if NaN is involved. 3080 j(parity_even, not_found); // Bail out if NaN is involved.
3081 j(not_equal, not_found); // The cache did not contain this value. 3081 j(not_equal, not_found); // The cache did not contain this value.
3082 jmp(&load_result_from_cache, Label::kNear); 3082 jmp(&load_result_from_cache, Label::kNear);
3083 3083
(...skipping 438 matching lines...) Expand 10 before | Expand all | Expand 10 after
3522 cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top)); 3522 cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
3523 j(greater, no_memento_found); 3523 j(greater, no_memento_found);
3524 cmp(MemOperand(scratch_reg, -AllocationMemento::kSize), 3524 cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
3525 Immediate(isolate()->factory()->allocation_memento_map())); 3525 Immediate(isolate()->factory()->allocation_memento_map()));
3526 } 3526 }
3527 3527
3528 3528
3529 } } // namespace v8::internal 3529 } } // namespace v8::internal
3530 3530
3531 #endif // V8_TARGET_ARCH_IA32 3531 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« no previous file with comments | « src/ia32/lithium-gap-resolver-ia32.cc ('k') | src/ia32/stub-cache-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698