OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "v8.h" | 5 #include "v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_IA32 | 7 #if V8_TARGET_ARCH_IA32 |
8 | 8 |
9 #include "bootstrapper.h" | 9 #include "bootstrapper.h" |
10 #include "codegen.h" | 10 #include "codegen.h" |
(...skipping 226 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
237 j(no_overflow, &done, Label::kNear); | 237 j(no_overflow, &done, Label::kNear); |
238 | 238 |
239 sub(esp, Immediate(kDoubleSize)); | 239 sub(esp, Immediate(kDoubleSize)); |
240 movsd(MemOperand(esp, 0), input_reg); | 240 movsd(MemOperand(esp, 0), input_reg); |
241 SlowTruncateToI(result_reg, esp, 0); | 241 SlowTruncateToI(result_reg, esp, 0); |
242 add(esp, Immediate(kDoubleSize)); | 242 add(esp, Immediate(kDoubleSize)); |
243 bind(&done); | 243 bind(&done); |
244 } | 244 } |
245 | 245 |
246 | 246 |
247 void MacroAssembler::TruncateX87TOSToI(Register result_reg) { | |
248 sub(esp, Immediate(kDoubleSize)); | |
249 fst_d(MemOperand(esp, 0)); | |
250 SlowTruncateToI(result_reg, esp, 0); | |
251 add(esp, Immediate(kDoubleSize)); | |
252 } | |
253 | |
254 | |
255 void MacroAssembler::X87TOSToI(Register result_reg, | |
256 MinusZeroMode minus_zero_mode, | |
257 Label* conversion_failed, | |
258 Label::Distance dst) { | |
259 Label done; | |
260 sub(esp, Immediate(kPointerSize)); | |
261 fld(0); | |
262 fist_s(MemOperand(esp, 0)); | |
263 fild_s(MemOperand(esp, 0)); | |
264 pop(result_reg); | |
265 FCmp(); | |
266 j(not_equal, conversion_failed, dst); | |
267 j(parity_even, conversion_failed, dst); | |
268 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { | |
269 test(result_reg, Operand(result_reg)); | |
270 j(not_zero, &done, Label::kNear); | |
271 // To check for minus zero, we load the value again as float, and check | |
272 // if that is still 0. | |
273 sub(esp, Immediate(kPointerSize)); | |
274 fst_s(MemOperand(esp, 0)); | |
275 pop(result_reg); | |
276 test(result_reg, Operand(result_reg)); | |
277 j(not_zero, conversion_failed, dst); | |
278 } | |
279 bind(&done); | |
280 } | |
281 | |
282 | |
283 void MacroAssembler::DoubleToI(Register result_reg, | 247 void MacroAssembler::DoubleToI(Register result_reg, |
284 XMMRegister input_reg, | 248 XMMRegister input_reg, |
285 XMMRegister scratch, | 249 XMMRegister scratch, |
286 MinusZeroMode minus_zero_mode, | 250 MinusZeroMode minus_zero_mode, |
287 Label* conversion_failed, | 251 Label* conversion_failed, |
288 Label::Distance dst) { | 252 Label::Distance dst) { |
289 ASSERT(!input_reg.is(scratch)); | 253 ASSERT(!input_reg.is(scratch)); |
290 cvttsd2si(result_reg, Operand(input_reg)); | 254 cvttsd2si(result_reg, Operand(input_reg)); |
291 Cvtsi2sd(scratch, Operand(result_reg)); | 255 Cvtsi2sd(scratch, Operand(result_reg)); |
292 ucomisd(scratch, input_reg); | 256 ucomisd(scratch, input_reg); |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
340 if (input_reg.is(result_reg)) { | 304 if (input_reg.is(result_reg)) { |
341 // Input is clobbered. Restore number from fpu stack | 305 // Input is clobbered. Restore number from fpu stack |
342 sub(Operand(esp), Immediate(kDoubleSize)); | 306 sub(Operand(esp), Immediate(kDoubleSize)); |
343 fstp_d(Operand(esp, 0)); | 307 fstp_d(Operand(esp, 0)); |
344 SlowTruncateToI(result_reg, esp, 0); | 308 SlowTruncateToI(result_reg, esp, 0); |
345 add(esp, Immediate(kDoubleSize)); | 309 add(esp, Immediate(kDoubleSize)); |
346 } else { | 310 } else { |
347 fstp(0); | 311 fstp(0); |
348 SlowTruncateToI(result_reg, input_reg); | 312 SlowTruncateToI(result_reg, input_reg); |
349 } | 313 } |
350 } else if (CpuFeatures::IsSupported(SSE2)) { | 314 } else { |
351 CpuFeatureScope scope(this, SSE2); | |
352 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 315 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
353 cvttsd2si(result_reg, Operand(xmm0)); | 316 cvttsd2si(result_reg, Operand(xmm0)); |
354 cmp(result_reg, 0x1); | 317 cmp(result_reg, 0x1); |
355 j(no_overflow, &done, Label::kNear); | 318 j(no_overflow, &done, Label::kNear); |
356 // Check if the input was 0x80000000 (kMinInt). | 319 // Check if the input was 0x80000000 (kMinInt). |
357 // If not, then we got an overflow and we deoptimize. | 320 // If not, then we got an overflow and we deoptimize. |
358 ExternalReference min_int = ExternalReference::address_of_min_int(); | 321 ExternalReference min_int = ExternalReference::address_of_min_int(); |
359 ucomisd(xmm0, Operand::StaticVariable(min_int)); | 322 ucomisd(xmm0, Operand::StaticVariable(min_int)); |
360 j(not_equal, &slow_case, Label::kNear); | 323 j(not_equal, &slow_case, Label::kNear); |
361 j(parity_even, &slow_case, Label::kNear); // NaN. | 324 j(parity_even, &slow_case, Label::kNear); // NaN. |
362 jmp(&done, Label::kNear); | 325 jmp(&done, Label::kNear); |
363 | 326 |
364 // Slow case. | 327 // Slow case. |
365 bind(&slow_case); | 328 bind(&slow_case); |
366 if (input_reg.is(result_reg)) { | 329 if (input_reg.is(result_reg)) { |
367 // Input is clobbered. Restore number from double scratch. | 330 // Input is clobbered. Restore number from double scratch. |
368 sub(esp, Immediate(kDoubleSize)); | 331 sub(esp, Immediate(kDoubleSize)); |
369 movsd(MemOperand(esp, 0), xmm0); | 332 movsd(MemOperand(esp, 0), xmm0); |
370 SlowTruncateToI(result_reg, esp, 0); | 333 SlowTruncateToI(result_reg, esp, 0); |
371 add(esp, Immediate(kDoubleSize)); | 334 add(esp, Immediate(kDoubleSize)); |
372 } else { | 335 } else { |
373 SlowTruncateToI(result_reg, input_reg); | 336 SlowTruncateToI(result_reg, input_reg); |
374 } | 337 } |
375 } else { | |
376 SlowTruncateToI(result_reg, input_reg); | |
377 } | 338 } |
378 bind(&done); | 339 bind(&done); |
379 } | 340 } |
380 | 341 |
381 | 342 |
382 void MacroAssembler::TaggedToI(Register result_reg, | 343 void MacroAssembler::TaggedToI(Register result_reg, |
383 Register input_reg, | 344 Register input_reg, |
384 XMMRegister temp, | 345 XMMRegister temp, |
385 MinusZeroMode minus_zero_mode, | 346 MinusZeroMode minus_zero_mode, |
386 Label* lost_precision) { | 347 Label* lost_precision) { |
387 Label done; | 348 Label done; |
388 ASSERT(!temp.is(xmm0)); | 349 ASSERT(!temp.is(xmm0)); |
389 | 350 |
390 cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 351 cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
391 isolate()->factory()->heap_number_map()); | 352 isolate()->factory()->heap_number_map()); |
392 j(not_equal, lost_precision, Label::kNear); | 353 j(not_equal, lost_precision, Label::kNear); |
393 | 354 |
394 if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) { | 355 ASSERT(!temp.is(no_xmm_reg)); |
395 ASSERT(!temp.is(no_xmm_reg)); | |
396 CpuFeatureScope scope(this, SSE2); | |
397 | 356 |
398 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 357 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
399 cvttsd2si(result_reg, Operand(xmm0)); | 358 cvttsd2si(result_reg, Operand(xmm0)); |
400 Cvtsi2sd(temp, Operand(result_reg)); | 359 Cvtsi2sd(temp, Operand(result_reg)); |
401 ucomisd(xmm0, temp); | 360 ucomisd(xmm0, temp); |
402 RecordComment("Deferred TaggedToI: lost precision"); | 361 RecordComment("Deferred TaggedToI: lost precision"); |
403 j(not_equal, lost_precision, Label::kNear); | 362 j(not_equal, lost_precision, Label::kNear); |
404 RecordComment("Deferred TaggedToI: NaN"); | 363 RecordComment("Deferred TaggedToI: NaN"); |
405 j(parity_even, lost_precision, Label::kNear); | 364 j(parity_even, lost_precision, Label::kNear); |
406 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { | 365 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { |
407 test(result_reg, Operand(result_reg)); | 366 test(result_reg, Operand(result_reg)); |
408 j(not_zero, &done, Label::kNear); | 367 j(not_zero, &done, Label::kNear); |
409 movmskpd(result_reg, xmm0); | 368 movmskpd(result_reg, xmm0); |
410 and_(result_reg, 1); | 369 and_(result_reg, 1); |
411 RecordComment("Deferred TaggedToI: minus zero"); | 370 RecordComment("Deferred TaggedToI: minus zero"); |
412 j(not_zero, lost_precision, Label::kNear); | 371 j(not_zero, lost_precision, Label::kNear); |
413 } | |
414 } else { | |
415 // TODO(olivf) Converting a number on the fpu is actually quite slow. We | |
416 // should first try a fast conversion and then bailout to this slow case. | |
417 Label lost_precision_pop, zero_check; | |
418 Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO) | |
419 ? &lost_precision_pop : lost_precision; | |
420 sub(esp, Immediate(kPointerSize)); | |
421 fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); | |
422 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0); | |
423 fist_s(MemOperand(esp, 0)); | |
424 fild_s(MemOperand(esp, 0)); | |
425 FCmp(); | |
426 pop(result_reg); | |
427 j(not_equal, lost_precision_int, Label::kNear); | |
428 j(parity_even, lost_precision_int, Label::kNear); // NaN. | |
429 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { | |
430 test(result_reg, Operand(result_reg)); | |
431 j(zero, &zero_check, Label::kNear); | |
432 fstp(0); | |
433 jmp(&done, Label::kNear); | |
434 bind(&zero_check); | |
435 // To check for minus zero, we load the value again as float, and check | |
436 // if that is still 0. | |
437 sub(esp, Immediate(kPointerSize)); | |
438 fstp_s(Operand(esp, 0)); | |
439 pop(result_reg); | |
440 test(result_reg, Operand(result_reg)); | |
441 j(zero, &done, Label::kNear); | |
442 jmp(lost_precision, Label::kNear); | |
443 | |
444 bind(&lost_precision_pop); | |
445 fstp(0); | |
446 jmp(lost_precision, Label::kNear); | |
447 } | |
448 } | 372 } |
449 bind(&done); | 373 bind(&done); |
450 } | 374 } |
451 | 375 |
452 | 376 |
453 void MacroAssembler::LoadUint32(XMMRegister dst, | 377 void MacroAssembler::LoadUint32(XMMRegister dst, |
454 Register src, | 378 Register src, |
455 XMMRegister scratch) { | 379 XMMRegister scratch) { |
456 Label done; | 380 Label done; |
457 cmp(src, Immediate(0)); | 381 cmp(src, Immediate(0)); |
458 ExternalReference uint32_bias = | 382 ExternalReference uint32_bias = |
459 ExternalReference::address_of_uint32_bias(); | 383 ExternalReference::address_of_uint32_bias(); |
460 movsd(scratch, Operand::StaticVariable(uint32_bias)); | 384 movsd(scratch, Operand::StaticVariable(uint32_bias)); |
461 Cvtsi2sd(dst, src); | 385 Cvtsi2sd(dst, src); |
462 j(not_sign, &done, Label::kNear); | 386 j(not_sign, &done, Label::kNear); |
463 addsd(dst, scratch); | 387 addsd(dst, scratch); |
464 bind(&done); | 388 bind(&done); |
465 } | 389 } |
466 | 390 |
467 | 391 |
468 void MacroAssembler::LoadUint32NoSSE2(Register src) { | |
469 Label done; | |
470 push(src); | |
471 fild_s(Operand(esp, 0)); | |
472 cmp(src, Immediate(0)); | |
473 j(not_sign, &done, Label::kNear); | |
474 ExternalReference uint32_bias = | |
475 ExternalReference::address_of_uint32_bias(); | |
476 fld_d(Operand::StaticVariable(uint32_bias)); | |
477 faddp(1); | |
478 bind(&done); | |
479 add(esp, Immediate(kPointerSize)); | |
480 } | |
481 | |
482 | |
483 void MacroAssembler::RecordWriteArray(Register object, | 392 void MacroAssembler::RecordWriteArray(Register object, |
484 Register value, | 393 Register value, |
485 Register index, | 394 Register index, |
486 SaveFPRegsMode save_fp, | 395 SaveFPRegsMode save_fp, |
487 RememberedSetAction remembered_set_action, | 396 RememberedSetAction remembered_set_action, |
488 SmiCheck smi_check) { | 397 SmiCheck smi_check) { |
489 // First, check if a write barrier is even needed. The tests below | 398 // First, check if a write barrier is even needed. The tests below |
490 // catch stores of Smis. | 399 // catch stores of Smis. |
491 Label done; | 400 Label done; |
492 | 401 |
(...skipping 294 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
787 } | 696 } |
788 | 697 |
789 | 698 |
790 void MacroAssembler::StoreNumberToDoubleElements( | 699 void MacroAssembler::StoreNumberToDoubleElements( |
791 Register maybe_number, | 700 Register maybe_number, |
792 Register elements, | 701 Register elements, |
793 Register key, | 702 Register key, |
794 Register scratch1, | 703 Register scratch1, |
795 XMMRegister scratch2, | 704 XMMRegister scratch2, |
796 Label* fail, | 705 Label* fail, |
797 bool specialize_for_processor, | |
798 int elements_offset) { | 706 int elements_offset) { |
799 Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value; | 707 Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value; |
800 JumpIfSmi(maybe_number, &smi_value, Label::kNear); | 708 JumpIfSmi(maybe_number, &smi_value, Label::kNear); |
801 | 709 |
802 CheckMap(maybe_number, | 710 CheckMap(maybe_number, |
803 isolate()->factory()->heap_number_map(), | 711 isolate()->factory()->heap_number_map(), |
804 fail, | 712 fail, |
805 DONT_DO_SMI_CHECK); | 713 DONT_DO_SMI_CHECK); |
806 | 714 |
807 // Double value, canonicalize NaN. | 715 // Double value, canonicalize NaN. |
808 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); | 716 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); |
809 cmp(FieldOperand(maybe_number, offset), | 717 cmp(FieldOperand(maybe_number, offset), |
810 Immediate(kNaNOrInfinityLowerBoundUpper32)); | 718 Immediate(kNaNOrInfinityLowerBoundUpper32)); |
811 j(greater_equal, &maybe_nan, Label::kNear); | 719 j(greater_equal, &maybe_nan, Label::kNear); |
812 | 720 |
813 bind(¬_nan); | 721 bind(¬_nan); |
814 ExternalReference canonical_nan_reference = | 722 ExternalReference canonical_nan_reference = |
815 ExternalReference::address_of_canonical_non_hole_nan(); | 723 ExternalReference::address_of_canonical_non_hole_nan(); |
816 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { | 724 movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); |
817 CpuFeatureScope use_sse2(this, SSE2); | 725 bind(&have_double_value); |
818 movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); | 726 movsd(FieldOperand(elements, key, times_4, |
819 bind(&have_double_value); | 727 FixedDoubleArray::kHeaderSize - elements_offset), |
820 movsd(FieldOperand(elements, key, times_4, | 728 scratch2); |
821 FixedDoubleArray::kHeaderSize - elements_offset), | |
822 scratch2); | |
823 } else { | |
824 fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); | |
825 bind(&have_double_value); | |
826 fstp_d(FieldOperand(elements, key, times_4, | |
827 FixedDoubleArray::kHeaderSize - elements_offset)); | |
828 } | |
829 jmp(&done); | 729 jmp(&done); |
830 | 730 |
831 bind(&maybe_nan); | 731 bind(&maybe_nan); |
832 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise | 732 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise |
833 // it's an Infinity, and the non-NaN code path applies. | 733 // it's an Infinity, and the non-NaN code path applies. |
834 j(greater, &is_nan, Label::kNear); | 734 j(greater, &is_nan, Label::kNear); |
835 cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); | 735 cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); |
836 j(zero, ¬_nan); | 736 j(zero, ¬_nan); |
837 bind(&is_nan); | 737 bind(&is_nan); |
838 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { | 738 movsd(scratch2, Operand::StaticVariable(canonical_nan_reference)); |
839 CpuFeatureScope use_sse2(this, SSE2); | |
840 movsd(scratch2, Operand::StaticVariable(canonical_nan_reference)); | |
841 } else { | |
842 fld_d(Operand::StaticVariable(canonical_nan_reference)); | |
843 } | |
844 jmp(&have_double_value, Label::kNear); | 739 jmp(&have_double_value, Label::kNear); |
845 | 740 |
846 bind(&smi_value); | 741 bind(&smi_value); |
847 // Value is a smi. Convert to a double and store. | 742 // Value is a smi. Convert to a double and store. |
848 // Preserve original value. | 743 // Preserve original value. |
849 mov(scratch1, maybe_number); | 744 mov(scratch1, maybe_number); |
850 SmiUntag(scratch1); | 745 SmiUntag(scratch1); |
851 if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { | 746 Cvtsi2sd(scratch2, scratch1); |
852 CpuFeatureScope fscope(this, SSE2); | 747 movsd(FieldOperand(elements, key, times_4, |
853 Cvtsi2sd(scratch2, scratch1); | 748 FixedDoubleArray::kHeaderSize - elements_offset), |
854 movsd(FieldOperand(elements, key, times_4, | 749 scratch2); |
855 FixedDoubleArray::kHeaderSize - elements_offset), | |
856 scratch2); | |
857 } else { | |
858 push(scratch1); | |
859 fild_s(Operand(esp, 0)); | |
860 pop(scratch1); | |
861 fstp_d(FieldOperand(elements, key, times_4, | |
862 FixedDoubleArray::kHeaderSize - elements_offset)); | |
863 } | |
864 bind(&done); | 750 bind(&done); |
865 } | 751 } |
866 | 752 |
867 | 753 |
868 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) { | 754 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) { |
869 cmp(FieldOperand(obj, HeapObject::kMapOffset), map); | 755 cmp(FieldOperand(obj, HeapObject::kMapOffset), map); |
870 } | 756 } |
871 | 757 |
872 | 758 |
873 void MacroAssembler::CheckMap(Register obj, | 759 void MacroAssembler::CheckMap(Register obj, |
(...skipping 212 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1086 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate()); | 972 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate()); |
1087 ExternalReference context_address(Isolate::kContextAddress, isolate()); | 973 ExternalReference context_address(Isolate::kContextAddress, isolate()); |
1088 mov(Operand::StaticVariable(c_entry_fp_address), ebp); | 974 mov(Operand::StaticVariable(c_entry_fp_address), ebp); |
1089 mov(Operand::StaticVariable(context_address), esi); | 975 mov(Operand::StaticVariable(context_address), esi); |
1090 } | 976 } |
1091 | 977 |
1092 | 978 |
1093 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { | 979 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { |
1094 // Optionally save all XMM registers. | 980 // Optionally save all XMM registers. |
1095 if (save_doubles) { | 981 if (save_doubles) { |
1096 CpuFeatureScope scope(this, SSE2); | |
1097 int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; | 982 int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; |
1098 sub(esp, Immediate(space)); | 983 sub(esp, Immediate(space)); |
1099 const int offset = -2 * kPointerSize; | 984 const int offset = -2 * kPointerSize; |
1100 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { | 985 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
1101 XMMRegister reg = XMMRegister::from_code(i); | 986 XMMRegister reg = XMMRegister::from_code(i); |
1102 movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); | 987 movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); |
1103 } | 988 } |
1104 } else { | 989 } else { |
1105 sub(esp, Immediate(argc * kPointerSize)); | 990 sub(esp, Immediate(argc * kPointerSize)); |
1106 } | 991 } |
(...skipping 25 matching lines...) Expand all Loading... |
1132 | 1017 |
1133 void MacroAssembler::EnterApiExitFrame(int argc) { | 1018 void MacroAssembler::EnterApiExitFrame(int argc) { |
1134 EnterExitFramePrologue(); | 1019 EnterExitFramePrologue(); |
1135 EnterExitFrameEpilogue(argc, false); | 1020 EnterExitFrameEpilogue(argc, false); |
1136 } | 1021 } |
1137 | 1022 |
1138 | 1023 |
1139 void MacroAssembler::LeaveExitFrame(bool save_doubles) { | 1024 void MacroAssembler::LeaveExitFrame(bool save_doubles) { |
1140 // Optionally restore all XMM registers. | 1025 // Optionally restore all XMM registers. |
1141 if (save_doubles) { | 1026 if (save_doubles) { |
1142 CpuFeatureScope scope(this, SSE2); | |
1143 const int offset = -2 * kPointerSize; | 1027 const int offset = -2 * kPointerSize; |
1144 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { | 1028 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
1145 XMMRegister reg = XMMRegister::from_code(i); | 1029 XMMRegister reg = XMMRegister::from_code(i); |
1146 movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); | 1030 movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); |
1147 } | 1031 } |
1148 } | 1032 } |
1149 | 1033 |
1150 // Get the return address from the stack and restore the frame pointer. | 1034 // Get the return address from the stack and restore the frame pointer. |
1151 mov(ecx, Operand(ebp, 1 * kPointerSize)); | 1035 mov(ecx, Operand(ebp, 1 * kPointerSize)); |
1152 mov(ebp, Operand(ebp, 0 * kPointerSize)); | 1036 mov(ebp, Operand(ebp, 0 * kPointerSize)); |
(...skipping 1052 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2205 // constant, we check that the actual number of arguments match the | 2089 // constant, we check that the actual number of arguments match the |
2206 // expectation. | 2090 // expectation. |
2207 CHECK(f->nargs < 0 || f->nargs == num_arguments); | 2091 CHECK(f->nargs < 0 || f->nargs == num_arguments); |
2208 | 2092 |
2209 // TODO(1236192): Most runtime routines don't need the number of | 2093 // TODO(1236192): Most runtime routines don't need the number of |
2210 // arguments passed in because it is constant. At some point we | 2094 // arguments passed in because it is constant. At some point we |
2211 // should remove this need and make the runtime routine entry code | 2095 // should remove this need and make the runtime routine entry code |
2212 // smarter. | 2096 // smarter. |
2213 Move(eax, Immediate(num_arguments)); | 2097 Move(eax, Immediate(num_arguments)); |
2214 mov(ebx, Immediate(ExternalReference(f, isolate()))); | 2098 mov(ebx, Immediate(ExternalReference(f, isolate()))); |
2215 CEntryStub ces(isolate(), | 2099 CEntryStub ces(isolate(), 1, save_doubles); |
2216 1, | |
2217 CpuFeatures::IsSupported(SSE2) ? save_doubles | |
2218 : kDontSaveFPRegs); | |
2219 CallStub(&ces); | 2100 CallStub(&ces); |
2220 } | 2101 } |
2221 | 2102 |
2222 | 2103 |
2223 void MacroAssembler::CallExternalReference(ExternalReference ref, | 2104 void MacroAssembler::CallExternalReference(ExternalReference ref, |
2224 int num_arguments) { | 2105 int num_arguments) { |
2225 mov(eax, Immediate(num_arguments)); | 2106 mov(eax, Immediate(num_arguments)); |
2226 mov(ebx, Immediate(ref)); | 2107 mov(ebx, Immediate(ref)); |
2227 | 2108 |
2228 CEntryStub stub(isolate(), 1); | 2109 CEntryStub stub(isolate(), 1); |
(...skipping 528 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2757 ret(bytes_dropped); | 2638 ret(bytes_dropped); |
2758 } else { | 2639 } else { |
2759 pop(scratch); | 2640 pop(scratch); |
2760 add(esp, Immediate(bytes_dropped)); | 2641 add(esp, Immediate(bytes_dropped)); |
2761 push(scratch); | 2642 push(scratch); |
2762 ret(0); | 2643 ret(0); |
2763 } | 2644 } |
2764 } | 2645 } |
2765 | 2646 |
2766 | 2647 |
2767 void MacroAssembler::VerifyX87StackDepth(uint32_t depth) { | |
2768 // Make sure the floating point stack is either empty or has depth items. | |
2769 ASSERT(depth <= 7); | |
2770 // This is very expensive. | |
2771 ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts); | |
2772 | |
2773 // The top-of-stack (tos) is 7 if there is one item pushed. | |
2774 int tos = (8 - depth) % 8; | |
2775 const int kTopMask = 0x3800; | |
2776 push(eax); | |
2777 fwait(); | |
2778 fnstsw_ax(); | |
2779 and_(eax, kTopMask); | |
2780 shr(eax, 11); | |
2781 cmp(eax, Immediate(tos)); | |
2782 Check(equal, kUnexpectedFPUStackDepthAfterInstruction); | |
2783 fnclex(); | |
2784 pop(eax); | |
2785 } | |
2786 | |
2787 | |
2788 void MacroAssembler::Drop(int stack_elements) { | 2648 void MacroAssembler::Drop(int stack_elements) { |
2789 if (stack_elements > 0) { | 2649 if (stack_elements > 0) { |
2790 add(esp, Immediate(stack_elements * kPointerSize)); | 2650 add(esp, Immediate(stack_elements * kPointerSize)); |
2791 } | 2651 } |
2792 } | 2652 } |
2793 | 2653 |
2794 | 2654 |
2795 void MacroAssembler::Move(Register dst, Register src) { | 2655 void MacroAssembler::Move(Register dst, Register src) { |
2796 if (!dst.is(src)) { | 2656 if (!dst.is(src)) { |
2797 mov(dst, src); | 2657 mov(dst, src); |
(...skipping 10 matching lines...) Expand all Loading... |
2808 } | 2668 } |
2809 | 2669 |
2810 | 2670 |
2811 void MacroAssembler::Move(const Operand& dst, const Immediate& x) { | 2671 void MacroAssembler::Move(const Operand& dst, const Immediate& x) { |
2812 mov(dst, x); | 2672 mov(dst, x); |
2813 } | 2673 } |
2814 | 2674 |
2815 | 2675 |
2816 void MacroAssembler::Move(XMMRegister dst, double val) { | 2676 void MacroAssembler::Move(XMMRegister dst, double val) { |
2817 // TODO(titzer): recognize double constants with ExternalReferences. | 2677 // TODO(titzer): recognize double constants with ExternalReferences. |
2818 CpuFeatureScope scope(this, SSE2); | |
2819 uint64_t int_val = BitCast<uint64_t, double>(val); | 2678 uint64_t int_val = BitCast<uint64_t, double>(val); |
2820 if (int_val == 0) { | 2679 if (int_val == 0) { |
2821 xorps(dst, dst); | 2680 xorps(dst, dst); |
2822 } else { | 2681 } else { |
2823 int32_t lower = static_cast<int32_t>(int_val); | 2682 int32_t lower = static_cast<int32_t>(int_val); |
2824 int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt); | 2683 int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt); |
2825 push(Immediate(upper)); | 2684 push(Immediate(upper)); |
2826 push(Immediate(lower)); | 2685 push(Immediate(lower)); |
2827 movsd(dst, Operand(esp, 0)); | 2686 movsd(dst, Operand(esp, 0)); |
2828 add(esp, Immediate(kDoubleSize)); | 2687 add(esp, Immediate(kDoubleSize)); |
(...skipping 239 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3068 // Object is heap number and hash is now in scratch. Calculate cache index. | 2927 // Object is heap number and hash is now in scratch. Calculate cache index. |
3069 and_(scratch, mask); | 2928 and_(scratch, mask); |
3070 Register index = scratch; | 2929 Register index = scratch; |
3071 Register probe = mask; | 2930 Register probe = mask; |
3072 mov(probe, | 2931 mov(probe, |
3073 FieldOperand(number_string_cache, | 2932 FieldOperand(number_string_cache, |
3074 index, | 2933 index, |
3075 times_twice_pointer_size, | 2934 times_twice_pointer_size, |
3076 FixedArray::kHeaderSize)); | 2935 FixedArray::kHeaderSize)); |
3077 JumpIfSmi(probe, not_found); | 2936 JumpIfSmi(probe, not_found); |
3078 if (CpuFeatures::IsSupported(SSE2)) { | 2937 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); |
3079 CpuFeatureScope fscope(this, SSE2); | 2938 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset)); |
3080 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); | |
3081 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset)); | |
3082 } else { | |
3083 fld_d(FieldOperand(object, HeapNumber::kValueOffset)); | |
3084 fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); | |
3085 FCmp(); | |
3086 } | |
3087 j(parity_even, not_found); // Bail out if NaN is involved. | 2939 j(parity_even, not_found); // Bail out if NaN is involved. |
3088 j(not_equal, not_found); // The cache did not contain this value. | 2940 j(not_equal, not_found); // The cache did not contain this value. |
3089 jmp(&load_result_from_cache, Label::kNear); | 2941 jmp(&load_result_from_cache, Label::kNear); |
3090 | 2942 |
3091 bind(&smi_hash_calculated); | 2943 bind(&smi_hash_calculated); |
3092 // Object is smi and hash is now in scratch. Calculate cache index. | 2944 // Object is smi and hash is now in scratch. Calculate cache index. |
3093 and_(scratch, mask); | 2945 and_(scratch, mask); |
3094 // Check if the entry is the smi we are looking for. | 2946 // Check if the entry is the smi we are looking for. |
3095 cmp(object, | 2947 cmp(object, |
3096 FieldOperand(number_string_cache, | 2948 FieldOperand(number_string_cache, |
(...skipping 515 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3612 if (ms.shift() > 0) sar(edx, ms.shift()); | 3464 if (ms.shift() > 0) sar(edx, ms.shift()); |
3613 mov(eax, dividend); | 3465 mov(eax, dividend); |
3614 shr(eax, 31); | 3466 shr(eax, 31); |
3615 add(edx, eax); | 3467 add(edx, eax); |
3616 } | 3468 } |
3617 | 3469 |
3618 | 3470 |
3619 } } // namespace v8::internal | 3471 } } // namespace v8::internal |
3620 | 3472 |
3621 #endif // V8_TARGET_ARCH_IA32 | 3473 #endif // V8_TARGET_ARCH_IA32 |
OLD | NEW |