Chromium Code Reviews
Diff: src/ia32/macro-assembler-ia32.cc

Issue 39973003: Merge bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: again | Created 7 years, 2 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 214 matching lines...)


 void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                        XMMRegister input_reg) {
   Label done;
   cvttsd2si(result_reg, Operand(input_reg));
   cmp(result_reg, 0x80000000u);
   j(not_equal, &done, Label::kNear);

   sub(esp, Immediate(kDoubleSize));
-  movdbl(MemOperand(esp, 0), input_reg);
+  movsd(MemOperand(esp, 0), input_reg);
   SlowTruncateToI(result_reg, esp, 0);
   add(esp, Immediate(kDoubleSize));
   bind(&done);
 }
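
The fast path above leans on how cvttsd2si behaves on ia32: a NaN or out-of-int32-range input produces the "integer indefinite" value 0x80000000, so comparing the result against that sentinel is what routes the conversion to the slow, memory-based path. A minimal standalone sketch of that behaviour, using SSE intrinsics rather than V8's macro assembler:

    // Standalone illustration (not V8 code): cvttsd2si yields the "integer
    // indefinite" value 0x80000000 for NaN and out-of-range inputs, which is
    // the sentinel TruncateDoubleToI compares against before taking the slow path.
    #include <cstdint>
    #include <cstdio>
    #include <emmintrin.h>

    int32_t truncate_fast_path(double d, bool* needs_slow_path) {
      int32_t r = _mm_cvttsd_si32(_mm_set_sd(d));  // the cvttsd2si instruction
      *needs_slow_path = (static_cast<uint32_t>(r) == 0x80000000u);
      return r;
    }

    int main() {
      bool slow;
      truncate_fast_path(41.9, &slow);           printf("41.9    -> slow=%d\n", slow);  // 0
      truncate_fast_path(3e9, &slow);            printf("3e9     -> slow=%d\n", slow);  // 1
      truncate_fast_path(-2147483648.0, &slow);  printf("kMinInt -> slow=%d\n", slow);  // 1
      return 0;
    }

Note that kMinInt itself also hits the sentinel even though 0x80000000 is the correct result for it; that is why the SSE2 hunk further down re-checks the input against address_of_min_int before falling back to the slow path.
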


 void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
   sub(esp, Immediate(kDoubleSize));
   fst_d(MemOperand(esp, 0));
   SlowTruncateToI(result_reg, esp, 0);
(...skipping 91 matching lines...)
       sub(Operand(esp), Immediate(kDoubleSize));
       fstp_d(Operand(esp, 0));
       SlowTruncateToI(result_reg, esp, 0);
       add(esp, Immediate(kDoubleSize));
     } else {
       fstp(0);
       SlowTruncateToI(result_reg, input_reg);
     }
   } else if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(this, SSE2);
-    movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     cvttsd2si(result_reg, Operand(xmm0));
     cmp(result_reg, 0x80000000u);
     j(not_equal, &done, Label::kNear);
     // Check if the input was 0x80000000 (kMinInt).
     // If no, then we got an overflow and we deoptimize.
     ExternalReference min_int = ExternalReference::address_of_min_int();
     ucomisd(xmm0, Operand::StaticVariable(min_int));
     j(not_equal, &slow_case, Label::kNear);
     j(parity_even, &slow_case, Label::kNear);  // NaN.
     jmp(&done, Label::kNear);

     // Slow case.
     bind(&slow_case);
     if (input_reg.is(result_reg)) {
       // Input is clobbered. Restore number from double scratch.
       sub(esp, Immediate(kDoubleSize));
-      movdbl(MemOperand(esp, 0), xmm0);
+      movsd(MemOperand(esp, 0), xmm0);
       SlowTruncateToI(result_reg, esp, 0);
       add(esp, Immediate(kDoubleSize));
     } else {
       SlowTruncateToI(result_reg, input_reg);
     }
   } else {
     SlowTruncateToI(result_reg, input_reg);
   }
   bind(&done);
 }


 void MacroAssembler::TaggedToI(Register result_reg,
                                Register input_reg,
                                XMMRegister temp,
                                MinusZeroMode minus_zero_mode,
                                Label* lost_precision) {
   Label done;
   ASSERT(!temp.is(xmm0));

   cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
       isolate()->factory()->heap_number_map());
   j(not_equal, lost_precision, Label::kNear);

   if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
     ASSERT(!temp.is(no_xmm_reg));
     CpuFeatureScope scope(this, SSE2);

-    movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     cvttsd2si(result_reg, Operand(xmm0));
     Cvtsi2sd(temp, Operand(result_reg));
     ucomisd(xmm0, temp);
     RecordComment("Deferred TaggedToI: lost precision");
     j(not_equal, lost_precision, Label::kNear);
     RecordComment("Deferred TaggedToI: NaN");
     j(parity_even, lost_precision, Label::kNear);
     if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
       test(result_reg, Operand(result_reg));
       j(not_zero, &done, Label::kNear);
(...skipping 34 matching lines...)
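
The SSE2 branch of TaggedToI detects lost precision with a round trip: truncate to int32 (cvttsd2si), convert back (Cvtsi2sd), and compare with ucomisd; any fractional part or out-of-range value makes the round trip unequal, and a NaN input raises the parity flag. A standalone sketch of the same test, with plain C++ casts standing in for the two conversions and the FAIL_ON_MINUS_ZERO sign check omitted:

    // Standalone illustration (not V8 code) of TaggedToI's round-trip test:
    // truncate to int32, convert back, and compare with the original double.
    #include <cmath>
    #include <cstdio>

    bool tagged_to_i(double input, int* out) {
      if (std::isnan(input)) return false;                  // the parity_even branch
      int truncated = static_cast<int>(input);              // stands in for cvttsd2si
      double round_trip = static_cast<double>(truncated);   // stands in for Cvtsi2sd
      if (round_trip != input) return false;                // "lost precision"
      *out = truncated;
      return true;
    }

    int main() {
      int v;
      printf("%d\n", tagged_to_i(42.0, &v));  // 1: exact
      printf("%d\n", tagged_to_i(42.5, &v));  // 0: fraction lost
      printf("%d\n", tagged_to_i(NAN, &v));   // 0: NaN never converts
      return 0;
    }
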

       bind(&lost_precision_pop);
       fstp(0);
       jmp(lost_precision, Label::kNear);
     }
   }
   bind(&done);
 }


-
-static double kUint32Bias =
-    static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
 void MacroAssembler::LoadUint32(XMMRegister dst,
                                 Register src,
                                 XMMRegister scratch) {
-  ASSERT(!Serializer::enabled());
   Label done;
   cmp(src, Immediate(0));
-  movdbl(scratch,
-         Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
+  ExternalReference uint32_bias =
+        ExternalReference::address_of_uint32_bias();
+  movsd(scratch, Operand::StaticVariable(uint32_bias));
   Cvtsi2sd(dst, src);
   j(not_sign, &done, Label::kNear);
   addsd(dst, scratch);
   bind(&done);
 }


 void MacroAssembler::LoadUint32NoSSE2(Register src) {
-  ASSERT(!Serializer::enabled());
   Label done;
   push(src);
   fild_s(Operand(esp, 0));
   cmp(src, Immediate(0));
   j(not_sign, &done, Label::kNear);
-  fld_d(Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
+  ExternalReference uint32_bias =
+        ExternalReference::address_of_uint32_bias();
+  fld_d(Operand::StaticVariable(uint32_bias));
   faddp(1);
   bind(&done);
   add(esp, Immediate(kPointerSize));
 }
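
Both LoadUint32 variants rely on the same arithmetic: Cvtsi2sd and fild_s interpret the register as a signed 32-bit integer, so an unsigned value with the top bit set converts as a negative number, and adding 2^32 corrects it. The patch replaces the file-local kUint32Bias static (2^32, as the deleted initializer shows) with an external reference, which is presumably also why the ASSERT(!Serializer::enabled()) guards can go, since no raw static address is embedded any more. A standalone sketch of the bias arithmetic:

    // Standalone illustration (not V8 code): converting a uint32 through a
    // signed int32 conversion plus a 2^32 bias, the way LoadUint32 does.
    #include <cstdint>
    #include <cstdio>

    double load_uint32(uint32_t src) {
      double d = static_cast<double>(static_cast<int32_t>(src));  // Cvtsi2sd's view
      if (static_cast<int32_t>(src) < 0) {  // the not_sign branch is taken otherwise
        d += 4294967296.0;                  // kUint32Bias: 2^32
      }
      return d;
    }

    int main() {
      printf("%.1f\n", load_uint32(7u));           // 7.0
      printf("%.1f\n", load_uint32(0xFFFFFFFFu));  // 4294967295.0
      return 0;
    }
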


 void MacroAssembler::RecordWriteArray(Register object,
                                       Register value,
                                       Register index,
                                       SaveFPRegsMode save_fp,
(...skipping 327 matching lines...)
   uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
   cmp(FieldOperand(maybe_number, offset),
       Immediate(kNaNOrInfinityLowerBoundUpper32));
   j(greater_equal, &maybe_nan, Label::kNear);

   bind(&not_nan);
   ExternalReference canonical_nan_reference =
       ExternalReference::address_of_canonical_non_hole_nan();
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope use_sse2(this, SSE2);
-    movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+    movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
     bind(&have_double_value);
-    movdbl(FieldOperand(elements, key, times_4,
+    movsd(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset),
           scratch2);
   } else {
     fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
     bind(&have_double_value);
     fstp_d(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset));
   }
   jmp(&done);

   bind(&maybe_nan);
   // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
   // it's an Infinity, and the non-NaN code path applies.
   j(greater, &is_nan, Label::kNear);
   cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
   j(zero, &not_nan);
   bind(&is_nan);
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope use_sse2(this, SSE2);
-    movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+    movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
   } else {
     fld_d(Operand::StaticVariable(canonical_nan_reference));
   }
   jmp(&have_double_value, Label::kNear);

   bind(&smi_value);
   // Value is a smi. Convert to a double and store.
   // Preserve original value.
   mov(scratch1, maybe_number);
   SmiUntag(scratch1);
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope fscope(this, SSE2);
     Cvtsi2sd(scratch2, scratch1);
-    movdbl(FieldOperand(elements, key, times_4,
+    movsd(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset),
           scratch2);
   } else {
     push(scratch1);
     fild_s(Operand(esp, 0));
     pop(scratch1);
     fstp_d(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset));
   }
   bind(&done);
(...skipping 140 matching lines...)
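
The maybe_nan path in the hunk above uses the IEEE-754 encoding: when the upper 32 bits of the double fall in the NaN/Infinity range (exponent all ones), a non-zero fraction means NaN, which is replaced by the canonical NaN before being stored, while a zero fraction means Infinity and takes the normal store path. A standalone sketch of that classification by bit pattern (the emitted code does it with ordered compares against kNaNOrInfinityLowerBoundUpper32 and zero rather than explicit masking):

    // Standalone illustration (not V8 code): classifying a double from its two
    // 32-bit halves, the way the maybe_nan path inspects the upper and lower words.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    const char* classify(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      uint32_t upper = static_cast<uint32_t>(bits >> 32);  // word at kValueOffset + 4
      uint32_t lower = static_cast<uint32_t>(bits);        // word at kValueOffset
      if ((upper & 0x7FF00000u) != 0x7FF00000u) return "ordinary number";
      // Exponent is all ones: Infinity if the fraction is zero, NaN otherwise.
      if ((upper & 0x000FFFFFu) == 0 && lower == 0) return "infinity";
      return "NaN (replaced by the canonical NaN before the store)";
    }

    int main() {
      printf("%s\n", classify(1.5));
      printf("%s\n", classify(std::numeric_limits<double>::infinity()));
      printf("%s\n", classify(std::numeric_limits<double>::quiet_NaN()));
      return 0;
    }
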


 void MacroAssembler::AssertNotSmi(Register object) {
   if (emit_debug_code()) {
     test(object, Immediate(kSmiTagMask));
     Check(not_equal, kOperandIsASmi);
   }
 }


+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+  if (frame_mode == BUILD_STUB_FRAME) {
+    push(ebp);  // Caller's frame pointer.
+    mov(ebp, esp);
+    push(esi);  // Callee's context.
+    push(Immediate(Smi::FromInt(StackFrame::STUB)));
+  } else {
+    PredictableCodeSizeScope predictible_code_size_scope(this,
+        kNoCodeAgeSequenceLength);
+    if (FLAG_optimize_for_size && FLAG_age_code) {
+      // Pre-age the code.
+      call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+           RelocInfo::CODE_AGE_SEQUENCE);
+      Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+    } else {
+      push(ebp);  // Caller's frame pointer.
+      mov(ebp, esp);
+      push(esi);  // Callee's context.
+      push(edi);  // Callee's JS function.
+    }
+  }
+}
+
+
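The new Prologue helper emits either a stub-frame marker or the standard JS frame setup, and in the code-aging configuration it substitutes a call so the entry sequence can be patched later; the Nop pads to kNoCodeAgeSequenceLength so every variant appears to occupy the same number of bytes. A small sketch of the frame slots produced by the normal (non-stub, non-aged) path, assuming the ia32 value kPointerSize == 4; the offsets are illustrative, not V8's named constants:

    // Standalone illustration (not V8 code): the frame slots laid down by the
    // normal Prologue path (push ebp; mov ebp, esp; push esi; push edi).
    #include <cstdio>

    int main() {
      const int kPointerSize = 4;  // ia32 assumption
      printf("return address: [ebp + %d]\n", 1 * kPointerSize);
      printf("saved ebp:      [ebp + %d]\n", 0 * kPointerSize);
      printf("context (esi):  [ebp - %d]\n", 1 * kPointerSize);
      printf("function (edi): [ebp - %d]\n", 2 * kPointerSize);
      return 0;
    }
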
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, esp);
   push(esi);
   push(Immediate(Smi::FromInt(type)));
   push(Immediate(CodeObject()));
   if (emit_debug_code()) {
     cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
     Check(not_equal, kCodeObjectNotProperlyPatched);
   }
(...skipping 35 matching lines...)

 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
   // Optionally save all XMM registers.
   if (save_doubles) {
     CpuFeatureScope scope(this, SSE2);
     int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
     sub(esp, Immediate(space));
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
     }
   } else {
     sub(esp, Immediate(argc * kPointerSize));
   }

   // Get the required frame alignment for the OS.
   const int kFrameAlignment = OS::ActivationFrameAlignment();
   if (kFrameAlignment > 0) {
     ASSERT(IsPowerOf2(kFrameAlignment));
     and_(esp, -kFrameAlignment);
(...skipping 23 matching lines...)
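
With save_doubles set, EnterExitFrameEpilogue reserves room for every XMM register plus the argument slots and spills each register at a fixed offset below ebp; the -2 * kPointerSize base skips the two words the exit frame has already stored under the frame pointer, and LeaveExitFrame below reloads from the same offsets. A standalone sketch of that offset arithmetic, assuming 8 XMM registers, kDoubleSize == 8, kPointerSize == 4, and an arbitrary argc of 2:

    // Standalone illustration (not V8 code): where EnterExitFrameEpilogue spills
    // each XMM register, under the assumptions stated above.
    #include <cstdio>

    int main() {
      const int kNumRegisters = 8, kDoubleSize = 8, kPointerSize = 4, argc = 2;
      int space = kNumRegisters * kDoubleSize + argc * kPointerSize;  // sub(esp, space)
      const int offset = -2 * kPointerSize;  // below the two fixed exit-frame slots
      printf("reserved: %d bytes\n", space);
      for (int i = 0; i < kNumRegisters; i++) {
        printf("xmm%d -> [ebp%d]\n", i, offset - ((i + 1) * kDoubleSize));
      }
      return 0;
    }
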
 }


 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
   // Optionally restore all XMM registers.
   if (save_doubles) {
     CpuFeatureScope scope(this, SSE2);
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
     }
   }

   // Get the return address from the stack and restore the frame pointer.
   mov(ecx, Operand(ebp, 1 * kPointerSize));
   mov(ebp, Operand(ebp, 0 * kPointerSize));

   // Pop the arguments and the receiver from the caller stack.
   lea(esp, Operand(esi, 1 * kPointerSize));

(...skipping 1937 matching lines...)
   Register index = scratch;
   Register probe = mask;
   mov(probe,
       FieldOperand(number_string_cache,
                    index,
                    times_twice_pointer_size,
                    FixedArray::kHeaderSize));
   JumpIfSmi(probe, not_found);
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope fscope(this, SSE2);
-    movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+    movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
     ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
   } else {
     fld_d(FieldOperand(object, HeapNumber::kValueOffset));
     fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
     FCmp();
   }
   j(parity_even, not_found);  // Bail out if NaN is involved.
   j(not_equal, not_found);  // The cache did not contain this value.
   jmp(&load_result_from_cache, Label::kNear);

(...skipping 417 matching lines...)
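
In the number-string cache probe above, both comparison paths (ucomisd and FCmp) report NaN as unordered, which sets the parity flag on ia32; the j(parity_even, not_found) therefore fires before the equality test, so a NaN key can never produce a cache hit. A standalone sketch of the comparison semantics, with ordinary double compares standing in for the emitted instructions:

    // Standalone illustration (not V8 code): the value/probe comparison at the
    // end of the number-string cache lookup. NaN compares unordered and is
    // treated as "not found" rather than as a match.
    #include <cmath>
    #include <cstdio>

    bool probe_matches(double object_value, double probe_value) {
      if (std::isnan(object_value) || std::isnan(probe_value)) return false;  // parity_even
      return object_value == probe_value;  // otherwise not_equal -> not_found
    }

    int main() {
      printf("%d\n", probe_matches(1.25, 1.25));  // 1
      printf("%d\n", probe_matches(1.25, 2.5));   // 0
      printf("%d\n", probe_matches(NAN, NAN));    // 0: NaN never hits the cache
      return 0;
    }
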
   j(not_equal, call_runtime);

   mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
   cmp(ecx, isolate()->factory()->null_value());
   j(not_equal, &next);
 }


 void MacroAssembler::TestJSArrayForAllocationMemento(
     Register receiver_reg,
-    Register scratch_reg) {
-  Label no_memento_available;
-
+    Register scratch_reg,
+    Label* no_memento_found) {
   ExternalReference new_space_start =
       ExternalReference::new_space_start(isolate());
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());

   lea(scratch_reg, Operand(receiver_reg,
       JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
   cmp(scratch_reg, Immediate(new_space_start));
-  j(less, &no_memento_available);
+  j(less, no_memento_found);
   cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
-  j(greater, &no_memento_available);
+  j(greater, no_memento_found);
   cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
       Immediate(isolate()->factory()->allocation_memento_map()));
-  bind(&no_memento_available);
 }
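
The rewritten TestJSArrayForAllocationMemento takes the caller's no_memento_found label instead of binding a local one, so callers decide where a miss branches to. The check itself computes the address just past where an AllocationMemento would sit, bails out if that address lies outside the new-space range bounded by new_space_start and the allocation top, and otherwise compares the candidate map slot against the allocation_memento_map. A standalone sketch of the bounds test; the sizes are stand-ins, not V8's real constants:

    // Standalone illustration (not V8 code): the address check performed by
    // TestJSArrayForAllocationMemento before the map comparison.
    #include <cstdint>
    #include <cstdio>

    bool memento_may_follow(uintptr_t receiver, uintptr_t new_space_start,
                            uintptr_t new_space_top,
                            uintptr_t js_array_size, uintptr_t memento_size) {
      // Address one past the would-be memento (the real code also subtracts
      // kHeapObjectTag to untag the receiver; omitted here).
      uintptr_t end = receiver + js_array_size + memento_size;
      if (end < new_space_start) return false;  // j(less, no_memento_found)
      if (end > new_space_top) return false;    // j(greater, no_memento_found)
      return true;  // caller then compares the map slot at end - memento_size
    }

    int main() {
      printf("%d\n", memento_may_follow(0x1000, 0x0800, 0x2000, 16, 8));  // 1
      printf("%d\n", memento_may_follow(0x3000, 0x0800, 0x2000, 16, 8));  // 0
      return 0;
    }
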


 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_IA32