Chromium Code Reviews

Diff: src/x64/code-stubs-x64.cc

Issue 6883159: X64: Add a macro to load a double from memory, using an SSE3 instruction when present. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 9 years, 8 months ago
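
This CL routes double loads through a new LoadDbl macro instead of emitting movsd directly. A minimal sketch of what such a macro could look like (the macro name comes from the diff below; the SSE3 instruction choice, movddup, and the CpuFeatures scaffolding are assumptions rather than the committed implementation):

// Sketch only: loads a 64-bit double from memory into an XMM register,
// preferring an SSE3 instruction when the CPU supports it.
void MacroAssembler::LoadDbl(XMMRegister dst, const Operand& src) {
  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatures::Scope enable(SSE3);
    // movddup (SSE3) loads the double and duplicates it into both halves
    // of dst, avoiding movsd's merge dependency on dst's old upper half.
    movddup(dst, src);
  } else {
    movsd(dst, src);  // SSE2 fallback.
  }
}

Whatever the exact encoding chosen, the point of the macro is that every load site below picks up the faster path automatically on SSE3-capable processors.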
1    // Copyright 2011 the V8 project authors. All rights reserved.
2    // Redistribution and use in source and binary forms, with or without
3    // modification, are permitted provided that the following conditions are
4    // met:
5    //
6    //     * Redistributions of source code must retain the above copyright
7    //       notice, this list of conditions and the following disclaimer.
8    //     * Redistributions in binary form must reproduce the above
9    //       copyright notice, this list of conditions and the following
10   //       disclaimer in the documentation and/or other materials provided
(...skipping 1014 matching lines...)
1025     // Check if cache matches: Double value is stored in uint32_t[2] array.
1026     NearLabel cache_miss;
1027     __ cmpq(rbx, Operand(rcx, 0));
1028     __ j(not_equal, &cache_miss);
1029     // Cache hit!
1030     __ movq(rax, Operand(rcx, 2 * kIntSize));
1031     if (tagged) {
1032       __ fstp(0);  // Clear FPU stack.
1033       __ ret(kPointerSize);
1034     } else {  // UNTAGGED.
1035 -     __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1035 +     __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1036       __ Ret();
1037     }

1039     __ bind(&cache_miss);
1040     // Update cache with new value.
1041     if (tagged) {
1042       __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
1043     } else {  // UNTAGGED.
1044       __ AllocateHeapNumber(rax, rdi, &skip_cache);
1045       __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
1046       __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
1047     }
1048     GenerateOperation(masm);
1049     __ movq(Operand(rcx, 0), rbx);
1050     __ movq(Operand(rcx, 2 * kIntSize), rax);
1051     __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
1052     if (tagged) {
1053       __ ret(kPointerSize);
1054     } else {  // UNTAGGED.
1055 -     __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1055 +     __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1056       __ Ret();

1058       // Skip cache and return answer directly, only in untagged case.
1059       __ bind(&skip_cache);
1060       __ subq(rsp, Immediate(kDoubleSize));
1061       __ movsd(Operand(rsp, 0), xmm1);
1062       __ fld_d(Operand(rsp, 0));
1063       GenerateOperation(masm);
1064       __ fstp_d(Operand(rsp, 0));
1065 -     __ movsd(xmm1, Operand(rsp, 0));
1065 +     __ LoadDbl(xmm1, Operand(rsp, 0));
1066       __ addq(rsp, Immediate(kDoubleSize));
1067       // We return the value in xmm1 without adding it to the cache, but
1068       // we cause a scavenging GC so that future allocations will succeed.
1069       __ EnterInternalFrame();
1070       // Allocate an unused object bigger than a HeapNumber.
1071       __ Push(Smi::FromInt(2 * kDoubleSize));
1072       __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
1073       __ LeaveInternalFrame();
1074       __ Ret();
1075     }

1077     // Call runtime, doing whatever allocation and cleanup is necessary.
1078     if (tagged) {
1079       __ bind(&runtime_call_clear_stack);
1080       __ fstp(0);
1081       __ bind(&runtime_call);
1082       __ TailCallExternalReference(
1083           ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
1084     } else {  // UNTAGGED.
1085       __ bind(&runtime_call_clear_stack);
1086       __ bind(&runtime_call);
1087       __ AllocateHeapNumber(rax, rdi, &skip_cache);
1088       __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
1089       __ EnterInternalFrame();
1090       __ push(rax);
1091       __ CallRuntime(RuntimeFunction(), 1);
1092       __ LeaveInternalFrame();
1093 -     __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1093 +     __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1094       __ Ret();
1095     }
1096   }


1099   Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
1100     switch (type_) {
1101       // Add more cases when necessary.
1102       case TranscendentalCache::SIN: return Runtime::kMath_sin;
1103       case TranscendentalCache::COS: return Runtime::kMath_cos;
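
The cache probe above (lines 1025-1050) hard-codes two offsets: the 64-bit key compare at offset 0 and the result load at 2 * kIntSize. That implies a cache element laid out roughly as follows (a sketch; the field names are assumptions for illustration):

// Illustrative layout of one transcendental cache element.
struct Element {
  uint32_t in[2];  // bits of the input double; compared as one 64-bit word
                   // by __ cmpq(rbx, Operand(rcx, 0))
  Object* output;  // cached result HeapNumber; read at offset 2 * kIntSize
                   // by __ movq(rax, Operand(rcx, 2 * kIntSize))
};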
(...skipping 248 matching lines...)
1352     __ cvtlsi2sd(xmm0, kScratchRegister);
1353     __ SmiToInteger32(kScratchRegister, rax);
1354     __ cvtlsi2sd(xmm1, kScratchRegister);
1355   }


1358   void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
1359     Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
1360     // Load operand in rdx into xmm0.
1361     __ JumpIfSmi(rdx, &load_smi_rdx);
1362 -   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1362 +   __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1363     // Load operand in rax into xmm1.
1364     __ JumpIfSmi(rax, &load_smi_rax);
1365     __ bind(&load_nonsmi_rax);
1366 -   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1366 +   __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1367     __ jmp(&done);

1369     __ bind(&load_smi_rdx);
1370     __ SmiToInteger32(kScratchRegister, rdx);
1371     __ cvtlsi2sd(xmm0, kScratchRegister);
1372     __ JumpIfNotSmi(rax, &load_nonsmi_rax);

1374     __ bind(&load_smi_rax);
1375     __ SmiToInteger32(kScratchRegister, rax);
1376     __ cvtlsi2sd(xmm1, kScratchRegister);

1378     __ bind(&done);
1379   }


1382   void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
1383                                                     Label* not_numbers) {
1384     Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
1385     // Load operand in rdx into xmm0, or branch to not_numbers.
1386     __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
1387     __ JumpIfSmi(rdx, &load_smi_rdx);
1388     __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
1389     __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
1390 -   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1390 +   __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1391     // Load operand in rax into xmm1, or branch to not_numbers.
1392     __ JumpIfSmi(rax, &load_smi_rax);

1394     __ bind(&load_nonsmi_rax);
1395     __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
1396     __ j(not_equal, not_numbers);
1397 -   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1397 +   __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1398     __ jmp(&done);

1400     __ bind(&load_smi_rdx);
1401     __ SmiToInteger32(kScratchRegister, rdx);
1402     __ cvtlsi2sd(xmm0, kScratchRegister);
1403     __ JumpIfNotSmi(rax, &load_nonsmi_rax);

1405     __ bind(&load_smi_rax);
1406     __ SmiToInteger32(kScratchRegister, rax);
1407     __ cvtlsi2sd(xmm1, kScratchRegister);
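
Both load helpers share one pattern: a heap number's double is read directly from its value field (now via LoadDbl), while a smi is first unpacked with SmiToInteger32 and then converted with cvtlsi2sd. On x64 a smi keeps its 32-bit payload in the upper half of the tagged word, so unpacking is a single shift. A standalone C++ illustration (not V8 code):

#include <cstdint>

// On x64, V8 stores a smi's payload in the upper 32 bits of the tagged
// word; SmiToInteger32 is essentially this arithmetic shift.
int32_t SmiToInteger32(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> 32);
}

// The smi branch of the helpers above: unpack, then convert the integer
// to a double (the job cvtlsi2sd does in the generated code).
double LoadSmiOperand(intptr_t tagged_smi) {
  return static_cast<double>(SmiToInteger32(tagged_smi));
}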
(...skipping 13 matching lines...)
1421     Register smi_result = scratch1;
1422     Label done;

1424     __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

1426     NearLabel first_smi, check_second;
1427     __ JumpIfSmi(first, &first_smi);
1428     __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
1429     __ j(not_equal, on_not_smis);
1430     // Convert HeapNumber to smi if possible.
1431 -   __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
1431 +   __ LoadDbl(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
1432     __ movq(scratch2, xmm0);
1433     __ cvttsd2siq(smi_result, xmm0);
1434     // Check if conversion was successful by converting back and
1435     // comparing to the original double's bits.
1436     __ cvtlsi2sd(xmm1, smi_result);
1437     __ movq(kScratchRegister, xmm1);
1438     __ cmpq(scratch2, kScratchRegister);
1439     __ j(not_equal, on_not_smis);
1440     __ Integer32ToSmi(first, smi_result);

1442     __ bind(&check_second);
1443     __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
1444     __ bind(&first_smi);
1445     if (FLAG_debug_code) {
1446       // Second should be non-smi if we get here.
1447       __ AbortIfSmi(second);
1448     }
1449     __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
1450     __ j(not_equal, on_not_smis);
1451     // Convert second to smi, if possible.
1452 -   __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
1452 +   __ LoadDbl(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
1453     __ movq(scratch2, xmm0);
1454     __ cvttsd2siq(smi_result, xmm0);
1455     __ cvtlsi2sd(xmm1, smi_result);
1456     __ movq(kScratchRegister, xmm1);
1457     __ cmpq(scratch2, kScratchRegister);
1458     __ j(not_equal, on_not_smis);
1459     __ Integer32ToSmi(second, smi_result);
1460     if (on_success != NULL) {
1461       __ jmp(on_success);
1462     } else {
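
The smi-conversion check above (lines 1431-1440 and 1452-1459) round-trips the value: truncate the double to an integer, convert back, and compare raw bit patterns. A standalone C++ rendering of the same idea (illustration only; it assumes the input is in int64_t range, since out-of-range truncation is undefined behavior in portable C++, whereas cvttsd2siq simply produces the indefinite-integer value):

#include <cstdint>
#include <cstring>

bool ConvertsToSmiLosslessly(double value) {
  int64_t truncated = static_cast<int64_t>(value);      // like cvttsd2siq
  double round_trip = static_cast<int32_t>(truncated);  // like cvtlsi2sd,
                                                        // which reads 32 bits
  uint64_t value_bits, round_trip_bits;
  std::memcpy(&value_bits, &value, sizeof(value));
  std::memcpy(&round_trip_bits, &round_trip, sizeof(round_trip));
  // Comparing bits rather than values also rejects -0.0, whose round trip
  // is +0.0; a smi cannot represent -0.0 or a fractional part.
  return value_bits == round_trip_bits;
}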
(...skipping 113 matching lines...)
1576     Label powi;
1577     __ SmiToInteger32(rdx, rdx);
1578     __ cvtlsi2sd(xmm0, rdx);
1579     __ jmp(&powi);
1580     // Exponent is a smi and base is a heapnumber.
1581     __ bind(&base_nonsmi);
1582     __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
1583                    Heap::kHeapNumberMapRootIndex);
1584     __ j(not_equal, &call_runtime);

1586 -   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1586 +   __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));

1588     // Optimized version of pow if exponent is a smi.
1589     // xmm0 contains the base.
1590     __ bind(&powi);
1591     __ SmiToInteger32(rax, rax);

1593     // Save exponent in base as we need to check if exponent is negative later.
1594     // We know that base and exponent are in different registers.
1595     __ movq(rdx, rax);

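The 29 skipped lines presumably hold the integer-power loop itself; the classic technique for that (an assumption about the elided code, sketched here only to make the surrounding checks readable) is exponentiation by squaring over the bits of the exponent saved in rdx:

// Illustration only: exponentiation by squaring for an integer exponent.
double PowInt(double base, int exponent) {
  bool negative = exponent < 0;
  // Negate via unsigned arithmetic so INT_MIN does not overflow.
  unsigned n = negative ? 0u - static_cast<unsigned>(exponent)
                        : static_cast<unsigned>(exponent);
  double result = 1.0;
  while (n != 0) {
    if (n & 1) result *= base;  // fold in the current exponent bit
    base *= base;               // square the base for the next bit
    n >>= 1;
  }
  return negative ? 1.0 / result : result;
}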
(...skipping 29 matching lines...)
1626     __ j(equal, &call_runtime);

1628     __ jmp(&allocate_return);

1630     // Exponent (or both) is a heapnumber - no matter what we should now work
1631     // on doubles.
1632     __ bind(&exponent_nonsmi);
1633     __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
1634                    Heap::kHeapNumberMapRootIndex);
1635     __ j(not_equal, &call_runtime);
1636 -   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1636 +   __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1637     // Test if exponent is nan.
1638     __ ucomisd(xmm1, xmm1);
1639     __ j(parity_even, &call_runtime);

1641     NearLabel base_not_smi;
1642     NearLabel handle_special_cases;
1643     __ JumpIfNotSmi(rdx, &base_not_smi);
1644     __ SmiToInteger32(rdx, rdx);
1645     __ cvtlsi2sd(xmm0, rdx);
1646     __ jmp(&handle_special_cases);

1648     __ bind(&base_not_smi);
1649     __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
1650                    Heap::kHeapNumberMapRootIndex);
1651     __ j(not_equal, &call_runtime);
1652     __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
1653     __ andl(rcx, Immediate(HeapNumber::kExponentMask));
1654     __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
1655     // base is NaN or +/-Infinity
1656     __ j(greater_equal, &call_runtime);
1657 -   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1657 +   __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));

1659     // base is in xmm0 and exponent is in xmm1.
1660     __ bind(&handle_special_cases);
1661     NearLabel not_minus_half;
1662     // Test for -0.5.
1663     // Load xmm2 with -0.5.
1664     __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
1665     __ movq(xmm2, rcx);
1666     // xmm2 now has -0.5.
1667     __ ucomisd(xmm2, xmm1);
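
Two IEEE-754 facts drive the special-case checks above: a double is NaN or +/-Infinity exactly when all eleven exponent bits are set (the kExponentMask test at lines 1652-1656; the stub inspects the high 32 bits, where the exponent field lives), and 0xBFE0000000000000 decodes to -0.5 (sign 1, biased exponent 0x3FE, mantissa 0, i.e. -1.0 * 2^-1). The mask test in standalone form:

#include <cstdint>
#include <cstring>

// NaN or +/-Infinity exactly when the exponent field is all ones. The
// 64-bit mask here is equivalent to the stub's 32-bit high-word test.
bool IsNaNOrInfinity(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(v));
  const uint64_t kExponentMask = 0x7FF0000000000000ULL;
  return (bits & kExponentMask) == kExponentMask;
}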
(...skipping 706 matching lines...)
2374       GenerateConvertHashCodeToIndex(masm, scratch, mask);

2376       Register index = scratch;
2377       Register probe = mask;
2378       __ movq(probe,
2379               FieldOperand(number_string_cache,
2380                            index,
2381                            times_1,
2382                            FixedArray::kHeaderSize));
2383       __ JumpIfSmi(probe, not_found);
2384 -     __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2384 +     __ LoadDbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2385 -     __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
2385 +     __ LoadDbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
2386       __ ucomisd(xmm0, xmm1);
2387       __ j(parity_even, not_found);  // Bail out if NaN is involved.
2388       __ j(not_equal, not_found);  // The cache did not contain this value.
2389       __ jmp(&load_result_from_cache);
2390     }

2392     __ bind(&is_smi);
2393     __ SmiToInteger32(scratch, object);
2394     GenerateConvertHashCodeToIndex(masm, scratch, mask);
(...skipping 121 matching lines...)
2517       }
2518       __ Set(rax, EQUAL);
2519       __ ret(0);

2521       __ bind(&heap_number);
2522       // It is a heap number, so return equal if it's not NaN.
2523       // For NaN, return 1 for every condition except greater and
2524       // greater-equal. Return -1 for them, so the comparison yields
2525       // false for all conditions except not-equal.
2526       __ Set(rax, EQUAL);
2527 -     __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
2527 +     __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
2528       __ ucomisd(xmm0, xmm0);
2529       __ setcc(parity_even, rax);
2530       // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
2531       if (cc_ == greater_equal || cc_ == greater) {
2532         __ neg(rax);
2533       }
2534       __ ret(0);
2535     }

2537     __ bind(&not_identical);
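
The NaN convention in the comment at lines 2522-2525 is the standard three-way-compare trick: the stub materializes -1/0/1 in rax and the caller tests it against the original condition, so for NaN it must return whichever non-zero value makes that condition fail. The same logic in plain C++ (function name invented for illustration):

// For identical operands the answer is EQUAL (0) unless the value is NaN.
// NaN must compare false under <, <=, ==, >= and >, and true only under
// !=, so return 1 ("greater") when the condition is a less-style test and
// -1 ("less") when it is greater or greater-equal; either way the tested
// condition fails while not-equal holds.
int CompareIdentical(double v, bool cc_is_greater_or_greater_equal) {
  if (v != v) {  // NaN; ucomisd sets the parity flag in the stub
    return cc_is_greater_or_greater_equal ? -1 : 1;
  }
  return 0;  // EQUAL
}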
(...skipping 1922 matching lines...)
4460     NearLabel miss;
4461     Condition either_smi = masm->CheckEitherSmi(rax, rdx);
4462     __ j(either_smi, &generic_stub);

4464     __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
4465     __ j(not_equal, &miss);
4466     __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
4467     __ j(not_equal, &miss);

4469     // Load left and right operand
4470 -   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
4470 +   __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
4471 -   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
4471 +   __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));

4473     // Compare operands
4474     __ ucomisd(xmm0, xmm1);

4476     // Don't base result on EFLAGS when a NaN is involved.
4477     __ j(parity_even, &unordered);

4479     // Return a result of -1, 0, or 1, based on EFLAGS.
4480     // Performing mov, because xor would destroy the flag register.
4481     __ movl(rax, Immediate(0));
(...skipping 62 matching lines...)
4544     // Do a tail call to the rewritten stub.
4545     __ jmp(rdi);
4546   }


4549   #undef __

4551   } }  // namespace v8::internal

4553   #endif  // V8_TARGET_ARCH_X64
