Chromium Code Reviews

Unified diff: src/x64/stub-cache-x64.cc

Issue 26216008: Introduce MoveDouble to the X64 MacroAssembler (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 2 months ago
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 2422 matching lines...)
  // If the result is still negative, go to the slow case.
  // This only happens for the most negative smi.
  Label slow;
  __ j(negative, &slow);

  __ ret(2 * kPointerSize);

  // Check if the argument is a heap number and load its value.
  __ bind(&not_smi);
  __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
-  __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));

  // Check the sign of the argument. If the argument is positive,
  // just return it.
  Label negative_sign;
  const int sign_mask_shift =
      (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
  __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
          RelocInfo::NONE64);
  __ testq(rbx, rdi);
  __ j(not_zero, &negative_sign);
  __ ret(2 * kPointerSize);

  // If the argument is negative, clear the sign, and return a new
  // number. We still have the sign mask in rdi.
  __ bind(&negative_sign);
  __ xor_(rbx, rdi);
  __ AllocateHeapNumber(rax, rdx, &slow);
-  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
+  __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
  __ ret(2 * kPointerSize);

  // Tail call the full function. We do not have to patch the receiver
  // because the function makes no use of it.
  __ bind(&slow);
  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
      ? CALL_AS_FUNCTION
      : CALL_AS_METHOD;
  ParameterCount expected(function);
  __ InvokeFunction(function, expected, arguments(),
(...skipping 707 matching lines...)
  // -----------------------------------
  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64
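
The two changed lines sit in the Math.abs call stub: the stub loads the raw 64-bit bit pattern of the HeapNumber's value field into rbx, tests the IEEE-754 sign bit against the mask in rdi, clears it with xor, and stores the bits into a freshly allocated HeapNumber. Judging from the identical operands, MoveDouble appears to be a named wrapper for the movq it replaces, making explicit that the value being moved is a double's bit pattern rather than a tagged pointer. Below is a minimal standalone C++ sketch of the same sign-bit trick; AbsViaSignBit and kSignMask64 are illustrative names, not part of V8.

// Illustrative sketch only (not V8 code). It mirrors what the stub does with
// rbx/rdi: load the double's raw bits, test the sign bit, xor it away, and
// write the bits back.
#include <cstdint>
#include <cstdio>
#include <cstring>

static double AbsViaSignBit(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // MoveDouble(rbx, value field)

  // In the stub the mask is built as HeapNumber::kSignMask shifted up by
  // (kExponentOffset - kValueOffset) * kBitsPerByte; on x64 that lands on
  // bit 63 of the 64-bit payload. Here it is written directly.
  const uint64_t kSignMask64 = uint64_t{1} << 63;

  if (bits & kSignMask64) {   // testq(rbx, rdi); j(not_zero, &negative_sign)
    bits ^= kSignMask64;      // xor_(rbx, rdi): clear the sign bit
    std::memcpy(&value, &bits, sizeof(value));  // MoveDouble(value field, rbx)
  }
  return value;               // a positive argument is returned unchanged
}

int main() {
  std::printf("%g %g %g\n",
              AbsViaSignBit(-1.5), AbsViaSignBit(2.25), AbsViaSignBit(-0.0));
  // Prints: 1.5 2.25 0
  return 0;
}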