Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(96)

Side by Side Diff: src/x64/stub-cache-x64.cc

Issue 64313002: Introduce MoveInteger64 instruction into X64 Macro Assembler (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Refine a comment — created 7 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/x64/macro-assembler-x64.cc ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 2275 matching lines...) Expand 10 before | Expand all | Expand 10 after
2286 2286
2287 // Smi tag and return. 2287 // Smi tag and return.
2288 __ Integer32ToSmi(rax, rax); 2288 __ Integer32ToSmi(rax, rax);
2289 __ bind(&smi); 2289 __ bind(&smi);
2290 __ ret(2 * kPointerSize); 2290 __ ret(2 * kPointerSize);
2291 2291
2292 // Check if the argument is < 2^kMantissaBits. 2292 // Check if the argument is < 2^kMantissaBits.
2293 Label already_round; 2293 Label already_round;
2294 __ bind(&conversion_failure); 2294 __ bind(&conversion_failure);
2295 int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000); 2295 int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
2296 __ movq(rbx, kTwoMantissaBits); 2296 __ MoveInteger64(rbx, kTwoMantissaBits);
2297 __ movq(xmm1, rbx); 2297 __ movq(xmm1, rbx);
2298 __ ucomisd(xmm0, xmm1); 2298 __ ucomisd(xmm0, xmm1);
2299 __ j(above_equal, &already_round); 2299 __ j(above_equal, &already_round);
2300 2300
2301 // Save a copy of the argument. 2301 // Save a copy of the argument.
2302 __ movaps(xmm2, xmm0); 2302 __ movaps(xmm2, xmm0);
2303 2303
2304 // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits. 2304 // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
2305 __ addsd(xmm0, xmm1); 2305 __ addsd(xmm0, xmm1);
2306 __ subsd(xmm0, xmm1); 2306 __ subsd(xmm0, xmm1);
2307 2307
2308 // Compare the argument and the tentative result to get the right mask: 2308 // Compare the argument and the tentative result to get the right mask:
2309 // if xmm2 < xmm0: 2309 // if xmm2 < xmm0:
2310 // xmm2 = 1...1 2310 // xmm2 = 1...1
2311 // else: 2311 // else:
2312 // xmm2 = 0...0 2312 // xmm2 = 0...0
2313 __ cmpltsd(xmm2, xmm0); 2313 __ cmpltsd(xmm2, xmm0);
2314 2314
2315 // Subtract 1 if the argument was less than the tentative result. 2315 // Subtract 1 if the argument was less than the tentative result.
2316 int64_t kOne = V8_INT64_C(0x3ff0000000000000); 2316 int64_t kOne = V8_INT64_C(0x3ff0000000000000);
2317 __ movq(rbx, kOne); 2317 __ MoveInteger64(rbx, kOne);
2318 __ movq(xmm1, rbx); 2318 __ movq(xmm1, rbx);
2319 __ andpd(xmm1, xmm2); 2319 __ andpd(xmm1, xmm2);
2320 __ subsd(xmm0, xmm1); 2320 __ subsd(xmm0, xmm1);
2321 2321
2322 // Return a new heap number. 2322 // Return a new heap number.
2323 __ AllocateHeapNumber(rax, rbx, &slow); 2323 __ AllocateHeapNumber(rax, rbx, &slow);
2324 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); 2324 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
2325 __ ret(2 * kPointerSize); 2325 __ ret(2 * kPointerSize);
2326 2326
2327 // Return the argument (when it's an already round heap number). 2327 // Return the argument (when it's an already round heap number).
(...skipping 818 matching lines...) Expand 10 before | Expand all | Expand 10 after
3146 // ----------------------------------- 3146 // -----------------------------------
3147 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric); 3147 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
3148 } 3148 }
3149 3149
3150 3150
3151 #undef __ 3151 #undef __
3152 3152
3153 } } // namespace v8::internal 3153 } } // namespace v8::internal
3154 3154
3155 #endif // V8_TARGET_ARCH_X64 3155 #endif // V8_TARGET_ARCH_X64
OLDNEW
« no previous file with comments | « src/x64/macro-assembler-x64.cc ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698