Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2253 matching lines...) | |
| 2264 | 2264 |
| 2265 | 2265 |
| 2266 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { | 2266 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { |
| 2267 ASSERT(ToRegister(instr->result()).is(rax)); | 2267 ASSERT(ToRegister(instr->result()).is(rax)); |
| 2268 __ Move(rdi, instr->function()); | 2268 __ Move(rdi, instr->function()); |
| 2269 CallKnownFunction(instr->function(), instr->arity(), instr); | 2269 CallKnownFunction(instr->function(), instr->arity(), instr); |
| 2270 } | 2270 } |
| 2271 | 2271 |
| 2272 | 2272 |
| 2273 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { | 2273 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { |
| 2274 Abort("Unimplemented: %s", "DoDeferredMathAbsTaggedHeapNumber"); | 2274 Register input_reg = ToRegister(instr->InputAt(0)); |
| 2275 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | |
| 2276 Heap::kHeapNumberMapRootIndex); | |
| 2277 DeoptimizeIf(not_equal, instr->environment()); | |
| 2278 | |
| 2279 Label done; | |
| 2280 Register tmp = input_reg.is(rax) ? rcx : rax; | |
| 2281 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx; | |
| 2282 | |
| 2283 // Preserve the value of all registers. | |
| 2284 __ PushSafepointRegisters(); | |
| 2285 | |
| 2286 Label negative; | |
| 2287 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); | |
| 2288 // Check the sign of the argument. If the argument is positive, just | |
| 2289 // return it. We do not need to patch the stack since |input| and | |
| 2290 // |result| are the same register and |input| will be restored | |
| 2291 // unchanged by popping safepoint registers. | |
| 2292 __ testl(tmp, Immediate(HeapNumber::kSignMask)); | |
| 2293 __ j(not_zero, &negative); | |
| 2294 __ jmp(&done); | |
| 2295 | |
| 2296 __ bind(&negative); | |
| 2297 | |
| 2298 Label allocated, slow; | |
| 2299 __ AllocateHeapNumber(tmp, tmp2, &slow); | |
| 2300 __ jmp(&allocated); | |
| 2301 | |
| 2302 // Slow case: Call the runtime system to do the number allocation. | |
| 2303 __ bind(&slow); | |
| 2304 | |
| 2305 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | |
| 2306 RecordSafepointWithRegisters( | |
| 2307 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); | |
| 2308 // Set the pointer to the new heap number in tmp. | |
| 2309 if (!tmp.is(rax)) { | |
| 2310 __ movq(tmp, rax); | |
| 2311 } | |
| 2312 | |
| 2313 // Restore input_reg after call to runtime. | |
| 2314 __ LoadFromSafepointRegisterSlot(input_reg, input_reg); | |
| 2315 | |
| 2316 __ bind(&allocated); | |
| 2317 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset)); | |
| 2318 __ shl(tmp2, Immediate(1)); | |
| 2319 __ shr(tmp2, Immediate(1)); | |
| 2320 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2); | |
| 2321 __ StoreToSafepointRegisterSlot(input_reg, tmp); | |
| 2322 | |
| 2323 __ bind(&done); | |
| 2324 __ PopSafepointRegisters(); | |
| 2325 } | |
| 2326 | |
| 2327 | |
| 2328 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { | |
| 2329 Register input_reg = ToRegister(instr->InputAt(0)); | |
| 2330 __ testl(input_reg, input_reg); | |
| 2331 Label is_positive; | |
| 2332 __ j(not_sign, &is_positive); | |
| 2333 __ negl(input_reg); | |

Lasse Reichstein (2011/02/24 13:57:02): If you want to avoid the jumps, you can do: movl(…

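The comment above is cut off, so the exact sequence the reviewer had in mind is not recoverable. As a rough, hypothetical illustration of what a jump-free integer abs can look like, here is a generic branchless sketch in plain C++ (the helper name is made up and this is not V8 code):

```cpp
#include <cstdint>

// Branchless abs: mask is 0 for non-negative x and -1 for negative x
// (assumes the usual arithmetic right shift for signed values).
int32_t BranchlessAbs(int32_t x) {
  int32_t mask = x >> 31;
  return (x ^ mask) - mask;  // x unchanged when mask == 0, two's-complement negate otherwise
}
```

As with the negl path in the patch, kMinInt maps to itself, so the deoptimization on a negative result would still be required.
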
| 2334 __ testl(input_reg, input_reg); | |

Lasse Reichstein (2011/02/24 13:57:02): The negl instruction sets the sign flag, so you can…

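Assuming the point is simply that negl already updates the sign flag, the extra testl above could likely be dropped. A sketch of the shortened function, using only instructions that already appear in this patch:

```cpp
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  __ testl(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive);
  __ negl(input_reg);                            // negl sets SF from its result,
  DeoptimizeIf(negative, instr->environment());  // so no second testl is needed
  __ bind(&is_positive);
}
```
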
| 2335 DeoptimizeIf(negative, instr->environment()); | |
| 2336 __ bind(&is_positive); | |
| 2275 } | 2337 } |
| 2276 | 2338 |
| 2277 | 2339 |
| 2278 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { | 2340 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { |
| 2279 Abort("Unimplemented: %s", "DoMathAbs"); | 2341 // Class for deferred case. |
| 2342 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { | |
| 2343 public: | |
| 2344 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, | |
| 2345 LUnaryMathOperation* instr) | |
| 2346 : LDeferredCode(codegen), instr_(instr) { } | |
| 2347 virtual void Generate() { | |
| 2348 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | |
| 2349 } | |
| 2350 private: | |
| 2351 LUnaryMathOperation* instr_; | |
| 2352 }; | |
| 2353 | |
| 2354 ASSERT(instr->InputAt(0)->Equals(instr->result())); | |
| 2355 Representation r = instr->hydrogen()->value()->representation(); | |
| 2356 | |
| 2357 if (r.IsDouble()) { | |
| 2358 XMMRegister scratch = xmm0; | |

Lasse Reichstein (2011/02/24 13:57:02): Extra space before "scratch".

| 2359 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); | |
| 2360 __ xorpd(scratch, scratch); | |
| 2361 __ subsd(scratch, input_reg); | |
| 2362 __ andpd(input_reg, scratch); | |

Lasse Reichstein (2011/02/24 13:57:02): Neat.

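The xorpd/subsd/andpd sequence above computes the absolute value with no branch and no deoptimization: subtracting x from +0.0 produces a value that differs from x only in the sign bit (NaN payloads aside), so ANDing the two bit patterns clears the sign and leaves the rest of the double untouched. A conceptual restatement in plain C++ (hypothetical helper, not part of V8):

```cpp
#include <cstdint>
#include <cstring>

double AbsViaSignBitAnd(double x) {
  double neg = 0.0 - x;                  // subsd: same bits as x except the sign
  uint64_t a, b;
  std::memcpy(&a, &x, sizeof a);
  std::memcpy(&b, &neg, sizeof b);
  uint64_t bits = a & b;                 // andpd: common bits survive, sign becomes 0
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}
```
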
| 2363 } else if (r.IsInteger32()) { | |
| 2364 EmitIntegerMathAbs(instr); | |
| 2365 } else { // Tagged case. | |
| 2366 DeferredMathAbsTaggedHeapNumber* deferred = | |
| 2367 new DeferredMathAbsTaggedHeapNumber(this, instr); | |
| 2368 Register input_reg = ToRegister(instr->InputAt(0)); | |
| 2369 // Smi check. | |
| 2370 __ JumpIfNotSmi(input_reg, deferred->entry()); | |
| 2371 EmitIntegerMathAbs(instr); | |
| 2372 __ bind(deferred->exit()); | |
| 2373 } | |
| 2280 } | 2374 } |
| 2281 | 2375 |
| 2282 | 2376 |
| 2283 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { | 2377 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { |
| 2284 XMMRegister xmm_scratch = xmm0; | 2378 XMMRegister xmm_scratch = xmm0; |
| 2285 Register output_reg = ToRegister(instr->result()); | 2379 Register output_reg = ToRegister(instr->result()); |
| 2286 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); | 2380 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); |
| 2287 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register. | 2381 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register. |
| 2288 __ ucomisd(input_reg, xmm_scratch); | 2382 __ ucomisd(input_reg, xmm_scratch); |
| 2289 | 2383 |
| (...skipping 1145 matching lines...) | |
| 3435 RegisterEnvironmentForDeoptimization(environment); | 3529 RegisterEnvironmentForDeoptimization(environment); |
| 3436 ASSERT(osr_pc_offset_ == -1); | 3530 ASSERT(osr_pc_offset_ == -1); |
| 3437 osr_pc_offset_ = masm()->pc_offset(); | 3531 osr_pc_offset_ = masm()->pc_offset(); |
| 3438 } | 3532 } |
| 3439 | 3533 |
| 3440 #undef __ | 3534 #undef __ |
| 3441 | 3535 |
| 3442 } } // namespace v8::internal | 3536 } } // namespace v8::internal |
| 3443 | 3537 |
| 3444 #endif // V8_TARGET_ARCH_X64 | 3538 #endif // V8_TARGET_ARCH_X64 |