OLD | NEW |
---|---|
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 206 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
217 // Input values must be either smi or heap number objects (fp values). | 217 // Input values must be either smi or heap number objects (fp values). |
218 // Requirements: | 218 // Requirements: |
219 // Register version: operands in registers lhs and rhs. | 219 // Register version: operands in registers lhs and rhs. |
220 // Stack version: operands on TOS+1 and TOS+2. | 220 // Stack version: operands on TOS+1 and TOS+2. |
221 // Returns operands as floating point numbers on fp stack. | 221 // Returns operands as floating point numbers on fp stack. |
222 static void LoadFloatOperands(MacroAssembler* masm); | 222 static void LoadFloatOperands(MacroAssembler* masm); |
223 static void LoadFloatOperands(MacroAssembler* masm, | 223 static void LoadFloatOperands(MacroAssembler* masm, |
224 Register lhs, | 224 Register lhs, |
225 Register rhs); | 225 Register rhs); |
226 | 226 |
227 // Code pattern for loading a floating point value and converting it | |
228 // to a 32 bit integer. Input value must be either a smi or a heap number | |
229 // object. | |
230 // Returns operands as 32-bit sign extended integers in a general purpose | |
231 // registers. | |
232 static void LoadInt32Operand(MacroAssembler* masm, | |
233 const Operand& src, | |
234 Register dst); | |
235 | |
236 // Test if operands are smi or number objects (fp). Requirements: | 227 // Test if operands are smi or number objects (fp). Requirements: |
237 // operand_1 in rax, operand_2 in rdx; falls through on float or smi | 228 // operand_1 in rax, operand_2 in rdx; falls through on float or smi |
238 // operands, jumps to the non_float label otherwise. | 229 // operands, jumps to the non_float label otherwise. |
239 static void CheckNumberOperands(MacroAssembler* masm, | 230 static void CheckNumberOperands(MacroAssembler* masm, |
240 Label* non_float); | 231 Label* non_float); |
232 | |
233 // Takes the operands in rdx and rax and loads them as integers in rax | |
234 // and rcx. | |
235 static void LoadAsIntegers(MacroAssembler* masm, | |
236 bool use_sse3, | |
237 Label* operand_conversion_failure); | |
241 }; | 238 }; |
242 | 239 |
243 | 240 |
244 // ----------------------------------------------------------------------------- | 241 // ----------------------------------------------------------------------------- |
245 // CodeGenerator implementation. | 242 // CodeGenerator implementation. |
246 | 243 |
247 CodeGenerator::CodeGenerator(int buffer_size, | 244 CodeGenerator::CodeGenerator(int buffer_size, |
248 Handle<Script> script, | 245 Handle<Script> script, |
249 bool is_eval) | 246 bool is_eval) |
250 : is_eval_(is_eval), | 247 : is_eval_(is_eval), |
(...skipping 2754 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3005 expression->AsLiteral()->IsNull())) { | 3002 expression->AsLiteral()->IsNull())) { |
3006 // Omit evaluating the value of the primitive literal. | 3003 // Omit evaluating the value of the primitive literal. |
3007 // It will be discarded anyway, and can have no side effect. | 3004 // It will be discarded anyway, and can have no side effect. |
3008 frame_->Push(Factory::undefined_value()); | 3005 frame_->Push(Factory::undefined_value()); |
3009 } else { | 3006 } else { |
3010 Load(node->expression()); | 3007 Load(node->expression()); |
3011 frame_->SetElementAt(0, Factory::undefined_value()); | 3008 frame_->SetElementAt(0, Factory::undefined_value()); |
3012 } | 3009 } |
3013 | 3010 |
3014 } else { | 3011 } else { |
3012 bool overwrite = | |
3013 (node->expression()->AsBinaryOperation() != NULL && | |
3014 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); | |
3015 Load(node->expression()); | 3015 Load(node->expression()); |
3016 switch (op) { | 3016 switch (op) { |
3017 case Token::NOT: | 3017 case Token::NOT: |
3018 case Token::DELETE: | 3018 case Token::DELETE: |
3019 case Token::TYPEOF: | 3019 case Token::TYPEOF: |
3020 UNREACHABLE(); // handled above | 3020 UNREACHABLE(); // handled above |
3021 break; | 3021 break; |
3022 | 3022 |
3023 case Token::SUB: { | 3023 case Token::SUB: { |
3024 bool overwrite = | |
3025 (node->expression()->AsBinaryOperation() != NULL && | |
3026 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); | |
3027 GenericUnaryOpStub stub(Token::SUB, overwrite); | 3024 GenericUnaryOpStub stub(Token::SUB, overwrite); |
3028 // TODO(1222589): remove dependency of TOS being cached inside stub | 3025 // TODO(1222589): remove dependency of TOS being cached inside stub |
3029 Result operand = frame_->Pop(); | 3026 Result operand = frame_->Pop(); |
3030 Result answer = frame_->CallStub(&stub, &operand); | 3027 Result answer = frame_->CallStub(&stub, &operand); |
3031 frame_->Push(&answer); | 3028 frame_->Push(&answer); |
3032 break; | 3029 break; |
3033 } | 3030 } |
3034 | 3031 |
3035 case Token::BIT_NOT: { | 3032 case Token::BIT_NOT: { |
3036 // Smi check. | 3033 // Smi check. |
3037 JumpTarget smi_label; | 3034 JumpTarget smi_label; |
3038 JumpTarget continue_label; | 3035 JumpTarget continue_label; |
3039 Result operand = frame_->Pop(); | 3036 Result operand = frame_->Pop(); |
3040 operand.ToRegister(); | 3037 operand.ToRegister(); |
3041 | 3038 |
3042 Condition is_smi = masm_->CheckSmi(operand.reg()); | 3039 Condition is_smi = masm_->CheckSmi(operand.reg()); |
3043 smi_label.Branch(is_smi, &operand); | 3040 smi_label.Branch(is_smi, &operand); |
3044 | 3041 |
3045 frame_->Push(&operand); // undo popping of TOS | 3042 GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); |
3046 Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT, | 3043 Result answer = frame_->CallStub(&stub, &operand); |
3047 CALL_FUNCTION, 1); | |
3048 continue_label.Jump(&answer); | 3044 continue_label.Jump(&answer); |
3045 | |
3049 smi_label.Bind(&answer); | 3046 smi_label.Bind(&answer); |
3050 answer.ToRegister(); | 3047 answer.ToRegister(); |
3051 frame_->Spill(answer.reg()); | 3048 frame_->Spill(answer.reg()); |
3052 __ SmiNot(answer.reg(), answer.reg()); | 3049 __ SmiNot(answer.reg(), answer.reg()); |
3053 continue_label.Bind(&answer); | 3050 continue_label.Bind(&answer); |
3054 frame_->Push(&answer); | 3051 frame_->Push(&answer); |
3055 break; | 3052 break; |
3056 } | 3053 } |
3057 | 3054 |
3058 case Token::ADD: { | 3055 case Token::ADD: { |
(...skipping 3224 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
6283 if (answer_object == Heap::undefined_value()) { | 6280 if (answer_object == Heap::undefined_value()) { |
6284 return false; | 6281 return false; |
6285 } | 6282 } |
6286 frame_->Push(Handle<Object>(answer_object)); | 6283 frame_->Push(Handle<Object>(answer_object)); |
6287 return true; | 6284 return true; |
6288 } | 6285 } |
6289 | 6286 |
6290 | 6287 |
6291 // End of CodeGenerator implementation. | 6288 // End of CodeGenerator implementation. |
6292 | 6289 |
6290 // Get the integer part of a heap number. Surprisingly, all this bit twiddling | |
6291 // is faster than using the built-in instructions on floating point registers. | |
6292 // Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the | |
6293 // trashed registers. | |
6294 void IntegerConvert(MacroAssembler* masm, | |
6295 Register source, | |
6296 bool use_sse3, | |
6297 Label* conversion_failure) { | |
6298 ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx)); | |
6299 Label done, right_exponent, normal_exponent; | |
6300 Register scratch = rbx; | |
6301 Register scratch2 = rdi; | |
6302 // Get exponent word. | |
6303 __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); | |
6304 // Get exponent alone in scratch2. | |
6305 __ movl(scratch2, scratch); | |
6306 __ and_(scratch2, Immediate(HeapNumber::kExponentMask)); | |
6307 if (use_sse3) { | |
6308 CpuFeatures::Scope scope(SSE3); | |
6309 // Check whether the exponent is too big for a 64 bit signed integer. | |
6310 static const uint32_t kTooBigExponent = | |
6311 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; | |
6312 __ cmpl(scratch2, Immediate(kTooBigExponent)); | |
6313 __ j(greater_equal, conversion_failure); | |
6314 // Load x87 register with heap number. | |
6315 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); | |
6316 // Reserve space for 64 bit answer. | |
6317 __ subq(rsp, Immediate(sizeof(uint64_t))); // Nolint. | |
6318 // Do conversion, which cannot fail because we checked the exponent. | |
6319 __ fisttp_d(Operand(rsp, 0)); | |
6320 __ movl(rcx, Operand(rsp, 0)); // Load low word of answer into rcx. | |
6321 __ addq(rsp, Immediate(sizeof(uint64_t))); // Nolint. | |
6322 } else { | |
6323 // Load rcx with zero. We use this either for the final shift or | |
Erik Corry
2010/01/27 13:16:01
All this stuff could probably be reimplemented in
Mads Ager (chromium)
2010/01/27 13:33:34
I agree.
| |
6324 // for the answer. | |
6325 __ xor_(rcx, rcx); | |
6326 // Check whether the exponent matches a 32 bit signed int that cannot be | |
6327 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the | |
6328 // exponent is 30 (biased). This is the exponent that we are fastest at and | |
6329 // also the highest exponent we can handle here. | |
6330 const uint32_t non_smi_exponent = | |
6331 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | |
6332 __ cmpl(scratch2, Immediate(non_smi_exponent)); | |
6333 // If we have a match of the int32-but-not-Smi exponent then skip some | |
6334 // logic. | |
6335 __ j(equal, &right_exponent); | |
6336 // If the exponent is higher than that then go to slow case. This catches | |
6337 // numbers that don't fit in a signed int32, infinities and NaNs. | |
6338 __ j(less, &normal_exponent); | |
6339 | |
6340 { | |
6341 // Handle a big exponent. The only reason we have this code is that the | |
6342 // >>> operator has a tendency to generate numbers with an exponent of 31. | |
6343 const uint32_t big_non_smi_exponent = | |
6344 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; | |
6345 __ cmpl(scratch2, Immediate(big_non_smi_exponent)); | |
6346 __ j(not_equal, conversion_failure); | |
6347 // We have the big exponent, typically from >>>. This means the number is | |
6348 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. | |
6349 __ movl(scratch2, scratch); | |
6350 __ and_(scratch2, Immediate(HeapNumber::kMantissaMask)); | |
6351 // Put back the implicit 1. | |
6352 __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift)); | |
6353 // Shift up the mantissa bits to take up the space the exponent used to | |
6354 // take. We just orred in the implicit bit so that took care of one and | |
6355 // we want to use the full unsigned range so we subtract 1 bit from the | |
6356 // shift distance. | |
6357 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; | |
6358 __ shl(scratch2, Immediate(big_shift_distance)); | |
6359 // Get the second half of the double. | |
6360 __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset)); | |
6361 // Shift down 21 bits to get the most significant 11 bits of the low | |
6362 // mantissa word. | |
6363 __ shr(rcx, Immediate(32 - big_shift_distance)); | |
6364 __ or_(rcx, scratch2); | |
6365 // We have the answer in rcx, but we may need to negate it. | |
6366 __ testl(scratch, scratch); | |
6367 __ j(positive, &done); | |
6368 __ neg(rcx); | |
6369 __ jmp(&done); | |
6370 } | |
6371 | |
6372 __ bind(&normal_exponent); | |
6373 // Exponent word in scratch, exponent part of exponent word in scratch2. | |
6374 // Zero in rcx. | |
6375 // We know the exponent is smaller than 30 (biased). If it is less than | |
6376 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie | |
6377 // it rounds to zero. | |
6378 const uint32_t zero_exponent = | |
6379 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; | |
6380 __ subl(scratch2, Immediate(zero_exponent)); | |
6381 // rcx already has a Smi zero. | |
6382 __ j(less, &done); | |
6383 | |
6384 // We have a shifted exponent between 0 and 30 in scratch2. | |
6385 __ shr(scratch2, Immediate(HeapNumber::kExponentShift)); | |
6386 __ movl(rcx, Immediate(30)); | |
6387 __ subl(rcx, scratch2); | |
6388 | |
6389 __ bind(&right_exponent); | |
6390 // Here rcx is the shift, scratch is the exponent word. | |
6391 // Get the top bits of the mantissa. | |
6392 __ and_(scratch, Immediate(HeapNumber::kMantissaMask)); | |
6393 // Put back the implicit 1. | |
6394 __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift)); | |
6395 // Shift up the mantissa bits to take up the space the exponent used to | |
6396 // take. We have kExponentShift + 1 significant bits in the low end of the | |
6397 // word. Shift them to the top bits. | |
6398 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | |
6399 __ shl(scratch, Immediate(shift_distance)); | |
6400 // Get the second half of the double. For some exponents we don't | |
6401 // actually need this because the bits get shifted out again, but | |
6402 // it's probably slower to test than just to do it. | |
6403 __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); | |
6404 // Shift down 22 bits to get the most significant 10 bits of the low | |
6405 // mantissa word. | |
6406 __ shr(scratch2, Immediate(32 - shift_distance)); | |
6407 __ or_(scratch2, scratch); | |
6408 // Move down according to the exponent. | |
6409 __ shr_cl(scratch2); | |
6410 // Now the unsigned answer is in scratch2. We need to move it to rcx and | |
6411 // we may need to fix the sign. | |
6412 Label negative; | |
6413 __ xor_(rcx, rcx); | |
6414 __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset)); | |
6415 __ j(greater, &negative); | |
6416 __ movl(rcx, scratch2); | |
6417 __ jmp(&done); | |
6418 __ bind(&negative); | |
6419 __ subl(rcx, scratch2); | |
6420 __ bind(&done); | |
6421 } | |
6422 } | |
6423 | |
6424 | |
6293 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { | 6425 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { |
6294 ASSERT(op_ == Token::SUB); | 6426 Label slow, done; |
6295 | 6427 |
6296 Label slow; | 6428 if (op_ == Token::SUB) { |
6297 Label done; | 6429 // Check whether the value is a smi. |
6298 Label try_float; | 6430 Label try_float; |
6299 // Check whether the value is a smi. | 6431 __ JumpIfNotSmi(rax, &try_float); |
6300 __ JumpIfNotSmi(rax, &try_float); | 6432 |
6301 | 6433 // Enter runtime system if the value of the smi is zero |
6302 // Enter runtime system if the value of the smi is zero | 6434 // to make sure that we switch between 0 and -0. |
6303 // to make sure that we switch between 0 and -0. | 6435 // Also enter it if the value of the smi is Smi::kMinValue. |
6304 // Also enter it if the value of the smi is Smi::kMinValue. | 6436 __ SmiNeg(rax, rax, &done); |
6305 __ SmiNeg(rax, rax, &done); | 6437 |
6306 | 6438 // Either zero or Smi::kMinValue, neither of which become a smi when |
6307 // Either zero or Smi::kMinValue, neither of which become a smi when negated. | 6439 // negated. |
6308 __ SmiCompare(rax, Smi::FromInt(0)); | 6440 __ SmiCompare(rax, Smi::FromInt(0)); |
6309 __ j(not_equal, &slow); | 6441 __ j(not_equal, &slow); |
6310 __ Move(rax, Factory::minus_zero_value()); | 6442 __ Move(rax, Factory::minus_zero_value()); |
6311 __ jmp(&done); | 6443 __ jmp(&done); |
6312 | 6444 |
6313 // Enter runtime system. | 6445 // Try floating point case. |
6446 __ bind(&try_float); | |
6447 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); | |
6448 __ Cmp(rdx, Factory::heap_number_map()); | |
6449 __ j(not_equal, &slow); | |
6450 // Operand is a float, negate its value by flipping sign bit. | |
6451 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset)); | |
6452 __ movq(kScratchRegister, Immediate(0x01)); | |
6453 __ shl(kScratchRegister, Immediate(63)); | |
6454 __ xor_(rdx, kScratchRegister); // Flip sign. | |
6455 // rdx is value to store. | |
6456 if (overwrite_) { | |
6457 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx); | |
6458 } else { | |
6459 __ AllocateHeapNumber(rcx, rbx, &slow); | |
6460 // rcx: allocated 'empty' number | |
6461 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx); | |
6462 __ movq(rax, rcx); | |
6463 } | |
6464 } else if (op_ == Token::BIT_NOT) { | |
6465 // Check if the operand is a heap number. | |
6466 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); | |
6467 __ Cmp(rdx, Factory::heap_number_map()); | |
6468 __ j(not_equal, &slow); | |
6469 | |
6470 // Convert the heap number in rax to an untagged integer in rcx. | |
6471 IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow); | |
6472 | |
6473 // Do the bitwise operation and check if the result fits in a smi. | |
6474 Label try_float; | |
6475 __ not_(rcx); | |
6476 // Tag the result as a smi and we're done. | |
6477 ASSERT(kSmiTagSize == 1); | |
6478 __ Integer32ToSmi(rax, rcx); | |
6479 } | |
6480 | |
6481 // Return from the stub. | |
6482 __ bind(&done); | |
6483 __ StubReturn(1); | |
6484 | |
6485 // Handle the slow case by jumping to the JavaScript builtin. | |
6314 __ bind(&slow); | 6486 __ bind(&slow); |
6315 __ pop(rcx); // pop return address | 6487 __ pop(rcx); // pop return address |
6316 __ push(rax); | 6488 __ push(rax); |
6317 __ push(rcx); // push return address | 6489 __ push(rcx); // push return address |
6318 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); | 6490 switch (op_) { |
6319 __ jmp(&done); | 6491 case Token::SUB: |
6320 | 6492 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); |
6321 // Try floating point case. | 6493 break; |
6322 __ bind(&try_float); | 6494 case Token::BIT_NOT: |
6323 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); | 6495 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
6324 __ Cmp(rdx, Factory::heap_number_map()); | 6496 break; |
6325 __ j(not_equal, &slow); | 6497 default: |
6326 // Operand is a float, negate its value by flipping sign bit. | 6498 UNREACHABLE(); |
6327 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset)); | |
6328 __ movq(kScratchRegister, Immediate(0x01)); | |
6329 __ shl(kScratchRegister, Immediate(63)); | |
6330 __ xor_(rdx, kScratchRegister); // Flip sign. | |
6331 // rdx is value to store. | |
6332 if (overwrite_) { | |
6333 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx); | |
6334 } else { | |
6335 __ AllocateHeapNumber(rcx, rbx, &slow); | |
6336 // rcx: allocated 'empty' number | |
6337 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx); | |
6338 __ movq(rax, rcx); | |
6339 } | 6499 } |
6340 | |
6341 __ bind(&done); | |
6342 __ StubReturn(1); | |
6343 } | 6500 } |
6344 | 6501 |
6345 | 6502 |
6346 void CompareStub::Generate(MacroAssembler* masm) { | 6503 void CompareStub::Generate(MacroAssembler* masm) { |
6347 Label call_builtin, done; | 6504 Label call_builtin, done; |
6348 | 6505 |
6349 // NOTICE! This code is only reached after a smi-fast-case check, so | 6506 // NOTICE! This code is only reached after a smi-fast-case check, so |
6350 // it is certain that at least one operand isn't a smi. | 6507 // it is certain that at least one operand isn't a smi. |
6351 | 6508 |
6352 if (cc_ == equal) { // Both strict and non-strict. | 6509 if (cc_ == equal) { // Both strict and non-strict. |
(...skipping 928 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7281 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, | 7438 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, |
7282 XMMRegister dst1, | 7439 XMMRegister dst1, |
7283 XMMRegister dst2) { | 7440 XMMRegister dst2) { |
7284 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize)); | 7441 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize)); |
7285 LoadFloatOperand(masm, kScratchRegister, dst1); | 7442 LoadFloatOperand(masm, kScratchRegister, dst1); |
7286 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); | 7443 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); |
7287 LoadFloatOperand(masm, kScratchRegister, dst2); | 7444 LoadFloatOperand(masm, kScratchRegister, dst2); |
7288 } | 7445 } |
7289 | 7446 |
7290 | 7447 |
7291 void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm, | |
7292 const Operand& src, | |
7293 Register dst) { | |
7294 // TODO(X64): Convert number operands to int32 values. | |
7295 // Don't convert a Smi to a double first. | |
7296 UNIMPLEMENTED(); | |
7297 } | |
7298 | |
7299 | |
7300 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) { | 7448 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) { |
7301 Label load_smi_1, load_smi_2, done_load_1, done; | 7449 Label load_smi_1, load_smi_2, done_load_1, done; |
7302 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize)); | 7450 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize)); |
7303 __ JumpIfSmi(kScratchRegister, &load_smi_1); | 7451 __ JumpIfSmi(kScratchRegister, &load_smi_1); |
7304 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset)); | 7452 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset)); |
7305 __ bind(&done_load_1); | 7453 __ bind(&done_load_1); |
7306 | 7454 |
7307 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); | 7455 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); |
7308 __ JumpIfSmi(kScratchRegister, &load_smi_2); | 7456 __ JumpIfSmi(kScratchRegister, &load_smi_2); |
7309 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset)); | 7457 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset)); |
7310 __ jmp(&done); | 7458 __ jmp(&done); |
7311 | 7459 |
7312 __ bind(&load_smi_1); | 7460 __ bind(&load_smi_1); |
7313 __ SmiToInteger32(kScratchRegister, kScratchRegister); | 7461 __ SmiToInteger32(kScratchRegister, kScratchRegister); |
7314 __ push(kScratchRegister); | 7462 __ push(kScratchRegister); |
7315 __ fild_s(Operand(rsp, 0)); | 7463 __ fild_s(Operand(rsp, 0)); |
7316 __ pop(kScratchRegister); | 7464 __ pop(kScratchRegister); |
7317 __ jmp(&done_load_1); | 7465 __ jmp(&done_load_1); |
7318 | 7466 |
7319 __ bind(&load_smi_2); | 7467 __ bind(&load_smi_2); |
7320 __ SmiToInteger32(kScratchRegister, kScratchRegister); | 7468 __ SmiToInteger32(kScratchRegister, kScratchRegister); |
7321 __ push(kScratchRegister); | 7469 __ push(kScratchRegister); |
7322 __ fild_s(Operand(rsp, 0)); | 7470 __ fild_s(Operand(rsp, 0)); |
7323 __ pop(kScratchRegister); | 7471 __ pop(kScratchRegister); |
7324 | 7472 |
7325 __ bind(&done); | 7473 __ bind(&done); |
7326 } | 7474 } |
7327 | 7475 |
7328 | 7476 |
7477 // Input: rdx, rax are the left and right objects of a bit op. | |
7478 // Output: rax, rcx are left and right integers for a bit op. | |
7479 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, | |
7480 bool use_sse3, | |
7481 Label* conversion_failure) { | |
7482 // Check float operands. | |
7483 Label arg1_is_object, check_undefined_arg1; | |
7484 Label arg2_is_object, check_undefined_arg2; | |
7485 Label load_arg2, done; | |
7486 | |
7487 __ JumpIfNotSmi(rdx, &arg1_is_object); | |
7488 __ SmiToInteger32(rdx, rdx); | |
7489 __ jmp(&load_arg2); | |
7490 | |
7491 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | |
7492 __ bind(&check_undefined_arg1); | |
7493 __ Cmp(rdx, Factory::undefined_value()); | |
Erik Corry
2010/01/27 13:16:01
You should use CompareRoot here. It's a tiny bit
Mads Ager (chromium)
2010/01/27 13:33:34
I always forget that - thanks for pointing it out
| |
7494 __ j(not_equal, conversion_failure); | |
7495 __ movl(rdx, Immediate(0)); | |
7496 __ jmp(&load_arg2); | |
7497 | |
7498 __ bind(&arg1_is_object); | |
7499 __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); | |
7500 __ Cmp(rbx, Factory::heap_number_map()); | |
7501 __ j(not_equal, &check_undefined_arg1); | |
7502 // Get the untagged integer version of the rdx heap number in rcx. | |
7503 IntegerConvert(masm, rdx, use_sse3, conversion_failure); | |
7504 __ movl(rdx, rcx); | |
7505 | |
7506 // Here rdx has the untagged integer, rax has a Smi or a heap number. | |
7507 __ bind(&load_arg2); | |
7508 // Test if arg2 is a Smi. | |
7509 __ JumpIfNotSmi(rax, &arg2_is_object); | |
7510 __ SmiToInteger32(rax, rax); | |
7511 __ movl(rcx, rax); | |
7512 __ jmp(&done); | |
7513 | |
7514 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | |
7515 __ bind(&check_undefined_arg2); | |
7516 __ Cmp(rax, Factory::undefined_value()); | |
7517 __ j(not_equal, conversion_failure); | |
7518 __ movl(rcx, Immediate(0)); | |
7519 __ jmp(&done); | |
7520 | |
7521 __ bind(&arg2_is_object); | |
7522 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); | |
7523 __ Cmp(rbx, Factory::heap_number_map()); | |
7524 __ j(not_equal, &check_undefined_arg2); | |
7525 // Get the untagged integer version of the rax heap number in rcx. | |
7526 IntegerConvert(masm, rax, use_sse3, conversion_failure); | |
7527 __ bind(&done); | |
7528 __ movl(rax, rdx); | |
7529 } | |
7530 | |
7531 | |
7329 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, | 7532 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, |
7330 Register lhs, | 7533 Register lhs, |
7331 Register rhs) { | 7534 Register rhs) { |
7332 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done; | 7535 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done; |
7333 __ JumpIfSmi(lhs, &load_smi_lhs); | 7536 __ JumpIfSmi(lhs, &load_smi_lhs); |
7334 __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset)); | 7537 __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset)); |
7335 __ bind(&done_load_lhs); | 7538 __ bind(&done_load_lhs); |
7336 | 7539 |
7337 __ JumpIfSmi(rhs, &load_smi_rhs); | 7540 __ JumpIfSmi(rhs, &load_smi_rhs); |
7338 __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset)); | 7541 __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset)); |
(...skipping 220 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7559 __ SmiAnd(rax, rax, rbx); | 7762 __ SmiAnd(rax, rax, rbx); |
7560 break; | 7763 break; |
7561 | 7764 |
7562 case Token::BIT_XOR: | 7765 case Token::BIT_XOR: |
7563 __ SmiXor(rax, rax, rbx); | 7766 __ SmiXor(rax, rax, rbx); |
7564 break; | 7767 break; |
7565 | 7768 |
7566 case Token::SHL: | 7769 case Token::SHL: |
7567 case Token::SHR: | 7770 case Token::SHR: |
7568 case Token::SAR: | 7771 case Token::SAR: |
7569 // Move the second operand into register ecx. | 7772 // Move the second operand into register rcx. |
7570 __ movq(rcx, rbx); | 7773 __ movq(rcx, rbx); |
7571 // Perform the operation. | 7774 // Perform the operation. |
7572 switch (op_) { | 7775 switch (op_) { |
7573 case Token::SAR: | 7776 case Token::SAR: |
7574 __ SmiShiftArithmeticRight(rax, rax, rcx); | 7777 __ SmiShiftArithmeticRight(rax, rax, rcx); |
7575 break; | 7778 break; |
7576 case Token::SHR: | 7779 case Token::SHR: |
7577 __ SmiShiftLogicalRight(rax, rax, rcx, slow); | 7780 __ SmiShiftLogicalRight(rax, rax, rcx, slow); |
7578 break; | 7781 break; |
7579 case Token::SHL: | 7782 case Token::SHL: |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7655 case Token::MOD: { | 7858 case Token::MOD: { |
7656 // For MOD we go directly to runtime in the non-smi case. | 7859 // For MOD we go directly to runtime in the non-smi case. |
7657 break; | 7860 break; |
7658 } | 7861 } |
7659 case Token::BIT_OR: | 7862 case Token::BIT_OR: |
7660 case Token::BIT_AND: | 7863 case Token::BIT_AND: |
7661 case Token::BIT_XOR: | 7864 case Token::BIT_XOR: |
7662 case Token::SAR: | 7865 case Token::SAR: |
7663 case Token::SHL: | 7866 case Token::SHL: |
7664 case Token::SHR: { | 7867 case Token::SHR: { |
7665 FloatingPointHelper::CheckNumberOperands(masm, &call_runtime); | 7868 Label skip_allocation, non_smi_result; |
7666 // TODO(X64): Don't convert a Smi to float and then back to int32 | 7869 FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime); |
7667 // afterwards. | |
7668 FloatingPointHelper::LoadFloatOperands(masm); | |
7669 | |
7670 Label skip_allocation, non_smi_result, operand_conversion_failure; | |
7671 | |
7672 // Reserve space for converted numbers. | |
7673 __ subq(rsp, Immediate(2 * kPointerSize)); | |
7674 | |
7675 if (use_sse3_) { | |
7676 // Truncate the operands to 32-bit integers and check for | |
7677 // exceptions in doing so. | |
7678 CpuFeatures::Scope scope(SSE3); | |
7679 __ fisttp_s(Operand(rsp, 0 * kPointerSize)); | |
7680 __ fisttp_s(Operand(rsp, 1 * kPointerSize)); | |
7681 __ fnstsw_ax(); | |
7682 __ testl(rax, Immediate(1)); | |
7683 __ j(not_zero, &operand_conversion_failure); | |
7684 } else { | |
7685 // Check if right operand is int32. | |
7686 __ fist_s(Operand(rsp, 0 * kPointerSize)); | |
7687 __ fild_s(Operand(rsp, 0 * kPointerSize)); | |
7688 __ FCmp(); | |
7689 __ j(not_zero, &operand_conversion_failure); | |
7690 __ j(parity_even, &operand_conversion_failure); | |
7691 | |
7692 // Check if left operand is int32. | |
7693 __ fist_s(Operand(rsp, 1 * kPointerSize)); | |
7694 __ fild_s(Operand(rsp, 1 * kPointerSize)); | |
7695 __ FCmp(); | |
7696 __ j(not_zero, &operand_conversion_failure); | |
7697 __ j(parity_even, &operand_conversion_failure); | |
7698 } | |
7699 | |
7700 // Get int32 operands and perform bitop. | |
7701 __ pop(rcx); | |
7702 __ pop(rax); | |
7703 switch (op_) { | 7870 switch (op_) { |
7704 case Token::BIT_OR: __ orl(rax, rcx); break; | 7871 case Token::BIT_OR: __ orl(rax, rcx); break; |
7705 case Token::BIT_AND: __ andl(rax, rcx); break; | 7872 case Token::BIT_AND: __ andl(rax, rcx); break; |
7706 case Token::BIT_XOR: __ xorl(rax, rcx); break; | 7873 case Token::BIT_XOR: __ xorl(rax, rcx); break; |
7707 case Token::SAR: __ sarl_cl(rax); break; | 7874 case Token::SAR: __ sarl_cl(rax); break; |
7708 case Token::SHL: __ shll_cl(rax); break; | 7875 case Token::SHL: __ shll_cl(rax); break; |
7709 case Token::SHR: __ shrl_cl(rax); break; | 7876 case Token::SHR: __ shrl_cl(rax); break; |
7710 default: UNREACHABLE(); | 7877 default: UNREACHABLE(); |
7711 } | 7878 } |
7712 if (op_ == Token::SHR) { | 7879 if (op_ == Token::SHR) { |
(...skipping 27 matching lines...) Expand all Loading... | |
7740 break; | 7907 break; |
7741 default: UNREACHABLE(); | 7908 default: UNREACHABLE(); |
7742 } | 7909 } |
7743 // Store the result in the HeapNumber and return. | 7910 // Store the result in the HeapNumber and return. |
7744 __ movq(Operand(rsp, 1 * kPointerSize), rbx); | 7911 __ movq(Operand(rsp, 1 * kPointerSize), rbx); |
7745 __ fild_s(Operand(rsp, 1 * kPointerSize)); | 7912 __ fild_s(Operand(rsp, 1 * kPointerSize)); |
7746 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); | 7913 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); |
7747 GenerateReturn(masm); | 7914 GenerateReturn(masm); |
7748 } | 7915 } |
7749 | 7916 |
7750 // Clear the FPU exception flag and reset the stack before calling | |
7751 // the runtime system. | |
7752 __ bind(&operand_conversion_failure); | |
7753 __ addq(rsp, Immediate(2 * kPointerSize)); | |
7754 if (use_sse3_) { | |
7755 // If we've used the SSE3 instructions for truncating the | |
7756 // floating point values to integers and it failed, we have a | |
7757 // pending #IA exception. Clear it. | |
7758 __ fnclex(); | |
7759 } else { | |
7760 // The non-SSE3 variant does early bailout if the right | |
7761 // operand isn't a 32-bit integer, so we may have a single | |
7762 // value on the FPU stack we need to get rid of. | |
7763 __ ffree(0); | |
7764 } | |
7765 | |
7766 // SHR should return uint32 - go to runtime for non-smi/negative result. | 7917 // SHR should return uint32 - go to runtime for non-smi/negative result. |
7767 if (op_ == Token::SHR) { | 7918 if (op_ == Token::SHR) { |
7768 __ bind(&non_smi_result); | 7919 __ bind(&non_smi_result); |
7769 } | 7920 } |
7770 __ movq(rax, Operand(rsp, 1 * kPointerSize)); | 7921 __ movq(rax, Operand(rsp, 1 * kPointerSize)); |
7771 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); | 7922 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); |
7772 break; | 7923 break; |
7773 } | 7924 } |
7774 default: UNREACHABLE(); break; | 7925 default: UNREACHABLE(); break; |
7775 } | 7926 } |
(...skipping 199 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7975 __ testl(rbx, rbx); | 8126 __ testl(rbx, rbx); |
7976 __ j(not_zero, &both_not_zero_length); | 8127 __ j(not_zero, &both_not_zero_length); |
7977 // First string is empty, result is second string which is in rdx. | 8128 // First string is empty, result is second string which is in rdx. |
7978 __ movq(rax, rdx); | 8129 __ movq(rax, rdx); |
7979 __ IncrementCounter(&Counters::string_add_native, 1); | 8130 __ IncrementCounter(&Counters::string_add_native, 1); |
7980 __ ret(2 * kPointerSize); | 8131 __ ret(2 * kPointerSize); |
7981 | 8132 |
7982 // Both strings are non-empty. | 8133 // Both strings are non-empty. |
7983 // rax: first string | 8134 // rax: first string |
7984 // rbx: length of first string | 8135 // rbx: length of first string |
7985 // ecx: length of second string | 8136 // rcx: length of second string |
7986 // edx: second string | 8137 // rdx: second string |
7987 // r8: instance type of first string if string check was performed above | 8138 // r8: instance type of first string if string check was performed above |
7988 // r9: instance type of second string if string check was performed above | 8139 // r9: instance type of second string if string check was performed above |
7989 Label string_add_flat_result; | 8140 Label string_add_flat_result; |
7990 __ bind(&both_not_zero_length); | 8141 __ bind(&both_not_zero_length); |
7991 // Look at the length of the result of adding the two strings. | 8142 // Look at the length of the result of adding the two strings. |
7992 __ addl(rbx, rcx); | 8143 __ addl(rbx, rcx); |
7993 // Use the runtime system when adding two one character strings, as it | 8144 // Use the runtime system when adding two one character strings, as it |
7994 // contains optimizations for this specific case using the symbol table. | 8145 // contains optimizations for this specific case using the symbol table. |
7995 __ cmpl(rbx, Immediate(2)); | 8146 __ cmpl(rbx, Immediate(2)); |
7996 __ j(equal, &string_add_runtime); | 8147 __ j(equal, &string_add_runtime); |
(...skipping 545 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
8542 // Call the function from C++. | 8693 // Call the function from C++. |
8543 return FUNCTION_CAST<ModuloFunction>(buffer); | 8694 return FUNCTION_CAST<ModuloFunction>(buffer); |
8544 } | 8695 } |
8545 | 8696 |
8546 #endif | 8697 #endif |
8547 | 8698 |
8548 | 8699 |
8549 #undef __ | 8700 #undef __ |
8550 | 8701 |
8551 } } // namespace v8::internal | 8702 } } // namespace v8::internal |
OLD | NEW |