OLD | NEW |
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 371 matching lines...)
382 && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0)) | 382 && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0)) |
383 && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0)) | 383 && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0)) |
384 && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0)) | 384 && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0)) |
385 && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0)) | 385 && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0)) |
386 && (allocator()->count(r13) == (frame()->is_used(r13) ? 1 : 0)) | 386 && (allocator()->count(r13) == (frame()->is_used(r13) ? 1 : 0)) |
387 && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0)); | 387 && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0)); |
388 } | 388 } |
389 #endif | 389 #endif |
390 | 390 |
391 | 391 |
| 392 class DeferredReferenceGetKeyedValue: public DeferredCode { |
| 393 public: |
| 394 explicit DeferredReferenceGetKeyedValue(Register dst, |
| 395 Register receiver, |
| 396 Register key, |
| 397 bool is_global) |
| 398 : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) { |
| 399 set_comment("[ DeferredReferenceGetKeyedValue"); |
| 400 } |
| 401 |
| 402 virtual void Generate(); |
| 403 |
| 404 Label* patch_site() { return &patch_site_; } |
| 405 |
| 406 private: |
| 407 Label patch_site_; |
| 408 Register dst_; |
| 409 Register receiver_; |
| 410 Register key_; |
| 411 bool is_global_; |
| 412 }; |
| 413 |
| 414 |
| 415 void DeferredReferenceGetKeyedValue::Generate() { |
| 416 __ push(receiver_); // First IC argument. |
| 417 __ push(key_); // Second IC argument. |
| 418 |
| 419 // Calculate the delta from the IC call instruction to the map check |
| 420 // movq instruction in the inlined version. This delta is stored in |
| 421 // a test(rax, delta) instruction after the call so that we can find |
| 422 // it in the IC initialization code and patch the movq instruction. |
| 423 // This means that we cannot allow test instructions after calls to |
| 424 // KeyedLoadIC stubs in other places. |
| 425 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); |
| 426 RelocInfo::Mode mode = is_global_ |
| 427 ? RelocInfo::CODE_TARGET_CONTEXT |
| 428 : RelocInfo::CODE_TARGET; |
| 429 __ Call(ic, mode); |
| 430 // The delta from the start of the map-compare instruction to the |
| 431 // test instruction. We use masm_-> directly here instead of the __ |
| 432 // macro because the macro sometimes uses macro expansion to turn |
| 433 // into something that can't return a value. This is encountered |
| 434 // when doing generated code coverage tests. |
| 435 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); |
| 436 // Here we use masm_-> instead of the __ macro because this is the |
| 437 // instruction that gets patched and coverage code gets in the way. |
| 438 // TODO(X64): Consider whether it's worth switching the test to a |
| 439 // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't |
| 440 // be generated normally. |
| 441 masm_->testl(rax, Immediate(-delta_to_patch_site)); |
| 442 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); |
| 443 |
| 444 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 445 __ pop(key_); |
| 446 __ pop(receiver_); |
| 447 } |
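
The negated delta stored in the test instruction's immediate is what lets the IC initialization code walk back from the call's return address to the inlined map check. A minimal sketch of how a patcher could decode it, assuming the x64 encoding of testl(rax, imm32) is a 0xA9 opcode byte followed by a 32-bit immediate; the function name and Address usage are illustrative, not the actual ic.cc code:

    // Recover the patch site from the return address of the KeyedLoadIC call.
    Address PatchSiteFromReturnAddress(Address return_address) {
      ASSERT(*return_address == 0xA9);  // test eax, imm32 marks an inlined site
      int32_t imm = *reinterpret_cast<int32_t*>(return_address + 1);
      int delta_to_patch_site = -imm;   // the immediate holds the negated delta
      // SizeOfCodeGeneratedSince was taken right after the call, so the delta
      // runs from patch_site_ to the return address; subtract to get back.
      return return_address - delta_to_patch_site;
    }
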
| 448 |
| 449 |
| 450 class DeferredReferenceSetKeyedValue: public DeferredCode { |
| 451 public: |
| 452 DeferredReferenceSetKeyedValue(Register value, |
| 453 Register key, |
| 454 Register receiver) |
| 455 : value_(value), key_(key), receiver_(receiver) { |
| 456 set_comment("[ DeferredReferenceSetKeyedValue"); |
| 457 } |
| 458 |
| 459 virtual void Generate(); |
| 460 |
| 461 Label* patch_site() { return &patch_site_; } |
| 462 |
| 463 private: |
| 464 Register value_; |
| 465 Register key_; |
| 466 Register receiver_; |
| 467 Label patch_site_; |
| 468 }; |
| 469 |
| 470 |
| 471 void DeferredReferenceSetKeyedValue::Generate() { |
| 472 __ IncrementCounter(&Counters::keyed_store_inline_miss, 1); |
| 473 // Push receiver and key arguments on the stack. |
| 474 __ push(receiver_); |
| 475 __ push(key_); |
 | 476 // Move value argument to rax as expected by the IC stub. |
| 477 if (!value_.is(rax)) __ movq(rax, value_); |
| 478 // Call the IC stub. |
| 479 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); |
| 480 __ Call(ic, RelocInfo::CODE_TARGET); |
| 481 // The delta from the start of the map-compare instructions (initial movq) |
| 482 // to the test instruction. We use masm_-> directly here instead of the |
| 483 // __ macro because the macro sometimes uses macro expansion to turn |
| 484 // into something that can't return a value. This is encountered |
| 485 // when doing generated code coverage tests. |
| 486 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); |
| 487 // Here we use masm_-> instead of the __ macro because this is the |
| 488 // instruction that gets patched and coverage code gets in the way. |
| 489 masm_->testl(rax, Immediate(-delta_to_patch_site)); |
| 490 // Restore value (returned from store IC), key and receiver |
| 491 // registers. |
| 492 if (!value_.is(rax)) __ movq(value_, rax); |
| 493 __ pop(key_); |
| 494 __ pop(receiver_); |
| 495 } |
| 496 |
| 497 |
392 class DeferredStackCheck: public DeferredCode { | 498 class DeferredStackCheck: public DeferredCode { |
393 public: | 499 public: |
394 DeferredStackCheck() { | 500 DeferredStackCheck() { |
395 set_comment("[ DeferredStackCheck"); | 501 set_comment("[ DeferredStackCheck"); |
396 } | 502 } |
397 | 503 |
398 virtual void Generate(); | 504 virtual void Generate(); |
399 }; | 505 }; |
400 | 506 |
401 | 507 |
(...skipping 1784 matching lines...)
2186 if (node->starts_initialization_block()) { | 2292 if (node->starts_initialization_block()) { |
2187 ASSERT(target.type() == Reference::NAMED || | 2293 ASSERT(target.type() == Reference::NAMED || |
2188 target.type() == Reference::KEYED); | 2294 target.type() == Reference::KEYED); |
2189 // Change to slow case in the beginning of an initialization | 2295 // Change to slow case in the beginning of an initialization |
2190 // block to avoid the quadratic behavior of repeatedly adding | 2296 // block to avoid the quadratic behavior of repeatedly adding |
2191 // fast properties. | 2297 // fast properties. |
2192 | 2298 |
2193 // The receiver is the argument to the runtime call. It is the | 2299 // The receiver is the argument to the runtime call. It is the |
2194 // first value pushed when the reference was loaded to the | 2300 // first value pushed when the reference was loaded to the |
2195 // frame. | 2301 // frame. |
2196 // TODO(X64): Enable this and the switch back to fast, once they work. | 2302 frame_->PushElementAt(target.size() - 1); |
2197 // frame_->PushElementAt(target.size() - 1); | 2303 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1); |
2198 // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1); | |
2199 } | 2304 } |
2200 if (node->op() == Token::ASSIGN || | 2305 if (node->op() == Token::ASSIGN || |
2201 node->op() == Token::INIT_VAR || | 2306 node->op() == Token::INIT_VAR || |
2202 node->op() == Token::INIT_CONST) { | 2307 node->op() == Token::INIT_CONST) { |
2203 Load(node->value()); | 2308 Load(node->value()); |
2204 | 2309 |
2205 } else { | 2310 } else { |
2206 // Literal* literal = node->value()->AsLiteral(); | 2311 Literal* literal = node->value()->AsLiteral(); |
2207 bool overwrite_value = | 2312 bool overwrite_value = |
2208 (node->value()->AsBinaryOperation() != NULL && | 2313 (node->value()->AsBinaryOperation() != NULL && |
2209 node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); | 2314 node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); |
2210 // Variable* right_var = node->value()->AsVariableProxy()->AsVariable(); | 2315 Variable* right_var = node->value()->AsVariableProxy()->AsVariable(); |
2211 // There are two cases where the target is not read in the right hand | 2316 // There are two cases where the target is not read in the right hand |
2212 // side, that are easy to test for: the right hand side is a literal, | 2317 // side, that are easy to test for: the right hand side is a literal, |
2213 // or the right hand side is a different variable. TakeValue invalidates | 2318 // or the right hand side is a different variable. TakeValue invalidates |
2214 // the target, with an implicit promise that it will be written to again | 2319 // the target, with an implicit promise that it will be written to again |
2215 // before it is read. | 2320 // before it is read. |
2216 // TODO(X64): Implement TakeValue optimization. Check issue 150016. | 2321 if (literal != NULL || (right_var != NULL && right_var != var)) { |
2217 if (false) { | 2322 target.TakeValue(NOT_INSIDE_TYPEOF); |
2218 // if (literal != NULL || (right_var != NULL && right_var != var)) { | |
2219 // target.TakeValue(NOT_INSIDE_TYPEOF); | |
2220 } else { | 2323 } else { |
2221 target.GetValue(NOT_INSIDE_TYPEOF); | 2324 target.GetValue(NOT_INSIDE_TYPEOF); |
2222 } | 2325 } |
2223 Load(node->value()); | 2326 Load(node->value()); |
2224 GenericBinaryOperation(node->binary_op(), | 2327 GenericBinaryOperation(node->binary_op(), |
2225 node->type(), | 2328 node->type(), |
2226 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); | 2329 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); |
2227 } | 2330 } |
2228 | 2331 |
2229 if (var != NULL && | 2332 if (var != NULL && |
(...skipping 10 matching lines...)
2240 } else { | 2343 } else { |
2241 target.SetValue(NOT_CONST_INIT); | 2344 target.SetValue(NOT_CONST_INIT); |
2242 } | 2345 } |
2243 if (node->ends_initialization_block()) { | 2346 if (node->ends_initialization_block()) { |
2244 ASSERT(target.type() == Reference::NAMED || | 2347 ASSERT(target.type() == Reference::NAMED || |
2245 target.type() == Reference::KEYED); | 2348 target.type() == Reference::KEYED); |
2246 // End of initialization block. Revert to fast case. The | 2349 // End of initialization block. Revert to fast case. The |
2247 // argument to the runtime call is the receiver, which is the | 2350 // argument to the runtime call is the receiver, which is the |
2248 // first value pushed as part of the reference, which is below | 2351 // first value pushed as part of the reference, which is below |
2249 // the lhs value. | 2352 // the lhs value. |
2250 // TODO(X64): Enable this once ToFastProperties works. | 2353 frame_->PushElementAt(target.size()); |
2251 // frame_->PushElementAt(target.size()); | 2354 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1); |
2252 // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1); | |
2253 } | 2355 } |
2254 } | 2356 } |
2255 } | 2357 } |
2256 } | 2358 } |
2257 | 2359 |
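
For context on the ToSlowProperties/ToFastProperties calls this change re-enables: an initialization block is a run of consecutive stores to the same receiver, and the flip avoids per-store map transitions. An illustrative example (not from this CL):

    // In JavaScript, a constructor body like
    //   this.x = 1; this.y = 2; this.z = 3;
    // would add one fast property per store, paying a map transition each
    // time (quadratic work across the block). The generated code therefore
    // calls Runtime::kToSlowProperties on the receiver before the first
    // store and Runtime::kToFastProperties after the last one.
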
2258 | 2360 |
2259 void CodeGenerator::VisitThrow(Throw* node) { | 2361 void CodeGenerator::VisitThrow(Throw* node) { |
2260 Comment cmnt(masm_, "[ Throw"); | 2362 Comment cmnt(masm_, "[ Throw"); |
2261 CodeForStatementPosition(node); | 2363 CodeForStatementPosition(node); |
2262 | 2364 |
(...skipping 1375 matching lines...)
3638 // 'true' => true. | 3740 // 'true' => true. |
3639 __ Cmp(value.reg(), Factory::true_value()); | 3741 __ Cmp(value.reg(), Factory::true_value()); |
3640 dest->true_target()->Branch(equal); | 3742 dest->true_target()->Branch(equal); |
3641 | 3743 |
3642 // 'undefined' => false. | 3744 // 'undefined' => false. |
3643 __ Cmp(value.reg(), Factory::undefined_value()); | 3745 __ Cmp(value.reg(), Factory::undefined_value()); |
3644 dest->false_target()->Branch(equal); | 3746 dest->false_target()->Branch(equal); |
3645 | 3747 |
3646 // Smi => false iff zero. | 3748 // Smi => false iff zero. |
3647 ASSERT(kSmiTag == 0); | 3749 ASSERT(kSmiTag == 0); |
3648 __ testq(value.reg(), value.reg()); | 3750 __ testl(value.reg(), value.reg()); |
3649 dest->false_target()->Branch(zero); | 3751 dest->false_target()->Branch(zero); |
3650 __ testl(value.reg(), Immediate(kSmiTagMask)); | 3752 __ testl(value.reg(), Immediate(kSmiTagMask)); |
3651 dest->true_target()->Branch(zero); | 3753 dest->true_target()->Branch(zero); |
3652 | 3754 |
3653 // Call the stub for all other cases. | 3755 // Call the stub for all other cases. |
3654 frame_->Push(&value); // Undo the Pop() from above. | 3756 frame_->Push(&value); // Undo the Pop() from above. |
3655 ToBooleanStub stub; | 3757 ToBooleanStub stub; |
3656 Result temp = frame_->CallStub(&stub, 1); | 3758 Result temp = frame_->CallStub(&stub, 1); |
3657 // Convert the result to a condition code. | 3759 // Convert the result to a condition code. |
3658 __ testq(temp.reg(), temp.reg()); | 3760 __ testq(temp.reg(), temp.reg()); |
(...skipping 464 matching lines...)
4123 // load IC call. | 4225 // load IC call. |
4124 LoadGlobal(); | 4226 LoadGlobal(); |
4125 frame_->Push(slot->var()->name()); | 4227 frame_->Push(slot->var()->name()); |
4126 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) | 4228 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) |
4127 ? RelocInfo::CODE_TARGET | 4229 ? RelocInfo::CODE_TARGET |
4128 : RelocInfo::CODE_TARGET_CONTEXT; | 4230 : RelocInfo::CODE_TARGET_CONTEXT; |
4129 Result answer = frame_->CallLoadIC(mode); | 4231 Result answer = frame_->CallLoadIC(mode); |
4130 // A test rax instruction following the call signals that the inobject | 4232 // A test rax instruction following the call signals that the inobject |
4131 // property case was inlined. Ensure that there is not a test eax | 4233 // property case was inlined. Ensure that there is not a test eax |
4132 // instruction here. | 4234 // instruction here. |
4133 __ nop(); | 4235 masm_->nop(); |
4134 // Discard the global object. The result is in answer. | 4236 // Discard the global object. The result is in answer. |
4135 frame_->Drop(); | 4237 frame_->Drop(); |
4136 return answer; | 4238 return answer; |
4137 } | 4239 } |
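
The switch from __ nop() to masm_->nop() follows the same pattern as the patch sites elsewhere in this file: the byte right after the call's return address acts as a flag. A sketch of the invariant as I read it (the opcode values are assumptions about the encodings involved):

    //   call <LoadIC>                 call <LoadIC>
    //   test eax, imm32  (0xA9)       nop  (0x90)
    //   ^ inlined fast path exists;   ^ no inlined fast path;
    //     IC init code patches it       nothing to patch
    // Emitting the nop via masm_-> keeps generated-code-coverage
    // instrumentation from inserting bytes that could be misread as the
    // test marker.
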
4138 | 4240 |
4139 | 4241 |
4140 void CodeGenerator::LoadGlobal() { | 4242 void CodeGenerator::LoadGlobal() { |
4141 if (in_spilled_code()) { | 4243 if (in_spilled_code()) { |
4142 frame_->EmitPush(GlobalObject()); | 4244 frame_->EmitPush(GlobalObject()); |
4143 } else { | 4245 } else { |
(...skipping 549 matching lines...)
4693 __ Call(ic, RelocInfo::CODE_TARGET); | 4795 __ Call(ic, RelocInfo::CODE_TARGET); |
4694 // The call must be followed by a test rax instruction to indicate | 4796 // The call must be followed by a test rax instruction to indicate |
4695 // that the inobject property case was inlined. | 4797 // that the inobject property case was inlined. |
4696 // | 4798 // |
4697 // Store the delta to the map check instruction here in the test | 4799 // Store the delta to the map check instruction here in the test |
4698 // instruction. Use masm_-> instead of the __ macro since the | 4800 // instruction. Use masm_-> instead of the __ macro since the |
4699 // latter can't return a value. | 4801 // latter can't return a value. |
4700 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | 4802 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); |
4701 // Here we use masm_-> instead of the __ macro because this is the | 4803 // Here we use masm_-> instead of the __ macro because this is the |
4702 // instruction that gets patched and coverage code gets in the way. | 4804 // instruction that gets patched and coverage code gets in the way. |
4703 masm_->testq(rax, Immediate(-delta_to_patch_site)); | 4805 masm_->testl(rax, Immediate(-delta_to_patch_site)); |
4704 __ IncrementCounter(&Counters::named_load_inline_miss, 1); | 4806 __ IncrementCounter(&Counters::named_load_inline_miss, 1); |
4705 | 4807 |
4706 if (!dst_.is(rax)) __ movq(dst_, rax); | 4808 if (!dst_.is(rax)) __ movq(dst_, rax); |
4707 __ pop(receiver_); | 4809 __ pop(receiver_); |
4708 } | 4810 } |
4709 | 4811 |
4710 | 4812 |
4711 | 4813 |
4712 | 4814 |
4713 // The result of src + value is in dst. It either overflowed or was not | 4815 // The result of src + value is in dst. It either overflowed or was not |
(...skipping 568 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5282 | 5384 |
5283 __ bind(deferred->patch_site()); | 5385 __ bind(deferred->patch_site()); |
5284 // This is the map check instruction that will be patched (so we can't | 5386 // This is the map check instruction that will be patched (so we can't |
5285 // use the double underscore macro that may insert instructions). | 5387 // use the double underscore macro that may insert instructions). |
5286 // Initially use an invalid map to force a failure. | 5388 // Initially use an invalid map to force a failure. |
5287 masm->Move(kScratchRegister, Factory::null_value()); | 5389 masm->Move(kScratchRegister, Factory::null_value()); |
5288 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), | 5390 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), |
5289 kScratchRegister); | 5391 kScratchRegister); |
5290 // This branch is always a forwards branch so it's always a fixed | 5392 // This branch is always a forwards branch so it's always a fixed |
5291 // size which allows the assert below to succeed and patching to work. | 5393 // size which allows the assert below to succeed and patching to work. |
5292 deferred->Branch(not_equal); | 5394 // Don't use deferred->Branch(...), since that might add coverage code. |
| 5395 masm->j(not_equal, deferred->entry_label()); |
5293 | 5396 |
5294 // The delta from the patch label to the load offset must be | 5397 // The delta from the patch label to the load offset must be |
5295 // statically known. | 5398 // statically known. |
5296 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) == | 5399 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) == |
5297 LoadIC::kOffsetToLoadInstruction); | 5400 LoadIC::kOffsetToLoadInstruction); |
5298 // The initial (invalid) offset has to be large enough to force | 5401 // The initial (invalid) offset has to be large enough to force |
5299 // a 32-bit instruction encoding to allow patching with an | 5402 // a 32-bit instruction encoding to allow patching with an |
5300 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag). | 5403 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag). |
5301 int offset = kMaxInt; | 5404 int offset = kMaxInt; |
5302 masm->movq(value.reg(), FieldOperand(receiver.reg(), offset)); | 5405 masm->movq(value.reg(), FieldOperand(receiver.reg(), offset)); |
5303 | 5406 |
5304 __ IncrementCounter(&Counters::named_load_inline, 1); | 5407 __ IncrementCounter(&Counters::named_load_inline, 1); |
5305 deferred->BindExit(); | 5408 deferred->BindExit(); |
5306 cgen_->frame()->Push(&receiver); | 5409 cgen_->frame()->Push(&receiver); |
5307 cgen_->frame()->Push(&value); | 5410 cgen_->frame()->Push(&value); |
5308 } | 5411 } |
5309 break; | 5412 break; |
5310 } | 5413 } |
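
The kMaxInt placeholder in the inlined named load is an encoding constraint: the patched-in field offset has to fit the displacement slot originally emitted. A sketch of why, based on standard x86-64 ModRM displacement forms (the concrete offsets are illustrative):

    // Memory operands encode small displacements in one byte (disp8) and
    // larger ones in four bytes (disp32):
    //   movq rax, [rbx + 0x10]        ; disp8  form, 1 displacement byte
    //   movq rax, [rbx + 0x7fffffff]  ; disp32 form, 4 displacement bytes
    // Patching rewrites the displacement in place without moving code, so
    // the initial (invalid) offset must be large enough to force the
    // disp32 form; kMaxInt - kHeapObjectTag does that and can later hold
    // any real in-object offset.
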
5311 | 5414 |
5312 case KEYED: { | 5415 case KEYED: { |
 5313 // TODO(1241834): Make sure that it is safe to ignore the | 5416 // TODO(1241834): Make sure that it is safe to ignore the |
5314 // distinction between expressions in a typeof and not in a typeof. | 5417 // distinction between expressions in a typeof and not in a typeof. |
5315 Comment cmnt(masm, "[ Load from keyed Property"); | 5418 Comment cmnt(masm, "[ Load from keyed Property"); |
5316 Variable* var = expression_->AsVariableProxy()->AsVariable(); | 5419 Variable* var = expression_->AsVariableProxy()->AsVariable(); |
5317 bool is_global = var != NULL; | 5420 bool is_global = var != NULL; |
5318 ASSERT(!is_global || var->is_global()); | 5421 ASSERT(!is_global || var->is_global()); |
| 5422 |
5319 // Inline array load code if inside of a loop. We do not know | 5423 // Inline array load code if inside of a loop. We do not know |
5320 // the receiver map yet, so we initially generate the code with | 5424 // the receiver map yet, so we initially generate the code with |
5321 // a check against an invalid map. In the inline cache code, we | 5425 // a check against an invalid map. In the inline cache code, we |
5322 // patch the map check if appropriate. | 5426 // patch the map check if appropriate. |
| 5427 if (cgen_->loop_nesting() > 0) { |
| 5428 Comment cmnt(masm, "[ Inlined load from keyed Property"); |
5323 | 5429 |
5324 // TODO(x64): Implement inlined loads for keyed properties. | 5430 Result key = cgen_->frame()->Pop(); |
5325 // Make sure to load length field as a 32-bit quantity. | 5431 Result receiver = cgen_->frame()->Pop(); |
5326 // Comment cmnt(masm, "[ Load from keyed Property"); | 5432 key.ToRegister(); |
| 5433 receiver.ToRegister(); |
5327 | 5434 |
5328 RelocInfo::Mode mode = is_global | 5435 // Use a fresh temporary to load the elements without destroying |
5329 ? RelocInfo::CODE_TARGET_CONTEXT | 5436 // the receiver which is needed for the deferred slow case. |
5330 : RelocInfo::CODE_TARGET; | 5437 Result elements = cgen_->allocator()->Allocate(); |
5331 Result answer = cgen_->frame()->CallKeyedLoadIC(mode); | 5438 ASSERT(elements.is_valid()); |
5332 // Make sure that we do not have a test instruction after the | 5439 |
5333 // call. A test instruction after the call is used to | 5440 // Use a fresh temporary for the index and later the loaded |
5334 // indicate that we have generated an inline version of the | 5441 // value. |
5335 // keyed load. The explicit nop instruction is here because | 5442 Result index = cgen_->allocator()->Allocate(); |
5336 // the push that follows might be peep-hole optimized away. | 5443 ASSERT(index.is_valid()); |
5337 __ nop(); | 5444 |
5338 cgen_->frame()->Push(&answer); | 5445 DeferredReferenceGetKeyedValue* deferred = |
| 5446 new DeferredReferenceGetKeyedValue(index.reg(), |
| 5447 receiver.reg(), |
| 5448 key.reg(), |
| 5449 is_global); |
| 5450 |
| 5451 // Check that the receiver is not a smi (only needed if this |
| 5452 // is not a load from the global context) and that it has the |
| 5453 // expected map. |
| 5454 if (!is_global) { |
| 5455 __ testl(receiver.reg(), Immediate(kSmiTagMask)); |
| 5456 deferred->Branch(zero); |
| 5457 } |
| 5458 |
| 5459 // Initially, use an invalid map. The map is patched in the IC |
| 5460 // initialization code. |
| 5461 __ bind(deferred->patch_site()); |
| 5462 // Use masm-> here instead of the double underscore macro since extra |
| 5463 // coverage code can interfere with the patching. |
 | 5464 masm->movq(kScratchRegister, Factory::null_value(), RelocInfo::EMBEDDED_OBJECT); |
| 5465 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), |
| 5466 kScratchRegister); |
| 5467 deferred->Branch(not_equal); |
| 5468 |
| 5469 // Check that the key is a non-negative smi. |
| 5470 __ testl(key.reg(), |
| 5471 Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u))); |
| 5472 deferred->Branch(not_zero); |
| 5473 |
| 5474 // Get the elements array from the receiver and check that it |
| 5475 // is not a dictionary. |
| 5476 __ movq(elements.reg(), |
| 5477 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); |
| 5478 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), |
| 5479 Factory::fixed_array_map()); |
| 5480 deferred->Branch(not_equal); |
| 5481 |
| 5482 // Shift the key to get the actual index value and check that |
| 5483 // it is within bounds. |
| 5484 __ movl(index.reg(), key.reg()); |
| 5485 __ shrl(index.reg(), Immediate(kSmiTagSize)); |
| 5486 __ cmpl(index.reg(), |
| 5487 FieldOperand(elements.reg(), FixedArray::kLengthOffset)); |
| 5488 deferred->Branch(above_equal); |
| 5489 |
| 5490 // The index register holds the un-smi-tagged key. It has been |
| 5491 // zero-extended to 64-bits, so it can be used directly as index in the |
| 5492 // operand below. |
| 5493 // Load and check that the result is not the hole. We could |
| 5494 // reuse the index or elements register for the value. |
| 5495 // |
| 5496 // TODO(206): Consider whether it makes sense to try some |
| 5497 // heuristic about which register to reuse. For example, if |
 | 5498 // one is rax, then we can reuse that one because the value |
| 5499 // coming from the deferred code will be in rax. |
| 5500 Result value = index; |
| 5501 __ movq(value.reg(), |
| 5502 Operand(elements.reg(), |
| 5503 index.reg(), |
| 5504 times_pointer_size, |
| 5505 FixedArray::kHeaderSize - kHeapObjectTag)); |
| 5506 elements.Unuse(); |
| 5507 index.Unuse(); |
| 5508 __ Cmp(value.reg(), Factory::the_hole_value()); |
| 5509 deferred->Branch(equal); |
| 5510 __ IncrementCounter(&Counters::keyed_load_inline, 1); |
| 5511 |
| 5512 deferred->BindExit(); |
| 5513 // Restore the receiver and key to the frame and push the |
| 5514 // result on top of it. |
| 5515 cgen_->frame()->Push(&receiver); |
| 5516 cgen_->frame()->Push(&key); |
| 5517 cgen_->frame()->Push(&value); |
| 5518 |
| 5519 } else { |
| 5520 Comment cmnt(masm, "[ Load from keyed Property"); |
| 5521 RelocInfo::Mode mode = is_global |
| 5522 ? RelocInfo::CODE_TARGET_CONTEXT |
| 5523 : RelocInfo::CODE_TARGET; |
| 5524 Result answer = cgen_->frame()->CallKeyedLoadIC(mode); |
| 5525 // Make sure that we do not have a test instruction after the |
| 5526 // call. A test instruction after the call is used to |
| 5527 // indicate that we have generated an inline version of the |
| 5528 // keyed load. The explicit nop instruction is here because |
| 5529 // the push that follows might be peep-hole optimized away. |
| 5530 __ nop(); |
| 5531 cgen_->frame()->Push(&answer); |
| 5532 } |
5339 break; | 5533 break; |
5340 } | 5534 } |
5341 | 5535 |
5342 default: | 5536 default: |
5343 UNREACHABLE(); | 5537 UNREACHABLE(); |
5344 } | 5538 } |
5345 } | 5539 } |
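
Both inlined keyed paths guard the key with one test against kSmiTagMask | 0x80000000. A small sketch of why a single mask checks "non-negative smi", assuming kSmiTag == 0 and kSmiTagSize == 1 as asserted elsewhere in this file (the helper is illustrative):

    // A 32-bit smi stores (index << 1): bit 0 is the tag, bit 31 the sign.
    bool IsNonNegativeSmi32(uint32_t raw) {
      const uint32_t kSmiTagMask = 1u;        // must be 0: value is a smi
      const uint32_t kSignBit = 0x80000000u;  // must be 0: value is >= 0
      return (raw & (kSmiTagMask | kSignBit)) == 0;  // else take deferred path
    }
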
5346 | 5540 |
5347 | 5541 |
5348 void Reference::TakeValue(TypeofState typeof_state) { | 5542 void Reference::TakeValue(TypeofState typeof_state) { |
(...skipping 46 matching lines...)
5395 Comment cmnt(masm, "[ Store to named Property"); | 5589 Comment cmnt(masm, "[ Store to named Property"); |
5396 cgen_->frame()->Push(GetName()); | 5590 cgen_->frame()->Push(GetName()); |
5397 Result answer = cgen_->frame()->CallStoreIC(); | 5591 Result answer = cgen_->frame()->CallStoreIC(); |
5398 cgen_->frame()->Push(&answer); | 5592 cgen_->frame()->Push(&answer); |
5399 break; | 5593 break; |
5400 } | 5594 } |
5401 | 5595 |
5402 case KEYED: { | 5596 case KEYED: { |
5403 Comment cmnt(masm, "[ Store to keyed Property"); | 5597 Comment cmnt(masm, "[ Store to keyed Property"); |
5404 | 5598 |
5405 // TODO(x64): Implement inlined version of keyed stores. | 5599 // Generate inlined version of the keyed store if the code is in |
| 5600 // a loop and the key is likely to be a smi. |
| 5601 Property* property = expression()->AsProperty(); |
| 5602 ASSERT(property != NULL); |
| 5603 SmiAnalysis* key_smi_analysis = property->key()->type(); |
5406 | 5604 |
5407 Result answer = cgen_->frame()->CallKeyedStoreIC(); | 5605 if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) { |
5408 // Make sure that we do not have a test instruction after the | 5606 Comment cmnt(masm, "[ Inlined store to keyed Property"); |
5409 // call. A test instruction after the call is used to | 5607 |
5410 // indicate that we have generated an inline version of the | 5608 // Get the receiver, key and value into registers. |
5411 // keyed store. | 5609 Result value = cgen_->frame()->Pop(); |
5412 __ nop(); | 5610 Result key = cgen_->frame()->Pop(); |
5413 cgen_->frame()->Push(&answer); | 5611 Result receiver = cgen_->frame()->Pop(); |
| 5612 |
| 5613 Result tmp = cgen_->allocator_->Allocate(); |
| 5614 ASSERT(tmp.is_valid()); |
| 5615 |
| 5616 // Determine whether the value is a constant before putting it |
| 5617 // in a register. |
| 5618 bool value_is_constant = value.is_constant(); |
| 5619 |
| 5620 // Make sure that value, key and receiver are in registers. |
| 5621 value.ToRegister(); |
| 5622 key.ToRegister(); |
| 5623 receiver.ToRegister(); |
| 5624 |
| 5625 DeferredReferenceSetKeyedValue* deferred = |
| 5626 new DeferredReferenceSetKeyedValue(value.reg(), |
| 5627 key.reg(), |
| 5628 receiver.reg()); |
| 5629 |
| 5630 // Check that the value is a smi if it is not a constant. |
| 5631 // We can skip the write barrier for smis and constants. |
| 5632 if (!value_is_constant) { |
| 5633 __ testl(value.reg(), Immediate(kSmiTagMask)); |
| 5634 deferred->Branch(not_zero); |
| 5635 } |
| 5636 |
| 5637 // Check that the key is a non-negative smi. |
| 5638 __ testl(key.reg(), |
| 5639 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U))); |
| 5640 deferred->Branch(not_zero); |
| 5641 |
| 5642 // Check that the receiver is not a smi. |
| 5643 __ testl(receiver.reg(), Immediate(kSmiTagMask)); |
| 5644 deferred->Branch(zero); |
| 5645 |
| 5646 // Check that the receiver is a JSArray. |
| 5647 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister); |
| 5648 deferred->Branch(not_equal); |
| 5649 |
| 5650 // Check that the key is within bounds. Both the key and the |
| 5651 // length of the JSArray are smis, so compare only low 32 bits. |
| 5652 __ cmpl(key.reg(), |
| 5653 FieldOperand(receiver.reg(), JSArray::kLengthOffset)); |
| 5654 deferred->Branch(greater_equal); |
| 5655 |
| 5656 // Get the elements array from the receiver and check that it |
| 5657 // is a flat array (not a dictionary). |
| 5658 __ movq(tmp.reg(), |
| 5659 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); |
| 5660 // Bind the deferred code patch site to be able to locate the |
| 5661 // fixed array map comparison. When debugging, we patch this |
| 5662 // comparison to always fail so that we will hit the IC call |
| 5663 // in the deferred code which will allow the debugger to |
| 5664 // break for fast case stores. |
| 5665 __ bind(deferred->patch_site()); |
| 5666 // Avoid using __ to ensure the distance from patch_site |
| 5667 // to the map address is always the same. |
| 5668 masm->movq(kScratchRegister, Factory::fixed_array_map(), |
| 5669 RelocInfo::EMBEDDED_OBJECT); |
| 5670 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset), |
| 5671 kScratchRegister); |
| 5672 deferred->Branch(not_equal); |
| 5673 |
| 5674 // Store the value. |
| 5675 ASSERT_EQ(1, kSmiTagSize); |
| 5676 ASSERT_EQ(0, kSmiTag); |
| 5677 __ movq(Operand(tmp.reg(), |
| 5678 key.reg(), |
| 5679 times_half_pointer_size, |
| 5680 FixedArray::kHeaderSize - kHeapObjectTag), |
| 5681 value.reg()); |
| 5682 __ IncrementCounter(&Counters::keyed_store_inline, 1); |
| 5683 |
| 5684 deferred->BindExit(); |
| 5685 |
| 5686 cgen_->frame()->Push(&receiver); |
| 5687 cgen_->frame()->Push(&key); |
| 5688 cgen_->frame()->Push(&value); |
| 5689 } else { |
| 5690 Result answer = cgen_->frame()->CallKeyedStoreIC(); |
| 5691 // Make sure that we do not have a test instruction after the |
| 5692 // call. A test instruction after the call is used to |
| 5693 // indicate that we have generated an inline version of the |
| 5694 // keyed store. |
| 5695 masm->nop(); |
| 5696 cgen_->frame()->Push(&answer); |
| 5697 } |
5414 break; | 5698 break; |
5415 } | 5699 } |
5416 | 5700 |
5417 default: | 5701 default: |
5418 UNREACHABLE(); | 5702 UNREACHABLE(); |
5419 } | 5703 } |
5420 } | 5704 } |
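
The inlined store indexes the elements array with the key still smi-tagged, which is why it scales by times_half_pointer_size while the load path untags first and scales by times_pointer_size. The arithmetic, as a sketch:

    // With kSmiTagSize == 1, the tagged key equals index << 1, so scaling
    // the tagged key by half a pointer reaches the same slot as scaling
    // the raw index by a whole pointer (kPointerSize == 8 on x64):
    //   base + (index << 1) * (kPointerSize / 2) == base + index * kPointerSize
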
5421 | 5705 |
5422 | 5706 |
5423 void ToBooleanStub::Generate(MacroAssembler* masm) { | 5707 void ToBooleanStub::Generate(MacroAssembler* masm) { |
(...skipping 1519 matching lines...)
6943 int CompareStub::MinorKey() { | 7227 int CompareStub::MinorKey() { |
6944 // Encode the two parameters in a unique 16 bit value. | 7228 // Encode the two parameters in a unique 16 bit value. |
6945 ASSERT(static_cast<unsigned>(cc_) < (1 << 15)); | 7229 ASSERT(static_cast<unsigned>(cc_) < (1 << 15)); |
6946 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0); | 7230 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0); |
6947 } | 7231 } |
6948 | 7232 |
6949 | 7233 |
6950 #undef __ | 7234 #undef __ |
6951 | 7235 |
6952 } } // namespace v8::internal | 7236 } } // namespace v8::internal |