OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1912 matching lines...)
1923 // the object) | 1923 // the object) |
1924 __ movq(rcx, rax); | 1924 __ movq(rcx, rax); |
1925 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset)); | 1925 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset)); |
1926 // Get the bridge array held in the enumeration index field. | 1926 // Get the bridge array held in the enumeration index field. |
1927 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset)); | 1927 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset)); |
1928 // Get the cache from the bridge array. | 1928 // Get the cache from the bridge array. |
1929 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset)); | 1929 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset)); |
1930 | 1930 |
1931 frame_->EmitPush(rax); // <- slot 3 | 1931 frame_->EmitPush(rax); // <- slot 3 |
1932 frame_->EmitPush(rdx); // <- slot 2 | 1932 frame_->EmitPush(rdx); // <- slot 2 |
1933 __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset)); | 1933 __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset)); |
1934 __ Integer32ToSmi(rax, rax); | |
1935 frame_->EmitPush(rax); // <- slot 1 | 1934 frame_->EmitPush(rax); // <- slot 1 |
1936 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 | 1935 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 |
1937 entry.Jump(); | 1936 entry.Jump(); |
1938 | 1937 |
1939 fixed_array.Bind(); | 1938 fixed_array.Bind(); |
1940 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast) | 1939 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast) |
1941 frame_->EmitPush(Smi::FromInt(0)); // <- slot 3 | 1940 frame_->EmitPush(Smi::FromInt(0)); // <- slot 3 |
1942 frame_->EmitPush(rax); // <- slot 2 | 1941 frame_->EmitPush(rax); // <- slot 2 |
1943 | 1942 |
1944 // Push the length of the array and the initial index onto the stack. | 1943 // Push the length of the array and the initial index onto the stack. |
1945 __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset)); | 1944 __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset)); |
1946 __ Integer32ToSmi(rax, rax); | |
1947 frame_->EmitPush(rax); // <- slot 1 | 1945 frame_->EmitPush(rax); // <- slot 1 |
1948 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 | 1946 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 |
1949 | 1947 |
1950 // Condition. | 1948 // Condition. |
1951 entry.Bind(); | 1949 entry.Bind(); |
1952 // Grab the current frame's height for the break and continue | 1950 // Grab the current frame's height for the break and continue |
1953 // targets only after all the state is pushed on the frame. | 1951 // targets only after all the state is pushed on the frame. |
1954 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | 1952 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); |
1955 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | 1953 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); |
1956 | 1954 |
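The NEW column can load the length field with a single movq here because this patch stores FixedArray lengths pre-tagged as smis, making the explicit Integer32ToSmi redundant. A minimal sketch of the x64 smi encoding this relies on (standalone helpers written for illustration; the real ones are MacroAssembler methods):

    #include <cstdint>

    // On x64 a smi keeps its 32-bit payload in the upper half of the word,
    // so tagging is a left shift and the low 32 bits stay zero.
    inline int64_t Integer32ToSmi(int32_t value) {
      return static_cast<int64_t>(value) << 32;
    }

    inline int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }

The same pattern (a movl + Integer32ToSmi pair collapsing to one movq) appears in both branches of this hunk.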
(...skipping 2672 matching lines...)
4629 | 4627 |
4630 // Fill out the elements FixedArray. | 4628 // Fill out the elements FixedArray. |
4631 // rax: JSArray. | 4629 // rax: JSArray. |
4632 // rcx: FixedArray. | 4630 // rcx: FixedArray. |
4633 // rbx: Number of elements in array as int32. | 4631 // rbx: Number of elements in array as int32. |
4634 | 4632 |
4635 // Set map. | 4633 // Set map. |
4636 __ Move(FieldOperand(rcx, HeapObject::kMapOffset), | 4634 __ Move(FieldOperand(rcx, HeapObject::kMapOffset), |
4637 Factory::fixed_array_map()); | 4635 Factory::fixed_array_map()); |
4638 // Set length. | 4636 // Set length. |
4639 __ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx); | 4637 __ Integer32ToSmi(rdx, rbx); |
| 4638 __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx); |
4640 // Fill contents of fixed-array with the-hole. | 4639 // Fill contents of fixed-array with the-hole. |
4641 __ Move(rdx, Factory::the_hole_value()); | 4640 __ Move(rdx, Factory::the_hole_value()); |
4642 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize)); | 4641 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize)); |
4643 // Fill fixed array elements with hole. | 4642 // Fill fixed array elements with hole. |
4644 // rax: JSArray. | 4643 // rax: JSArray. |
4645 // rbx: Number of elements in array that remains to be filled, as int32. | 4644 // rbx: Number of elements in array that remains to be filled, as int32. |
4646 // rcx: Start of elements in FixedArray. | 4645 // rcx: Start of elements in FixedArray. |
4647 // rdx: the hole. | 4646 // rdx: the hole. |
4648 Label loop; | 4647 Label loop; |
4649 __ testl(rbx, rbx); | 4648 __ testl(rbx, rbx); |
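The fill loop continues past the fold, but the register comments spell out the contract: store the-hole into each of the rbx remaining slots starting at rcx. The same fill in scalar form (illustrative names, not V8 API):

    // elements: first element slot of the FixedArray; count: untagged length.
    void FillWithHoles(uint64_t* elements, int32_t count, uint64_t the_hole) {
      for (int32_t i = 0; i < count; i++) {
        elements[i] = the_hole;  // every slot starts out as the hole sentinel
      }
    }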
(...skipping 98 matching lines...)
4748 __ InvokeFunction(rdi, expected, CALL_FUNCTION); | 4747 __ InvokeFunction(rdi, expected, CALL_FUNCTION); |
4749 | 4748 |
4750 // Find a place to put the new cached value. | 4749 // Find a place to put the new cached value. |
4751 Label add_new_entry, update_cache; | 4750 Label add_new_entry, update_cache; |
4752 __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache | 4751 __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache |
4753 // Possible optimization: cache size is constant for the given cache | 4752 // Possible optimization: cache size is constant for the given cache |
4754 // so technically we could use a constant here. However, if we have | 4753 // so technically we could use a constant here. However, if we have |
4755 // a cache miss, this optimization would hardly matter. | 4754 // a cache miss, this optimization would hardly matter. |
4756 | 4755 |
4757 // Check if we can add a new entry to the cache. | 4756 // Check if we can add a new entry to the cache. |
4758 __ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset)); | 4757 __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset)); |
4759 __ SmiToInteger32(r9, | 4758 __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset)); |
4760 FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset)); | 4759 __ SmiCompare(rbx, r9); |
4761 __ cmpl(rbx, r9); | |
4762 __ j(greater, &add_new_entry); | 4760 __ j(greater, &add_new_entry); |
4763 | 4761 |
4764 // Check if we can evict the entry after the finger. | 4762 // Check if we can evict the entry after the finger. |
4765 __ SmiToInteger32(rdx, | 4763 __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset)); |
4766 FieldOperand(rcx, JSFunctionResultCache::kFingerOffset)); | 4764 __ SmiToInteger32(rdx, rdx); |
| 4765 __ SmiToInteger32(rbx, rbx); |
4767 __ addq(rdx, kEntrySizeImm); | 4766 __ addq(rdx, kEntrySizeImm); |
4768 Label forward; | 4767 Label forward; |
4769 __ cmpq(rbx, rdx); | 4768 __ cmpq(rbx, rdx); |
4770 __ j(greater, &forward); | 4769 __ j(greater, &forward); |
4771 // Need to wrap over the cache. | 4770 // Need to wrap over the cache. |
4772 __ movl(rdx, kEntriesIndexImm); | 4771 __ movl(rdx, kEntriesIndexImm); |
4773 __ bind(&forward); | 4772 __ bind(&forward); |
4774 __ Integer32ToSmi(r9, rdx); | 4773 __ Integer32ToSmi(r9, rdx); |
4775 __ jmp(&update_cache); | 4774 __ jmp(&update_cache); |
4776 | 4775 |
4777 __ bind(&add_new_entry); | 4776 __ bind(&add_new_entry); |
4778 // r9 holds cache size as int. | 4777 // r9 holds cache size as smi. |
4779 __ movl(rdx, r9); | 4778 __ SmiToInteger32(rdx, r9); |
4780 __ Integer32ToSmi(r9, r9); | 4779 __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize)); |
4781 __ leal(rbx, Operand(rdx, JSFunctionResultCache::kEntrySize)); | |
4782 __ Integer32ToSmi(rbx, rbx); | |
4783 __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx); | 4780 __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx); |
4784 | 4781 |
4785 // Update the cache itself. | 4782 // Update the cache itself. |
4786 // rdx holds the index as int. | 4783 // rdx holds the index as int. |
4787 // r9 holds the index as smi. | 4784 // r9 holds the index as smi. |
4788 __ bind(&update_cache); | 4785 __ bind(&update_cache); |
4789 __ pop(rbx); // restore the key | 4786 __ pop(rbx); // restore the key |
4790 __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9); | 4787 __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9); |
4791 // Store key. | 4788 // Store key. |
4792 __ movq(ArrayElement(rcx, rdx), rbx); | 4789 __ movq(ArrayElement(rcx, rdx), rbx); |
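Taken together, the add_new_entry and eviction paths implement a simple clock-style policy. A C++ sketch under the layout the offsets imply (size and finger stored smi-tagged, key/value pairs from kEntriesIndex upward; the struct and names are illustrative):

    struct ResultCache {
      int length;  // backing FixedArray length
      int size;    // element index of the first unused slot
      int finger;  // element index of the most recently used entry
      // ... key/value pairs from kEntriesIndex upward
    };

    // Picks the element index that receives the new (key, value) pair,
    // mirroring the branches above.
    int PickSlot(ResultCache* c, int kEntriesIndex, int kEntrySize) {
      int slot;
      if (c->length > c->size) {    // room left: append a new entry
        slot = c->size;
        c->size += kEntrySize;
      } else {                      // full: evict the entry after the finger
        slot = c->finger + kEntrySize;
        if (slot >= c->length) slot = kEntriesIndex;  // wrap over the cache
      }
      c->finger = slot;             // the finger always ends on the new entry
      return slot;
    }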
(...skipping 2646 matching lines...)
7439 // patch the map check if appropriate. | 7436 // patch the map check if appropriate. |
7440 if (loop_nesting() > 0) { | 7437 if (loop_nesting() > 0) { |
7441 Comment cmnt(masm_, "[ Inlined load from keyed Property"); | 7438 Comment cmnt(masm_, "[ Inlined load from keyed Property"); |
7442 | 7439 |
7443 // Use a fresh temporary to load the elements without destroying | 7440 // Use a fresh temporary to load the elements without destroying |
7444 // the receiver which is needed for the deferred slow case. | 7441 // the receiver which is needed for the deferred slow case. |
7445 // Allocate the temporary early so that we use rax if it is free. | 7442 // Allocate the temporary early so that we use rax if it is free. |
7446 Result elements = allocator()->Allocate(); | 7443 Result elements = allocator()->Allocate(); |
7447 ASSERT(elements.is_valid()); | 7444 ASSERT(elements.is_valid()); |
7448 | 7445 |
7449 | |
7450 Result key = frame_->Pop(); | 7446 Result key = frame_->Pop(); |
7451 Result receiver = frame_->Pop(); | 7447 Result receiver = frame_->Pop(); |
7452 key.ToRegister(); | 7448 key.ToRegister(); |
7453 receiver.ToRegister(); | 7449 receiver.ToRegister(); |
7454 | 7450 |
7455 // Use a fresh temporary for the index | |
7456 Result index = allocator()->Allocate(); | |
7457 ASSERT(index.is_valid()); | |
7458 | |
7459 DeferredReferenceGetKeyedValue* deferred = | 7451 DeferredReferenceGetKeyedValue* deferred = |
7460 new DeferredReferenceGetKeyedValue(elements.reg(), | 7452 new DeferredReferenceGetKeyedValue(elements.reg(), |
7461 receiver.reg(), | 7453 receiver.reg(), |
7462 key.reg()); | 7454 key.reg()); |
7463 | 7455 |
7464 __ JumpIfSmi(receiver.reg(), deferred->entry_label()); | 7456 __ JumpIfSmi(receiver.reg(), deferred->entry_label()); |
7465 | 7457 |
7466 // Check that the receiver has the expected map. | 7458 // Check that the receiver has the expected map. |
7467 // Initially, use an invalid map. The map is patched in the IC | 7459 // Initially, use an invalid map. The map is patched in the IC |
7468 // initialization code. | 7460 // initialization code. |
(...skipping 12 matching lines...)
7481 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label()); | 7473 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label()); |
7482 | 7474 |
7483 // Get the elements array from the receiver and check that it | 7475 // Get the elements array from the receiver and check that it |
7484 // is not a dictionary. | 7476 // is not a dictionary. |
7485 __ movq(elements.reg(), | 7477 __ movq(elements.reg(), |
7486 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); | 7478 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); |
7487 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), | 7479 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), |
7488 Factory::fixed_array_map()); | 7480 Factory::fixed_array_map()); |
7489 deferred->Branch(not_equal); | 7481 deferred->Branch(not_equal); |
7490 | 7482 |
7491 // Shift the key to get the actual index value and check that | 7483 // Check that key is within bounds. |
7492 // it is within bounds. | 7484 __ SmiCompare(key.reg(), |
7493 __ SmiToInteger32(index.reg(), key.reg()); | 7485 FieldOperand(elements.reg(), FixedArray::kLengthOffset)); |
7494 __ cmpl(index.reg(), | |
7495 FieldOperand(elements.reg(), FixedArray::kLengthOffset)); | |
7496 deferred->Branch(above_equal); | 7486 deferred->Branch(above_equal); |
7497 // The index register holds the un-smi-tagged key. It has been | 7487 |
7498 // zero-extended to 64-bits, so it can be used directly as index in the | |
7499 // operand below. | |
7500 // Load and check that the result is not the hole. We could | 7488 // Load and check that the result is not the hole. We could |
7501 // reuse the index or elements register for the value. | 7489 // reuse the index or elements register for the value. |
7502 // | 7490 // |
7503 // TODO(206): Consider whether it makes sense to try some | 7491 // TODO(206): Consider whether it makes sense to try some |
7504 // heuristic about which register to reuse. For example, if | 7492 // heuristic about which register to reuse. For example, if |
7505 // one is rax, then we can reuse that one because the value | 7493 // one is rax, then we can reuse that one because the value |
7506 // coming from the deferred code will be in rax. | 7494 // coming from the deferred code will be in rax. |
| 7495 SmiIndex index = |
| 7496 masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2); |
7507 __ movq(elements.reg(), | 7497 __ movq(elements.reg(), |
7508 Operand(elements.reg(), | 7498 FieldOperand(elements.reg(), |
7509 index.reg(), | 7499 index.reg, |
7510 times_pointer_size, | 7500 index.scale, |
7511 FixedArray::kHeaderSize - kHeapObjectTag)); | 7501 FixedArray::kHeaderSize)); |
7512 result = elements; | 7502 result = elements; |
7513 elements.Unuse(); | |
7514 index.Unuse(); | |
7515 __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex); | 7503 __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex); |
7516 deferred->Branch(equal); | 7504 deferred->Branch(equal); |
7517 __ IncrementCounter(&Counters::keyed_load_inline, 1); | 7505 __ IncrementCounter(&Counters::keyed_load_inline, 1); |
7518 | 7506 |
7519 deferred->BindExit(); | 7507 deferred->BindExit(); |
7520 frame_->Push(&receiver); | 7508 frame_->Push(&receiver); |
7521 frame_->Push(&key); | 7509 frame_->Push(&key); |
7522 } else { | 7510 } else { |
7523 Comment cmnt(masm_, "[ Load from keyed Property"); | 7511 Comment cmnt(masm_, "[ Load from keyed Property"); |
7524 result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET); | 7512 result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET); |
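The inlined fast path in the NEW column no longer needs a separate index register: SmiToIndex converts the tagged key into a (register, scale) pair usable directly in the memory operand. A sketch of the arithmetic, assuming the upper-32-bit smi encoding and kPointerSizeLog2 == 3:

    // (payload << 32) >> (32 - 3) == payload * 8: one arithmetic shift turns
    // the tagged key into a byte offset, used with scale factor 1.
    inline int64_t SmiKeyToByteOffset(int64_t smi_key) {
      return smi_key >> (32 - 3);
    }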
(...skipping 201 matching lines...)
7726 key.reg()); | 7714 key.reg()); |
7727 deferred->Branch(below_equal); | 7715 deferred->Branch(below_equal); |
7728 | 7716 |
7729 // Get the elements array from the receiver and check that it | 7717 // Get the elements array from the receiver and check that it |
7730 // is a flat array (not a dictionary). | 7718 // is a flat array (not a dictionary). |
7731 __ movq(tmp.reg(), | 7719 __ movq(tmp.reg(), |
7732 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); | 7720 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); |
7733 | 7721 |
7734 // Check whether it is possible to omit the write barrier. If the | 7722 // Check whether it is possible to omit the write barrier. If the |
7735 // elements array is in new space or the value written is a smi we can | 7723 // elements array is in new space or the value written is a smi we can |
7736 // safely update the elements array without updating the remembered set. | 7724 // safely update the elements array without a write barrier. |
7737 Label in_new_space; | 7725 Label in_new_space; |
7738 __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space); | 7726 __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space); |
7739 if (!value_is_constant) { | 7727 if (!value_is_constant) { |
7740 __ JumpIfNotSmi(value.reg(), deferred->entry_label()); | 7728 __ JumpIfNotSmi(value.reg(), deferred->entry_label()); |
7741 } | 7729 } |
7742 | 7730 |
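The two checks above encode the standard generational-GC rule: the write barrier only has to record old-to-new pointers, so the store is safe without one when the elements array itself is in new space or the value is a smi rather than a heap pointer. As a predicate:

    inline bool NeedsWriteBarrier(bool elements_in_new_space,
                                  bool value_is_smi) {
      return !elements_in_new_space && !value_is_smi;
    }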
7743 __ bind(&in_new_space); | 7731 __ bind(&in_new_space); |
7744 // Bind the deferred code patch site to be able to locate the | 7732 // Bind the deferred code patch site to be able to locate the |
7745 // fixed array map comparison. When debugging, we patch this | 7733 // fixed array map comparison. When debugging, we patch this |
7746 // comparison to always fail so that we will hit the IC call | 7734 // comparison to always fail so that we will hit the IC call |
7747 // in the deferred code which will allow the debugger to | 7735 // in the deferred code which will allow the debugger to |
7748 // break for fast case stores. | 7736 // break for fast case stores. |
7749 __ bind(deferred->patch_site()); | 7737 __ bind(deferred->patch_site()); |
7750 // Avoid using __ to ensure the distance from patch_site | 7738 // Avoid using __ to ensure the distance from patch_site |
7751 // to the map address is always the same. | 7739 // to the map address is always the same. |
7752 masm->movq(kScratchRegister, Factory::fixed_array_map(), | 7740 masm->movq(kScratchRegister, Factory::fixed_array_map(), |
7753 RelocInfo::EMBEDDED_OBJECT); | 7741 RelocInfo::EMBEDDED_OBJECT); |
7754 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset), | 7742 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset), |
7755 kScratchRegister); | 7743 kScratchRegister); |
7756 deferred->Branch(not_equal); | 7744 deferred->Branch(not_equal); |
7757 | 7745 |
7758 // Store the value. | 7746 // Store the value. |
7759 SmiIndex index = | 7747 SmiIndex index = |
7760 masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2); | 7748 masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2); |
7761 __ movq(Operand(tmp.reg(), | 7749 __ movq(FieldOperand(tmp.reg(), |
7762 index.reg, | 7750 index.reg, |
7763 index.scale, | 7751 index.scale, |
7764 FixedArray::kHeaderSize - kHeapObjectTag), | 7752 FixedArray::kHeaderSize), |
7765 value.reg()); | 7753 value.reg()); |
7766 __ IncrementCounter(&Counters::keyed_store_inline, 1); | 7754 __ IncrementCounter(&Counters::keyed_store_inline, 1); |
7767 | 7755 |
7768 deferred->BindExit(); | 7756 deferred->BindExit(); |
7769 | 7757 |
7770 cgen_->frame()->Push(&value); | 7758 cgen_->frame()->Push(&value); |
7771 } else { | 7759 } else { |
7772 Result answer = cgen_->frame()->CallKeyedStoreIC(); | 7760 Result answer = cgen_->frame()->CallKeyedStoreIC(); |
7773 // Make sure that we do not have a test instruction after the | 7761 // Make sure that we do not have a test instruction after the |
7774 // call. A test instruction after the call is used to | 7762 // call. A test instruction after the call is used to |
(...skipping 59 matching lines...)
7834 int length = slots_ + Context::MIN_CONTEXT_SLOTS; | 7822 int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
7835 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, | 7823 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, |
7836 rax, rbx, rcx, &gc, TAG_OBJECT); | 7824 rax, rbx, rcx, &gc, TAG_OBJECT); |
7837 | 7825 |
7838 // Get the function from the stack. | 7826 // Get the function from the stack. |
7839 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); | 7827 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); |
7840 | 7828 |
7841 // Setup the object header. | 7829 // Setup the object header. |
7842 __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex); | 7830 __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex); |
7843 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); | 7831 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); |
7844 __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length)); | 7832 __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); |
7845 | 7833 |
7846 // Setup the fixed slots. | 7834 // Setup the fixed slots. |
7847 __ xor_(rbx, rbx); // Set to NULL. | 7835 __ xor_(rbx, rbx); // Set to NULL. |
7848 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx); | 7836 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx); |
7849 __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax); | 7837 __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax); |
7850 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx); | 7838 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx); |
7851 __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx); | 7839 __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx); |
7852 | 7840 |
7853 // Copy the global object from the surrounding context. | 7841 // Copy the global object from the surrounding context. |
7854 __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); | 7842 __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); |
(...skipping 654 matching lines...)
8509 __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister); | 8497 __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister); |
8510 __ j(not_equal, &runtime); | 8498 __ j(not_equal, &runtime); |
8511 // Check that the JSArray is in fast case. | 8499 // Check that the JSArray is in fast case. |
8512 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset)); | 8500 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset)); |
8513 __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset)); | 8501 __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset)); |
8514 __ Cmp(rax, Factory::fixed_array_map()); | 8502 __ Cmp(rax, Factory::fixed_array_map()); |
8515 __ j(not_equal, &runtime); | 8503 __ j(not_equal, &runtime); |
8516 // Check that the last match info has space for the capture registers and the | 8504 // Check that the last match info has space for the capture registers and the |
8517 // additional information. Ensure no overflow in add. | 8505 // additional information. Ensure no overflow in add. |
8518 ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset); | 8506 ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset); |
8519 __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset)); | 8507 __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset)); |
| 8508 __ SmiToInteger32(rax, rax); |
8520 __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead)); | 8509 __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead)); |
8521 __ cmpl(rdx, rax); | 8510 __ cmpl(rdx, rax); |
8522 __ j(greater, &runtime); | 8511 __ j(greater, &runtime); |
8523 | 8512 |
8524 // ecx: RegExp data (FixedArray) | 8513 // ecx: RegExp data (FixedArray) |
8525 // Check the representation and encoding of the subject string. | 8514 // Check the representation and encoding of the subject string. |
8526 Label seq_string, seq_two_byte_string, check_code; | 8515 Label seq_string, seq_two_byte_string, check_code; |
8527 const int kStringRepresentationEncodingMask = | 8516 const int kStringRepresentationEncodingMask = |
8528 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; | 8517 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; |
8529 __ movq(rax, Operand(rsp, kSubjectOffset)); | 8518 __ movq(rax, Operand(rsp, kSubjectOffset)); |
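Since the length field is now a smi, NEW untags it before the int32 compare; the capacity check itself is unchanged. In scalar form (the ASSERT above guarantees the addition cannot overflow):

    // captures: the capture-register count in rdx; elements_length: rax.
    inline bool LastMatchInfoFits(int32_t captures, int32_t overhead,
                                  int32_t elements_length) {
      return captures + overhead <= elements_length;  // j(greater) -> runtime
    }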
(...skipping 256 matching lines...)
8786 // Use of registers. Register result is used as a temporary. | 8775 // Use of registers. Register result is used as a temporary. |
8787 Register number_string_cache = result; | 8776 Register number_string_cache = result; |
8788 Register mask = scratch1; | 8777 Register mask = scratch1; |
8789 Register scratch = scratch2; | 8778 Register scratch = scratch2; |
8790 | 8779 |
8791 // Load the number string cache. | 8780 // Load the number string cache. |
8792 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); | 8781 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); |
8793 | 8782 |
8794 // Make the hash mask from the length of the number string cache. It | 8783 // Make the hash mask from the length of the number string cache. It |
8795 // contains two elements (number and string) for each cache entry. | 8784 // contains two elements (number and string) for each cache entry. |
8796 __ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); | 8785 __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); |
8797 __ shrl(mask, Immediate(1)); // Divide length by two (length is not a smi). | 8786 // Divide the smi-tagged length by two. |
8798 __ subl(mask, Immediate(1)); // Make mask. | 8787 __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1); |
| 8788 __ subq(mask, Immediate(1)); // Make mask. |
8799 | 8789 |
8800 // Calculate the entry in the number string cache. The hash value in the | 8790 // Calculate the entry in the number string cache. The hash value in the |
8801 // number string cache for smis is just the smi value, and the hash for | 8791 // number string cache for smis is just the smi value, and the hash for |
8802 // doubles is the xor of the upper and lower words. See | 8792 // doubles is the xor of the upper and lower words. See |
8803 // Heap::GetNumberStringCache. | 8793 // Heap::GetNumberStringCache. |
8804 Label is_smi; | 8794 Label is_smi; |
8805 Label load_result_from_cache; | 8795 Label load_result_from_cache; |
8806 if (!object_is_smi) { | 8796 if (!object_is_smi) { |
8807 __ JumpIfSmi(object, &is_smi); | 8797 __ JumpIfSmi(object, &is_smi); |
8808 __ CheckMap(object, Factory::heap_number_map(), not_found, true); | 8798 __ CheckMap(object, Factory::heap_number_map(), not_found, true); |
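A sketch of the probe the comments describe, assuming the (number, string) pair layout of the cache; compare Heap::GetNumberStringCache:

    #include <cstdint>
    #include <cstring>

    // Smis hash to their own value; doubles hash to the xor of their two
    // 32-bit halves.
    inline uint32_t DoubleHash(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
    }

    // Two slots per entry, so mask = length/2 - 1 and the number slot of
    // the probed entry sits at element index (hash & mask) * 2.
    inline int CacheEntryIndex(uint32_t hash, int cache_length) {
      return static_cast<int>(hash & ((cache_length / 2) - 1)) * 2;
    }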
(...skipping 499 matching lines...)
9308 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); | 9298 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); |
9309 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); | 9299 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); |
9310 | 9300 |
9311 // If there are no actual arguments, we're done. | 9301 // If there are no actual arguments, we're done. |
9312 Label done; | 9302 Label done; |
9313 __ testq(rcx, rcx); | 9303 __ testq(rcx, rcx); |
9314 __ j(zero, &done); | 9304 __ j(zero, &done); |
9315 | 9305 |
9316 // Get the parameters pointer from the stack and untag the length. | 9306 // Get the parameters pointer from the stack and untag the length. |
9317 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); | 9307 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); |
9318 __ SmiToInteger32(rcx, rcx); | |
9319 | 9308 |
9320 // Setup the elements pointer in the allocated arguments object and | 9309 // Setup the elements pointer in the allocated arguments object and |
9321 // initialize the header in the elements fixed array. | 9310 // initialize the header in the elements fixed array. |
9322 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); | 9311 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); |
9323 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); | 9312 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); |
9324 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); | 9313 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); |
9325 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); | 9314 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); |
9326 __ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); | 9315 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); |
| 9316 __ SmiToInteger32(rcx, rcx); // Untag length for the loop below. |
9327 | 9317 |
9328 // Copy the fixed array slots. | 9318 // Copy the fixed array slots. |
9329 Label loop; | 9319 Label loop; |
9330 __ bind(&loop); | 9320 __ bind(&loop); |
9331 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. | 9321 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. |
9332 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); | 9322 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); |
9333 __ addq(rdi, Immediate(kPointerSize)); | 9323 __ addq(rdi, Immediate(kPointerSize)); |
9334 __ subq(rdx, Immediate(kPointerSize)); | 9324 __ subq(rdx, Immediate(kPointerSize)); |
9335 __ decq(rcx); | 9325 __ decq(rcx); |
9336 __ j(not_zero, &loop); | 9326 __ j(not_zero, &loop); |
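The loop walks the parameters downward from the frame (the -1 * kPointerSize skips the receiver) while filling the FixedArray upward. A scalar sketch with illustrative names:

    // params points one slot above the first argument read; elements points
    // at the first FixedArray slot.
    void CopyArguments(const uint64_t* params, uint64_t* elements, int count) {
      for (int i = 0; i < count; i++) {
        elements[i] = params[-1 - i];  // read down the stack, write up the array
      }
    }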
(...skipping 1797 matching lines...)
11134 __ movl(rcx, r8); | 11124 __ movl(rcx, r8); |
11135 __ and_(rcx, r9); | 11125 __ and_(rcx, r9); |
11136 ASSERT(kStringEncodingMask == kAsciiStringTag); | 11126 ASSERT(kStringEncodingMask == kAsciiStringTag); |
11137 __ testl(rcx, Immediate(kAsciiStringTag)); | 11127 __ testl(rcx, Immediate(kAsciiStringTag)); |
11138 __ j(zero, &non_ascii); | 11128 __ j(zero, &non_ascii); |
11139 // Allocate an ascii cons string. | 11129 // Allocate an ascii cons string. |
11140 __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime); | 11130 __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime); |
11141 __ bind(&allocated); | 11131 __ bind(&allocated); |
11142 // Fill the fields of the cons string. | 11132 // Fill the fields of the cons string. |
11143 __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx); | 11133 __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx); |
11144 __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset), | 11134 __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset), |
11145 Immediate(String::kEmptyHashField)); | 11135 Immediate(String::kEmptyHashField)); |
11146 __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax); | 11136 __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax); |
11147 __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx); | 11137 __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx); |
11148 __ movq(rax, rcx); | 11138 __ movq(rax, rcx); |
11149 __ IncrementCounter(&Counters::string_add_native, 1); | 11139 __ IncrementCounter(&Counters::string_add_native, 1); |
11150 __ ret(2 * kPointerSize); | 11140 __ ret(2 * kPointerSize); |
11151 __ bind(&non_ascii); | 11141 __ bind(&non_ascii); |
11152 // Allocate a two byte cons string. | 11142 // Allocate a two byte cons string. |
11153 __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime); | 11143 __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime); |
11154 __ jmp(&allocated); | 11144 __ jmp(&allocated); |
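The and_ of the two instance-type words above means the ascii branch is taken only when both operands are ascii strings. As a predicate:

    // Allocate an ascii cons string only if both inputs carry the tag.
    inline bool BothAscii(uint32_t type1, uint32_t type2, uint32_t ascii_tag) {
      return ((type1 & type2) & ascii_tag) != 0;
    }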
(...skipping 709 matching lines...)
11864 } | 11854 } |
11865 | 11855 |
11866 #endif | 11856 #endif |
11867 | 11857 |
11868 | 11858 |
11869 #undef __ | 11859 #undef __ |
11870 | 11860 |
11871 } } // namespace v8::internal | 11861 } } // namespace v8::internal |
11872 | 11862 |
11873 #endif // V8_TARGET_ARCH_X64 | 11863 #endif // V8_TARGET_ARCH_X64 |