OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1838 matching lines...)
1849 // the object) | 1849 // the object) |
1850 __ movq(rcx, rax); | 1850 __ movq(rcx, rax); |
1851 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset)); | 1851 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset)); |
1852 // Get the bridge array held in the enumeration index field. | 1852 // Get the bridge array held in the enumeration index field. |
1853 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset)); | 1853 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset)); |
1854 // Get the cache from the bridge array. | 1854 // Get the cache from the bridge array. |
1855 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset)); | 1855 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset)); |
1856 | 1856 |
1857 frame_->EmitPush(rax); // <- slot 3 | 1857 frame_->EmitPush(rax); // <- slot 3 |
1858 frame_->EmitPush(rdx); // <- slot 2 | 1858 frame_->EmitPush(rdx); // <- slot 2 |
1859 __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset)); | 1859 __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset)); |
1860 __ Integer32ToSmi(rax, rax); | |
1861 frame_->EmitPush(rax); // <- slot 1 | 1860 frame_->EmitPush(rax); // <- slot 1 |
1862 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 | 1861 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 |
1863 entry.Jump(); | 1862 entry.Jump(); |
1864 | 1863 |
1865 fixed_array.Bind(); | 1864 fixed_array.Bind(); |
1866 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast) | 1865 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast) |
1867 frame_->EmitPush(Smi::FromInt(0)); // <- slot 3 | 1866 frame_->EmitPush(Smi::FromInt(0)); // <- slot 3 |
1868 frame_->EmitPush(rax); // <- slot 2 | 1867 frame_->EmitPush(rax); // <- slot 2 |
1869 | 1868 |
1870 // Push the length of the array and the initial index onto the stack. | 1869 // Push the length of the array and the initial index onto the stack. |
1871 __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset)); | 1870 __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset)); |
1872 __ Integer32ToSmi(rax, rax); | |
1873 frame_->EmitPush(rax); // <- slot 1 | 1871 frame_->EmitPush(rax); // <- slot 1 |
1874 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 | 1872 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 |
1875 | 1873 |
1876 // Condition. | 1874 // Condition. |
1877 entry.Bind(); | 1875 entry.Bind(); |
1878 // Grab the current frame's height for the break and continue | 1876 // Grab the current frame's height for the break and continue |
1879 // targets only after all the state is pushed on the frame. | 1877 // targets only after all the state is pushed on the frame. |
1880 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | 1878 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); |
1881 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | 1879 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); |
1882 | 1880 |
(...skipping 2534 matching lines...)
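Note: the hunk above is representative of the whole change. FixedArray's length field is now stored as a smi, so the 32-bit movl load plus an explicit Integer32ToSmi collapses into a single 64-bit movq whose result can be pushed directly. A minimal, self-contained sketch of the x64 smi encoding this relies on, assuming the 32-bit payload sits in the upper half of a 64-bit word (helper names mirror the MacroAssembler but are illustrative only, not the V8 API):

#include <cassert>
#include <cstdint>

// Assumed encoding: smi payload in the upper 32 bits, low 32 bits zero.
constexpr int kSmiShift = 32;

inline int64_t Integer32ToSmi(int32_t value) {
  return static_cast<int64_t>(value) << kSmiShift;  // tag
}

inline int32_t SmiToInteger32(int64_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);    // untag
}

int main() {
  // A length field stored tagged can be loaded with one 64-bit move and then
  // pushed or compared as-is; no separate tagging step is needed afterwards.
  int64_t length_field = Integer32ToSmi(10);
  assert(SmiToInteger32(length_field) == 10);
  return 0;
}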
4417 | 4415 |
4418 // Fill out the elements FixedArray. | 4416 // Fill out the elements FixedArray. |
4419 // rax: JSArray. | 4417 // rax: JSArray. |
4420 // rcx: FixedArray. | 4418 // rcx: FixedArray. |
4421 // rbx: Number of elements in array as int32. | 4419 // rbx: Number of elements in array as int32. |
4422 | 4420 |
4423 // Set map. | 4421 // Set map. |
4424 __ Move(FieldOperand(rcx, HeapObject::kMapOffset), | 4422 __ Move(FieldOperand(rcx, HeapObject::kMapOffset), |
4425 Factory::fixed_array_map()); | 4423 Factory::fixed_array_map()); |
4426 // Set length. | 4424 // Set length. |
4427 __ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx); | 4425 __ Integer32ToSmi(rdx, rbx); |
| 4426 __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx); |
4428 // Fill contents of fixed-array with the-hole. | 4427 // Fill contents of fixed-array with the-hole. |
4429 __ Move(rdx, Factory::the_hole_value()); | 4428 __ Move(rdx, Factory::the_hole_value()); |
4430 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize)); | 4429 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize)); |
4431 // Fill fixed array elements with hole. | 4430 // Fill fixed array elements with hole. |
4432 // rax: JSArray. | 4431 // rax: JSArray. |
4433 // rbx: Number of elements in array that remains to be filled, as int32. | 4432 // rbx: Number of elements in array that remains to be filled, as int32. |
4434 // rcx: Start of elements in FixedArray. | 4433 // rcx: Start of elements in FixedArray. |
4435 // rdx: the hole. | 4434 // rdx: the hole. |
4436 Label loop; | 4435 Label loop; |
4437 __ testl(rbx, rbx); | 4436 __ testl(rbx, rbx); |
(...skipping 98 matching lines...)
4536 __ InvokeFunction(rdi, expected, CALL_FUNCTION); | 4535 __ InvokeFunction(rdi, expected, CALL_FUNCTION); |
4537 | 4536 |
4538 // Find a place to put new cached value into. | 4537 // Find a place to put new cached value into. |
4539 Label add_new_entry, update_cache; | 4538 Label add_new_entry, update_cache; |
4540 __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache | 4539 __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache |
4541 // Possible optimization: cache size is constant for the given cache | 4540 // Possible optimization: cache size is constant for the given cache |
4542 // so technically we could use a constant here. However, if we have | 4541 // so technically we could use a constant here. However, if we have |
4543 // cache miss this optimization would hardly matter much. | 4542 // cache miss this optimization would hardly matter much. |
4544 | 4543 |
4545 // Check if we could add new entry to cache. | 4544 // Check if we could add new entry to cache. |
4546 __ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset)); | 4545 __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset)); |
4547 __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset)); | 4546 __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset)); |
4548 __ SmiToInteger32(r9, r9); | 4547 __ SmiCompare(rbx, r9); |
4549 __ cmpq(rbx, r9); | |
4550 __ j(greater, &add_new_entry); | 4548 __ j(greater, &add_new_entry); |
4551 | 4549 |
4552 // Check if we could evict entry after finger. | 4550 // Check if we could evict entry after finger. |
4553 __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset)); | 4551 __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset)); |
4554 __ SmiToInteger32(rdx, rdx); | 4552 __ SmiToInteger32(rdx, rdx); |
| 4553 __ SmiToInteger32(rbx, rbx); |
4555 __ addq(rdx, kEntrySizeImm); | 4554 __ addq(rdx, kEntrySizeImm); |
4556 Label forward; | 4555 Label forward; |
4557 __ cmpq(rbx, rdx); | 4556 __ cmpq(rbx, rdx); |
4558 __ j(greater, &forward); | 4557 __ j(greater, &forward); |
4559 // Need to wrap over the cache. | 4558 // Need to wrap over the cache. |
4560 __ movq(rdx, kEntriesIndexImm); | 4559 __ movq(rdx, kEntriesIndexImm); |
4561 __ bind(&forward); | 4560 __ bind(&forward); |
4562 __ Integer32ToSmi(r9, rdx); | 4561 __ Integer32ToSmi(r9, rdx); |
4563 __ jmp(&update_cache); | 4562 __ jmp(&update_cache); |
4564 | 4563 |
4565 __ bind(&add_new_entry); | 4564 __ bind(&add_new_entry); |
4566 // r9 holds cache size as int. | 4565 // r9 holds cache size as smi. |
4567 __ movq(rdx, r9); | 4566 __ SmiToInteger32(rdx, r9); |
4568 __ Integer32ToSmi(r9, r9); | |
4569 __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize)); | 4567 __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize)); |
4570 __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx); | 4568 __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx); |
4571 | 4569 |
4572 // Update the cache itself. | 4570 // Update the cache itself. |
4573 // rdx holds the index as int. | 4571 // rdx holds the index as int. |
4574 // r9 holds the index as smi. | 4572 // r9 holds the index as smi. |
4575 __ bind(&update_cache); | 4573 __ bind(&update_cache); |
4576 __ pop(rbx); // restore the key | 4574 __ pop(rbx); // restore the key |
4577 __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9); | 4575 __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9); |
4578 // Store key. | 4576 // Store key. |
(...skipping 2539 matching lines...)
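Note: in the result-cache hunk above, the old sequence worked on untagged values (movl for the length, SmiToInteger32 for the cache size) before a cmpq; the new code compares the two tagged values directly with SmiCompare. That is sound because the assumed tagging (payload shifted into the upper 32 bits) preserves ordering, as this small sketch under that assumption checks:

#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;  // assumed x64 smi shift

inline int64_t ToSmi(int32_t v) {
  return static_cast<int64_t>(v) << kSmiShift;
}

int main() {
  // Comparing tagged words orders them exactly like their payloads, which is
  // what lets a single SmiCompare replace untag-then-cmpq.
  assert((ToSmi(5) > ToSmi(3)) == (5 > 3));
  assert((ToSmi(-2) < ToSmi(7)) == (-2 < 7));
  assert(ToSmi(4) == ToSmi(4));
  return 0;
}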
7118 Result key = frame_->Pop(); | 7116 Result key = frame_->Pop(); |
7119 Result receiver = frame_->Pop(); | 7117 Result receiver = frame_->Pop(); |
7120 key.ToRegister(); | 7118 key.ToRegister(); |
7121 receiver.ToRegister(); | 7119 receiver.ToRegister(); |
7122 | 7120 |
7123 // Use a fresh temporary to load the elements without destroying | 7121 // Use a fresh temporary to load the elements without destroying |
7124 // the receiver which is needed for the deferred slow case. | 7122 // the receiver which is needed for the deferred slow case. |
7125 Result elements = allocator()->Allocate(); | 7123 Result elements = allocator()->Allocate(); |
7126 ASSERT(elements.is_valid()); | 7124 ASSERT(elements.is_valid()); |
7127 | 7125 |
7128 // Use a fresh temporary for the index and later the loaded | |
7129 // value. | |
7130 Result index = allocator()->Allocate(); | |
7131 ASSERT(index.is_valid()); | |
7132 | |
7133 DeferredReferenceGetKeyedValue* deferred = | 7126 DeferredReferenceGetKeyedValue* deferred = |
7134 new DeferredReferenceGetKeyedValue(index.reg(), | 7127 new DeferredReferenceGetKeyedValue(elements.reg(), |
7135 receiver.reg(), | 7128 receiver.reg(), |
7136 key.reg(), | 7129 key.reg(), |
7137 is_global); | 7130 is_global); |
7138 | 7131 |
7139 // Check that the receiver is not a smi (only needed if this | 7132 // Check that the receiver is not a smi (only needed if this |
7140 // is not a load from the global context) and that it has the | 7133 // is not a load from the global context) and that it has the |
7141 // expected map. | 7134 // expected map. |
7142 if (!is_global) { | 7135 if (!is_global) { |
7143 __ JumpIfSmi(receiver.reg(), deferred->entry_label()); | 7136 __ JumpIfSmi(receiver.reg(), deferred->entry_label()); |
7144 } | 7137 } |
(...skipping 15 matching lines...)
7160 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label()); | 7153 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label()); |
7161 | 7154 |
7162 // Get the elements array from the receiver and check that it | 7155 // Get the elements array from the receiver and check that it |
7163 // is not a dictionary. | 7156 // is not a dictionary. |
7164 __ movq(elements.reg(), | 7157 __ movq(elements.reg(), |
7165 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); | 7158 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); |
7166 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), | 7159 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), |
7167 Factory::fixed_array_map()); | 7160 Factory::fixed_array_map()); |
7168 deferred->Branch(not_equal); | 7161 deferred->Branch(not_equal); |
7169 | 7162 |
7170 // Shift the key to get the actual index value and check that | 7163 // Check that key is within bounds. |
7171 // it is within bounds. | 7164 __ SmiCompare(key.reg(), |
7172 __ SmiToInteger32(index.reg(), key.reg()); | 7165 FieldOperand(elements.reg(), FixedArray::kLengthOffset)); |
7173 __ cmpl(index.reg(), | |
7174 FieldOperand(elements.reg(), FixedArray::kLengthOffset)); | |
7175 deferred->Branch(above_equal); | 7166 deferred->Branch(above_equal); |
7176 | 7167 |
7177 // The index register holds the un-smi-tagged key. It has been | 7168 // The key register holds the smi-tagged key. Load the value and |
7178 // zero-extended to 64-bits, so it can be used directly as index in the | 7169 // check that it is not the hole value. |
7179 // operand below. | 7170 Result value = elements; |
7180 // Load and check that the result is not the hole. We could | 7171 SmiIndex index = |
7181 // reuse the index or elements register for the value. | 7172 masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2); |
7182 // | |
7183 // TODO(206): Consider whether it makes sense to try some | |
7184 // heuristic about which register to reuse. For example, if | |
7185 // one is rax, the we can reuse that one because the value | |
7186 // coming from the deferred code will be in rax. | |
7187 Result value = index; | |
7188 __ movq(value.reg(), | 7173 __ movq(value.reg(), |
7189 Operand(elements.reg(), | 7174 FieldOperand(elements.reg(), |
7190 index.reg(), | 7175 index.reg, |
7191 times_pointer_size, | 7176 index.scale, |
7192 FixedArray::kHeaderSize - kHeapObjectTag)); | 7177 FixedArray::kHeaderSize)); |
7193 elements.Unuse(); | |
7194 index.Unuse(); | |
7195 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex); | 7178 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex); |
7196 deferred->Branch(equal); | 7179 deferred->Branch(equal); |
7197 __ IncrementCounter(&Counters::keyed_load_inline, 1); | 7180 __ IncrementCounter(&Counters::keyed_load_inline, 1); |
7198 | 7181 |
7199 deferred->BindExit(); | 7182 deferred->BindExit(); |
7200 // Restore the receiver and key to the frame and push the | 7183 // Restore the receiver and key to the frame and push the |
7201 // result on top of it. | 7184 // result on top of it. |
7202 frame_->Push(&receiver); | 7185 frame_->Push(&receiver); |
7203 frame_->Push(&key); | 7186 frame_->Push(&key); |
7204 return value; | 7187 return value; |
(...skipping 278 matching lines...)
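Note: the inlined keyed load above now keeps the key tagged throughout. The bounds check becomes a SmiCompare against the tagged length, and SmiToIndex converts the tagged key straight into a scaled index, which is why the separate index temporary could be dropped. A sketch of the address arithmetic involved, assuming the payload-in-upper-32-bits encoding, 8-byte elements, and the usual heap-object tag of 1 (FieldOperand folds the tag subtraction into the displacement); the names here are illustrative, not the MacroAssembler API:

#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;        // assumed smi shift
constexpr int kPointerSizeLog2 = 3;  // 8-byte elements
constexpr int kHeapObjectTag = 1;    // assumed pointer tag

// One arithmetic shift turns a tagged key into a byte offset, instead of
// untagging first and then scaling by the element size.
inline int64_t SmiKeyToByteOffset(int64_t key_smi) {
  return key_smi >> (kSmiShift - kPointerSizeLog2);
}

// FieldOperand(base, offset) addresses base + offset - kHeapObjectTag.
inline uint64_t ElementAddress(uint64_t elements, int64_t key_smi,
                               int header_size) {
  return elements + SmiKeyToByteOffset(key_smi) + header_size - kHeapObjectTag;
}

int main() {
  int64_t key = static_cast<int64_t>(2) << kSmiShift;  // smi-tagged key 2
  assert(SmiKeyToByteOffset(key) == 2 * 8);             // two pointers in
  assert(ElementAddress(0x1000, key, 16) == 0x1000 + 16 + 16 - 1);
  return 0;
}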
7483 key.reg()); | 7466 key.reg()); |
7484 deferred->Branch(below_equal); | 7467 deferred->Branch(below_equal); |
7485 | 7468 |
7486 // Get the elements array from the receiver and check that it | 7469 // Get the elements array from the receiver and check that it |
7487 // is a flat array (not a dictionary). | 7470 // is a flat array (not a dictionary). |
7488 __ movq(tmp.reg(), | 7471 __ movq(tmp.reg(), |
7489 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); | 7472 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); |
7490 | 7473 |
7491 // Check whether it is possible to omit the write barrier. If the | 7474 // Check whether it is possible to omit the write barrier. If the |
7492 // elements array is in new space or the value written is a smi we can | 7475 // elements array is in new space or the value written is a smi we can |
7493 // safely update the elements array without updating the remembered set. | 7476 // safely update the elements array without write barrier. |
7494 Label in_new_space; | 7477 Label in_new_space; |
7495 __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space); | 7478 __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space); |
7496 if (!value_is_constant) { | 7479 if (!value_is_constant) { |
7497 __ JumpIfNotSmi(value.reg(), deferred->entry_label()); | 7480 __ JumpIfNotSmi(value.reg(), deferred->entry_label()); |
7498 } | 7481 } |
7499 | 7482 |
7500 __ bind(&in_new_space); | 7483 __ bind(&in_new_space); |
7501 // Bind the deferred code patch site to be able to locate the | 7484 // Bind the deferred code patch site to be able to locate the |
7502 // fixed array map comparison. When debugging, we patch this | 7485 // fixed array map comparison. When debugging, we patch this |
7503 // comparison to always fail so that we will hit the IC call | 7486 // comparison to always fail so that we will hit the IC call |
7504 // in the deferred code which will allow the debugger to | 7487 // in the deferred code which will allow the debugger to |
7505 // break for fast case stores. | 7488 // break for fast case stores. |
7506 __ bind(deferred->patch_site()); | 7489 __ bind(deferred->patch_site()); |
7507 // Avoid using __ to ensure the distance from patch_site | 7490 // Avoid using __ to ensure the distance from patch_site |
7508 // to the map address is always the same. | 7491 // to the map address is always the same. |
7509 masm->movq(kScratchRegister, Factory::fixed_array_map(), | 7492 masm->movq(kScratchRegister, Factory::fixed_array_map(), |
7510 RelocInfo::EMBEDDED_OBJECT); | 7493 RelocInfo::EMBEDDED_OBJECT); |
7511 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset), | 7494 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset), |
7512 kScratchRegister); | 7495 kScratchRegister); |
7513 deferred->Branch(not_equal); | 7496 deferred->Branch(not_equal); |
7514 | 7497 |
7515 // Store the value. | 7498 // Store the value. |
7516 SmiIndex index = | 7499 SmiIndex index = |
7517 masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2); | 7500 masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2); |
7518 __ movq(Operand(tmp.reg(), | 7501 __ movq(FieldOperand(tmp.reg(), |
7519 index.reg, | 7502 index.reg, |
7520 index.scale, | 7503 index.scale, |
7521 FixedArray::kHeaderSize - kHeapObjectTag), | 7504 FixedArray::kHeaderSize), |
7522 value.reg()); | 7505 value.reg()); |
7523 __ IncrementCounter(&Counters::keyed_store_inline, 1); | 7506 __ IncrementCounter(&Counters::keyed_store_inline, 1); |
7524 | 7507 |
7525 deferred->BindExit(); | 7508 deferred->BindExit(); |
7526 | 7509 |
7527 cgen_->frame()->Push(&receiver); | 7510 cgen_->frame()->Push(&receiver); |
7528 cgen_->frame()->Push(&key); | 7511 cgen_->frame()->Push(&key); |
7529 cgen_->frame()->Push(&value); | 7512 cgen_->frame()->Push(&value); |
7530 } else { | 7513 } else { |
7531 Result answer = cgen_->frame()->CallKeyedStoreIC(); | 7514 Result answer = cgen_->frame()->CallKeyedStoreIC(); |
(...skipping 61 matching lines...)
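Note: the keyed store hunk above rewords the comment from "remembered set" to "write barrier", and the fast path still relies on the same reasoning: a barrier is only needed when the store may create a pointer from old space into new space. A hypothetical predicate (not a V8 API) stating that rule:

#include <cassert>

// A store needs a write barrier only if the value is a heap object (not a
// smi) and the holder is not itself in new space; new-space objects are
// visited by the scavenger anyway, and smis are not pointers at all.
bool NeedsWriteBarrier(bool holder_in_new_space, bool value_is_smi) {
  return !holder_in_new_space && !value_is_smi;
}

int main() {
  // Both fast-path cases in the generated code above skip the barrier.
  assert(!NeedsWriteBarrier(/*holder_in_new_space=*/true, /*value_is_smi=*/false));
  assert(!NeedsWriteBarrier(/*holder_in_new_space=*/false, /*value_is_smi=*/true));
  // The remaining case goes through the deferred slow path instead.
  assert(NeedsWriteBarrier(/*holder_in_new_space=*/false, /*value_is_smi=*/false));
  return 0;
}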
7593 int length = slots_ + Context::MIN_CONTEXT_SLOTS; | 7576 int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
7594 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, | 7577 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, |
7595 rax, rbx, rcx, &gc, TAG_OBJECT); | 7578 rax, rbx, rcx, &gc, TAG_OBJECT); |
7596 | 7579 |
7597 // Get the function from the stack. | 7580 // Get the function from the stack. |
7598 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); | 7581 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); |
7599 | 7582 |
7600 // Setup the object header. | 7583 // Setup the object header. |
7601 __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex); | 7584 __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex); |
7602 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); | 7585 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); |
7603 __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length)); | 7586 __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); |
7604 | 7587 |
7605 // Setup the fixed slots. | 7588 // Setup the fixed slots. |
7606 __ xor_(rbx, rbx); // Set to NULL. | 7589 __ xor_(rbx, rbx); // Set to NULL. |
7607 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx); | 7590 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx); |
7608 __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax); | 7591 __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax); |
7609 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx); | 7592 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx); |
7610 __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx); | 7593 __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx); |
7611 | 7594 |
7612 // Copy the global object from the surrounding context. | 7595 // Copy the global object from the surrounding context. |
7613 __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); | 7596 __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); |
(...skipping 654 matching lines...)
8268 __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister); | 8251 __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister); |
8269 __ j(not_equal, &runtime); | 8252 __ j(not_equal, &runtime); |
8270 // Check that the JSArray is in fast case. | 8253 // Check that the JSArray is in fast case. |
8271 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset)); | 8254 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset)); |
8272 __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset)); | 8255 __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset)); |
8273 __ Cmp(rax, Factory::fixed_array_map()); | 8256 __ Cmp(rax, Factory::fixed_array_map()); |
8274 __ j(not_equal, &runtime); | 8257 __ j(not_equal, &runtime); |
8275 // Check that the last match info has space for the capture registers and the | 8258 // Check that the last match info has space for the capture registers and the |
8276 // additional information. Ensure no overflow in add. | 8259 // additional information. Ensure no overflow in add. |
8277 ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset); | 8260 ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset); |
8278 __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset)); | 8261 __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset)); |
| 8262 __ SmiToInteger32(rax, rax); |
8279 __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead)); | 8263 __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead)); |
8280 __ cmpl(rdx, rax); | 8264 __ cmpl(rdx, rax); |
8281 __ j(greater, &runtime); | 8265 __ j(greater, &runtime); |
8282 | 8266 |
8283 // ecx: RegExp data (FixedArray) | 8267 // ecx: RegExp data (FixedArray) |
8284 // Check the representation and encoding of the subject string. | 8268 // Check the representation and encoding of the subject string. |
8285 Label seq_string, seq_two_byte_string, check_code; | 8269 Label seq_string, seq_two_byte_string, check_code; |
8286 const int kStringRepresentationEncodingMask = | 8270 const int kStringRepresentationEncodingMask = |
8287 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; | 8271 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; |
8288 __ movq(rax, Operand(rsp, kSubjectOffset)); | 8272 __ movq(rax, Operand(rsp, kSubjectOffset)); |
(...skipping 257 matching lines...)
8546 // Use of registers. Register result is used as a temporary. | 8530 // Use of registers. Register result is used as a temporary. |
8547 Register number_string_cache = result; | 8531 Register number_string_cache = result; |
8548 Register mask = scratch1; | 8532 Register mask = scratch1; |
8549 Register scratch = scratch2; | 8533 Register scratch = scratch2; |
8550 | 8534 |
8551 // Load the number string cache. | 8535 // Load the number string cache. |
8552 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); | 8536 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); |
8553 | 8537 |
8554 // Make the hash mask from the length of the number string cache. It | 8538 // Make the hash mask from the length of the number string cache. It |
8555 // contains two elements (number and string) for each cache entry. | 8539 // contains two elements (number and string) for each cache entry. |
8556 __ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); | 8540 __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); |
8557 __ shrl(mask, Immediate(1)); // Divide length by two (length is not a smi). | 8541 // Divide smi tagged length by two. |
8558 __ subl(mask, Immediate(1)); // Make mask. | 8542 __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1); |
| 8543 __ subq(mask, Immediate(1)); // Make mask. |
8559 | 8544 |
8560 // Calculate the entry in the number string cache. The hash value in the | 8545 // Calculate the entry in the number string cache. The hash value in the |
8561 // number string cache for smis is just the smi value, and the hash for | 8546 // number string cache for smis is just the smi value, and the hash for |
8562 // doubles is the xor of the upper and lower words. See | 8547 // doubles is the xor of the upper and lower words. See |
8563 // Heap::GetNumberStringCache. | 8548 // Heap::GetNumberStringCache. |
8564 Label is_smi; | 8549 Label is_smi; |
8565 Label load_result_from_cache; | 8550 Label load_result_from_cache; |
8566 if (!object_is_smi) { | 8551 if (!object_is_smi) { |
8567 __ JumpIfSmi(object, &is_smi); | 8552 __ JumpIfSmi(object, &is_smi); |
8568 __ CheckMap(object, Factory::heap_number_map(), not_found, true); | 8553 __ CheckMap(object, Factory::heap_number_map(), not_found, true); |
(...skipping 498 matching lines...)
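Note: in the number-to-string cache hunk above, the hash mask is now derived from a smi-tagged length. PositiveSmiDivPowerOfTwoToInteger32 halves the length and untags it in one operation; under the assumed encoding (payload in the upper 32 bits) that is a single logical right shift by 32 + k, as in this sketch (illustrative helper, not the MacroAssembler API):

#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;  // assumed x64 smi shift

inline int32_t PositiveSmiDivPowerOfTwoToInt32(int64_t smi, int power) {
  // Divides the (non-negative) payload by 2^power and drops the tag at once.
  return static_cast<int32_t>(static_cast<uint64_t>(smi) >> (kSmiShift + power));
}

int main() {
  int64_t tagged_length = static_cast<int64_t>(128) << kSmiShift;  // smi 128
  // Cache entries are (number, string) pairs, so mask = length / 2 - 1.
  int32_t mask = PositiveSmiDivPowerOfTwoToInt32(tagged_length, 1) - 1;
  assert(mask == 63);
  return 0;
}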
9067 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); | 9052 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); |
9068 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); | 9053 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); |
9069 | 9054 |
9070 // If there are no actual arguments, we're done. | 9055 // If there are no actual arguments, we're done. |
9071 Label done; | 9056 Label done; |
9072 __ testq(rcx, rcx); | 9057 __ testq(rcx, rcx); |
9073 __ j(zero, &done); | 9058 __ j(zero, &done); |
9074 | 9059 |
9075 // Get the parameters pointer from the stack and untag the length. | 9060 // Get the parameters pointer from the stack and untag the length. |
9076 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); | 9061 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); |
9077 __ SmiToInteger32(rcx, rcx); | |
9078 | 9062 |
9079 // Setup the elements pointer in the allocated arguments object and | 9063 // Setup the elements pointer in the allocated arguments object and |
9080 // initialize the header in the elements fixed array. | 9064 // initialize the header in the elements fixed array. |
9081 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); | 9065 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); |
9082 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); | 9066 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); |
9083 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); | 9067 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); |
9084 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); | 9068 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); |
9085 __ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); | 9069 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); |
| 9070 __ SmiToInteger32(rcx, rcx); // Untag length for the loop below. |
9086 | 9071 |
9087 // Copy the fixed array slots. | 9072 // Copy the fixed array slots. |
9088 Label loop; | 9073 Label loop; |
9089 __ bind(&loop); | 9074 __ bind(&loop); |
9090 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. | 9075 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. |
9091 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); | 9076 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); |
9092 __ addq(rdi, Immediate(kPointerSize)); | 9077 __ addq(rdi, Immediate(kPointerSize)); |
9093 __ subq(rdx, Immediate(kPointerSize)); | 9078 __ subq(rdx, Immediate(kPointerSize)); |
9094 __ decq(rcx); | 9079 __ decq(rcx); |
9095 __ j(not_zero, &loop); | 9080 __ j(not_zero, &loop); |
(...skipping 1750 matching lines...)
10846 __ movl(rcx, r8); | 10831 __ movl(rcx, r8); |
10847 __ and_(rcx, r9); | 10832 __ and_(rcx, r9); |
10848 ASSERT(kStringEncodingMask == kAsciiStringTag); | 10833 ASSERT(kStringEncodingMask == kAsciiStringTag); |
10849 __ testl(rcx, Immediate(kAsciiStringTag)); | 10834 __ testl(rcx, Immediate(kAsciiStringTag)); |
10850 __ j(zero, &non_ascii); | 10835 __ j(zero, &non_ascii); |
10851 // Allocate an ascii cons string. | 10836 // Allocate an ascii cons string. |
10852 __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime); | 10837 __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime); |
10853 __ bind(&allocated); | 10838 __ bind(&allocated); |
10854 // Fill the fields of the cons string. | 10839 // Fill the fields of the cons string. |
10855 __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx); | 10840 __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx); |
10856 __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset), | 10841 __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset), |
10857 Immediate(String::kEmptyHashField)); | 10842 Immediate(String::kEmptyHashField)); |
10858 __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax); | 10843 __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax); |
10859 __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx); | 10844 __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx); |
10860 __ movq(rax, rcx); | 10845 __ movq(rax, rcx); |
10861 __ IncrementCounter(&Counters::string_add_native, 1); | 10846 __ IncrementCounter(&Counters::string_add_native, 1); |
10862 __ ret(2 * kPointerSize); | 10847 __ ret(2 * kPointerSize); |
10863 __ bind(&non_ascii); | 10848 __ bind(&non_ascii); |
10864 // Allocate a two byte cons string. | 10849 // Allocate a two byte cons string. |
10865 __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime); | 10850 __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime); |
10866 __ jmp(&allocated); | 10851 __ jmp(&allocated); |
(...skipping 715 matching lines...)
11582 // Call the function from C++. | 11567 // Call the function from C++. |
11583 return FUNCTION_CAST<ModuloFunction>(buffer); | 11568 return FUNCTION_CAST<ModuloFunction>(buffer); |
11584 } | 11569 } |
11585 | 11570 |
11586 #endif | 11571 #endif |
11587 | 11572 |
11588 | 11573 |
11589 #undef __ | 11574 #undef __ |
11590 | 11575 |
11591 } } // namespace v8::internal | 11576 } } // namespace v8::internal |