Chromium Code Reviews

Side by Side Diff: src/x64/codegen-x64.cc

Issue 2274001: Revert r4715. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 7 months ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1877 matching lines...)
 // the object)
 __ movq(rcx, rax);
 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
 // Get the bridge array held in the enumeration index field.
 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
 // Get the cache from the bridge array.
 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));

 frame_->EmitPush(rax);  // <- slot 3
 frame_->EmitPush(rdx);  // <- slot 2
-__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+__ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+__ Integer32ToSmi(rax, rax);
 frame_->EmitPush(rax);  // <- slot 1
 frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
 entry.Jump();

 fixed_array.Bind();
 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
 frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
 frame_->EmitPush(rax);  // <- slot 2

 // Push the length of the array and the initial index onto the stack.
-__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+__ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+__ Integer32ToSmi(rax, rax);
 frame_->EmitPush(rax);  // <- slot 1
 frame_->EmitPush(Smi::FromInt(0));  // <- slot 0

 // Condition.
 entry.Bind();
 // Grab the current frame's height for the break and continue
 // targets only after all the state is pushed on the frame.
 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);

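A pattern worth calling out once, since it repeats through the rest of the patch: the old code treated the FixedArray length field as a smi-tagged word (movq plus smi macros), while the patched code treats it as an untagged int32, loading it with movl and tagging explicitly only where a smi is actually wanted, as with the two frame pushes above. As a rough sketch of what the tagging macros do on x64, assuming the era's layout where the 32-bit payload sits in the high half of the word (illustrative helpers, not V8's actual macro-assembler API):

    #include <cstdint>

    // Illustrative stand-ins for the Integer32ToSmi / SmiToInteger32
    // macro-assembler operations, assuming kSmiShift == 32 on x64:
    // the 32-bit payload lives in the upper half of the 64-bit word.
    inline int64_t Integer32ToSmi(int32_t value) {
      return static_cast<int64_t>(value) << 32;  // tag: shift payload up
    }
    inline int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);    // untag: arithmetic shift down
    }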
(...skipping 2543 matching lines...)

 // Fill out the elements FixedArray.
 // rax: JSArray.
 // rcx: FixedArray.
 // rbx: Number of elements in array as int32.

 // Set map.
 __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
         Factory::fixed_array_map());
 // Set length.
-__ Integer32ToSmi(rdx, rbx);
-__ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
+__ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
 // Fill contents of fixed-array with the-hole.
 __ Move(rdx, Factory::the_hole_value());
 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
 // Fill fixed array elements with hole.
 // rax: JSArray.
 // rbx: Number of elements in array that remains to be filled, as int32.
 // rcx: Start of elements in FixedArray.
 // rdx: the hole.
 Label loop;
 __ testl(rbx, rbx);
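The loop body itself falls in the elided lines, but the shape is clear from the setup: skip everything when the element count is zero, otherwise store the hole sentinel into consecutive slots while counting rbx down. In plain C++ the equivalent is roughly (a sketch; kTheHole stands in for Factory::the_hole_value()):

    #include <cstdint>

    // Sketch of the hole-fill loop the assembly above sets up: slot
    // points at the first element, count arrives (in rbx) as an int32.
    void FillWithHoles(uint64_t* slot, int32_t count, uint64_t kTheHole) {
      while (count > 0) {      // testl(rbx, rbx) guards loop entry
        *slot++ = kTheHole;    // store sentinel, advance one element
        --count;
      }
    }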
(...skipping 98 matching lines...)
 __ InvokeFunction(rdi, expected, CALL_FUNCTION);

 // Find a place to put new cached value into.
 Label add_new_entry, update_cache;
 __ movq(rcx, Operand(rsp, kPointerSize));  // restore the cache
 // Possible optimization: cache size is constant for the given cache
 // so technically we could use a constant here. However, if we have
 // a cache miss this optimization would hardly matter much.

 // Check if we could add new entry to cache.
-__ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+__ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
 __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-__ SmiCompare(rbx, r9);
+__ SmiToInteger32(r9, r9);
+__ cmpq(rbx, r9);
 __ j(greater, &add_new_entry);

 // Check if we could evict entry after finger.
 __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
 __ SmiToInteger32(rdx, rdx);
-__ SmiToInteger32(rbx, rbx);
 __ addq(rdx, kEntrySizeImm);
 Label forward;
 __ cmpq(rbx, rdx);
 __ j(greater, &forward);
 // Need to wrap over the cache.
 __ movq(rdx, kEntriesIndexImm);
 __ bind(&forward);
 __ Integer32ToSmi(r9, rdx);
 __ jmp(&update_cache);

 __ bind(&add_new_entry);
-// r9 holds cache size as smi.
-__ SmiToInteger32(rdx, r9);
+// r9 holds cache size as int.
+__ movq(rdx, r9);
+__ Integer32ToSmi(r9, r9);
 __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
 __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);

 // Update the cache itself.
 // rdx holds the index as int.
 // r9 holds the index as smi.
 __ bind(&update_cache);
 __ pop(rbx);  // restore the key
 __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
 // Store key.
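To restate the finger logic in one place: the cache array stores fixed bookkeeping fields followed by (key, value) entries, the finger marks the most recently used entry, and when the cache is full the entry just past the finger is evicted, wrapping back to the first entry at the end of the array. A sketch of the index selection under those assumptions (constant names follow the kEntrySizeImm / kEntriesIndexImm immediates above; the exact layout is illustrative):

    // Sketch of the update-index choice emitted above. length is the
    // array length, size the used portion, finger the MRU entry index.
    int PickUpdateIndex(int length, int size, int finger,
                        int kEntrySize, int kEntriesIndex) {
      if (length > size) return size;      // room left: append a new entry
      int evict = finger + kEntrySize;     // try the entry after the finger
      if (evict >= length) evict = kEntriesIndex;  // wrap over the cache
      return evict;
    }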
(...skipping 2573 matching lines...)
 Result key = frame_->Pop();
 Result receiver = frame_->Pop();
 key.ToRegister();
 receiver.ToRegister();

 // Use a fresh temporary to load the elements without destroying
 // the receiver which is needed for the deferred slow case.
 Result elements = allocator()->Allocate();
 ASSERT(elements.is_valid());

+// Use a fresh temporary for the index and later the loaded
+// value.
+Result index = allocator()->Allocate();
+ASSERT(index.is_valid());
+
 DeferredReferenceGetKeyedValue* deferred =
-    new DeferredReferenceGetKeyedValue(elements.reg(),
+    new DeferredReferenceGetKeyedValue(index.reg(),
                                        receiver.reg(),
                                        key.reg(),
                                        is_global);

 // Check that the receiver is not a smi (only needed if this
 // is not a load from the global context) and that it has the
 // expected map.
 if (!is_global) {
   __ JumpIfSmi(receiver.reg(), deferred->entry_label());
 }
(...skipping 15 matching lines...)
 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());

 // Get the elements array from the receiver and check that it
 // is not a dictionary.
 __ movq(elements.reg(),
         FieldOperand(receiver.reg(), JSObject::kElementsOffset));
 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
        Factory::fixed_array_map());
 deferred->Branch(not_equal);

-// Check that key is within bounds.
-__ SmiCompare(key.reg(),
-              FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+// Shift the key to get the actual index value and check that
+// it is within bounds.
+__ SmiToInteger32(index.reg(), key.reg());
+__ cmpl(index.reg(),
+        FieldOperand(elements.reg(), FixedArray::kLengthOffset));
 deferred->Branch(above_equal);

-// The key register holds the smi-tagged key. Load the value and
-// check that it is not the hole value.
-Result value = elements;
-SmiIndex index =
-    masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-__ movq(value.reg(),
-        FieldOperand(elements.reg(),
-                     index.reg,
-                     index.scale,
-                     FixedArray::kHeaderSize));
+// The index register holds the un-smi-tagged key. It has been
+// zero-extended to 64 bits, so it can be used directly as the index
+// in the operand below.
+// Load and check that the result is not the hole. We could
+// reuse the index or elements register for the value.
+//
+// TODO(206): Consider whether it makes sense to try some
+// heuristic about which register to reuse. For example, if
+// one is rax, then we can reuse that one because the value
+// coming from the deferred code will be in rax.
+Result value = index;
+__ movq(value.reg(),
+        Operand(elements.reg(),
+                index.reg(),
+                times_pointer_size,
+                FixedArray::kHeaderSize - kHeapObjectTag));
+elements.Unuse();
+index.Unuse();
 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
 deferred->Branch(equal);
 __ IncrementCounter(&Counters::keyed_load_inline, 1);

 deferred->BindExit();
 // Restore the receiver and key to the frame and push the
 // result on top of it.
 frame_->Push(&receiver);
 frame_->Push(&key);
 return value;
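Summing up the fast path just emitted: the inline load succeeds only when the receiver has the expected map, the key is a non-negative smi whose untagged value is below the array length, and the loaded slot is not the hole; any other outcome branches to the deferred code, which falls back to the keyed-load IC. Roughly, as C++ (a sketch; the smi layout assumption matches the earlier note, and kTheHole is again a stand-in):

    #include <cstdint>

    // Sketch of the inline keyed-load fast path. Returns false where the
    // generated code would jump to the deferred (IC) path instead.
    bool TryInlineKeyedLoad(const uint64_t* elements, int32_t length,
                            int64_t key_smi, uint64_t kTheHole,
                            uint64_t* result) {
      if (key_smi < 0) return false;                  // JumpIfNotPositiveSmi
      uint32_t index = static_cast<uint32_t>(key_smi >> 32);  // SmiToInteger32
      if (index >= static_cast<uint32_t>(length)) return false;  // bounds check
      uint64_t value = elements[index];               // the scaled movq
      if (value == kTheHole) return false;            // hole: take slow path
      *result = value;
      return true;
    }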
(...skipping 278 matching lines...)
                         key.reg());
 deferred->Branch(below_equal);

 // Get the elements array from the receiver and check that it
 // is a flat array (not a dictionary).
 __ movq(tmp.reg(),
         FieldOperand(receiver.reg(), JSObject::kElementsOffset));

 // Check whether it is possible to omit the write barrier. If the
 // elements array is in new space or the value written is a smi we can
-// safely update the elements array without write barrier.
+// safely update the elements array without updating the remembered set.
 Label in_new_space;
 __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
 if (!value_is_constant) {
   __ JumpIfNotSmi(value.reg(), deferred->entry_label());
 }

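The comment rewording above ("write barrier" becoming "remembered set") tracks the mechanism this fast path skips: a store only needs to be recorded when it plants a pointer to a new-space object inside an old-space object. A sketch of the decision the two checks encode (generational-GC reasoning, not a literal V8 API):

    // Sketch of when the store below may skip the remembered-set update:
    // smis are immediates rather than heap pointers, and stores into an
    // object that itself lives in new space need no recording either.
    bool NeedsRememberedSetUpdate(bool elements_in_new_space,
                                  bool value_is_smi) {
      return !elements_in_new_space && !value_is_smi;
    }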
 __ bind(&in_new_space);
 // Bind the deferred code patch site to be able to locate the
 // fixed array map comparison. When debugging, we patch this
 // comparison to always fail so that we will hit the IC call
 // in the deferred code which will allow the debugger to
 // break for fast case stores.
 __ bind(deferred->patch_site());
 // Avoid using __ to ensure the distance from patch_site
 // to the map address is always the same.
 masm->movq(kScratchRegister, Factory::fixed_array_map(),
            RelocInfo::EMBEDDED_OBJECT);
 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
         kScratchRegister);
 deferred->Branch(not_equal);

 // Store the value.
 SmiIndex index =
     masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-__ movq(FieldOperand(tmp.reg(),
-                     index.reg,
-                     index.scale,
-                     FixedArray::kHeaderSize),
+__ movq(Operand(tmp.reg(),
+                index.reg,
+                index.scale,
+                FixedArray::kHeaderSize - kHeapObjectTag),
         value.reg());
 __ IncrementCounter(&Counters::keyed_store_inline, 1);

 deferred->BindExit();

 cgen_->frame()->Push(&value);
 } else {
 Result answer = cgen_->frame()->CallKeyedStoreIC();
 // Make sure that we do not have a test instruction after the
 // call. A test instruction after the call is used to
(...skipping 59 matching lines...)
 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                       rax, rbx, rcx, &gc, TAG_OBJECT);

 // Get the function from the stack.
 __ movq(rcx, Operand(rsp, 1 * kPointerSize));

 // Setup the object header.
 __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
-__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+__ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));

 // Setup the fixed slots.
 __ xor_(rbx, rbx);  // Set to NULL.
 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
 __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
 __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);

 // Copy the global object from the surrounding context.
 __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
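The allocation above is plain array arithmetic: a context is a FixedArray-shaped object whose length is the function's own slots plus a fixed minimum, one pointer per slot, behind a header holding map and length. A sketch with illustrative x64 constants (only the names appearing in the diff are taken from the source; the byte values below are assumptions):

    // Illustrative constants; not authoritative V8 values.
    const int kPointerSize = 8;                // x64
    const int kHeapObjectTag = 1;              // tagged heap pointers are off by one
    const int kHeaderSize = 2 * kPointerSize;  // FixedArray map + length words

    // Size handed to AllocateInNewSpace: header plus one word per slot.
    int ContextAllocationSize(int slots, int kMinContextSlots) {
      int length = slots + kMinContextSlots;   // Context::MIN_CONTEXT_SLOTS
      return length * kPointerSize + kHeaderSize;
    }

    // Sketch of Context::SlotOffset: slot offsets are computed relative
    // to the tagged pointer, hence the -kHeapObjectTag correction.
    int SlotOffset(int index) {
      return kHeaderSize + index * kPointerSize - kHeapObjectTag;
    }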
(...skipping 654 matching lines...)
 __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
 __ j(not_equal, &runtime);
 // Check that the JSArray is in fast case.
 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
 __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
 __ Cmp(rax, Factory::fixed_array_map());
 __ j(not_equal, &runtime);
 // Check that the last match info has space for the capture registers and the
 // additional information. Ensure no overflow in add.
 ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
-__ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
-__ SmiToInteger32(rax, rax);
+__ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
 __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
 __ cmpl(rdx, rax);
 __ j(greater, &runtime);

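With registers named, the check reads simply: rdx holds the number of capture registers, rax now holds the untagged length of the last-match-info elements array, and the ASSERT guarantees the addl cannot overflow. As a sketch:

    #include <cstdint>

    // Sketch of the capacity check: fall back to the runtime unless the
    // last-match-info array can hold every capture register plus the
    // fixed bookkeeping slots (kLastMatchOverhead; value not shown here).
    bool LastMatchInfoHasSpace(int32_t array_length,
                               int32_t capture_registers,
                               int32_t kLastMatchOverhead) {
      // j(greater, &runtime) fires on the negation of this condition.
      return capture_registers + kLastMatchOverhead <= array_length;
    }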
 // rcx: RegExp data (FixedArray)
 // Check the representation and encoding of the subject string.
 Label seq_string, seq_two_byte_string, check_code;
 const int kStringRepresentationEncodingMask =
     kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
 __ movq(rax, Operand(rsp, kSubjectOffset));
(...skipping 257 matching lines...)
 // Use of registers. Register result is used as a temporary.
 Register number_string_cache = result;
 Register mask = scratch1;
 Register scratch = scratch2;

 // Load the number string cache.
 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

 // Make the hash mask from the length of the number string cache. It
 // contains two elements (number and string) for each cache entry.
-__ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
-// Divide smi tagged length by two.
-__ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
-__ subq(mask, Immediate(1));  // Make mask.
+__ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+__ shrl(mask, Immediate(1));  // Divide length by two (length is not a smi).
+__ subl(mask, Immediate(1));  // Make mask.

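Because each cache entry occupies two array slots (the number and its string) and the cache capacity is a power of two, halving the length gives the entry count and subtracting one turns it into a bitmask, so hash & mask is the entry index modulo the cache size. Sketch:

    #include <cstdint>

    // Sketch of the mask computation above; cache_length is the raw
    // int32 length field of the number string cache.
    uint32_t NumberStringCacheEntry(uint32_t hash, int32_t cache_length) {
      uint32_t mask = (static_cast<uint32_t>(cache_length) >> 1) - 1;
      return hash & mask;   // entry index; the slot index is twice this
    }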
 // Calculate the entry in the number string cache. The hash value in the
 // number string cache for smis is just the smi value, and the hash for
 // doubles is the xor of the upper and lower words. See
 // Heap::GetNumberStringCache.
 Label is_smi;
 Label load_result_from_cache;
 if (!object_is_smi) {
   __ JumpIfSmi(object, &is_smi);
   __ CheckMap(object, Factory::heap_number_map(), not_found, true);
(...skipping 499 matching lines...)
 __ movq(rcx, Operand(rsp, 1 * kPointerSize));
 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);

 // If there are no actual arguments, we're done.
 Label done;
 __ testq(rcx, rcx);
 __ j(zero, &done);

 // Get the parameters pointer from the stack and untag the length.
 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+__ SmiToInteger32(rcx, rcx);

 // Setup the elements pointer in the allocated arguments object and
 // initialize the header in the elements fixed array.
 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-__ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
-__ SmiToInteger32(rcx, rcx);  // Untag length for the loop below.
+__ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);

 // Copy the fixed array slots.
 Label loop;
 __ bind(&loop);
 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
 __ addq(rdi, Immediate(kPointerSize));
 __ subq(rdx, Immediate(kPointerSize));
 __ decq(rcx);
 __ j(not_zero, &loop);
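The copy loop walks the two pointers in opposite directions: rdx starts at the parameters pointer and moves down the stack (reading the word just below it each iteration skips the receiver), while rdi advances forward through the FixedArray body, with rcx counting the untagged length down to zero. Roughly (a sketch):

    #include <cstdint>

    // Sketch of the argument-copying loop: parameters sit on the stack
    // below params_top (receiver excluded); destination slots ascend.
    void CopyArguments(const uint64_t* params_top, uint64_t* dest,
                       int32_t count) {
      while (count-- > 0) {
        *dest++ = *--params_top;  // read downward, write upward
      }
    }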
(...skipping 1750 matching lines...)
 __ movl(rcx, r8);
 __ and_(rcx, r9);
 ASSERT(kStringEncodingMask == kAsciiStringTag);
 __ testl(rcx, Immediate(kAsciiStringTag));
 __ j(zero, &non_ascii);
 // Allocate an ascii cons string.
 __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
 __ bind(&allocated);
 // Fill the fields of the cons string.
 __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
-__ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
-        Immediate(String::kEmptyHashField));
+__ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
+        Immediate(String::kEmptyHashField));
 __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
 __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
 __ movq(rax, rcx);
 __ IncrementCounter(&Counters::string_add_native, 1);
 __ ret(2 * kPointerSize);
 __ bind(&non_ascii);
 // Allocate a two byte cons string.
 __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
 __ jmp(&allocated);
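A cons string makes concatenation cheap: rather than copying characters, the result records its two halves and the combined length, and the characters are only flattened later when needed. A sketch of the fields the code above initializes (names follow the offsets in the diff; the concrete layout, including the hash field being a 32-bit slot as the movq-to-movl change suggests, is an assumption):

    #include <cstdint>

    // Illustrative view of the ConsString fields filled in above; the
    // map word is set by the allocation helper and omitted here.
    struct ConsStringFields {
      uint64_t length;      // kLengthOffset: smi-tagged total length (rbx)
      uint32_t hash_field;  // kHashFieldOffset: String::kEmptyHashField
      void* first;          // kFirstOffset: left operand (rax)
      void* second;         // kSecondOffset: right operand (rdx)
    };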
(...skipping 717 matching lines...)
 }

 #endif


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64