Chromium Code Reviews

Unified Diff: src/x64/codegen-x64.cc

Issue 2144006: Cardmarking writebarrier. (Closed)
Patch Set: change NewSpace and SemiSpace Contains to match HasHeapObjectTag (created 10 years, 7 months ago)
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1877 matching lines...)
   // the object)
   __ movq(rcx, rax);
   __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
   // Get the bridge array held in the enumeration index field.
   __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
   // Get the cache from the bridge array.
   __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));

   frame_->EmitPush(rax);  // <- slot 3
   frame_->EmitPush(rdx);  // <- slot 2
-  __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
-  __ Integer32ToSmi(rax, rax);
+  __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
   frame_->EmitPush(rax);  // <- slot 1
   frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
   entry.Jump();

   fixed_array.Bind();
   // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
   frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
   frame_->EmitPush(rax);  // <- slot 2

   // Push the length of the array and the initial index onto the stack.
-  __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
-  __ Integer32ToSmi(rax, rax);
+  __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   frame_->EmitPush(rax);  // <- slot 1
   frame_->EmitPush(Smi::FromInt(0));  // <- slot 0

   // Condition.
   entry.Bind();
   // Grab the current frame's height for the break and continue
   // targets only after all the state is pushed on the frame.
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);

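Editor's note: the hunk above shows the recurring pattern of this CL. FixedArray lengths are now stored smi-tagged, so the old movl-load-then-Integer32ToSmi pair collapses into one movq of the already-tagged word. A minimal sketch of the x64 smi encoding this relies on, assuming the era's 32-bit smi shift (kSmiShift here is an assumption for illustration, not quoted from the patch):

    #include <cassert>
    #include <cstdint>

    // Assumption: x64 smis keep the 32-bit payload in the upper half of
    // the word, with zero tag bits in the lower half.
    constexpr int kSmiShift = 32;

    uint64_t Integer32ToSmi(int32_t value) {
      // Tagging is a single left shift; the low 32 bits stay zero.
      return static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift;
    }

    int32_t SmiToInteger32(uint64_t smi) {
      // Untagging is the matching arithmetic right shift.
      return static_cast<int32_t>(static_cast<int64_t>(smi) >> kSmiShift);
    }

    int main() {
      // A movq of a smi-tagged length needs no separate Integer32ToSmi.
      assert(SmiToInteger32(Integer32ToSmi(-7)) == -7);
      assert(Integer32ToSmi(1) == (1ULL << 32));
      return 0;
    }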
(...skipping 2543 matching lines...)

   // Fill out the elements FixedArray.
   // rax: JSArray.
   // rcx: FixedArray.
   // rbx: Number of elements in array as int32.

   // Set map.
   __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
           Factory::fixed_array_map());
   // Set length.
-  __ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
+  __ Integer32ToSmi(rdx, rbx);
+  __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
   // Fill contents of fixed-array with the-hole.
   __ Move(rdx, Factory::the_hole_value());
   __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
   // Fill fixed array elements with hole.
   // rax: JSArray.
   // rbx: Number of elements in array that remains to be filled, as int32.
   // rcx: Start of elements in FixedArray.
   // rdx: the hole.
   Label loop;
   __ testl(rbx, rbx);
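Aside for readers skimming the JSArray fast path: after the map and the now smi-tagged length are written, the loop that follows fills every element slot with the-hole so the array reads as uninitialized. A hedged C++ model of that setup (kSmiShift and the hole sentinel are illustrative stand-ins):

    #include <cstdint>
    #include <vector>

    constexpr int kSmiShift = 32;               // assumed x64 smi shift
    constexpr uint64_t kTheHoleValue = 0xDEAD;  // stand-in sentinel

    struct FixedArrayModel {
      uint64_t length_field;           // stored smi-tagged after this patch
      std::vector<uint64_t> elements;  // every slot starts as the hole
    };

    FixedArrayModel NewHoleyFixedArray(int32_t length) {
      FixedArrayModel array;
      array.length_field = static_cast<uint64_t>(length) << kSmiShift;
      array.elements.assign(static_cast<size_t>(length), kTheHoleValue);
      return array;
    }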
(...skipping 98 matching lines...)
   __ InvokeFunction(rdi, expected, CALL_FUNCTION);

   // Find a place to put new cached value into.
   Label add_new_entry, update_cache;
   __ movq(rcx, Operand(rsp, kPointerSize));  // restore the cache
   // Possible optimization: cache size is constant for the given cache
   // so technically we could use a constant here. However, if we have
   // cache miss this optimization would hardly matter much.

   // Check if we could add new entry to cache.
-  __ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
   __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiToInteger32(r9, r9);
-  __ cmpq(rbx, r9);
+  __ SmiCompare(rbx, r9);
   __ j(greater, &add_new_entry);

   // Check if we could evict entry after finger.
   __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
   __ SmiToInteger32(rdx, rdx);
+  __ SmiToInteger32(rbx, rbx);
   __ addq(rdx, kEntrySizeImm);
   Label forward;
   __ cmpq(rbx, rdx);
   __ j(greater, &forward);
   // Need to wrap over the cache.
   __ movq(rdx, kEntriesIndexImm);
   __ bind(&forward);
   __ Integer32ToSmi(r9, rdx);
   __ jmp(&update_cache);

   __ bind(&add_new_entry);
-  // r9 holds cache size as int.
-  __ movq(rdx, r9);
-  __ Integer32ToSmi(r9, r9);
+  // r9 holds cache size as smi.
+  __ SmiToInteger32(rdx, r9);
   __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
   __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);

   // Update the cache itself.
   // rdx holds the index as int.
   // r9 holds the index as smi.
   __ bind(&update_cache);
   __ pop(rbx);  // restore the key
   __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
   // Store key.
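The eviction policy above is easier to follow outside assembly. A sketch under assumed layout constants (two header slots for cache size and finger, two slots per key/value entry; the names are illustrative, not quoted from V8): if the array still has room, a new entry is appended and the size grows; otherwise the entry after the finger is evicted, wrapping back to the first entry slot, and the finger always ends up on the slot just written.

    #include <cstdint>
    #include <vector>

    constexpr size_t kEntriesIndex = 2;  // assumed: slots 0-1 hold size/finger
    constexpr size_t kEntrySize = 2;     // assumed: one key slot, one value slot

    struct FunctionResultCache {
      std::vector<uint64_t> slots;  // slots[0] = cache size, slots[1] = finger

      explicit FunctionResultCache(size_t entries)
          : slots(kEntriesIndex + entries * kEntrySize, 0) {
        slots[0] = kEntriesIndex;   // size: index one past the used entries
        slots[1] = kEntriesIndex;   // finger starts at the first entry
      }

      void Put(uint64_t key, uint64_t value) {
        const size_t length = slots.size();
        const size_t size = slots[0];
        size_t index;
        if (length > size) {
          // add_new_entry: room left, append and grow the size field.
          index = size;
          slots[0] = size + kEntrySize;
        } else {
          // Cache full: evict the entry after the finger, wrapping over
          // the cache exactly like the kEntriesIndexImm reload above.
          index = slots[1] + kEntrySize;
          if (index >= length) index = kEntriesIndex;
        }
        slots[1] = index;           // update_cache: move the finger
        slots[index] = key;
        slots[index + 1] = value;
      }
    };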
(...skipping 2573 matching lines...)
   Result key = frame_->Pop();
   Result receiver = frame_->Pop();
   key.ToRegister();
   receiver.ToRegister();

   // Use a fresh temporary to load the elements without destroying
   // the receiver which is needed for the deferred slow case.
   Result elements = allocator()->Allocate();
   ASSERT(elements.is_valid());

-  // Use a fresh temporary for the index and later the loaded
-  // value.
-  Result index = allocator()->Allocate();
-  ASSERT(index.is_valid());
-
   DeferredReferenceGetKeyedValue* deferred =
-      new DeferredReferenceGetKeyedValue(index.reg(),
+      new DeferredReferenceGetKeyedValue(elements.reg(),
                                          receiver.reg(),
                                          key.reg(),
                                          is_global);

   // Check that the receiver is not a smi (only needed if this
   // is not a load from the global context) and that it has the
   // expected map.
   if (!is_global) {
     __ JumpIfSmi(receiver.reg(), deferred->entry_label());
   }
(...skipping 15 matching lines...)
   __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());

   // Get the elements array from the receiver and check that it
   // is not a dictionary.
   __ movq(elements.reg(),
           FieldOperand(receiver.reg(), JSObject::kElementsOffset));
   __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
          Factory::fixed_array_map());
   deferred->Branch(not_equal);

-  // Shift the key to get the actual index value and check that
-  // it is within bounds.
-  __ SmiToInteger32(index.reg(), key.reg());
-  __ cmpl(index.reg(),
-          FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+  // Check that key is within bounds.
+  __ SmiCompare(key.reg(),
+                FieldOperand(elements.reg(), FixedArray::kLengthOffset));
   deferred->Branch(above_equal);

-  // The index register holds the un-smi-tagged key. It has been
-  // zero-extended to 64-bits, so it can be used directly as index in the
-  // operand below.
-  // Load and check that the result is not the hole. We could
-  // reuse the index or elements register for the value.
-  //
-  // TODO(206): Consider whether it makes sense to try some
-  // heuristic about which register to reuse. For example, if
-  // one is rax, the we can reuse that one because the value
-  // coming from the deferred code will be in rax.
-  Result value = index;
+  // The key register holds the smi-tagged key. Load the value and
+  // check that it is not the hole value.
+  Result value = elements;
+  SmiIndex index =
+      masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
   __ movq(value.reg(),
-          Operand(elements.reg(),
-                  index.reg(),
-                  times_pointer_size,
-                  FixedArray::kHeaderSize - kHeapObjectTag));
-  elements.Unuse();
-  index.Unuse();
+          FieldOperand(elements.reg(),
+                       index.reg,
+                       index.scale,
+                       FixedArray::kHeaderSize));
   __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
   deferred->Branch(equal);
   __ IncrementCounter(&Counters::keyed_load_inline, 1);

   deferred->BindExit();
   // Restore the receiver and key to the frame and push the
   // result on top of it.
   frame_->Push(&receiver);
   frame_->Push(&key);
   return value;
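Why the rewritten load needs no separate untag step: with a 32-bit smi shift, SmiToIndex can turn the tagged key into a ready-scaled byte offset with a single arithmetic shift right by (kSmiShift - kPointerSizeLog2). A small check of that arithmetic, under the same assumed encoding as the earlier note:

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiShift = 32;        // assumed x64 smi shift
    constexpr int kPointerSizeLog2 = 3;  // 8-byte elements

    // Models SmiToIndex for scales below kSmiShift: one arithmetic right
    // shift both untags the key and multiplies it by the element size.
    uint64_t SmiKeyToByteOffset(uint64_t smi_key) {
      return static_cast<uint64_t>(
          static_cast<int64_t>(smi_key) >> (kSmiShift - kPointerSizeLog2));
    }

    int main() {
      const uint64_t smi_7 = static_cast<uint64_t>(7) << kSmiShift;
      assert(SmiKeyToByteOffset(smi_7) == 7 * 8);  // element 7, 8 bytes each
      return 0;
    }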
(...skipping 278 matching lines...)
                 key.reg());
   deferred->Branch(below_equal);

   // Get the elements array from the receiver and check that it
   // is a flat array (not a dictionary).
   __ movq(tmp.reg(),
           FieldOperand(receiver.reg(), JSObject::kElementsOffset));

   // Check whether it is possible to omit the write barrier. If the
   // elements array is in new space or the value written is a smi we can
-  // safely update the elements array without updating the remembered set.
+  // safely update the elements array without write barrier.
   Label in_new_space;
   __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
   if (!value_is_constant) {
     __ JumpIfNotSmi(value.reg(), deferred->entry_label());
   }

   __ bind(&in_new_space);
   // Bind the deferred code patch site to be able to locate the
   // fixed array map comparison. When debugging, we patch this
   // comparison to always fail so that we will hit the IC call
   // in the deferred code which will allow the debugger to
   // break for fast case stores.
   __ bind(deferred->patch_site());
   // Avoid using __ to ensure the distance from patch_site
   // to the map address is always the same.
   masm->movq(kScratchRegister, Factory::fixed_array_map(),
              RelocInfo::EMBEDDED_OBJECT);
   __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
           kScratchRegister);
   deferred->Branch(not_equal);

   // Store the value.
   SmiIndex index =
       masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-  __ movq(Operand(tmp.reg(),
-                  index.reg,
-                  index.scale,
-                  FixedArray::kHeaderSize - kHeapObjectTag),
-          value.reg());
+  __ movq(FieldOperand(tmp.reg(),
+                       index.reg,
+                       index.scale,
+                       FixedArray::kHeaderSize),
+          value.reg());
   __ IncrementCounter(&Counters::keyed_store_inline, 1);

   deferred->BindExit();

   cgen_->frame()->Push(&value);
 } else {
   Result answer = cgen_->frame()->CallKeyedStoreIC();
   // Make sure that we do not have a test instruction after the
   // call. A test instruction after the call is used to
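This guard is the heart of the CL's fast path: stores of smis, and stores into arrays already living in new space, skip the (now card-marking) write barrier entirely. A hedged model of the decision; the mask and base address below are placeholders, but the mask-and-compare shape matches what the InNewSpace macro emits:

    #include <cstdint>

    // Placeholder layout: new space is one aligned region, so membership
    // is a mask-and-compare on the object's address.
    constexpr uintptr_t kNewSpaceStart = 0x4000000000;               // assumed
    constexpr uintptr_t kNewSpaceMask = ~((uintptr_t{1} << 24) - 1); // assumed

    bool InNewSpace(uintptr_t object_address) {
      return (object_address & kNewSpaceMask) == kNewSpaceStart;
    }

    bool IsSmi(uint64_t value) {
      return (value & 1) == 0;  // assumption: smi tag is a zero low bit
    }

    // A keyed store may skip the write barrier when the value is a smi
    // (nothing for the collector to trace) or the elements array itself is
    // in new space (the scavenger already visits those objects).
    bool CanOmitWriteBarrier(uint64_t value, uintptr_t elements_address) {
      return IsSmi(value) || InNewSpace(elements_address);
    }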
(...skipping 59 matching lines...)
   int length = slots_ + Context::MIN_CONTEXT_SLOTS;
   __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                         rax, rbx, rcx, &gc, TAG_OBJECT);

   // Get the function from the stack.
   __ movq(rcx, Operand(rsp, 1 * kPointerSize));

   // Setup the object header.
   __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
   __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
-  __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
+  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));

   // Setup the fixed slots.
   __ xor_(rbx, rbx);  // Set to NULL.
   __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
   __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
   __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
   __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);

   // Copy the global object from the surrounding context.
   __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
(...skipping 654 matching lines...)
   __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
   __ j(not_equal, &runtime);
   // Check that the JSArray is in fast case.
   __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
   __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
   __ Cmp(rax, Factory::fixed_array_map());
   __ j(not_equal, &runtime);
   // Check that the last match info has space for the capture registers and
   // the additional information. Ensure no overflow in add.
   ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
-  __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+  __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(rax, rax);
   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
   __ cmpl(rdx, rax);
   __ j(greater, &runtime);

   // ecx: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
   Label seq_string, seq_two_byte_string, check_code;
   const int kStringRepresentationEncodingMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   __ movq(rax, Operand(rsp, kSubjectOffset));
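On "Ensure no overflow in add": the ASSERT keeps the maximum array length comfortably below kMaxInt, so adding the constant overhead to the capture count cannot wrap a 32-bit int before the signed compare runs. A sketch with illustrative constants (both values are stand-ins for RegExpImpl::kLastMatchOverhead and FixedArray::kMaxLength):

    #include <climits>

    constexpr int kLastMatchOverhead = 3;    // stand-in value
    constexpr int kMaxLength = (1 << 26) - 1;  // stand-in value

    // Mirrors the ASSERT above: with this bound, the addition below can
    // never overflow before the comparison happens.
    static_assert(kMaxLength < INT_MAX - kLastMatchOverhead,
                  "no overflow in add");

    bool LastMatchInfoHasSpace(int capture_registers, int array_length) {
      // Assumes capture_registers <= kMaxLength, as the callers ensure.
      return capture_registers + kLastMatchOverhead <= array_length;
    }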
(...skipping 257 matching lines...)
   // Use of registers. Register result is used as a temporary.
   Register number_string_cache = result;
   Register mask = scratch1;
   Register scratch = scratch2;

   // Load the number string cache.
   __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
-  __ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
-  __ shrl(mask, Immediate(1));  // Divide length by two (length is not a smi).
-  __ subl(mask, Immediate(1));  // Make mask.
+  __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  // Divide smi tagged length by two.
+  __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+  __ subq(mask, Immediate(1));  // Make mask.

   // Calculate the entry in the number string cache. The hash value in the
   // number string cache for smis is just the smi value, and the hash for
   // doubles is the xor of the upper and lower words. See
   // Heap::GetNumberStringCache.
   Label is_smi;
   Label load_result_from_cache;
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
     __ CheckMap(object, Factory::heap_number_map(), not_found, true);
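A compact model of the lookup being prepared here: the cache is a FixedArray of (number, string) pairs, so the power-of-two mask is length / 2 - 1 (now derived from the smi-tagged length via PositiveSmiDivPowerOfTwoToInteger32), and a double hashes to the xor of its 32-bit halves, as the comment's reference to Heap::GetNumberStringCache says. A hedged sketch:

    #include <cstdint>
    #include <cstring>

    uint32_t DoubleHash(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // bit pattern of the double
      return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
    }

    // cache_length counts slots; each entry uses two slots (number, string).
    uint32_t NumberSlotIndex(uint32_t hash, uint32_t cache_length) {
      const uint32_t mask = cache_length / 2 - 1;
      return (hash & mask) * 2;  // the string slot follows at index + 1
    }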
(...skipping 499 matching lines...)
   __ movq(rcx, Operand(rsp, 1 * kPointerSize));
   __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);

   // If there are no actual arguments, we're done.
   Label done;
   __ testq(rcx, rcx);
   __ j(zero, &done);

   // Get the parameters pointer from the stack and untag the length.
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-  __ SmiToInteger32(rcx, rcx);

   // Setup the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
   __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
   __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-  __ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+  __ SmiToInteger32(rcx, rcx);  // Untag length for the loop below.

   // Copy the fixed array slots.
   Label loop;
   __ bind(&loop);
   __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
   __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
   __ addq(rdi, Immediate(kPointerSize));
   __ subq(rdx, Immediate(kPointerSize));
   __ decq(rcx);
   __ j(not_zero, &loop);
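A plain-C++ reading of the copy loop above: the source pointer walks down the stack past the receiver while the destination walks up the elements array (the function and parameter names are illustrative):

    #include <cstdint>

    // params_top points at the receiver slot; the arguments sit below it on
    // the stack, so params_top[-1] is the last pushed argument, matching
    // the "-1 * kPointerSize" operand in the generated loop.
    void CopyArgumentsToElements(const uint64_t* params_top,
                                 uint64_t* elements, int count) {
      for (int i = 0; i < count; ++i) {
        elements[i] = params_top[-1 - i];  // down the stack, up the array
      }
    }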
(...skipping 1750 matching lines...)
   __ movl(rcx, r8);
   __ and_(rcx, r9);
   ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ testl(rcx, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii);
   // Allocate an acsii cons string.
   __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
   __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
-  __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
-          Immediate(String::kEmptyHashField));
+  __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
+          Immediate(String::kEmptyHashField));
   __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
   __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
   __ movq(rax, rcx);
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
   // Allocate a two byte cons string.
   __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ jmp(&allocated);
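For reference while reading the stores above, a hedged struct view of the cons string being filled in (the field order mirrors the offsets used here; a cons string records its two halves rather than copying characters, making concatenation O(1)):

    #include <cstdint>

    struct ConsStringModel {
      uint64_t map;         // ascii or two-byte cons string map
      uint64_t length;      // smi-tagged total length (rbx above)
      uint64_t hash_field;  // String::kEmptyHashField until first hashed
      const void* first;    // left half of the concatenation (rax)
      const void* second;   // right half (rdx)
    };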
(...skipping 717 matching lines...)
 }

 #endif


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64