Chromium Code Reviews

Unified Diff: src/x64/ic-x64.cc

Issue 105503006: Replace movq with movp for X64 when the operand size is kPointerSize (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebased with bleeding_edge (created 6 years, 11 months ago)
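
For context: on x64, movq always moves a full 64-bit quadword, whereas movp is V8's pointer-width move; it is the right choice here because every converted operand is a tagged pointer of size kPointerSize. Below is a minimal sketch of how such a macro could be defined, assuming V8-style names (kInt64Size, movl, Operand); the actual macro-assembler definition may differ:

  // Sketch only: a pointer-width move that selects the instruction from
  // the configured pointer size. On stock x64, kPointerSize == kInt64Size,
  // so movp degenerates to movq; in a 32-bit-pointer build (the x32 ABI)
  // it would emit a doubleword movl instead.
  void MacroAssembler::movp(Register dst, const Operand& src) {
    if (kPointerSize == kInt64Size) {
      movq(dst, src);  // 8-byte pointers: quadword move
    } else {
      movl(dst, src);  // 4-byte pointers: doubleword move
    }
  }
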
Index: src/x64/ic-x64.cc
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 454e3bdaeb21d5ba8046379d4c0ef77648720043..b307e693e33978b6babe21cd585812116f5ea9fe 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -74,7 +74,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
__ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
- __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
__ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
__ j(below, miss);
@@ -90,7 +90,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
(1 << Map::kHasNamedInterceptor)));
__ j(not_zero, miss);
- __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, miss);
@@ -150,7 +150,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(result,
+ __ movp(result,
Operand(elements, r1, times_pointer_size,
kValueOffset - kHeapObjectTag));
}
@@ -216,10 +216,10 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
scratch1,
times_pointer_size,
kValueOffset - kHeapObjectTag));
- __ movq(Operand(scratch1, 0), value);
+ __ movp(Operand(scratch1, 0), value);
// Update write barrier. Make sure not to clobber the value.
- __ movq(scratch0, value);
+ __ movp(scratch0, value);
__ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
@@ -284,7 +284,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
//
// scratch - used to hold elements of the receiver and the loaded value.
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
__ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
@@ -299,7 +299,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
__ j(above_equal, out_of_range);
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movq(scratch, FieldOperand(elements,
+ __ movp(scratch, FieldOperand(elements,
index.reg,
index.scale,
FixedArray::kHeaderSize));
@@ -308,7 +308,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
if (!result.is(scratch)) {
- __ movq(result, scratch);
+ __ movp(result, scratch);
}
}
@@ -384,7 +384,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
__ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rdx: receiver
@@ -412,14 +412,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ movl(rcx, rbx);
__ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
@@ -438,7 +438,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
- __ movq(rdi, rcx);
+ __ movp(rdi, rcx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
@@ -479,14 +479,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&load_in_object_property);
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rcx, rdi);
- __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
+ __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
__ bind(&property_array_property);
- __ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
+ __ movp(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rax, FieldOperand(rax, rdi, times_pointer_size,
FixedArray::kHeaderSize));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -498,7 +498,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// rax: key
// rbx: elements
- __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kMapOffset));
__ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
@@ -560,7 +560,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ JumpUnlessNonNegativeSmi(rax, &slow);
// Get the map of the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
@@ -605,7 +605,7 @@ static void KeyedStoreGenerateGenericHelper(
// rdx: receiver (a JSArray)
// r9: map of receiver
if (check_map == kCheckMap) {
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, fast_double);
}
@@ -614,7 +614,7 @@ static void KeyedStoreGenerateGenericHelper(
// We have to go to the runtime if the current value is the hole because
// there may be a callback on the element
Label holecheck_passed1;
- __ movq(kScratchRegister, FieldOperand(rbx,
+ __ movp(kScratchRegister, FieldOperand(rbx,
rcx,
times_pointer_size,
FixedArray::kHeaderSize));
@@ -633,7 +633,7 @@ static void KeyedStoreGenerateGenericHelper(
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
// It's irrelevant whether array is smi-only or not when writing a smi.
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
__ ret(0);
@@ -648,9 +648,9 @@ static void KeyedStoreGenerateGenericHelper(
__ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
- __ movq(rdx, rax); // Preserve the value which is returned.
+ __ movp(rdx, rax); // Preserve the value which is returned.
__ RecordWriteArray(
rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
@@ -683,10 +683,10 @@ static void KeyedStoreGenerateGenericHelper(
__ ret(0);
__ bind(&transition_smi_elements);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
- __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &non_double_value);
@@ -700,7 +700,7 @@ static void KeyedStoreGenerateGenericHelper(
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
@@ -713,14 +713,14 @@ static void KeyedStoreGenerateGenericHelper(
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
rbx,
@@ -728,7 +728,7 @@ static void KeyedStoreGenerateGenericHelper(
slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -748,7 +748,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ testb(FieldOperand(r9, Map::kBitFieldOffset),
@@ -768,7 +768,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rdx: JSObject
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check array bounds.
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
@@ -796,7 +796,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(below_equal, &slow);
// Increment index to get new length.
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_if_double_array);
__ jmp(&fast_object_grow);
@@ -814,7 +814,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rdx: receiver (a JSArray)
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
@@ -928,7 +928,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
Label miss;
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
@@ -964,7 +964,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
}
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
// Enter an internal frame.
{
@@ -981,14 +981,14 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
+ __ movp(rdi, rax);
}
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global;
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &invoke);
__ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
@@ -998,7 +998,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
// Patch the receiver on the stack.
__ bind(&global);
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetReceiverOperand(), rdx);
+ __ movp(args.GetReceiverOperand(), rdx);
__ bind(&invoke);
}
@@ -1022,7 +1022,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
GenerateMiss(masm, argc, extra_ic_state);
}
@@ -1040,7 +1040,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// -----------------------------------
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
Label do_call, slow_call, slow_load;
Label check_number_dictionary, check_name, lookup_monomorphic_cache;
@@ -1092,7 +1092,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(rcx); // restore the key
}
- __ movq(rdi, rax);
+ __ movp(rdi, rax);
__ jmp(&do_call);
__ bind(&check_name);
@@ -1105,7 +1105,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateKeyedLoadReceiverCheck(
masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &lookup_monomorphic_cache);
@@ -1186,11 +1186,11 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load the elements into scratch1 and check its map. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments.
- __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
__ cmpq(key, scratch2);
__ j(greater_equal, unmapped_case);
@@ -1198,7 +1198,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load element index and check whether it is the hole.
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
__ SmiToInteger64(scratch3, key);
- __ movq(scratch2, FieldOperand(scratch1,
+ __ movp(scratch2, FieldOperand(scratch1,
scratch3,
times_pointer_size,
kHeaderSize));
@@ -1208,7 +1208,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load value from context and return it. We can reuse scratch1 because
// we do not jump to the unmapped lookup (which requires the parameter
// map in scratch1).
- __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+ __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
__ SmiToInteger64(scratch3, scratch2);
return FieldOperand(scratch1,
scratch3,
@@ -1228,10 +1228,10 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
// overwritten.
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
- __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmpq(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
@@ -1252,7 +1252,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand mapped_location =
GenerateMappedArgumentsLookup(
masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
- __ movq(rax, mapped_location);
+ __ movp(rax, mapped_location);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in rbx.
@@ -1260,7 +1260,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
__ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
__ j(equal, &slow);
- __ movq(rax, unmapped_location);
+ __ movp(rax, unmapped_location);
__ Ret();
__ bind(&slow);
GenerateMiss(masm);
@@ -1277,9 +1277,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Label slow, notin;
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
- __ movq(mapped_location, rax);
+ __ movp(mapped_location, rax);
__ lea(r9, mapped_location);
- __ movq(r8, rax);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
@@ -1291,9 +1291,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
// The unmapped lookup expects that the parameter map is in rbx.
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
- __ movq(unmapped_location, rax);
+ __ movp(unmapped_location, rax);
__ lea(r9, unmapped_location);
- __ movq(r8, rax);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
@@ -1319,10 +1319,10 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
// -----------------------------------
Label slow, notin;
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
- __ movq(rdi, mapped_location);
+ __ movp(rdi, mapped_location);
GenerateFunctionTailCall(masm, argc, &slow);
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in rbx.
@@ -1330,7 +1330,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
__ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
__ j(equal, &slow);
- __ movq(rdi, unmapped_location);
+ __ movp(rdi, unmapped_location);
GenerateFunctionTailCall(masm, argc, &slow);
__ bind(&slow);
GenerateMiss(masm, argc);
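
Summarizing the conversion rule applied throughout this patch (illustrative note, not part of the diff): loads and stores whose operand is a tagged pointer switch from movq to movp, while fixed-width integer operations keep their explicit sizes, since those do not scale with kPointerSize. For example, from the hunks above:

  // Pointer-sized load of a tagged value: converted.
  //   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));  // before
  //   __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));  // after
  // Fixed-width operations: left untouched.
  //   __ movl(rcx, rbx);       // 32-bit hash computation
  //   __ cmpq(key, scratch2);  // 64-bit smi comparison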