Index: src/ia32/code-stubs-ia32.cc |
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc |
index 00265dd7178a5eaa6507961df2850435c1313ee6..1ca577c01d339c4a0a24ef16f4beceb377835179 100644 |
--- a/src/ia32/code-stubs-ia32.cc |
+++ b/src/ia32/code-stubs-ia32.cc |
@@ -49,7 +49,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) { |
__ bind(&check_heap_number); |
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); |
Factory* factory = masm->isolate()->factory(); |
- __ cmp(Operand(ebx), Immediate(factory->heap_number_map())); |
+ __ cmp(ebx, Immediate(factory->heap_number_map())); |
__ j(not_equal, &call_builtin, Label::kNear); |
__ ret(0); |
@@ -150,7 +150,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { |
} |
// Return and remove the on-stack parameter. |
- __ mov(esi, Operand(eax)); |
+ __ mov(esi, eax); |
__ ret(1 * kPointerSize); |
// Need to collect. Call into runtime system. |
@@ -345,7 +345,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
__ pushad(); |
if (save_doubles_ == kSaveFPRegs) { |
CpuFeatures::Scope scope(SSE2); |
- __ sub(Operand(esp), Immediate(kDoubleSize * XMMRegister::kNumRegisters)); |
+ __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); |
for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
XMMRegister reg = XMMRegister::from_code(i); |
__ movdbl(Operand(esp, i * kDoubleSize), reg); |
@@ -366,7 +366,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
XMMRegister reg = XMMRegister::from_code(i); |
__ movdbl(reg, Operand(esp, i * kDoubleSize)); |
} |
- __ add(Operand(esp), Immediate(kDoubleSize * XMMRegister::kNumRegisters)); |
+ __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); |
} |
__ popad(); |
__ ret(0); |
@@ -507,27 +507,27 @@ static void IntegerConvert(MacroAssembler* masm, |
// Check whether the exponent is too big for a 64 bit signed integer. |
static const uint32_t kTooBigExponent = |
(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; |
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); |
+ __ cmp(scratch2, Immediate(kTooBigExponent)); |
__ j(greater_equal, conversion_failure); |
// Load x87 register with heap number. |
__ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); |
// Reserve space for 64 bit answer. |
- __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. |
+ __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint. |
// Do conversion, which cannot fail because we checked the exponent. |
__ fisttp_d(Operand(esp, 0)); |
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. |
- __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. |
+ __ add(esp, Immediate(sizeof(uint64_t))); // Nolint. |
} else { |
// Load ecx with zero. We use this either for the final shift or |
// for the answer. |
- __ xor_(ecx, Operand(ecx)); |
+ __ xor_(ecx, ecx); |
// Check whether the exponent matches a 32 bit signed int that cannot be |
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the |
// exponent is 30 (biased). This is the exponent that we are fastest at and |
// also the highest exponent we can handle here. |
const uint32_t non_smi_exponent = |
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
- __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); |
+ __ cmp(scratch2, Immediate(non_smi_exponent)); |
// If we have a match of the int32-but-not-Smi exponent then skip some |
// logic. |
__ j(equal, &right_exponent, Label::kNear); |
@@ -540,7 +540,7 @@ static void IntegerConvert(MacroAssembler* masm, |
// >>> operator has a tendency to generate numbers with an exponent of 31. |
const uint32_t big_non_smi_exponent = |
(HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; |
- __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); |
+ __ cmp(scratch2, Immediate(big_non_smi_exponent)); |
__ j(not_equal, conversion_failure); |
// We have the big exponent, typically from >>>. This means the number is |
// in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. |
@@ -559,9 +559,9 @@ static void IntegerConvert(MacroAssembler* masm, |
// Shift down 21 bits to get the most significant 11 bits or the low |
// mantissa word. |
__ shr(ecx, 32 - big_shift_distance); |
- __ or_(ecx, Operand(scratch2)); |
+ __ or_(ecx, scratch2); |
// We have the answer in ecx, but we may need to negate it. |
- __ test(scratch, Operand(scratch)); |
+ __ test(scratch, scratch); |
__ j(positive, &done, Label::kNear); |
__ neg(ecx); |
__ jmp(&done, Label::kNear); |
@@ -575,14 +575,14 @@ static void IntegerConvert(MacroAssembler* masm, |
// it rounds to zero. |
const uint32_t zero_exponent = |
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; |
- __ sub(Operand(scratch2), Immediate(zero_exponent)); |
+ __ sub(scratch2, Immediate(zero_exponent)); |
// ecx already has a Smi zero. |
__ j(less, &done, Label::kNear); |
// We have a shifted exponent between 0 and 30 in scratch2. |
__ shr(scratch2, HeapNumber::kExponentShift); |
__ mov(ecx, Immediate(30)); |
- __ sub(ecx, Operand(scratch2)); |
+ __ sub(ecx, scratch2); |
__ bind(&right_exponent); |
// Here ecx is the shift, scratch is the exponent word. |
@@ -602,19 +602,19 @@ static void IntegerConvert(MacroAssembler* masm, |
// Shift down 22 bits to get the most significant 10 bits or the low |
// mantissa word. |
__ shr(scratch2, 32 - shift_distance); |
- __ or_(scratch2, Operand(scratch)); |
+ __ or_(scratch2, scratch); |
// Move down according to the exponent. |
__ shr_cl(scratch2); |
// Now the unsigned answer is in scratch2. We need to move it to ecx and |
// we may need to fix the sign. |
Label negative; |
- __ xor_(ecx, Operand(ecx)); |
+ __ xor_(ecx, ecx); |
__ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); |
__ j(greater, &negative, Label::kNear); |
__ mov(ecx, scratch2); |
__ jmp(&done, Label::kNear); |
__ bind(&negative); |
- __ sub(ecx, Operand(scratch2)); |
+ __ sub(ecx, scratch2); |
__ bind(&done); |
} |
} |
@@ -716,13 +716,13 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, |
__ JumpIfNotSmi(eax, non_smi, non_smi_near); |
// We can't handle -0 with smis, so use a type transition for that case. |
- __ test(eax, Operand(eax)); |
+ __ test(eax, eax); |
__ j(zero, slow, slow_near); |
// Try optimistic subtraction '0 - value', saving operand in eax for undo. |
- __ mov(edx, Operand(eax)); |
+ __ mov(edx, eax); |
__ Set(eax, Immediate(0)); |
- __ sub(eax, Operand(edx)); |
+ __ sub(eax, edx); |
__ j(overflow, undo, undo_near); |
__ ret(0); |
} |
@@ -743,7 +743,7 @@ void UnaryOpStub::GenerateSmiCodeBitNot( |
void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) { |
- __ mov(eax, Operand(edx)); |
+ __ mov(eax, edx); |
} |
@@ -797,7 +797,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, |
__ xor_(FieldOperand(eax, HeapNumber::kExponentOffset), |
Immediate(HeapNumber::kSignMask)); // Flip sign. |
} else { |
- __ mov(edx, Operand(eax)); |
+ __ mov(edx, eax); |
// edx: operand |
Label slow_allocate_heapnumber, heapnumber_allocated; |
@@ -872,7 +872,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, |
} |
if (CpuFeatures::IsSupported(SSE2)) { |
CpuFeatures::Scope use_sse2(SSE2); |
- __ cvtsi2sd(xmm0, Operand(ecx)); |
+ __ cvtsi2sd(xmm0, ecx); |
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
} else { |
__ push(ecx); |
@@ -1065,7 +1065,7 @@ void BinaryOpStub::GenerateSmiCode( |
// eax in case the result is not a smi. |
ASSERT(!left.is(ecx) && !right.is(ecx)); |
__ mov(ecx, right); |
- __ or_(right, Operand(left)); // Bitwise or is commutative. |
+ __ or_(right, left); // Bitwise or is commutative. |
combined = right; |
break; |
@@ -1077,7 +1077,7 @@ void BinaryOpStub::GenerateSmiCode( |
case Token::DIV: |
case Token::MOD: |
__ mov(combined, right); |
- __ or_(combined, Operand(left)); |
+ __ or_(combined, left); |
break; |
case Token::SHL: |
@@ -1087,7 +1087,7 @@ void BinaryOpStub::GenerateSmiCode( |
// for the smi check register. |
ASSERT(!left.is(ecx) && !right.is(ecx)); |
__ mov(ecx, right); |
- __ or_(right, Operand(left)); |
+ __ or_(right, left); |
combined = right; |
break; |
@@ -1110,12 +1110,12 @@ void BinaryOpStub::GenerateSmiCode( |
case Token::BIT_XOR: |
ASSERT(right.is(eax)); |
- __ xor_(right, Operand(left)); // Bitwise xor is commutative. |
+ __ xor_(right, left); // Bitwise xor is commutative. |
break; |
case Token::BIT_AND: |
ASSERT(right.is(eax)); |
- __ and_(right, Operand(left)); // Bitwise and is commutative. |
+ __ and_(right, left); // Bitwise and is commutative. |
break; |
case Token::SHL: |
@@ -1164,12 +1164,12 @@ void BinaryOpStub::GenerateSmiCode( |
case Token::ADD: |
ASSERT(right.is(eax)); |
- __ add(right, Operand(left)); // Addition is commutative. |
+ __ add(right, left); // Addition is commutative. |
__ j(overflow, &use_fp_on_smis); |
break; |
case Token::SUB: |
- __ sub(left, Operand(right)); |
+ __ sub(left, right); |
__ j(overflow, &use_fp_on_smis); |
__ mov(eax, left); |
break; |
@@ -1183,7 +1183,7 @@ void BinaryOpStub::GenerateSmiCode( |
// Remove tag from one of the operands (but keep sign). |
__ SmiUntag(right); |
// Do multiplication. |
- __ imul(right, Operand(left)); // Multiplication is commutative. |
+ __ imul(right, left); // Multiplication is commutative. |
__ j(overflow, &use_fp_on_smis); |
// Check for negative zero result. Use combined = left | right. |
__ NegativeZeroTest(right, combined, &use_fp_on_smis); |
@@ -1194,7 +1194,7 @@ void BinaryOpStub::GenerateSmiCode( |
// save the left operand. |
__ mov(edi, left); |
// Check for 0 divisor. |
- __ test(right, Operand(right)); |
+ __ test(right, right); |
__ j(zero, &use_fp_on_smis); |
// Sign extend left into edx:eax. |
ASSERT(left.is(eax)); |
@@ -1210,7 +1210,7 @@ void BinaryOpStub::GenerateSmiCode( |
// Check for negative zero result. Use combined = left | right. |
__ NegativeZeroTest(eax, combined, &use_fp_on_smis); |
// Check that the remainder is zero. |
- __ test(edx, Operand(edx)); |
+ __ test(edx, edx); |
__ j(not_zero, &use_fp_on_smis); |
// Tag the result and store it in register eax. |
__ SmiTag(eax); |
@@ -1218,7 +1218,7 @@ void BinaryOpStub::GenerateSmiCode( |
case Token::MOD: |
// Check for 0 divisor. |
- __ test(right, Operand(right)); |
+ __ test(right, right); |
__ j(zero, &not_smis); |
// Sign extend left into edx:eax. |
@@ -1269,11 +1269,11 @@ void BinaryOpStub::GenerateSmiCode( |
break; |
case Token::ADD: |
// Revert right = right + left. |
- __ sub(right, Operand(left)); |
+ __ sub(right, left); |
break; |
case Token::SUB: |
// Revert left = left - right. |
- __ add(left, Operand(right)); |
+ __ add(left, right); |
break; |
case Token::MUL: |
// Right was clobbered but a copy is in ebx. |
@@ -1311,7 +1311,7 @@ void BinaryOpStub::GenerateSmiCode( |
ASSERT_EQ(Token::SHL, op_); |
if (CpuFeatures::IsSupported(SSE2)) { |
CpuFeatures::Scope use_sse2(SSE2); |
- __ cvtsi2sd(xmm0, Operand(left)); |
+ __ cvtsi2sd(xmm0, left); |
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
} else { |
__ mov(Operand(esp, 1 * kPointerSize), left); |
@@ -1333,11 +1333,11 @@ void BinaryOpStub::GenerateSmiCode( |
switch (op_) { |
case Token::ADD: |
// Revert right = right + left. |
- __ sub(right, Operand(left)); |
+ __ sub(right, left); |
break; |
case Token::SUB: |
// Revert left = left - right. |
- __ add(left, Operand(right)); |
+ __ add(left, right); |
break; |
case Token::MUL: |
// Right was clobbered but a copy is in ebx. |
@@ -1529,7 +1529,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
// Check result type if it is currently Int32. |
if (result_type_ <= BinaryOpIC::INT32) { |
__ cvttsd2si(ecx, Operand(xmm0)); |
- __ cvtsi2sd(xmm2, Operand(ecx)); |
+ __ cvtsi2sd(xmm2, ecx); |
__ ucomisd(xmm0, xmm2); |
__ j(not_zero, &not_int32); |
__ j(carry, &not_int32); |
@@ -1591,9 +1591,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_, |
&not_int32); |
switch (op_) { |
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; |
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; |
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; |
+ case Token::BIT_OR: __ or_(eax, ecx); break; |
+ case Token::BIT_AND: __ and_(eax, ecx); break; |
+ case Token::BIT_XOR: __ xor_(eax, ecx); break; |
case Token::SAR: __ sar_cl(eax); break; |
case Token::SHL: __ shl_cl(eax); break; |
case Token::SHR: __ shr_cl(eax); break; |
@@ -1617,7 +1617,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
if (op_ != Token::SHR) { |
__ bind(&non_smi_result); |
// Allocate a heap number if needed. |
- __ mov(ebx, Operand(eax)); // ebx: result |
+ __ mov(ebx, eax); // ebx: result |
Label skip_allocation; |
switch (mode_) { |
case OVERWRITE_LEFT: |
@@ -1637,7 +1637,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
// Store the result in the HeapNumber and return. |
if (CpuFeatures::IsSupported(SSE2)) { |
CpuFeatures::Scope use_sse2(SSE2); |
- __ cvtsi2sd(xmm0, Operand(ebx)); |
+ __ cvtsi2sd(xmm0, ebx); |
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
} else { |
__ mov(Operand(esp, 1 * kPointerSize), ebx); |
@@ -1718,7 +1718,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { |
__ cmp(edx, factory->undefined_value()); |
__ j(not_equal, &check, Label::kNear); |
if (Token::IsBitOp(op_)) { |
- __ xor_(edx, Operand(edx)); |
+ __ xor_(edx, edx); |
} else { |
__ mov(edx, Immediate(factory->nan_value())); |
} |
@@ -1727,7 +1727,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { |
__ cmp(eax, factory->undefined_value()); |
__ j(not_equal, &done, Label::kNear); |
if (Token::IsBitOp(op_)) { |
- __ xor_(eax, Operand(eax)); |
+ __ xor_(eax, eax); |
} else { |
__ mov(eax, Immediate(factory->nan_value())); |
} |
@@ -1805,9 +1805,9 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
use_sse3_, |
&not_floats); |
switch (op_) { |
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; |
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; |
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; |
+ case Token::BIT_OR: __ or_(eax, ecx); break; |
+ case Token::BIT_AND: __ and_(eax, ecx); break; |
+ case Token::BIT_XOR: __ xor_(eax, ecx); break; |
case Token::SAR: __ sar_cl(eax); break; |
case Token::SHL: __ shl_cl(eax); break; |
case Token::SHR: __ shr_cl(eax); break; |
@@ -1831,7 +1831,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
if (op_ != Token::SHR) { |
__ bind(&non_smi_result); |
// Allocate a heap number if needed. |
- __ mov(ebx, Operand(eax)); // ebx: result |
+ __ mov(ebx, eax); // ebx: result |
Label skip_allocation; |
switch (mode_) { |
case OVERWRITE_LEFT: |
@@ -1851,7 +1851,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
// Store the result in the HeapNumber and return. |
if (CpuFeatures::IsSupported(SSE2)) { |
CpuFeatures::Scope use_sse2(SSE2); |
- __ cvtsi2sd(xmm0, Operand(ebx)); |
+ __ cvtsi2sd(xmm0, ebx); |
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
} else { |
__ mov(Operand(esp, 1 * kPointerSize), ebx); |
@@ -2004,9 +2004,9 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
use_sse3_, |
&call_runtime); |
switch (op_) { |
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; |
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; |
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; |
+ case Token::BIT_OR: __ or_(eax, ecx); break; |
+ case Token::BIT_AND: __ and_(eax, ecx); break; |
+ case Token::BIT_XOR: __ xor_(eax, ecx); break; |
case Token::SAR: __ sar_cl(eax); break; |
case Token::SHL: __ shl_cl(eax); break; |
case Token::SHR: __ shr_cl(eax); break; |
@@ -2030,7 +2030,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
if (op_ != Token::SHR) { |
__ bind(&non_smi_result); |
// Allocate a heap number if needed. |
- __ mov(ebx, Operand(eax)); // ebx: result |
+ __ mov(ebx, eax); // ebx: result |
Label skip_allocation; |
switch (mode_) { |
case OVERWRITE_LEFT: |
@@ -2050,7 +2050,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
// Store the result in the HeapNumber and return. |
if (CpuFeatures::IsSupported(SSE2)) { |
CpuFeatures::Scope use_sse2(SSE2); |
- __ cvtsi2sd(xmm0, Operand(ebx)); |
+ __ cvtsi2sd(xmm0, ebx); |
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
} else { |
__ mov(Operand(esp, 1 * kPointerSize), ebx); |
@@ -2160,10 +2160,10 @@ void BinaryOpStub::GenerateHeapResultAllocation( |
__ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); |
// Now edx can be overwritten losing one of the arguments as we are |
// now done and will not need it any more. |
- __ mov(edx, Operand(ebx)); |
+ __ mov(edx, ebx); |
__ bind(&skip_allocation); |
// Use object in edx as a result holder |
- __ mov(eax, Operand(edx)); |
+ __ mov(eax, edx); |
break; |
} |
case OVERWRITE_RIGHT: |
@@ -2221,7 +2221,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
// Then load the low and high words of the double into ebx, edx. |
STATIC_ASSERT(kSmiTagSize == 1); |
__ sar(eax, 1); |
- __ sub(Operand(esp), Immediate(2 * kPointerSize)); |
+ __ sub(esp, Immediate(2 * kPointerSize)); |
__ mov(Operand(esp, 0), eax); |
__ fild_s(Operand(esp, 0)); |
__ fst_d(Operand(esp, 0)); |
@@ -2232,7 +2232,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
// Check if input is a HeapNumber. |
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); |
Factory* factory = masm->isolate()->factory(); |
- __ cmp(Operand(ebx), Immediate(factory->heap_number_map())); |
+ __ cmp(ebx, Immediate(factory->heap_number_map())); |
__ j(not_equal, &runtime_call); |
// Input is a HeapNumber. Push it on the FPU stack and load its |
// low and high words into ebx, edx. |
@@ -2244,12 +2244,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
} else { // UNTAGGED. |
if (CpuFeatures::IsSupported(SSE4_1)) { |
CpuFeatures::Scope sse4_scope(SSE4_1); |
- __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx. |
+ __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx. |
} else { |
__ pshufd(xmm0, xmm1, 0x1); |
- __ movd(Operand(edx), xmm0); |
+ __ movd(edx, xmm0); |
} |
- __ movd(Operand(ebx), xmm1); |
+ __ movd(ebx, xmm1); |
} |
// ST[0] or xmm1 == double value |
@@ -2258,15 +2258,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
// Compute hash (the shifts are arithmetic): |
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
__ mov(ecx, ebx); |
- __ xor_(ecx, Operand(edx)); |
+ __ xor_(ecx, edx); |
__ mov(eax, ecx); |
__ sar(eax, 16); |
- __ xor_(ecx, Operand(eax)); |
+ __ xor_(ecx, eax); |
__ mov(eax, ecx); |
__ sar(eax, 8); |
- __ xor_(ecx, Operand(eax)); |
+ __ xor_(ecx, eax); |
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); |
- __ and_(Operand(ecx), |
+ __ and_(ecx, |
Immediate(TranscendentalCache::SubCache::kCacheSize - 1)); |
// ST[0] or xmm1 == double value. |
@@ -2281,7 +2281,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
__ mov(eax, Operand(eax, cache_array_index)); |
// Eax points to the cache for the type type_. |
// If NULL, the cache hasn't been initialized yet, so go through runtime. |
- __ test(eax, Operand(eax)); |
+ __ test(eax, eax); |
__ j(zero, &runtime_call_clear_stack); |
#ifdef DEBUG |
// Check that the layout of cache elements match expectations. |
@@ -2324,10 +2324,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); |
} else { // UNTAGGED. |
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); |
- __ sub(Operand(esp), Immediate(kDoubleSize)); |
+ __ sub(esp, Immediate(kDoubleSize)); |
__ movdbl(Operand(esp, 0), xmm1); |
__ fld_d(Operand(esp, 0)); |
- __ add(Operand(esp), Immediate(kDoubleSize)); |
+ __ add(esp, Immediate(kDoubleSize)); |
} |
GenerateOperation(masm); |
__ mov(Operand(ecx, 0), ebx); |
@@ -2342,13 +2342,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
// Skip cache and return answer directly, only in untagged case. |
__ bind(&skip_cache); |
- __ sub(Operand(esp), Immediate(kDoubleSize)); |
+ __ sub(esp, Immediate(kDoubleSize)); |
__ movdbl(Operand(esp, 0), xmm1); |
__ fld_d(Operand(esp, 0)); |
GenerateOperation(masm); |
__ fstp_d(Operand(esp, 0)); |
__ movdbl(xmm1, Operand(esp, 0)); |
- __ add(Operand(esp), Immediate(kDoubleSize)); |
+ __ add(esp, Immediate(kDoubleSize)); |
// We return the value in xmm1 without adding it to the cache, but |
// we cause a scavenging GC so that future allocations will succeed. |
{ |
@@ -2409,13 +2409,13 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { |
// If argument is outside the range -2^63..2^63, fsin/cos doesn't |
// work. We must reduce it to the appropriate range. |
__ mov(edi, edx); |
- __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. |
+ __ and_(edi, Immediate(0x7ff00000)); // Exponent only. |
int supported_exponent_limit = |
(63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; |
- __ cmp(Operand(edi), Immediate(supported_exponent_limit)); |
+ __ cmp(edi, Immediate(supported_exponent_limit)); |
__ j(below, &in_range, Label::kNear); |
// Check for infinity and NaN. Both return NaN for sin. |
- __ cmp(Operand(edi), Immediate(0x7ff00000)); |
+ __ cmp(edi, Immediate(0x7ff00000)); |
Label non_nan_result; |
__ j(not_equal, &non_nan_result, Label::kNear); |
// Input is +/-Infinity or NaN. Result is NaN. |
@@ -2424,7 +2424,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { |
__ push(Immediate(0x7ff80000)); |
__ push(Immediate(0)); |
__ fld_d(Operand(esp, 0)); |
- __ add(Operand(esp), Immediate(2 * kPointerSize)); |
+ __ add(esp, Immediate(2 * kPointerSize)); |
__ jmp(&done, Label::kNear); |
__ bind(&non_nan_result); |
@@ -2440,7 +2440,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { |
__ fwait(); |
__ fnstsw_ax(); |
// Clear if Illegal Operand or Zero Division exceptions are set. |
- __ test(Operand(eax), Immediate(5)); |
+ __ test(eax, Immediate(5)); |
__ j(zero, &no_exceptions, Label::kNear); |
__ fnclex(); |
__ bind(&no_exceptions); |
@@ -2453,7 +2453,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { |
__ fprem1(); |
__ fwait(); |
__ fnstsw_ax(); |
- __ test(Operand(eax), Immediate(0x400 /* C2 */)); |
+ __ test(eax, Immediate(0x400 /* C2 */)); |
// If C2 is set, computation only has partial result. Loop to |
// continue computation. |
__ j(not_zero, &partial_remainder_loop); |
@@ -2586,13 +2586,13 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { |
__ bind(&load_smi_edx); |
__ SmiUntag(edx); // Untag smi before converting to float. |
- __ cvtsi2sd(xmm0, Operand(edx)); |
+ __ cvtsi2sd(xmm0, edx); |
__ SmiTag(edx); // Retag smi for heap number overwriting test. |
__ jmp(&load_eax); |
__ bind(&load_smi_eax); |
__ SmiUntag(eax); // Untag smi before converting to float. |
- __ cvtsi2sd(xmm1, Operand(eax)); |
+ __ cvtsi2sd(xmm1, eax); |
__ SmiTag(eax); // Retag smi for heap number overwriting test. |
__ bind(&done); |
@@ -2616,12 +2616,12 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, |
__ jmp(not_numbers); // Argument in eax is not a number. |
__ bind(&load_smi_edx); |
__ SmiUntag(edx); // Untag smi before converting to float. |
- __ cvtsi2sd(xmm0, Operand(edx)); |
+ __ cvtsi2sd(xmm0, edx); |
__ SmiTag(edx); // Retag smi for heap number overwriting test. |
__ jmp(&load_eax); |
__ bind(&load_smi_eax); |
__ SmiUntag(eax); // Untag smi before converting to float. |
- __ cvtsi2sd(xmm1, Operand(eax)); |
+ __ cvtsi2sd(xmm1, eax); |
__ SmiTag(eax); // Retag smi for heap number overwriting test. |
__ jmp(&done, Label::kNear); |
__ bind(&load_float_eax); |
@@ -2637,11 +2637,11 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, |
__ mov(scratch, left); |
ASSERT(!scratch.is(right)); // We're about to clobber scratch. |
__ SmiUntag(scratch); |
- __ cvtsi2sd(xmm0, Operand(scratch)); |
+ __ cvtsi2sd(xmm0, scratch); |
__ mov(scratch, right); |
__ SmiUntag(scratch); |
- __ cvtsi2sd(xmm1, Operand(scratch)); |
+ __ cvtsi2sd(xmm1, scratch); |
} |
@@ -2649,12 +2649,12 @@ void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm, |
Label* non_int32, |
Register scratch) { |
__ cvttsd2si(scratch, Operand(xmm0)); |
- __ cvtsi2sd(xmm2, Operand(scratch)); |
+ __ cvtsi2sd(xmm2, scratch); |
__ ucomisd(xmm0, xmm2); |
__ j(not_zero, non_int32); |
__ j(carry, non_int32); |
__ cvttsd2si(scratch, Operand(xmm1)); |
- __ cvtsi2sd(xmm2, Operand(scratch)); |
+ __ cvtsi2sd(xmm2, scratch); |
__ ucomisd(xmm1, xmm2); |
__ j(not_zero, non_int32); |
__ j(carry, non_int32); |
@@ -2762,7 +2762,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { |
// Save 1 in xmm3 - we need this several times later on. |
__ mov(ecx, Immediate(1)); |
- __ cvtsi2sd(xmm3, Operand(ecx)); |
+ __ cvtsi2sd(xmm3, ecx); |
Label exponent_nonsmi; |
Label base_nonsmi; |
@@ -2773,7 +2773,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { |
// Optimized version when both exponent and base are smis. |
Label powi; |
__ SmiUntag(edx); |
- __ cvtsi2sd(xmm0, Operand(edx)); |
+ __ cvtsi2sd(xmm0, edx); |
__ jmp(&powi); |
// exponent is smi and base is a heapnumber. |
__ bind(&base_nonsmi); |
@@ -2815,11 +2815,11 @@ void MathPowStub::Generate(MacroAssembler* masm) { |
// base has the original value of the exponent - if the exponent is |
// negative return 1/result. |
- __ test(edx, Operand(edx)); |
+ __ test(edx, edx); |
__ j(positive, &allocate_return); |
// Special case if xmm1 has reached infinity. |
__ mov(ecx, Immediate(0x7FB00000)); |
- __ movd(xmm0, Operand(ecx)); |
+ __ movd(xmm0, ecx); |
__ cvtss2sd(xmm0, xmm0); |
__ ucomisd(xmm0, xmm1); |
__ j(equal, &call_runtime); |
@@ -2842,7 +2842,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { |
Label handle_special_cases; |
__ JumpIfNotSmi(edx, &base_not_smi, Label::kNear); |
__ SmiUntag(edx); |
- __ cvtsi2sd(xmm0, Operand(edx)); |
+ __ cvtsi2sd(xmm0, edx); |
__ jmp(&handle_special_cases, Label::kNear); |
__ bind(&base_not_smi); |
@@ -2851,7 +2851,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { |
__ j(not_equal, &call_runtime); |
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); |
__ and_(ecx, HeapNumber::kExponentMask); |
- __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask)); |
+ __ cmp(ecx, Immediate(HeapNumber::kExponentMask)); |
// base is NaN or +/-Infinity |
__ j(greater_equal, &call_runtime); |
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); |
@@ -2862,7 +2862,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { |
// Test for -0.5. |
// Load xmm2 with -0.5. |
__ mov(ecx, Immediate(0xBF000000)); |
- __ movd(xmm2, Operand(ecx)); |
+ __ movd(xmm2, ecx); |
__ cvtss2sd(xmm2, xmm2); |
// xmm2 now has -0.5. |
__ ucomisd(xmm2, xmm1); |
@@ -2918,13 +2918,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
Label adaptor; |
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); |
__ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); |
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
__ j(equal, &adaptor, Label::kNear); |
// Check index against formal parameters count limit passed in |
// through register eax. Use unsigned comparison to get negative |
// check for free. |
- __ cmp(edx, Operand(eax)); |
+ __ cmp(edx, eax); |
__ j(above_equal, &slow, Label::kNear); |
// Read the argument from the stack and return it. |
@@ -2940,7 +2940,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
// comparison to get negative check for free. |
__ bind(&adaptor); |
__ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
- __ cmp(edx, Operand(ecx)); |
+ __ cmp(edx, ecx); |
__ j(above_equal, &slow, Label::kNear); |
// Read the argument from the stack and return it. |
@@ -2971,7 +2971,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { |
Label runtime; |
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); |
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); |
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
__ j(not_equal, &runtime, Label::kNear); |
// Patch the arguments.length and the parameters pointer. |
@@ -3002,7 +3002,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
Label adaptor_frame, try_allocate; |
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); |
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); |
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
__ j(equal, &adaptor_frame, Label::kNear); |
// No adaptor, parameter count = argument count. |
@@ -3021,7 +3021,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
// esp[4] = parameter count (tagged) |
// esp[8] = address of receiver argument |
// Compute the mapped parameter count = min(ebx, ecx) in ebx. |
- __ cmp(ebx, Operand(ecx)); |
+ __ cmp(ebx, ecx); |
__ j(less_equal, &try_allocate, Label::kNear); |
__ mov(ebx, ecx); |
@@ -3035,7 +3035,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
const int kParameterMapHeaderSize = |
FixedArray::kHeaderSize + 2 * kPointerSize; |
Label no_parameter_map; |
- __ test(ebx, Operand(ebx)); |
+ __ test(ebx, ebx); |
__ j(zero, &no_parameter_map, Label::kNear); |
__ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize)); |
__ bind(&no_parameter_map); |
@@ -3044,7 +3044,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
__ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize)); |
// 3. Arguments object. |
- __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize)); |
+ __ add(ebx, Immediate(Heap::kArgumentsObjectSize)); |
// Do the allocation of all three objects in one go. |
__ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT); |
@@ -3059,7 +3059,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); |
__ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset)); |
__ mov(ebx, Operand(esp, 0 * kPointerSize)); |
- __ test(ebx, Operand(ebx)); |
+ __ test(ebx, ebx); |
__ j(not_zero, &has_mapped_parameters, Label::kNear); |
__ mov(edi, Operand(edi, |
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX))); |
@@ -3114,7 +3114,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
// Initialize parameter map. If there are no mapped arguments, we're done. |
Label skip_parameter_map; |
- __ test(ebx, Operand(ebx)); |
+ __ test(ebx, ebx); |
__ j(zero, &skip_parameter_map); |
__ mov(FieldOperand(edi, FixedArray::kMapOffset), |
@@ -3138,7 +3138,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
__ mov(eax, Operand(esp, 2 * kPointerSize)); |
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); |
__ add(ebx, Operand(esp, 4 * kPointerSize)); |
- __ sub(ebx, Operand(eax)); |
+ __ sub(ebx, eax); |
__ mov(ecx, FACTORY->the_hole_value()); |
__ mov(edx, edi); |
__ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize)); |
@@ -3155,12 +3155,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
__ jmp(&parameters_test, Label::kNear); |
__ bind(&parameters_loop); |
- __ sub(Operand(eax), Immediate(Smi::FromInt(1))); |
+ __ sub(eax, Immediate(Smi::FromInt(1))); |
__ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx); |
__ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx); |
- __ add(Operand(ebx), Immediate(Smi::FromInt(1))); |
+ __ add(ebx, Immediate(Smi::FromInt(1))); |
__ bind(&parameters_test); |
- __ test(eax, Operand(eax)); |
+ __ test(eax, eax); |
__ j(not_zero, &parameters_loop, Label::kNear); |
__ pop(ecx); |
@@ -3180,18 +3180,18 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
Label arguments_loop, arguments_test; |
__ mov(ebx, Operand(esp, 1 * kPointerSize)); |
__ mov(edx, Operand(esp, 4 * kPointerSize)); |
- __ sub(Operand(edx), ebx); // Is there a smarter way to do negative scaling? |
- __ sub(Operand(edx), ebx); |
+ __ sub(edx, ebx); // Is there a smarter way to do negative scaling? |
+ __ sub(edx, ebx); |
__ jmp(&arguments_test, Label::kNear); |
__ bind(&arguments_loop); |
- __ sub(Operand(edx), Immediate(kPointerSize)); |
+ __ sub(edx, Immediate(kPointerSize)); |
__ mov(eax, Operand(edx, 0)); |
__ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax); |
- __ add(Operand(ebx), Immediate(Smi::FromInt(1))); |
+ __ add(ebx, Immediate(Smi::FromInt(1))); |
__ bind(&arguments_test); |
- __ cmp(ebx, Operand(ecx)); |
+ __ cmp(ebx, ecx); |
__ j(less, &arguments_loop, Label::kNear); |
// Restore. |
@@ -3219,7 +3219,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
Label adaptor_frame, try_allocate, runtime; |
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); |
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); |
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
__ j(equal, &adaptor_frame, Label::kNear); |
// Get the length from the frame. |
@@ -3238,11 +3238,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
// the arguments object and the elements array. |
Label add_arguments_object; |
__ bind(&try_allocate); |
- __ test(ecx, Operand(ecx)); |
+ __ test(ecx, ecx); |
__ j(zero, &add_arguments_object, Label::kNear); |
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize)); |
__ bind(&add_arguments_object); |
- __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict)); |
+ __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict)); |
// Do the allocation of both objects in one go. |
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); |
@@ -3269,7 +3269,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
// If there are no actual arguments, we're done. |
Label done; |
- __ test(ecx, Operand(ecx)); |
+ __ test(ecx, ecx); |
__ j(zero, &done, Label::kNear); |
// Get the parameters pointer from the stack. |
@@ -3291,8 +3291,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
__ bind(&loop); |
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. |
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); |
- __ add(Operand(edi), Immediate(kPointerSize)); |
- __ sub(Operand(edx), Immediate(kPointerSize)); |
+ __ add(edi, Immediate(kPointerSize)); |
+ __ sub(edx, Immediate(kPointerSize)); |
__ dec(ecx); |
__ j(not_zero, &loop); |
@@ -3339,7 +3339,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
ExternalReference address_of_regexp_stack_memory_size = |
ExternalReference::address_of_regexp_stack_memory_size(masm->isolate()); |
__ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); |
- __ test(ebx, Operand(ebx)); |
+ __ test(ebx, ebx); |
__ j(zero, &runtime); |
// Check that the first argument is a JSRegExp object. |
@@ -3360,7 +3360,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
// ecx: RegExp data (FixedArray) |
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. |
__ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset)); |
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); |
+ __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); |
__ j(not_equal, &runtime); |
// ecx: RegExp data (FixedArray) |
@@ -3370,7 +3370,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
// uses the asumption that smis are 2 * their untagged value. |
STATIC_ASSERT(kSmiTag == 0); |
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
- __ add(Operand(edx), Immediate(2)); // edx was a smi. |
+ __ add(edx, Immediate(2)); // edx was a smi. |
// Check that the static offsets vector buffer is large enough. |
__ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize); |
__ j(above, &runtime); |
@@ -3392,7 +3392,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
// string length. A negative value will be greater (unsigned comparison). |
__ mov(eax, Operand(esp, kPreviousIndexOffset)); |
__ JumpIfNotSmi(eax, &runtime); |
- __ cmp(eax, Operand(ebx)); |
+ __ cmp(eax, ebx); |
__ j(above_equal, &runtime); |
// ecx: RegExp data (FixedArray) |
@@ -3412,8 +3412,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
// additional information. |
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); |
__ SmiUntag(eax); |
- __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead)); |
- __ cmp(edx, Operand(eax)); |
+ __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead)); |
+ __ cmp(edx, eax); |
__ j(greater, &runtime); |
// Reset offset for possibly sliced string. |
@@ -3430,8 +3430,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); |
__ j(zero, &seq_two_byte_string, Label::kNear); |
// Any other flat string must be a flat ascii string. |
- __ and_(Operand(ebx), |
- Immediate(kIsNotStringMask | kStringRepresentationMask)); |
+ __ and_(ebx, Immediate(kIsNotStringMask | kStringRepresentationMask)); |
__ j(zero, &seq_ascii_string, Label::kNear); |
// Check for flat cons string or sliced string. |
@@ -3443,7 +3442,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
Label cons_string, check_encoding; |
STATIC_ASSERT(kConsStringTag < kExternalStringTag); |
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); |
- __ cmp(Operand(ebx), Immediate(kExternalStringTag)); |
+ __ cmp(ebx, Immediate(kExternalStringTag)); |
__ j(less, &cons_string); |
__ j(equal, &runtime); |
@@ -3549,14 +3548,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
// Prepare start and end index of the input. |
// Load the length from the original sliced string if that is the case. |
__ mov(esi, FieldOperand(esi, String::kLengthOffset)); |
- __ add(esi, Operand(edi)); // Calculate input end wrt offset. |
+ __ add(esi, edi); // Calculate input end wrt offset. |
__ SmiUntag(edi); |
- __ add(ebx, Operand(edi)); // Calculate input start wrt offset. |
+ __ add(ebx, edi); // Calculate input start wrt offset. |
// ebx: start index of the input string |
// esi: end index of the input string |
Label setup_two_byte, setup_rest; |
- __ test(ecx, Operand(ecx)); |
+ __ test(ecx, ecx); |
__ j(zero, &setup_two_byte, Label::kNear); |
__ SmiUntag(esi); |
__ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize)); |
@@ -3576,8 +3575,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
__ bind(&setup_rest); |
// Locate the code entry and call it. |
- __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); |
- __ call(Operand(edx)); |
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); |
+ __ call(edx); |
// Drop arguments and come back to JS mode. |
__ LeaveApiExitFrame(); |
@@ -3602,7 +3601,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
Operand::StaticVariable(ExternalReference::the_hole_value_location( |
masm->isolate()))); |
__ mov(eax, Operand::StaticVariable(pending_exception)); |
- __ cmp(edx, Operand(eax)); |
+ __ cmp(edx, eax); |
__ j(equal, &runtime); |
// For exception, throw the exception again. |
@@ -3623,7 +3622,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
__ bind(&failure); |
// For failure to match, return null. |
- __ mov(Operand(eax), factory->null_value()); |
+ __ mov(eax, factory->null_value()); |
__ ret(4 * kPointerSize); |
// Load RegExp data. |
@@ -3634,7 +3633,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
// Calculate number of capture registers (number_of_captures + 1) * 2. |
STATIC_ASSERT(kSmiTag == 0); |
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
- __ add(Operand(edx), Immediate(2)); // edx was a smi. |
+ __ add(edx, Immediate(2)); // edx was a smi. |
// edx: Number of capture registers |
// Load last_match_info which is still known to be a fast case JSArray. |
@@ -3675,7 +3674,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { |
// Capture register counter starts from number of capture registers and |
// counts down until wraping after zero. |
__ bind(&next_capture); |
- __ sub(Operand(edx), Immediate(1)); |
+ __ sub(edx, Immediate(1)); |
__ j(negative, &done, Label::kNear); |
// Read the value from the static offsets vector buffer. |
__ mov(edi, Operand(ecx, edx, times_int_size, 0)); |
@@ -3706,7 +3705,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { |
Label done; |
__ mov(ebx, Operand(esp, kPointerSize * 3)); |
__ JumpIfNotSmi(ebx, &slowcase); |
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength))); |
+ __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength))); |
__ j(above, &slowcase); |
// Smi-tagging is equivalent to multiplying by 2. |
STATIC_ASSERT(kSmiTag == 0); |
@@ -3766,10 +3765,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { |
// ebx: Start of elements in FixedArray. |
// edx: the hole. |
Label loop; |
- __ test(ecx, Operand(ecx)); |
+ __ test(ecx, ecx); |
__ bind(&loop); |
__ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero. |
- __ sub(Operand(ecx), Immediate(1)); |
+ __ sub(ecx, Immediate(1)); |
__ mov(Operand(ebx, ecx, times_pointer_size, 0), edx); |
__ jmp(&loop); |
@@ -3803,7 +3802,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, |
// contains two elements (number and string) for each cache entry. |
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); |
__ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. |
- __ sub(Operand(mask), Immediate(1)); // Make mask. |
+ __ sub(mask, Immediate(1)); // Make mask. |
// Calculate the entry in the number string cache. The hash value in the |
// number string cache for smis is just the smi value, and the hash for |
@@ -3829,7 +3828,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, |
__ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); |
__ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); |
// Object is heap number and hash is now in scratch. Calculate cache index. |
- __ and_(scratch, Operand(mask)); |
+ __ and_(scratch, mask); |
Register index = scratch; |
Register probe = mask; |
__ mov(probe, |
@@ -3855,7 +3854,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, |
__ bind(&smi_hash_calculated); |
// Object is smi and hash is now in scratch. Calculate cache index. |
- __ and_(scratch, Operand(mask)); |
+ __ and_(scratch, mask); |
Register index = scratch; |
// Check if the entry is the smi we are looking for. |
__ cmp(object, |
@@ -3907,10 +3906,10 @@ void CompareStub::Generate(MacroAssembler* masm) { |
// Compare two smis if required. |
if (include_smi_compare_) { |
Label non_smi, smi_done; |
- __ mov(ecx, Operand(edx)); |
- __ or_(ecx, Operand(eax)); |
+ __ mov(ecx, edx); |
+ __ or_(ecx, eax); |
__ JumpIfNotSmi(ecx, &non_smi, Label::kNear); |
- __ sub(edx, Operand(eax)); // Return on the result of the subtraction. |
+ __ sub(edx, eax); // Return on the result of the subtraction. |
__ j(no_overflow, &smi_done, Label::kNear); |
__ not_(edx); // Correct sign in case of overflow. edx is never 0 here. |
__ bind(&smi_done); |
@@ -3918,8 +3917,8 @@ void CompareStub::Generate(MacroAssembler* masm) { |
__ ret(0); |
__ bind(&non_smi); |
} else if (FLAG_debug_code) { |
- __ mov(ecx, Operand(edx)); |
- __ or_(ecx, Operand(eax)); |
+ __ mov(ecx, edx); |
+ __ or_(ecx, eax); |
__ test(ecx, Immediate(kSmiTagMask)); |
__ Assert(not_zero, "Unexpected smi operands."); |
} |
@@ -3931,7 +3930,7 @@ void CompareStub::Generate(MacroAssembler* masm) { |
// for NaN and undefined. |
{ |
Label not_identical; |
- __ cmp(eax, Operand(edx)); |
+ __ cmp(eax, edx); |
__ j(not_equal, &not_identical); |
if (cc_ != equal) { |
@@ -3980,7 +3979,7 @@ void CompareStub::Generate(MacroAssembler* masm) { |
__ Set(eax, Immediate(0)); |
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost |
// bits. |
- __ add(edx, Operand(edx)); |
+ __ add(edx, edx); |
__ cmp(edx, kQuietNaNHighBitsMask << 1); |
if (cc_ == equal) { |
STATIC_ASSERT(EQUAL != 1); |
@@ -4014,19 +4013,19 @@ void CompareStub::Generate(MacroAssembler* masm) { |
STATIC_ASSERT(kSmiTag == 0); |
ASSERT_EQ(0, Smi::FromInt(0)); |
__ mov(ecx, Immediate(kSmiTagMask)); |
- __ and_(ecx, Operand(eax)); |
- __ test(ecx, Operand(edx)); |
+ __ and_(ecx, eax); |
+ __ test(ecx, edx); |
__ j(not_zero, &not_smis, Label::kNear); |
// One operand is a smi. |
// Check whether the non-smi is a heap number. |
STATIC_ASSERT(kSmiTagMask == 1); |
// ecx still holds eax & kSmiTag, which is either zero or one. |
- __ sub(Operand(ecx), Immediate(0x01)); |
+ __ sub(ecx, Immediate(0x01)); |
__ mov(ebx, edx); |
- __ xor_(ebx, Operand(eax)); |
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx. |
- __ xor_(ebx, Operand(eax)); |
+ __ xor_(ebx, eax); |
+ __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx. |
+ __ xor_(ebx, eax); |
// if eax was smi, ebx is now edx, else eax. |
// Check if the non-smi operand is a heap number. |
@@ -4088,9 +4087,9 @@ void CompareStub::Generate(MacroAssembler* masm) { |
// Return a result of -1, 0, or 1, based on EFLAGS. |
__ mov(eax, 0); // equal |
__ mov(ecx, Immediate(Smi::FromInt(1))); |
- __ cmov(above, eax, Operand(ecx)); |
+ __ cmov(above, eax, ecx); |
__ mov(ecx, Immediate(Smi::FromInt(-1))); |
- __ cmov(below, eax, Operand(ecx)); |
+ __ cmov(below, eax, ecx); |
__ ret(0); |
} else { |
FloatingPointHelper::CheckFloatOperands( |
@@ -4316,14 +4315,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { |
// A monomorphic cache hit or an already megamorphic state: invoke the |
// function without changing the state. |
- __ cmp(ecx, Operand(edi)); |
+ __ cmp(ecx, edi); |
__ j(equal, &call, Label::kNear); |
- __ cmp(Operand(ecx), Immediate(MegamorphicSentinel(isolate))); |
+ __ cmp(ecx, Immediate(MegamorphicSentinel(isolate))); |
__ j(equal, &call, Label::kNear); |
// A monomorphic miss (i.e, here the cache is not uninitialized) goes |
// megamorphic. |
- __ cmp(Operand(ecx), Immediate(UninitializedSentinel(isolate))); |
+ __ cmp(ecx, Immediate(UninitializedSentinel(isolate))); |
__ j(equal, &initialize, Label::kNear); |
// MegamorphicSentinel is a root so no write-barrier is needed. |
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), |
@@ -4483,7 +4482,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, |
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv. |
__ mov(Operand(esp, 2 * kPointerSize), |
Immediate(ExternalReference::isolate_address())); |
- __ call(Operand(ebx)); |
+ __ call(ebx); |
// Result is in eax or edx:eax - do not destroy these registers! |
if (always_allocate_scope) { |
@@ -4638,7 +4637,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
// Setup frame. |
__ push(ebp); |
- __ mov(ebp, Operand(esp)); |
+ __ mov(ebp, esp); |
// Push marker in two places. |
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
@@ -4706,7 +4705,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
} |
__ mov(edx, Operand(edx, 0)); // deref address |
__ lea(edx, FieldOperand(edx, Code::kHeaderSize)); |
- __ call(Operand(edx)); |
+ __ call(edx); |
// Unlink this frame from the handler chain. |
__ PopTryHandler(); |
@@ -4714,8 +4713,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
__ bind(&exit); |
// Check if the current stack frame is marked as the outermost JS frame. |
__ pop(ebx); |
- __ cmp(Operand(ebx), |
- Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
+ __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
__ j(not_equal, &not_outermost_js_2); |
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0)); |
__ bind(&not_outermost_js_2); |
@@ -4729,7 +4727,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
__ pop(ebx); |
__ pop(esi); |
__ pop(edi); |
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers |
+ __ add(esp, Immediate(2 * kPointerSize)); // remove markers |
// Restore frame pointer and return. |
__ pop(ebp); |
@@ -4845,10 +4843,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { |
__ mov(scratch, FieldOperand(map, Map::kPrototypeOffset)); |
Label loop, is_instance, is_not_instance; |
__ bind(&loop); |
- __ cmp(scratch, Operand(prototype)); |
+ __ cmp(scratch, prototype); |
__ j(equal, &is_instance, Label::kNear); |
Factory* factory = masm->isolate()->factory(); |
- __ cmp(Operand(scratch), Immediate(factory->null_value())); |
+ __ cmp(scratch, Immediate(factory->null_value())); |
__ j(equal, &is_not_instance, Label::kNear); |
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); |
__ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset)); |
@@ -4946,7 +4944,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { |
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); |
} |
Label true_value, done; |
- __ test(eax, Operand(eax)); |
+ __ test(eax, eax); |
__ j(zero, &true_value, Label::kNear); |
__ mov(eax, factory->false_value()); |
__ jmp(&done, Label::kNear); |
@@ -5262,7 +5260,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
Label second_not_zero_length, both_not_zero_length; |
__ mov(ecx, FieldOperand(edx, String::kLengthOffset)); |
STATIC_ASSERT(kSmiTag == 0); |
- __ test(ecx, Operand(ecx)); |
+ __ test(ecx, ecx); |
__ j(not_zero, &second_not_zero_length, Label::kNear); |
// Second string is empty, result is first string which is already in eax. |
Counters* counters = masm->isolate()->counters(); |
@@ -5271,7 +5269,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
__ bind(&second_not_zero_length); |
__ mov(ebx, FieldOperand(eax, String::kLengthOffset)); |
STATIC_ASSERT(kSmiTag == 0); |
- __ test(ebx, Operand(ebx)); |
+ __ test(ebx, ebx); |
__ j(not_zero, &both_not_zero_length, Label::kNear); |
// First string is empty, result is second string which is in edx. |
__ mov(eax, edx); |
@@ -5286,13 +5284,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
// Look at the length of the result of adding the two strings. |
Label string_add_flat_result, longer_than_two; |
__ bind(&both_not_zero_length); |
- __ add(ebx, Operand(ecx)); |
+ __ add(ebx, ecx); |
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); |
// Handle exceptionally long strings in the runtime system. |
__ j(overflow, &string_add_runtime); |
// Use the symbol table when adding two one character strings, as it |
// helps later optimizations to return a symbol here. |
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(2))); |
+ __ cmp(ebx, Immediate(Smi::FromInt(2))); |
__ j(not_equal, &longer_than_two); |
// Check that both strings are non-external ascii strings. |
@@ -5329,7 +5327,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
&string_add_runtime); |
// Pack both characters in ebx. |
__ shl(ecx, kBitsPerByte); |
- __ or_(ebx, Operand(ecx)); |
+ __ or_(ebx, ecx); |
// Set the characters in the new string. |
__ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx); |
__ IncrementCounter(counters->string_add_native(), 1); |
@@ -5337,7 +5335,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
__ bind(&longer_than_two); |
// Check if resulting string will be flat. |
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength))); |
+ __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength))); |
__ j(below, &string_add_flat_result); |
// If result is not supposed to be flat allocate a cons string object. If both |
@@ -5347,7 +5345,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
__ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); |
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); |
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); |
- __ and_(ecx, Operand(edi)); |
+ __ and_(ecx, edi); |
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); |
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); |
__ test(ecx, Immediate(kStringEncodingMask)); |
@@ -5375,7 +5373,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
__ j(not_zero, &ascii_data); |
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); |
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); |
- __ xor_(edi, Operand(ecx)); |
+ __ xor_(edi, ecx); |
STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); |
__ and_(edi, kAsciiStringTag | kAsciiDataHintTag); |
__ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); |
@@ -5423,12 +5421,12 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
// eax: result string |
__ mov(ecx, eax); |
// Locate first character of result. |
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
+ __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
// Load first argument and locate first character. |
__ mov(edx, Operand(esp, 2 * kPointerSize)); |
__ mov(edi, FieldOperand(edx, String::kLengthOffset)); |
__ SmiUntag(edi); |
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
// eax: result string |
// ecx: first character of result |
// edx: first char of first argument |
@@ -5438,7 +5436,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
__ mov(edx, Operand(esp, 1 * kPointerSize)); |
__ mov(edi, FieldOperand(edx, String::kLengthOffset)); |
__ SmiUntag(edi); |
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
// eax: result string |
// ecx: next character of result |
// edx: first char of second argument |
@@ -5462,13 +5460,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
// eax: result string |
__ mov(ecx, eax); |
// Locate first character of result. |
- __ add(Operand(ecx), |
+ __ add(ecx, |
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
// Load first argument and locate first character. |
__ mov(edx, Operand(esp, 2 * kPointerSize)); |
__ mov(edi, FieldOperand(edx, String::kLengthOffset)); |
__ SmiUntag(edi); |
- __ add(Operand(edx), |
+ __ add(edx, |
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
// eax: result string |
// ecx: first character of result |
@@ -5479,7 +5477,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
__ mov(edx, Operand(esp, 1 * kPointerSize)); |
__ mov(edi, FieldOperand(edx, String::kLengthOffset)); |
__ SmiUntag(edi); |
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
// eax: result string |
// ecx: next character of result |
// edx: first char of second argument |
@@ -5555,15 +5553,15 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, |
if (ascii) { |
__ mov_b(scratch, Operand(src, 0)); |
__ mov_b(Operand(dest, 0), scratch); |
- __ add(Operand(src), Immediate(1)); |
- __ add(Operand(dest), Immediate(1)); |
+ __ add(src, Immediate(1)); |
+ __ add(dest, Immediate(1)); |
} else { |
__ mov_w(scratch, Operand(src, 0)); |
__ mov_w(Operand(dest, 0), scratch); |
- __ add(Operand(src), Immediate(2)); |
- __ add(Operand(dest), Immediate(2)); |
+ __ add(src, Immediate(2)); |
+ __ add(dest, Immediate(2)); |
} |
- __ sub(Operand(count), Immediate(1)); |
+ __ sub(count, Immediate(1)); |
__ j(not_zero, &loop); |
} |
@@ -5586,7 +5584,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, |
// Nothing to do for zero characters. |
Label done; |
- __ test(count, Operand(count)); |
+ __ test(count, count); |
__ j(zero, &done); |
// Make count the number of bytes to copy. |
@@ -5611,7 +5609,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, |
// Check if there are more bytes to copy. |
__ bind(&last_bytes); |
- __ test(count, Operand(count)); |
+ __ test(count, count); |
__ j(zero, &done); |
// Copy remaining characters. |
@@ -5619,9 +5617,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, |
__ bind(&loop); |
__ mov_b(scratch, Operand(src, 0)); |
__ mov_b(Operand(dest, 0), scratch); |
- __ add(Operand(src), Immediate(1)); |
- __ add(Operand(dest), Immediate(1)); |
- __ sub(Operand(count), Immediate(1)); |
+ __ add(src, Immediate(1)); |
+ __ add(dest, Immediate(1)); |
+ __ sub(count, Immediate(1)); |
__ j(not_zero, &loop); |
__ bind(&done); |
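The REP variant copies the bulk of the data in 4-byte words (the rep movs setup
sits outside the hunks shown) and then finishes the trailing bytes with the
byte loop above. A hedged C++ equivalent of that two-phase copy:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    inline void CopyBytesRepSketch(uint8_t* dest, const uint8_t* src,
                                   size_t byte_count) {
      size_t bulk = byte_count & ~static_cast<size_t>(3);  // whole 4-byte words
      memcpy(dest, src, bulk);                             // stand-in for rep movsd
      for (size_t i = bulk; i < byte_count; ++i) {
        dest[i] = src[i];                                  // remaining 1-3 bytes
      }
    }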
@@ -5643,12 +5641,12 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, |
// different hash algorithm. Don't try to look for these in the symbol table. |
Label not_array_index; |
__ mov(scratch, c1); |
- __ sub(Operand(scratch), Immediate(static_cast<int>('0'))); |
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0'))); |
+ __ sub(scratch, Immediate(static_cast<int>('0'))); |
+ __ cmp(scratch, Immediate(static_cast<int>('9' - '0'))); |
__ j(above, &not_array_index, Label::kNear);
__ mov(scratch, c2); |
- __ sub(Operand(scratch), Immediate(static_cast<int>('0'))); |
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0'))); |
+ __ sub(scratch, Immediate(static_cast<int>('0'))); |
+ __ cmp(scratch, Immediate(static_cast<int>('9' - '0'))); |
__ j(below_equal, not_probed); |
__ bind(&not_array_index);
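The subtract-then-unsigned-compare pairs above are the classic branch-light
digit test: any character below '0' wraps around to a huge unsigned value, so a
single "above" check covers both ends of the range. In C++:

    #include <stdint.h>

    inline bool IsAsciiDigitSketch(uint32_t c) {
      return (c - static_cast<uint32_t>('0')) <=
             static_cast<uint32_t>('9' - '0');
    }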
@@ -5661,7 +5659,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, |
// Collect the two characters in a register. |
Register chars = c1; |
__ shl(c2, kBitsPerByte); |
- __ or_(chars, Operand(c2)); |
+ __ or_(chars, c2); |
// chars: two character string, char 1 in byte 0 and char 2 in byte 1. |
// hash: hash of two character string. |
@@ -5678,7 +5676,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, |
Register mask = scratch2; |
__ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); |
__ SmiUntag(mask); |
- __ sub(Operand(mask), Immediate(1)); |
+ __ sub(mask, Immediate(1)); |
// Registers |
// chars: two character string, char 1 in byte 0 and char 2 in byte 1. |
@@ -5695,9 +5693,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, |
// Calculate entry in symbol table. |
__ mov(scratch, hash); |
if (i > 0) { |
- __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i))); |
+ __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i))); |
} |
- __ and_(scratch, Operand(mask)); |
+ __ and_(scratch, mask); |
// Load the entry from the symbol table. |
Register candidate = scratch; // Scratch register contains candidate. |
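The probe index computed above relies on the capacity being a power of two, so
masking replaces a modulo. Sketch of the calculation; the probe offset formula
is an assumption, since only its use is visible in this hunk:

    #include <stdint.h>

    inline uint32_t SymbolTableProbeSketch(uint32_t hash, uint32_t capacity,
                                           uint32_t i) {
      uint32_t mask = capacity - 1;          // mask = SmiUntag(capacity) - 1
      uint32_t offset = (i + i * i) >> 1;    // assumed GetProbeOffset(i)
      return (hash + offset) & mask;         // index of the probed entry
    }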
@@ -5734,7 +5732,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, |
// Check if the two characters match. |
__ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); |
__ and_(temp, 0x0000ffff); |
- __ cmp(chars, Operand(temp)); |
+ __ cmp(chars, temp); |
__ j(equal, &found_in_symbol_table); |
__ bind(&next_probe_pop_mask[i]); |
__ pop(mask); |
@@ -5761,11 +5759,11 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm, |
// hash = character + (character << 10); |
__ mov(hash, character); |
__ shl(hash, 10); |
- __ add(hash, Operand(character)); |
+ __ add(hash, character); |
// hash ^= hash >> 6; |
__ mov(scratch, hash); |
__ sar(scratch, 6); |
- __ xor_(hash, Operand(scratch)); |
+ __ xor_(hash, scratch); |
} |
@@ -5774,15 +5772,15 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, |
Register character, |
Register scratch) { |
// hash += character; |
- __ add(hash, Operand(character)); |
+ __ add(hash, character); |
// hash += hash << 10; |
__ mov(scratch, hash); |
__ shl(scratch, 10); |
- __ add(hash, Operand(scratch)); |
+ __ add(hash, scratch); |
// hash ^= hash >> 6; |
__ mov(scratch, hash); |
__ sar(scratch, 6); |
- __ xor_(hash, Operand(scratch)); |
+ __ xor_(hash, scratch); |
} |
@@ -5792,19 +5790,19 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm, |
// hash += hash << 3; |
__ mov(scratch, hash); |
__ shl(scratch, 3); |
- __ add(hash, Operand(scratch)); |
+ __ add(hash, scratch); |
// hash ^= hash >> 11; |
__ mov(scratch, hash); |
__ sar(scratch, 11); |
- __ xor_(hash, Operand(scratch)); |
+ __ xor_(hash, scratch); |
// hash += hash << 15; |
__ mov(scratch, hash); |
__ shl(scratch, 15); |
- __ add(hash, Operand(scratch)); |
+ __ add(hash, scratch); |
// if (hash == 0) hash = 27; |
Label hash_not_zero; |
- __ test(hash, Operand(hash)); |
+ __ test(hash, hash); |
__ j(not_zero, &hash_not_zero, Label::kNear); |
__ mov(hash, Immediate(27)); |
__ bind(&hash_not_zero); |
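Taken together, the three hash stubs above implement the one-at-a-time style
hash spelled out by their comments. A C++ reconstruction for reference; note
the stub shifts with sar, whereas the unsigned >> below behaves like shr:

    #include <stdint.h>

    inline uint32_t StringHashSketch(const uint8_t* chars, int length) {
      // GenerateHashInit for the first character (length is assumed >= 1).
      uint32_t hash = chars[0] + (static_cast<uint32_t>(chars[0]) << 10);
      hash ^= hash >> 6;
      // GenerateHashAddCharacter for every further character.
      for (int i = 1; i < length; ++i) {
        hash += chars[i];
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      // GenerateHashGetHash finalisation.
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;  // the stub never produces a zero hash
      return hash;
    }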
@@ -5836,7 +5834,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { |
__ JumpIfNotSmi(ecx, &runtime); |
__ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. |
__ JumpIfNotSmi(edx, &runtime); |
- __ sub(ecx, Operand(edx)); |
+ __ sub(ecx, edx); |
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); |
Label return_eax; |
__ j(equal, &return_eax); |
@@ -5968,13 +5966,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { |
__ mov(edx, esi); // esi used by following code. |
// Locate first character of result. |
__ mov(edi, eax); |
- __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
+ __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
// Load string argument and locate character of sub string start. |
__ mov(esi, Operand(esp, 3 * kPointerSize)); |
- __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
+ __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from |
__ SmiUntag(ebx); |
- __ add(esi, Operand(ebx)); |
+ __ add(esi, ebx); |
// eax: result string |
// ecx: result length |
@@ -6003,18 +6001,17 @@ void SubStringStub::Generate(MacroAssembler* masm) { |
__ mov(edx, esi); // esi used by following code. |
// Locate first character of result. |
__ mov(edi, eax); |
- __ add(Operand(edi), |
+ __ add(edi, |
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
// Load string argument and locate character of sub string start. |
__ mov(esi, Operand(esp, 3 * kPointerSize)); |
- __ add(Operand(esi), |
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
+ __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from |
// As from is a smi it is 2 times the value which matches the size of a two |
// byte character. |
STATIC_ASSERT(kSmiTag == 0); |
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
- __ add(esi, Operand(ebx)); |
+ __ add(esi, ebx); |
// eax: result string |
// ecx: result length |
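The "from is a smi" shortcut works because an ia32 smi stores the integer
shifted left by one with a zero tag bit (the STATIC_ASSERTs above pin this
down), so the tagged value already equals index * 2, exactly the byte offset of
a 16-bit character. Sketch of that reasoning:

    #include <stdint.h>

    inline int32_t SmiEncodeSketch(int32_t value) { return value << 1; }

    inline const uint16_t* TwoByteCharAddressSketch(const uint16_t* first_char,
                                                    int32_t smi_encoded_index) {
      const uint8_t* base = reinterpret_cast<const uint8_t*>(first_char);
      // smi_encoded_index == index * 2 == index * sizeof(uint16_t)
      return reinterpret_cast<const uint16_t*>(base + smi_encoded_index);
    }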
@@ -6054,7 +6051,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, |
Label compare_chars; |
__ bind(&check_zero_length); |
STATIC_ASSERT(kSmiTag == 0); |
- __ test(length, Operand(length)); |
+ __ test(length, length); |
__ j(not_zero, &compare_chars, Label::kNear); |
__ Set(eax, Immediate(Smi::FromInt(EQUAL))); |
__ ret(0); |
@@ -6089,14 +6086,14 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
__ j(less_equal, &left_shorter, Label::kNear); |
// Right string is shorter. Change scratch1 to be length of right string. |
- __ sub(scratch1, Operand(length_delta)); |
+ __ sub(scratch1, length_delta); |
__ bind(&left_shorter); |
Register min_length = scratch1; |
// If either length is zero, just compare lengths. |
Label compare_lengths; |
- __ test(min_length, Operand(min_length)); |
+ __ test(min_length, min_length); |
__ j(zero, &compare_lengths, Label::kNear); |
// Compare characters. |
@@ -6106,7 +6103,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
// Compare lengths - strings up to min-length are equal. |
__ bind(&compare_lengths); |
- __ test(length_delta, Operand(length_delta)); |
+ __ test(length_delta, length_delta); |
__ j(not_zero, &result_not_equal, Label::kNear); |
// Result is EQUAL. |
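Overall, the flat ASCII comparison above walks at most min(left length, right
length) characters and, when that prefix matches, lets the length delta decide.
A strcmp-style sketch of the same semantics (the stub itself returns a
smi-encoded LESS/EQUAL/GREATER):

    #include <stdint.h>
    #include <stddef.h>

    inline int FlatAsciiCompareSketch(const uint8_t* left, size_t left_len,
                                      const uint8_t* right, size_t right_len) {
      size_t min_len = left_len < right_len ? left_len : right_len;
      for (size_t i = 0; i < min_len; ++i) {
        if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
      }
      if (left_len == right_len) return 0;
      return left_len < right_len ? -1 : 1;  // lengths decide equal prefixes
    }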
@@ -6155,7 +6152,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( |
__ mov_b(scratch, Operand(left, index, times_1, 0)); |
__ cmpb(scratch, Operand(right, index, times_1, 0)); |
__ j(not_equal, chars_not_equal, chars_not_equal_near); |
- __ add(Operand(index), Immediate(1)); |
+ __ add(index, Immediate(1)); |
__ j(not_zero, &loop); |
} |
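The add(index, Immediate(1)) / j(not_zero) pair above is the negative-index
loop idiom: the index runs from -length up to zero, so the loop condition is a
free zero test. The pointer setup is outside the shown hunks, so this is only a
sketch under that assumption:

    #include <stdint.h>
    #include <stddef.h>

    inline bool PrefixEqualSketch(const uint8_t* left_end,
                                  const uint8_t* right_end, ptrdiff_t length) {
      for (ptrdiff_t index = -length; index != 0; ++index) {
        if (left_end[index] != right_end[index]) return false;  // chars differ
      }
      return true;  // reached index == 0 with all characters equal
    }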
@@ -6172,7 +6169,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { |
__ mov(eax, Operand(esp, 1 * kPointerSize)); // right |
Label not_same; |
- __ cmp(edx, Operand(eax)); |
+ __ cmp(edx, eax); |
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0); |
STATIC_ASSERT(kSmiTag == 0); |
@@ -6188,7 +6185,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { |
// Compare flat ascii strings. |
// Drop arguments from the stack. |
__ pop(ecx); |
- __ add(Operand(esp), Immediate(2 * kPointerSize)); |
+ __ add(esp, Immediate(2 * kPointerSize)); |
__ push(ecx); |
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); |
@@ -6202,16 +6199,16 @@ void StringCompareStub::Generate(MacroAssembler* masm) { |
void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
ASSERT(state_ == CompareIC::SMIS); |
Label miss; |
- __ mov(ecx, Operand(edx)); |
- __ or_(ecx, Operand(eax)); |
+ __ mov(ecx, edx); |
+ __ or_(ecx, eax); |
__ JumpIfNotSmi(ecx, &miss, Label::kNear); |
if (GetCondition() == equal) { |
// For equality we do not care about the sign of the result. |
- __ sub(eax, Operand(edx)); |
+ __ sub(eax, edx); |
} else { |
Label done; |
- __ sub(edx, Operand(eax)); |
+ __ sub(edx, eax); |
__ j(no_overflow, &done, Label::kNear); |
// Correct sign of result in case of overflow. |
__ not_(edx); |
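In the ordered smi path above only the sign of edx - eax is consumed later. If
the subtraction overflows, its sign is inverted, and the stub repairs it with
not_ rather than neg: ~x equals -x - 1, which has the opposite sign of x and
cannot itself overflow. A sketch of that fix-up over tagged (even) smi values:

    #include <stdint.h>

    inline int32_t OrderedSmiDiffSketch(int32_t b_tagged, int32_t a_tagged) {
      uint32_t raw = static_cast<uint32_t>(b_tagged) -
                     static_cast<uint32_t>(a_tagged);
      int32_t wrapped = static_cast<int32_t>(raw);   // what sub leaves behind
      int64_t exact = static_cast<int64_t>(b_tagged) - a_tagged;
      bool overflow = (exact != wrapped);
      // On overflow, ~wrapped carries the sign of the exact difference; tagged
      // smis are even, so wrapped is never -1 and the result is never zero.
      return overflow ? ~wrapped : wrapped;
    }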
@@ -6231,8 +6228,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
Label generic_stub; |
Label unordered; |
Label miss; |
- __ mov(ecx, Operand(edx)); |
- __ and_(ecx, Operand(eax)); |
+ __ mov(ecx, edx); |
+ __ and_(ecx, eax); |
__ JumpIfSmi(ecx, &generic_stub, Label::kNear); |
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); |
@@ -6260,9 +6257,9 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
// Performing mov, because xor would destroy the flag register. |
__ mov(eax, 0); // equal |
__ mov(ecx, Immediate(Smi::FromInt(1))); |
- __ cmov(above, eax, Operand(ecx)); |
+ __ cmov(above, eax, ecx); |
__ mov(ecx, Immediate(Smi::FromInt(-1))); |
- __ cmov(below, eax, Operand(ecx)); |
+ __ cmov(below, eax, ecx); |
__ ret(0); |
__ bind(&unordered); |
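The mov/cmov sequence above turns the flags of the double comparison into a
three-valued result without branches; eax is seeded with a mov precisely so the
flags survive. Sketch of the mapping (the ucomisd operand order is outside the
shown hunks, so left/right here are illustrative, and the stub additionally
smi-tags the 1 and -1):

    inline int DoubleCompareResultSketch(double left, double right) {
      int result = 0;                 // stays "equal" unless overwritten
      if (left > right) result = 1;   // cmov(above, eax, ecx), ecx = Smi(1)
      if (left < right) result = -1;  // cmov(below, eax, ecx), ecx = Smi(-1)
      return result;                  // NaN operands take the unordered path
    }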
@@ -6289,9 +6286,9 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { |
// Check that both operands are heap objects. |
Label miss; |
- __ mov(tmp1, Operand(left)); |
+ __ mov(tmp1, left); |
STATIC_ASSERT(kSmiTag == 0); |
- __ and_(tmp1, Operand(right)); |
+ __ and_(tmp1, right); |
__ JumpIfSmi(tmp1, &miss, Label::kNear); |
// Check that both operands are symbols. |
@@ -6300,13 +6297,13 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { |
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); |
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); |
STATIC_ASSERT(kSymbolTag != 0); |
- __ and_(tmp1, Operand(tmp2)); |
+ __ and_(tmp1, tmp2); |
__ test(tmp1, Immediate(kIsSymbolMask)); |
__ j(zero, &miss, Label::kNear); |
// Symbols are compared by identity. |
Label done; |
- __ cmp(left, Operand(right)); |
+ __ cmp(left, right); |
// Make sure eax is non-zero. At this point input operands are |
// guaranteed to be non-zero. |
ASSERT(right.is(eax)); |
@@ -6335,9 +6332,9 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
Register tmp3 = edi; |
// Check that both operands are heap objects. |
- __ mov(tmp1, Operand(left)); |
+ __ mov(tmp1, left); |
STATIC_ASSERT(kSmiTag == 0); |
- __ and_(tmp1, Operand(right)); |
+ __ and_(tmp1, right); |
__ JumpIfSmi(tmp1, &miss); |
// Check that both operands are strings. This leaves the instance |
@@ -6348,13 +6345,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); |
__ mov(tmp3, tmp1); |
STATIC_ASSERT(kNotStringTag != 0); |
- __ or_(tmp3, Operand(tmp2)); |
+ __ or_(tmp3, tmp2); |
__ test(tmp3, Immediate(kIsNotStringMask)); |
__ j(not_zero, &miss); |
// Fast check for identical strings. |
Label not_same; |
- __ cmp(left, Operand(right)); |
+ __ cmp(left, right); |
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0); |
STATIC_ASSERT(kSmiTag == 0); |
@@ -6368,7 +6365,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
// because we already know they are not identical. |
Label do_compare; |
STATIC_ASSERT(kSymbolTag != 0); |
- __ and_(tmp1, Operand(tmp2)); |
+ __ and_(tmp1, tmp2); |
__ test(tmp1, Immediate(kIsSymbolMask)); |
__ j(zero, &do_compare, Label::kNear); |
// Make sure eax is non-zero. At this point input operands are |
@@ -6401,8 +6398,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
ASSERT(state_ == CompareIC::OBJECTS); |
Label miss; |
- __ mov(ecx, Operand(edx)); |
- __ and_(ecx, Operand(eax)); |
+ __ mov(ecx, edx); |
+ __ and_(ecx, eax); |
__ JumpIfSmi(ecx, &miss, Label::kNear); |
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx); |
@@ -6411,7 +6408,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
__ j(not_equal, &miss, Label::kNear); |
ASSERT(GetCondition() == equal); |
- __ sub(eax, Operand(edx)); |
+ __ sub(eax, edx); |
__ ret(0); |
__ bind(&miss); |
@@ -6447,7 +6444,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { |
__ push(ecx); |
// Do a tail call to the rewritten stub. |
- __ jmp(Operand(edi)); |
+ __ jmp(edi); |
} |
@@ -6476,8 +6473,8 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup( |
// Capacity is smi 2^n. |
__ mov(index, FieldOperand(properties, kCapacityOffset)); |
__ dec(index); |
- __ and_(Operand(index), |
- Immediate(Smi::FromInt(name->Hash() + |
+ __ and_(index, |
+ Immediate(Smi::FromInt(name->Hash() + |
StringDictionary::GetProbeOffset(i)))); |
// Scale the index by multiplying by the entry size. |
@@ -6510,7 +6507,7 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup( |
__ push(Immediate(name->Hash())); |
MaybeObject* result = masm->TryCallStub(&stub); |
if (result->IsFailure()) return result; |
- __ test(r0, Operand(r0)); |
+ __ test(r0, r0); |
__ j(not_zero, miss); |
__ jmp(done); |
return result; |
@@ -6543,9 +6540,9 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, |
__ mov(r0, FieldOperand(name, String::kHashFieldOffset)); |
__ shr(r0, String::kHashShift); |
if (i > 0) { |
- __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i))); |
+ __ add(r0, Immediate(StringDictionary::GetProbeOffset(i))); |
} |
- __ and_(r0, Operand(r1)); |
+ __ and_(r0, r1); |
// Scale the index by multiplying by the entry size. |
ASSERT(StringDictionary::kEntrySize == 3); |
@@ -6569,7 +6566,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, |
__ push(r0); |
__ CallStub(&stub); |
- __ test(r1, Operand(r1)); |
+ __ test(r1, r1); |
__ j(zero, miss); |
__ jmp(done); |
} |
@@ -6608,8 +6605,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { |
// Compute the masked index: (hash + i + i * i) & mask. |
__ mov(scratch, Operand(esp, 2 * kPointerSize)); |
if (i > 0) { |
- __ add(Operand(scratch), |
- Immediate(StringDictionary::GetProbeOffset(i))); |
+ __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i))); |
} |
__ and_(scratch, Operand(esp, 0)); |
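The dictionary probes above use the same masked, quadratic scheme as the symbol
table, with each logical entry occupying kEntrySize == 3 array slots; the
surrounding code scales by three as index + index * 2 (a single lea). Sketch
with the probe offset formula treated as an assumption:

    #include <stdint.h>

    inline uint32_t DictionarySlotSketch(uint32_t hash, uint32_t capacity,
                                         uint32_t probe) {
      uint32_t mask = capacity - 1;                    // capacity is 2^n
      uint32_t offset = (probe + probe * probe) >> 1;  // assumed formula
      uint32_t entry = (hash + offset) & mask;         // masked index
      return entry + entry * 2;                        // * kEntrySize (3)
    }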