Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes Created 6 years, 4 months ago
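
For context, ASSERT* in V8 was a debug-only check; this change renames it to match Chromium's DCHECK* convention, where the check is active in debug builds and compiles to nothing in release builds (unless explicitly enabled). The snippet below is a minimal stand-alone sketch of that behavior for illustration only, not V8's actual macro definitions; the real macros add file/line reporting and printable operand values.

// Minimal sketch of DCHECK/DCHECK_EQ semantics, assuming a DEBUG build flag.
// Not V8's implementation; it only shows debug-only checking vs. a no-op.
#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define DCHECK(condition)                                               \
  do {                                                                  \
    if (!(condition)) {                                                 \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition);     \
      std::abort();                                                     \
    }                                                                   \
  } while (false)
#define DCHECK_EQ(expected, actual) DCHECK((expected) == (actual))
#else
#define DCHECK(condition) ((void)0)
#define DCHECK_EQ(expected, actual) ((void)0)
#endif

int main() {
  int param_count = 0;
  // Mirrors the kind of invariant the stubs below assert: either there are
  // no parameters, or the last parameter index is valid.
  DCHECK(param_count == 0 || param_count - 1 >= 0);
  DCHECK_EQ(0, param_count);
  return 0;
}
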
Index: src/arm/code-stubs-arm.cc
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 6b807b27e41b25d86270b6b30c44c5f33627d008..a728d58fbfb58490dbd371f4829df1223dc6ed7e 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -377,7 +377,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- ASSERT(param_count == 0 ||
+ DCHECK(param_count == 0 ||
r0.is(descriptor->GetEnvironmentParameterRegister(
param_count - 1)));
// Push arguments
@@ -491,7 +491,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
- ASSERT(is_truncating());
+ DCHECK(is_truncating());
int double_offset = offset();
// Account for saved regs if input is sp.
@@ -623,7 +623,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
// the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
__ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
__ str(scratch_, FieldMemOperand(the_heap_number_,
@@ -754,7 +754,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* lhs_not_nan,
Label* slow,
bool strict) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
Label rhs_is_smi;
@@ -816,7 +816,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
// If either operand is a JS object or an oddball value, then they are
@@ -862,7 +862,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
Label* both_loaded_as_doubles,
Label* not_heap_numbers,
Label* slow) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
__ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
@@ -885,7 +885,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register rhs,
Label* possible_strings,
Label* not_both_strings) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
// r2 is object type of rhs.
@@ -975,7 +975,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(0, Smi::FromInt(0));
__ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1087,7 +1087,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc == gt || cc == ge); // remaining cases
+ DCHECK(cc == gt || cc == ge); // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
@@ -1305,7 +1305,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- ASSERT(heapnumber.is(r0));
+ DCHECK(heapnumber.is(r0));
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret(2);
} else {
@@ -1405,7 +1405,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(IsPowerOf2(frame_alignment));
__ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -1681,7 +1681,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+ DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
// Fixed register usage throughout the stub:
const Register object = r0; // Object (lhs).
@@ -1728,7 +1728,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
} else {
- ASSERT(HasArgsInRegisters());
+ DCHECK(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
// The map_load_offset was stored in r5
@@ -2697,9 +2697,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
- ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into r4.
@@ -3166,7 +3166,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
@@ -3206,7 +3206,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+ DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ tst(code_,
Operand(kSmiTagMask |
((~String::kMaxOneByteCharCode) << kSmiTagSize)));
@@ -3613,7 +3613,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
// Use length_delta as result if it's zero.
__ mov(r0, Operand(length_delta), SetCC);
__ bind(&result_not_equal);
@@ -3725,7 +3725,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
+ DCHECK(state_ == CompareIC::SMI);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
@@ -3746,7 +3746,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
+ DCHECK(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
@@ -3823,7 +3823,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -3849,7 +3849,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ cmp(left, right);
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
- ASSERT(right.is(r0));
+ DCHECK(right.is(r0));
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
@@ -3861,8 +3861,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASSERT(GetCondition() == eq);
+ DCHECK(state_ == CompareIC::UNIQUE_NAME);
+ DCHECK(GetCondition() == eq);
Label miss;
// Registers containing left and right operands respectively.
@@ -3888,7 +3888,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ cmp(left, right);
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
- ASSERT(right.is(r0));
+ DCHECK(right.is(r0));
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
@@ -3900,7 +3900,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
+ DCHECK(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -3940,13 +3940,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// because we already know they are not identical. We know they are both
// strings.
if (equality) {
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
STATIC_ASSERT(kInternalizedTag == 0);
__ orr(tmp3, tmp1, Operand(tmp2));
__ tst(tmp3, Operand(kIsNotInternalizedMask));
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
- ASSERT(right.is(r0));
+ DCHECK(right.is(r0));
__ Ret(eq);
}
@@ -3979,7 +3979,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
+ DCHECK(state_ == CompareIC::OBJECT);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -3989,7 +3989,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
- ASSERT(GetCondition() == eq);
+ DCHECK(GetCondition() == eq);
__ sub(r0, r0, Operand(r1));
__ Ret();
@@ -4068,7 +4068,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register properties,
Handle<Name> name,
Register scratch0) {
- ASSERT(name->IsUniqueName());
+ DCHECK(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
@@ -4085,17 +4085,17 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
+ DCHECK_EQ(kSmiTagSize, 1);
Register tmp = properties;
__ add(tmp, properties, Operand(index, LSL, 1));
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
- ASSERT(!tmp.is(entity_name));
+ DCHECK(!tmp.is(entity_name));
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ cmp(entity_name, tmp);
__ b(eq, done);
@@ -4151,10 +4151,10 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Register name,
Register scratch1,
Register scratch2) {
- ASSERT(!elements.is(scratch1));
- ASSERT(!elements.is(scratch2));
- ASSERT(!name.is(scratch1));
- ASSERT(!name.is(scratch2));
+ DCHECK(!elements.is(scratch1));
+ DCHECK(!elements.is(scratch2));
+ DCHECK(!name.is(scratch1));
+ DCHECK(!name.is(scratch2));
__ AssertName(name);
@@ -4173,7 +4173,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(NameDictionary::GetProbeOffset(i) <
+ DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ add(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@@ -4181,7 +4181,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
@@ -4199,7 +4199,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ stm(db_w, sp, spill_mask);
if (name.is(r0)) {
- ASSERT(!elements.is(r1));
+ DCHECK(!elements.is(r1));
__ Move(r1, name);
__ Move(r0, elements);
} else {
@@ -4255,7 +4255,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
- ASSERT(NameDictionary::GetProbeOffset(i) <
+ DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ add(index, hash, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@@ -4265,10 +4265,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ and_(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
+ DCHECK(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
- ASSERT_EQ(kSmiTagSize, 1);
+ DCHECK_EQ(kSmiTagSize, 1);
__ add(index, dictionary, Operand(index, LSL, 2));
__ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
@@ -4356,8 +4356,8 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
// Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
// Will be checked in IncrementalMarking::ActivateGeneratedStub.
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+ DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+ DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
PatchBranchIntoNop(masm, 0);
PatchBranchIntoNop(masm, Assembler::kInstrSize);
}
@@ -4409,8 +4409,8 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(r0));
+ DCHECK(!address.is(regs_.object()));
+ DCHECK(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
__ Move(r1, address);
@@ -4616,7 +4616,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// We also save lr, so the count here is one higher than the mask indicates.
const int32_t kNumSavedRegs = 7;
- ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);
+ DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
// Save all caller-save registers as this may be called from anywhere.
__ stm(db_w, sp, kSavedRegs | lr.bit());
@@ -4632,7 +4632,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(r5, sp);
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(IsPowerOf2(frame_alignment));
__ and_(sp, sp, Operand(-frame_alignment));
}
@@ -4696,12 +4696,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ DCHECK(FAST_SMI_ELEMENTS == 0);
+ DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+ DCHECK(FAST_ELEMENTS == 2);
+ DCHECK(FAST_HOLEY_ELEMENTS == 3);
+ DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+ DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
@@ -5020,7 +5020,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
+ DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
// r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
