Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1320)

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 148593004: A64: Synchronize with r18084. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/arm/macro-assembler-arm.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
91 info()->CommitDependencies(code); 91 info()->CommitDependencies(code);
92 } 92 }
93 93
94 94
95 void LCodeGen::Abort(BailoutReason reason) { 95 void LCodeGen::Abort(BailoutReason reason) {
96 info()->set_bailout_reason(reason); 96 info()->set_bailout_reason(reason);
97 status_ = ABORTED; 97 status_ = ABORTED;
98 } 98 }
99 99
100 100
101 void LCodeGen::SaveCallerDoubles() {
102 ASSERT(info()->saves_caller_doubles());
103 ASSERT(NeedsEagerFrame());
104 Comment(";;; Save clobbered callee double registers");
105 int count = 0;
106 BitVector* doubles = chunk()->allocated_double_registers();
107 BitVector::Iterator save_iterator(doubles);
108 while (!save_iterator.Done()) {
109 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
110 MemOperand(sp, count * kDoubleSize));
111 save_iterator.Advance();
112 count++;
113 }
114 }
115
116
117 void LCodeGen::RestoreCallerDoubles() {
118 ASSERT(info()->saves_caller_doubles());
119 ASSERT(NeedsEagerFrame());
120 Comment(";;; Restore clobbered callee double registers");
121 BitVector* doubles = chunk()->allocated_double_registers();
122 BitVector::Iterator save_iterator(doubles);
123 int count = 0;
124 while (!save_iterator.Done()) {
125 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
126 MemOperand(sp, count * kDoubleSize));
127 save_iterator.Advance();
128 count++;
129 }
130 }
131
132
101 bool LCodeGen::GeneratePrologue() { 133 bool LCodeGen::GeneratePrologue() {
102 ASSERT(is_generating()); 134 ASSERT(is_generating());
103 135
104 if (info()->IsOptimizing()) { 136 if (info()->IsOptimizing()) {
105 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 137 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
106 138
107 #ifdef DEBUG 139 #ifdef DEBUG
108 if (strlen(FLAG_stop_at) > 0 && 140 if (strlen(FLAG_stop_at) > 0 &&
109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { 141 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
110 __ stop("stop_at"); 142 __ stop("stop_at");
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
151 __ cmp(r0, sp); 183 __ cmp(r0, sp);
152 __ b(ne, &loop); 184 __ b(ne, &loop);
153 __ pop(r1); 185 __ pop(r1);
154 __ pop(r0); 186 __ pop(r0);
155 } else { 187 } else {
156 __ sub(sp, sp, Operand(slots * kPointerSize)); 188 __ sub(sp, sp, Operand(slots * kPointerSize));
157 } 189 }
158 } 190 }
159 191
160 if (info()->saves_caller_doubles()) { 192 if (info()->saves_caller_doubles()) {
161 Comment(";;; Save clobbered callee double registers"); 193 SaveCallerDoubles();
162 int count = 0;
163 BitVector* doubles = chunk()->allocated_double_registers();
164 BitVector::Iterator save_iterator(doubles);
165 while (!save_iterator.Done()) {
166 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
167 MemOperand(sp, count * kDoubleSize));
168 save_iterator.Advance();
169 count++;
170 }
171 } 194 }
172 195
173 // Possibly allocate a local context. 196 // Possibly allocate a local context.
174 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 197 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
175 if (heap_slots > 0) { 198 if (heap_slots > 0) {
176 Comment(";;; Allocate local context"); 199 Comment(";;; Allocate local context");
177 // Argument to NewContext is the function, which is in r1. 200 // Argument to NewContext is the function, which is in r1.
178 __ push(r1); 201 __ push(r1);
179 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 202 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
180 FastNewContextStub stub(heap_slots); 203 FastNewContextStub stub(heap_slots);
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after
306 __ bind(&deopt_jump_table_[i].label); 329 __ bind(&deopt_jump_table_[i].label);
307 Address entry = deopt_jump_table_[i].address; 330 Address entry = deopt_jump_table_[i].address;
308 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; 331 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
309 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); 332 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
310 if (id == Deoptimizer::kNotDeoptimizationEntry) { 333 if (id == Deoptimizer::kNotDeoptimizationEntry) {
311 Comment(";;; jump table entry %d.", i); 334 Comment(";;; jump table entry %d.", i);
312 } else { 335 } else {
313 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); 336 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
314 } 337 }
315 if (deopt_jump_table_[i].needs_frame) { 338 if (deopt_jump_table_[i].needs_frame) {
339 ASSERT(!info()->saves_caller_doubles());
316 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry))); 340 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
317 if (needs_frame.is_bound()) { 341 if (needs_frame.is_bound()) {
318 __ b(&needs_frame); 342 __ b(&needs_frame);
319 } else { 343 } else {
320 __ bind(&needs_frame); 344 __ bind(&needs_frame);
321 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); 345 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
322 // This variant of deopt can only be used with stubs. Since we don't 346 // This variant of deopt can only be used with stubs. Since we don't
323 // have a function pointer to install in the stack frame that we're 347 // have a function pointer to install in the stack frame that we're
324 // building, install a special marker there instead. 348 // building, install a special marker there instead.
325 ASSERT(info()->IsStub()); 349 ASSERT(info()->IsStub());
326 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); 350 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
327 __ push(scratch0()); 351 __ push(scratch0());
328 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 352 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
329 __ mov(lr, Operand(pc), LeaveCC, al); 353 __ mov(lr, Operand(pc), LeaveCC, al);
330 __ mov(pc, ip); 354 __ mov(pc, ip);
331 } 355 }
332 } else { 356 } else {
357 if (info()->saves_caller_doubles()) {
358 ASSERT(info()->IsStub());
359 RestoreCallerDoubles();
360 }
333 __ mov(lr, Operand(pc), LeaveCC, al); 361 __ mov(lr, Operand(pc), LeaveCC, al);
334 __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); 362 __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
335 } 363 }
336 masm()->CheckConstPool(false, false); 364 masm()->CheckConstPool(false, false);
337 } 365 }
338 366
339 // Force constant pool emission at the end of the deopt jump table to make 367 // Force constant pool emission at the end of the deopt jump table to make
340 // sure that no constant pools are emitted after. 368 // sure that no constant pools are emitted after.
341 masm()->CheckConstPool(true, false); 369 masm()->CheckConstPool(true, false);
342 370
(...skipping 478 matching lines...) Expand 10 before | Expand all | Expand 10 after
821 condition = ne; 849 condition = ne;
822 __ cmp(scratch, Operand::Zero()); 850 __ cmp(scratch, Operand::Zero());
823 } 851 }
824 } 852 }
825 853
826 if (info()->ShouldTrapOnDeopt()) { 854 if (info()->ShouldTrapOnDeopt()) {
827 __ stop("trap_on_deopt", condition); 855 __ stop("trap_on_deopt", condition);
828 } 856 }
829 857
830 ASSERT(info()->IsStub() || frame_is_built_); 858 ASSERT(info()->IsStub() || frame_is_built_);
831 if (condition == al && frame_is_built_) { 859 // Go through jump table if we need to handle condition, build frame, or
860 // restore caller doubles.
861 if (condition == al && frame_is_built_ &&
862 !info()->saves_caller_doubles()) {
832 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 863 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
833 } else { 864 } else {
834 // We often have several deopts to the same entry, reuse the last 865 // We often have several deopts to the same entry, reuse the last
835 // jump entry if this is the case. 866 // jump entry if this is the case.
836 if (deopt_jump_table_.is_empty() || 867 if (deopt_jump_table_.is_empty() ||
837 (deopt_jump_table_.last().address != entry) || 868 (deopt_jump_table_.last().address != entry) ||
838 (deopt_jump_table_.last().bailout_type != bailout_type) || 869 (deopt_jump_table_.last().bailout_type != bailout_type) ||
839 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { 870 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
840 Deoptimizer::JumpTableEntry table_entry(entry, 871 Deoptimizer::JumpTableEntry table_entry(entry,
841 bailout_type, 872 bailout_type,
(...skipping 2080 matching lines...) Expand 10 before | Expand all | Expand 10 after
2922 if (FLAG_trace && info()->IsOptimizing()) { 2953 if (FLAG_trace && info()->IsOptimizing()) {
2923 // Push the return value on the stack as the parameter. 2954 // Push the return value on the stack as the parameter.
2924 // Runtime::TraceExit returns its parameter in r0. We're leaving the code 2955 // Runtime::TraceExit returns its parameter in r0. We're leaving the code
2925 // managed by the register allocator and tearing down the frame, it's 2956 // managed by the register allocator and tearing down the frame, it's
2926 // safe to write to the context register. 2957 // safe to write to the context register.
2927 __ push(r0); 2958 __ push(r0);
2928 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2959 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2929 __ CallRuntime(Runtime::kTraceExit, 1); 2960 __ CallRuntime(Runtime::kTraceExit, 1);
2930 } 2961 }
2931 if (info()->saves_caller_doubles()) { 2962 if (info()->saves_caller_doubles()) {
2932 ASSERT(NeedsEagerFrame()); 2963 RestoreCallerDoubles();
2933 BitVector* doubles = chunk()->allocated_double_registers();
2934 BitVector::Iterator save_iterator(doubles);
2935 int count = 0;
2936 while (!save_iterator.Done()) {
2937 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
2938 MemOperand(sp, count * kDoubleSize));
2939 save_iterator.Advance();
2940 count++;
2941 }
2942 } 2964 }
2943 int no_frame_start = -1; 2965 int no_frame_start = -1;
2944 if (NeedsEagerFrame()) { 2966 if (NeedsEagerFrame()) {
2945 __ mov(sp, fp); 2967 __ mov(sp, fp);
2946 no_frame_start = masm_->pc_offset(); 2968 no_frame_start = masm_->pc_offset();
2947 __ ldm(ia_w, sp, fp.bit() | lr.bit()); 2969 __ ldm(ia_w, sp, fp.bit() | lr.bit());
2948 } 2970 }
2949 if (instr->has_constant_parameter_count()) { 2971 if (instr->has_constant_parameter_count()) {
2950 int parameter_count = ToInteger32(instr->constant_parameter_count()); 2972 int parameter_count = ToInteger32(instr->constant_parameter_count());
2951 int32_t sp_delta = (parameter_count + 1) * kPointerSize; 2973 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
(...skipping 975 matching lines...) Expand 10 before | Expand all | Expand 10 after
3927 MathPowStub stub(MathPowStub::INTEGER); 3949 MathPowStub stub(MathPowStub::INTEGER);
3928 __ CallStub(&stub); 3950 __ CallStub(&stub);
3929 } else { 3951 } else {
3930 ASSERT(exponent_type.IsDouble()); 3952 ASSERT(exponent_type.IsDouble());
3931 MathPowStub stub(MathPowStub::DOUBLE); 3953 MathPowStub stub(MathPowStub::DOUBLE);
3932 __ CallStub(&stub); 3954 __ CallStub(&stub);
3933 } 3955 }
3934 } 3956 }
3935 3957
3936 3958
3937 void LCodeGen::DoRandom(LRandom* instr) {
3938 // Assert that the register size is indeed the size of each seed.
3939 static const int kSeedSize = sizeof(uint32_t);
3940 STATIC_ASSERT(kPointerSize == kSeedSize);
3941
3942 // Load native context
3943 Register global_object = ToRegister(instr->global_object());
3944 Register native_context = global_object;
3945 __ ldr(native_context, FieldMemOperand(
3946 global_object, GlobalObject::kNativeContextOffset));
3947
3948 // Load state (FixedArray of the native context's random seeds)
3949 static const int kRandomSeedOffset =
3950 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3951 Register state = native_context;
3952 __ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));
3953
3954 // Load state[0].
3955 Register state0 = ToRegister(instr->scratch());
3956 __ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
3957 // Load state[1].
3958 Register state1 = ToRegister(instr->scratch2());
3959 __ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
3960
3961 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3962 Register scratch3 = ToRegister(instr->scratch3());
3963 Register scratch4 = scratch0();
3964 __ and_(scratch3, state0, Operand(0xFFFF));
3965 __ mov(scratch4, Operand(18273));
3966 __ mul(scratch3, scratch3, scratch4);
3967 __ add(state0, scratch3, Operand(state0, LSR, 16));
3968 // Save state[0].
3969 __ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
3970
3971 // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3972 __ and_(scratch3, state1, Operand(0xFFFF));
3973 __ mov(scratch4, Operand(36969));
3974 __ mul(scratch3, scratch3, scratch4);
3975 __ add(state1, scratch3, Operand(state1, LSR, 16));
3976 // Save state[1].
3977 __ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
3978
3979 // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3980 Register random = scratch4;
3981 __ and_(random, state1, Operand(0x3FFFF));
3982 __ add(random, random, Operand(state0, LSL, 14));
3983
3984 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3985 // Create this constant using mov/orr to avoid PC relative load.
3986 __ mov(scratch3, Operand(0x41000000));
3987 __ orr(scratch3, scratch3, Operand(0x300000));
3988 // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
3989 DwVfpRegister result = ToDoubleRegister(instr->result());
3990 __ vmov(result, random, scratch3);
3991 // Move 0x4130000000000000 to VFP.
3992 __ mov(scratch4, Operand::Zero());
3993 DwVfpRegister scratch5 = double_scratch0();
3994 __ vmov(scratch5, scratch4, scratch3);
3995 __ vsub(result, result, scratch5);
3996 }
3997
3998
3999 void LCodeGen::DoMathExp(LMathExp* instr) { 3959 void LCodeGen::DoMathExp(LMathExp* instr) {
4000 DwVfpRegister input = ToDoubleRegister(instr->value()); 3960 DwVfpRegister input = ToDoubleRegister(instr->value());
4001 DwVfpRegister result = ToDoubleRegister(instr->result()); 3961 DwVfpRegister result = ToDoubleRegister(instr->result());
4002 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 3962 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
4003 DwVfpRegister double_scratch2 = double_scratch0(); 3963 DwVfpRegister double_scratch2 = double_scratch0();
4004 Register temp1 = ToRegister(instr->temp1()); 3964 Register temp1 = ToRegister(instr->temp1());
4005 Register temp2 = ToRegister(instr->temp2()); 3965 Register temp2 = ToRegister(instr->temp2());
4006 3966
4007 MathExpGenerator::EmitMathExp( 3967 MathExpGenerator::EmitMathExp(
4008 masm(), input, result, double_scratch1, double_scratch2, 3968 masm(), input, result, double_scratch1, double_scratch2,
(...skipping 719 matching lines...) Expand 10 before | Expand all | Expand 10 after
4728 } else { 4688 } else {
4729 __ vmov(single_scratch, ToRegister(input)); 4689 __ vmov(single_scratch, ToRegister(input));
4730 } 4690 }
4731 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); 4691 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4732 } 4692 }
4733 4693
4734 4694
4735 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { 4695 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
4736 LOperand* input = instr->value(); 4696 LOperand* input = instr->value();
4737 LOperand* output = instr->result(); 4697 LOperand* output = instr->result();
4738 __ SmiTag(ToRegister(output), ToRegister(input), SetCC); 4698 ASSERT(output->IsRegister());
4739 if (!instr->hydrogen()->value()->HasRange() || 4699 if (!instr->hydrogen()->value()->HasRange() ||
4740 !instr->hydrogen()->value()->range()->IsInSmiRange()) { 4700 !instr->hydrogen()->value()->range()->IsInSmiRange()) {
4701 __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
4741 DeoptimizeIf(vs, instr->environment()); 4702 DeoptimizeIf(vs, instr->environment());
4703 } else {
4704 __ SmiTag(ToRegister(output), ToRegister(input));
4742 } 4705 }
4743 } 4706 }
4744 4707
4745 4708
4746 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4709 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4747 LOperand* input = instr->value(); 4710 LOperand* input = instr->value();
4748 LOperand* output = instr->result(); 4711 LOperand* output = instr->result();
4749 4712
4750 SwVfpRegister flt_scratch = double_scratch0().low(); 4713 SwVfpRegister flt_scratch = double_scratch0().low();
4751 __ vmov(flt_scratch, ToRegister(input)); 4714 __ vmov(flt_scratch, ToRegister(input));
(...skipping 838 matching lines...) Expand 10 before | Expand all | Expand 10 after
5590 5553
5591 5554
5592 Condition LCodeGen::EmitTypeofIs(Label* true_label, 5555 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5593 Label* false_label, 5556 Label* false_label,
5594 Register input, 5557 Register input,
5595 Handle<String> type_name) { 5558 Handle<String> type_name) {
5596 Condition final_branch_condition = kNoCondition; 5559 Condition final_branch_condition = kNoCondition;
5597 Register scratch = scratch0(); 5560 Register scratch = scratch0();
5598 if (type_name->Equals(heap()->number_string())) { 5561 if (type_name->Equals(heap()->number_string())) {
5599 __ JumpIfSmi(input, true_label); 5562 __ JumpIfSmi(input, true_label);
5600 __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); 5563 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5601 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 5564 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5602 __ cmp(input, Operand(ip));
5603 final_branch_condition = eq; 5565 final_branch_condition = eq;
5604 5566
5605 } else if (type_name->Equals(heap()->string_string())) { 5567 } else if (type_name->Equals(heap()->string_string())) {
5606 __ JumpIfSmi(input, false_label); 5568 __ JumpIfSmi(input, false_label);
5607 __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE); 5569 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5608 __ b(ge, false_label); 5570 __ b(ge, false_label);
5609 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); 5571 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5610 __ tst(ip, Operand(1 << Map::kIsUndetectable)); 5572 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5611 final_branch_condition = eq; 5573 final_branch_condition = eq;
5612 5574
5613 } else if (type_name->Equals(heap()->symbol_string())) { 5575 } else if (type_name->Equals(heap()->symbol_string())) {
5614 __ JumpIfSmi(input, false_label); 5576 __ JumpIfSmi(input, false_label);
5615 __ CompareObjectType(input, input, scratch, SYMBOL_TYPE); 5577 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5616 final_branch_condition = eq; 5578 final_branch_condition = eq;
5617 5579
5618 } else if (type_name->Equals(heap()->boolean_string())) { 5580 } else if (type_name->Equals(heap()->boolean_string())) {
5619 __ CompareRoot(input, Heap::kTrueValueRootIndex); 5581 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5620 __ b(eq, true_label); 5582 __ b(eq, true_label);
5621 __ CompareRoot(input, Heap::kFalseValueRootIndex); 5583 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5622 final_branch_condition = eq; 5584 final_branch_condition = eq;
5623 5585
5624 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { 5586 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5625 __ CompareRoot(input, Heap::kNullValueRootIndex); 5587 __ CompareRoot(input, Heap::kNullValueRootIndex);
5626 final_branch_condition = eq; 5588 final_branch_condition = eq;
5627 5589
5628 } else if (type_name->Equals(heap()->undefined_string())) { 5590 } else if (type_name->Equals(heap()->undefined_string())) {
5629 __ CompareRoot(input, Heap::kUndefinedValueRootIndex); 5591 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5630 __ b(eq, true_label); 5592 __ b(eq, true_label);
5631 __ JumpIfSmi(input, false_label); 5593 __ JumpIfSmi(input, false_label);
5632 // Check for undetectable objects => true. 5594 // Check for undetectable objects => true.
5633 __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); 5595 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5634 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); 5596 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5635 __ tst(ip, Operand(1 << Map::kIsUndetectable)); 5597 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5636 final_branch_condition = ne; 5598 final_branch_condition = ne;
5637 5599
5638 } else if (type_name->Equals(heap()->function_string())) { 5600 } else if (type_name->Equals(heap()->function_string())) {
5639 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 5601 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5602 Register type_reg = scratch;
5640 __ JumpIfSmi(input, false_label); 5603 __ JumpIfSmi(input, false_label);
5641 __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE); 5604 __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
5642 __ b(eq, true_label); 5605 __ b(eq, true_label);
5643 __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE)); 5606 __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
5644 final_branch_condition = eq; 5607 final_branch_condition = eq;
5645 5608
5646 } else if (type_name->Equals(heap()->object_string())) { 5609 } else if (type_name->Equals(heap()->object_string())) {
5610 Register map = scratch;
5647 __ JumpIfSmi(input, false_label); 5611 __ JumpIfSmi(input, false_label);
5648 if (!FLAG_harmony_typeof) { 5612 if (!FLAG_harmony_typeof) {
5649 __ CompareRoot(input, Heap::kNullValueRootIndex); 5613 __ CompareRoot(input, Heap::kNullValueRootIndex);
5650 __ b(eq, true_label); 5614 __ b(eq, true_label);
5651 } 5615 }
5652 __ CompareObjectType(input, input, scratch, 5616 __ CheckObjectTypeRange(input,
5653 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); 5617 map,
5654 __ b(lt, false_label); 5618 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5655 __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); 5619 LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
5656 __ b(gt, false_label); 5620 false_label);
5657 // Check for undetectable objects => false. 5621 // Check for undetectable objects => false.
5658 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); 5622 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5659 __ tst(ip, Operand(1 << Map::kIsUndetectable)); 5623 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5660 final_branch_condition = eq; 5624 final_branch_condition = eq;
5661 5625
5662 } else { 5626 } else {
5663 __ b(false_label); 5627 __ b(false_label);
5664 } 5628 }
5665 5629
5666 return final_branch_condition; 5630 return final_branch_condition;
5667 } 5631 }
5668 5632
5669 5633
(...skipping 248 matching lines...) Expand 10 before | Expand all | Expand 10 after
5918 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); 5882 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
5919 __ ldr(result, FieldMemOperand(scratch, 5883 __ ldr(result, FieldMemOperand(scratch,
5920 FixedArray::kHeaderSize - kPointerSize)); 5884 FixedArray::kHeaderSize - kPointerSize));
5921 __ bind(&done); 5885 __ bind(&done);
5922 } 5886 }
5923 5887
5924 5888
5925 #undef __ 5889 #undef __
5926 5890
5927 } } // namespace v8::internal 5891 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/arm/macro-assembler-arm.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698