Chromium Code Reviews

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 7860035: Merge bleeding edge up to 9192 into the GC branch. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 3 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 180 matching lines...)
191 } else { 191 } else {
192 __ CallRuntime(Runtime::kNewFunctionContext, 1); 192 __ CallRuntime(Runtime::kNewFunctionContext, 1);
193 } 193 }
194 RecordSafepoint(Safepoint::kNoDeoptimizationIndex); 194 RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
195 // Context is returned in both r0 and cp. It replaces the context 195 // Context is returned in both r0 and cp. It replaces the context
196 // passed to us. It's saved in the stack and kept live in cp. 196 // passed to us. It's saved in the stack and kept live in cp.
197 __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 197 __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
198 // Copy any necessary parameters into the context. 198 // Copy any necessary parameters into the context.
199 int num_parameters = scope()->num_parameters(); 199 int num_parameters = scope()->num_parameters();
200 for (int i = 0; i < num_parameters; i++) { 200 for (int i = 0; i < num_parameters; i++) {
201 Slot* slot = scope()->parameter(i)->AsSlot(); 201 Variable* var = scope()->parameter(i);
202 if (slot != NULL && slot->type() == Slot::CONTEXT) { 202 if (var->IsContextSlot()) {
203 int parameter_offset = StandardFrameConstants::kCallerSPOffset + 203 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
204 (num_parameters - 1 - i) * kPointerSize; 204 (num_parameters - 1 - i) * kPointerSize;
205 // Load parameter from stack. 205 // Load parameter from stack.
206 __ ldr(r0, MemOperand(fp, parameter_offset)); 206 __ ldr(r0, MemOperand(fp, parameter_offset));
207 // Store it in the context. 207 // Store it in the context.
208 MemOperand target = ContextOperand(cp, slot->index()); 208 MemOperand target = ContextOperand(cp, var->index());
209 __ str(r0, target); 209 __ str(r0, target);
210 // Update the write barrier. This clobbers r3 and r0. 210 // Update the write barrier. This clobbers r3 and r0.
211 __ RecordWriteContextSlot( 211 __ RecordWriteContextSlot(
212 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); 212 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
213 } 213 }
214 } 214 }
215 Comment(";;; End allocate local context"); 215 Comment(";;; End allocate local context");
216 } 216 }
217 217
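(Aside, not part of the patch.) In the hunk above, each parameter that the scope has context-allocated is loaded from the caller's frame and stored into the freshly allocated context; because the context is a heap object, every such store is followed by the RecordWriteContextSlot write barrier, now passing kSaveFPRegs on this branch. The stack slot comes from kCallerSPOffset plus (num_parameters - 1 - i) * kPointerSize, so the last parameter has the smallest offset from fp. A minimal sketch of that arithmetic; ParameterOffset and the example kCallerSPOffset value are illustrative only, not V8 APIs:

    #include <cassert>

    // Illustrative helper only: parameter i of num_parameters lives this many
    // bytes above the frame pointer; the last parameter has the smallest offset.
    int ParameterOffset(int caller_sp_offset, int num_parameters, int i,
                        int pointer_size = 4 /* ARM kPointerSize */) {
      return caller_sp_offset + (num_parameters - 1 - i) * pointer_size;
    }

    int main() {
      const int kCallerSPOffset = 8;  // hypothetical value, for the example only
      assert(ParameterOffset(kCallerSPOffset, 2, 0) == kCallerSPOffset + 4);
      assert(ParameterOffset(kCallerSPOffset, 2, 1) == kCallerSPOffset);
      return 0;
    }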
218 // Trace the call. 218 // Trace the call.
(...skipping 3234 matching lines...)
3453 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 3453 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3454 class DeferredStringCharCodeAt: public LDeferredCode { 3454 class DeferredStringCharCodeAt: public LDeferredCode {
3455 public: 3455 public:
3456 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) 3456 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3457 : LDeferredCode(codegen), instr_(instr) { } 3457 : LDeferredCode(codegen), instr_(instr) { }
3458 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } 3458 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3459 private: 3459 private:
3460 LStringCharCodeAt* instr_; 3460 LStringCharCodeAt* instr_;
3461 }; 3461 };
3462 3462
3463 Register scratch = scratch0();
3464 Register string = ToRegister(instr->string()); 3463 Register string = ToRegister(instr->string());
3465 Register index = no_reg; 3464 Register index = ToRegister(instr->index());
3466 int const_index = -1;
3467 if (instr->index()->IsConstantOperand()) {
3468 const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3469 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
3470 if (!Smi::IsValid(const_index)) {
3471 // Guaranteed to be out of bounds because of the assert above.
3472 // So the bounds check that must dominate this instruction must
3473 // have deoptimized already.
3474 if (FLAG_debug_code) {
3475 __ Abort("StringCharCodeAt: out of bounds index.");
3476 }
3477 // No code needs to be generated.
3478 return;
3479 }
3480 } else {
3481 index = ToRegister(instr->index());
3482 }
3483 Register result = ToRegister(instr->result()); 3465 Register result = ToRegister(instr->result());
3484 3466
3485 DeferredStringCharCodeAt* deferred = 3467 DeferredStringCharCodeAt* deferred =
3486 new DeferredStringCharCodeAt(this, instr); 3468 new DeferredStringCharCodeAt(this, instr);
3487 3469
3488 Label flat_string, ascii_string, done;
3489
3490 // Fetch the instance type of the receiver into result register. 3470 // Fetch the instance type of the receiver into result register.
3491 __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset)); 3471 __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
3492 __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); 3472 __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
3493 3473
3494 // We need special handling for non-flat strings. 3474 // We need special handling for indirect strings.
3495 STATIC_ASSERT(kSeqStringTag == 0); 3475 Label check_sequential;
3496 __ tst(result, Operand(kStringRepresentationMask)); 3476 __ tst(result, Operand(kIsIndirectStringMask));
3497 __ b(eq, &flat_string); 3477 __ b(eq, &check_sequential);
3498 3478
3499 // Handle non-flat strings. 3479 // Dispatch on the indirect string shape: slice or cons.
3500 __ tst(result, Operand(kIsConsStringMask)); 3480 Label cons_string;
3501 __ b(eq, deferred->entry()); 3481 __ tst(result, Operand(kSlicedNotConsMask));
3482 __ b(eq, &cons_string);
3502 3483
3503 // ConsString. 3484 // Handle slices.
3485 Label indirect_string_loaded;
3486 __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
3487 __ add(index, index, Operand(result, ASR, kSmiTagSize));
3488 __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
3489 __ jmp(&indirect_string_loaded);
3490
3491 // Handle conses.
3504 // Check whether the right hand side is the empty string (i.e. if 3492 // Check whether the right hand side is the empty string (i.e. if
3505 // this is really a flat string in a cons string). If that is not 3493 // this is really a flat string in a cons string). If that is not
3506 // the case we would rather go to the runtime system now to flatten 3494 // the case we would rather go to the runtime system now to flatten
3507 // the string. 3495 // the string.
3508 __ ldr(scratch, FieldMemOperand(string, ConsString::kSecondOffset)); 3496 __ bind(&cons_string);
3497 __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
3509 __ LoadRoot(ip, Heap::kEmptyStringRootIndex); 3498 __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
3510 __ cmp(scratch, ip); 3499 __ cmp(result, ip);
3511 __ b(ne, deferred->entry()); 3500 __ b(ne, deferred->entry());
3512 // Get the first of the two strings and load its instance type. 3501 // Get the first of the two strings and load its instance type.
3513 __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset)); 3502 __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
3503
3504 __ bind(&indirect_string_loaded);
3514 __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset)); 3505 __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
3515 __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); 3506 __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
3516 // If the first cons component is also non-flat, then go to runtime. 3507
3508 // Check whether the string is sequential. The only non-sequential
3509 // shapes we support have just been unwrapped above.
3510 __ bind(&check_sequential);
3517 STATIC_ASSERT(kSeqStringTag == 0); 3511 STATIC_ASSERT(kSeqStringTag == 0);
3518 __ tst(result, Operand(kStringRepresentationMask)); 3512 __ tst(result, Operand(kStringRepresentationMask));
3519 __ b(ne, deferred->entry()); 3513 __ b(ne, deferred->entry());
3520 3514
3521 // Check for 1-byte or 2-byte string. 3515 // Dispatch on the encoding: ASCII or two-byte.
3522 __ bind(&flat_string); 3516 Label ascii_string;
3523 STATIC_ASSERT(kAsciiStringTag != 0); 3517 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
3518 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3524 __ tst(result, Operand(kStringEncodingMask)); 3519 __ tst(result, Operand(kStringEncodingMask));
3525 __ b(ne, &ascii_string); 3520 __ b(ne, &ascii_string);
3526 3521
3527 // 2-byte string. 3522 // Two-byte string.
3528 // Load the 2-byte character code into the result register. 3523 // Load the two-byte character code into the result register.
3529 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 3524 Label done;
3530 if (instr->index()->IsConstantOperand()) { 3525 __ add(result,
3531 __ ldrh(result, 3526 string,
3532 FieldMemOperand(string, 3527 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3533 SeqTwoByteString::kHeaderSize + 2 * const_index)); 3528 __ ldrh(result, MemOperand(result, index, LSL, 1));
3534 } else {
3535 __ add(scratch,
3536 string,
3537 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3538 __ ldrh(result, MemOperand(scratch, index, LSL, 1));
3539 }
3540 __ jmp(&done); 3529 __ jmp(&done);
3541 3530
3542 // ASCII string. 3531 // ASCII string.
3543 // Load the byte into the result register. 3532 // Load the byte into the result register.
3544 __ bind(&ascii_string); 3533 __ bind(&ascii_string);
3545 if (instr->index()->IsConstantOperand()) { 3534 __ add(result,
3546 __ ldrb(result, FieldMemOperand(string, 3535 string,
3547 SeqAsciiString::kHeaderSize + const_index)); 3536 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3548 } else { 3537 __ ldrb(result, MemOperand(result, index));
3549 __ add(scratch, 3538
3550 string,
3551 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3552 __ ldrb(result, MemOperand(scratch, index));
3553 }
3554 __ bind(&done); 3539 __ bind(&done);
3555 __ bind(deferred->exit()); 3540 __ bind(deferred->exit());
3556 } 3541 }
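(Aside, not part of the patch.) The rewritten DoStringCharCodeAt above drops the constant-index special case and instead unwraps the two indirect string shapes inline: for a sliced string it untags the smi offset (ASR #1), adds it to the index and continues with the parent; for a cons string whose second half is the empty string it continues with the first half; anything else branches to the deferred runtime path. Once a sequential string is in hand, the character load is a plain indexed byte or halfword read. A small sketch of that final step, where LoadCharCode and payload are illustrative names, not V8 code:

    #include <cstdint>

    // 'payload' is assumed to point at the first character of a sequential
    // string, i.e. string + SeqString header size - heap-object tag.
    uint32_t LoadCharCode(const uint8_t* payload, int index, bool is_ascii) {
      if (is_ascii) {
        return payload[index];  // ldrb result, [result, index]
      }
      const uint16_t* two_byte = reinterpret_cast<const uint16_t*>(payload);
      return two_byte[index];   // ldrh result, [result, index, LSL #1]
    }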
3557 3542
3558 3543
3559 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { 3544 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3560 Register string = ToRegister(instr->string()); 3545 Register string = ToRegister(instr->string());
3561 Register result = ToRegister(instr->result()); 3546 Register result = ToRegister(instr->result());
3562 Register scratch = scratch0(); 3547 Register scratch = scratch0();
3563 3548
(...skipping 207 matching lines...)
3771 ASSERT(input->IsRegister() && input->Equals(instr->result())); 3756 ASSERT(input->IsRegister() && input->Equals(instr->result()));
3772 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); 3757 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
3773 __ SmiTag(ToRegister(input)); 3758 __ SmiTag(ToRegister(input));
3774 } 3759 }
3775 3760
3776 3761
3777 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 3762 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
3778 LOperand* input = instr->InputAt(0); 3763 LOperand* input = instr->InputAt(0);
3779 ASSERT(input->IsRegister() && input->Equals(instr->result())); 3764 ASSERT(input->IsRegister() && input->Equals(instr->result()));
3780 if (instr->needs_check()) { 3765 if (instr->needs_check()) {
3781 ASSERT(kHeapObjectTag == 1); 3766 STATIC_ASSERT(kHeapObjectTag == 1);
3782 // If the input is a HeapObject, SmiUntag will set the carry flag. 3767 // If the input is a HeapObject, SmiUntag will set the carry flag.
3783 __ SmiUntag(ToRegister(input), SetCC); 3768 __ SmiUntag(ToRegister(input), SetCC);
3784 DeoptimizeIf(cs, instr->environment()); 3769 DeoptimizeIf(cs, instr->environment());
3785 } else { 3770 } else {
3786 __ SmiUntag(ToRegister(input)); 3771 __ SmiUntag(ToRegister(input));
3787 } 3772 }
3788 } 3773 }
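(Aside, not part of the patch.) The STATIC_ASSERT(kHeapObjectTag == 1) above captures the trick used in both places: bit 0 of a tagged value is 0 for a smi and 1 for a heap-object pointer. SmiUntag(reg, SetCC) shifts right by one and leaves that bit in the carry flag, so DeoptimizeIf(cs, ...) fires exactly when the input was a heap object. In the deferred number-untagging code further down, adc(input_reg, input_reg, input_reg) computes input*2 + carry and therefore restores the original tagged value that was optimistically untagged. A minimal C++ model of that arithmetic; SmiUntag and Retag here are stand-ins, not V8 functions:

    #include <cassert>
    #include <cstdint>

    // Models SmiUntag(value, SetCC): arithmetic shift right by one (as on ARM),
    // with the bit shifted out recorded as the carry flag.
    struct Untagged { int32_t value; bool carry; };

    Untagged SmiUntag(int32_t tagged) {
      return Untagged{tagged >> 1, (tagged & 1) != 0};
    }

    // Models adc(reg, reg, reg): reg + reg + carry (mod 2^32), which re-tags
    // the value that the deferred code optimistically untagged.
    int32_t Retag(Untagged u) {
      return static_cast<int32_t>(2u * static_cast<uint32_t>(u.value) + u.carry);
    }

    int main() {
      int32_t smi = 7 << 1;                              // smi 7: tag bit clear
      int32_t ptr = static_cast<int32_t>(0x80040001u);   // heap pointer: tag bit set

      assert(!SmiUntag(smi).carry);           // fast path, no deopt
      assert(SmiUntag(ptr).carry);            // carry set: DeoptimizeIf(cs) / deferred path
      assert(Retag(SmiUntag(ptr)) == ptr);    // adc restores the tagged pointer
      return 0;
    }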
3789 3774
3790 3775
3791 void LCodeGen::EmitNumberUntagD(Register input_reg, 3776 void LCodeGen::EmitNumberUntagD(Register input_reg,
(...skipping 64 matching lines...)
3856 SwVfpRegister single_scratch = double_scratch.low(); 3841 SwVfpRegister single_scratch = double_scratch.low();
3857 3842
3858 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); 3843 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
3859 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); 3844 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
3860 3845
3861 Label done; 3846 Label done;
3862 3847
3863 // The input was optimistically untagged; revert it. 3848 // The input was optimistically untagged; revert it.
3864 // The carry flag is set when we reach this deferred code as we just executed 3849 // The carry flag is set when we reach this deferred code as we just executed
3865 // SmiUntag(heap_object, SetCC) 3850 // SmiUntag(heap_object, SetCC)
3866 ASSERT(kHeapObjectTag == 1); 3851 STATIC_ASSERT(kHeapObjectTag == 1);
3867 __ adc(input_reg, input_reg, Operand(input_reg)); 3852 __ adc(input_reg, input_reg, Operand(input_reg));
3868 3853
3869 // Heap number map check. 3854 // Heap number map check.
3870 __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 3855 __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
3871 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 3856 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3872 __ cmp(scratch1, Operand(ip)); 3857 __ cmp(scratch1, Operand(ip));
3873 3858
3874 if (instr->truncating()) { 3859 if (instr->truncating()) {
3875 Register scratch3 = ToRegister(instr->TempAt(1)); 3860 Register scratch3 = ToRegister(instr->TempAt(1));
3876 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2)); 3861 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
(...skipping 652 matching lines...)
4529 ASSERT(osr_pc_offset_ == -1); 4514 ASSERT(osr_pc_offset_ == -1);
4530 osr_pc_offset_ = masm()->pc_offset(); 4515 osr_pc_offset_ = masm()->pc_offset();
4531 } 4516 }
4532 4517
4533 4518
4534 4519
4535 4520
4536 #undef __ 4521 #undef __
4537 4522
4538 } } // namespace v8::internal 4523 } } // namespace v8::internal