| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 73 matching lines...) |
| 84 code->set_stack_slots(GetStackSlotCount()); | 84 code->set_stack_slots(GetStackSlotCount()); |
| 85 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); | 85 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
| 86 if (FLAG_weak_embedded_maps_in_optimized_code) { | 86 if (FLAG_weak_embedded_maps_in_optimized_code) { |
| 87 RegisterDependentCodeForEmbeddedMaps(code); | 87 RegisterDependentCodeForEmbeddedMaps(code); |
| 88 } | 88 } |
| 89 PopulateDeoptimizationData(code); | 89 PopulateDeoptimizationData(code); |
| 90 info()->CommitDependencies(code); | 90 info()->CommitDependencies(code); |
| 91 } | 91 } |
| 92 | 92 |
| 93 | 93 |
| 94 void LCodeGen::Abort(BailoutReason reason) { | 94 void LCodeGen::Abort(const char* reason) { |
| 95 info()->set_bailout_reason(reason); | 95 info()->set_bailout_reason(reason); |
| 96 status_ = ABORTED; | 96 status_ = ABORTED; |
| 97 } | 97 } |
| 98 | 98 |
| 99 | 99 |
| 100 void LCodeGen::Comment(const char* format, ...) { | 100 void LCodeGen::Comment(const char* format, ...) { |
| 101 if (!FLAG_code_comments) return; | 101 if (!FLAG_code_comments) return; |
| 102 char buffer[4 * KB]; | 102 char buffer[4 * KB]; |
| 103 StringBuilder builder(buffer, ARRAY_SIZE(buffer)); | 103 StringBuilder builder(buffer, ARRAY_SIZE(buffer)); |
| 104 va_list arguments; | 104 va_list arguments; |
| (...skipping 222 matching lines...) |
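Reviewer note: `Comment` in the hunk above is a printf-style helper that formats into a fixed 4 KB stack buffer before handing the string to the assembler. A minimal standalone sketch of the same pattern, using the C library instead of V8's `StringBuilder` and a hypothetical `EmitComment` name:

```cpp
// Sketch of the buffer-and-varargs pattern used by Comment() above.
#include <cstdarg>
#include <cstdio>

static void EmitComment(const char* format, ...) {
  char buffer[4 * 1024];  // mirrors `char buffer[4 * KB]`
  va_list arguments;
  va_start(arguments, format);
  std::vsnprintf(buffer, sizeof(buffer), format, arguments);
  va_end(arguments);
  std::printf(";; %s\n", buffer);  // stand-in for the assembler's comment recorder
}
```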
| 327 bool LCodeGen::GenerateDeoptJumpTable() { | 327 bool LCodeGen::GenerateDeoptJumpTable() { |
| 328 // Check that the jump table is accessible from everywhere in the function | 328 // Check that the jump table is accessible from everywhere in the function |
| 329 // code, i.e. that offsets to the table can be encoded in the 24bit signed | 329 // code, i.e. that offsets to the table can be encoded in the 24bit signed |
| 330 // immediate of a branch instruction. | 330 // immediate of a branch instruction. |
| 331 // To simplify we consider the code size from the first instruction to the | 331 // To simplify we consider the code size from the first instruction to the |
| 332 // end of the jump table. We also don't consider the pc load delta. | 332 // end of the jump table. We also don't consider the pc load delta. |
| 333 // Each entry in the jump table generates one instruction and inlines one | 333 // Each entry in the jump table generates one instruction and inlines one |
| 334 // 32bit data after it. | 334 // 32bit data after it. |
| 335 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + | 335 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + |
| 336 deopt_jump_table_.length() * 7)) { | 336 deopt_jump_table_.length() * 7)) { |
| 337 Abort(kGeneratedCodeIsTooLarge); | 337 Abort("Generated code is too large"); |
| 338 } | 338 } |
| 339 | 339 |
| 340 if (deopt_jump_table_.length() > 0) { | 340 if (deopt_jump_table_.length() > 0) { |
| 341 Comment(";;; -------------------- Jump table --------------------"); | 341 Comment(";;; -------------------- Jump table --------------------"); |
| 342 } | 342 } |
| 343 Label table_start; | 343 Label table_start; |
| 344 __ bind(&table_start); | 344 __ bind(&table_start); |
| 345 Label needs_frame; | 345 Label needs_frame; |
| 346 for (int i = 0; i < deopt_jump_table_.length(); i++) { | 346 for (int i = 0; i < deopt_jump_table_.length(); i++) { |
| 347 __ bind(&deopt_jump_table_[i].label); | 347 __ bind(&deopt_jump_table_[i].label); |
| (...skipping 68 matching lines...) |
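Reviewer note: the size check at the top of `GenerateDeoptJumpTable` above guards the 24-bit signed branch immediate. A standalone sketch of that bound; the per-entry factor of 7 words is taken verbatim from the code, and the concrete sizes are hypothetical:

```cpp
// Branch offsets into the jump table must fit ARM's 24-bit signed
// immediate, so code size (in instructions) plus a conservative
// per-entry word count is tested before the table is emitted.
#include <cassert>

static bool is_int24(long x) {
  return x >= -(1L << 23) && x < (1L << 23);
}

int main() {
  const int kInstrSize = 4;   // bytes per ARM instruction
  int pc_offset = 1 << 20;    // hypothetical: 1 MB of code so far
  int table_length = 500;     // hypothetical deopt entry count
  assert(is_int24(pc_offset / kInstrSize + table_length * 7));
  return 0;
}
```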
| 416 return ToRegister(op->index()); | 416 return ToRegister(op->index()); |
| 417 } else if (op->IsConstantOperand()) { | 417 } else if (op->IsConstantOperand()) { |
| 418 LConstantOperand* const_op = LConstantOperand::cast(op); | 418 LConstantOperand* const_op = LConstantOperand::cast(op); |
| 419 HConstant* constant = chunk_->LookupConstant(const_op); | 419 HConstant* constant = chunk_->LookupConstant(const_op); |
| 420 Handle<Object> literal = constant->handle(); | 420 Handle<Object> literal = constant->handle(); |
| 421 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 421 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
| 422 if (r.IsInteger32()) { | 422 if (r.IsInteger32()) { |
| 423 ASSERT(literal->IsNumber()); | 423 ASSERT(literal->IsNumber()); |
| 424 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); | 424 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); |
| 425 } else if (r.IsDouble()) { | 425 } else if (r.IsDouble()) { |
| 426 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); | 426 Abort("EmitLoadRegister: Unsupported double immediate."); |
| 427 } else { | 427 } else { |
| 428 ASSERT(r.IsTagged()); | 428 ASSERT(r.IsTagged()); |
| 429 __ LoadObject(scratch, literal); | 429 __ LoadObject(scratch, literal); |
| 430 } | 430 } |
| 431 return scratch; | 431 return scratch; |
| 432 } else if (op->IsStackSlot() || op->IsArgument()) { | 432 } else if (op->IsStackSlot() || op->IsArgument()) { |
| 433 __ ldr(scratch, ToMemOperand(op)); | 433 __ ldr(scratch, ToMemOperand(op)); |
| 434 return scratch; | 434 return scratch; |
| 435 } | 435 } |
| 436 UNREACHABLE(); | 436 UNREACHABLE(); |
| (...skipping 17 matching lines...) |
| 454 HConstant* constant = chunk_->LookupConstant(const_op); | 454 HConstant* constant = chunk_->LookupConstant(const_op); |
| 455 Handle<Object> literal = constant->handle(); | 455 Handle<Object> literal = constant->handle(); |
| 456 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 456 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
| 457 if (r.IsInteger32()) { | 457 if (r.IsInteger32()) { |
| 458 ASSERT(literal->IsNumber()); | 458 ASSERT(literal->IsNumber()); |
| 459 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); | 459 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); |
| 460 __ vmov(flt_scratch, ip); | 460 __ vmov(flt_scratch, ip); |
| 461 __ vcvt_f64_s32(dbl_scratch, flt_scratch); | 461 __ vcvt_f64_s32(dbl_scratch, flt_scratch); |
| 462 return dbl_scratch; | 462 return dbl_scratch; |
| 463 } else if (r.IsDouble()) { | 463 } else if (r.IsDouble()) { |
| 464 Abort(kUnsupportedDoubleImmediate); | 464 Abort("unsupported double immediate"); |
| 465 } else if (r.IsTagged()) { | 465 } else if (r.IsTagged()) { |
| 466 Abort(kUnsupportedTaggedImmediate); | 466 Abort("unsupported tagged immediate"); |
| 467 } | 467 } |
| 468 } else if (op->IsStackSlot() || op->IsArgument()) { | 468 } else if (op->IsStackSlot() || op->IsArgument()) { |
| 469 // TODO(regis): Why is vldr not taking a MemOperand? | 469 // TODO(regis): Why is vldr not taking a MemOperand? |
| 470 // __ vldr(dbl_scratch, ToMemOperand(op)); | 470 // __ vldr(dbl_scratch, ToMemOperand(op)); |
| 471 MemOperand mem_op = ToMemOperand(op); | 471 MemOperand mem_op = ToMemOperand(op); |
| 472 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset()); | 472 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset()); |
| 473 return dbl_scratch; | 473 return dbl_scratch; |
| 474 } | 474 } |
| 475 UNREACHABLE(); | 475 UNREACHABLE(); |
| 476 return dbl_scratch; | 476 return dbl_scratch; |
| (...skipping 50 matching lines...) |
| 527 LConstantOperand* const_op = LConstantOperand::cast(op); | 527 LConstantOperand* const_op = LConstantOperand::cast(op); |
| 528 HConstant* constant = chunk()->LookupConstant(const_op); | 528 HConstant* constant = chunk()->LookupConstant(const_op); |
| 529 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 529 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
| 530 if (r.IsSmi()) { | 530 if (r.IsSmi()) { |
| 531 ASSERT(constant->HasSmiValue()); | 531 ASSERT(constant->HasSmiValue()); |
| 532 return Operand(Smi::FromInt(constant->Integer32Value())); | 532 return Operand(Smi::FromInt(constant->Integer32Value())); |
| 533 } else if (r.IsInteger32()) { | 533 } else if (r.IsInteger32()) { |
| 534 ASSERT(constant->HasInteger32Value()); | 534 ASSERT(constant->HasInteger32Value()); |
| 535 return Operand(constant->Integer32Value()); | 535 return Operand(constant->Integer32Value()); |
| 536 } else if (r.IsDouble()) { | 536 } else if (r.IsDouble()) { |
| 537 Abort(kToOperandUnsupportedDoubleImmediate); | 537 Abort("ToOperand Unsupported double immediate."); |
| 538 } | 538 } |
| 539 ASSERT(r.IsTagged()); | 539 ASSERT(r.IsTagged()); |
| 540 return Operand(constant->handle()); | 540 return Operand(constant->handle()); |
| 541 } else if (op->IsRegister()) { | 541 } else if (op->IsRegister()) { |
| 542 return Operand(ToRegister(op)); | 542 return Operand(ToRegister(op)); |
| 543 } else if (op->IsDoubleRegister()) { | 543 } else if (op->IsDoubleRegister()) { |
| 544 Abort(kToOperandIsDoubleRegisterUnimplemented); | 544 Abort("ToOperand IsDoubleRegister unimplemented"); |
| 545 return Operand::Zero(); | 545 return Operand::Zero(); |
| 546 } | 546 } |
| 547 // Stack slots not implemented, use ToMemOperand instead. | 547 // Stack slots not implemented, use ToMemOperand instead. |
| 548 UNREACHABLE(); | 548 UNREACHABLE(); |
| 549 return Operand::Zero(); | 549 return Operand::Zero(); |
| 550 } | 550 } |
| 551 | 551 |
| 552 | 552 |
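Reviewer note: `ToOperand` above materializes small-integer constants via `Smi::FromInt`. On the 32-bit ARM port a smi is the value shifted left one bit with a zero tag bit, which is also what the `tst ..., kSmiTagMask` checks elsewhere in this file rely on. A sketch in plain C++, with the tag constants as assumed for the 32-bit layout:

```cpp
// 32-bit smi tagging: the payload lives in the upper 31 bits and the
// low bit is the tag (0 = smi), so `tst reg, #kSmiTagMask` sets eq
// exactly for smis.
#include <cassert>
#include <cstdint>

static const int kSmiTagSize = 1;                    // assumed 32-bit layout
static const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;

static int32_t SmiFromInt(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
}
static bool IsSmi(int32_t tagged) { return (tagged & kSmiTagMask) == 0; }
static int32_t SmiValue(int32_t tagged) { return tagged >> kSmiTagSize; }

int main() {
  assert(IsSmi(SmiFromInt(42)) && SmiValue(SmiFromInt(42)) == 42);
  assert(IsSmi(SmiFromInt(-7)) && SmiValue(SmiFromInt(-7)) == -7);
  return 0;
}
```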
| 553 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { | 553 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { |
| 554 ASSERT(!op->IsRegister()); | 554 ASSERT(!op->IsRegister()); |
| (...skipping 210 matching lines...) |
| 765 void LCodeGen::DeoptimizeIf(Condition cc, | 765 void LCodeGen::DeoptimizeIf(Condition cc, |
| 766 LEnvironment* environment, | 766 LEnvironment* environment, |
| 767 Deoptimizer::BailoutType bailout_type) { | 767 Deoptimizer::BailoutType bailout_type) { |
| 768 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 768 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 769 ASSERT(environment->HasBeenRegistered()); | 769 ASSERT(environment->HasBeenRegistered()); |
| 770 int id = environment->deoptimization_index(); | 770 int id = environment->deoptimization_index(); |
| 771 ASSERT(info()->IsOptimizing() || info()->IsStub()); | 771 ASSERT(info()->IsOptimizing() || info()->IsStub()); |
| 772 Address entry = | 772 Address entry = |
| 773 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 773 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 774 if (entry == NULL) { | 774 if (entry == NULL) { |
| 775 Abort(kBailoutWasNotPrepared); | 775 Abort("bailout was not prepared"); |
| 776 return; | 776 return; |
| 777 } | 777 } |
| 778 | 778 |
| 779 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. | 779 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. |
| 780 if (FLAG_deopt_every_n_times == 1 && | 780 if (FLAG_deopt_every_n_times == 1 && |
| 781 !info()->IsStub() && | 781 !info()->IsStub() && |
| 782 info()->opt_count() == id) { | 782 info()->opt_count() == id) { |
| 783 ASSERT(frame_is_built_); | 783 ASSERT(frame_is_built_); |
| 784 __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 784 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
| 785 return; | 785 return; |
| (...skipping 876 matching lines...) |
| 1662 } | 1662 } |
| 1663 | 1663 |
| 1664 switch (instr->op()) { | 1664 switch (instr->op()) { |
| 1665 case Token::BIT_AND: | 1665 case Token::BIT_AND: |
| 1666 __ and_(result, left, right); | 1666 __ and_(result, left, right); |
| 1667 break; | 1667 break; |
| 1668 case Token::BIT_OR: | 1668 case Token::BIT_OR: |
| 1669 __ orr(result, left, right); | 1669 __ orr(result, left, right); |
| 1670 break; | 1670 break; |
| 1671 case Token::BIT_XOR: | 1671 case Token::BIT_XOR: |
| 1672 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { | 1672 __ eor(result, left, right); |
| 1673 __ mvn(result, Operand(left)); | |
| 1674 } else { | |
| 1675 __ eor(result, left, right); | |
| 1676 } | |
| 1677 break; | 1673 break; |
| 1678 default: | 1674 default: |
| 1679 UNREACHABLE(); | 1675 UNREACHABLE(); |
| 1680 break; | 1676 break; |
| 1681 } | 1677 } |
| 1682 } | 1678 } |
| 1683 | 1679 |
| 1684 | 1680 |
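Reviewer note: the `BIT_XOR` hunk above reverts a peephole that turned XOR-with-all-ones into a single `mvn`; the standalone `DoBitNotI` handler re-added further down emits that `mvn` for explicit bitwise NOT instead. The identity the peephole relied on, as a sketch:

```cpp
// XOR with an all-ones constant is bitwise NOT, i.e.
// `eor rD, rN, #~0` and `mvn rD, rN` compute the same result.
#include <cassert>
#include <cstdint>

int main() {
  int32_t left = 0x12345678;
  assert((left ^ ~int32_t(0)) == ~left);
  return 0;
}
```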
| 1685 void LCodeGen::DoShiftI(LShiftI* instr) { | 1681 void LCodeGen::DoShiftI(LShiftI* instr) { |
| 1686 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so | 1682 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so |
| (...skipping 246 matching lines...) |
| 1933 | 1929 |
| 1934 if (FLAG_debug_code) { | 1930 if (FLAG_debug_code) { |
| 1935 __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); | 1931 __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); |
| 1936 __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); | 1932 __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); |
| 1937 | 1933 |
| 1938 __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); | 1934 __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); |
| 1939 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; | 1935 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; |
| 1940 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; | 1936 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
| 1941 __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING | 1937 __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING |
| 1942 ? one_byte_seq_type : two_byte_seq_type)); | 1938 ? one_byte_seq_type : two_byte_seq_type)); |
| 1943 __ Check(eq, kUnexpectedStringType); | 1939 __ Check(eq, "Unexpected string type"); |
| 1944 } | 1940 } |
| 1945 | 1941 |
| 1946 __ add(ip, | 1942 __ add(ip, |
| 1947 string, | 1943 string, |
| 1948 Operand(SeqString::kHeaderSize - kHeapObjectTag)); | 1944 Operand(SeqString::kHeaderSize - kHeapObjectTag)); |
| 1949 if (encoding == String::ONE_BYTE_ENCODING) { | 1945 if (encoding == String::ONE_BYTE_ENCODING) { |
| 1950 __ strb(value, MemOperand(ip, index)); | 1946 __ strb(value, MemOperand(ip, index)); |
| 1951 } else { | 1947 } else { |
| 1952 // MemOperand with ip as the base register is not allowed for strh, so | 1948 // MemOperand with ip as the base register is not allowed for strh, so |
| 1953 // we do the address calculation explicitly. | 1949 // we do the address calculation explicitly. |
| 1954 __ add(ip, ip, Operand(index, LSL, 1)); | 1950 __ add(ip, ip, Operand(index, LSL, 1)); |
| 1955 __ strh(value, MemOperand(ip)); | 1951 __ strh(value, MemOperand(ip)); |
| 1956 } | 1952 } |
| 1957 } | 1953 } |
| 1958 | 1954 |
| 1959 | 1955 |
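Reviewer note: the debug check in `DoSeqStringSetChar` above masks the instance type down to its representation and encoding bits before comparing against the expected sequential-string tag. A sketch of that masking; the constant values here are assumed for illustration, not V8's authoritative layout:

```cpp
// Keep only the representation and encoding bits of the instance type,
// then compare with the expected seq-string tag. Constants are assumed.
#include <cassert>
#include <cstdint>

static const uint32_t kStringRepresentationMask = 0x3;  // assumed
static const uint32_t kStringEncodingMask = 0x4;        // assumed
static const uint32_t kSeqStringTag = 0x0;              // assumed
static const uint32_t kOneByteStringTag = 0x4;          // assumed

int main() {
  uint32_t instance_type = 0x24;  // hypothetical seq one-byte string type
  uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
  uint32_t masked =
      instance_type & (kStringRepresentationMask | kStringEncodingMask);
  assert(masked == one_byte_seq_type);
  return 0;
}
```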
| 1956 void LCodeGen::DoBitNotI(LBitNotI* instr) { |
| 1957 Register input = ToRegister(instr->value()); |
| 1958 Register result = ToRegister(instr->result()); |
| 1959 __ mvn(result, Operand(input)); |
| 1960 } |
| 1961 |
| 1962 |
| 1960 void LCodeGen::DoThrow(LThrow* instr) { | 1963 void LCodeGen::DoThrow(LThrow* instr) { |
| 1961 Register input_reg = EmitLoadRegister(instr->value(), ip); | 1964 Register input_reg = EmitLoadRegister(instr->value(), ip); |
| 1962 __ push(input_reg); | 1965 __ push(input_reg); |
| 1963 CallRuntime(Runtime::kThrow, 1, instr); | 1966 CallRuntime(Runtime::kThrow, 1, instr); |
| 1964 | 1967 |
| 1965 if (FLAG_debug_code) { | 1968 if (FLAG_debug_code) { |
| 1966 __ stop("Unreachable code."); | 1969 __ stop("Unreachable code."); |
| 1967 } | 1970 } |
| 1968 } | 1971 } |
| 1969 | 1972 |
| (...skipping 1220 matching lines...) |
| 3190 | 3193 |
| 3191 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { | 3194 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { |
| 3192 Register external_pointer = ToRegister(instr->elements()); | 3195 Register external_pointer = ToRegister(instr->elements()); |
| 3193 Register key = no_reg; | 3196 Register key = no_reg; |
| 3194 ElementsKind elements_kind = instr->elements_kind(); | 3197 ElementsKind elements_kind = instr->elements_kind(); |
| 3195 bool key_is_constant = instr->key()->IsConstantOperand(); | 3198 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 3196 int constant_key = 0; | 3199 int constant_key = 0; |
| 3197 if (key_is_constant) { | 3200 if (key_is_constant) { |
| 3198 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 3201 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 3199 if (constant_key & 0xF0000000) { | 3202 if (constant_key & 0xF0000000) { |
| 3200 Abort(kArrayIndexConstantValueTooBig); | 3203 Abort("array index constant value too big."); |
| 3201 } | 3204 } |
| 3202 } else { | 3205 } else { |
| 3203 key = ToRegister(instr->key()); | 3206 key = ToRegister(instr->key()); |
| 3204 } | 3207 } |
| 3205 int element_size_shift = ElementsKindToShiftSize(elements_kind); | 3208 int element_size_shift = ElementsKindToShiftSize(elements_kind); |
| 3206 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 3209 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
| 3207 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 3210 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
| 3208 int additional_offset = instr->additional_index() << element_size_shift; | 3211 int additional_offset = instr->additional_index() << element_size_shift; |
| 3209 | 3212 |
| 3210 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || | 3213 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || |
| (...skipping 63 matching lines...) |
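Reviewer note: `DoLoadKeyedExternalArray` above (like the keyed stores later in this patch) folds a constant key into the base offset and scales a register key by the element size, reducing the scale by the smi tag shift when the key is tagged. The arithmetic, as a standalone sketch with hypothetical values:

```cpp
// A smi key is already shifted left by the tag size, so the scale
// applied to it drops by kSmiTagSize; additional_index is folded in
// pre-shifted by the element size.
#include <cassert>

int main() {
  const int kSmiTagSize = 1;
  const int element_size_shift = 3;  // 8-byte (double) elements
  const bool key_is_smi = true;
  int shift_size = key_is_smi ? element_size_shift - kSmiTagSize
                              : element_size_shift;
  int key = 5 << kSmiTagSize;        // tagged key holding index 5
  int additional_index = 2;
  int additional_offset = additional_index << element_size_shift;
  int byte_offset = (key << shift_size) + additional_offset;
  assert(byte_offset == (5 + 2) * 8); // element 7 of an 8-byte-wide array
  return 0;
}
```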
| 3274 DwVfpRegister result = ToDoubleRegister(instr->result()); | 3277 DwVfpRegister result = ToDoubleRegister(instr->result()); |
| 3275 Register scratch = scratch0(); | 3278 Register scratch = scratch0(); |
| 3276 | 3279 |
| 3277 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); | 3280 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); |
| 3278 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 3281 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
| 3279 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 3282 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
| 3280 int constant_key = 0; | 3283 int constant_key = 0; |
| 3281 if (key_is_constant) { | 3284 if (key_is_constant) { |
| 3282 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 3285 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 3283 if (constant_key & 0xF0000000) { | 3286 if (constant_key & 0xF0000000) { |
| 3284 Abort(kArrayIndexConstantValueTooBig); | 3287 Abort("array index constant value too big."); |
| 3285 } | 3288 } |
| 3286 } else { | 3289 } else { |
| 3287 key = ToRegister(instr->key()); | 3290 key = ToRegister(instr->key()); |
| 3288 } | 3291 } |
| 3289 | 3292 |
| 3290 int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + | 3293 int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + |
| 3291 ((constant_key + instr->additional_index()) << element_size_shift); | 3294 ((constant_key + instr->additional_index()) << element_size_shift); |
| 3292 if (!key_is_constant) { | 3295 if (!key_is_constant) { |
| 3293 __ add(elements, elements, Operand(key, LSL, shift_size)); | 3296 __ add(elements, elements, Operand(key, LSL, shift_size)); |
| 3294 } | 3297 } |
| (...skipping 240 matching lines...) |
| 3535 ParameterCount actual(receiver); | 3538 ParameterCount actual(receiver); |
| 3536 __ InvokeFunction(function, actual, CALL_FUNCTION, | 3539 __ InvokeFunction(function, actual, CALL_FUNCTION, |
| 3537 safepoint_generator, CALL_AS_METHOD); | 3540 safepoint_generator, CALL_AS_METHOD); |
| 3538 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 3541 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 3539 } | 3542 } |
| 3540 | 3543 |
| 3541 | 3544 |
| 3542 void LCodeGen::DoPushArgument(LPushArgument* instr) { | 3545 void LCodeGen::DoPushArgument(LPushArgument* instr) { |
| 3543 LOperand* argument = instr->value(); | 3546 LOperand* argument = instr->value(); |
| 3544 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { | 3547 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { |
| 3545 Abort(kDoPushArgumentNotImplementedForDoubleType); | 3548 Abort("DoPushArgument not implemented for double type."); |
| 3546 } else { | 3549 } else { |
| 3547 Register argument_reg = EmitLoadRegister(argument, ip); | 3550 Register argument_reg = EmitLoadRegister(argument, ip); |
| 3548 __ push(argument_reg); | 3551 __ push(argument_reg); |
| 3549 } | 3552 } |
| 3550 } | 3553 } |
| 3551 | 3554 |
| 3552 | 3555 |
| 3553 void LCodeGen::DoDrop(LDrop* instr) { | 3556 void LCodeGen::DoDrop(LDrop* instr) { |
| 3554 __ Drop(instr->count()); | 3557 __ Drop(instr->count()); |
| 3555 } | 3558 } |
| (...skipping 199 matching lines...) |
| 3755 virtual LInstruction* instr() { return instr_; } | 3758 virtual LInstruction* instr() { return instr_; } |
| 3756 private: | 3759 private: |
| 3757 LMathAbs* instr_; | 3760 LMathAbs* instr_; |
| 3758 }; | 3761 }; |
| 3759 | 3762 |
| 3760 Representation r = instr->hydrogen()->value()->representation(); | 3763 Representation r = instr->hydrogen()->value()->representation(); |
| 3761 if (r.IsDouble()) { | 3764 if (r.IsDouble()) { |
| 3762 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3765 DwVfpRegister input = ToDoubleRegister(instr->value()); |
| 3763 DwVfpRegister result = ToDoubleRegister(instr->result()); | 3766 DwVfpRegister result = ToDoubleRegister(instr->result()); |
| 3764 __ vabs(result, input); | 3767 __ vabs(result, input); |
| 3765 } else if (r.IsSmiOrInteger32()) { | 3768 } else if (r.IsInteger32()) { |
| 3766 EmitIntegerMathAbs(instr); | 3769 EmitIntegerMathAbs(instr); |
| 3767 } else { | 3770 } else { |
| 3768 // Representation is tagged. | 3771 // Representation is tagged. |
| 3769 DeferredMathAbsTaggedHeapNumber* deferred = | 3772 DeferredMathAbsTaggedHeapNumber* deferred = |
| 3770 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); | 3773 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); |
| 3771 Register input = ToRegister(instr->value()); | 3774 Register input = ToRegister(instr->value()); |
| 3772 // Smi check. | 3775 // Smi check. |
| 3773 __ JumpIfNotSmi(input, deferred->entry()); | 3776 __ JumpIfNotSmi(input, deferred->entry()); |
| 3774 // If smi, handle it directly. | 3777 // If smi, handle it directly. |
| 3775 EmitIntegerMathAbs(instr); | 3778 EmitIntegerMathAbs(instr); |
| (...skipping 533 matching lines...) |
| 4309 | 4312 |
| 4310 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4313 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4311 Register external_pointer = ToRegister(instr->elements()); | 4314 Register external_pointer = ToRegister(instr->elements()); |
| 4312 Register key = no_reg; | 4315 Register key = no_reg; |
| 4313 ElementsKind elements_kind = instr->elements_kind(); | 4316 ElementsKind elements_kind = instr->elements_kind(); |
| 4314 bool key_is_constant = instr->key()->IsConstantOperand(); | 4317 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4315 int constant_key = 0; | 4318 int constant_key = 0; |
| 4316 if (key_is_constant) { | 4319 if (key_is_constant) { |
| 4317 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 4320 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 4318 if (constant_key & 0xF0000000) { | 4321 if (constant_key & 0xF0000000) { |
| 4319 Abort(kArrayIndexConstantValueTooBig); | 4322 Abort("array index constant value too big."); |
| 4320 } | 4323 } |
| 4321 } else { | 4324 } else { |
| 4322 key = ToRegister(instr->key()); | 4325 key = ToRegister(instr->key()); |
| 4323 } | 4326 } |
| 4324 int element_size_shift = ElementsKindToShiftSize(elements_kind); | 4327 int element_size_shift = ElementsKindToShiftSize(elements_kind); |
| 4325 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 4328 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
| 4326 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 4329 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
| 4327 int additional_offset = instr->additional_index() << element_size_shift; | 4330 int additional_offset = instr->additional_index() << element_size_shift; |
| 4328 | 4331 |
| 4329 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || | 4332 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || |
| (...skipping 52 matching lines...) |
| 4382 Register key = no_reg; | 4385 Register key = no_reg; |
| 4383 Register scratch = scratch0(); | 4386 Register scratch = scratch0(); |
| 4384 bool key_is_constant = instr->key()->IsConstantOperand(); | 4387 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4385 int constant_key = 0; | 4388 int constant_key = 0; |
| 4386 | 4389 |
| 4387 // Calculate the effective address of the slot in the array to store the | 4390 // Calculate the effective address of the slot in the array to store the |
| 4388 // double value. | 4391 // double value. |
| 4389 if (key_is_constant) { | 4392 if (key_is_constant) { |
| 4390 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 4393 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 4391 if (constant_key & 0xF0000000) { | 4394 if (constant_key & 0xF0000000) { |
| 4392 Abort(kArrayIndexConstantValueTooBig); | 4395 Abort("array index constant value too big."); |
| 4393 } | 4396 } |
| 4394 } else { | 4397 } else { |
| 4395 key = ToRegister(instr->key()); | 4398 key = ToRegister(instr->key()); |
| 4396 } | 4399 } |
| 4397 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); | 4400 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); |
| 4398 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 4401 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
| 4399 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 4402 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
| 4400 Operand operand = key_is_constant | 4403 Operand operand = key_is_constant |
| 4401 ? Operand((constant_key << element_size_shift) + | 4404 ? Operand((constant_key << element_size_shift) + |
| 4402 FixedDoubleArray::kHeaderSize - kHeapObjectTag) | 4405 FixedDoubleArray::kHeaderSize - kHeapObjectTag) |
| 4403 : Operand(key, LSL, shift_size); | 4406 : Operand(key, LSL, shift_size); |
| 4404 __ add(scratch, elements, operand); | 4407 __ add(scratch, elements, operand); |
| 4405 if (!key_is_constant) { | 4408 if (!key_is_constant) { |
| 4406 __ add(scratch, scratch, | 4409 __ add(scratch, scratch, |
| 4407 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | 4410 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); |
| 4408 } | 4411 } |
| 4409 | 4412 |
| 4410 if (instr->NeedsCanonicalization()) { | 4413 if (instr->NeedsCanonicalization()) { |
| 4411 // Force a canonical NaN. | 4414 // Force a canonical NaN. |
| 4412 if (masm()->emit_debug_code()) { | 4415 if (masm()->emit_debug_code()) { |
| 4413 __ vmrs(ip); | 4416 __ vmrs(ip); |
| 4414 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit)); | 4417 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit)); |
| 4415 __ Assert(ne, kDefaultNaNModeNotSet); | 4418 __ Assert(ne, "Default NaN mode not set"); |
| 4416 } | 4419 } |
| 4417 __ VFPCanonicalizeNaN(value); | 4420 __ VFPCanonicalizeNaN(value); |
| 4418 } | 4421 } |
| 4419 __ vstr(value, scratch, instr->additional_index() << element_size_shift); | 4422 __ vstr(value, scratch, instr->additional_index() << element_size_shift); |
| 4420 } | 4423 } |
| 4421 | 4424 |
| 4422 | 4425 |
| 4423 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { | 4426 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { |
| 4424 Register value = ToRegister(instr->value()); | 4427 Register value = ToRegister(instr->value()); |
| 4425 Register elements = ToRegister(instr->elements()); | 4428 Register elements = ToRegister(instr->elements()); |
| (...skipping 778 matching lines...) |
| 5204 __ mov(ip, Operand(Handle<Object>(cell))); | 5207 __ mov(ip, Operand(Handle<Object>(cell))); |
| 5205 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); | 5208 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); |
| 5206 __ cmp(reg, ip); | 5209 __ cmp(reg, ip); |
| 5207 } else { | 5210 } else { |
| 5208 __ cmp(reg, Operand(target)); | 5211 __ cmp(reg, Operand(target)); |
| 5209 } | 5212 } |
| 5210 DeoptimizeIf(ne, instr->environment()); | 5213 DeoptimizeIf(ne, instr->environment()); |
| 5211 } | 5214 } |
| 5212 | 5215 |
| 5213 | 5216 |
| 5214 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5217 void LCodeGen::DoCheckMapCommon(Register map_reg, |
| 5215 { | 5218 Handle<Map> map, |
| 5216 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 5219 LEnvironment* env) { |
| 5217 __ push(object); | 5220 Label success; |
| 5218 CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr); | 5221 __ CompareMap(map_reg, map, &success); |
| 5219 __ StoreToSafepointRegisterSlot(r0, scratch0()); | 5222 DeoptimizeIf(ne, env); |
| 5220 } | 5223 __ bind(&success); |
| 5221 __ tst(scratch0(), Operand(kSmiTagMask)); | |
| 5222 DeoptimizeIf(eq, instr->environment()); | |
| 5223 } | 5224 } |
| 5224 | 5225 |
| 5225 | 5226 |
| 5226 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5227 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5227 class DeferredCheckMaps: public LDeferredCode { | |
| 5228 public: | |
| 5229 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | |
| 5230 : LDeferredCode(codegen), instr_(instr), object_(object) { | |
| 5231 SetExit(check_maps()); | |
| 5232 } | |
| 5233 virtual void Generate() { | |
| 5234 codegen()->DoDeferredInstanceMigration(instr_, object_); | |
| 5235 } | |
| 5236 Label* check_maps() { return &check_maps_; } | |
| 5237 virtual LInstruction* instr() { return instr_; } | |
| 5238 private: | |
| 5239 LCheckMaps* instr_; | |
| 5240 Label check_maps_; | |
| 5241 Register object_; | |
| 5242 }; | |
| 5243 | |
| 5244 if (instr->hydrogen()->CanOmitMapChecks()) return; | 5228 if (instr->hydrogen()->CanOmitMapChecks()) return; |
| 5245 Register map_reg = scratch0(); | 5229 Register map_reg = scratch0(); |
| 5246 | |
| 5247 LOperand* input = instr->value(); | 5230 LOperand* input = instr->value(); |
| 5248 ASSERT(input->IsRegister()); | 5231 ASSERT(input->IsRegister()); |
| 5249 Register reg = ToRegister(input); | 5232 Register reg = ToRegister(input); |
| 5250 | 5233 |
| 5234 Label success; |
| 5251 SmallMapList* map_set = instr->hydrogen()->map_set(); | 5235 SmallMapList* map_set = instr->hydrogen()->map_set(); |
| 5252 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); | 5236 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 5253 | |
| 5254 DeferredCheckMaps* deferred = NULL; | |
| 5255 if (instr->hydrogen()->has_migration_target()) { | |
| 5256 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); | |
| 5257 __ bind(deferred->check_maps()); | |
| 5258 } | |
| 5259 | |
| 5260 Label success; | |
| 5261 for (int i = 0; i < map_set->length() - 1; i++) { | 5237 for (int i = 0; i < map_set->length() - 1; i++) { |
| 5262 Handle<Map> map = map_set->at(i); | 5238 Handle<Map> map = map_set->at(i); |
| 5263 __ CompareMap(map_reg, map, &success); | 5239 __ CompareMap(map_reg, map, &success); |
| 5264 __ b(eq, &success); | 5240 __ b(eq, &success); |
| 5265 } | 5241 } |
| 5266 | |
| 5267 Handle<Map> map = map_set->last(); | 5242 Handle<Map> map = map_set->last(); |
| 5268 __ CompareMap(map_reg, map, &success); | 5243 DoCheckMapCommon(map_reg, map, instr->environment()); |
| 5269 if (instr->hydrogen()->has_migration_target()) { | |
| 5270 __ b(ne, deferred->entry()); | |
| 5271 } else { | |
| 5272 DeoptimizeIf(ne, instr->environment()); | |
| 5273 } | |
| 5274 | |
| 5275 __ bind(&success); | 5244 __ bind(&success); |
| 5276 } | 5245 } |
| 5277 | 5246 |
| 5278 | 5247 |
| 5279 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5248 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5280 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5249 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5281 Register result_reg = ToRegister(instr->result()); | 5250 Register result_reg = ToRegister(instr->result()); |
| 5282 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); | 5251 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); |
| 5283 } | 5252 } |
| 5284 | 5253 |
| (...skipping 34 matching lines...) |
| 5319 __ jmp(&done); | 5288 __ jmp(&done); |
| 5320 | 5289 |
| 5321 // smi | 5290 // smi |
| 5322 __ bind(&is_smi); | 5291 __ bind(&is_smi); |
| 5323 __ ClampUint8(result_reg, result_reg); | 5292 __ ClampUint8(result_reg, result_reg); |
| 5324 | 5293 |
| 5325 __ bind(&done); | 5294 __ bind(&done); |
| 5326 } | 5295 } |
| 5327 | 5296 |
| 5328 | 5297 |
| 5298 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { |
| 5299 if (instr->hydrogen()->CanOmitPrototypeChecks()) return; |
| 5300 |
| 5301 Register prototype_reg = ToRegister(instr->temp()); |
| 5302 Register map_reg = ToRegister(instr->temp2()); |
| 5303 |
| 5304 ZoneList<Handle<JSObject> >* prototypes = instr->prototypes(); |
| 5305 ZoneList<Handle<Map> >* maps = instr->maps(); |
| 5306 |
| 5307 ASSERT(prototypes->length() == maps->length()); |
| 5308 |
| 5309 for (int i = 0; i < prototypes->length(); i++) { |
| 5310 __ LoadHeapObject(prototype_reg, prototypes->at(i)); |
| 5311 __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset)); |
| 5312 DoCheckMapCommon(map_reg, maps->at(i), instr->environment()); |
| 5313 } |
| 5314 } |
| 5315 |
| 5316 |
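Reviewer note: the re-added `DoCheckPrototypeMaps` above walks the recorded prototype/map pairs and bails out on any mismatch. A minimal sketch of the runtime behavior with hypothetical structs:

```cpp
// Each prototype's current map must still equal the map recorded at
// compile time, otherwise the optimized code deoptimizes.
#include <vector>

struct Map {};
struct JSObject { const Map* map; };

static bool CheckPrototypeMaps(const std::vector<const JSObject*>& prototypes,
                               const std::vector<const Map*>& maps) {
  // Mirrors ASSERT(prototypes->length() == maps->length()).
  if (prototypes.size() != maps.size()) return false;
  for (size_t i = 0; i < prototypes.size(); i++) {
    if (prototypes[i]->map != maps[i]) return false;  // would DeoptimizeIf(ne)
  }
  return true;
}
```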
| 5329 void LCodeGen::DoAllocate(LAllocate* instr) { | 5317 void LCodeGen::DoAllocate(LAllocate* instr) { |
| 5330 class DeferredAllocate: public LDeferredCode { | 5318 class DeferredAllocate: public LDeferredCode { |
| 5331 public: | 5319 public: |
| 5332 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) | 5320 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
| 5333 : LDeferredCode(codegen), instr_(instr) { } | 5321 : LDeferredCode(codegen), instr_(instr) { } |
| 5334 virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } | 5322 virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } |
| 5335 virtual LInstruction* instr() { return instr_; } | 5323 virtual LInstruction* instr() { return instr_; } |
| 5336 private: | 5324 private: |
| 5337 LAllocate* instr_; | 5325 LAllocate* instr_; |
| 5338 }; | 5326 }; |
| (...skipping 497 matching lines...) |
| 5836 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); | 5824 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); |
| 5837 __ ldr(result, FieldMemOperand(scratch, | 5825 __ ldr(result, FieldMemOperand(scratch, |
| 5838 FixedArray::kHeaderSize - kPointerSize)); | 5826 FixedArray::kHeaderSize - kPointerSize)); |
| 5839 __ bind(&done); | 5827 __ bind(&done); |
| 5840 } | 5828 } |
| 5841 | 5829 |
| 5842 | 5830 |
| 5843 #undef __ | 5831 #undef __ |
| 5844 | 5832 |
| 5845 } } // namespace v8::internal | 5833 } } // namespace v8::internal |