| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 4589 matching lines...) |
| 4600 EmitDeepCopy(value_object, result, source, offset); | 4600 EmitDeepCopy(value_object, result, source, offset); |
| 4601 } else if (value->IsHeapObject()) { | 4601 } else if (value->IsHeapObject()) { |
| 4602 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); | 4602 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); |
| 4603 __ str(r2, FieldMemOperand(result, total_offset)); | 4603 __ str(r2, FieldMemOperand(result, total_offset)); |
| 4604 } else { | 4604 } else { |
| 4605 __ mov(r2, Operand(value)); | 4605 __ mov(r2, Operand(value)); |
| 4606 __ str(r2, FieldMemOperand(result, total_offset)); | 4606 __ str(r2, FieldMemOperand(result, total_offset)); |
| 4607 } | 4607 } |
| 4608 } | 4608 } |
| 4609 | 4609 |
| 4610 // Copy elements backing store header. | |
| 4611 ASSERT(!has_elements || elements->IsFixedArray()); | |
| 4612 if (has_elements) { | 4610 if (has_elements) { |
| 4611 // Copy elements backing store header. |
| 4613 __ LoadHeapObject(source, elements); | 4612 __ LoadHeapObject(source, elements); |
| 4614 for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) { | 4613 for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) { |
| 4615 __ ldr(r2, FieldMemOperand(source, i)); | 4614 __ ldr(r2, FieldMemOperand(source, i)); |
| 4616 __ str(r2, FieldMemOperand(result, elements_offset + i)); | 4615 __ str(r2, FieldMemOperand(result, elements_offset + i)); |
| 4617 } | 4616 } |
| 4618 } | |
| 4619 | 4617 |
| 4620 // Copy elements backing store content. | 4618 // Copy elements backing store content. |
| 4621 ASSERT(!has_elements || elements->IsFixedArray()); | 4619 int elements_length = has_elements ? elements->length() : 0; |
| 4622 int elements_length = has_elements ? elements->length() : 0; | 4620 if (elements->IsFixedDoubleArray()) { |
| 4623 for (int i = 0; i < elements_length; i++) { | 4621 Handle<FixedDoubleArray> double_array = |
| 4624 int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); | 4622 Handle<FixedDoubleArray>::cast(elements); |
| 4625 Handle<Object> value = JSObject::GetElement(object, i); | 4623 for (int i = 0; i < elements_length; i++) { |
| 4626 if (value->IsJSObject()) { | 4624 int64_t i_value; |
| 4627 Handle<JSObject> value_object = Handle<JSObject>::cast(value); | 4625 if (double_array->is_the_hole(i)) { |
| 4628 __ add(r2, result, Operand(*offset)); | 4626 i_value = kHoleNanInt64; |
| 4629 __ str(r2, FieldMemOperand(result, total_offset)); | 4627 } else { |
| 4630 __ LoadHeapObject(source, value_object); | 4628 double value = double_array->get_scalar(i); |
| 4631 EmitDeepCopy(value_object, result, source, offset); | 4629 i_value = BitCast<int64_t, double>(value); |
| 4632 } else if (value->IsHeapObject()) { | 4630 } |
| 4633 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); | 4631 // We only support little endian mode... |
| 4634 __ str(r2, FieldMemOperand(result, total_offset)); | 4632 int32_t i_value_low = i_value & 0xFFFFFFFF; |
| | 4633 int32_t i_value_high = i_value >> 32; |
| | 4634 int total_offset = |
| | 4635 elements_offset + FixedDoubleArray::OffsetOfElementAt(i); |
| | 4636 __ mov(r2, Operand(i_value_low)); |
| | 4637 __ str(r2, FieldMemOperand(result, total_offset)); |
| | 4638 __ mov(r2, Operand(i_value_high)); |
| | 4639 __ str(r2, FieldMemOperand(result, total_offset + 4)); |
| | 4640 } |
| | 4641 } else if (elements->IsFixedArray()) { |
| | 4642 for (int i = 0; i < elements_length; i++) { |
| | 4643 int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); |
| | 4644 Handle<Object> value = JSObject::GetElement(object, i); |
| | 4645 if (value->IsJSObject()) { |
| | 4646 Handle<JSObject> value_object = Handle<JSObject>::cast(value); |
| | 4647 __ add(r2, result, Operand(*offset)); |
| | 4648 __ str(r2, FieldMemOperand(result, total_offset)); |
| | 4649 __ LoadHeapObject(source, value_object); |
| | 4650 EmitDeepCopy(value_object, result, source, offset); |
| | 4651 } else if (value->IsHeapObject()) { |
| | 4652 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); |
| | 4653 __ str(r2, FieldMemOperand(result, total_offset)); |
| | 4654 } else { |
| | 4655 __ mov(r2, Operand(value)); |
| | 4656 __ str(r2, FieldMemOperand(result, total_offset)); |
| | 4657 } |
| | 4658 } |
| 4635 } else { | 4659 } else { |
| 4636 __ mov(r2, Operand(value)); | 4660 UNREACHABLE(); |
| 4637 __ str(r2, FieldMemOperand(result, total_offset)); | |
| 4638 } | 4661 } |
| 4639 } | 4662 } |
| 4640 } | 4663 } |
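Note on the new FixedDoubleArray branch above: each element is bit-cast to 64 bits (or replaced by the hole NaN pattern) and stored as two 32-bit words, the low word at `total_offset` and the high word at `total_offset + 4`, which is only valid for little endian as the in-code comment says. Below is a minimal host-side sketch of that per-element computation; it is illustrative only, not V8 code, and the hole bit pattern is passed in as a parameter rather than hard-coding kHoleNanInt64.

```cpp
// Illustrative sketch (not V8 code): split one FixedDoubleArray element
// into the two 32-bit words that the generated ARM code stores at
// total_offset and total_offset + 4 on a little-endian target.
#include <cstdint>
#include <cstring>

struct WordPair {
  int32_t low;   // goes to FieldMemOperand(result, total_offset)
  int32_t high;  // goes to FieldMemOperand(result, total_offset + 4)
};

// `hole_bits` stands in for V8's kHoleNanInt64 constant (assumed here).
WordPair SplitDoubleElement(double value, bool is_hole, uint64_t hole_bits) {
  uint64_t bits;
  if (is_hole) {
    bits = hole_bits;                          // keep the hole NaN pattern
  } else {
    std::memcpy(&bits, &value, sizeof(bits));  // BitCast<int64_t, double>
  }
  WordPair words;
  words.low = static_cast<int32_t>(bits & 0xFFFFFFFF);
  words.high = static_cast<int32_t>(bits >> 32);
  return words;
}
```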
| 4641 | 4664 |
| 4642 | 4665 |
| 4643 void LCodeGen::DoFastLiteral(LFastLiteral* instr) { | 4666 void LCodeGen::DoFastLiteral(LFastLiteral* instr) { |
| 4644 int size = instr->hydrogen()->total_size(); | 4667 int size = instr->hydrogen()->total_size(); |
| 4645 | 4668 |
| 4646 // Allocate all objects that are part of the literal in one big | 4669 // Allocate all objects that are part of the literal in one big |
| 4647 // allocation. This avoids multiple limit checks. | 4670 // allocation. This avoids multiple limit checks. |
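As a rough sketch of the bookkeeping behind this comment (an illustration under assumptions, not the actual implementation): every object belonging to the literal is assigned a slot inside the single allocation, and when EmitDeepCopy above meets a nested JSObject it stores `result + *offset` (the address the nested copy is about to occupy) before recursing. The types and helper below are made up for the illustration.

```cpp
// Illustrative sketch only: pre-order offset assignment for a nested
// literal, mirroring how EmitDeepCopy hands out consecutive slots in the
// single allocation that DoFastLiteral performs up front.
#include <vector>

struct LiteralNode {
  int size_in_bytes;                   // this object's own size (incl. elements)
  std::vector<LiteralNode*> children;  // nested object literals
  int offset = 0;                      // slot inside the big allocation
};

// Walks the literal depth-first; *offset plays the role of the running
// `offset` pointer threaded through EmitDeepCopy.
void AssignOffsets(LiteralNode* node, int* offset) {
  node->offset = *offset;          // the parent stores result + this value
  *offset += node->size_in_bytes;
  for (LiteralNode* child : node->children) {
    AssignOffsets(child, offset);  // children land right after the parent
  }
}
```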
| (...skipping 464 matching lines...) |
| 5112 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 5135 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); |
| 5113 __ ldr(result, FieldMemOperand(scratch, | 5136 __ ldr(result, FieldMemOperand(scratch, |
| 5114 FixedArray::kHeaderSize - kPointerSize)); | 5137 FixedArray::kHeaderSize - kPointerSize)); |
| 5115 __ bind(&done); | 5138 __ bind(&done); |
| 5116 } | 5139 } |
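One note on the unchanged context above: `Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)` turns a smi-tagged index straight into a byte offset. A tiny sketch of that arithmetic, assuming the usual 32-bit constants (kSmiTagSize == 1, kPointerSizeLog2 == 2); the helper name is made up.

```cpp
// Illustrative sketch: why shifting a smi-tagged index left by
// kPointerSizeLog2 - kSmiTagSize yields a byte offset on 32-bit ARM.
#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;       // a smi holds the value shifted left by 1
const int kPointerSizeLog2 = 2;  // 4-byte pointers

int32_t SmiIndexToByteOffset(int32_t untagged_index) {
  int32_t smi = untagged_index << kSmiTagSize;     // tagged form
  return smi << (kPointerSizeLog2 - kSmiTagSize);  // equals index * 4
}

int main() {
  assert(SmiIndexToByteOffset(3) == 3 * (1 << kPointerSizeLog2));
  return 0;
}
```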
| 5117 | 5140 |
| 5118 | 5141 |
| 5119 #undef __ | 5142 #undef __ |
| 5120 | 5143 |
| 5121 } } // namespace v8::internal | 5144 } } // namespace v8::internal |