Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(133)

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 12114054: Supporting AllocationSiteInfo for Nested arrays (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressing a port compile failure Created 7 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/assembler.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 5704 matching lines...) Expand 10 before | Expand all | Expand 10 after
5715 // Load map into r2. 5715 // Load map into r2.
5716 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); 5716 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
5717 // Load the map's "bit field 2". 5717 // Load the map's "bit field 2".
5718 __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset)); 5718 __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
5719 // Retrieve elements_kind from bit field 2. 5719 // Retrieve elements_kind from bit field 2.
5720 __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount); 5720 __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
5721 __ cmp(r2, Operand(boilerplate_elements_kind)); 5721 __ cmp(r2, Operand(boilerplate_elements_kind));
5722 DeoptimizeIf(ne, instr->environment()); 5722 DeoptimizeIf(ne, instr->environment());
5723 } 5723 }
5724 5724
5725 int flags = allocation_site_mode == TRACK_ALLOCATION_SITE
5726 ? ArrayLiteral::kCreateAllocationSiteInfos
5727 : ArrayLiteral::kNoFlags;
5728
5725 // Set up the parameters to the stub/runtime call. 5729 // Set up the parameters to the stub/runtime call.
5726 __ LoadHeapObject(r3, literals); 5730 __ LoadHeapObject(r3, literals);
5727 __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); 5731 __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5728 // Boilerplate already exists, constant elements are never accessed. 5732 // Boilerplate already exists, constant elements are never accessed.
5729 // Pass an empty fixed array. 5733 // Pass an empty fixed array.
5730 __ mov(r1, Operand(isolate()->factory()->empty_fixed_array())); 5734 __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
5731 __ Push(r3, r2, r1); 5735 __ Push(r3, r2, r1);
5732 5736
5733 // Pick the right runtime function or stub to call. 5737 // Pick the right runtime function or stub to call.
5734 int length = instr->hydrogen()->length(); 5738 int length = instr->hydrogen()->length();
5735 if (instr->hydrogen()->IsCopyOnWrite()) { 5739 if (instr->hydrogen()->IsCopyOnWrite()) {
5736 ASSERT(instr->hydrogen()->depth() == 1); 5740 ASSERT(instr->hydrogen()->depth() == 1);
5737 FastCloneShallowArrayStub::Mode mode = 5741 FastCloneShallowArrayStub::Mode mode =
5738 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS; 5742 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
5739 FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length); 5743 FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
5740 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 5744 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5741 } else if (instr->hydrogen()->depth() > 1) { 5745 } else if (instr->hydrogen()->depth() > 1) {
5742 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr); 5746 __ mov(r0, Operand(Smi::FromInt(flags)));
5747 __ push(r0);
5748 CallRuntime(Runtime::kCreateArrayLiteral, 4, instr);
5743 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { 5749 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
5744 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr); 5750 __ mov(r0, Operand(Smi::FromInt(flags)));
5751 __ push(r0);
5752 CallRuntime(Runtime::kCreateArrayLiteralShallow, 4, instr);
5745 } else { 5753 } else {
5746 FastCloneShallowArrayStub::Mode mode = 5754 FastCloneShallowArrayStub::Mode mode =
5747 boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS 5755 boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
5748 ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS 5756 ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
5749 : FastCloneShallowArrayStub::CLONE_ELEMENTS; 5757 : FastCloneShallowArrayStub::CLONE_ELEMENTS;
5750 FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); 5758 FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
5751 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 5759 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5752 } 5760 }
5753 } 5761 }
5754 5762
5755 5763
5756 void LCodeGen::EmitDeepCopy(Handle<JSObject> object, 5764 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
5765 Handle<JSObject> original_object,
5757 Register result, 5766 Register result,
5758 Register source, 5767 Register source,
5759 int* offset, 5768 int* offset,
5760 AllocationSiteMode mode) { 5769 AllocationSiteMode mode) {
5761 ASSERT(!source.is(r2)); 5770 ASSERT(!source.is(r2));
5762 ASSERT(!result.is(r2)); 5771 ASSERT(!result.is(r2));
5763 5772
5773 // Should we track allocation info for *this* object in the tree?
5764 bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE && 5774 bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
5765 object->map()->CanTrackAllocationSite(); 5775 object->ShouldTrackAllocationInfo();
5766 5776
5767 // Only elements backing stores for non-COW arrays need to be copied. 5777 // Only elements backing stores for non-COW arrays need to be copied.
5768 Handle<FixedArrayBase> elements(object->elements()); 5778 Handle<FixedArrayBase> elements(object->elements());
5779 Handle<FixedArrayBase> original_elements(original_object->elements());
5769 bool has_elements = elements->length() > 0 && 5780 bool has_elements = elements->length() > 0 &&
5770 elements->map() != isolate()->heap()->fixed_cow_array_map(); 5781 elements->map() != isolate()->heap()->fixed_cow_array_map();
5771 5782
5772 // Increase the offset so that subsequent objects end up right after 5783 // Increase the offset so that subsequent objects end up right after
5773 // this object and its backing store. 5784 // this object and its backing store.
5774 int object_offset = *offset; 5785 int object_offset = *offset;
5775 int object_size = object->map()->instance_size(); 5786 int object_size = object->map()->instance_size();
5776 int elements_size = has_elements ? elements->Size() : 0; 5787 int elements_size = has_elements ? elements->Size() : 0;
5777 int elements_offset = *offset + object_size; 5788 int elements_offset = *offset + object_size;
5778 if (create_allocation_site_info) { 5789 if (create_allocation_site_info) {
(...skipping 16 matching lines...) Expand all
5795 __ str(r2, FieldMemOperand(result, object_offset + i)); 5806 __ str(r2, FieldMemOperand(result, object_offset + i));
5796 } 5807 }
5797 5808
5798 // Copy in-object properties. 5809 // Copy in-object properties.
5799 for (int i = 0; i < inobject_properties; i++) { 5810 for (int i = 0; i < inobject_properties; i++) {
5800 int total_offset = object_offset + object->GetInObjectPropertyOffset(i); 5811 int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
5801 Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i), 5812 Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
5802 isolate()); 5813 isolate());
5803 if (value->IsJSObject()) { 5814 if (value->IsJSObject()) {
5804 Handle<JSObject> value_object = Handle<JSObject>::cast(value); 5815 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
5816 Handle<JSObject> original_value_object = Handle<JSObject>::cast(
5817 Handle<Object>(original_object->InObjectPropertyAt(i), isolate()));
5818
5805 __ add(r2, result, Operand(*offset)); 5819 __ add(r2, result, Operand(*offset));
5806 __ str(r2, FieldMemOperand(result, total_offset)); 5820 __ str(r2, FieldMemOperand(result, total_offset));
5807 __ LoadHeapObject(source, value_object); 5821 __ LoadHeapObject(source, value_object);
5808 EmitDeepCopy(value_object, result, source, offset, 5822 EmitDeepCopy(value_object, original_value_object, result, source,
5809 DONT_TRACK_ALLOCATION_SITE); 5823 offset, mode);
5810 } else if (value->IsHeapObject()) { 5824 } else if (value->IsHeapObject()) {
5811 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); 5825 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
5812 __ str(r2, FieldMemOperand(result, total_offset)); 5826 __ str(r2, FieldMemOperand(result, total_offset));
5813 } else { 5827 } else {
5814 __ mov(r2, Operand(value)); 5828 __ mov(r2, Operand(value));
5815 __ str(r2, FieldMemOperand(result, total_offset)); 5829 __ str(r2, FieldMemOperand(result, total_offset));
5816 } 5830 }
5817 } 5831 }
5818 5832
5819 // Build Allocation Site Info if desired 5833 // Build Allocation Site Info if desired
5820 if (create_allocation_site_info) { 5834 if (create_allocation_site_info) {
5821 __ mov(r2, Operand(Handle<Map>(isolate()->heap()-> 5835 __ mov(r2, Operand(Handle<Map>(isolate()->heap()->
5822 allocation_site_info_map()))); 5836 allocation_site_info_map())));
5823 __ str(r2, FieldMemOperand(result, object_size)); 5837 __ str(r2, FieldMemOperand(result, object_size + object_offset));
5824 __ str(source, FieldMemOperand(result, object_size + kPointerSize)); 5838 __ LoadHeapObject(r2, original_object);
5839 __ str(r2, FieldMemOperand(result,
5840 object_size + object_offset + kPointerSize));
5825 } 5841 }
5826 5842
5827 if (has_elements) { 5843 if (has_elements) {
5828 // Copy elements backing store header. 5844 // Copy elements backing store header.
5829 __ LoadHeapObject(source, elements); 5845 __ LoadHeapObject(source, elements);
5830 for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) { 5846 for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
5831 __ ldr(r2, FieldMemOperand(source, i)); 5847 __ ldr(r2, FieldMemOperand(source, i));
5832 __ str(r2, FieldMemOperand(result, elements_offset + i)); 5848 __ str(r2, FieldMemOperand(result, elements_offset + i));
5833 } 5849 }
5834 5850
5835 // Copy elements backing store content. 5851 // Copy elements backing store content.
5836 int elements_length = has_elements ? elements->length() : 0; 5852 int elements_length = has_elements ? elements->length() : 0;
5837 if (elements->IsFixedDoubleArray()) { 5853 if (elements->IsFixedDoubleArray()) {
5838 Handle<FixedDoubleArray> double_array = 5854 Handle<FixedDoubleArray> double_array =
5839 Handle<FixedDoubleArray>::cast(elements); 5855 Handle<FixedDoubleArray>::cast(elements);
5840 for (int i = 0; i < elements_length; i++) { 5856 for (int i = 0; i < elements_length; i++) {
5841 int64_t value = double_array->get_representation(i); 5857 int64_t value = double_array->get_representation(i);
5842 // We only support little endian mode... 5858 // We only support little endian mode...
5843 int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF); 5859 int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
5844 int32_t value_high = static_cast<int32_t>(value >> 32); 5860 int32_t value_high = static_cast<int32_t>(value >> 32);
5845 int total_offset = 5861 int total_offset =
5846 elements_offset + FixedDoubleArray::OffsetOfElementAt(i); 5862 elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
5847 __ mov(r2, Operand(value_low)); 5863 __ mov(r2, Operand(value_low));
5848 __ str(r2, FieldMemOperand(result, total_offset)); 5864 __ str(r2, FieldMemOperand(result, total_offset));
5849 __ mov(r2, Operand(value_high)); 5865 __ mov(r2, Operand(value_high));
5850 __ str(r2, FieldMemOperand(result, total_offset + 4)); 5866 __ str(r2, FieldMemOperand(result, total_offset + 4));
5851 } 5867 }
5852 } else if (elements->IsFixedArray()) { 5868 } else if (elements->IsFixedArray()) {
5853 Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); 5869 Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
5870 ASSERT(original_object->HasFastObjectElements());
5871 Handle<FixedArray> original_fast_elements =
5872 Handle<FixedArray>::cast(original_elements);
5854 for (int i = 0; i < elements_length; i++) { 5873 for (int i = 0; i < elements_length; i++) {
5855 int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); 5874 int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
5856 Handle<Object> value(fast_elements->get(i), isolate()); 5875 Handle<Object> value(fast_elements->get(i), isolate());
5857 if (value->IsJSObject()) { 5876 if (value->IsJSObject()) {
5858 Handle<JSObject> value_object = Handle<JSObject>::cast(value); 5877 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
5878 Handle<JSObject> original_value_object = Handle<JSObject>::cast(
5879 Handle<Object>(original_fast_elements->get(i), isolate()));
5859 __ add(r2, result, Operand(*offset)); 5880 __ add(r2, result, Operand(*offset));
5860 __ str(r2, FieldMemOperand(result, total_offset)); 5881 __ str(r2, FieldMemOperand(result, total_offset));
5861 __ LoadHeapObject(source, value_object); 5882 __ LoadHeapObject(source, value_object);
5862 EmitDeepCopy(value_object, result, source, offset, 5883 ASSERT(!value_object.is_identical_to(original_value_object));
5863 DONT_TRACK_ALLOCATION_SITE); 5884 EmitDeepCopy(value_object, original_value_object, result, source,
5885 offset, mode);
5864 } else if (value->IsHeapObject()) { 5886 } else if (value->IsHeapObject()) {
5865 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); 5887 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
5866 __ str(r2, FieldMemOperand(result, total_offset)); 5888 __ str(r2, FieldMemOperand(result, total_offset));
5867 } else { 5889 } else {
5868 __ mov(r2, Operand(value)); 5890 __ mov(r2, Operand(value));
5869 __ str(r2, FieldMemOperand(result, total_offset)); 5891 __ str(r2, FieldMemOperand(result, total_offset));
5870 } 5892 }
5871 } 5893 }
5872 } else { 5894 } else {
5873 UNREACHABLE(); 5895 UNREACHABLE();
5874 } 5896 }
5875 } 5897 }
5876 } 5898 }
5877 5899
5878 5900
5879 void LCodeGen::DoFastLiteral(LFastLiteral* instr) { 5901 void LCodeGen::FastLiteralHelper(LFastLiteral* instr, AllocationSiteMode mode) {
5880 int size = instr->hydrogen()->total_size(); 5902 int size = mode == DONT_TRACK_ALLOCATION_SITE
5881 ElementsKind boilerplate_elements_kind = 5903 ? instr->hydrogen()->size_without_allocation_sites()
5882 instr->hydrogen()->boilerplate()->GetElementsKind(); 5904 : instr->hydrogen()->total_size();
5883 5905
5884 // Deopt if the array literal boilerplate ElementsKind is of a type different
5885 // than the expected one. The check isn't necessary if the boilerplate has
5886 // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
5887 if (CanTransitionToMoreGeneralFastElementsKind(
5888 boilerplate_elements_kind, true)) {
5889 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
5890 // Load map into r2.
5891 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
5892 // Load the map's "bit field 2".
5893 __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
5894 // Retrieve elements_kind from bit field 2.
5895 __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
5896 __ cmp(r2, Operand(boilerplate_elements_kind));
5897 DeoptimizeIf(ne, instr->environment());
5898 }
5899
5900 // Allocate all objects that are part of the literal in one big
5901 // allocation. This avoids multiple limit checks. 5906 // allocation. This avoids multiple limit checks.
5902 Label allocated, runtime_allocate; 5907 Label allocated, runtime_allocate;
5903 __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); 5908 __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
5904 __ jmp(&allocated); 5909 __ jmp(&allocated);
5905 5910
5906 __ bind(&runtime_allocate); 5911 __ bind(&runtime_allocate);
5907 __ mov(r0, Operand(Smi::FromInt(size))); 5912 __ mov(r0, Operand(Smi::FromInt(size)));
5908 __ push(r0); 5913 __ push(r0);
5909 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); 5914 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5910 5915
5911 __ bind(&allocated); 5916 __ bind(&allocated);
5912 int offset = 0; 5917 int offset = 0;
5913 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate()); 5918 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
5914 EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset, 5919 EmitDeepCopy(instr->hydrogen()->boilerplate(),
5920 instr->hydrogen()->original_boilerplate(),
5921 r0, r1, &offset,
5915 instr->hydrogen()->allocation_site_mode()); 5922 instr->hydrogen()->allocation_site_mode());
5916 ASSERT_EQ(size, offset); 5923 ASSERT_EQ(size, offset);
5917 } 5924 }
5918 5925
5919 5926
5927 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
5928 // TODO(mvstanton): Revisit this heuristic as site info matures.
5929 // If allocation site mode is on, then we need the ability to turn it off
5930 // after "a while." Later, better options should be available, but for
5931 // now just allow a certain number of GCs to pass.
5932 if (instr->hydrogen()->allocation_site_mode() == TRACK_ALLOCATION_SITE) {
5933 // How many GCs have passed?
5934 const int maxCount = 3;
5935 ExternalReference gc_count_address =
5936 ExternalReference::gc_count_address(isolate());
5937 Label continue_using, done;
5938 __ mov(r0, Operand(gc_count_address));
5939 __ ldr(r0, MemOperand(r0));
5940 __ cmp(r0, Operand(maxCount));
5941 __ b(lt, &continue_using);
5942 FastLiteralHelper(instr, DONT_TRACK_ALLOCATION_SITE);
5943 __ jmp(&done);
5944 __ bind(&continue_using);
5945 FastLiteralHelper(instr, TRACK_ALLOCATION_SITE);
5946 __ bind(&done);
5947 } else {
5948 FastLiteralHelper(instr, DONT_TRACK_ALLOCATION_SITE);
5949 }
5950 }
5951
5952
5920 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { 5953 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
5921 Handle<FixedArray> literals(instr->environment()->closure()->literals()); 5954 Handle<FixedArray> literals(instr->environment()->closure()->literals());
5922 Handle<FixedArray> constant_properties = 5955 Handle<FixedArray> constant_properties =
5923 instr->hydrogen()->constant_properties(); 5956 instr->hydrogen()->constant_properties();
5924 5957
5925 // Set up the parameters to the stub/runtime call. 5958 // Set up the parameters to the stub/runtime call.
5926 __ LoadHeapObject(r3, literals); 5959 __ LoadHeapObject(r3, literals);
5927 __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); 5960 __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5928 __ mov(r1, Operand(constant_properties)); 5961 __ mov(r1, Operand(constant_properties));
5929 int flags = instr->hydrogen()->fast_elements() 5962 int flags = instr->hydrogen()->fast_elements()
5930 ? ObjectLiteral::kFastElements 5963 ? ObjectLiteral::kFastElements
5931 : ObjectLiteral::kNoFlags; 5964 : ObjectLiteral::kNoFlags;
5965
5966 if (instr->hydrogen()->allocation_site_mode() == TRACK_ALLOCATION_SITE) {
5967 flags |= ObjectLiteral::kCreateAllocationSiteInfos;
5968 }
5969
5932 __ mov(r0, Operand(Smi::FromInt(flags))); 5970 __ mov(r0, Operand(Smi::FromInt(flags)));
5933
5934 // Pick the right runtime function or stub to call. 5971 // Pick the right runtime function or stub to call.
5935 int properties_count = constant_properties->length() / 2; 5972 int properties_count = constant_properties->length() / 2;
5936 if (instr->hydrogen()->depth() > 1) { 5973 if (instr->hydrogen()->depth() > 1) {
5937 __ Push(r3, r2, r1, r0); 5974 __ Push(r3, r2, r1, r0);
5938 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); 5975 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
5939 } else if (flags != ObjectLiteral::kFastElements || 5976 } else if (flags != ObjectLiteral::kFastElements ||
5940 properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { 5977 properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
5941 __ Push(r3, r2, r1, r0); 5978 __ Push(r3, r2, r1, r0);
5942 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr); 5979 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
5943 } else { 5980 } else {
(...skipping 448 matching lines...) Expand 10 before | Expand all | Expand 10 after
6392 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); 6429 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
6393 __ ldr(result, FieldMemOperand(scratch, 6430 __ ldr(result, FieldMemOperand(scratch,
6394 FixedArray::kHeaderSize - kPointerSize)); 6431 FixedArray::kHeaderSize - kPointerSize));
6395 __ bind(&done); 6432 __ bind(&done);
6396 } 6433 }
6397 6434
6398 6435
6399 #undef __ 6436 #undef __
6400 6437
6401 } } // namespace v8::internal 6438 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/assembler.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698