Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(828)

Side by Side Diff: src/ia32/code-stubs-ia32.cc

Issue 10103035: Share optimized code for closures. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: rebased on r11394 Created 8 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
59 __ pop(ecx); // Pop return address. 59 __ pop(ecx); // Pop return address.
60 __ push(eax); 60 __ push(eax);
61 __ push(ecx); // Push return address. 61 __ push(ecx); // Push return address.
62 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); 62 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
63 } 63 }
64 64
65 65
void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure (JSFunction) from the SharedFunctionInfo passed on
  // the stack, allocating it in new space. The closure's context is set to
  // the current context in esi. If the optimized-code-map cache of the shared
  // info holds code for the current global context, install that optimized
  // code instead of the generic code from the shared info.
  //
  // Stack on entry:  [esp + kPointerSize] = SharedFunctionInfo.
  // Returns the new JSFunction in eax and pops the one on-stack parameter.
  Counters* counters = masm->isolate()->counters();

  Label gc;
  // eax = new JSFunction; ebx/ecx are scratch. On allocation failure fall
  // through to the slow runtime path at &gc.
  __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);

  __ IncrementCounter(counters->fast_new_closure_total(), 1);

  // Get the function info from the stack.
  __ mov(edx, Operand(esp, 1 * kPointerSize));

  // Strict-mode functions get a different map (e.g. no arguments/caller
  // poisoning differences are encoded in the map).
  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object. ecx keeps the global context — it is
  // needed again below when probing the optimized code map.
  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
  __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
  __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  Factory* factory = masm->isolate()->factory();
  __ mov(ebx, Immediate(factory->empty_fixed_array()));
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
         Immediate(factory->the_hole_value()));
  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  // But first check if there is an optimized version for our context.
  Label check_optimized;
  Label install_unoptimized;
  if (FLAG_cache_optimized_code) {
    // A non-zero (non-Smi-0) optimized code map means the shared info has
    // cached optimized code for at least one context.
    __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
    __ test(ebx, ebx);
    __ j(not_zero, &check_optimized, Label::kNear);
  }
  __ bind(&install_unoptimized);
  // Unoptimized closures are not linked into the context's list of
  // optimized functions, so the link field is simply undefined.
  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
         Immediate(factory->undefined_value()));
  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  __ bind(&check_optimized);

  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);

  // ecx holds the global context, ebx points to a fixed array of
  // (global context, optimized code) pairs.
  // The map is never empty, so check the first entry directly.
  Label install_optimized;
  // Speculatively move the code object (second element of the first pair)
  // into edx; it is only used if the context comparison below matches.
  __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
  __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
  __ j(equal, &install_optimized);

  // Iterate through the rest of the map backwards. edx now holds an index
  // as a Smi (FixedArray lengths are Smi-encoded).
  Label loop;
  Label restore;
  __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ bind(&loop);
  // 4 is presumably the Smi encoding of 2, i.e. one (context, code) pair
  // — reaching it means only the already-checked first entry remains.
  __ cmp(edx, 4);  // Do not double check first entry.
  __ j(equal, &restore);
  __ sub(edx, Immediate(4));  // Skip a pair.
  __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
  __ j(not_equal, &loop, Label::kNear);
  // Hit: fetch the optimized code.
  __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));

  __ bind(&install_optimized);
  __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);

  // Idea: store proper code pointers in the map and either unmangle them
  // on marking or do nothing as the whole map is discarded on major GC anyway.
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Now link the function into the context's list of optimized functions.
  __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));

  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
  // No write barrier needed as the JSFunction (eax) is in new space.

  __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
  // Copy the JSFunction (eax) into edx before issuing the write barrier as
  // it clobbers all the registers passed.
  __ mov(edx, eax);
  __ RecordWriteContextSlot(
      ecx,
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
      edx,
      ebx,
      kDontSaveFPRegs);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  __ bind(&restore);
  // No cached code for this context: restore the SharedFunctionInfo into edx
  // (it was clobbered during the map scan) and install the generic code.
  __ mov(edx, Operand(esp, 1 * kPointerSize));
  __ jmp(&install_unoptimized);

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ pop(ecx);  // Temporarily remove return address.
  __ pop(edx);
  __ push(esi);
  __ push(edx);
  __ push(Immediate(factory->false_value()));
  __ push(ecx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
(...skipping 6943 matching lines...) Expand 10 before | Expand all | Expand 10 after
7062 // ElementsTransitionGenerator::GenerateSmiOnlyToObject 7134 // ElementsTransitionGenerator::GenerateSmiOnlyToObject
7063 // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble 7135 // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
7064 // and ElementsTransitionGenerator::GenerateDoubleToObject 7136 // and ElementsTransitionGenerator::GenerateDoubleToObject
7065 { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET}, 7137 { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
7066 { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET}, 7138 { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
7067 // ElementsTransitionGenerator::GenerateDoubleToObject 7139 // ElementsTransitionGenerator::GenerateDoubleToObject
7068 { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET}, 7140 { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
7069 { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET}, 7141 { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
7070 // StoreArrayLiteralElementStub::Generate 7142 // StoreArrayLiteralElementStub::Generate
7071 { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET}, 7143 { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
7144 // FastNewClosureStub
7145 { ecx, edx, ebx, EMIT_REMEMBERED_SET},
7072 // Null termination. 7146 // Null termination.
7073 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} 7147 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7074 }; 7148 };
7075 7149
7076 #undef REG 7150 #undef REG
7077 7151
7078 bool RecordWriteStub::IsPregenerated() { 7152 bool RecordWriteStub::IsPregenerated() {
7079 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; 7153 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7080 !entry->object.is(no_reg); 7154 !entry->object.is(no_reg);
7081 entry++) { 7155 entry++) {
(...skipping 309 matching lines...) Expand 10 before | Expand all | Expand 10 after
7391 false); 7465 false);
7392 __ pop(edx); 7466 __ pop(edx);
7393 __ ret(0); 7467 __ ret(0);
7394 } 7468 }
7395 7469
7396 #undef __ 7470 #undef __
7397 7471
7398 } } // namespace v8::internal 7472 } } // namespace v8::internal
7399 7473
7400 #endif // V8_TARGET_ARCH_IA32 7474 #endif // V8_TARGET_ARCH_IA32
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698