Chromium Code Reviews

Unified Diff: src/compiler/code-assembler.cc

Issue 1893383002: Complete separation of CodeAssembler and CodeStubAssembler (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 8 months ago
 // Copyright 2015 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/code-assembler.h"

 #include <ostream>

 #include "src/code-factory.h"
 #include "src/compiler/graph.h"
(...skipping 155 matching lines...)
 }

 Node* CodeAssembler::LoadStackPointer() {
   return raw_assembler_->LoadStackPointer();
 }

 Node* CodeAssembler::SmiShiftBitsConstant() {
   return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
 }

-Node* CodeAssembler::SmiTag(Node* value) {
-  return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
-}
-
-Node* CodeAssembler::SmiUntag(Node* value) {
-  return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
-}
-
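Note: the SmiTag/SmiUntag helpers deleted above are smi-tagging shims; given this CL's goal, they presumably move to CodeStubAssembler, since smi encoding is a JS-level concern rather than a raw machine one. As a reminder of what the WordShl/WordSar pair computes, here is a minimal standalone sketch of the tagging scheme, assuming V8's 64-bit layout (kSmiTagSize = 1, kSmiShiftSize = 31); the demo types are illustrative, not V8's.

#include <cassert>
#include <cstdint>

// 64-bit smi layout: the value lives in the upper 32 bits and the low tag
// bit is zero, so tagging is a left shift by 32 and untagging an arithmetic
// right shift by 32 (exactly the shifts the removed wrappers emitted).
constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;
constexpr int kSmiShiftBits = kSmiShiftSize + kSmiTagSize;

int64_t SmiTag(int64_t value) {
  // Shift through uint64_t so negative values avoid undefined behavior.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShiftBits);
}

int64_t SmiUntag(int64_t word) {
  return word >> kSmiShiftBits;  // arithmetic shift, as on all V8 targets
}

int main() {
  assert(SmiUntag(SmiTag(42)) == 42);
  assert(SmiUntag(SmiTag(-7)) == -7);
  assert((SmiTag(42) & 1) == 0);  // the smi tag bit stays clear
  return 0;
}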
 #define DEFINE_CODE_ASSEMBLER_BINARY_OP(name)   \
   Node* CodeAssembler::name(Node* a, Node* b) { \
     return raw_assembler_->name(a, b);          \
   }
 CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
 #undef DEFINE_CODE_ASSEMBLER_BINARY_OP
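The macro block above (and the unary one below) is the X-macro idiom: CODE_ASSEMBLER_BINARY_OP_LIST enumerates operation names, and DEFINE_CODE_ASSEMBLER_BINARY_OP is applied to each entry, stamping out identical one-line wrappers that forward to raw_assembler_. A self-contained sketch of the idiom, with hypothetical names standing in for the V8 ones:

#include <iostream>

// The list macro enumerates names; applying a per-entry macro generates one
// forwarding wrapper per name, keeping the list and the wrappers in sync.
#define BINARY_OP_LIST(V) \
  V(Add)                  \
  V(Sub)                  \
  V(Mul)

struct Backend {
  int Add(int a, int b) { return a + b; }
  int Sub(int a, int b) { return a - b; }
  int Mul(int a, int b) { return a * b; }
};

class Frontend {
 public:
  explicit Frontend(Backend* backend) : backend_(backend) {}
// Expands to: int Add(int a, int b) { return backend_->Add(a, b); } etc.
#define DEFINE_BINARY_OP(name) \
  int name(int a, int b) { return backend_->name(a, b); }
  BINARY_OP_LIST(DEFINE_BINARY_OP)
#undef DEFINE_BINARY_OP

 private:
  Backend* backend_;
};

int main() {
  Backend backend;
  Frontend frontend(&backend);
  std::cout << frontend.Add(2, 3) << "\n";  // prints 5
  return 0;
}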

 Node* CodeAssembler::WordShl(Node* value, int shift) {
   return raw_assembler_->WordShl(value, IntPtrConstant(shift));
 }

 Node* CodeAssembler::TruncateFloat64ToInt32RoundToZero(Node* a) {
   return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kRoundToZero,
                                                 a);
 }

 Node* CodeAssembler::TruncateFloat64ToInt32JavaScript(Node* a) {
   return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kJavaScript, a);
 }

 #define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
   Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
 CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
 #undef DEFINE_CODE_ASSEMBLER_UNARY_OP

+Node* CodeAssembler::Load(MachineType rep, Node* base) {
+  return raw_assembler_->Load(rep, base);
+}
+
+Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
+  return raw_assembler_->Load(rep, base, index);
+}
+
 Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
   if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
     Handle<Object> root = isolate()->heap()->root_handle(root_index);
     if (root->IsSmi()) {
       return SmiConstant(Smi::cast(*root));
     } else {
       return HeapConstant(Handle<HeapObject>::cast(root));
     }
   }

   compiler::Node* roots_array_start =
       ExternalConstant(ExternalReference::roots_array_start(isolate()));
   USE(roots_array_start);

-  // TODO(danno): Implement thee root-access case where the root is not constant
+  // TODO(danno): Implement the root-access case where the root is not constant
   // and must be loaded from the root array.
   UNIMPLEMENTED();
   return nullptr;
 }

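The TODO above marks the missing slow path: a root that cannot be treated as a constant would have to be fetched from the heap's root array, whose base address is the roots_array_start external reference already computed (and currently kept alive only via USE). A standalone model of that lookup follows; the types and names are illustrative assumptions, not part of this CL:

#include <cassert>
#include <cstdint>

using Object = intptr_t;  // stand-in for a tagged pointer
constexpr int kRootCount = 3;
Object g_roots[kRootCount] = {0x10, 0x20, 0x30};  // the "root array"

Object* RootsArrayStart() { return g_roots; }  // ExternalReference analog

Object LoadRoot(int root_index) {
  // Morally: Load(MachineType::AnyTagged(), roots_array_start,
  //               IntPtrConstant(root_index * kPointerSize)),
  // i.e. one pointer-sized slot per RootListIndex entry.
  return *(RootsArrayStart() + root_index);
}

int main() {
  assert(LoadRoot(1) == 0x20);
  return 0;
}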
-Node* CodeAssembler::AllocateRawUnaligned(Node* size_in_bytes,
-                                          AllocationFlags flags,
-                                          Node* top_address,
-                                          Node* limit_address) {
-  Node* top = Load(MachineType::Pointer(), top_address);
-  Node* limit = Load(MachineType::Pointer(), limit_address);
-
-  // If there's not enough space, call the runtime.
-  RawMachineLabel runtime_call(RawMachineLabel::kDeferred), no_runtime_call,
-      merge_runtime;
-  raw_assembler_->Branch(
-      raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
-      &runtime_call, &no_runtime_call);
-
-  raw_assembler_->Bind(&runtime_call);
-  // AllocateInTargetSpace does not use the context.
-  Node* context = IntPtrConstant(0);
-  Node* runtime_flags = SmiTag(Int32Constant(
-      AllocateDoubleAlignFlag::encode(false) |
-      AllocateTargetSpace::encode(flags & kPretenured
-                                      ? AllocationSpace::OLD_SPACE
-                                      : AllocationSpace::NEW_SPACE)));
-  Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
-                                     SmiTag(size_in_bytes), runtime_flags);
-  raw_assembler_->Goto(&merge_runtime);
-
-  // When there is enough space, return `top' and bump it up.
-  raw_assembler_->Bind(&no_runtime_call);
-  Node* no_runtime_result = top;
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
-                      IntPtrAdd(top, size_in_bytes));
-  no_runtime_result =
-      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
-  raw_assembler_->Goto(&merge_runtime);
-
-  raw_assembler_->Bind(&merge_runtime);
-  return raw_assembler_->Phi(MachineType::PointerRepresentation(),
-                             runtime_result, no_runtime_result);
-}
-
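AllocateRawUnaligned, removed above, emits a classic bump-pointer fast path as graph nodes: if the requested size fits between the space's top and limit, the old top is returned (tagged with kHeapObjectTag) and top is bumped; otherwise control branches to a deferred Runtime::kAllocateInTargetSpace call. A standalone C++ model of that control flow, with a simple arena standing in for a V8 space; all names here are illustrative:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

constexpr uintptr_t kHeapObjectTag = 1;  // matches V8's heap-object tag

struct Space {
  uintptr_t top;
  uintptr_t limit;
};

uintptr_t RuntimeAllocate(size_t size) {  // slow-path stand-in
  return reinterpret_cast<uintptr_t>(std::malloc(size));
}

uintptr_t AllocateRawUnaligned(Space* space, size_t size) {
  if (space->limit - space->top < size) {
    // Not enough room: the generated code calls the runtime here.
    return RuntimeAllocate(size) + kHeapObjectTag;
  }
  uintptr_t result = space->top;
  space->top += size;              // bump the top pointer
  return result + kHeapObjectTag;  // tag the result as a heap object
}

int main() {
  alignas(8) static char arena[64];
  Space space{reinterpret_cast<uintptr_t>(arena),
              reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
  uintptr_t a = AllocateRawUnaligned(&space, 16);
  uintptr_t b = AllocateRawUnaligned(&space, 16);
  std::printf("%zu\n", static_cast<size_t>(b - a));  // 16: consecutive bumps
  return 0;
}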
-Node* CodeAssembler::AllocateRawAligned(Node* size_in_bytes,
-                                        AllocationFlags flags,
-                                        Node* top_address,
-                                        Node* limit_address) {
-  Node* top = Load(MachineType::Pointer(), top_address);
-  Node* limit = Load(MachineType::Pointer(), limit_address);
-  Node* adjusted_size = size_in_bytes;
-  if (flags & kDoubleAlignment) {
-    // TODO(epertoso): Simd128 alignment.
-    RawMachineLabel aligned, not_aligned, merge;
-    raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
-                           &not_aligned, &aligned);
-
-    raw_assembler_->Bind(&not_aligned);
-    Node* not_aligned_size =
-        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
-    raw_assembler_->Goto(&merge);
-
-    raw_assembler_->Bind(&aligned);
-    raw_assembler_->Goto(&merge);
-
-    raw_assembler_->Bind(&merge);
-    adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
-                                        not_aligned_size, adjusted_size);
-  }
-
-  Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);
-
-  RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
-  raw_assembler_->Branch(
-      raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
-      &doesnt_need_filler, &needs_filler);
-
-  raw_assembler_->Bind(&needs_filler);
-  // Store a filler and increase the address by kPointerSize.
-  // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
-  // it when Simd128 alignment is supported.
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
-                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
-  Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
-  raw_assembler_->Goto(&merge_address);
-
-  raw_assembler_->Bind(&doesnt_need_filler);
-  Node* address_without_filler = address;
-  raw_assembler_->Goto(&merge_address);
-
-  raw_assembler_->Bind(&merge_address);
-  address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
-                                address_with_filler, address_without_filler);
-  // Update the top.
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
-                      IntPtrAdd(top, adjusted_size));
-  return address;
-}
-
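AllocateRawAligned handles double alignment by over-allocating one pointer when top is misaligned and plugging the resulting gap with a one-pointer filler map (Heap::kOnePointerFillerMapRootIndex) so the GC can still walk the space. A standalone model of the size adjustment and filler skip, assuming a 32-bit layout (kPointerSize = 4) where this path is actually taken; the filler store itself is only sketched in a comment:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kPointerSize = 4;
constexpr uintptr_t kDoubleAlignmentMask = 7;  // low bits that must be zero

struct Bump { uintptr_t top; };

uintptr_t AllocateRawAligned(Bump* space, uintptr_t size) {
  uintptr_t adjusted_size = size;
  if (space->top & kDoubleAlignmentMask) {
    adjusted_size += kPointerSize;  // reserve room for the filler word
  }
  uintptr_t address = space->top;
  space->top += adjusted_size;
  if (adjusted_size != size) {
    // The generated code stores the one-pointer filler map here so the GC
    // can iterate over the gap; this model just skips past it.
    address += kPointerSize;
  }
  return address;
}

int main() {
  Bump space{4};  // deliberately misaligned start
  uintptr_t a = AllocateRawAligned(&space, 8);
  assert((a & kDoubleAlignmentMask) == 0);  // result is double-aligned
  return 0;
}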
-Node* CodeAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
-  bool const new_space = !(flags & kPretenured);
-  Node* top_address = ExternalConstant(
-      new_space
-          ? ExternalReference::new_space_allocation_top_address(isolate())
-          : ExternalReference::old_space_allocation_top_address(isolate()));
-  Node* limit_address = ExternalConstant(
-      new_space
-          ? ExternalReference::new_space_allocation_limit_address(isolate())
-          : ExternalReference::old_space_allocation_limit_address(isolate()));
-
-#ifdef V8_HOST_ARCH_32_BIT
-  if (flags & kDoubleAlignment) {
-    return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
-                              limit_address);
-  }
-#endif
-
-  return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
-                              limit_address);
-}
-
-Node* CodeAssembler::InnerAllocate(Node* previous, int offset) {
-  return IntPtrAdd(previous, IntPtrConstant(offset));
-}
-
-Node* CodeAssembler::Load(MachineType rep, Node* base) {
-  return raw_assembler_->Load(rep, base);
-}
-
-Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
-  return raw_assembler_->Load(rep, base, index);
-}
-
 Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
   return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
 }

 Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* index,
                            Node* value) {
   return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
 }

 Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
(...skipping 437 matching lines...)
       }
     }
   }

   bound_ = true;
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
