Chromium Code Reviews

Side by Side Diff: src/interpreter/interpreter-assembler.cc

Issue 2552883012: [interpreter][stubs] Fixing issues found by machine graph verifier. (Closed)
Patch Set: Created 4 years ago
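Context for the diff below: the machine graph verifier checks that every machine operator receives inputs of the representation it expects, so 32-bit operators such as Word32Equal, Int32Sub and Int32Constant may no longer be applied to pointer-width values (bytecode offsets, context depths, register and feedback-slot indices) on 64-bit targets. The recurring fix in this patch is to keep such values at MachineType::PointerRepresentation(), use the WordEqual/IntPtrAdd/IntPtrConstant operator family, and convert explicitly at 32-bit boundaries. A minimal illustrative sketch of the pattern, using the CodeStubAssembler helpers that appear in this file:

    // Before: pointer-width |depth| fed into 32-bit operators;
    // rejected by the verifier on 64-bit targets.
    Branch(Word32Equal(depth, Int32Constant(0)), &found, &search);

    // After: value and comparison both at pointer width.
    Branch(WordEqual(depth, IntPtrConstant(0)), &found, &search);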
OLD | NEW
1 // Copyright 2015 the V8 project authors. All rights reserved. 1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/interpreter/interpreter-assembler.h" 5 #include "src/interpreter/interpreter-assembler.h"
6 6
7 #include <limits> 7 #include <limits>
8 #include <ostream> 8 #include <ostream>
9 9
10 #include "src/code-factory.h" 10 #include "src/code-factory.h"
(...skipping 68 matching lines...)
79 } 79 }
80 80
81 void InterpreterAssembler::SetContext(Node* value) { 81 void InterpreterAssembler::SetContext(Node* value) {
82 StoreRegister(value, Register::current_context()); 82 StoreRegister(value, Register::current_context());
83 } 83 }
84 84
85 Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) { 85 Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
86 Variable cur_context(this, MachineRepresentation::kTaggedPointer); 86 Variable cur_context(this, MachineRepresentation::kTaggedPointer);
87 cur_context.Bind(context); 87 cur_context.Bind(context);
88 88
89 Variable cur_depth(this, MachineRepresentation::kWord32); 89 Variable cur_depth(this, MachineType::PointerRepresentation());
90 cur_depth.Bind(depth); 90 cur_depth.Bind(depth);
91 91
92 Label context_found(this); 92 Label context_found(this);
93 93
94 Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context}; 94 Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
95 Label context_search(this, 2, context_search_loop_variables); 95 Label context_search(this, 2, context_search_loop_variables);
96 96
97 // Fast path if the depth is 0. 97 // Fast path if the depth is 0.
98 Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search); 98 Branch(WordEqual(depth, IntPtrConstant(0)), &context_found, &context_search);
99 99
100 // Loop until the depth is 0. 100 // Loop until the depth is 0.
101 Bind(&context_search); 101 Bind(&context_search);
102 { 102 {
103 cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1))); 103 cur_depth.Bind(IntPtrSub(cur_depth.value(), IntPtrConstant(1)));
104 cur_context.Bind( 104 cur_context.Bind(
105 LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); 105 LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
106 106
107 Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found, 107 Branch(WordEqual(cur_depth.value(), IntPtrConstant(0)), &context_found,
108 &context_search); 108 &context_search);
109 } 109 }
110 110
111 Bind(&context_found); 111 Bind(&context_found);
112 return cur_context.value(); 112 return cur_context.value();
113 } 113 }
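In GetContextAtDepth above, cur_depth is registered as a loop variable of the context_search label, and a CSA Variable keeps one machine representation for its whole lifetime; switching its declaration to MachineType::PointerRepresentation() therefore forces every rebinding to follow, which is why the decrement moves from Int32Sub/Int32Constant to IntPtrSub/IntPtrConstant in the same hunk. A condensed sketch of the constraint:

    Variable cur_depth(this, MachineType::PointerRepresentation());
    cur_depth.Bind(depth);  // must be pointer-width
    // Every later rebinding has to keep the same representation:
    cur_depth.Bind(IntPtrSub(cur_depth.value(), IntPtrConstant(1)));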
114 114
115 void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context, 115 void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
116 Node* depth, 116 Node* depth,
117 Label* target) { 117 Label* target) {
118 Variable cur_context(this, MachineRepresentation::kTaggedPointer); 118 Variable cur_context(this, MachineRepresentation::kTaggedPointer);
119 cur_context.Bind(context); 119 cur_context.Bind(context);
120 120
121 Variable cur_depth(this, MachineRepresentation::kWord32); 121 Variable cur_depth(this, MachineType::PointerRepresentation());
122 cur_depth.Bind(depth); 122 cur_depth.Bind(depth);
123 123
124 Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context}; 124 Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
125 Label context_search(this, 2, context_search_loop_variables); 125 Label context_search(this, 2, context_search_loop_variables);
126 126
127 // Loop until the depth is 0. 127 // Loop until the depth is 0.
128 Goto(&context_search); 128 Goto(&context_search);
129 Bind(&context_search); 129 Bind(&context_search);
130 { 130 {
131 // TODO(leszeks): We only need to do this check if the context had a sloppy 131 // TODO(leszeks): We only need to do this check if the context had a sloppy
132 // eval, we could pass in a context chain bitmask to figure out which 132 // eval, we could pass in a context chain bitmask to figure out which
133 // contexts actually need to be checked. 133 // contexts actually need to be checked.
134 134
135 Node* extension_slot = 135 Node* extension_slot =
136 LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX); 136 LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
137 137
138 // Jump to the target if the extension slot is not a hole. 138 // Jump to the target if the extension slot is not a hole.
139 GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target); 139 GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
140 140
141 cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1))); 141 cur_depth.Bind(IntPtrSub(cur_depth.value(), IntPtrConstant(1)));
142 cur_context.Bind( 142 cur_context.Bind(
143 LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); 143 LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
144 144
145 GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)), 145 GotoIf(WordNotEqual(cur_depth.value(), IntPtrConstant(0)), &context_search);
146 &context_search);
147 } 146 }
148 } 147 }
149 148
150 Node* InterpreterAssembler::BytecodeOffset() { 149 Node* InterpreterAssembler::BytecodeOffset() {
151 return bytecode_offset_.value(); 150 return bytecode_offset_.value();
152 } 151 }
153 152
154 Node* InterpreterAssembler::BytecodeArrayTaggedPointer() { 153 Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
155 if (made_call_) { 154 if (made_call_) {
156 // If we have made a call, restore bytecode array from stack frame in case 155 // If we have made a call, restore bytecode array from stack frame in case
(...skipping 47 matching lines...)
204 Node* InterpreterAssembler::OperandOffset(int operand_index) { 203 Node* InterpreterAssembler::OperandOffset(int operand_index) {
205 return IntPtrConstant( 204 return IntPtrConstant(
206 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale())); 205 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
207 } 206 }
208 207
209 Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) { 208 Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
210 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); 209 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
211 DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( 210 DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
212 bytecode_, operand_index, operand_scale())); 211 bytecode_, operand_index, operand_scale()));
213 Node* operand_offset = OperandOffset(operand_index); 212 Node* operand_offset = OperandOffset(operand_index);
214 return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), 213 Node* load = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
215 IntPtrAdd(BytecodeOffset(), operand_offset)); 214 IntPtrAdd(BytecodeOffset(), operand_offset));
215 return ChangeUint32ToWord(load);
rmcilroy 2016/12/09 11:21:37 Not something for this CL (unless it's easier) but
Igor Sheludko 2016/12/10 00:09:50 Done.
216 } 216 }
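The Uint8 load above yields a 32-bit value in the machine graph, while operand consumers do pointer-width arithmetic; ChangeUint32ToWord zero-extends the unsigned operand to word size (and is a no-op on 32-bit targets), replacing an implicit mix of representations with an explicit conversion:

    Node* load = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
                      IntPtrAdd(BytecodeOffset(), operand_offset));
    return ChangeUint32ToWord(load);  // zero-extend: operand is unsigned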
217 217
218 Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) { 218 Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
219 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); 219 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
220 DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( 220 DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
221 bytecode_, operand_index, operand_scale())); 221 bytecode_, operand_index, operand_scale()));
222 Node* operand_offset = OperandOffset(operand_index); 222 Node* operand_offset = OperandOffset(operand_index);
223 Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), 223 Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
224 IntPtrAdd(BytecodeOffset(), operand_offset)); 224 IntPtrAdd(BytecodeOffset(), operand_offset));
225 225
226 // Ensure that we sign extend to full pointer size 226 // Ensure that we sign extend to full pointer size
227 if (kPointerSize == 8) { 227 return ChangeInt32ToIntPtr(load);
228 load = ChangeInt32ToInt64(load);
229 }
230 return load;
231 } 228 }
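For signed operands the widening must preserve the sign bit, so the new code uses ChangeInt32ToIntPtr, which also subsumes the old hand-rolled pattern:

    // Old: explicit architecture check.
    if (kPointerSize == 8) load = ChangeInt32ToInt64(load);
    return load;

    // New: sign-extends on 64-bit targets, no-op on 32-bit ones.
    return ChangeInt32ToIntPtr(load);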
232 229
233 compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned( 230 compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
234 int relative_offset, MachineType result_type) { 231 int relative_offset, MachineType result_type) {
235 static const int kMaxCount = 4; 232 static const int kMaxCount = 4;
236 DCHECK(!TargetSupportsUnalignedAccess()); 233 DCHECK(!TargetSupportsUnalignedAccess());
237 234
238 int count; 235 int count;
239 switch (result_type.representation()) { 236 switch (result_type.representation()) {
240 case MachineRepresentation::kWord16: 237 case MachineRepresentation::kWord16:
(...skipping 40 matching lines...)
281 return result; 278 return result;
282 } 279 }
283 280
284 Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) { 281 Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
285 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); 282 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
286 DCHECK_EQ( 283 DCHECK_EQ(
287 OperandSize::kShort, 284 OperandSize::kShort,
288 Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale())); 285 Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
289 int operand_offset = 286 int operand_offset =
290 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); 287 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
288 Node* load;
291 if (TargetSupportsUnalignedAccess()) { 289 if (TargetSupportsUnalignedAccess()) {
292 return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(), 290 load = Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
293 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); 291 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
294 } else { 292 } else {
295 return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()); 293 load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
296 } 294 }
295 return ChangeUint32ToWord(load);
297 } 296 }
298 297
299 Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) { 298 Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
300 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); 299 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
301 DCHECK_EQ( 300 DCHECK_EQ(
302 OperandSize::kShort, 301 OperandSize::kShort,
303 Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale())); 302 Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
304 int operand_offset = 303 int operand_offset =
305 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); 304 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
306 Node* load; 305 Node* load;
307 if (TargetSupportsUnalignedAccess()) { 306 if (TargetSupportsUnalignedAccess()) {
308 load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(), 307 load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
309 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); 308 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
310 } else { 309 } else {
311 load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16()); 310 load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
312 } 311 }
313 312
314 // Ensure that we sign extend to full pointer size 313 // Ensure that we sign extend to full pointer size
315 if (kPointerSize == 8) { 314 return ChangeInt32ToIntPtr(load);
316 load = ChangeInt32ToInt64(load);
317 }
318 return load;
319 } 315 }
320 316
321 Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) { 317 Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
322 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); 318 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
323 DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize( 319 DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
324 bytecode_, operand_index, operand_scale())); 320 bytecode_, operand_index, operand_scale()));
325 int operand_offset = 321 int operand_offset =
326 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); 322 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
323 Node* load;
327 if (TargetSupportsUnalignedAccess()) { 324 if (TargetSupportsUnalignedAccess()) {
328 return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(), 325 load = Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
329 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); 326 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
330 } else { 327 } else {
331 return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()); 328 load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
332 } 329 }
330 return ChangeUint32ToWord(load);
333 } 331 }
334 332
335 Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) { 333 Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
336 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); 334 DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
337 DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize( 335 DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
338 bytecode_, operand_index, operand_scale())); 336 bytecode_, operand_index, operand_scale()));
339 int operand_offset = 337 int operand_offset =
340 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); 338 Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
341 Node* load; 339 Node* load;
342 if (TargetSupportsUnalignedAccess()) { 340 if (TargetSupportsUnalignedAccess()) {
343 load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), 341 load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
344 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); 342 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
345 } else { 343 } else {
346 load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32()); 344 load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
347 } 345 }
348 346
349 // Ensure that we sign extend to full pointer size 347 // Ensure that we sign extend to full pointer size
350 if (kPointerSize == 8) { 348 return ChangeInt32ToIntPtr(load);
351 load = ChangeInt32ToInt64(load);
352 }
353 return load;
354 } 349 }
355 350
356 Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index, 351 Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
357 OperandSize operand_size) { 352 OperandSize operand_size) {
358 DCHECK(!Bytecodes::IsUnsignedOperandType( 353 DCHECK(!Bytecodes::IsUnsignedOperandType(
359 Bytecodes::GetOperandType(bytecode_, operand_index))); 354 Bytecodes::GetOperandType(bytecode_, operand_index)));
360 switch (operand_size) { 355 switch (operand_size) {
361 case OperandSize::kByte: 356 case OperandSize::kByte:
362 return BytecodeOperandSignedByte(operand_index); 357 return BytecodeOperandSignedByte(operand_index);
363 case OperandSize::kShort: 358 case OperandSize::kShort:
(...skipping 86 matching lines...)
450 Bytecodes::GetOperandType(bytecode_, operand_index)); 445 Bytecodes::GetOperandType(bytecode_, operand_index));
451 OperandSize operand_size = 446 OperandSize operand_size =
452 Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); 447 Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
453 DCHECK_EQ(operand_size, OperandSize::kByte); 448 DCHECK_EQ(operand_size, OperandSize::kByte);
454 return BytecodeUnsignedOperand(operand_index, operand_size); 449 return BytecodeUnsignedOperand(operand_index, operand_size);
455 } 450 }
456 451
457 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) { 452 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
458 Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(), 453 Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
459 BytecodeArray::kConstantPoolOffset); 454 BytecodeArray::kConstantPoolOffset);
460 Node* entry_offset = 455 return LoadFixedArrayElement(constant_pool, index, 0, INTPTR_PARAMETERS);
461 IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
462 WordShl(index, kPointerSizeLog2));
463 return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
464 } 456 }
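Replacing the manual offset computation with LoadFixedArrayElement removes duplicated FixedArray layout arithmetic and records the index mode: INTPTR_PARAMETERS tells the helper that |index| is an untagged pointer-width value rather than a Smi (the 0 is the additional constant offset). The two forms compute the same address:

    // Manual form (old):
    Node* entry_offset =
        IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
                  WordShl(index, kPointerSizeLog2));
    Node* entry = Load(MachineType::AnyTagged(), constant_pool, entry_offset);

    // Helper form (new):
    Node* entry = LoadFixedArrayElement(constant_pool, index, 0,
                                        INTPTR_PARAMETERS);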
465 457
466 Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) { 458 Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
467 Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(), 459 Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
468 BytecodeArray::kConstantPoolOffset); 460 BytecodeArray::kConstantPoolOffset);
461 // TODO(ishell): move the implementation to CSA.
469 int offset = FixedArray::kHeaderSize - kHeapObjectTag; 462 int offset = FixedArray::kHeaderSize - kHeapObjectTag;
470 #if V8_TARGET_LITTLE_ENDIAN 463 #if V8_TARGET_LITTLE_ENDIAN
471 if (Is64()) { 464 if (Is64()) {
472 offset += kPointerSize / 2; 465 offset += kPointerSize / 2;
473 } 466 }
474 #endif 467 #endif
475 Node* entry_offset = 468 Node* entry_offset =
476 IntPtrAdd(IntPtrConstant(offset), WordShl(index, kPointerSizeLog2)); 469 IntPtrAdd(IntPtrConstant(offset), WordShl(index, kPointerSizeLog2));
477 if (Is64()) { 470 if (Is64()) {
478 return ChangeInt32ToInt64( 471 return ChangeInt32ToInt64(
(...skipping 29 matching lines...)
508 stack_pointer_before_call_ = nullptr; 501 stack_pointer_before_call_ = nullptr;
509 AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call, 502 AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
510 kUnexpectedStackPointer); 503 kUnexpectedStackPointer);
511 } 504 }
512 } 505 }
513 506
514 Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector, 507 Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector,
515 Node* slot_id) { 508 Node* slot_id) {
516 Comment("increment call count"); 509 Comment("increment call count");
517 Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1)); 510 Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
518 Node* call_count = 511 Node* call_count = LoadFixedArrayElement(
519 LoadFixedArrayElement(type_feedback_vector, call_count_slot); 512 type_feedback_vector, call_count_slot, 0, INTPTR_PARAMETERS);
520 Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1))); 513 Node* new_count = SmiAdd(call_count, SmiConstant(1));
521 // Count is Smi, so we don't need a write barrier. 514 // Count is Smi, so we don't need a write barrier.
522 return StoreFixedArrayElement(type_feedback_vector, call_count_slot, 515 return StoreFixedArrayElement(type_feedback_vector, call_count_slot,
523 new_count, SKIP_WRITE_BARRIER); 516 new_count, SKIP_WRITE_BARRIER, 0,
517 INTPTR_PARAMETERS);
524 } 518 }
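The SmiConstant(1) change in IncrementCallCount fixes another representation mismatch: SmiTag shifts a pointer-width input, so SmiTag(Int32Constant(1)) fed a 32-bit value into a word-width operation, whereas SmiConstant builds the tagged constant directly:

    Node* new_count = SmiAdd(call_count, SmiConstant(1));  // was SmiTag(Int32Constant(1))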
525 519
526 Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context, 520 Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
527 Node* first_arg, Node* arg_count, 521 Node* first_arg, Node* arg_count,
528 Node* slot_id, 522 Node* slot_id,
529 Node* type_feedback_vector, 523 Node* type_feedback_vector,
530 TailCallMode tail_call_mode) { 524 TailCallMode tail_call_mode) {
531 // Static checks to assert it is safe to examine the type feedback element. 525 // Static checks to assert it is safe to examine the type feedback element.
532 // We don't know that we have a weak cell. We might have a private symbol 526 // We don't know that we have a weak cell. We might have a private symbol
533 // or an AllocationSite, but the memory is safe to examine. 527 // or an AllocationSite, but the memory is safe to examine.
534 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to 528 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
535 // FixedArray. 529 // FixedArray.
536 // WeakCell::kValueOffset - contains a JSFunction or Smi(0) 530 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
537 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not 531 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
538 // computed, meaning that it can't appear to be a pointer. If the low bit is 532 // computed, meaning that it can't appear to be a pointer. If the low bit is
539 // 0, then hash is computed, but the 0 bit prevents the field from appearing 533 // 0, then hash is computed, but the 0 bit prevents the field from appearing
540 // to be a pointer. 534 // to be a pointer.
541 STATIC_ASSERT(WeakCell::kSize >= kPointerSize); 535 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
542 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset == 536 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
543 WeakCell::kValueOffset && 537 WeakCell::kValueOffset &&
544 WeakCell::kValueOffset == Symbol::kHashFieldSlot); 538 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
545 539
540 // Truncate |arg_count| to match the calling convention.
541 arg_count = TruncateWordToWord32(arg_count);
542
546 Variable return_value(this, MachineRepresentation::kTagged); 543 Variable return_value(this, MachineRepresentation::kTagged);
547 Label call_function(this), extra_checks(this, Label::kDeferred), call(this), 544 Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
548 end(this); 545 end(this);
549 546
550 // The checks. First, does function match the recorded monomorphic target? 547 // The checks. First, does function match the recorded monomorphic target?
551 Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id); 548 Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id,
549 0, INTPTR_PARAMETERS);
552 Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element); 550 Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
553 Node* is_monomorphic = WordEqual(function, feedback_value); 551 Node* is_monomorphic = WordEqual(function, feedback_value);
554 GotoUnless(is_monomorphic, &extra_checks); 552 GotoUnless(is_monomorphic, &extra_checks);
555 553
556 // The compare above could have been a SMI/SMI comparison. Guard against 554 // The compare above could have been a SMI/SMI comparison. Guard against
557 // this convincing us that we have a monomorphic JSFunction. 555 // this convincing us that we have a monomorphic JSFunction.
558 Node* is_smi = TaggedIsSmi(function); 556 Node* is_smi = TaggedIsSmi(function);
559 Branch(is_smi, &extra_checks, &call_function); 557 Branch(is_smi, &extra_checks, &call_function);
560 558
561 Bind(&call_function); 559 Bind(&call_function);
(...skipping 17 matching lines...)
579 create_allocation_site(this); 577 create_allocation_site(this);
580 578
581 Comment("check if megamorphic"); 579 Comment("check if megamorphic");
582 // Check if it is a megamorphic target. 580 // Check if it is a megamorphic target.
583 Node* is_megamorphic = WordEqual( 581 Node* is_megamorphic = WordEqual(
584 feedback_element, 582 feedback_element,
585 HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate()))); 583 HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
586 GotoIf(is_megamorphic, &call); 584 GotoIf(is_megamorphic, &call);
587 585
588 Comment("check if it is an allocation site"); 586 Comment("check if it is an allocation site");
589 Node* is_allocation_site = WordEqual( 587 GotoUnless(IsAllocationSiteMap(LoadMap(feedback_element)),
590 LoadMap(feedback_element), LoadRoot(Heap::kAllocationSiteMapRootIndex)); 588 &check_initialized);
591 GotoUnless(is_allocation_site, &check_initialized);
592 589
593 // If it is not the Array() function, mark megamorphic. 590 // If it is not the Array() function, mark megamorphic.
594 Node* context_slot = LoadContextElement(LoadNativeContext(context), 591 Node* context_slot = LoadContextElement(LoadNativeContext(context),
595 Context::ARRAY_FUNCTION_INDEX); 592 Context::ARRAY_FUNCTION_INDEX);
596 Node* is_array_function = WordEqual(context_slot, function); 593 Node* is_array_function = WordEqual(context_slot, function);
597 GotoUnless(is_array_function, &mark_megamorphic); 594 GotoUnless(is_array_function, &mark_megamorphic);
598 595
599 // It is a monomorphic Array function. Increment the call count. 596 // It is a monomorphic Array function. Increment the call count.
600 IncrementCallCount(type_feedback_vector, slot_id); 597 IncrementCallCount(type_feedback_vector, slot_id);
601 598
(...skipping 17 matching lines...)
619 GotoUnless(is_uninitialized, &mark_megamorphic); 616 GotoUnless(is_uninitialized, &mark_megamorphic);
620 617
621 Comment("handle_unitinitialized"); 618 Comment("handle_unitinitialized");
622 // If it is not a JSFunction mark it as megamorphic. 619 // If it is not a JSFunction mark it as megamorphic.
623 Node* is_smi = TaggedIsSmi(function); 620 Node* is_smi = TaggedIsSmi(function);
624 GotoIf(is_smi, &mark_megamorphic); 621 GotoIf(is_smi, &mark_megamorphic);
625 622
626 // Check if function is an object of JSFunction type. 623 // Check if function is an object of JSFunction type.
627 Node* instance_type = LoadInstanceType(function); 624 Node* instance_type = LoadInstanceType(function);
628 Node* is_js_function = 625 Node* is_js_function =
629 WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE)); 626 Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
630 GotoUnless(is_js_function, &mark_megamorphic); 627 GotoUnless(is_js_function, &mark_megamorphic);
631 628
632 // Check if it is the Array() function. 629 // Check if it is the Array() function.
633 Node* context_slot = LoadContextElement(LoadNativeContext(context), 630 Node* context_slot = LoadContextElement(LoadNativeContext(context),
634 Context::ARRAY_FUNCTION_INDEX); 631 Context::ARRAY_FUNCTION_INDEX);
635 Node* is_array_function = WordEqual(context_slot, function); 632 Node* is_array_function = WordEqual(context_slot, function);
636 GotoIf(is_array_function, &create_allocation_site); 633 GotoIf(is_array_function, &create_allocation_site);
637 634
638 // Check if the function belongs to the same native context. 635 // Check if the function belongs to the same native context.
639 Node* native_context = LoadNativeContext( 636 Node* native_context = LoadNativeContext(
(...skipping 22 matching lines...)
662 659
663 Bind(&mark_megamorphic); 660 Bind(&mark_megamorphic);
664 { 661 {
665 // Mark it as a megamorphic. 662 // Mark it as a megamorphic.
666 // MegamorphicSentinel is created as a part of Heap::InitialObjects 663 // MegamorphicSentinel is created as a part of Heap::InitialObjects
667 // and will not move during a GC. So it is safe to skip write barrier. 664 // and will not move during a GC. So it is safe to skip write barrier.
668 DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex)); 665 DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
669 StoreFixedArrayElement( 666 StoreFixedArrayElement(
670 type_feedback_vector, slot_id, 667 type_feedback_vector, slot_id,
671 HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())), 668 HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
672 SKIP_WRITE_BARRIER); 669 SKIP_WRITE_BARRIER, 0, INTPTR_PARAMETERS);
673 Goto(&call); 670 Goto(&call);
674 } 671 }
675 } 672 }
676 673
677 Bind(&call); 674 Bind(&call);
678 { 675 {
679 Comment("Increment call count and call using Call builtin"); 676 Comment("Increment call count and call using Call builtin");
680 // Increment the call count. 677 // Increment the call count.
681 IncrementCallCount(type_feedback_vector, slot_id); 678 IncrementCallCount(type_feedback_vector, slot_id);
682 679
(...skipping 10 matching lines...)
693 Bind(&end); 690 Bind(&end);
694 return return_value.value(); 691 return return_value.value();
695 } 692 }
696 693
697 Node* InterpreterAssembler::CallJS(Node* function, Node* context, 694 Node* InterpreterAssembler::CallJS(Node* function, Node* context,
698 Node* first_arg, Node* arg_count, 695 Node* first_arg, Node* arg_count,
699 TailCallMode tail_call_mode) { 696 TailCallMode tail_call_mode) {
700 Callable callable = CodeFactory::InterpreterPushArgsAndCall( 697 Callable callable = CodeFactory::InterpreterPushArgsAndCall(
701 isolate(), tail_call_mode, CallableType::kAny); 698 isolate(), tail_call_mode, CallableType::kAny);
702 Node* code_target = HeapConstant(callable.code()); 699 Node* code_target = HeapConstant(callable.code());
700
701 // Truncate |arg_count| to match the calling convention.
702 arg_count = TruncateWordToWord32(arg_count);
703
703 return CallStub(callable.descriptor(), code_target, context, arg_count, 704 return CallStub(callable.descriptor(), code_target, context, arg_count,
704 first_arg, function); 705 first_arg, function);
705 } 706 }
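The TruncateWordToWord32(arg_count) calls added in CallJSWithFeedback, CallJS, CallConstruct and CallRuntimeN all mark the same boundary: the interpreter now carries argument counts at pointer width, while the stub calling conventions declare a 32-bit count. The conversions also go the other way where appropriate: LoadInstanceType produces a 32-bit value, so the instance-type checks in this file switch from WordEqual to Word32Equal. Sketch of the call-site pattern:

    // Pointer-width inside the interpreter; 32-bit at the stub boundary.
    arg_count = TruncateWordToWord32(arg_count);
    return CallStub(callable.descriptor(), code_target, context, arg_count,
                    first_arg, function);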
706 707
707 Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, 708 Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
708 Node* new_target, Node* first_arg, 709 Node* new_target, Node* first_arg,
709 Node* arg_count, Node* slot_id, 710 Node* arg_count, Node* slot_id,
710 Node* type_feedback_vector) { 711 Node* type_feedback_vector) {
711 Variable return_value(this, MachineRepresentation::kTagged); 712 Variable return_value(this, MachineRepresentation::kTagged);
712 Variable allocation_feedback(this, MachineRepresentation::kTagged); 713 Variable allocation_feedback(this, MachineRepresentation::kTagged);
713 Label call_construct_function(this, &allocation_feedback), 714 Label call_construct_function(this, &allocation_feedback),
714 extra_checks(this, Label::kDeferred), call_construct(this), end(this); 715 extra_checks(this, Label::kDeferred), call_construct(this), end(this);
715 716
717 // Truncate |arg_count| to match the calling convention.
718 arg_count = TruncateWordToWord32(arg_count);
719
716 // Slot id of 0 is used to indicate no type feedback is available. 720 // Slot id of 0 is used to indicate no type feedback is available.
717 STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0); 721 STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
718 Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0)); 722 Node* is_feedback_unavailable = WordEqual(slot_id, IntPtrConstant(0));
719 GotoIf(is_feedback_unavailable, &call_construct); 723 GotoIf(is_feedback_unavailable, &call_construct);
720 724
721 // Check that the constructor is not a smi. 725 // Check that the constructor is not a smi.
722 Node* is_smi = TaggedIsSmi(constructor); 726 Node* is_smi = TaggedIsSmi(constructor);
723 GotoIf(is_smi, &call_construct); 727 GotoIf(is_smi, &call_construct);
724 728
725 // Check that constructor is a JSFunction. 729 // Check that constructor is a JSFunction.
726 Node* instance_type = LoadInstanceType(constructor); 730 Node* instance_type = LoadInstanceType(constructor);
727 Node* is_js_function = 731 Node* is_js_function =
728 WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE)); 732 Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
729 GotoUnless(is_js_function, &call_construct); 733 GotoUnless(is_js_function, &call_construct);
730 734
731 // Check if it is a monomorphic constructor. 735 // Check if it is a monomorphic constructor.
732 Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id); 736 Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id,
737 0, INTPTR_PARAMETERS);
733 Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element); 738 Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
734 Node* is_monomorphic = WordEqual(constructor, feedback_value); 739 Node* is_monomorphic = WordEqual(constructor, feedback_value);
735 allocation_feedback.Bind(UndefinedConstant()); 740 allocation_feedback.Bind(UndefinedConstant());
736 Branch(is_monomorphic, &call_construct_function, &extra_checks); 741 Branch(is_monomorphic, &call_construct_function, &extra_checks);
737 742
738 Bind(&call_construct_function); 743 Bind(&call_construct_function);
739 { 744 {
740 Comment("call using callConstructFunction"); 745 Comment("call using callConstructFunction");
741 IncrementCallCount(type_feedback_vector, slot_id); 746 IncrementCallCount(type_feedback_vector, slot_id);
742 Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct( 747 Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
(...skipping 84 matching lines...)
827 832
828 Bind(&mark_megamorphic); 833 Bind(&mark_megamorphic);
829 { 834 {
830 // MegamorphicSentinel is an immortal immovable object so 835 // MegamorphicSentinel is an immortal immovable object so
831 // write-barrier is not needed. 836 // write-barrier is not needed.
832 Comment("transition to megamorphic"); 837 Comment("transition to megamorphic");
833 DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex)); 838 DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
834 StoreFixedArrayElement( 839 StoreFixedArrayElement(
835 type_feedback_vector, slot_id, 840 type_feedback_vector, slot_id,
836 HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())), 841 HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
837 SKIP_WRITE_BARRIER); 842 SKIP_WRITE_BARRIER, 0, INTPTR_PARAMETERS);
838 Goto(&call_construct_function); 843 Goto(&call_construct_function);
839 } 844 }
840 } 845 }
841 846
842 Bind(&call_construct); 847 Bind(&call_construct);
843 { 848 {
844 Comment("call using callConstruct builtin"); 849 Comment("call using callConstruct builtin");
845 Callable callable = CodeFactory::InterpreterPushArgsAndConstruct( 850 Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
846 isolate(), CallableType::kAny); 851 isolate(), CallableType::kAny);
847 Node* code_target = HeapConstant(callable.code()); 852 Node* code_target = HeapConstant(callable.code());
(...skipping 10 matching lines...)
858 Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context, 863 Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
859 Node* first_arg, Node* arg_count, 864 Node* first_arg, Node* arg_count,
860 int result_size) { 865 int result_size) {
861 Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size); 866 Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
862 Node* code_target = HeapConstant(callable.code()); 867 Node* code_target = HeapConstant(callable.code());
863 868
864 // Get the function entry from the function id. 869 // Get the function entry from the function id.
865 Node* function_table = ExternalConstant( 870 Node* function_table = ExternalConstant(
866 ExternalReference::runtime_function_table_address(isolate())); 871 ExternalReference::runtime_function_table_address(isolate()));
867 Node* function_offset = 872 Node* function_offset =
868 Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function))); 873 IntPtrMul(function_id, IntPtrConstant(sizeof(Runtime::Function)));
869 Node* function = IntPtrAdd(function_table, function_offset); 874 Node* function = IntPtrAdd(function_table, function_offset);
870 Node* function_entry = 875 Node* function_entry =
871 Load(MachineType::Pointer(), function, 876 Load(MachineType::Pointer(), function,
872 IntPtrConstant(offsetof(Runtime::Function, entry))); 877 IntPtrConstant(offsetof(Runtime::Function, entry)));
873 878
879 // Truncate |arg_count| to match the calling convention.
880 arg_count = TruncateWordToWord32(arg_count);
881
874 return CallStub(callable.descriptor(), code_target, context, arg_count, 882 return CallStub(callable.descriptor(), code_target, context, arg_count,
875 first_arg, function_entry, result_size); 883 first_arg, function_entry, result_size);
876 } 884 }
877 885
878 void InterpreterAssembler::UpdateInterruptBudget(Node* weight) { 886 void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
879 // TODO(rmcilroy): It might be worthwhile to only update the budget for 887 // TODO(rmcilroy): It might be worthwhile to only update the budget for
880 // backwards branches. Those are distinguishable by the {JumpLoop} bytecode. 888 // backwards branches. Those are distinguishable by the {JumpLoop} bytecode.
881 889
882 Label ok(this), interrupt_check(this, Label::kDeferred), end(this); 890 Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
883 Node* budget_offset = 891 Node* budget_offset =
884 IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag); 892 IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
885 893
886 // Update budget by |weight| and check if it reaches zero. 894 // Update budget by |weight| and check if it reaches zero.
887 Variable new_budget(this, MachineRepresentation::kWord32); 895 Variable new_budget(this, MachineRepresentation::kWord32);
888 Node* old_budget = 896 Node* old_budget =
889 Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset); 897 Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
890 new_budget.Bind(Int32Add(old_budget, weight)); 898 new_budget.Bind(Int32Add(old_budget, TruncateWordToWord32(weight)));
891 Node* condition = 899 Node* condition =
892 Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0)); 900 Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
893 Branch(condition, &ok, &interrupt_check); 901 Branch(condition, &ok, &interrupt_check);
894 902
895 // Perform interrupt and reset budget. 903 // Perform interrupt and reset budget.
896 Bind(&interrupt_check); 904 Bind(&interrupt_check);
897 { 905 {
898 CallRuntime(Runtime::kInterrupt, GetContext()); 906 CallRuntime(Runtime::kInterrupt, GetContext());
899 new_budget.Bind(Int32Constant(Interpreter::InterruptBudget())); 907 new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
900 Goto(&ok); 908 Goto(&ok);
(...skipping 47 matching lines...)
948 } 956 }
949 957
950 void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs, 958 void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
951 Node* delta) { 959 Node* delta) {
952 JumpConditional(WordNotEqual(lhs, rhs), delta); 960 JumpConditional(WordNotEqual(lhs, rhs), delta);
953 } 961 }
954 962
955 Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) { 963 Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
956 Node* bytecode = 964 Node* bytecode =
957 Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset); 965 Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
958 if (kPointerSize == 8) { 966 return ChangeUint32ToWord(bytecode);
959 bytecode = ChangeUint32ToUint64(bytecode);
960 }
961 return bytecode;
962 } 967 }
963 968
964 Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) { 969 Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
965 Label do_inline_star(this), done(this); 970 Label do_inline_star(this), done(this);
966 971
967 Variable var_bytecode(this, MachineType::PointerRepresentation()); 972 Variable var_bytecode(this, MachineType::PointerRepresentation());
968 var_bytecode.Bind(target_bytecode); 973 var_bytecode.Bind(target_bytecode);
969 974
970 Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar)); 975 Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
971 Node* is_star = WordEqual(target_bytecode, star_bytecode); 976 Node* is_star = WordEqual(target_bytecode, star_bytecode);
(...skipping 22 matching lines...)
994 StoreRegister(GetAccumulator(), BytecodeOperandReg(0)); 999 StoreRegister(GetAccumulator(), BytecodeOperandReg(0));
995 1000
996 DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_)); 1001 DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
997 1002
998 Advance(); 1003 Advance();
999 bytecode_ = previous_bytecode; 1004 bytecode_ = previous_bytecode;
1000 accumulator_use_ = previous_acc_use; 1005 accumulator_use_ = previous_acc_use;
1001 } 1006 }
1002 1007
1003 Node* InterpreterAssembler::Dispatch() { 1008 Node* InterpreterAssembler::Dispatch() {
1009 Comment("========= Dispatch");
1004 Node* target_offset = Advance(); 1010 Node* target_offset = Advance();
1005 Node* target_bytecode = LoadBytecode(target_offset); 1011 Node* target_bytecode = LoadBytecode(target_offset);
1006 1012
1007 if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) { 1013 if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
1008 target_bytecode = StarDispatchLookahead(target_bytecode); 1014 target_bytecode = StarDispatchLookahead(target_bytecode);
1009 } 1015 }
1010 return DispatchToBytecode(target_bytecode, BytecodeOffset()); 1016 return DispatchToBytecode(target_bytecode, BytecodeOffset());
1011 } 1017 }
1012 1018
1013 Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode, 1019 Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
1014 Node* new_bytecode_offset) { 1020 Node* new_bytecode_offset) {
1015 if (FLAG_trace_ignition_dispatches) { 1021 if (FLAG_trace_ignition_dispatches) {
1016 TraceBytecodeDispatch(target_bytecode); 1022 TraceBytecodeDispatch(target_bytecode);
1017 } 1023 }
1018 1024
1019 Node* target_code_entry = 1025 Node* target_code_entry =
1020 Load(MachineType::Pointer(), DispatchTableRawPointer(), 1026 Load(MachineType::Pointer(), DispatchTableRawPointer(),
1021 WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2))); 1027 WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
1022 1028
1023 return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset); 1029 return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
1024 } 1030 }
1025 1031
1026 Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler, 1032 Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
1027 Node* bytecode_offset) { 1033 Node* bytecode_offset) {
1028 Node* handler_entry = 1034 Node* handler_entry =
1029 IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)); 1035 IntPtrAdd(BitcastTaggedToWord(handler),
1036 IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
rmcilroy 2016/12/09 11:21:37 nit - Maybe there should be a CSA helper which con
Igor Sheludko 2016/12/10 00:09:50 Added TODO, will address in a follow-up CL.
1030 return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset); 1037 return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
1031 } 1038 }
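The BitcastTaggedToWord added above makes the tagged-to-raw transition explicit: |handler| is a tagged Code pointer, and doing IntPtrAdd on it directly mixes tagged and word representations (the bitcast costs nothing at runtime; it only informs the verifier). One possible shape for the CSA helper rmcilroy suggests, sketched here as a hypothetical follow-up and not part of this CL:

    Node* CodeObjectEntryPoint(Node* code) {
      // Reinterpret the tagged Code pointer, then skip the object header.
      return IntPtrAdd(BitcastTaggedToWord(code),
                       IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
    }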
1032 1039
1033 Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry( 1040 Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
1034 Node* handler_entry, Node* bytecode_offset) { 1041 Node* handler_entry, Node* bytecode_offset) {
1035 InterpreterDispatchDescriptor descriptor(isolate()); 1042 InterpreterDispatchDescriptor descriptor(isolate());
1036 Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset, 1043 Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
1037 BytecodeArrayTaggedPointer(), DispatchTableRawPointer()}; 1044 BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
1038 return TailCallBytecodeDispatch(descriptor, handler_entry, args); 1045 return TailCallBytecodeDispatch(descriptor, handler_entry, args);
1039 } 1046 }
(...skipping 120 matching lines...)
1160 return var_result.value(); 1167 return var_result.value();
1161 } 1168 }
1162 1169
1163 void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { 1170 void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
1164 // TODO(rmcilroy): Investigate whether it is worth supporting self 1171 // TODO(rmcilroy): Investigate whether it is worth supporting self
1165 // optimization of primitive functions like FullCodegen. 1172 // optimization of primitive functions like FullCodegen.
1166 1173
1167 // Update profiling count by -BytecodeOffset to simulate backedge to start of 1174 // Update profiling count by -BytecodeOffset to simulate backedge to start of
1168 // function. 1175 // function.
1169 Node* profiling_weight = 1176 Node* profiling_weight =
1170 Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize), 1177 IntPtrSub(IntPtrConstant(kHeapObjectTag + BytecodeArray::kHeaderSize),
1171 BytecodeOffset()); 1178 BytecodeOffset());
1172 UpdateInterruptBudget(profiling_weight); 1179 UpdateInterruptBudget(profiling_weight);
1173 } 1180 }
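UpdateInterruptBudgetOnReturn and UpdateInterruptBudget split the representation work between caller and callee: the weight is computed at pointer width (IntPtrSub over BytecodeOffset()) and is narrowed only where it meets the interrupt budget, which is a 32-bit field in the BytecodeArray:

    // Caller: pointer-width weight derived from BytecodeOffset().
    Node* profiling_weight =
        IntPtrSub(IntPtrConstant(kHeapObjectTag + BytecodeArray::kHeaderSize),
                  BytecodeOffset());
    UpdateInterruptBudget(profiling_weight);

    // Callee: the budget is Int32, so truncate at the boundary.
    new_budget.Bind(Int32Add(old_budget, TruncateWordToWord32(weight)));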
1174 1181
1175 Node* InterpreterAssembler::StackCheckTriggeredInterrupt() { 1182 Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
1176 Node* sp = LoadStackPointer(); 1183 Node* sp = LoadStackPointer();
1177 Node* stack_limit = Load( 1184 Node* stack_limit = Load(
1178 MachineType::Pointer(), 1185 MachineType::Pointer(),
1179 ExternalConstant(ExternalReference::address_of_stack_limit(isolate()))); 1186 ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
1180 return UintPtrLessThan(sp, stack_limit); 1187 return UintPtrLessThan(sp, stack_limit);
1181 } 1188 }
1182 1189
1183 Node* InterpreterAssembler::LoadOSRNestingLevel() { 1190 Node* InterpreterAssembler::LoadOSRNestingLevel() {
1184 Node* offset = 1191 Node* offset =
1185 IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag); 1192 IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag);
1186 return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset); 1193 Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset);
1194 // Ensure that we sign extend to full pointer size
1195 return ChangeInt32ToIntPtr(load);
1187 } 1196 }
1188 1197
1189 void InterpreterAssembler::Abort(BailoutReason bailout_reason) { 1198 void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
1190 disable_stack_check_across_call_ = true; 1199 disable_stack_check_across_call_ = true;
1191 Node* abort_id = SmiTag(Int32Constant(bailout_reason)); 1200 Node* abort_id = SmiTag(Int32Constant(bailout_reason));
1192 CallRuntime(Runtime::kAbort, GetContext(), abort_id); 1201 CallRuntime(Runtime::kAbort, GetContext(), abort_id);
1193 disable_stack_check_across_call_ = false; 1202 disable_stack_check_across_call_ = false;
1194 } 1203 }
1195 1204
1196 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs, 1205 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
(...skipping 51 matching lines...)
1248 V8_TARGET_ARCH_PPC 1257 V8_TARGET_ARCH_PPC
1249 return true; 1258 return true;
1250 #else 1259 #else
1251 #error "Unknown Architecture" 1260 #error "Unknown Architecture"
1252 #endif 1261 #endif
1253 } 1262 }
1254 1263
1255 Node* InterpreterAssembler::RegisterCount() { 1264 Node* InterpreterAssembler::RegisterCount() {
1256 Node* bytecode_array = LoadRegister(Register::bytecode_array()); 1265 Node* bytecode_array = LoadRegister(Register::bytecode_array());
1257 Node* frame_size = LoadObjectField( 1266 Node* frame_size = LoadObjectField(
1258 bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()); 1267 bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Uint32());
1259 return Word32Sar(frame_size, Int32Constant(kPointerSizeLog2)); 1268 return WordShr(ChangeUint32ToWord(frame_size),
1269 IntPtrConstant(kPointerSizeLog2));
1260 } 1270 }
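RegisterCount now loads the frame size as Uint32 and widens it with ChangeUint32ToWord before a logical WordShr (a frame size is a non-negative byte count, so no arithmetic shift is needed), which means it returns a pointer-width value, exactly what the register-file loops below compare against:

    Node* register_count = RegisterCount();  // pointer-width
    GotoUnless(UintPtrLessThan(index, register_count), &done_loop);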
1261 1271
1262 Node* InterpreterAssembler::ExportRegisterFile(Node* array) { 1272 Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
1273 Node* register_count = RegisterCount();
1263 if (FLAG_debug_code) { 1274 if (FLAG_debug_code) {
1264 Node* array_size = LoadAndUntagFixedArrayBaseLength(array); 1275 Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
1265 AbortIfWordNotEqual( 1276 AbortIfWordNotEqual(array_size, register_count,
1266 array_size, RegisterCount(), kInvalidRegisterFileInGenerator); 1277 kInvalidRegisterFileInGenerator);
1267 } 1278 }
1268 1279
1269 Variable var_index(this, MachineRepresentation::kWord32); 1280 Variable var_index(this, MachineType::PointerRepresentation());
1270 var_index.Bind(Int32Constant(0)); 1281 var_index.Bind(IntPtrConstant(0));
1271 1282
1272 // Iterate over register file and write values into array. 1283 // Iterate over register file and write values into array.
1273 // The mapping of register to array index must match that used in 1284 // The mapping of register to array index must match that used in
1274 // BytecodeGraphBuilder::VisitResumeGenerator. 1285 // BytecodeGraphBuilder::VisitResumeGenerator.
1275 Label loop(this, &var_index), done_loop(this); 1286 Label loop(this, &var_index), done_loop(this);
1276 Goto(&loop); 1287 Goto(&loop);
1277 Bind(&loop); 1288 Bind(&loop);
1278 { 1289 {
1279 Node* index = var_index.value(); 1290 Node* index = var_index.value();
1280 Node* condition = Int32LessThan(index, RegisterCount()); 1291 GotoUnless(UintPtrLessThan(index, register_count), &done_loop);
1281 GotoUnless(condition, &done_loop);
1282 1292
1283 Node* reg_index = 1293 Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1284 Int32Sub(Int32Constant(Register(0).ToOperand()), index); 1294 Node* value = LoadRegister(reg_index);
1285 Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));
1286 1295
1287 StoreFixedArrayElement(array, index, value); 1296 StoreFixedArrayElement(array, index, value, UPDATE_WRITE_BARRIER, 0,
1297 INTPTR_PARAMETERS);
rmcilroy 2016/12/09 11:21:37 Do you need the extra arguments here, aren't these
Igor Sheludko 2016/12/10 00:09:50 Currently, all Load/StoreFixedXXXArray methods exp
1288 1298
1289 var_index.Bind(Int32Add(index, Int32Constant(1))); 1299 var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
1290 Goto(&loop); 1300 Goto(&loop);
1291 } 1301 }
1292 Bind(&done_loop); 1302 Bind(&done_loop);
1293 1303
1294 return array; 1304 return array;
1295 } 1305 }
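The export/import loops follow the same recipe as the context walk earlier in the file: declare the index at MachineType::PointerRepresentation(), start at IntPtrConstant(0), guard with UintPtrLessThan and step with IntPtrAdd, so the register operand can be formed with a direct IntPtrSub instead of converting a 32-bit index at each use. A condensed sketch of the loop skeleton, using the helpers from this diff:

    Variable var_index(this, MachineType::PointerRepresentation());
    var_index.Bind(IntPtrConstant(0));
    Label loop(this, &var_index), done_loop(this);
    Goto(&loop);
    Bind(&loop);
    {
      Node* index = var_index.value();
      GotoUnless(UintPtrLessThan(index, register_count), &done_loop);
      Node* reg_index =
          IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
      // ... move one value between the array and the register file ...
      var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
      Goto(&loop);
    }
    Bind(&done_loop);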
1296 1306
1297 Node* InterpreterAssembler::ImportRegisterFile(Node* array) { 1307 Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
1308 Node* register_count = RegisterCount();
1298 if (FLAG_debug_code) { 1309 if (FLAG_debug_code) {
1299 Node* array_size = LoadAndUntagFixedArrayBaseLength(array); 1310 Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
1300 AbortIfWordNotEqual( 1311 AbortIfWordNotEqual(array_size, register_count,
1301 array_size, RegisterCount(), kInvalidRegisterFileInGenerator); 1312 kInvalidRegisterFileInGenerator);
1302 } 1313 }
1303 1314
1304 Variable var_index(this, MachineRepresentation::kWord32); 1315 Variable var_index(this, MachineType::PointerRepresentation());
1305 var_index.Bind(Int32Constant(0)); 1316 var_index.Bind(IntPtrConstant(0));
1306 1317
1307 // Iterate over array and write values into register file. Also erase the 1318 // Iterate over array and write values into register file. Also erase the
1308 // array contents to not keep them alive artificially. 1319 // array contents to not keep them alive artificially.
1309 Label loop(this, &var_index), done_loop(this); 1320 Label loop(this, &var_index), done_loop(this);
1310 Goto(&loop); 1321 Goto(&loop);
1311 Bind(&loop); 1322 Bind(&loop);
1312 { 1323 {
1313 Node* index = var_index.value(); 1324 Node* index = var_index.value();
1314 Node* condition = Int32LessThan(index, RegisterCount()); 1325 GotoUnless(UintPtrLessThan(index, register_count), &done_loop);
1315 GotoUnless(condition, &done_loop);
1316 1326
1317 Node* value = LoadFixedArrayElement(array, index); 1327 Node* value = LoadFixedArrayElement(array, index, 0, INTPTR_PARAMETERS);
1318 1328
1319 Node* reg_index = 1329 Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1320 Int32Sub(Int32Constant(Register(0).ToOperand()), index); 1330 StoreRegister(value, reg_index);
1321 StoreRegister(value, ChangeInt32ToIntPtr(reg_index));
1322 1331
1323 StoreFixedArrayElement(array, index, StaleRegisterConstant()); 1332 StoreFixedArrayElement(array, index, StaleRegisterConstant(),
1333 UPDATE_WRITE_BARRIER, 0, INTPTR_PARAMETERS);
1324 1334
1325 var_index.Bind(Int32Add(index, Int32Constant(1))); 1335 var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
1326 Goto(&loop); 1336 Goto(&loop);
1327 } 1337 }
1328 Bind(&done_loop); 1338 Bind(&done_loop);
1329 1339
1330 return array; 1340 return array;
1331 } 1341 }
1332 1342
1333 } // namespace interpreter 1343 } // namespace interpreter
1334 } // namespace internal 1344 } // namespace internal
1335 } // namespace v8 1345 } // namespace v8