Index: src/builtins/s390/builtins-s390.cc
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
index c488dc68421ee9f523d9c6c001666dd4593a6416..5311fcdfaa88432ba1e76aa205860f04f7aba63a 100644
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -1345,8 +1345,10 @@
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime;
   Label try_shared;
+  Label loop_top, loop_bottom;

   Register closure = r3;
+  Register map = r8;
   Register index = r4;

   // Do we have a valid feedback vector?
@@ -1354,29 +1356,59 @@
   __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
   __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);

-  // Is optimized code available in the feedback vector?
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&try_shared);
+
+  // Find literals.
+  // r9 : native context
+  // r4 : length / index
+  // r8 : optimized code map
+  // r5 : new target
+  // r3 : closure
+  Register native_context = r9;
+  __ LoadP(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = r1;
+  Register array_pointer = r7;
+
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ AddP(array_pointer, map, array_pointer);
+  __ LoadP(temp, FieldMemOperand(array_pointer,
+                                 SharedFunctionInfo::kOffsetToPreviousContext));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ CmpP(temp, native_context);
+  __ bne(&loop_bottom, Label::kNear);
+
+  // Code available?
   Register entry = r6;
-  __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
-                                             kPointerSize +
-                                         FeedbackVector::kHeaderSize));
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);

+  // Found code. Get it into the closure and return.
   // Store code entry in the closure.
   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r7);

-  // Load native context into r8.
-  Register native_context = r8;
-  __ LoadP(native_context, NativeContextMemOperand());
-
   // Link the closure into the optimized function list.
+  // r6 : code entry
+  // r9: native context
+  // r3 : closure
   __ LoadP(
       r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
   __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
             r0);
-  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, r4,
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, temp,
                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   const int function_list_offset =
@@ -1386,18 +1418,26 @@
       ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
   // Save closure before the write barrier.
   __ LoadRR(r7, closure);
-  __ RecordWriteContextSlot(native_context, function_list_offset, r7, r4,
+  __ RecordWriteContextSlot(native_context, function_list_offset, r7, temp,
                             kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ JumpToJSEntry(entry);

-  // We found no optimized code.
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+
+  // We found no code.
+  __ b(&gotta_call_runtime);
+
   __ bind(&try_shared);
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   // Is the shared function marked for tier up?
-  __ LoadlB(r7, FieldMemOperand(
-                    entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
-  __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
+  __ LoadlB(temp, FieldMemOperand(
+                      entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
+  __ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
   __ bne(&gotta_call_runtime);
   // If SFI points to anything other than CompileLazy, install that.