| OLD | NEW | 
|     1 // Copyright 2014 the V8 project authors. All rights reserved. |     1 // Copyright 2014 the V8 project authors. All rights reserved. | 
|     2 // Use of this source code is governed by a BSD-style license that can be |     2 // Use of this source code is governed by a BSD-style license that can be | 
|     3 // found in the LICENSE file. |     3 // found in the LICENSE file. | 
|     4  |     4  | 
|     5 #if V8_TARGET_ARCH_S390 |     5 #if V8_TARGET_ARCH_S390 | 
|     6  |     6  | 
|     7 #include "src/code-stubs.h" |     7 #include "src/code-stubs.h" | 
|     8 #include "src/api-arguments.h" |     8 #include "src/api-arguments.h" | 
|     9 #include "src/base/bits.h" |     9 #include "src/base/bits.h" | 
|    10 #include "src/bootstrapper.h" |    10 #include "src/bootstrapper.h" | 
| (...skipping 1998 matching lines...) | 
|  2009   __ CmpP(r3, r7); |  2009   __ CmpP(r3, r7); | 
|  2010   __ bne(miss); |  2010   __ bne(miss); | 
|  2011  |  2011  | 
|  2012   __ mov(r2, Operand(arg_count())); |  2012   __ mov(r2, Operand(arg_count())); | 
|  2013  |  2013  | 
|  2014   // Increment the call count for monomorphic function calls. |  2014   // Increment the call count for monomorphic function calls. | 
|  2015   const int count_offset = FixedArray::kHeaderSize + kPointerSize; |  2015   const int count_offset = FixedArray::kHeaderSize + kPointerSize; | 
|  2016   __ SmiToPtrArrayOffset(r7, r5); |  2016   __ SmiToPtrArrayOffset(r7, r5); | 
|  2017   __ AddP(r4, r4, r7); |  2017   __ AddP(r4, r4, r7); | 
|  2018   __ LoadP(r5, FieldMemOperand(r4, count_offset)); |  2018   __ LoadP(r5, FieldMemOperand(r4, count_offset)); | 
|  2019   __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0); |  2019   __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0); | 
|  2020   __ StoreP(r5, FieldMemOperand(r4, count_offset), r0); |  2020   __ StoreP(r5, FieldMemOperand(r4, count_offset), r0); | 
|  2021  |  2021  | 
|  2022   __ LoadRR(r4, r6); |  2022   __ LoadRR(r4, r6); | 
|  2023   __ LoadRR(r5, r3); |  2023   __ LoadRR(r5, r3); | 
|  2024   ArrayConstructorStub stub(masm->isolate(), arg_count()); |  2024   ArrayConstructorStub stub(masm->isolate(), arg_count()); | 
|  2025   __ TailCallStub(&stub); |  2025   __ TailCallStub(&stub); | 
|  2026 } |  2026 } | 
|  2027  |  2027  | 
|  2028 void CallICStub::Generate(MacroAssembler* masm) { |  2028 void CallICStub::Generate(MacroAssembler* masm) { | 
|  2029   // r3 - function |  2029   // r3 - function | 
| (...skipping 26 matching lines...) | 
|  2056   __ CmpP(r3, r7); |  2056   __ CmpP(r3, r7); | 
|  2057   __ bne(&extra_checks_or_miss, Label::kNear); |  2057   __ bne(&extra_checks_or_miss, Label::kNear); | 
|  2058  |  2058  | 
|  2059   // The compare above could have been a SMI/SMI comparison. Guard against this |  2059   // The compare above could have been a SMI/SMI comparison. Guard against this | 
|  2060   // convincing us that we have a monomorphic JSFunction. |  2060   // convincing us that we have a monomorphic JSFunction. | 
|  2061   __ JumpIfSmi(r3, &extra_checks_or_miss); |  2061   __ JumpIfSmi(r3, &extra_checks_or_miss); | 
|  2062  |  2062  | 
|  2063   // Increment the call count for monomorphic function calls. |  2063   // Increment the call count for monomorphic function calls. | 
|  2064   const int count_offset = FixedArray::kHeaderSize + kPointerSize; |  2064   const int count_offset = FixedArray::kHeaderSize + kPointerSize; | 
|  2065   __ LoadP(r5, FieldMemOperand(r8, count_offset)); |  2065   __ LoadP(r5, FieldMemOperand(r8, count_offset)); | 
|  2066   __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0); |  2066   __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0); | 
|  2067   __ StoreP(r5, FieldMemOperand(r8, count_offset), r0); |  2067   __ StoreP(r5, FieldMemOperand(r8, count_offset), r0); | 
|  2068  |  2068  | 
|  2069   __ bind(&call_function); |  2069   __ bind(&call_function); | 
|  2070   __ mov(r2, Operand(argc)); |  2070   __ mov(r2, Operand(argc)); | 
|  2071   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(), |  2071   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(), | 
|  2072                                                     tail_call_mode()), |  2072                                                     tail_call_mode()), | 
|  2073           RelocInfo::CODE_TARGET); |  2073           RelocInfo::CODE_TARGET); | 
|  2074  |  2074  | 
|  2075   __ bind(&extra_checks_or_miss); |  2075   __ bind(&extra_checks_or_miss); | 
|  2076   Label uninitialized, miss, not_allocation_site; |  2076   Label uninitialized, miss, not_allocation_site; | 
| (...skipping 49 matching lines...) | 
|  2126   __ beq(&miss); |  2126   __ beq(&miss); | 
|  2127  |  2127  | 
|  2128   // Make sure the function belongs to the same native context. |  2128   // Make sure the function belongs to the same native context. | 
|  2129   __ LoadP(r6, FieldMemOperand(r3, JSFunction::kContextOffset)); |  2129   __ LoadP(r6, FieldMemOperand(r3, JSFunction::kContextOffset)); | 
|  2130   __ LoadP(r6, ContextMemOperand(r6, Context::NATIVE_CONTEXT_INDEX)); |  2130   __ LoadP(r6, ContextMemOperand(r6, Context::NATIVE_CONTEXT_INDEX)); | 
|  2131   __ LoadP(ip, NativeContextMemOperand()); |  2131   __ LoadP(ip, NativeContextMemOperand()); | 
|  2132   __ CmpP(r6, ip); |  2132   __ CmpP(r6, ip); | 
|  2133   __ bne(&miss); |  2133   __ bne(&miss); | 
|  2134  |  2134  | 
|  2135   // Initialize the call counter. |  2135   // Initialize the call counter. | 
|  2136   __ LoadSmiLiteral(r7, Smi::FromInt(CallICNexus::kCallCountIncrement)); |  2136   __ LoadSmiLiteral(r7, Smi::FromInt(1)); | 
|  2137   __ StoreP(r7, FieldMemOperand(r8, count_offset), r0); |  2137   __ StoreP(r7, FieldMemOperand(r8, count_offset), r0); | 
|  2138  |  2138  | 
|  2139   // Store the function. Use a stub since we need a frame for allocation. |  2139   // Store the function. Use a stub since we need a frame for allocation. | 
|  2140   // r4 - vector |  2140   // r4 - vector | 
|  2141   // r5 - slot |  2141   // r5 - slot | 
|  2142   // r3 - function |  2142   // r3 - function | 
|  2143   { |  2143   { | 
|  2144     FrameScope scope(masm, StackFrame::INTERNAL); |  2144     FrameScope scope(masm, StackFrame::INTERNAL); | 
|  2145     CreateWeakCellStub create_stub(masm->isolate()); |  2145     CreateWeakCellStub create_stub(masm->isolate()); | 
|  2146     __ Push(r3); |  2146     __ Push(r3); | 
| (...skipping 3453 matching lines...) | 
|  5600   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, |  5600   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, | 
|  5601                            kStackUnwindSpace, NULL, return_value_operand, NULL); |  5601                            kStackUnwindSpace, NULL, return_value_operand, NULL); | 
|  5602 } |  5602 } | 
|  5603  |  5603  | 
|  5604 #undef __ |  5604 #undef __ | 
|  5605  |  5605  | 
|  5606 }  // namespace internal |  5606 }  // namespace internal | 
|  5607 }  // namespace v8 |  5607 }  // namespace v8 | 
|  5608  |  5608  | 
|  5609 #endif  // V8_TARGET_ARCH_S390 |  5609 #endif  // V8_TARGET_ARCH_S390 | 