Chromium Code Reviews

Side by Side Diff: src/a64/code-stubs-a64.cc

Issue 196133017: Experimental parser: merge r19949 (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 9 months ago
OLD | NEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 169 matching lines...)
180 Isolate* isolate, 180 Isolate* isolate,
181 CodeStubInterfaceDescriptor* descriptor) { 181 CodeStubInterfaceDescriptor* descriptor) {
182 // x1: receiver 182 // x1: receiver
183 static Register registers[] = { x1 }; 183 static Register registers[] = { x1 };
184 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); 184 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
185 descriptor->register_params_ = registers; 185 descriptor->register_params_ = registers;
186 descriptor->deoptimization_handler_ = NULL; 186 descriptor->deoptimization_handler_ = NULL;
187 } 187 }
188 188
189 189
190 void StringLengthStub::InitializeInterfaceDescriptor(
191 Isolate* isolate,
192 CodeStubInterfaceDescriptor* descriptor) {
193 static Register registers[] = { x0, x2 };
194 descriptor->register_param_count_ = 2;
195 descriptor->register_params_ = registers;
196 descriptor->deoptimization_handler_ = NULL;
197 }
198
199
200 void KeyedStringLengthStub::InitializeInterfaceDescriptor(
201 Isolate* isolate,
202 CodeStubInterfaceDescriptor* descriptor) {
203 static Register registers[] = { x1, x0 };
204 descriptor->register_param_count_ = 2;
205 descriptor->register_params_ = registers;
206 descriptor->deoptimization_handler_ = NULL;
207 }
208
209
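
Note that the neighbouring descriptors derive register_param_count_ from the register array via sizeof(registers) / sizeof(registers[0]), while the two new stubs hard-code 2. A minimal host-side sketch of that array-count idiom, using hypothetical Register and descriptor types rather than V8's:

    #include <cstddef>

    // Hypothetical stand-ins for illustration only; not V8's types.
    struct Register { int code; };

    struct Descriptor {
      std::size_t register_param_count_;
      const Register* register_params_;
    };

    void InitializeDescriptor(Descriptor* descriptor) {
      static const Register registers[] = { {0}, {2} };  // e.g. { x0, x2 }
      // Deriving the count from the array keeps it in sync if the list changes.
      descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
      descriptor->register_params_ = registers;
    }

    int main() {
      Descriptor d;
      InitializeDescriptor(&d);
      return d.register_param_count_ == 2 ? 0 : 1;
    }
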
190 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor( 210 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
191 Isolate* isolate, 211 Isolate* isolate,
192 CodeStubInterfaceDescriptor* descriptor) { 212 CodeStubInterfaceDescriptor* descriptor) {
193 // x2: receiver 213 // x2: receiver
194 // x1: key 214 // x1: key
195 // x0: value 215 // x0: value
196 static Register registers[] = { x2, x1, x0 }; 216 static Register registers[] = { x2, x1, x0 };
197 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); 217 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
198 descriptor->register_params_ = registers; 218 descriptor->register_params_ = registers;
199 descriptor->deoptimization_handler_ = 219 descriptor->deoptimization_handler_ =
(...skipping 975 matching lines...)
1175 // Handle double (heap number) exponents. 1195 // Handle double (heap number) exponents.
1176 if (exponent_type_ != INTEGER) { 1196 if (exponent_type_ != INTEGER) {
1177 // Detect integer exponents stored as doubles and handle those in the 1197 // Detect integer exponents stored as doubles and handle those in the
1178 // integer fast-path. 1198 // integer fast-path.
1179 __ TryConvertDoubleToInt64(exponent_integer, exponent_double, 1199 __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
1180 scratch0_double, &exponent_is_integer); 1200 scratch0_double, &exponent_is_integer);
1181 1201
1182 if (exponent_type_ == ON_STACK) { 1202 if (exponent_type_ == ON_STACK) {
1183 FPRegister half_double = d3; 1203 FPRegister half_double = d3;
1184 FPRegister minus_half_double = d4; 1204 FPRegister minus_half_double = d4;
1185 FPRegister zero_double = d5;
1186 // Detect square root case. Crankshaft detects constant +/-0.5 at compile 1205 // Detect square root case. Crankshaft detects constant +/-0.5 at compile
1187 // time and uses DoMathPowHalf instead. We then skip this check for 1206 // time and uses DoMathPowHalf instead. We then skip this check for
1188 // non-constant cases of +/-0.5 as these hardly occur. 1207 // non-constant cases of +/-0.5 as these hardly occur.
1189 1208
1190 __ Fmov(minus_half_double, -0.5); 1209 __ Fmov(minus_half_double, -0.5);
1191 __ Fmov(half_double, 0.5); 1210 __ Fmov(half_double, 0.5);
1192 __ Fcmp(minus_half_double, exponent_double); 1211 __ Fcmp(minus_half_double, exponent_double);
1193 __ Fccmp(half_double, exponent_double, NZFlag, ne); 1212 __ Fccmp(half_double, exponent_double, NZFlag, ne);
1194 // Condition flags at this point: 1213 // Condition flags at this point:
1195 // 0.5; nZCv // Identified by eq && pl 1214 // 0.5; nZCv // Identified by eq && pl
(...skipping 12 matching lines...)
1208 // exponent == -0.5: The result is +0. 1227 // exponent == -0.5: The result is +0.
1209 // (base == +0) || (base == -0) 1228 // (base == +0) || (base == -0)
1210 // exponent == 0.5: The result is +0. 1229 // exponent == 0.5: The result is +0.
1211 // exponent == -0.5: The result is +INFINITY. 1230 // exponent == -0.5: The result is +INFINITY.
1212 // (base < 0) && base.isFinite(): The result is NaN. 1231 // (base < 0) && base.isFinite(): The result is NaN.
1213 // 1232 //
1214 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except 1233 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
1215 // where base is -INFINITY or -0. 1234 // where base is -INFINITY or -0.
1216 1235
1217 // Add +0 to base. This has no effect other than turning -0 into +0. 1236 // Add +0 to base. This has no effect other than turning -0 into +0.
1218 __ Fmov(zero_double, 0.0); 1237 __ Fadd(base_double, base_double, fp_zero);
1219 __ Fadd(base_double, base_double, zero_double);
1220 // The operation -0+0 results in +0 in all cases except where the 1238 // The operation -0+0 results in +0 in all cases except where the
1221 // FPCR rounding mode is 'round towards minus infinity' (RM). The 1239 // FPCR rounding mode is 'round towards minus infinity' (RM). The
1222 // A64 simulator does not currently simulate FPCR (where the rounding 1240 // A64 simulator does not currently simulate FPCR (where the rounding
1223 // mode is set), so test the operation with some debug code. 1241 // mode is set), so test the operation with some debug code.
1224 if (masm->emit_debug_code()) { 1242 if (masm->emit_debug_code()) {
1225 Register temp = masm->Tmp1(); 1243 UseScratchRegisterScope temps(masm);
1226 // d5 zero_double The value +0.0 as a double. 1244 Register temp = temps.AcquireX();
1227 __ Fneg(scratch0_double, zero_double); 1245 __ Fneg(scratch0_double, fp_zero);
1228 // Verify that we correctly generated +0.0 and -0.0. 1246 // Verify that we correctly generated +0.0 and -0.0.
1229 // bits(+0.0) = 0x0000000000000000 1247 // bits(+0.0) = 0x0000000000000000
1230 // bits(-0.0) = 0x8000000000000000 1248 // bits(-0.0) = 0x8000000000000000
1231 __ Fmov(temp, zero_double); 1249 __ Fmov(temp, fp_zero);
1232 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero); 1250 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
1233 __ Fmov(temp, scratch0_double); 1251 __ Fmov(temp, scratch0_double);
1234 __ Eor(temp, temp, kDSignMask); 1252 __ Eor(temp, temp, kDSignMask);
1235 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero); 1253 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
1236 // Check that -0.0 + 0.0 == +0.0. 1254 // Check that -0.0 + 0.0 == +0.0.
1237 __ Fadd(scratch0_double, scratch0_double, zero_double); 1255 __ Fadd(scratch0_double, scratch0_double, fp_zero);
1238 __ Fmov(temp, scratch0_double); 1256 __ Fmov(temp, scratch0_double);
1239 __ CheckRegisterIsClear(temp, kExpectedPositiveZero); 1257 __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
1240 } 1258 }
1241 1259
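For reference, the floating-point identity the Fadd above relies on can be checked in plain host C++ (the default round-to-nearest mode is assumed, matching the comment about the RM rounding mode being the only exception):

    #include <cassert>
    #include <cmath>

    int main() {
      // Under round-to-nearest, -0.0 + 0.0 yields +0.0, which is exactly how
      // the stub normalises a negative-zero base before taking the square root.
      double negative_zero = -0.0;
      assert(std::signbit(negative_zero));         // really is -0.0
      double base = negative_zero + 0.0;
      assert(!std::signbit(base) && base == 0.0);  // sign bit cleared: +0.0
      return 0;
    }
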
1242 // If base is -INFINITY, make it +INFINITY. 1260 // If base is -INFINITY, make it +INFINITY.
1243 // * Calculate base - base: All infinities will become NaNs since both 1261 // * Calculate base - base: All infinities will become NaNs since both
1244 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64. 1262 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64.
1245 // * If the result is NaN, calculate abs(base). 1263 // * If the result is NaN, calculate abs(base).
1246 __ Fsub(scratch0_double, base_double, base_double); 1264 __ Fsub(scratch0_double, base_double, base_double);
1247 __ Fcmp(scratch0_double, 0.0); 1265 __ Fcmp(scratch0_double, 0.0);
(...skipping 245 matching lines...)
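
The -INFINITY handling above leans on another IEEE-754 fact: x - x is NaN exactly when x is infinite (or NaN), and +0 for any finite x. A small host-side sketch of the same detect-and-fix sequence (illustrative only, not part of the patch):

    #include <cassert>
    #include <cmath>

    // Mirrors the Fsub/Fcmp step and the "if the result is NaN, calculate
    // abs(base)" comment: only an infinite base makes base - base a NaN, and in
    // that case fabs() flips -INFINITY to +INFINITY.
    double NormaliseInfiniteBase(double base) {
      return std::isnan(base - base) ? std::fabs(base) : base;
    }

    int main() {
      assert(NormaliseInfiniteBase(-INFINITY) == INFINITY);
      assert(NormaliseInfiniteBase(-2.5) == -2.5);  // finite bases are unchanged
      return 0;
    }
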
1493 1511
1494 // Store the return address on the stack, in the space previously allocated 1512 // Store the return address on the stack, in the space previously allocated
1495 // by EnterExitFrame. The return address is queried by 1513 // by EnterExitFrame. The return address is queried by
1496 // ExitFrame::GetStateForFramePointer. 1514 // ExitFrame::GetStateForFramePointer.
1497 Label return_location; 1515 Label return_location;
1498 __ Adr(x12, &return_location); 1516 __ Adr(x12, &return_location);
1499 __ Poke(x12, 0); 1517 __ Poke(x12, 0);
1500 if (__ emit_debug_code()) { 1518 if (__ emit_debug_code()) {
1501 // Verify that the slot below fp[kSPOffset]-8 points to the return location 1519 // Verify that the slot below fp[kSPOffset]-8 points to the return location
1502 // (currently in x12). 1520 // (currently in x12).
1503 Register temp = masm->Tmp1(); 1521 UseScratchRegisterScope temps(masm);
1522 Register temp = temps.AcquireX();
1504 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset)); 1523 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
1505 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes))); 1524 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
1506 __ Cmp(temp, x12); 1525 __ Cmp(temp, x12);
1507 __ Check(eq, kReturnAddressNotFoundInFrame); 1526 __ Check(eq, kReturnAddressNotFoundInFrame);
1508 } 1527 }
1509 1528
1510 // Call the builtin. 1529 // Call the builtin.
1511 __ Blr(target); 1530 __ Blr(target);
1512 __ Bind(&return_location); 1531 __ Bind(&return_location);
1513 const Register& result = x0; 1532 const Register& result = x0;
1514 1533
1515 if (always_allocate) { 1534 if (always_allocate) {
(...skipping 268 matching lines...)
1784 // 1803 //
1785 // We must not write to jssp until after the PushCalleeSavedRegisters() 1804 // We must not write to jssp until after the PushCalleeSavedRegisters()
1786 // call, since jssp is itself a callee-saved register. 1805 // call, since jssp is itself a callee-saved register.
1787 __ SetStackPointer(csp); 1806 __ SetStackPointer(csp);
1788 __ PushCalleeSavedRegisters(); 1807 __ PushCalleeSavedRegisters();
1789 __ Mov(jssp, csp); 1808 __ Mov(jssp, csp);
1790 __ SetStackPointer(jssp); 1809 __ SetStackPointer(jssp);
1791 1810
1792 ProfileEntryHookStub::MaybeCallEntryHook(masm); 1811 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1793 1812
1813 // Set up the reserved register for 0.0.
1814 __ Fmov(fp_zero, 0.0);
1815
1794 // Build an entry frame (see layout below). 1816 // Build an entry frame (see layout below).
1795 Isolate* isolate = masm->isolate(); 1817 Isolate* isolate = masm->isolate();
1796 1818
1797 // Build an entry frame. 1819 // Build an entry frame.
1798 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; 1820 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1799 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used. 1821 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
1800 __ Mov(x13, bad_frame_pointer); 1822 __ Mov(x13, bad_frame_pointer);
1801 __ Mov(x12, Operand(Smi::FromInt(marker))); 1823 __ Mov(x12, Operand(Smi::FromInt(marker)));
1802 __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); 1824 __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
1803 __ Ldr(x10, MemOperand(x11)); 1825 __ Ldr(x10, MemOperand(x11));
(...skipping 33 matching lines...)
1837 // Jump to a faked try block that does the invoke, with a faked catch 1859 // Jump to a faked try block that does the invoke, with a faked catch
1838 // block that sets the pending exception. 1860 // block that sets the pending exception.
1839 __ B(&invoke); 1861 __ B(&invoke);
1840 1862
1841 // Prevent the constant pool from being emitted between the record of the 1863 // Prevent the constant pool from being emitted between the record of the
1842 // handler_entry position and the first instruction of the sequence here. 1864 // handler_entry position and the first instruction of the sequence here.
1843 // There is no risk because Assembler::Emit() emits the instruction before 1865 // There is no risk because Assembler::Emit() emits the instruction before
1844 // checking for constant pool emission, but we do not want to depend on 1866 // checking for constant pool emission, but we do not want to depend on
1845 // that. 1867 // that.
1846 { 1868 {
1847 Assembler::BlockConstPoolScope block_const_pool(masm); 1869 Assembler::BlockPoolsScope block_pools(masm);
1848 __ bind(&handler_entry); 1870 __ bind(&handler_entry);
1849 handler_offset_ = handler_entry.pos(); 1871 handler_offset_ = handler_entry.pos();
1850 // Caught exception: Store result (exception) in the pending exception 1872 // Caught exception: Store result (exception) in the pending exception
1851 // field in the JSEnv and return a failure sentinel. Coming in here the 1873 // field in the JSEnv and return a failure sentinel. Coming in here the
1852 // fp will be invalid because the PushTryHandler below sets it to 0 to 1874 // fp will be invalid because the PushTryHandler below sets it to 0 to
1853 // signal the existence of the JSEntry frame. 1875 // signal the existence of the JSEntry frame.
1854 // TODO(jbramley): Do this in the Assembler. 1876 // TODO(jbramley): Do this in the Assembler.
1855 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 1877 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1856 isolate))); 1878 isolate)));
1857 } 1879 }
(...skipping 102 matching lines...)
1960 } 1982 }
1961 1983
1962 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss); 1984 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
1963 1985
1964 __ Bind(&miss); 1986 __ Bind(&miss);
1965 StubCompiler::TailCallBuiltin(masm, 1987 StubCompiler::TailCallBuiltin(masm,
1966 BaseLoadStoreStubCompiler::MissBuiltin(kind())); 1988 BaseLoadStoreStubCompiler::MissBuiltin(kind()));
1967 } 1989 }
1968 1990
1969 1991
1970 void StringLengthStub::Generate(MacroAssembler* masm) {
1971 Label miss;
1972 Register receiver;
1973 if (kind() == Code::KEYED_LOAD_IC) {
1974 // ----------- S t a t e -------------
1975 // -- lr : return address
1976 // -- x1 : receiver
1977 // -- x0 : key
1978 // -----------------------------------
1979 Register key = x0;
1980 receiver = x1;
1981 __ Cmp(key, Operand(masm->isolate()->factory()->length_string()));
1982 __ B(ne, &miss);
1983 } else {
1984 ASSERT(kind() == Code::LOAD_IC);
1985 // ----------- S t a t e -------------
1986 // -- lr : return address
1987 // -- x2 : name
1988 // -- x0 : receiver
1989 // -- sp[0] : receiver
1990 // -----------------------------------
1991 receiver = x0;
1992 }
1993
1994 StubCompiler::GenerateLoadStringLength(masm, receiver, x10, x11, &miss);
1995
1996 __ Bind(&miss);
1997 StubCompiler::TailCallBuiltin(masm,
1998 BaseLoadStoreStubCompiler::MissBuiltin(kind()));
1999 }
2000
2001
2002 void StoreArrayLengthStub::Generate(MacroAssembler* masm) { 1992 void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
2003 ASM_LOCATION("StoreArrayLengthStub::Generate"); 1993 ASM_LOCATION("StoreArrayLengthStub::Generate");
2004 // This accepts as a receiver anything JSArray::SetElementsLength accepts 1994 // This accepts as a receiver anything JSArray::SetElementsLength accepts
2005 // (currently anything except for external arrays which means anything with 1995 // (currently anything except for external arrays which means anything with
2006 // elements of FixedArray type). Value must be a number, but only smis are 1996 // elements of FixedArray type). Value must be a number, but only smis are
2007 // accepted as the most common case. 1997 // accepted as the most common case.
2008 Label miss; 1998 Label miss;
2009 1999
2010 Register receiver; 2000 Register receiver;
2011 Register value; 2001 Register value;
(...skipping 282 matching lines...)
2294 __ Ret(); 2284 __ Ret();
2295 2285
2296 // Slow case: handle non-smi or out-of-bounds access to arguments by calling 2286 // Slow case: handle non-smi or out-of-bounds access to arguments by calling
2297 // the runtime system. 2287 // the runtime system.
2298 __ Bind(&slow); 2288 __ Bind(&slow);
2299 __ Push(key); 2289 __ Push(key);
2300 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); 2290 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2301 } 2291 }
2302 2292
2303 2293
2304 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { 2294 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
2305 // Stack layout on entry. 2295 // Stack layout on entry.
2306 // jssp[0]: number of parameters (tagged) 2296 // jssp[0]: number of parameters (tagged)
2307 // jssp[8]: address of receiver argument 2297 // jssp[8]: address of receiver argument
2308 // jssp[16]: function 2298 // jssp[16]: function
2309 2299
2310 // Check if the calling frame is an arguments adaptor frame. 2300 // Check if the calling frame is an arguments adaptor frame.
2311 Label runtime; 2301 Label runtime;
2312 Register caller_fp = x10; 2302 Register caller_fp = x10;
2313 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 2303 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2314 // Load and untag the context. 2304 // Load and untag the context.
2315 STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4); 2305 STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
2316 __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset + 2306 __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
2317 (kSmiShift / kBitsPerByte))); 2307 (kSmiShift / kBitsPerByte)));
2318 __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR); 2308 __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
2319 __ B(ne, &runtime); 2309 __ B(ne, &runtime);
2320 2310
2321 // Patch the arguments.length and parameters pointer in the current frame. 2311 // Patch the arguments.length and parameters pointer in the current frame.
2322 __ Ldr(x11, MemOperand(caller_fp, 2312 __ Ldr(x11, MemOperand(caller_fp,
2323 ArgumentsAdaptorFrameConstants::kLengthOffset)); 2313 ArgumentsAdaptorFrameConstants::kLengthOffset));
2324 __ Poke(x11, 0 * kXRegSizeInBytes); 2314 __ Poke(x11, 0 * kXRegSize);
2325 __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2)); 2315 __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
2326 __ Add(x10, x10, Operand(StandardFrameConstants::kCallerSPOffset)); 2316 __ Add(x10, x10, Operand(StandardFrameConstants::kCallerSPOffset));
2327 __ Poke(x10, 1 * kXRegSizeInBytes); 2317 __ Poke(x10, 1 * kXRegSize);
2328 2318
2329 __ Bind(&runtime); 2319 __ Bind(&runtime);
2330 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); 2320 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2331 } 2321 }
2332 2322
2333 2323
2334 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { 2324 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
2335 // Stack layout on entry. 2325 // Stack layout on entry.
2336 // jssp[0]: number of parameters (tagged) 2326 // jssp[0]: number of parameters (tagged)
2337 // jssp[8]: address of receiver argument 2327 // jssp[8]: address of receiver argument
2338 // jssp[16]: function 2328 // jssp[16]: function
2339 // 2329 //
2340 // Returns pointer to result object in x0. 2330 // Returns pointer to result object in x0.
2341 2331
2342 // Note: arg_count_smi is an alias of param_count_smi. 2332 // Note: arg_count_smi is an alias of param_count_smi.
2343 Register arg_count_smi = x3; 2333 Register arg_count_smi = x3;
2344 Register param_count_smi = x3; 2334 Register param_count_smi = x3;
(...skipping 65 matching lines...)
2410 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2)); 2400 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
2411 __ Add(size, size, kParameterMapHeaderSize); 2401 __ Add(size, size, kParameterMapHeaderSize);
2412 2402
2413 // If there are no mapped parameters, set the running size total to zero. 2403 // If there are no mapped parameters, set the running size total to zero.
2414 // Otherwise, use the parameter map size calculated earlier. 2404 // Otherwise, use the parameter map size calculated earlier.
2415 __ Cmp(mapped_params, 0); 2405 __ Cmp(mapped_params, 0);
2416 __ CzeroX(size, eq); 2406 __ CzeroX(size, eq);
2417 2407
2418 // 2. Add the size of the backing store and arguments object. 2408 // 2. Add the size of the backing store and arguments object.
2419 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2)); 2409 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
2420 __ Add(size, size, FixedArray::kHeaderSize + Heap::kArgumentsObjectSize); 2410 __ Add(size, size,
2411 FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
2421 2412
2422 // Do the allocation of all three objects in one go. Assign this to x0, as it 2413 // Do the allocation of all three objects in one go. Assign this to x0, as it
2423 // will be returned to the caller. 2414 // will be returned to the caller.
2424 Register alloc_obj = x0; 2415 Register alloc_obj = x0;
2425 __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT); 2416 __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
2426 2417
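
As a back-of-envelope model of the size computed above: the single allocation covers an optional parameter map, the backing store, and the sloppy arguments object itself. The constants below are placeholders for illustration, not V8's actual values:

    #include <cstdint>

    // Placeholder constants; the real values live in V8's heap/object headers.
    constexpr int64_t kPointerSize = 8;
    constexpr int64_t kParameterMapHeaderSize = 4 * kPointerSize;      // assumption
    constexpr int64_t kFixedArrayHeaderSize = 2 * kPointerSize;        // assumption
    constexpr int64_t kSloppyArgumentsObjectSize = 5 * kPointerSize;   // assumption

    constexpr int64_t AllocationSize(int64_t mapped_params, int64_t arg_count) {
      // Parameter map only exists when some parameters are mapped; the backing
      // store and the arguments object header are always included.
      return (mapped_params > 0
                  ? mapped_params * kPointerSize + kParameterMapHeaderSize
                  : 0) +
             arg_count * kPointerSize + kFixedArrayHeaderSize +
             kSloppyArgumentsObjectSize;
    }

    static_assert(AllocationSize(0, 3) ==
                      3 * kPointerSize + kFixedArrayHeaderSize +
                          kSloppyArgumentsObjectSize,
                  "no parameter map when nothing is mapped");

    int main() { return 0; }
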
2427 // Get the arguments boilerplate from the current (global) context. 2418 // Get the arguments boilerplate from the current (global) context.
2428 2419
2429 // x0 alloc_obj pointer to allocated objects (param map, backing 2420 // x0 alloc_obj pointer to allocated objects (param map, backing
2430 // store, arguments) 2421 // store, arguments)
2431 // x1 mapped_params number of mapped parameters, min(params, args) 2422 // x1 mapped_params number of mapped parameters, min(params, args)
2432 // x2 arg_count number of function arguments 2423 // x2 arg_count number of function arguments
2433 // x3 arg_count_smi number of function arguments (smi) 2424 // x3 arg_count_smi number of function arguments (smi)
2434 // x4 function function pointer 2425 // x4 function function pointer
2435 // x7 param_count number of function parameters 2426 // x7 param_count number of function parameters
2436 // x11 args_offset offset to args (or aliased args) boilerplate (uninit) 2427 // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
2437 // x14 recv_arg pointer to receiver arguments 2428 // x14 recv_arg pointer to receiver arguments
2438 2429
2439 Register global_object = x10; 2430 Register global_object = x10;
2440 Register global_ctx = x10; 2431 Register global_ctx = x10;
2441 Register args_offset = x11; 2432 Register args_offset = x11;
2442 Register aliased_args_offset = x10; 2433 Register aliased_args_offset = x10;
2443 __ Ldr(global_object, GlobalObjectMemOperand()); 2434 __ Ldr(global_object, GlobalObjectMemOperand());
2444 __ Ldr(global_ctx, FieldMemOperand(global_object, 2435 __ Ldr(global_ctx, FieldMemOperand(global_object,
2445 GlobalObject::kNativeContextOffset)); 2436 GlobalObject::kNativeContextOffset));
2446 2437
2447 __ Ldr(args_offset, ContextMemOperand(global_ctx, 2438 __ Ldr(args_offset,
2448 Context::ARGUMENTS_BOILERPLATE_INDEX)); 2439 ContextMemOperand(global_ctx,
2440 Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
2449 __ Ldr(aliased_args_offset, 2441 __ Ldr(aliased_args_offset,
2450 ContextMemOperand(global_ctx, 2442 ContextMemOperand(global_ctx,
2451 Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)); 2443 Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
2452 __ Cmp(mapped_params, 0); 2444 __ Cmp(mapped_params, 0);
2453 __ CmovX(args_offset, aliased_args_offset, ne); 2445 __ CmovX(args_offset, aliased_args_offset, ne);
2454 2446
2455 // Copy the JS object part. 2447 // Copy the JS object part.
2456 __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13), 2448 __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
2457 JSObject::kHeaderSize / kPointerSize); 2449 JSObject::kHeaderSize / kPointerSize);
2458 2450
(...skipping 18 matching lines...)
2477 // x1 mapped_params number of mapped parameters, min(params, args) 2469 // x1 mapped_params number of mapped parameters, min(params, args)
2478 // x2 arg_count number of function arguments 2470 // x2 arg_count number of function arguments
2479 // x3 arg_count_smi number of function arguments (smi) 2471 // x3 arg_count_smi number of function arguments (smi)
2480 // x4 function function pointer 2472 // x4 function function pointer
2481 // x5 elements pointer to parameter map or backing store (uninit) 2473 // x5 elements pointer to parameter map or backing store (uninit)
2482 // x6 backing_store pointer to backing store (uninit) 2474 // x6 backing_store pointer to backing store (uninit)
2483 // x7 param_count number of function parameters 2475 // x7 param_count number of function parameters
2484 // x14 recv_arg pointer to receiver arguments 2476 // x14 recv_arg pointer to receiver arguments
2485 2477
2486 Register elements = x5; 2478 Register elements = x5;
2487 __ Add(elements, alloc_obj, Heap::kArgumentsObjectSize); 2479 __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
2488 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); 2480 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2489 2481
2490 // Initialize parameter map. If there are no mapped arguments, we're done. 2482 // Initialize parameter map. If there are no mapped arguments, we're done.
2491 Label skip_parameter_map; 2483 Label skip_parameter_map;
2492 __ Cmp(mapped_params, 0); 2484 __ Cmp(mapped_params, 0);
2493 // Set up backing store address, because it is needed later for filling in 2485 // Set up backing store address, because it is needed later for filling in
2494 // the unmapped arguments. 2486 // the unmapped arguments.
2495 Register backing_store = x6; 2487 Register backing_store = x6;
2496 __ CmovX(backing_store, elements, eq); 2488 __ CmovX(backing_store, elements, eq);
2497 __ B(eq, &skip_parameter_map); 2489 __ B(eq, &skip_parameter_map);
2498 2490
2499 __ LoadRoot(x10, Heap::kNonStrictArgumentsElementsMapRootIndex); 2491 __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
2500 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); 2492 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2501 __ Add(x10, mapped_params, 2); 2493 __ Add(x10, mapped_params, 2);
2502 __ SmiTag(x10); 2494 __ SmiTag(x10);
2503 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset)); 2495 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
2504 __ Str(cp, FieldMemOperand(elements, 2496 __ Str(cp, FieldMemOperand(elements,
2505 FixedArray::kHeaderSize + 0 * kPointerSize)); 2497 FixedArray::kHeaderSize + 0 * kPointerSize));
2506 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2)); 2498 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
2507 __ Add(x10, x10, kParameterMapHeaderSize); 2499 __ Add(x10, x10, kParameterMapHeaderSize);
2508 __ Str(x10, FieldMemOperand(elements, 2500 __ Str(x10, FieldMemOperand(elements,
2509 FixedArray::kHeaderSize + 1 * kPointerSize)); 2501 FixedArray::kHeaderSize + 1 * kPointerSize));
(...skipping 132 matching lines...)
2642 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2)); 2634 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
2643 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset); 2635 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
2644 2636
2645 // Try the new space allocation. Start out with computing the size of the 2637 // Try the new space allocation. Start out with computing the size of the
2646 // arguments object and the elements array in words. 2638 // arguments object and the elements array in words.
2647 Register size = x10; 2639 Register size = x10;
2648 __ Bind(&try_allocate); 2640 __ Bind(&try_allocate);
2649 __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize); 2641 __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
2650 __ Cmp(param_count, 0); 2642 __ Cmp(param_count, 0);
2651 __ CzeroX(size, eq); 2643 __ CzeroX(size, eq);
2652 __ Add(size, size, Heap::kArgumentsObjectSizeStrict / kPointerSize); 2644 __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
2653 2645
2654 // Do the allocation of both objects in one go. Assign this to x0, as it will 2646 // Do the allocation of both objects in one go. Assign this to x0, as it will
2655 // be returned to the caller. 2647 // be returned to the caller.
2656 Register alloc_obj = x0; 2648 Register alloc_obj = x0;
2657 __ Allocate(size, alloc_obj, x11, x12, &runtime, 2649 __ Allocate(size, alloc_obj, x11, x12, &runtime,
2658 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); 2650 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2659 2651
2660 // Get the arguments boilerplate from the current (native) context. 2652 // Get the arguments boilerplate from the current (native) context.
2661 Register global_object = x10; 2653 Register global_object = x10;
2662 Register global_ctx = x10; 2654 Register global_ctx = x10;
2663 Register args_offset = x4; 2655 Register args_offset = x4;
2664 __ Ldr(global_object, GlobalObjectMemOperand()); 2656 __ Ldr(global_object, GlobalObjectMemOperand());
2665 __ Ldr(global_ctx, FieldMemOperand(global_object, 2657 __ Ldr(global_ctx, FieldMemOperand(global_object,
2666 GlobalObject::kNativeContextOffset)); 2658 GlobalObject::kNativeContextOffset));
2667 __ Ldr(args_offset, 2659 __ Ldr(args_offset,
2668 ContextMemOperand(global_ctx, 2660 ContextMemOperand(global_ctx,
2669 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)); 2661 Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
2670 2662
2671 // x0 alloc_obj pointer to allocated objects: parameter array and 2663 // x0 alloc_obj pointer to allocated objects: parameter array and
2672 // arguments object 2664 // arguments object
2673 // x1 param_count_smi number of parameters passed to function (smi) 2665 // x1 param_count_smi number of parameters passed to function (smi)
2674 // x2 params pointer to parameters 2666 // x2 params pointer to parameters
2675 // x3 function function pointer 2667 // x3 function function pointer
2676 // x4 args_offset offset to arguments boilerplate 2668 // x4 args_offset offset to arguments boilerplate
2677 // x13 param_count number of parameters passed to function 2669 // x13 param_count number of parameters passed to function
2678 2670
2679 // Copy the JS object part. 2671 // Copy the JS object part.
2680 __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7), 2672 __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
2681 JSObject::kHeaderSize / kPointerSize); 2673 JSObject::kHeaderSize / kPointerSize);
2682 2674
2683 // Set the smi-tagged length as an in-object property. 2675 // Set the smi-tagged length as an in-object property.
2684 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); 2676 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2685 const int kLengthOffset = JSObject::kHeaderSize + 2677 const int kLengthOffset = JSObject::kHeaderSize +
2686 Heap::kArgumentsLengthIndex * kPointerSize; 2678 Heap::kArgumentsLengthIndex * kPointerSize;
2687 __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset)); 2679 __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2688 2680
2689 // If there are no actual arguments, we're done. 2681 // If there are no actual arguments, we're done.
2690 Label done; 2682 Label done;
2691 __ Cbz(param_count, &done); 2683 __ Cbz(param_count, &done);
2692 2684
2693 // Set up the elements pointer in the allocated arguments object and 2685 // Set up the elements pointer in the allocated arguments object and
2694 // initialize the header in the elements fixed array. 2686 // initialize the header in the elements fixed array.
2695 Register elements = x5; 2687 Register elements = x5;
2696 __ Add(elements, alloc_obj, Heap::kArgumentsObjectSizeStrict); 2688 __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
2697 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); 2689 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2698 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex); 2690 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2699 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); 2691 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2700 __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset)); 2692 __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
2701 2693
2702 // x0 alloc_obj pointer to allocated objects: parameter array and 2694 // x0 alloc_obj pointer to allocated objects: parameter array and
2703 // arguments object 2695 // arguments object
2704 // x1 param_count_smi number of parameters passed to function (smi) 2696 // x1 param_count_smi number of parameters passed to function (smi)
2705 // x2 params pointer to parameters 2697 // x2 params pointer to parameters
2706 // x3 function function pointer 2698 // x3 function function pointer
(...skipping 444 matching lines...)
3151 // iterates down to zero (inclusive). 3143 // iterates down to zero (inclusive).
3152 __ Add(last_match_offsets, 3144 __ Add(last_match_offsets,
3153 last_match_info_elements, 3145 last_match_info_elements,
3154 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag); 3146 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
3155 __ Bind(&next_capture); 3147 __ Bind(&next_capture);
3156 __ Subs(number_of_capture_registers, number_of_capture_registers, 2); 3148 __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
3157 __ B(mi, &done); 3149 __ B(mi, &done);
3158 // Read two 32 bit values from the static offsets vector buffer into 3150 // Read two 32 bit values from the static offsets vector buffer into
3159 // an X register 3151 // an X register
3160 __ Ldr(current_offset, 3152 __ Ldr(current_offset,
3161 MemOperand(offsets_vector_index, kWRegSizeInBytes * 2, PostIndex)); 3153 MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
3162 // Store the smi values in the last match info. 3154 // Store the smi values in the last match info.
3163 __ SmiTag(x10, current_offset); 3155 __ SmiTag(x10, current_offset);
3164 // Clearing the 32 bottom bits gives us a Smi. 3156 // Clearing the 32 bottom bits gives us a Smi.
3165 STATIC_ASSERT(kSmiShift == 32); 3157 STATIC_ASSERT(kSmiShift == 32);
3166 __ And(x11, current_offset, ~kWRegMask); 3158 __ And(x11, current_offset, ~kWRegMask);
3167 __ Stp(x10, 3159 __ Stp(x10,
3168 x11, 3160 x11,
3169 MemOperand(last_match_offsets, kXRegSizeInBytes * 2, PostIndex)); 3161 MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
3170 __ B(&next_capture); 3162 __ B(&next_capture);
3171 __ Bind(&done); 3163 __ Bind(&done);
3172 3164
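The "clearing the 32 bottom bits gives us a Smi" step above depends on the 64-bit smi layout (payload in the upper word, kSmiShift == 32) and on the two consecutive 32-bit capture offsets sharing one X register after the 64-bit load. A host-side sketch of the bit manipulation, assuming the little-endian layout the Ldr produces:

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiShift = 32;

    int64_t SmiTag(int64_t value) { return value << kSmiShift; }

    int main() {
      // After the 64-bit load, the first offset sits in the low word and the
      // second in the high word.
      uint64_t packed = (static_cast<uint64_t>(42u) << 32) | 7u;
      int64_t first_smi = SmiTag(static_cast<int32_t>(packed & 0xffffffffu));
      // Equivalent of And(x11, current_offset, ~kWRegMask).
      int64_t second_smi = static_cast<int64_t>(packed & 0xffffffff00000000ULL);
      assert(first_smi == SmiTag(7));
      assert(second_smi == SmiTag(42));
      return 0;
    }
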
3173 // Return last match info. 3165 // Return last match info.
3174 __ Peek(x0, kLastMatchInfoOffset); 3166 __ Peek(x0, kLastMatchInfoOffset);
3175 __ PopCPURegList(used_callee_saved_registers); 3167 __ PopCPURegList(used_callee_saved_registers);
3176 // Drop the 4 arguments of the stub from the stack. 3168 // Drop the 4 arguments of the stub from the stack.
3177 __ Drop(4); 3169 __ Drop(4);
3178 __ Ret(); 3170 __ Ret();
3179 3171
(...skipping 70 matching lines...)
3250 3242
3251 // (9) Sliced string. Replace subject with parent. 3243 // (9) Sliced string. Replace subject with parent.
3252 __ Ldr(sliced_string_offset, 3244 __ Ldr(sliced_string_offset,
3253 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset)); 3245 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
3254 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); 3246 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
3255 __ B(&check_underlying); // Go to (4). 3247 __ B(&check_underlying); // Go to (4).
3256 #endif 3248 #endif
3257 } 3249 }
3258 3250
3259 3251
3260 // TODO(jbramley): Don't use static registers here, but take them as arguments. 3252 static void GenerateRecordCallTarget(MacroAssembler* masm,
3261 static void GenerateRecordCallTarget(MacroAssembler* masm) { 3253 Register argc,
3254 Register function,
3255 Register feedback_vector,
3256 Register index,
3257 Register scratch1,
3258 Register scratch2) {
3262 ASM_LOCATION("GenerateRecordCallTarget"); 3259 ASM_LOCATION("GenerateRecordCallTarget");
3260 ASSERT(!AreAliased(scratch1, scratch2,
3261 argc, function, feedback_vector, index));
3263 // Cache the called function in a feedback vector slot. Cache states are 3262 // Cache the called function in a feedback vector slot. Cache states are
3264 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic. 3263 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
3265 // x0 : number of arguments to the construct function 3264 // argc : number of arguments to the construct function
3266 // x1 : the function to call 3265 // function : the function to call
3267 // x2 : feedback vector 3266 // feedback_vector : the feedback vector
3268 // x3 : slot in feedback vector (smi) 3267 // index : slot in feedback vector (smi)
3269 Label initialize, done, miss, megamorphic, not_array_function; 3268 Label initialize, done, miss, megamorphic, not_array_function;
3270 3269
3271 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), 3270 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
3272 masm->isolate()->heap()->undefined_value()); 3271 masm->isolate()->heap()->megamorphic_symbol());
3273 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), 3272 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
3274 masm->isolate()->heap()->the_hole_value()); 3273 masm->isolate()->heap()->uninitialized_symbol());
3275 3274
3276 // Load the cache state. 3275 // Load the cache state.
3277 __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2)); 3276 __ Add(scratch1, feedback_vector,
3278 __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize)); 3277 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3278 __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3279 3279
3280 // A monomorphic cache hit or an already megamorphic state: invoke the 3280 // A monomorphic cache hit or an already megamorphic state: invoke the
3281 // function without changing the state. 3281 // function without changing the state.
3282 __ Cmp(x4, x1); 3282 __ Cmp(scratch1, function);
3283 __ B(eq, &done); 3283 __ B(eq, &done);
3284 3284
3285 // If we came here, we need to see if we are the array function. 3285 // If we came here, we need to see if we are the array function.
3286 // If we didn't have a matching function, and we didn't find the megamorph 3286 // If we didn't have a matching function, and we didn't find the megamorph
3287 // sentinel, then we have in the slot either some other function or an 3287 // sentinel, then we have in the slot either some other function or an
3288 // AllocationSite. Do a map check on the object in ecx. 3288 // AllocationSite. Do a map check on the object in scratch1 register.
3289 __ Ldr(x5, FieldMemOperand(x4, AllocationSite::kMapOffset)); 3289 __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
3290 __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &miss); 3290 __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
3291 3291
3292 // Make sure the function is the Array() function 3292 // Make sure the function is the Array() function
3293 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x4); 3293 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
3294 __ Cmp(x1, x4); 3294 __ Cmp(function, scratch1);
3295 __ B(ne, &megamorphic); 3295 __ B(ne, &megamorphic);
3296 __ B(&done); 3296 __ B(&done);
3297 3297
3298 __ Bind(&miss); 3298 __ Bind(&miss);
3299 3299
3300 // A monomorphic miss (i.e, here the cache is not uninitialized) goes 3300 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
3301 // megamorphic. 3301 // megamorphic.
3302 __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &initialize); 3302 __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
3303 // MegamorphicSentinel is an immortal immovable object (undefined) so no 3303 // MegamorphicSentinel is an immortal immovable object (undefined) so no
3304 // write-barrier is needed. 3304 // write-barrier is needed.
3305 __ Bind(&megamorphic); 3305 __ Bind(&megamorphic);
3306 __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2)); 3306 __ Add(scratch1, feedback_vector,
3307 __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); 3307 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3308 __ Str(x10, FieldMemOperand(x4, FixedArray::kHeaderSize)); 3308 __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
3309 __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3309 __ B(&done); 3310 __ B(&done);
3310 3311
3311 // An uninitialized cache is patched with the function or sentinel to 3312 // An uninitialized cache is patched with the function or sentinel to
3312 // indicate the ElementsKind if function is the Array constructor. 3313 // indicate the ElementsKind if function is the Array constructor.
3313 __ Bind(&initialize); 3314 __ Bind(&initialize);
3314 // Make sure the function is the Array() function 3315 // Make sure the function is the Array() function
3315 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x4); 3316 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
3316 __ Cmp(x1, x4); 3317 __ Cmp(function, scratch1);
3317 __ B(ne, &not_array_function); 3318 __ B(ne, &not_array_function);
3318 3319
3319 // The target function is the Array constructor, 3320 // The target function is the Array constructor,
3320 // Create an AllocationSite if we don't already have it, store it in the slot. 3321 // Create an AllocationSite if we don't already have it, store it in the slot.
3321 { 3322 {
3322 FrameScope scope(masm, StackFrame::INTERNAL); 3323 FrameScope scope(masm, StackFrame::INTERNAL);
3323 CreateAllocationSiteStub create_stub; 3324 CreateAllocationSiteStub create_stub;
3324 3325
3325 // Arguments register must be smi-tagged to call out. 3326 // Arguments register must be smi-tagged to call out.
3326 __ SmiTag(x0); 3327 __ SmiTag(argc);
3327 __ Push(x0, x1, x2, x3); 3328 __ Push(argc, function, feedback_vector, index);
3328 3329
3330 // CreateAllocationSiteStub expect the feedback vector in x2 and the slot
3331 // index in x3.
3332 ASSERT(feedback_vector.Is(x2) && index.Is(x3));
3329 __ CallStub(&create_stub); 3333 __ CallStub(&create_stub);
3330 3334
3331 __ Pop(x3, x2, x1, x0); 3335 __ Pop(index, feedback_vector, function, argc);
3332 __ SmiUntag(x0); 3336 __ SmiUntag(argc);
3333 } 3337 }
3334 __ B(&done); 3338 __ B(&done);
3335 3339
3336 __ Bind(&not_array_function); 3340 __ Bind(&not_array_function);
3337 // An uninitialized cache is patched with the function. 3341 // An uninitialized cache is patched with the function.
3338 3342
3339 __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2)); 3343 __ Add(scratch1, feedback_vector,
3340 // TODO(all): Does the value need to be left in x4? If not, FieldMemOperand 3344 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3341 // could be used to avoid this add. 3345 __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
3342 __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag); 3346 __ Str(function, MemOperand(scratch1, 0));
3343 __ Str(x1, MemOperand(x4, 0));
3344 3347
3345 __ Push(x4, x2, x1); 3348 __ Push(function);
3346 __ RecordWrite(x2, x4, x1, kLRHasNotBeenSaved, kDontSaveFPRegs, 3349 __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
3347 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); 3350 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
3348 __ Pop(x1, x2, x4); 3351 __ Pop(function);
3349
3350 // TODO(all): Are x4, x2 and x1 outputs? This isn't clear.
3351 3352
3352 __ Bind(&done); 3353 __ Bind(&done);
3353 } 3354 }
3354 3355
3355 3356
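For readers new to this code, a simplified sketch of the state machine GenerateRecordCallTarget implements for a feedback-vector slot. The types and names here are illustrative, not V8's, and the real slot additionally special-cases the Array constructor with an AllocationSite:

    #include <cassert>

    struct Function { int id; };

    enum class SlotState { kUninitialized, kMonomorphic, kMegamorphic };

    // One feedback slot: patched with the first callee (monomorphic); any later
    // mismatch makes it megamorphic, and it is never patched back.
    struct FeedbackSlot {
      SlotState state = SlotState::kUninitialized;
      const Function* target = nullptr;

      void RecordCall(const Function* callee) {
        switch (state) {
          case SlotState::kUninitialized:
            state = SlotState::kMonomorphic;
            target = callee;
            break;
          case SlotState::kMonomorphic:
            if (target != callee) {        // monomorphic miss -> megamorphic
              state = SlotState::kMegamorphic;
              target = nullptr;
            }
            break;
          case SlotState::kMegamorphic:
            break;                         // stays megamorphic
        }
      }
    };

    int main() {
      Function f{1}, g{2};
      FeedbackSlot slot;
      slot.RecordCall(&f);                 // uninitialized -> monomorphic
      assert(slot.state == SlotState::kMonomorphic && slot.target == &f);
      slot.RecordCall(&f);                 // hit: state unchanged
      assert(slot.state == SlotState::kMonomorphic);
      slot.RecordCall(&g);                 // miss: go megamorphic
      assert(slot.state == SlotState::kMegamorphic);
      return 0;
    }
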
3356 void CallFunctionStub::Generate(MacroAssembler* masm) { 3357 void CallFunctionStub::Generate(MacroAssembler* masm) {
3357 ASM_LOCATION("CallFunctionStub::Generate"); 3358 ASM_LOCATION("CallFunctionStub::Generate");
3358 // x1 function the function to call 3359 // x1 function the function to call
3359 // x2 : feedback vector 3360 // x2 : feedback vector
3360 // x3 : slot in feedback vector (smi) (if x2 is not undefined) 3361 // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
3361 Register function = x1; 3362 Register function = x1;
3362 Register cache_cell = x2; 3363 Register cache_cell = x2;
3363 Register slot = x3; 3364 Register slot = x3;
3364 Register type = x4; 3365 Register type = x4;
3365 Label slow, non_function, wrap, cont; 3366 Label slow, non_function, wrap, cont;
3366 3367
3367 // TODO(jbramley): This function has a lot of unnamed registers. Name them, 3368 // TODO(jbramley): This function has a lot of unnamed registers. Name them,
3368 // and tidy things up a bit. 3369 // and tidy things up a bit.
3369 3370
3370 if (NeedsChecks()) { 3371 if (NeedsChecks()) {
3371 // Check that the function is really a JavaScript function. 3372 // Check that the function is really a JavaScript function.
3372 __ JumpIfSmi(function, &non_function); 3373 __ JumpIfSmi(function, &non_function);
3373 3374
3374 // Goto slow case if we do not have a function. 3375 // Goto slow case if we do not have a function.
3375 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow); 3376 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
3376 3377
3377 if (RecordCallTarget()) { 3378 if (RecordCallTarget()) {
3378 GenerateRecordCallTarget(masm); 3379 GenerateRecordCallTarget(masm, x0, function, cache_cell, slot, x4, x5);
3379 } 3380 }
3380 } 3381 }
3381 3382
3382 // Fast-case: Invoke the function now. 3383 // Fast-case: Invoke the function now.
3383 // x1 function pushed function 3384 // x1 function pushed function
3384 ParameterCount actual(argc_); 3385 ParameterCount actual(argc_);
3385 3386
3386 if (CallAsMethod()) { 3387 if (CallAsMethod()) {
3387 if (NeedsChecks()) { 3388 if (NeedsChecks()) {
3388 // Do not transform the receiver for strict mode functions. 3389 // Do not transform the receiver for strict mode functions.
3389 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); 3390 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
3390 __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset)); 3391 __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
3391 __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont); 3392 __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont);
3392 3393
3393 // Do not transform the receiver for native (Compilerhints already in x3). 3394 // Do not transform the receiver for native (Compilerhints already in x3).
3394 __ Tbnz(w4, SharedFunctionInfo::kNative, &cont); 3395 __ Tbnz(w4, SharedFunctionInfo::kNative, &cont);
3395 } 3396 }
3396 3397
3397 // Compute the receiver in non-strict mode. 3398 // Compute the receiver in sloppy mode.
3398 __ Peek(x3, argc_ * kPointerSize); 3399 __ Peek(x3, argc_ * kPointerSize);
3399 3400
3400 if (NeedsChecks()) { 3401 if (NeedsChecks()) {
3401 __ JumpIfSmi(x3, &wrap); 3402 __ JumpIfSmi(x3, &wrap);
3402 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt); 3403 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
3403 } else { 3404 } else {
3404 __ B(&wrap); 3405 __ B(&wrap);
3405 } 3406 }
3406 3407
3407 __ Bind(&cont); 3408 __ Bind(&cont);
3408 } 3409 }
3409 __ InvokeFunction(function, 3410 __ InvokeFunction(function,
3410 actual, 3411 actual,
3411 JUMP_FUNCTION, 3412 JUMP_FUNCTION,
3412 NullCallWrapper()); 3413 NullCallWrapper());
3413 3414
3414 if (NeedsChecks()) { 3415 if (NeedsChecks()) {
3415 // Slow-case: Non-function called. 3416 // Slow-case: Non-function called.
3416 __ Bind(&slow); 3417 __ Bind(&slow);
3417 if (RecordCallTarget()) { 3418 if (RecordCallTarget()) {
3418 // If there is a call target cache, mark it megamorphic in the 3419 // If there is a call target cache, mark it megamorphic in the
3419 // non-function case. MegamorphicSentinel is an immortal immovable object 3420 // non-function case. MegamorphicSentinel is an immortal immovable object
3420 // (undefined) so no write barrier is needed. 3421 // (megamorphic symbol) so no write barrier is needed.
3421 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), 3422 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
3422 masm->isolate()->heap()->undefined_value()); 3423 masm->isolate()->heap()->megamorphic_symbol());
3423 __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot, 3424 __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot,
3424 kPointerSizeLog2)); 3425 kPointerSizeLog2));
3425 __ LoadRoot(x11, Heap::kUndefinedValueRootIndex); 3426 __ LoadRoot(x11, Heap::kMegamorphicSymbolRootIndex);
3426 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize)); 3427 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
3427 } 3428 }
3428 // Check for function proxy. 3429 // Check for function proxy.
3429 // x10 : function type. 3430 // x10 : function type.
3430 __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function); 3431 __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function);
3431 __ Push(function); // put proxy as additional argument 3432 __ Push(function); // put proxy as additional argument
3432 __ Mov(x0, argc_ + 1); 3433 __ Mov(x0, argc_ + 1);
3433 __ Mov(x2, 0); 3434 __ Mov(x2, 0);
3434 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY); 3435 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
3435 { 3436 {
3436 Handle<Code> adaptor = 3437 Handle<Code> adaptor =
3437 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); 3438 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3438 __ Jump(adaptor, RelocInfo::CODE_TARGET); 3439 __ Jump(adaptor, RelocInfo::CODE_TARGET);
3439 } 3440 }
3440 3441
3441 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead 3442 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3442 // of the original receiver from the call site). 3443 // of the original receiver from the call site).
3443 __ Bind(&non_function); 3444 __ Bind(&non_function);
3444 __ Poke(function, argc_ * kXRegSizeInBytes); 3445 __ Poke(function, argc_ * kXRegSize);
3445 __ Mov(x0, argc_); // Set up the number of arguments. 3446 __ Mov(x0, argc_); // Set up the number of arguments.
3446 __ Mov(x2, 0); 3447 __ Mov(x2, 0);
3447 __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION); 3448 __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
3448 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), 3449 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3449 RelocInfo::CODE_TARGET); 3450 RelocInfo::CODE_TARGET);
3450 } 3451 }
3451 3452
3452 if (CallAsMethod()) { 3453 if (CallAsMethod()) {
3453 __ Bind(&wrap); 3454 __ Bind(&wrap);
3454 // Wrap the receiver and patch it back onto the stack. 3455 // Wrap the receiver and patch it back onto the stack.
3455 { FrameScope frame_scope(masm, StackFrame::INTERNAL); 3456 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
3456 __ Push(x1, x3); 3457 __ Push(x1, x3);
3457 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); 3458 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
3458 __ Pop(x1); 3459 __ Pop(x1);
3459 } 3460 }
3460 __ Poke(x0, argc_ * kPointerSize); 3461 __ Poke(x0, argc_ * kPointerSize);
3461 __ B(&cont); 3462 __ B(&cont);
3462 } 3463 }
3463 } 3464 }
3464 3465
3465 3466
3466 void CallConstructStub::Generate(MacroAssembler* masm) { 3467 void CallConstructStub::Generate(MacroAssembler* masm) {
3467 ASM_LOCATION("CallConstructStub::Generate"); 3468 ASM_LOCATION("CallConstructStub::Generate");
3468 // x0 : number of arguments 3469 // x0 : number of arguments
3469 // x1 : the function to call 3470 // x1 : the function to call
3470 // x2 : feedback vector 3471 // x2 : feedback vector
3471 // x3 : slot in feedback vector (smi) (if r2 is not undefined) 3472 // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
3472 Register function = x1; 3473 Register function = x1;
3473 Label slow, non_function_call; 3474 Label slow, non_function_call;
3474 3475
3475 // Check that the function is not a smi. 3476 // Check that the function is not a smi.
3476 __ JumpIfSmi(function, &non_function_call); 3477 __ JumpIfSmi(function, &non_function_call);
3477 // Check that the function is a JSFunction. 3478 // Check that the function is a JSFunction.
3478 Register object_type = x10; 3479 Register object_type = x10;
3479 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE, 3480 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
3480 &slow); 3481 &slow);
3481 3482
3482 if (RecordCallTarget()) { 3483 if (RecordCallTarget()) {
3483 GenerateRecordCallTarget(masm); 3484 GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
3484 } 3485 }
3485 3486
3486 // Jump to the function-specific construct stub. 3487 // Jump to the function-specific construct stub.
3487 Register jump_reg = x4; 3488 Register jump_reg = x4;
3488 Register shared_func_info = jump_reg; 3489 Register shared_func_info = jump_reg;
3489 Register cons_stub = jump_reg; 3490 Register cons_stub = jump_reg;
3490 Register cons_stub_code = jump_reg; 3491 Register cons_stub_code = jump_reg;
3491 __ Ldr(shared_func_info, 3492 __ Ldr(shared_func_info,
3492 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3493 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3493 __ Ldr(cons_stub, 3494 __ Ldr(cons_stub,
(...skipping 1130 matching lines...)
4624 __ Cmp(allocation_top, x10); 4625 __ Cmp(allocation_top, x10);
4625 __ B(hi, &call_builtin); 4626 __ B(hi, &call_builtin);
4626 4627
4627 // We fit and could grow elements. 4628 // We fit and could grow elements.
4628 // Update new_space_allocation_top. 4629 // Update new_space_allocation_top.
4629 __ Str(allocation_top, MemOperand(allocation_top_addr)); 4630 __ Str(allocation_top, MemOperand(allocation_top_addr));
4630 // Push the argument. 4631 // Push the argument.
4631 __ Str(argument, MemOperand(end_elements)); 4632 __ Str(argument, MemOperand(end_elements));
4632 // Fill the rest with holes. 4633 // Fill the rest with holes.
4633 __ LoadRoot(x10, Heap::kTheHoleValueRootIndex); 4634 __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
4634 for (int i = 1; i < kAllocationDelta; i++) { 4635 ASSERT(kAllocationDelta == 4);
4635 // TODO(all): Try to use stp here. 4636 __ Stp(x10, x10, MemOperand(end_elements, 1 * kPointerSize));
4636 __ Str(x10, MemOperand(end_elements, i * kPointerSize)); 4637 __ Stp(x10, x10, MemOperand(end_elements, 3 * kPointerSize));
4637 }
4638 4638
4639 // Update elements' and array's sizes. 4639 // Update elements' and array's sizes.
4640 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); 4640 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
4641 __ Add(elements_length, 4641 __ Add(elements_length,
4642 elements_length, 4642 elements_length,
4643 Operand(Smi::FromInt(kAllocationDelta))); 4643 Operand(Smi::FromInt(kAllocationDelta)));
4644 __ Str(elements_length, 4644 __ Str(elements_length,
4645 FieldMemOperand(elements, FixedArray::kLengthOffset)); 4645 FieldMemOperand(elements, FixedArray::kLengthOffset));
4646 4646
4647 // Elements are in new space, so write barrier is not required. 4647 // Elements are in new space, so write barrier is not required.
(...skipping 55 matching lines...)
4703 4703
4704 __ CheckPageFlagSet(regs_.object(), 4704 __ CheckPageFlagSet(regs_.object(),
4705 value, 4705 value,
4706 1 << MemoryChunk::SCAN_ON_SCAVENGE, 4706 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4707 &dont_need_remembered_set); 4707 &dont_need_remembered_set);
4708 4708
4709 // First notify the incremental marker if necessary, then update the 4709 // First notify the incremental marker if necessary, then update the
4710 // remembered set. 4710 // remembered set.
4711 CheckNeedsToInformIncrementalMarker( 4711 CheckNeedsToInformIncrementalMarker(
4712 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); 4712 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4713 InformIncrementalMarker(masm, mode); 4713 InformIncrementalMarker(masm);
4714 regs_.Restore(masm); // Restore the extra scratch registers we used. 4714 regs_.Restore(masm); // Restore the extra scratch registers we used.
4715
4715 __ RememberedSetHelper(object_, 4716 __ RememberedSetHelper(object_,
4716 address_, 4717 address_,
4717 value_, 4718 value_, // scratch1
4718 save_fp_regs_mode_, 4719 save_fp_regs_mode_,
4719 MacroAssembler::kReturnAtEnd); 4720 MacroAssembler::kReturnAtEnd);
4720 4721
4721 __ Bind(&dont_need_remembered_set); 4722 __ Bind(&dont_need_remembered_set);
4722 } 4723 }
4723 4724
4724 CheckNeedsToInformIncrementalMarker( 4725 CheckNeedsToInformIncrementalMarker(
4725 masm, kReturnOnNoNeedToInformIncrementalMarker, mode); 4726 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4726 InformIncrementalMarker(masm, mode); 4727 InformIncrementalMarker(masm);
4727 regs_.Restore(masm); // Restore the extra scratch registers we used. 4728 regs_.Restore(masm); // Restore the extra scratch registers we used.
4728 __ Ret(); 4729 __ Ret();
4729 } 4730 }
4730 4731
4731 4732
4732 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { 4733 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4733 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); 4734 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4734 Register address = 4735 Register address =
4735 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address(); 4736 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
4736 ASSERT(!address.Is(regs_.object())); 4737 ASSERT(!address.Is(regs_.object()));
4737 ASSERT(!address.Is(x0)); 4738 ASSERT(!address.Is(x0));
4738 __ Mov(address, regs_.address()); 4739 __ Mov(address, regs_.address());
4739 __ Mov(x0, regs_.object()); 4740 __ Mov(x0, regs_.object());
4740 __ Mov(x1, address); 4741 __ Mov(x1, address);
4741 __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate()))); 4742 __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
4742 4743
4743 AllowExternalCallThatCantCauseGC scope(masm); 4744 AllowExternalCallThatCantCauseGC scope(masm);
4744 ExternalReference function = (mode == INCREMENTAL_COMPACTION) 4745 ExternalReference function =
4745 ? ExternalReference::incremental_evacuation_record_write_function( 4746 ExternalReference::incremental_marking_record_write_function(
4746 masm->isolate())
4747 : ExternalReference::incremental_marking_record_write_function(
4748 masm->isolate()); 4747 masm->isolate());
4749 __ CallCFunction(function, 3, 0); 4748 __ CallCFunction(function, 3, 0);
4750 4749
4751 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); 4750 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4752 } 4751 }
4753 4752
4754 4753
4755 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( 4754 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4756 MacroAssembler* masm, 4755 MacroAssembler* masm,
4757 OnNoNeedToInformIncrementalMarker on_no_need, 4756 OnNoNeedToInformIncrementalMarker on_no_need,
(...skipping 12 matching lines...)
4770 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset)); 4769 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4771 __ B(mi, &need_incremental); 4770 __ B(mi, &need_incremental);
4772 4771
4773 // If the object is not black we don't have to inform the incremental marker. 4772 // If the object is not black we don't have to inform the incremental marker.
4774 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); 4773 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4775 4774
4776 regs_.Restore(masm); // Restore the extra scratch registers we used. 4775 regs_.Restore(masm); // Restore the extra scratch registers we used.
4777 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { 4776 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4778 __ RememberedSetHelper(object_, 4777 __ RememberedSetHelper(object_,
4779 address_, 4778 address_,
4780 value_, 4779 value_, // scratch1
4781 save_fp_regs_mode_, 4780 save_fp_regs_mode_,
4782 MacroAssembler::kReturnAtEnd); 4781 MacroAssembler::kReturnAtEnd);
4783 } else { 4782 } else {
4784 __ Ret(); 4783 __ Ret();
4785 } 4784 }
4786 4785
4787 __ Bind(&on_black); 4786 __ Bind(&on_black);
4788 // Get the value from the slot. 4787 // Get the value from the slot.
4789 Register value = regs_.scratch0(); 4788 Register value = regs_.scratch0();
4790 __ Ldr(value, MemOperand(regs_.address())); 4789 __ Ldr(value, MemOperand(regs_.address()));
(...skipping 22 matching lines...)
4813 regs_.object(), // Scratch. 4812 regs_.object(), // Scratch.
4814 regs_.address(), // Scratch. 4813 regs_.address(), // Scratch.
4815 regs_.scratch2(), // Scratch. 4814 regs_.scratch2(), // Scratch.
4816 &need_incremental_pop_scratch); 4815 &need_incremental_pop_scratch);
4817 __ Pop(regs_.object(), regs_.address()); 4816 __ Pop(regs_.object(), regs_.address());
4818 4817
4819 regs_.Restore(masm); // Restore the extra scratch registers we used. 4818 regs_.Restore(masm); // Restore the extra scratch registers we used.
4820 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { 4819 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4821 __ RememberedSetHelper(object_, 4820 __ RememberedSetHelper(object_,
4822 address_, 4821 address_,
4823 value_, 4822 value_, // scratch1
4824 save_fp_regs_mode_, 4823 save_fp_regs_mode_,
4825 MacroAssembler::kReturnAtEnd); 4824 MacroAssembler::kReturnAtEnd);
4826 } else { 4825 } else {
4827 __ Ret(); 4826 __ Ret();
4828 } 4827 }
4829 4828
4830 __ Bind(&need_incremental_pop_scratch); 4829 __ Bind(&need_incremental_pop_scratch);
4831 __ Pop(regs_.object(), regs_.address()); 4830 __ Pop(regs_.object(), regs_.address());
4832 4831
4833 __ Bind(&need_incremental); 4832 __ Bind(&need_incremental);
(...skipping 12 matching lines...)
4846 // See RecordWriteStub::Patch for details. 4845 // See RecordWriteStub::Patch for details.
4847 { 4846 {
4848 InstructionAccurateScope scope(masm, 2); 4847 InstructionAccurateScope scope(masm, 2);
4849 __ adr(xzr, &skip_to_incremental_noncompacting); 4848 __ adr(xzr, &skip_to_incremental_noncompacting);
4850 __ adr(xzr, &skip_to_incremental_compacting); 4849 __ adr(xzr, &skip_to_incremental_compacting);
4851 } 4850 }
4852 4851
4853 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { 4852 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4854 __ RememberedSetHelper(object_, 4853 __ RememberedSetHelper(object_,
4855 address_, 4854 address_,
4856 value_, 4855 value_, // scratch1
4857 save_fp_regs_mode_, 4856 save_fp_regs_mode_,
4858 MacroAssembler::kReturnAtEnd); 4857 MacroAssembler::kReturnAtEnd);
4859 } 4858 }
4860 __ Ret(); 4859 __ Ret();
4861 4860
4862 __ Bind(&skip_to_incremental_noncompacting); 4861 __ Bind(&skip_to_incremental_noncompacting);
4863 GenerateIncremental(masm, INCREMENTAL); 4862 GenerateIncremental(masm, INCREMENTAL);
4864 4863
4865 __ Bind(&skip_to_incremental_compacting); 4864 __ Bind(&skip_to_incremental_compacting);
4866 GenerateIncremental(masm, INCREMENTAL_COMPACTION); 4865 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
(...skipping 56 matching lines...)
4923 4922
4924 __ Bind(&double_elements); 4923 __ Bind(&double_elements);
4925 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset)); 4924 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4926 __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1, 4925 __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
4927 &slow_elements); 4926 &slow_elements);
4928 __ Ret(); 4927 __ Ret();
4929 } 4928 }
4930 4929
4931 4930
4932 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { 4931 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4933 // TODO(jbramley): The ARM code leaves the (shifted) offset in r1. Why? 4932 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
4934 CEntryStub ces(1, kSaveFPRegs);
4935 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); 4933 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4936 int parameter_count_offset = 4934 int parameter_count_offset =
4937 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; 4935 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4938 __ Ldr(x1, MemOperand(fp, parameter_count_offset)); 4936 __ Ldr(x1, MemOperand(fp, parameter_count_offset));
4939 if (function_mode_ == JS_FUNCTION_STUB_MODE) { 4937 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4940 __ Add(x1, x1, 1); 4938 __ Add(x1, x1, 1);
4941 } 4939 }
4942 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); 4940 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4943 __ Drop(x1); 4941 __ Drop(x1);
4944 // Return to IC Miss stub, continuation still on stack. 4942 // Return to IC Miss stub, continuation still on stack.
4945 __ Ret(); 4943 __ Ret();
4946 } 4944 }
4947 4945
4948 4946
4949 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { 4947 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4950 if (masm->isolate()->function_entry_hook() != NULL) { 4948 if (masm->isolate()->function_entry_hook() != NULL) {
4951 // TODO(all): This needs to be reliably consistent with 4949 // TODO(all): This needs to be reliably consistent with
4952 // kReturnAddressDistanceFromFunctionStart in ::Generate. 4950 // kReturnAddressDistanceFromFunctionStart in ::Generate.
4953 Assembler::BlockConstPoolScope no_const_pools(masm); 4951 Assembler::BlockPoolsScope no_pools(masm);
4954 ProfileEntryHookStub stub; 4952 ProfileEntryHookStub stub;
4955 __ Push(lr); 4953 __ Push(lr);
4956 __ CallStub(&stub); 4954 __ CallStub(&stub);
4957 __ Pop(lr); 4955 __ Pop(lr);
4958 } 4956 }
4959 } 4957 }
4960 4958
4961 4959
4962 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { 4960 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4963 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); 4961 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
4964 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by 4962 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
4965 // a "Push lr" instruction, followed by a call. 4963 // a "Push lr" instruction, followed by a call.
4966 // TODO(jbramley): Verify that this call is always made with relocation.
4967 static const int kReturnAddressDistanceFromFunctionStart = 4964 static const int kReturnAddressDistanceFromFunctionStart =
4968 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize); 4965 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
4969 4966
4970 // Save all kCallerSaved registers (including lr), since this can be called 4967 // Save all kCallerSaved registers (including lr), since this can be called
4971 // from anywhere. 4968 // from anywhere.
4972 // TODO(jbramley): What about FP registers? 4969 // TODO(jbramley): What about FP registers?
4973 __ PushCPURegList(kCallerSaved); 4970 __ PushCPURegList(kCallerSaved);
4974 ASSERT(kCallerSaved.IncludesAliasOf(lr)); 4971 ASSERT(kCallerSaved.IncludesAliasOf(lr));
4975 const int kNumSavedRegs = kCallerSaved.Count(); 4972 const int kNumSavedRegs = kCallerSaved.Count();
4976 4973
(...skipping 101 matching lines...)
5078 __ Add(scratch2, scratch2, Operand( 5075 __ Add(scratch2, scratch2, Operand(
5079 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); 5076 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
5080 } 5077 }
5081 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); 5078 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
5082 5079
5083 // Scale the index by multiplying by the element size. 5080 // Scale the index by multiplying by the element size.
5084 ASSERT(NameDictionary::kEntrySize == 3); 5081 ASSERT(NameDictionary::kEntrySize == 3);
5085 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); 5082 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
5086 5083
5087 // Check if the key is identical to the name. 5084 // Check if the key is identical to the name.
5085 UseScratchRegisterScope temps(masm);
5086 Register scratch3 = temps.AcquireX();
5088 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2)); 5087 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
5089 // TODO(jbramley): We need another scratch here, but some callers can't 5088 __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
5090 // provide a scratch3 so we have to use Tmp1(). We should find a clean way 5089 __ Cmp(name, scratch3);
5091 // to make it unavailable to the MacroAssembler for a short time.
5092 __ Ldr(__ Tmp1(), FieldMemOperand(scratch2, kElementsStartOffset));
5093 __ Cmp(name, __ Tmp1());
5094 __ B(eq, done); 5090 __ B(eq, done);
5095 } 5091 }
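An aside on the probe loop above, since the scaling step is easy to misread: the entry offset is the probe index times NameDictionary::kEntrySize (asserted to be 3), and the stub folds that multiply into a single shift-and-add. A minimal standalone sketch of the same identity, in plain C++ rather than V8 macro-assembler code:

    #include <cassert>

    int main() {
      // index * 3 computed as index + (index << 1), the same trick as
      // Add(scratch2, scratch2, Operand(scratch2, LSL, 1)) above.
      for (int index = 0; index < 4096; ++index) {
        int scaled = index + (index << 1);
        assert(scaled == index * 3);
      }
      return 0;
    }
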
5096 5092
5097 // The inlined probes didn't find the entry. 5093 // The inlined probes didn't find the entry.
5098 // Call the complete stub to scan the whole dictionary. 5094 // Call the complete stub to scan the whole dictionary.
5099 5095
5100 CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6); 5096 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
5101 spill_list.Combine(lr); 5097 spill_list.Combine(lr);
5102 spill_list.Remove(scratch1); 5098 spill_list.Remove(scratch1);
5103 spill_list.Remove(scratch2); 5099 spill_list.Remove(scratch2);
5104 5100
5105 __ PushCPURegList(spill_list); 5101 __ PushCPURegList(spill_list);
5106 5102
5107 if (name.is(x0)) { 5103 if (name.is(x0)) {
5108 ASSERT(!elements.is(x1)); 5104 ASSERT(!elements.is(x1));
5109 __ Mov(x1, name); 5105 __ Mov(x1, name);
5110 __ Mov(x0, elements); 5106 __ Mov(x0, elements);
(...skipping 59 matching lines...)
5170 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good); 5166 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
5171 5167
5172 // Check if the entry name is not a unique name. 5168 // Check if the entry name is not a unique name.
5173 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); 5169 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
5174 __ Ldrb(entity_name, 5170 __ Ldrb(entity_name,
5175 FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); 5171 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
5176 __ JumpIfNotUniqueName(entity_name, miss); 5172 __ JumpIfNotUniqueName(entity_name, miss);
5177 __ Bind(&good); 5173 __ Bind(&good);
5178 } 5174 }
5179 5175
5180 CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6); 5176 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
5181 spill_list.Combine(lr); 5177 spill_list.Combine(lr);
5182 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved. 5178 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
5183 5179
5184 __ PushCPURegList(spill_list); 5180 __ PushCPURegList(spill_list);
5185 5181
5186 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); 5182 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
5187 __ Mov(x1, Operand(name)); 5183 __ Mov(x1, Operand(name));
5188 NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); 5184 NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
5189 __ CallStub(&stub); 5185 __ CallStub(&stub);
5190 // Move stub return value to scratch0. Note that scratch0 is not included in 5186 // Move stub return value to scratch0. Note that scratch0 is not included in
(...skipping 189 matching lines...)
5380 __ Add(x11, x11, Operand(Smi::FromInt(kFastElementsKindPackedToHoley))); 5376 __ Add(x11, x11, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5381 __ Str(x11, FieldMemOperand(allocation_site, 5377 __ Str(x11, FieldMemOperand(allocation_site,
5382 AllocationSite::kTransitionInfoOffset)); 5378 AllocationSite::kTransitionInfoOffset));
5383 5379
5384 __ Bind(&normal_sequence); 5380 __ Bind(&normal_sequence);
5385 int last_index = 5381 int last_index =
5386 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); 5382 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
5387 for (int i = 0; i <= last_index; ++i) { 5383 for (int i = 0; i <= last_index; ++i) {
5388 Label next; 5384 Label next;
5389 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i); 5385 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
5390 // TODO(jbramley): Is this the best way to handle this? Can we make the
5391 // tail calls conditional, rather than hopping over each one?
5392 __ CompareAndBranch(kind, candidate_kind, ne, &next); 5386 __ CompareAndBranch(kind, candidate_kind, ne, &next);
5393 ArraySingleArgumentConstructorStub stub(candidate_kind); 5387 ArraySingleArgumentConstructorStub stub(candidate_kind);
5394 __ TailCallStub(&stub); 5388 __ TailCallStub(&stub);
5395 __ Bind(&next); 5389 __ Bind(&next);
5396 } 5390 }
5397 5391
5398 // If we reached this point there is a problem. 5392 // If we reached this point there is a problem.
5399 __ Abort(kUnexpectedElementsKindInArrayConstructor); 5393 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5400 } else { 5394 } else {
5401 UNREACHABLE(); 5395 UNREACHABLE();
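For orientation, the loop above is a linear-scan dispatch: compare the runtime elements kind against each candidate in sequence, tail-call the stub for the first match, and abort if nothing matched. A standalone C++ sketch of that control flow (not V8 code; the enum values and printouts are illustrative stand-ins):

    #include <cstdio>

    enum ElementsKind { FAST_SMI, FAST_HOLEY_SMI, FAST, FAST_HOLEY, LAST_KIND = FAST_HOLEY };

    void DispatchSingleArgumentConstructor(ElementsKind kind) {
      for (int i = 0; i <= LAST_KIND; ++i) {
        if (kind != static_cast<ElementsKind>(i)) continue;  // CompareAndBranch(ne, &next)
        std::printf("tail-call ArraySingleArgumentConstructorStub for kind %d\n", i);
        return;  // a tail call never falls through to the next candidate
      }
      std::printf("abort: unexpected elements kind in array constructor\n");
    }

    int main() {
      DispatchSingleArgumentConstructor(FAST_HOLEY);
      return 0;
    }
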
(...skipping 73 matching lines...)
5475 UNREACHABLE(); 5469 UNREACHABLE();
5476 } 5470 }
5477 } 5471 }
5478 5472
5479 5473
5480 void ArrayConstructorStub::Generate(MacroAssembler* masm) { 5474 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5481 ASM_LOCATION("ArrayConstructorStub::Generate"); 5475 ASM_LOCATION("ArrayConstructorStub::Generate");
5482 // ----------- S t a t e ------------- 5476 // ----------- S t a t e -------------
5483 // -- x0 : argc (only if argument_count_ == ANY) 5477 // -- x0 : argc (only if argument_count_ == ANY)
5484 // -- x1 : constructor 5478 // -- x1 : constructor
5485 // -- x2 : feedback vector (fixed array or undefined) 5479 // -- x2 : feedback vector (fixed array or the megamorphic symbol)
5486 // -- x3 : slot index (if x2 is fixed array) 5480 // -- x3 : slot index (if x2 is fixed array)
5487 // -- sp[0] : return address 5481 // -- sp[0] : return address
5488 // -- sp[4] : last argument 5482 // -- sp[4] : last argument
5489 // ----------------------------------- 5483 // -----------------------------------
5490 Register constructor = x1; 5484 Register constructor = x1;
5491 Register feedback_vector = x2; 5485 Register feedback_vector = x2;
5492 Register slot_index = x3; 5486 Register slot_index = x3;
5493 5487
5488 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
5489 masm->isolate()->heap()->megamorphic_symbol());
5490
5494 if (FLAG_debug_code) { 5491 if (FLAG_debug_code) {
5495 // The array construct code is only set for the global and natives 5492 // The array construct code is only set for the global and natives
5496 // builtin Array functions which always have maps. 5493 // builtin Array functions which always have maps.
5497 5494
5498 Label unexpected_map, map_ok; 5495 Label unexpected_map, map_ok;
5499 // Initial map for the builtin Array function should be a map. 5496 // Initial map for the builtin Array function should be a map.
5500 __ Ldr(x10, FieldMemOperand(constructor, 5497 __ Ldr(x10, FieldMemOperand(constructor,
5501 JSFunction::kPrototypeOrInitialMapOffset)); 5498 JSFunction::kPrototypeOrInitialMapOffset));
5502 // Will both indicate a NULL and a Smi. 5499 // Will both indicate a NULL and a Smi.
5503 __ JumpIfSmi(x10, &unexpected_map); 5500 __ JumpIfSmi(x10, &unexpected_map);
5504 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok); 5501 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5505 __ Bind(&unexpected_map); 5502 __ Bind(&unexpected_map);
5506 __ Abort(kUnexpectedInitialMapForArrayFunction); 5503 __ Abort(kUnexpectedInitialMapForArrayFunction);
5507 __ Bind(&map_ok); 5504 __ Bind(&map_ok);
5508 5505
5509 // In feedback_vector, we expect either undefined or a valid fixed array. 5506 // In feedback_vector, we expect either the megamorphic symbol or a valid
5507 // fixed array.
5510 Label okay_here; 5508 Label okay_here;
5511 Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map(); 5509 Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
5512 __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &okay_here); 5510 __ JumpIfRoot(feedback_vector, Heap::kMegamorphicSymbolRootIndex,
5511 &okay_here);
5513 __ Ldr(x10, FieldMemOperand(feedback_vector, FixedArray::kMapOffset)); 5512 __ Ldr(x10, FieldMemOperand(feedback_vector, FixedArray::kMapOffset));
5514 __ Cmp(x10, Operand(fixed_array_map)); 5513 __ Cmp(x10, Operand(fixed_array_map));
5515 __ Assert(eq, kExpectedFixedArrayInFeedbackVector); 5514 __ Assert(eq, kExpectedFixedArrayInFeedbackVector);
5516 5515
5517 // slot_index should be a smi if we don't have undefined in feedback_vector. 5516 // slot_index should be a smi if we don't have undefined in feedback_vector.
5518 __ AssertSmi(slot_index); 5517 __ AssertSmi(slot_index);
5519 5518
5520 __ Bind(&okay_here); 5519 __ Bind(&okay_here);
5521 } 5520 }
5522 5521
5523 Register allocation_site = x2; // Overwrites feedback_vector. 5522 Register allocation_site = x2; // Overwrites feedback_vector.
5524 Register kind = x3; 5523 Register kind = x3;
5525 Label no_info; 5524 Label no_info;
5526 // Get the elements kind and case on that. 5525 // Get the elements kind and case on that.
5527 __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &no_info); 5526 __ JumpIfRoot(feedback_vector, Heap::kMegamorphicSymbolRootIndex, &no_info);
5528 __ Add(feedback_vector, feedback_vector, 5527 __ Add(feedback_vector, feedback_vector,
5529 Operand::UntagSmiAndScale(slot_index, kPointerSizeLog2)); 5528 Operand::UntagSmiAndScale(slot_index, kPointerSizeLog2));
5530 __ Ldr(allocation_site, FieldMemOperand(feedback_vector, 5529 __ Ldr(allocation_site, FieldMemOperand(feedback_vector,
5531 FixedArray::kHeaderSize)); 5530 FixedArray::kHeaderSize));
5532 5531
5533 // If the feedback vector is undefined, or contains anything other than an 5532 // If the feedback vector is the megamorphic symbol, or contains anything
5534 // AllocationSite, call an array constructor that doesn't use AllocationSites. 5533 // other than an AllocationSite, call an array constructor that doesn't
5534 // use AllocationSites.
5535 __ Ldr(x10, FieldMemOperand(allocation_site, AllocationSite::kMapOffset)); 5535 __ Ldr(x10, FieldMemOperand(allocation_site, AllocationSite::kMapOffset));
5536 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, &no_info); 5536 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, &no_info);
5537 5537
5538 __ Ldrsw(kind, 5538 __ Ldrsw(kind,
5539 UntagSmiFieldMemOperand(allocation_site, 5539 UntagSmiFieldMemOperand(allocation_site,
5540 AllocationSite::kTransitionInfoOffset)); 5540 AllocationSite::kTransitionInfoOffset));
5541 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask); 5541 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
5542 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); 5542 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5543 5543
5544 __ Bind(&no_info); 5544 __ Bind(&no_info);
(...skipping 160 matching lines...)
5705 const int kCallApiFunctionSpillSpace = 4; 5705 const int kCallApiFunctionSpillSpace = 4;
5706 5706
5707 FrameScope frame_scope(masm, StackFrame::MANUAL); 5707 FrameScope frame_scope(masm, StackFrame::MANUAL);
5708 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace); 5708 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5709 5709
5710 // TODO(all): Optimize this with stp and suchlike. 5710 // TODO(all): Optimize this with stp and suchlike.
5711 ASSERT(!AreAliased(x0, api_function_address)); 5711 ASSERT(!AreAliased(x0, api_function_address));
5712 // x0 = FunctionCallbackInfo& 5712 // x0 = FunctionCallbackInfo&
5713 // Arguments is after the return address. 5713 // Arguments is after the return address.
5714 __ Add(x0, masm->StackPointer(), 1 * kPointerSize); 5714 __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
5715 // FunctionCallbackInfo::implicit_args_ 5715 // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
5716 __ Str(args, MemOperand(x0, 0 * kPointerSize));
5717 // FunctionCallbackInfo::values_
5718 __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); 5716 __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5719 __ Str(x10, MemOperand(x0, 1 * kPointerSize)); 5717 __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
5720 // FunctionCallbackInfo::length_ = argc 5718 // FunctionCallbackInfo::length_ = argc and
5719 // FunctionCallbackInfo::is_construct_call = 0
5721 __ Mov(x10, argc); 5720 __ Mov(x10, argc);
5722 __ Str(x10, MemOperand(x0, 2 * kPointerSize)); 5721 __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
5723 // FunctionCallbackInfo::is_construct_call = 0
5724 __ Str(xzr, MemOperand(x0, 3 * kPointerSize));
5725 5722
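A hedged note on the stp packing just above: the four pointer-size slots written here sit back to back (implicit_args_, values_, length_ and is_construct_call, per the comments in the code), so each paired store fills two neighbouring fields at once. A standalone C++ sketch of the same bookkeeping, where kPointerSize and kArgsLength are illustrative stand-ins rather than the real V8 constants:

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 8;  // stand-in for the A64 pointer size
    const int kArgsLength = 7;   // stand-in for FCA::kArgsLength

    struct CallbackInfoSlots {     // mirrors the four consecutive slots above
      uint64_t implicit_args;      // slot 0
      uint64_t values;             // slot 1
      uint64_t length;             // slot 2
      uint64_t is_construct_call;  // slot 3
    };

    int main() {
      const uint64_t args = 0x1000;  // pretend base of the pushed arguments
      const int argc = 2;

      CallbackInfoSlots info = {};
      // First paired store: implicit_args_ and values_ are adjacent slots.
      info.implicit_args = args;
      info.values = args + (kArgsLength - 1 + argc) * kPointerSize;
      // Second paired store: length_ = argc and is_construct_call = 0.
      info.length = argc;
      info.is_construct_call = 0;

      assert(info.values - info.implicit_args ==
             static_cast<uint64_t>((kArgsLength - 1 + argc) * kPointerSize));
      return 0;
    }
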
5726 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; 5723 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5727 Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); 5724 Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
5728 ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL; 5725 ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
5729 ApiFunction thunk_fun(thunk_address); 5726 ApiFunction thunk_fun(thunk_address);
5730 ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, 5727 ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5731 masm->isolate()); 5728 masm->isolate());
5732 5729
5733 AllowExternalCallThatCantCauseGC scope(masm); 5730 AllowExternalCallThatCantCauseGC scope(masm);
5734 MemOperand context_restore_operand( 5731 MemOperand context_restore_operand(
(...skipping 61 matching lines...)
5796 MemOperand(fp, 6 * kPointerSize), 5793 MemOperand(fp, 6 * kPointerSize),
5797 NULL); 5794 NULL);
5798 } 5795 }
5799 5796
5800 5797
5801 #undef __ 5798 #undef __
5802 5799
5803 } } // namespace v8::internal 5800 } } // namespace v8::internal
5804 5801
5805 #endif // V8_TARGET_ARCH_A64 5802 #endif // V8_TARGET_ARCH_A64