Chromium Code Reviews

Unified Diff: src/arm/stub-cache-arm.cc

Issue 6717018: Introduce accessors on builtins instance (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Removing Handle suffix and renaming enum entries Created 9 years, 9 months ago
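The shape of the change, as a minimal self-contained C++ sketch (the class shapes and names below are illustrative stand-ins, not the real declarations from src/builtins.h): instead of fetching a raw Code* through Isolate::Current()->builtins()->builtin(Builtins::kName) and wrapping it in a Handle at every call site, the Builtins instance exposes one accessor per builtin that returns a Handle<Code> directly, and the isolate is reached through the MacroAssembler rather than through the Isolate::Current() thread-local.

// Illustrative stand-ins for the real V8 types.
class Code {};

template <typename T>
class Handle {
 public:
  explicit Handle(T** location) : location_(location) {}
  T* operator*() const { return *location_; }
 private:
  T** location_;
};

class Builtins {
 public:
  enum Name { kLoadIC_Miss, kStoreIC_Miss, kKeyedStoreIC_Miss, builtin_count };

  // Old style: callers fetch a raw Code* by enum value and wrap it in a
  // Handle themselves at every call site.
  Code* builtin(Name name) { return builtins_[name]; }

  // New style (this patch): one accessor per builtin, returning a
  // Handle<Code> directly. In the real patch these accessors are generated
  // from the BUILTIN_LIST macros rather than written out by hand.
  Handle<Code> StoreIC_Miss() {
    return Handle<Code>(&builtins_[kStoreIC_Miss]);
  }

 private:
  Code* builtins_[builtin_count];
};

// Call sites then shrink from
//   Handle<Code> ic(Isolate::Current()->builtins()->builtin(
//       Builtins::StoreIC_Miss));
// to
//   Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();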
@@ -1 +1 @@
 // Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
@@ -203 +203 @@
 }


 void StubCache::GenerateProbe(MacroAssembler* masm,
                               Code::Flags flags,
                               Register receiver,
                               Register name,
                               Register scratch,
                               Register extra,
                               Register extra2) {
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = masm->isolate();
   Label miss;

   // Make sure that code is valid. The shifting code relies on the
   // entry size being 8.
   ASSERT(sizeof(Entry) == 8);

   // Make sure the flags does not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);

   // Make sure that there are no register conflicts.
@@ -281 +281 @@
   // Load the initial map. The global functions all have initial maps.
   __ ldr(prototype,
          FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
   // Load the prototype from the initial map.
   __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
 }


 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
     MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ Move(ip, Isolate::Current()->global());
+  __ Move(ip, isolate->global());
   __ cmp(prototype, ip);
   __ b(ne, miss);
   // Get the global function with the given index.
-  JSFunction* function = JSFunction::cast(
-      Isolate::Current()->global_context()->get(index));
+  JSFunction* function =
+      JSFunction::cast(isolate->global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
   __ Move(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
   __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
 }


 // Load a fast property out of a holder object (src). In-object properties
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
@@ -503 +504 @@
   // Return the value (register r0).
   __ bind(&exit);
   __ Ret();
 }


 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
   Code* code = NULL;
   if (kind == Code::LOAD_IC) {
-    code = Isolate::Current()->builtins()->builtin(Builtins::LoadIC_Miss);
+    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
   } else {
-    code = Isolate::Current()->builtins()->builtin(Builtins::KeyedLoadIC_Miss);
+    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
   }

   Handle<Code> ic(code);
   __ Jump(ic, RelocInfo::CODE_TARGET);
 }


 static void GenerateCallFunction(MacroAssembler* masm,
                                  Object* object,
                                  const ParameterCount& arguments,
@@ -942 +943 @@
 // Convert and store int passed in register ival to IEEE 754 single precision
 // floating point value at memory location (dst + 4 * wordoffset)
 // If VFP3 is available use it for conversion.
 static void StoreIntAsFloat(MacroAssembler* masm,
                             Register dst,
                             Register wordoffset,
                             Register ival,
                             Register fval,
                             Register scratch1,
                             Register scratch2) {
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (masm->isolate()->cpu_features()->IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ vmov(s0, ival);
     __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
     __ vcvt_f32_s32(s0, s0);
     __ vstr(s0, scratch1, 0);
   } else {
     Label not_special, done;
     // Move sign bit from source to destination. This works because the sign
     // bit in the exponent word of the double has the same position and polarity
     // as the 2's complement sign bit in a Smi.
@@ -1154 +1155 @@
     // Go to the next object in the prototype chain.
     current = prototype;
   }

   // Check the holder map.
   __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
   __ cmp(scratch1, Operand(Handle<Map>(current->map())));
   __ b(ne, miss);

   // Log the check depth.
-  LOG(Isolate::Current(), IntEvent("check-maps-depth", depth + 1));
+  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));

   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
   };

   // If we've skipped any global objects, it's not enough to verify
   // that their maps haven't changed. We also need to check that the
   // property cell for the property is still empty.
@@ -1490 +1491 @@
     __ cmp(r4, r3);
     __ b(ne, miss);
   } else {
     __ cmp(r1, Operand(Handle<JSFunction>(function)));
     __ b(ne, miss);
   }
 }


 MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj = Isolate::Current()->stub_cache()->ComputeCallMiss(
+  MaybeObject* maybe_obj = masm()->isolate()->stub_cache()->ComputeCallMiss(
       arguments().immediate(), kind_);
   Object* obj;
   if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
   return obj;
 }


 MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
                                                 JSObject* holder,
@@ -2037 +2038 @@
                                                     JSFunction* function,
                                                     String* name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
   //  -- ...
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------

-  if (!Isolate::Current()->cpu_features()->IsSupported(VFP3))
+  if (!masm()->isolate()->cpu_features()->IsSupported(VFP3))
     return HEAP->undefined_value();

   CpuFeatures::Scope scope_vfp3(VFP3);

   const int argc = arguments().immediate();

   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
@@ -2596 +2597 @@
   // -----------------------------------
   Label miss;

   GenerateStoreField(masm(),
                      object,
                      index,
                      transition,
                      r1, r2, r3,
                      &miss);
   __ bind(&miss);
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::StoreIC_Miss));
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);

   // Return the generated code.
   return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }


 MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
                                                      AccessorInfo* callback,
                                                      String* name) {
@@ -2646 +2646 @@
   __ Push(ip, r2, r0);

   // Do tail-call to the runtime system.
   ExternalReference store_callback_property =
       ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
                         masm()->isolate());
   __ TailCallExternalReference(store_callback_property, 4, 1);

   // Handle store cache miss.
   __ bind(&miss);
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::StoreIC_Miss));
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);

   // Return the generated code.
   return GetCode(CALLBACKS, name);
 }


 MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
                                                         String* name) {
   // ----------- S t a t e -------------
@@ -2696 +2695 @@
   __ push(r0);  // strict mode

   // Do tail-call to the runtime system.
   ExternalReference store_ic_property =
       ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
                         masm()->isolate());
   __ TailCallExternalReference(store_ic_property, 4, 1);

   // Handle store cache miss.
   __ bind(&miss);
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::StoreIC_Miss));
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);

   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }


 MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
                                                    JSGlobalPropertyCell* cell,
                                                    String* name) {
@@ -2740 +2738 @@

   // Store the value in the cell.
   __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));

   __ IncrementCounter(COUNTERS->named_store_global_inline(), 1, r4, r3);
   __ Ret();

   // Handle store cache miss.
   __ bind(&miss);
   __ IncrementCounter(COUNTERS->named_store_global_inline_miss(), 1, r4, r3);
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::StoreIC_Miss));
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);

   // Return the generated code.
   return GetCode(NORMAL, name);
 }


 MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
                                                       JSObject* object,
                                                       JSObject* last) {
@@ -3189 +3186 @@
   // the miss label is generated.
   GenerateStoreField(masm(),
                      object,
                      index,
                      transition,
                      r2, r1, r3,
                      &miss);
   __ bind(&miss);

   __ DecrementCounter(COUNTERS->keyed_store_field(), 1, r3, r4);
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::KeyedStoreIC_Miss));
-
+  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);

   // Return the generated code.
   return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }


 MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
     JSObject* receiver) {
   // ----------- S t a t e -------------
@@ -3261 +3256 @@
          MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ RecordWrite(scratch,
                  Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
                  receiver_reg , elements_reg);

   // value_reg (r0) is preserved.
   // Done.
   __ Ret();

   __ bind(&miss);
-  Handle<Code> ic(
-      Isolate::Current()->builtins()->builtin(Builtins::KeyedStoreIC_Miss));
+  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);

   // Return the generated code.
   return GetCode(NORMAL, NULL);
 }


 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   //  -- r0 : argc
@@ -3405 +3399 @@
   // Remove caller arguments and receiver from the stack and return.
   __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
   __ add(sp, sp, Operand(kPointerSize));
   __ IncrementCounter(COUNTERS->constructed_objects(), 1, r1, r2);
   __ IncrementCounter(COUNTERS->constructed_objects_stub(), 1, r1, r2);
   __ Jump(lr);

   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Code* code = Isolate::Current()->builtins()->builtin(
-      Builtins::JSConstructStubGeneric);
-  Handle<Code> generic_construct_stub(code);
-  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(code, RelocInfo::CODE_TARGET);

   // Return the generated code.
   return GetCode();
 }


 static bool IsElementTypeSigned(ExternalArrayType array_type) {
   switch (array_type) {
     case kExternalByteArray:
     case kExternalShortArray:
@@ -3495 +3487 @@
       __ ldrsh(value, MemOperand(r3, key, LSL, 0));
       break;
     case kExternalUnsignedShortArray:
       __ ldrh(value, MemOperand(r3, key, LSL, 0));
       break;
     case kExternalIntArray:
     case kExternalUnsignedIntArray:
       __ ldr(value, MemOperand(r3, key, LSL, 1));
       break;
     case kExternalFloatArray:
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+      if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
         CpuFeatures::Scope scope(VFP3);
         __ add(r2, r3, Operand(key, LSL, 1));
         __ vldr(s0, r2, 0);
       } else {
         __ ldr(value, MemOperand(r3, key, LSL, 1));
       }
       break;
     default:
       UNREACHABLE();
       break;
@@ -3534 +3526 @@

     __ bind(&box_int);
     // Allocate a HeapNumber for the result and perform int-to-double
     // conversion. Don't touch r0 or r1 as they are needed if allocation
     // fails.
     __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
     // Now we can use r0 for the result as key is not needed any more.
     __ mov(r0, r5);

-    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       __ vmov(s0, value);
       __ vcvt_f64_s32(d0, s0);
       __ sub(r3, r0, Operand(kHeapObjectTag));
       __ vstr(d0, r3, HeapNumber::kValueOffset);
       __ Ret();
     } else {
       WriteInt32ToHeapNumberStub stub(value, r0, r3);
       __ TailCallStub(&stub);
     }
   } else if (array_type == kExternalUnsignedIntArray) {
     // The test is different for unsigned int values. Since we need
     // the value to be in the range of a positive smi, we can't
     // handle either of the top two bits being set in the value.
-    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       Label box_int, done;
       __ tst(value, Operand(0xC0000000));
       __ b(ne, &box_int);
       // Tag integer as smi and return it.
       __ mov(r0, Operand(value, LSL, kSmiTagSize));
       __ Ret();

       __ bind(&box_int);
       __ vmov(s0, value);
@@ -3613 +3605 @@

       __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
       __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));

       __ mov(r0, r4);
       __ Ret();
     }
   } else if (array_type == kExternalFloatArray) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
-    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       // Allocate a HeapNumber for the result. Don't use r0 and r1 as
       // AllocateHeapNumber clobbers all registers - also when jumping due to
       // exhausted young space.
       __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
       __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
       __ vcvt_f64_f32(d0, s0);
       __ sub(r1, r2, Operand(kHeapObjectTag));
       __ vstr(d0, r1, HeapNumber::kValueOffset);

@@ -3804 +3796 @@
   __ b(ne, &slow);

   __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

   // r3: base pointer of external storage.
   // r4: key (integer).

   // The WebGL specification leaves the behavior of storing NaN and
   // +/-Infinity into integer arrays basically undefined. For more
   // reproducible behavior, convert these to zero.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);

-
     if (array_type == kExternalFloatArray) {
       // vldr requires offset to be a multiple of 4 so we can not
       // include -kHeapObjectTag into it.
       __ sub(r5, r0, Operand(kHeapObjectTag));
       __ vldr(d0, r5, HeapNumber::kValueOffset);
       __ add(r5, r3, Operand(r4, LSL, 2));
       __ vcvt_f32_f64(s0, d0);
       __ vstr(s0, r5, 0);
     } else {
       // Need to perform float-to-int conversion.
@@ -4017 +4008 @@

   return GetCode(flags);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
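Why masm->isolate() (masm()->isolate() inside the stub compilers) can replace Isolate::Current() throughout this file: the assembler is constructed for one particular isolate and carries a pointer to it, so codegen code reachable from the assembler never needs the thread-local lookup. A hypothetical sketch with illustrative names only, not the real declarations:

class Isolate;

class MacroAssembler {
 public:
  // Set once at construction; no thread-local access needed afterwards.
  explicit MacroAssembler(Isolate* isolate) : isolate_(isolate) {}
  Isolate* isolate() const { return isolate_; }
 private:
  Isolate* isolate_;
};

class StubCompiler {
 public:
  explicit StubCompiler(MacroAssembler* masm) : masm_(masm) {}
  MacroAssembler* masm() { return masm_; }  // Hence masm()->isolate().
 private:
  MacroAssembler* masm_;
};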