OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 20 matching lines...) Expand all Loading... |
31 | 31 |
32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
34 #include "regexp-macro-assembler.h" | 34 #include "regexp-macro-assembler.h" |
35 #include "stub-cache.h" | 35 #include "stub-cache.h" |
36 | 36 |
37 namespace v8 { | 37 namespace v8 { |
38 namespace internal { | 38 namespace internal { |
39 | 39 |
40 | 40 |
| 41 void FastNewClosureStub::InitializeInterfaceDescriptor( |
| 42 Isolate* isolate, |
| 43 CodeStubInterfaceDescriptor* descriptor) { |
| 44 static Register registers[] = { r2 }; |
| 45 descriptor->register_param_count_ = 1; |
| 46 descriptor->register_params_ = registers; |
| 47 descriptor->deoptimization_handler_ = |
| 48 Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry; |
| 49 } |
| 50 |
| 51 |
41 void ToNumberStub::InitializeInterfaceDescriptor( | 52 void ToNumberStub::InitializeInterfaceDescriptor( |
42 Isolate* isolate, | 53 Isolate* isolate, |
43 CodeStubInterfaceDescriptor* descriptor) { | 54 CodeStubInterfaceDescriptor* descriptor) { |
44 static Register registers[] = { r0 }; | 55 static Register registers[] = { r0 }; |
45 descriptor->register_param_count_ = 1; | 56 descriptor->register_param_count_ = 1; |
46 descriptor->register_params_ = registers; | 57 descriptor->register_params_ = registers; |
47 descriptor->deoptimization_handler_ = NULL; | 58 descriptor->deoptimization_handler_ = NULL; |
48 } | 59 } |
49 | 60 |
50 | 61 |
(...skipping 251 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
302 __ push(descriptor->register_params_[i]); | 313 __ push(descriptor->register_params_[i]); |
303 } | 314 } |
304 ExternalReference miss = descriptor->miss_handler(); | 315 ExternalReference miss = descriptor->miss_handler(); |
305 __ CallExternalReference(miss, descriptor->register_param_count_); | 316 __ CallExternalReference(miss, descriptor->register_param_count_); |
306 } | 317 } |
307 | 318 |
308 __ Ret(); | 319 __ Ret(); |
309 } | 320 } |
310 | 321 |
311 | 322 |
312 void FastNewClosureStub::Generate(MacroAssembler* masm) { | |
313 // Create a new closure from the given function info in new | |
314 // space. Set the context to the current context in cp. | |
315 Counters* counters = masm->isolate()->counters(); | |
316 | |
317 Label gc; | |
318 | |
319 // Pop the function info from the stack. | |
320 __ pop(r3); | |
321 | |
322 // Attempt to allocate new JSFunction in new space. | |
323 __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT); | |
324 | |
325 __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7); | |
326 | |
327 int map_index = Context::FunctionMapIndex(language_mode_, is_generator_); | |
328 | |
329 // Compute the function map in the current native context and set that | |
330 // as the map of the allocated object. | |
331 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | |
332 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset)); | |
333 __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index))); | |
334 __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
335 | |
336 // Initialize the rest of the function. We don't have to update the | |
337 // write barrier because the allocated object is in new space. | |
338 __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); | |
339 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex); | |
340 __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); | |
341 __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); | |
342 __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); | |
343 __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); | |
344 __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); | |
345 __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); | |
346 | |
347 // Initialize the code pointer in the function to be the one | |
348 // found in the shared function info object. | |
349 // But first check if there is an optimized version for our context. | |
350 Label check_optimized; | |
351 Label install_unoptimized; | |
352 if (FLAG_cache_optimized_code) { | |
353 __ ldr(r1, | |
354 FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset)); | |
355 __ tst(r1, r1); | |
356 __ b(ne, &check_optimized); | |
357 } | |
358 __ bind(&install_unoptimized); | |
359 __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); | |
360 __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); | |
361 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); | |
362 __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
363 __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); | |
364 | |
365 // Return result. The argument function info has been popped already. | |
366 __ Ret(); | |
367 | |
368 __ bind(&check_optimized); | |
369 | |
370 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7); | |
371 | |
372 // r2 holds native context, r1 points to fixed array of 3-element entries | |
373 // (native context, optimized code, literals). | |
374 // The optimized code map must never be empty, so check the first elements. | |
375 Label install_optimized; | |
376 // Speculatively move code object into r4. | |
377 __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot)); | |
378 __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot)); | |
379 __ cmp(r2, r5); | |
380 __ b(eq, &install_optimized); | |
381 | |
382 // Iterate through the rest of map backwards. r4 holds an index as a Smi. | |
383 Label loop; | |
384 __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset)); | |
385 __ bind(&loop); | |
386 // Do not double check first entry. | |
387 __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex))); | |
388 __ b(eq, &install_unoptimized); | |
389 __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); | |
390 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
391 __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); | |
392 __ ldr(r5, MemOperand(r5)); | |
393 __ cmp(r2, r5); | |
394 __ b(ne, &loop); | |
395 // Hit: fetch the optimized code. | |
396 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
397 __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); | |
398 __ add(r5, r5, Operand(kPointerSize)); | |
399 __ ldr(r4, MemOperand(r5)); | |
400 | |
401 __ bind(&install_optimized); | |
402 __ IncrementCounter(counters->fast_new_closure_install_optimized(), | |
403 1, r6, r7); | |
404 | |
405 // TODO(fschneider): Idea: store proper code pointers in the map and either | |
406 // unmangle them on marking or do nothing as the whole map is discarded on | |
407 // major GC anyway. | |
408 __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
409 __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); | |
410 | |
411 // Now link a function into a list of optimized functions. | |
412 __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
413 | |
414 __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); | |
415 // No need for write barrier as JSFunction (r0) is in the new space. | |
416 | |
417 __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
418 // Store JSFunction (r0) into r4 before issuing write barrier as | |
419 // it clobbers all the registers passed. | |
420 __ mov(r4, r0); | |
421 __ RecordWriteContextSlot( | |
422 r2, | |
423 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST), | |
424 r4, | |
425 r1, | |
426 kLRHasNotBeenSaved, | |
427 kDontSaveFPRegs); | |
428 | |
429 // Return result. The argument function info has been popped already. | |
430 __ Ret(); | |
431 | |
432 // Create a new closure through the slower runtime call. | |
433 __ bind(&gc); | |
434 __ LoadRoot(r4, Heap::kFalseValueRootIndex); | |
435 __ Push(cp, r3, r4); | |
436 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); | |
437 } | |
438 | |
439 | |
440 void FastNewContextStub::Generate(MacroAssembler* masm) { | 323 void FastNewContextStub::Generate(MacroAssembler* masm) { |
441 // Try to allocate the context in new space. | 324 // Try to allocate the context in new space. |
442 Label gc; | 325 Label gc; |
443 int length = slots_ + Context::MIN_CONTEXT_SLOTS; | 326 int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
444 | 327 |
445 // Attempt to allocate the context in new space. | 328 // Attempt to allocate the context in new space. |
446 __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT); | 329 __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT); |
447 | 330 |
448 // Load the function from the stack. | 331 // Load the function from the stack. |
449 __ ldr(r3, MemOperand(sp, 0)); | 332 __ ldr(r3, MemOperand(sp, 0)); |
(...skipping 6813 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
7263 __ bind(&fast_elements_case); | 7146 __ bind(&fast_elements_case); |
7264 GenerateCase(masm, FAST_ELEMENTS); | 7147 GenerateCase(masm, FAST_ELEMENTS); |
7265 } | 7148 } |
7266 | 7149 |
7267 | 7150 |
7268 #undef __ | 7151 #undef __ |
7269 | 7152 |
7270 } } // namespace v8::internal | 7153 } } // namespace v8::internal |
7271 | 7154 |
7272 #endif // V8_TARGET_ARCH_ARM | 7155 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |