OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 21 matching lines...) |
32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
34 #include "codegen.h" | 34 #include "codegen.h" |
35 #include "regexp-macro-assembler.h" | 35 #include "regexp-macro-assembler.h" |
36 #include "stub-cache.h" | 36 #include "stub-cache.h" |
37 | 37 |
38 namespace v8 { | 38 namespace v8 { |
39 namespace internal { | 39 namespace internal { |
40 | 40 |
41 | 41 |
| 42 void FastNewClosureStub::InitializeInterfaceDescriptor( |
| 43 Isolate* isolate, |
| 44 CodeStubInterfaceDescriptor* descriptor) { |
| 45 static Register registers[] = { a2 }; |
| 46 descriptor->register_param_count_ = 1; |
| 47 descriptor->register_params_ = registers; |
| 48 descriptor->deoptimization_handler_ = |
| 49 Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry; |
| 50 } |
| 51 |
| 52 |
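With this descriptor in place, the stub's platform-specific knowledge shrinks to data: one register parameter (the closure's SharedFunctionInfo, expected in a2) plus the runtime function to enter when the generated code bails out. A generic path can then spill the named registers and call the fallback, which is what allows the hand-written FastNewClosureStub::Generate() further down to be deleted. Below is a self-contained C++ analogy of the pattern; the names and types are illustrative, not the V8 API:

    #include <cstdio>

    struct InterfaceDescriptor {
      int register_param_count;
      const char* const* register_params;  // register names, e.g. { "a2" }
      void (*deopt_handler)(int argc);     // runtime fallback; may be NULL
    };

    static void NewClosureFromStubFailure(int argc) {
      std::printf("runtime fallback entered with %d spilled arg(s)\n", argc);
    }

    // Generic miss path: spill each register parameter named by the
    // descriptor, then enter the runtime fallback with that arity.
    static void CallMiss(const InterfaceDescriptor& d) {
      for (int i = 0; i < d.register_param_count; ++i) {
        std::printf("push %s\n", d.register_params[i]);
      }
      if (d.deopt_handler != NULL) d.deopt_handler(d.register_param_count);
    }

    int main() {
      static const char* const params[] = { "a2" };
      const InterfaceDescriptor d = { 1, params, NewClosureFromStubFailure };
      CallMiss(d);
      return 0;
    }

The real consumer is the shared stub-failure path excerpted just below, which pushes descriptor->register_params_ and calls descriptor->miss_handler() in exactly this shape.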
42 void ToNumberStub::InitializeInterfaceDescriptor( | 53 void ToNumberStub::InitializeInterfaceDescriptor( |
43 Isolate* isolate, | 54 Isolate* isolate, |
44 CodeStubInterfaceDescriptor* descriptor) { | 55 CodeStubInterfaceDescriptor* descriptor) { |
45 static Register registers[] = { a0 }; | 56 static Register registers[] = { a0 }; |
46 descriptor->register_param_count_ = 1; | 57 descriptor->register_param_count_ = 1; |
47 descriptor->register_params_ = registers; | 58 descriptor->register_params_ = registers; |
48 descriptor->deoptimization_handler_ = NULL; | 59 descriptor->deoptimization_handler_ = NULL; |
49 } | 60 } |
50 | 61 |
51 | 62 |
(...skipping 251 matching lines...) |
303 __ push(descriptor->register_params_[i]); | 314 __ push(descriptor->register_params_[i]); |
304 } | 315 } |
305 ExternalReference miss = descriptor->miss_handler(); | 316 ExternalReference miss = descriptor->miss_handler(); |
306 __ CallExternalReference(miss, descriptor->register_param_count_); | 317 __ CallExternalReference(miss, descriptor->register_param_count_); |
307 } | 318 } |
308 | 319 |
309 __ Ret(); | 320 __ Ret(); |
310 } | 321 } |
311 | 322 |
312 | 323 |
313 void FastNewClosureStub::Generate(MacroAssembler* masm) { | |
314 // Create a new closure from the given function info in new | |
315 // space. Set the context to the current context in cp. | |
316 Counters* counters = masm->isolate()->counters(); | |
317 | |
318 Label gc; | |
319 | |
320 // Pop the function info from the stack. | |
321 __ pop(a3); | |
322 | |
323 // Attempt to allocate new JSFunction in new space. | |
324 __ Allocate(JSFunction::kSize, v0, a1, a2, &gc, TAG_OBJECT); | |
325 | |
326 __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3); | |
327 | |
328 int map_index = Context::FunctionMapIndex(language_mode_, is_generator_); | |
329 | |
330 // Compute the function map in the current native context and set that | |
331 // as the map of the allocated object. | |
332 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | |
333 __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset)); | |
334 __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index))); | |
335 __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset)); | |
336 | |
337 // Initialize the rest of the function. We don't have to update the | |
338 // write barrier because the allocated object is in new space. | |
339 __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex); | |
340 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex); | |
341 __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset)); | |
342 __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset)); | |
343 __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset)); | |
344 __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset)); | |
345 __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset)); | |
346 __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset)); | |
347 | |
348 // Initialize the code pointer in the function to be the one | |
349 // found in the shared function info object. | |
350 // But first check if there is an optimized version for our context. | |
351 Label check_optimized; | |
352 Label install_unoptimized; | |
353 if (FLAG_cache_optimized_code) { | |
354 __ lw(a1, | |
355 FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset)); | |
356 __ And(at, a1, a1); | |
357 __ Branch(&check_optimized, ne, at, Operand(zero_reg)); | |
358 } | |
359 __ bind(&install_unoptimized); | |
360 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); | |
361 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset)); | |
362 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset)); | |
363 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
364 | |
365 // Return result. The argument function info has been popped already. | |
366 __ Ret(USE_DELAY_SLOT); | |
367 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset)); | |
368 | |
369 __ bind(&check_optimized); | |
370 | |
371 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3); | |
372 | |
373 // a2 holds native context, a1 points to fixed array of 3-element entries | |
374 // (native context, optimized code, literals). | |
375 // The optimized code map must never be empty, so check the first entry. | |
376 Label install_optimized; | |
377 // Speculatively move code object into t0. | |
378 __ lw(t0, FieldMemOperand(a1, SharedFunctionInfo::kFirstCodeSlot)); | |
379 __ lw(t1, FieldMemOperand(a1, SharedFunctionInfo::kFirstContextSlot)); | |
380 __ Branch(&install_optimized, eq, a2, Operand(t1)); | |
381 | |
382 // Iterate through the rest of the map backwards. t0 holds an index as a Smi. | |
383 Label loop; | |
384 __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset)); | |
385 __ bind(&loop); | |
386 // Do not double-check the first entry. | |
387 __ Branch(&install_unoptimized, eq, t0, | |
388 Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex))); | |
389 __ Subu(t0, t0, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); | |
390 __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
391 __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize); | |
392 __ Addu(t1, t1, Operand(at)); | |
393 __ lw(t1, MemOperand(t1)); | |
394 __ Branch(&loop, ne, a2, Operand(t1)); | |
395 // Hit: fetch the optimized code. | |
396 __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
397 __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize); | |
398 __ Addu(t1, t1, Operand(at)); | |
399 __ Addu(t1, t1, Operand(kPointerSize)); | |
400 __ lw(t0, MemOperand(t1)); | |
401 | |
402 __ bind(&install_optimized); | |
403 __ IncrementCounter(counters->fast_new_closure_install_optimized(), | |
404 1, t2, t3); | |
405 | |
406 // TODO(fschneider): Idea: store proper code pointers in the map and either | |
407 // unmangle them on marking or do nothing as the whole map is discarded on | |
408 // major GC anyway. | |
409 __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
410 __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset)); | |
411 | |
412 // Now link the function into the list of optimized functions. | |
413 __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
414 | |
415 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset)); | |
416 // No need for write barrier as JSFunction (v0) is in new space. | |
417 | |
418 __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
419 // Store JSFunction (v0) into t0 before issuing the write barrier, as | |
420 // it clobbers all the registers passed. | |
421 __ mov(t0, v0); | |
422 __ RecordWriteContextSlot( | |
423 a2, | |
424 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST), | |
425 t0, | |
426 a1, | |
427 kRAHasNotBeenSaved, | |
428 kDontSaveFPRegs); | |
429 | |
430 // Return result. The argument function info has been popped already. | |
431 __ Ret(); | |
432 | |
433 // Create a new closure through the slower runtime call. | |
434 __ bind(&gc); | |
435 __ LoadRoot(t0, Heap::kFalseValueRootIndex); | |
436 __ Push(cp, a3, t0); | |
437 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); | |
438 } | |
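For reference, the fast path just deleted first carves a JSFunction out of new space and then stores each field by offset. Collected into one place, the writes amount to the following shape; the field names mirror the offsets used above, while the ordering and types are illustrative rather than V8's actual object definitions:

    // Illustrative only: the JSFunction fields the deleted code initialized.
    struct JSFunctionShape {
      void* map;                       // function map from the native context
      void* properties;                // empty_fixed_array
      void* elements;                  // empty_fixed_array
      void* prototype_or_initial_map;  // the_hole
      void* shared_function_info;      // function info popped into a3
      void* context;                   // current context (cp)
      void* literals;                  // empty_fixed_array
      void* code_entry;                // raw entry address, stored last
      void* next_function_link;        // undefined, or the optimized list
    };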
439 | |
440 | |
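The bulk of the deleted assembly implements a small search. Per the comments above, the optimized code map hanging off the SharedFunctionInfo is a FixedArray of (native context, optimized code, literals) triples; the stub scanned it backwards for the current native context, installing the cached code on a hit and the unoptimized code on a miss. A self-contained C++ sketch of that search follows; the types and constants are modeled on the comments, not taken from V8, and the deleted code additionally peeled the first entry off as a speculative fast case:

    #include <cstdio>
    #include <vector>

    typedef const void* Object;  // stand-in for a tagged V8 pointer

    const int kEntryLength  = 3;  // (native context, optimized code, literals)
    const int kEntriesStart = 1;  // slot 0 is a header; the triples follow

    // Returns the code cached for native_context, or NULL on a miss (the
    // stub then installed the SharedFunctionInfo's unoptimized code).
    Object LookupOptimizedCode(const std::vector<Object>& map,
                               Object native_context) {
      int i = (int)map.size();
      while (i > kEntriesStart) {
        i -= kEntryLength;              // i now indexes an entry's context slot
        if (map[i] == native_context) {
          return map[i + 1];            // the code sits right after the context
        }
      }
      return NULL;
    }

    int main() {
      int ctx_a, ctx_b, code_a, code_b, lits;
      std::vector<Object> map;
      map.push_back(NULL);                                          // header slot
      map.push_back(&ctx_a); map.push_back(&code_a); map.push_back(&lits);
      map.push_back(&ctx_b); map.push_back(&code_b); map.push_back(&lits);
      std::printf("hit:  %d\n", LookupOptimizedCode(map, &ctx_b) == &code_b);
      std::printf("miss: %d\n", LookupOptimizedCode(map, &lits) == NULL);
      return 0;
    }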
441 void FastNewContextStub::Generate(MacroAssembler* masm) { | 324 void FastNewContextStub::Generate(MacroAssembler* masm) { |
442 // Try to allocate the context in new space. | 325 // Try to allocate the context in new space. |
443 Label gc; | 326 Label gc; |
444 int length = slots_ + Context::MIN_CONTEXT_SLOTS; | 327 int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
445 | 328 |
446 // Attempt to allocate the context in new space. | 329 // Attempt to allocate the context in new space. |
447 __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT); | 330 __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT); |
448 | 331 |
449 // Load the function from the stack. | 332 // Load the function from the stack. |
450 __ lw(a3, MemOperand(sp, 0)); | 333 __ lw(a3, MemOperand(sp, 0)); |
(...skipping 7128 matching lines...) |
7579 __ bind(&fast_elements_case); | 7462 __ bind(&fast_elements_case); |
7580 GenerateCase(masm, FAST_ELEMENTS); | 7463 GenerateCase(masm, FAST_ELEMENTS); |
7581 } | 7464 } |
7582 | 7465 |
7583 | 7466 |
7584 #undef __ | 7467 #undef __ |
7585 | 7468 |
7586 } } // namespace v8::internal | 7469 } } // namespace v8::internal |
7587 | 7470 |
7588 #endif // V8_TARGET_ARCH_MIPS | 7471 #endif // V8_TARGET_ARCH_MIPS |