OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 5227 matching lines...)
5238 } | 5238 } |
5239 | 5239 |
5240 virtual void Generate(); | 5240 virtual void Generate(); |
5241 | 5241 |
5242 private: | 5242 private: |
5243 Handle<String> name_; | 5243 Handle<String> name_; |
5244 }; | 5244 }; |
5245 | 5245 |
5246 | 5246 |
5247 void DeferredReferenceGetNamedValue::Generate() { | 5247 void DeferredReferenceGetNamedValue::Generate() { |
5248 __ DecrementCounter(&Counters::named_load_inline, 1, r1, r2); | 5248 Register scratch1 = VirtualFrame::scratch0(); |
5249 __ IncrementCounter(&Counters::named_load_inline_miss, 1, r1, r2); | 5249 Register scratch2 = VirtualFrame::scratch1(); |
| 5250 __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2); |
| 5251 __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2); |
5250 | 5252 |
5251 // Set up the registers and call load IC. | 5253 // Set up the registers and call load IC. |
5252 // On entry to this deferred code, r0 is assumed to already contain the | 5254 // On entry to this deferred code, r0 is assumed to already contain the |
5253 // receiver from the top of the stack. | 5255 // receiver from the top of the stack. |
5254 __ mov(r2, Operand(name_)); | 5256 __ mov(r2, Operand(name_)); |
5255 | 5257 |
5256 // The rest of the instructions in the deferred code must be together. | 5258 // The rest of the instructions in the deferred code must be together. |
5257 { Assembler::BlockConstPoolScope block_const_pool(masm_); | 5259 { Assembler::BlockConstPoolScope block_const_pool(masm_); |
5258 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); | 5260 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); |
5259 __ Call(ic, RelocInfo::CODE_TARGET); | 5261 __ Call(ic, RelocInfo::CODE_TARGET); |
5260 // The call must be followed by a nop(1) instruction to indicate that the | 5262 // The call must be followed by a nop(1) instruction to indicate that the |
5261 // in-object property load has been inlined. | 5263 // in-object property load has been inlined. |
5262 __ nop(PROPERTY_LOAD_INLINED); | 5264 __ nop(PROPERTY_ACCESS_INLINED); |
5263 | 5265 |
5264 // Block the constant pool for one more instruction after leaving this | 5266 // Block the constant pool for one more instruction after leaving this |
5265 // constant pool block scope to include the branch instruction ending the | 5267 // constant pool block scope to include the branch instruction ending the |
5266 // deferred code. | 5268 // deferred code. |
5267 __ BlockConstPoolFor(1); | 5269 __ BlockConstPoolFor(1); |
5268 } | 5270 } |
5269 } | 5271 } |
5270 | 5272 |
5271 | 5273 |
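The call/nop pairing in the deferred code above is what lets the runtime locate and patch these sites later: on an IC miss, the runtime inspects the instruction at the call's return address and, if it is the marker nop, knows a fixed-shape inlined sequence precedes the call. A minimal sketch of that detection, assuming nop(1) encodes as the 32-bit ARM "mov r1, r1" (the encoding and marker value are assumptions for illustration, not taken from this patch):

    #include <cstdint>

    // Assumed encoding: a nop(type) that emits "mov r<type>, r<type>" gives
    // 0xE1A01001 for type == 1 (PROPERTY_ACCESS_INLINED). Illustrative only.
    constexpr uint32_t kInlinedAccessMarker = 0xE1A01001;

    // Given the return address of an IC call, report whether the call site
    // is flagged as an inlined property access that the runtime may patch.
    bool IsInlinedPropertyAccessSite(const uint32_t* return_address) {
      return *return_address == kInlinedAccessMarker;
    }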
5272 class DeferredReferenceGetKeyedValue: public DeferredCode { | 5274 class DeferredReferenceGetKeyedValue: public DeferredCode { |
(...skipping 12 matching lines...)
5285 __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2); | 5287 __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2); |
5286 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2); | 5288 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2); |
5287 | 5289 |
5288 // The rest of the instructions in the deferred code must be together. | 5290 // The rest of the instructions in the deferred code must be together. |
5289 { Assembler::BlockConstPoolScope block_const_pool(masm_); | 5291 { Assembler::BlockConstPoolScope block_const_pool(masm_); |
5290 // Call keyed load IC. It has all arguments on the stack. | 5292 // Call keyed load IC. It has all arguments on the stack. |
5291 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); | 5293 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); |
5292 __ Call(ic, RelocInfo::CODE_TARGET); | 5294 __ Call(ic, RelocInfo::CODE_TARGET); |
5293 // The call must be followed by a nop instruction to indicate that the | 5295 // The call must be followed by a nop instruction to indicate that the |
5294 // keyed load has been inlined. | 5296 // keyed load has been inlined. |
5295 __ nop(PROPERTY_LOAD_INLINED); | 5297 __ nop(PROPERTY_ACCESS_INLINED); |
5296 | 5298 |
5297 // Block the constant pool for one more instruction after leaving this | 5299 // Block the constant pool for one more instruction after leaving this |
5298 // constant pool block scope to include the branch instruction ending the | 5300 // constant pool block scope to include the branch instruction ending the |
5299 // deferred code. | 5301 // deferred code. |
5300 __ BlockConstPoolFor(1); | 5302 __ BlockConstPoolFor(1); |
5301 } | 5303 } |
5302 } | 5304 } |
5303 | 5305 |
5304 | 5306 |
| 5307 class DeferredReferenceSetKeyedValue: public DeferredCode { |
| 5308 public: |
| 5309 DeferredReferenceSetKeyedValue() { |
| 5310 set_comment("[ DeferredReferenceSetKeyedValue"); |
| 5311 } |
| 5312 |
| 5313 virtual void Generate(); |
| 5314 }; |
| 5315 |
| 5316 |
| 5317 void DeferredReferenceSetKeyedValue::Generate() { |
| 5318 Register scratch1 = VirtualFrame::scratch0(); |
| 5319 Register scratch2 = VirtualFrame::scratch1(); |
| 5320 __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2); |
| 5321 __ IncrementCounter( |
| 5322 &Counters::keyed_store_inline_miss, 1, scratch1, scratch2); |
| 5323 |
| 5324 // The rest of the instructions in the deferred code must be together. |
| 5325 { Assembler::BlockConstPoolScope block_const_pool(masm_); |
| 5326 // Call keyed store IC. It has receiver and key on the stack and the value to |
| 5327 // store in r0. |
| 5328 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); |
| 5329 __ Call(ic, RelocInfo::CODE_TARGET); |
| 5330 // The call must be followed by a nop instruction to indicate that the |
| 5331 // keyed store has been inlined. |
| 5332 __ nop(PROPERTY_ACCESS_INLINED); |
| 5333 |
| 5334 // Block the constant pool for one more instruction after leaving this |
| 5335 // constant pool block scope to include the branch instruction ending the |
| 5336 // deferred code. |
| 5337 __ BlockConstPoolFor(1); |
| 5338 } |
| 5339 } |
| 5340 |
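Each deferred path wraps its call sequence in a BlockConstPoolScope and then calls BlockConstPoolFor(1) so the blocking also covers the branch that ends the deferred code. The reason is the patching scheme: instruction offsets around the marker must stay fixed, so the assembler must not spill a literal pool into the middle of the sequence. A rough sketch of how such blocking could work, with an assumed counter-based implementation (not V8's actual assembler):

    // While the block depth is nonzero the assembler refuses to emit its
    // pending literal pool, so every instruction in the scope lands at a
    // fixed, patchable offset.
    class Assembler {
     public:
      class BlockConstPoolScope {
       public:
        explicit BlockConstPoolScope(Assembler* assm) : assm_(assm) {
          assm_->const_pool_block_depth_++;
        }
        ~BlockConstPoolScope() { assm_->const_pool_block_depth_--; }
       private:
        Assembler* assm_;
      };

      bool CanEmitConstPool() const { return const_pool_block_depth_ == 0; }

     private:
      int const_pool_block_depth_ = 0;
    };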
| 5341 |
5305 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { | 5342 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { |
5306 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { | 5343 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { |
5307 Comment cmnt(masm(), "[ Load from named Property"); | 5344 Comment cmnt(masm(), "[ Load from named Property"); |
5308 // Set up the name register and call load IC. | 5345 // Set up the name register and call load IC. |
5309 frame_->SpillAllButCopyTOSToR0(); | 5346 frame_->SpillAllButCopyTOSToR0(); |
5310 __ mov(r2, Operand(name)); | 5347 __ mov(r2, Operand(name)); |
5311 frame_->CallLoadIC(is_contextual | 5348 frame_->CallLoadIC(is_contextual |
5312 ? RelocInfo::CODE_TARGET_CONTEXT | 5349 ? RelocInfo::CODE_TARGET_CONTEXT |
5313 : RelocInfo::CODE_TARGET); | 5350 : RelocInfo::CODE_TARGET); |
5314 } else { | 5351 } else { |
(...skipping 68 matching lines...)
5383 Register key = r1; | 5420 Register key = r1; |
5384 VirtualFrame::SpilledScope spilled(frame_); | 5421 VirtualFrame::SpilledScope spilled(frame_); |
5385 | 5422 |
5386 DeferredReferenceGetKeyedValue* deferred = | 5423 DeferredReferenceGetKeyedValue* deferred = |
5387 new DeferredReferenceGetKeyedValue(); | 5424 new DeferredReferenceGetKeyedValue(); |
5388 | 5425 |
5389 // Check that the receiver is a heap object. | 5426 // Check that the receiver is a heap object. |
5390 __ tst(receiver, Operand(kSmiTagMask)); | 5427 __ tst(receiver, Operand(kSmiTagMask)); |
5391 deferred->Branch(eq); | 5428 deferred->Branch(eq); |
5392 | 5429 |
5393 // The following instructions are the inlined load keyed property. Parts | 5430 // The following instructions are part of the inlined keyed load |
5394 // of this code are patched, so the exact number of instructions generated | 5431 // property code which can be patched. Therefore the exact number of |
5395 // need to be fixed. Therefore the constant pool is blocked while generating | 5432 // instructions generated needs to be fixed, so the constant pool is blocked |
5396 // this code. | 5433 // while generating this code. |
5397 #ifdef DEBUG | 5434 #ifdef DEBUG |
5398 int kInlinedKeyedLoadInstructions = 19; | 5435 int kInlinedKeyedLoadInstructions = 19; |
5399 Label check_inlined_codesize; | 5436 Label check_inlined_codesize; |
5400 masm_->bind(&check_inlined_codesize); | 5437 masm_->bind(&check_inlined_codesize); |
5401 #endif | 5438 #endif |
5402 { Assembler::BlockConstPoolScope block_const_pool(masm_); | 5439 { Assembler::BlockConstPoolScope block_const_pool(masm_); |
5403 Register scratch1 = VirtualFrame::scratch0(); | 5440 Register scratch1 = VirtualFrame::scratch0(); |
5404 Register scratch2 = VirtualFrame::scratch1(); | 5441 Register scratch2 = VirtualFrame::scratch1(); |
5405 // Check the map. The null map used below is patched by the inline cache | 5442 // Check the map. The null map used below is patched by the inline cache |
5406 // code. | 5443 // code. |
5407 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 5444 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
5408 __ mov(scratch2, Operand(Factory::null_value())); | 5445 __ mov(scratch2, Operand(Factory::null_value())); |
5409 __ cmp(scratch1, scratch2); | 5446 __ cmp(scratch1, scratch2); |
5410 deferred->Branch(ne); | 5447 deferred->Branch(ne); |
5411 | 5448 |
5412 // Check that the key is a smi. | 5449 // Check that the key is a smi. |
5413 __ tst(key, Operand(kSmiTagMask)); | 5450 __ tst(key, Operand(kSmiTagMask)); |
5414 deferred->Branch(ne); | 5451 deferred->Branch(ne); |
5415 | 5452 |
5416 // Get the elements array from the receiver and check that it | 5453 // Get the elements array from the receiver and check that it |
5417 // is not a dictionary. | 5454 // is not a dictionary. |
5418 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 5455 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
5419 __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset)); | 5456 __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset)); |
5420 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 5457 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
5421 __ cmp(scratch2, ip); | 5458 __ cmp(scratch2, ip); |
5422 deferred->Branch(ne); | 5459 deferred->Branch(ne); |
5423 | 5460 |
5424 // Check that key is within bounds. | 5461 // Check that the key is within bounds. Use unsigned comparison to handle |
| 5462 // negative keys. |
5425 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); | 5463 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); |
5426 __ cmp(scratch2, Operand(key, ASR, kSmiTagSize)); | 5464 __ cmp(scratch2, Operand(key, ASR, kSmiTagSize)); |
5427 deferred->Branch(ls); // Unsigned less equal. | 5465 deferred->Branch(ls); // Unsigned less equal. |
5428 | 5466 |
5429 // Load and check that the result is not the hole (key is a smi). | 5467 // Load and check that the result is not the hole (key is a smi). |
5430 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); | 5468 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); |
5431 __ add(scratch1, | 5469 __ add(scratch1, |
5432 scratch1, | 5470 scratch1, |
5433 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 5471 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
5434 __ ldr(r0, | 5472 __ ldr(r0, |
5435 MemOperand(scratch1, key, LSL, | 5473 MemOperand(scratch1, key, LSL, |
5436 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); | 5474 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); |
5437 __ cmp(r0, scratch2); | 5475 __ cmp(r0, scratch2); |
5438 // This is the only branch to deferred where r0 and r1 do not contain the | 5476 // This is the only branch to deferred where r0 and r1 do not contain the |
5439 // receiver and key. We can't just load undefined here because we have to | 5477 // receiver and key. We can't just load undefined here because we have to |
5440 // check the prototype. | 5478 // check the prototype. |
5441 deferred->Branch(eq); | 5479 deferred->Branch(eq); |
5442 | 5480 |
5443 // Make sure that the expected number of instructions are generated. | 5481 // Make sure that the expected number of instructions are generated. |
5444 ASSERT_EQ(kInlinedKeyedLoadInstructions, | 5482 ASSERT_EQ(kInlinedKeyedLoadInstructions, |
5445 masm_->InstructionsGeneratedSince(&check_inlined_codesize)); | 5483 masm_->InstructionsGeneratedSince(&check_inlined_codesize)); |
5446 } | 5484 } |
5447 | 5485 |
5448 deferred->BindExit(); | 5486 deferred->BindExit(); |
5449 } | 5487 } |
5450 } | 5488 } |
5451 | 5489 |
5452 | 5490 |
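The load at the end of the inlined sequence indexes the elements array without untagging the key: on a 32-bit target a smi already carries a factor of two, so shifting by kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize) turns the tagged key directly into a byte offset. A standalone check of that arithmetic, with the constants assumed from 32-bit V8 of this era:

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiTagSize = 1;       // smis are value << 1, low bit clear
    constexpr int kSmiShiftSize = 0;     // no extra shift on 32-bit targets
    constexpr int kPointerSizeLog2 = 2;  // 4-byte pointers

    // A smi-tagged key is index * 2; one more left shift yields index * 4,
    // the byte offset of the element, without ever untagging the key.
    uint32_t ElementByteOffset(uint32_t smi_key) {
      return smi_key << (kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize));
    }

    int main() {
      uint32_t smi_key = 3 << kSmiTagSize;       // smi encoding of index 3
      assert(ElementByteOffset(smi_key) == 12);  // 3 * 4 bytes
      return 0;
    }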
| 5491 void CodeGenerator::EmitKeyedStore(StaticType* key_type) { |
| 5492 frame_->AssertIsSpilled(); |
| 5493 // Generate inlined version of the keyed store if the code is in a loop |
| 5494 // and the key is likely to be a smi. |
| 5495 if (loop_nesting() > 0 && key_type->IsLikelySmi()) { |
| 5496 // Inline the keyed store. |
| 5497 Comment cmnt(masm_, "[ Inlined store to keyed property"); |
| 5498 |
| 5499 DeferredReferenceSetKeyedValue* deferred = |
| 5500 new DeferredReferenceSetKeyedValue(); |
| 5501 |
| 5502 // Counter will be decremented in the deferred code. Placed here to avoid |
| 5503 // having it in the instruction stream below where patching will occur. |
| 5504 __ IncrementCounter(&Counters::keyed_store_inline, 1, |
| 5505 frame_->scratch0(), frame_->scratch1()); |
| 5506 |
| 5507 // Check that the value is a smi. As this inlined code does not set the |
| 5508 // write barrier, it is only possible to store smi values. |
| 5509 __ tst(r0, Operand(kSmiTagMask)); |
| 5510 deferred->Branch(ne); |
| 5511 |
| 5512 // Load the key and receiver from the stack. |
| 5513 __ ldr(r1, MemOperand(sp, 0)); |
| 5514 __ ldr(r2, MemOperand(sp, kPointerSize)); |
| 5515 |
| 5516 // Check that the key is a smi. |
| 5517 __ tst(r1, Operand(kSmiTagMask)); |
| 5518 deferred->Branch(ne); |
| 5519 |
| 5520 // Check that the receiver is a heap object. |
| 5521 __ tst(r2, Operand(kSmiTagMask)); |
| 5522 deferred->Branch(eq); |
| 5523 |
| 5524 // Check that the receiver is a JSArray. |
| 5525 __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE); |
| 5526 deferred->Branch(ne); |
| 5527 |
| 5528 // Check that the key is within bounds. Both the key and the length of |
| 5529 // the JSArray are smis. Use unsigned comparison to handle negative keys. |
| 5530 __ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset)); |
| 5531 __ cmp(r3, r1); |
| 5532 deferred->Branch(ls); // Unsigned less equal. |
| 5533 |
| 5534 // The following instructions are part of the inlined keyed store |
| 5535 // property code which can be patched. Therefore the exact number of |
| 5536 // instructions generated needs to be fixed, so the constant pool is blocked |
| 5537 // while generating this code. |
| 5538 #ifdef DEBUG |
| 5539 int kInlinedKeyedStoreInstructions = 7; |
| 5540 Label check_inlined_codesize; |
| 5541 masm_->bind(&check_inlined_codesize); |
| 5542 #endif |
| 5543 { Assembler::BlockConstPoolScope block_const_pool(masm_); |
| 5544 // Get the elements array from the receiver and check that it |
| 5545 // is not a dictionary. |
| 5546 __ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset)); |
| 5547 __ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset)); |
| 5548 // Read the fixed array map from the constant pool (not from the root |
| 5549 // array) so that the value can be patched. When debugging, we patch this |
| 5550 // comparison to always fail so that we will hit the IC call in the |
| 5551 // deferred code which will allow the debugger to break for fast case |
| 5552 // stores. |
| 5553 __ mov(r5, Operand(Factory::fixed_array_map())); |
| 5554 __ cmp(r4, r5); |
| 5555 deferred->Branch(ne); |
| 5556 |
| 5557 // Store the value. |
| 5558 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 5559 __ str(r0, MemOperand(r3, r1, LSL, |
| 5560 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); |
| 5561 |
| 5562 // Make sure that the expected number of instructions are generated. |
| 5563 ASSERT_EQ(kInlinedKeyedStoreInstructions, |
| 5564 masm_->InstructionsGeneratedSince(&check_inlined_codesize)); |
| 5565 } |
| 5566 |
| 5567 deferred->BindExit(); |
| 5568 } else { |
| 5569 frame()->CallKeyedStoreIC(); |
| 5570 } |
| 5571 } |
| 5572 |
| 5573 |
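Both fast paths rely on the trick called out in the new comments: comparing the key against the length with an unsigned condition ("ls", unsigned lower or same, branching to the deferred code) rejects negative keys for free, because a negative value reinterpreted as unsigned exceeds any valid length. The same holds when key and length are both still smi-tagged, as in the inlined store, since tagging scales both sides by two. In plain C++ terms:

    #include <cassert>
    #include <cstdint>

    // One unsigned comparison covers both "key >= 0" and "key < length".
    bool KeyInBounds(int32_t key, int32_t length) {
      return static_cast<uint32_t>(key) < static_cast<uint32_t>(length);
    }

    int main() {
      assert(KeyInBounds(0, 4));
      assert(!KeyInBounds(4, 4));   // key == length: out of bounds
      assert(!KeyInBounds(-1, 4));  // negative key wraps to a huge unsigned
      return 0;
    }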
5453 #ifdef DEBUG | 5574 #ifdef DEBUG |
5454 bool CodeGenerator::HasValidEntryRegisters() { return true; } | 5575 bool CodeGenerator::HasValidEntryRegisters() { return true; } |
5455 #endif | 5576 #endif |
5456 | 5577 |
5457 | 5578 |
5458 #undef __ | 5579 #undef __ |
5459 #define __ ACCESS_MASM(masm) | 5580 #define __ ACCESS_MASM(masm) |
5460 | 5581 |
5461 | 5582 |
5462 Handle<String> Reference::GetName() { | 5583 Handle<String> Reference::GetName() { |
(...skipping 93 matching lines...)
5556 break; | 5677 break; |
5557 } | 5678 } |
5558 | 5679 |
5559 case KEYED: { | 5680 case KEYED: { |
5560 VirtualFrame::SpilledScope scope(frame); | 5681 VirtualFrame::SpilledScope scope(frame); |
5561 Comment cmnt(masm, "[ Store to keyed Property"); | 5682 Comment cmnt(masm, "[ Store to keyed Property"); |
5562 Property* property = expression_->AsProperty(); | 5683 Property* property = expression_->AsProperty(); |
5563 ASSERT(property != NULL); | 5684 ASSERT(property != NULL); |
5564 cgen_->CodeForSourcePosition(property->position()); | 5685 cgen_->CodeForSourcePosition(property->position()); |
5565 | 5686 |
5566 // Call IC code. | 5687 frame->EmitPop(r0); // Value. |
5567 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); | 5688 cgen_->EmitKeyedStore(property->key()->type()); |
5568 frame->EmitPop(r0); // value | |
5569 frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); | |
5570 frame->EmitPush(r0); | 5689 frame->EmitPush(r0); |
5571 cgen_->UnloadReference(this); | 5690 cgen_->UnloadReference(this); |
5572 break; | 5691 break; |
5573 } | 5692 } |
5574 | 5693 |
5575 default: | 5694 default: |
5576 UNREACHABLE(); | 5695 UNREACHABLE(); |
5577 } | 5696 } |
5578 } | 5697 } |
5579 | 5698 |
(...skipping 3924 matching lines...)
9504 | 9623 |
9505 // Just jump to runtime to add the two strings. | 9624 // Just jump to runtime to add the two strings. |
9506 __ bind(&string_add_runtime); | 9625 __ bind(&string_add_runtime); |
9507 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 9626 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
9508 } | 9627 } |
9509 | 9628 |
9510 | 9629 |
9511 #undef __ | 9630 #undef __ |
9512 | 9631 |
9513 } } // namespace v8::internal | 9632 } } // namespace v8::internal |