OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 5262 matching lines...)
5273 public: | 5273 public: |
5274 DeferredReferenceGetKeyedValue() { | 5274 DeferredReferenceGetKeyedValue() { |
5275 set_comment("[ DeferredReferenceGetKeyedValue"); | 5275 set_comment("[ DeferredReferenceGetKeyedValue"); |
5276 } | 5276 } |
5277 | 5277 |
5278 virtual void Generate(); | 5278 virtual void Generate(); |
5279 }; | 5279 }; |
5280 | 5280 |
5281 | 5281 |
5282 void DeferredReferenceGetKeyedValue::Generate() { | 5282 void DeferredReferenceGetKeyedValue::Generate() { |
5283 __ DecrementCounter(&Counters::keyed_load_inline, 1, r1, r2); | 5283 Register scratch1 = VirtualFrame::scratch0(); |
5284 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, r1, r2); | 5284 Register scratch2 = VirtualFrame::scratch1(); |
| 5285 __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2); |
| 5286 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2); |
5285 | 5287 |
5286 // The rest of the instructions in the deferred code must be together. | 5288 // The rest of the instructions in the deferred code must be together. |
5287 { Assembler::BlockConstPoolScope block_const_pool(masm_); | 5289 { Assembler::BlockConstPoolScope block_const_pool(masm_); |
5288 // Call keyed load IC. It has all arguments on the stack. | 5290 // Call keyed load IC. It has all arguments on the stack. |
5289 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); | 5291 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); |
5290 __ Call(ic, RelocInfo::CODE_TARGET); | 5292 __ Call(ic, RelocInfo::CODE_TARGET); |
5291 // The call must be followed by a nop instruction to indicate that the | 5293 // The call must be followed by a nop instruction to indicate that the |
5292 // keyed load has been inlined. | 5294 // keyed load has been inlined. |
5293 __ nop(PROPERTY_LOAD_INLINED); | 5295 __ nop(PROPERTY_LOAD_INLINED); |
5294 | 5296 |
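For anyone reading this hunk cold: the IC patching machinery later finds inlined load sites by inspecting the instruction that follows the call, which is why the call and the marker nop are pinned together inside a BlockConstPoolScope; a constant-pool dump emitted between them would break the fixed layout the patcher expects. On ARM, V8's Assembler::nop(type) encodes the marker as "mov rT, rT", a move of register T onto itself. A minimal sketch of how such a marker can be recognised follows; the mask/pattern constants and the function name are illustrative, not V8's actual API:

    #include <cstdint>

    // "mov rd, rm" with no shift and no S bit: mask off the condition
    // bits and the two register fields, then match the opcode bits.
    constexpr uint32_t kMovRegRegMask    = 0x0FFF0FF0;
    constexpr uint32_t kMovRegRegPattern = 0x01A00000;

    // True if instr is the marker nop "mov rT, rT" for the given type.
    bool IsMarkerNop(uint32_t instr, int type) {
      if ((instr & kMovRegRegMask) != kMovRegRegPattern) return false;
      int rd = (instr >> 12) & 0xF;   // destination register field
      int rm = instr & 0xF;           // source register field
      return rd == rm && rd == type;  // a self-move of rT encodes marker T
    }

Encoding the marker in an instruction the CPU executes as a no-op keeps the code path free of side effects while still giving the patcher a byte pattern to test for.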
(...skipping 73 matching lines...)
5368 frame_->CallKeyedLoadIC(); | 5370 frame_->CallKeyedLoadIC(); |
5369 } else { | 5371 } else { |
5370 // Inline the keyed load. | 5372 // Inline the keyed load. |
5371 Comment cmnt(masm_, "[ Inlined load from keyed property"); | 5373 Comment cmnt(masm_, "[ Inlined load from keyed property"); |
5372 | 5374 |
5373 // The counter will be decremented in the deferred code. Placed here to avoid | 5375 // The counter will be decremented in the deferred code. Placed here to avoid |
5374 // having it in the instruction stream below where patching will occur. | 5376 // having it in the instruction stream below where patching will occur. |
5375 __ IncrementCounter(&Counters::keyed_load_inline, 1, | 5377 __ IncrementCounter(&Counters::keyed_load_inline, 1, |
5376 frame_->scratch0(), frame_->scratch1()); | 5378 frame_->scratch0(), frame_->scratch1()); |
5377 | 5379 |
5378 // Load the receiver from the stack. | 5380 // Load the receiver and key from the stack. |
5379 frame_->SpillAllButCopyTOSToR0(); | 5381 frame_->SpillAllButCopyTOSToR1R0(); |
| 5382 Register receiver = r0; |
| 5383 Register key = r1; |
5380 VirtualFrame::SpilledScope spilled(frame_); | 5384 VirtualFrame::SpilledScope spilled(frame_); |
5381 | 5385 |
5382 DeferredReferenceGetKeyedValue* deferred = | 5386 DeferredReferenceGetKeyedValue* deferred = |
5383 new DeferredReferenceGetKeyedValue(); | 5387 new DeferredReferenceGetKeyedValue(); |
5384 | 5388 |
5385 // Check that the receiver is a heap object. | 5389 // Check that the receiver is a heap object. |
5386 __ tst(r0, Operand(kSmiTagMask)); | 5390 __ tst(receiver, Operand(kSmiTagMask)); |
5387 deferred->Branch(eq); | 5391 deferred->Branch(eq); |
5388 | 5392 |
5389 // The following instructions implement the inlined keyed property load. Parts | 5393 // The following instructions implement the inlined keyed property load. Parts |
5390 // of this code are patched, so the exact number of instructions generated | 5394 // of this code are patched, so the exact number of instructions generated |
5391 // needs to be fixed. Therefore the constant pool is blocked while generating | 5395 // needs to be fixed. Therefore the constant pool is blocked while generating |
5392 // this code. | 5396 // this code. |
5393 #ifdef DEBUG | 5397 #ifdef DEBUG |
5394 int kInlinedKeyedLoadInstructions = 20; | 5398 int kInlinedKeyedLoadInstructions = 19; |
5395 Label check_inlined_codesize; | 5399 Label check_inlined_codesize; |
5396 masm_->bind(&check_inlined_codesize); | 5400 masm_->bind(&check_inlined_codesize); |
5397 #endif | 5401 #endif |
5398 { Assembler::BlockConstPoolScope block_const_pool(masm_); | 5402 { Assembler::BlockConstPoolScope block_const_pool(masm_); |
| 5403 Register scratch1 = VirtualFrame::scratch0(); |
| 5404 Register scratch2 = VirtualFrame::scratch1(); |
5399 // Check the map. The null map used below is patched by the inline cache | 5405 // Check the map. The null map used below is patched by the inline cache |
5400 // code. | 5406 // code. |
5401 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5407 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
5402 __ mov(r2, Operand(Factory::null_value())); | 5408 __ mov(scratch2, Operand(Factory::null_value())); |
5403 __ cmp(r1, r2); | 5409 __ cmp(scratch1, scratch2); |
5404 deferred->Branch(ne); | 5410 deferred->Branch(ne); |
5405 | 5411 |
5406 // Load the key from the stack. | |
5407 __ ldr(r1, MemOperand(sp, 0)); | |
5408 | |
5409 // Check that the key is a smi. | 5412 // Check that the key is a smi. |
5410 __ tst(r1, Operand(kSmiTagMask)); | 5413 __ tst(key, Operand(kSmiTagMask)); |
5411 deferred->Branch(ne); | 5414 deferred->Branch(ne); |
5412 | 5415 |
5413 // Get the elements array from the receiver and check that it | 5416 // Get the elements array from the receiver and check that it |
5414 // is not a dictionary. | 5417 // is not a dictionary. |
5415 __ ldr(r2, FieldMemOperand(r0, JSObject::kElementsOffset)); | 5418 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
5416 __ ldr(r3, FieldMemOperand(r2, JSObject::kMapOffset)); | 5419 __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset)); |
5417 __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex); | 5420 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
5418 __ cmp(r3, r4); | 5421 __ cmp(scratch2, ip); |
5419 deferred->Branch(ne); | 5422 deferred->Branch(ne); |
5420 | 5423 |
5421 // Check that key is within bounds. | 5424 // Check that key is within bounds. |
5422 __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); | 5425 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); |
5423 __ cmp(r3, Operand(r1, ASR, kSmiTagSize)); | 5426 __ cmp(scratch2, Operand(key, ASR, kSmiTagSize)); |
5424 deferred->Branch(ls); // Unsigned less than or equal. | 5427 deferred->Branch(ls); // Unsigned less than or equal. |
5425 | 5428 |
5426 // Load and check that the result is not the hole (r1 is a smi). | 5429 // Load and check that the result is not the hole (key is a smi). |
5427 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); | 5430 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); |
5428 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 5431 __ add(scratch1, |
5429 __ ldr(r0, MemOperand(r2, r1, LSL, | 5432 scratch1, |
5430 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); | 5433 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
5431 __ cmp(r0, r3); | 5434 __ ldr(r0, |
| 5435 MemOperand(scratch1, key, LSL, |
| 5436 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); |
| 5437 __ cmp(r0, scratch2); |
| 5438 // This is the only branch to deferred where r0 and r1 do not contain the |
| 5439 // receiver and key. We can't just load undefined here because we have to |
| 5440 // check the prototype. |
5432 deferred->Branch(eq); | 5441 deferred->Branch(eq); |
5433 | 5442 |
5434 // Make sure that the expected number of instructions are generated. | 5443 // Make sure that the expected number of instructions are generated. |
5435 ASSERT_EQ(kInlinedKeyedLoadInstructions, | 5444 ASSERT_EQ(kInlinedKeyedLoadInstructions, |
5436 masm_->InstructionsGeneratedSince(&check_inlined_codesize)); | 5445 masm_->InstructionsGeneratedSince(&check_inlined_codesize)); |
5437 } | 5446 } |
5438 | 5447 |
5439 deferred->BindExit(); | 5448 deferred->BindExit(); |
5440 } | 5449 } |
5441 } | 5450 } |
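The inlined sequence above leans on V8's pointer tagging: on 32-bit targets a smi stores its value shifted left by one with a clear low bit, while heap pointers carry a one in the low bit (kHeapObjectTag). Hence "Operand(key, ASR, kSmiTagSize)" untags the key for the bounds compare, the ls branch (unsigned lower-or-same) also rejects negative keys since they untag to huge unsigned values, and the final ldr shifts the still-tagged key by one extra bit position, turning value << 1 into a byte offset of value * 4. A small C++ sketch of the same address computation; the constants match the 32-bit ARM configuration of this era, but treat the exact values as assumptions:

    #include <cstdint>

    constexpr int kSmiTagSize      = 1;  // smi = value << 1, low bit clear
    constexpr int kSmiShiftSize    = 0;  // no extra smi shift on 32-bit
    constexpr int kPointerSizeLog2 = 2;  // 4-byte pointers
    constexpr int kHeapObjectTag   = 1;  // heap pointers have the low bit set
    constexpr int kFixedArrayHeaderSize = 2 * 4;  // map word + length word

    // Byte address of the element named by a tagged smi key, mirroring
    // the add/ldr pair in the inlined load above.
    uint32_t ElementAddress(uint32_t elements, uint32_t smi_key) {
      uint32_t base = elements + (kFixedArrayHeaderSize - kHeapObjectTag);
      // The tagged key already holds value << 1; one more left shift
      // scales it to value << 2, i.e. value * 4 bytes per element.
      return base + (smi_key << (kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
    }

Keeping the key tagged and folding the untag into the addressing mode's shift amount is what lets the whole load stay at a fixed 19 instructions for the patcher.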
(...skipping 4053 matching lines...)
9495 | 9504 |
9496 // Just jump to runtime to add the two strings. | 9505 // Just jump to runtime to add the two strings. |
9497 __ bind(&string_add_runtime); | 9506 __ bind(&string_add_runtime); |
9498 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 9507 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
9499 } | 9508 } |
9500 | 9509 |
9501 | 9510 |
9502 #undef __ | 9511 #undef __ |
9503 | 9512 |
9504 } } // namespace v8::internal | 9513 } } // namespace v8::internal |