OLD | NEW |
---|---|
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1256 matching lines...)
1267 case Token::SUB: { | 1267 case Token::SUB: { |
1268 DeferredCode* deferred = NULL; | 1268 DeferredCode* deferred = NULL; |
1269 Result answer(this); // Only allocate a new register if reversed. | 1269 Result answer(this); // Only allocate a new register if reversed. |
1270 if (reversed) { | 1270 if (reversed) { |
1271 answer = allocator()->Allocate(); | 1271 answer = allocator()->Allocate(); |
1272 ASSERT(answer.is_valid()); | 1272 ASSERT(answer.is_valid()); |
1273 deferred = new DeferredInlineSmiSubReversed(this, | 1273 deferred = new DeferredInlineSmiSubReversed(this, |
1274 smi_value, | 1274 smi_value, |
1275 overwrite_mode); | 1275 overwrite_mode); |
1276 __ Set(answer.reg(), Immediate(value)); | 1276 __ Set(answer.reg(), Immediate(value)); |
1277 if (operand->is_register()) { | 1277 // We are in the reversed case so they can't both be Smi constants. |
1278 __ sub(answer.reg(), Operand(operand->reg())); | 1278 ASSERT(operand->is_register()); |
1279 } else { | 1279 __ sub(answer.reg(), Operand(operand->reg())); |
1280 ASSERT(operand->is_constant()); | |
1281 __ sub(Operand(answer.reg()), Immediate(operand->handle())); | |
1282 } | |
1283 } else { | 1280 } else { |
1284 operand->ToRegister(); | 1281 operand->ToRegister(); |
1285 frame_->Spill(operand->reg()); | 1282 frame_->Spill(operand->reg()); |
1286 deferred = new DeferredInlineSmiSub(this, | 1283 deferred = new DeferredInlineSmiSub(this, |
1287 smi_value, | 1284 smi_value, |
1288 overwrite_mode); | 1285 overwrite_mode); |
1289 __ sub(Operand(operand->reg()), Immediate(value)); | 1286 __ sub(Operand(operand->reg()), Immediate(value)); |
1290 answer = *operand; | 1287 answer = *operand; |
1291 } | 1288 } |
1292 deferred->enter()->Branch(overflow, operand, not_taken); | 1289 deferred->enter()->Branch(overflow, operand, not_taken); |
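In the reversed case above the constant is loaded into a freshly allocated register and the operand is subtracted from it, because x86 `sub` always overwrites its destination and subtraction is not commutative; the non-reversed case can subtract the immediate from the (spilled) operand register in place. The following is a minimal standalone C++ restatement of that fast-path/deferred-path shape, with a hypothetical `SlowPathSub` standing in for V8's deferred code objects and the GCC/Clang `__builtin_sub_overflow` builtin standing in for the overflow branch; it is an illustrative sketch, not V8 source.

```cpp
#include <cstdint>

// Hypothetical stand-in for the deferred (non-Smi) path; V8's real
// DeferredInlineSmiSub / DeferredInlineSmiSubReversed are not modeled here.
static int32_t SlowPathSub(int32_t lhs, int32_t rhs) {
  return static_cast<int32_t>(static_cast<int64_t>(lhs) - rhs);
}

// Mirrors the shape of the hunk above: `operand - constant` can overwrite
// the operand register in place, but the reversed `constant - operand`
// must first materialize the constant in a fresh register because x86
// `sub` always writes its destination operand.
int32_t InlineSmiSub(int32_t operand, int32_t constant, bool reversed) {
  int32_t result;
  bool overflow = reversed
      ? __builtin_sub_overflow(constant, operand, &result)
      : __builtin_sub_overflow(operand, constant, &result);
  if (overflow) {
    // Corresponds to deferred->enter()->Branch(overflow, ...): bail out
    // to the generic code when the Smi arithmetic overflows.
    return reversed ? SlowPathSub(constant, operand)
                    : SlowPathSub(operand, constant);
  }
  return result;
}
```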
(...skipping 74 matching lines...)
1367 } else { | 1364 } else { |
1368 // Only the least significant 5 bits of the shift value are used. | 1365 // Only the least significant 5 bits of the shift value are used. |
1369 // In the slow case, this masking is done inside the runtime call. | 1366 // In the slow case, this masking is done inside the runtime call. |
1370 int shift_value = int_value & 0x1f; | 1367 int shift_value = int_value & 0x1f; |
1371 DeferredCode* deferred = | 1368 DeferredCode* deferred = |
1372 new DeferredInlineSmiOperation(this, Token::SHL, smi_value, | 1369 new DeferredInlineSmiOperation(this, Token::SHL, smi_value, |
1373 overwrite_mode); | 1370 overwrite_mode); |
1374 operand->ToRegister(); | 1371 operand->ToRegister(); |
1375 __ test(operand->reg(), Immediate(kSmiTagMask)); | 1372 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1376 deferred->enter()->Branch(not_zero, operand, not_taken); | 1373 deferred->enter()->Branch(not_zero, operand, not_taken); |
1377 Result answer = allocator()->Allocate(); | 1374 if (shift_value != 0) { |
1378 ASSERT(answer.is_valid()); | 1375 Result answer = allocator()->Allocate(); |
1379 __ mov(answer.reg(), operand->reg()); | 1376 ASSERT(answer.is_valid()); |
1380 ASSERT(kSmiTag == 0); // adjust code if not the case | 1377 __ mov(answer.reg(), operand->reg()); |
1381 // We do no shifts, only the Smi conversion, if shift_value is 1. | 1378 ASSERT(kSmiTag == 0); // adjust code if not the case |
1382 if (shift_value == 0) { | 1379 // We do no shifts, only the Smi conversion, if shift_value is 1. |
1383 __ sar(answer.reg(), kSmiTagSize); | 1380 if (shift_value == 0) { |
1384 } else if (shift_value > 1) { | 1381 __ sar(answer.reg(), kSmiTagSize); |
iposva 2009/04/28 16:52:24: Somehow I feel this will never be reached, and I a
Erik Corry 2009/04/28 18:05:49: Presumably you mean "and I am sure that". It has
1385 __ shl(answer.reg(), shift_value - 1); | 1382 } else if (shift_value > 1) { |
1383 __ shl(answer.reg(), shift_value - 1); | |
1384 } | |
1385 // Convert int result to Smi, checking that it is in int range. | |
1386 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | |
1387 __ add(answer.reg(), Operand(answer.reg())); | |
1388 deferred->enter()->Branch(overflow, operand, not_taken); | |
1389 operand->Unuse(); | |
1390 deferred->BindExit(&answer); | |
1391 frame_->Push(&answer); | |
1392 } else { | |
1393 deferred->BindExit(operand); | |
1394 frame_->Push(operand); | |
1386 } | 1395 } |
1387 // Convert int result to Smi, checking that it is in int range. | |
1388 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | |
1389 __ add(answer.reg(), Operand(answer.reg())); | |
1390 deferred->enter()->Branch(overflow, operand, not_taken); | |
1391 operand->Unuse(); | |
1392 deferred->BindExit(&answer); | |
1393 frame_->Push(&answer); | |
1394 } | 1396 } |
1395 break; | 1397 break; |
1396 } | 1398 } |
1397 | 1399 |
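The SHL case above leans on the ia32 Smi encoding (kSmiTag == 0, kSmiTagSize == 1): a tagged Smi already carries one left shift, so shifting the tagged value by `shift_value - 1` and then doubling it with `add reg, reg` both completes the shift and re-applies the tag, and the final add sets the overflow flag whenever the result leaves Smi range. The shift count itself is masked to its low 5 bits, matching what the runtime call does in the slow case. Below is a rough standalone C++ restatement of that arithmetic; the constants are illustrative stand-ins and `__builtin_add_overflow` models the jump to the deferred code, so treat it as a sketch rather than V8 source.

```cpp
#include <cstdint>

// Illustrative constant; V8 declares this elsewhere.
constexpr int kSmiTagSize = 1;  // tagged = value << 1, low bit is the tag

// tagged result = (x << s) << 1 = ((x << 1) << (s - 1)) * 2
bool ShlTaggedSmi(int32_t tagged, int shift_value, int32_t* tagged_out) {
  shift_value &= 0x1f;  // only the low 5 bits of the count are used
  int32_t partial = tagged;
  if (shift_value == 0) {
    partial >>= kSmiTagSize;  // the `sar`: undo the tag shift
  } else if (shift_value > 1) {
    // The `shl`; unsigned arithmetic avoids signed-overflow UB in C++.
    partial = static_cast<int32_t>(
        static_cast<uint32_t>(partial) << (shift_value - 1));
  }
  // The final `add reg, reg`: completes the shift, re-tags the value, and
  // reports overflow exactly where the generated code branches to the
  // deferred slow case.
  return !__builtin_add_overflow(partial, partial, tagged_out);
}
```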
1398 case Token::BIT_OR: | 1400 case Token::BIT_OR: |
1399 case Token::BIT_XOR: | 1401 case Token::BIT_XOR: |
1400 case Token::BIT_AND: { | 1402 case Token::BIT_AND: { |
1401 DeferredCode* deferred = NULL; | 1403 DeferredCode* deferred = NULL; |
1402 if (reversed) { | 1404 if (reversed) { |
1403 deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value, | 1405 deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value, |
1404 overwrite_mode); | 1406 overwrite_mode); |
1405 } else { | 1407 } else { |
1406 deferred = new DeferredInlineSmiOperation(this, op, smi_value, | 1408 deferred = new DeferredInlineSmiOperation(this, op, smi_value, |
1407 overwrite_mode); | 1409 overwrite_mode); |
1408 } | 1410 } |
1409 operand->ToRegister(); | 1411 operand->ToRegister(); |
1410 __ test(operand->reg(), Immediate(kSmiTagMask)); | 1412 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1411 deferred->enter()->Branch(not_zero, operand, not_taken); | 1413 deferred->enter()->Branch(not_zero, operand, not_taken); |
1412 frame_->Spill(operand->reg()); | 1414 frame_->Spill(operand->reg()); |
1413 if (op == Token::BIT_AND) { | 1415 if (op == Token::BIT_AND) { |
1414 if (int_value == 0) { | 1416 __ and_(Operand(operand->reg()), Immediate(value)); |
1415 __ xor_(Operand(operand->reg()), operand->reg()); | |
1416 } else { | |
1417 __ and_(Operand(operand->reg()), Immediate(value)); | |
1418 } | |
1419 } else if (op == Token::BIT_XOR) { | 1417 } else if (op == Token::BIT_XOR) { |
1420 if (int_value != 0) { | 1418 if (int_value != 0) { |
1421 __ xor_(Operand(operand->reg()), Immediate(value)); | 1419 __ xor_(Operand(operand->reg()), Immediate(value)); |
1422 } | 1420 } |
1423 } else { | 1421 } else { |
1424 ASSERT(op == Token::BIT_OR); | 1422 ASSERT(op == Token::BIT_OR); |
1425 if (int_value != 0) { | 1423 if (int_value != 0) { |
1426 __ or_(Operand(operand->reg()), Immediate(value)); | 1424 __ or_(Operand(operand->reg()), Immediate(value)); |
1427 } | 1425 } |
1428 } | 1426 } |
(...skipping 3842 matching lines...)
5271 RelocInfo::Mode mode = is_global_ | 5269 RelocInfo::Mode mode = is_global_ |
5272 ? RelocInfo::CODE_TARGET_CONTEXT | 5270 ? RelocInfo::CODE_TARGET_CONTEXT |
5273 : RelocInfo::CODE_TARGET; | 5271 : RelocInfo::CODE_TARGET; |
5274 Result value = cgen->frame()->CallKeyedLoadIC(mode); | 5272 Result value = cgen->frame()->CallKeyedLoadIC(mode); |
5275 // The result needs to be specifically the eax register because the | 5273 // The result needs to be specifically the eax register because the |
5276 // offset to the patch site will be expected in a test eax | 5274 // offset to the patch site will be expected in a test eax |
5277 // instruction. | 5275 // instruction. |
5278 ASSERT(value.is_register() && value.reg().is(eax)); | 5276 ASSERT(value.is_register() && value.reg().is(eax)); |
5279 // The delta from the start of the map-compare instruction to the | 5277 // The delta from the start of the map-compare instruction to the |
5280 // test eax instruction. We use masm_ directly here instead of the | 5278 // test eax instruction. We use masm_ directly here instead of the |
5281 // __ macro because the __ macro sometimes uses macro expansion to turn | 5279 // double underscore macro because the macro sometimes uses macro |
5282 // into something that can't return a value. This is encountered when | 5280 // expansion to turn into something that can't return a value. This |
5283 // doing generated code coverage tests. | 5281 // is encountered when doing generated code coverage tests. |
5284 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | 5282 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); |
5285 __ test(value.reg(), Immediate(-delta_to_patch_site)); | 5283 __ test(value.reg(), Immediate(-delta_to_patch_site)); |
5286 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); | 5284 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); |
5287 | 5285 |
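The `test eax, Immediate(-delta_to_patch_site)` emitted above is not there for control flow; it is a marker whose 32-bit immediate records how far back the map-compare instruction sits, so the IC miss handler can later locate and patch that compare. The sketch below shows how such an offset could be recovered on ia32, where `test eax, imm32` is encoded as 0xA9 followed by the immediate. The function name and the exact reference point are illustrative assumptions and not V8's actual IC code.

```cpp
#include <cstdint>
#include <cstring>

// Given the address of the marker instruction, recover the address of the
// map-compare instruction it points back to. The stored immediate is the
// negated delta, so adding it walks backwards to the patch site.
uint8_t* FindMapComparePatchSite(uint8_t* test_instruction_address) {
  // test eax, imm32  ==  0xA9 imm32
  if (*test_instruction_address != 0xA9) return nullptr;  // not a marker
  int32_t negated_delta;
  std::memcpy(&negated_delta, test_instruction_address + 1,
              sizeof(negated_delta));
  return test_instruction_address + negated_delta;
}
```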
5288 // The receiver and key were spilled by the call, so their state as | 5286 // The receiver and key were spilled by the call, so their state as |
5289 // constants or copies has been changed. Thus, they need to be | 5287 // constants or copies has been changed. Thus, they need to be |
5290 // "mergable" in the block at the exit label and are therefore | 5288 // "mergable" in the block at the exit label and are therefore |
5291 // passed as return results here. | 5289 // passed as return results here. |
5292 key = cgen->frame()->Pop(); | 5290 key = cgen->frame()->Pop(); |
5293 receiver = cgen->frame()->Pop(); | 5291 receiver = cgen->frame()->Pop(); |
(...skipping 80 matching lines...)
5374 // is not a load from the global context) and that it has the | 5372 // is not a load from the global context) and that it has the |
5375 // expected map. | 5373 // expected map. |
5376 if (!is_global) { | 5374 if (!is_global) { |
5377 __ test(receiver.reg(), Immediate(kSmiTagMask)); | 5375 __ test(receiver.reg(), Immediate(kSmiTagMask)); |
5378 deferred->enter()->Branch(zero, &receiver, &key, not_taken); | 5376 deferred->enter()->Branch(zero, &receiver, &key, not_taken); |
5379 } | 5377 } |
5380 | 5378 |
5381 // Initially, use an invalid map. The map is patched in the IC | 5379 // Initially, use an invalid map. The map is patched in the IC |
5382 // initialization code. | 5380 // initialization code. |
5383 __ bind(deferred->patch_site()); | 5381 __ bind(deferred->patch_site()); |
5384 __ cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset), | 5382 // Use masm-> here instead of the double underscore macro since extra |
5383 // coverage code can interfere with the patching. | |
5384 masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset), | |
5385 Immediate(Factory::null_value())); | 5385 Immediate(Factory::null_value())); |
5386 deferred->enter()->Branch(not_equal, &receiver, &key, not_taken); | 5386 deferred->enter()->Branch(not_equal, &receiver, &key, not_taken); |
5387 | 5387 |
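The switch just above from the `__` shorthand to the raw `masm->` pointer matters because a generated-code-coverage build can redefine the shorthand to emit bookkeeping code in front of every forwarded instruction, which would push the cmp away from the just-bound patch_site() label and break the patcher's fixed offsets (and, as the comment in the earlier hunk notes, such an expansion may not yield a usable return value). Here is a toy illustration of that kind of macro; `ToyMasm` and `RecordCoverageHit` are invented names, not V8's actual coverage hooks.

```cpp
#include <cstdio>

// Toy assembler; names are invented for illustration only and do not
// correspond to V8's MacroAssembler.
struct ToyMasm {
  int pc = 0;
  void RecordCoverageHit(int line) { std::printf("hit %d\n", line); pc += 5; }
  int cmp() { return pc++; }  // returns the offset of the emitted cmp
};

// A coverage build might redefine the shorthand roughly like this: every
// forwarded instruction is preceded by an extra emitted call. The expansion
// is two statements, so it cannot be used where the instruction's return
// value is needed, and the extra code shifts any offset measured from a
// label bound just before it, which is why the patch-site cmp is emitted
// through the raw pointer instead.
#define __ masm_->RecordCoverageHit(__LINE__); masm_->

int main() {
  ToyMasm m;
  ToyMasm* masm_ = &m;
  __ cmp();               // fine as a statement, but emits extra code first
  int at = masm_->cmp();  // raw pointer: nothing extra, value usable
  std::printf("cmp emitted at offset %d\n", at);
  return 0;
}
```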
5388 // Check that the key is a smi. | 5388 // Check that the key is a smi. |
5389 __ test(key.reg(), Immediate(kSmiTagMask)); | 5389 __ test(key.reg(), Immediate(kSmiTagMask)); |
5390 deferred->enter()->Branch(not_zero, &receiver, &key, not_taken); | 5390 deferred->enter()->Branch(not_zero, &receiver, &key, not_taken); |
5391 | 5391 |
5392 // Get the elements array from the receiver and check that it | 5392 // Get the elements array from the receiver and check that it |
5393 // is not a dictionary. | 5393 // is not a dictionary. |
5394 Result elements = cgen_->allocator()->Allocate(); | 5394 Result elements = cgen_->allocator()->Allocate(); |
(...skipping 1726 matching lines...)
7121 | 7121 |
7122 // Slow-case: Go through the JavaScript implementation. | 7122 // Slow-case: Go through the JavaScript implementation. |
7123 __ bind(&slow); | 7123 __ bind(&slow); |
7124 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 7124 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
7125 } | 7125 } |
7126 | 7126 |
7127 | 7127 |
7128 #undef __ | 7128 #undef __ |
7129 | 7129 |
7130 } } // namespace v8::internal | 7130 } } // namespace v8::internal |