Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(140)

Side by Side Diff: src/codegen-ia32.cc

Issue 18225: Experimental: processor flags are not preserved across CFG edges... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/toiger/
Patch Set: '' Created 11 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1251 matching lines...) Expand 10 before | Expand all | Expand 10 after
1262 right_side.ToRegister(eax); 1262 right_side.ToRegister(eax);
1263 } else { 1263 } else {
1264 frame_->Spill(eax); // Can be multiply referenced, even now. 1264 frame_->Spill(eax); // Can be multiply referenced, even now.
1265 frame_->Spill(edx); 1265 frame_->Spill(edx);
1266 __ xchg(eax, edx); 1266 __ xchg(eax, edx);
1267 // If left_side and right_side become real (non-dummy) arguments 1267 // If left_side and right_side become real (non-dummy) arguments
1268 // to CallStub, they need to be swapped in this case. 1268 // to CallStub, they need to be swapped in this case.
1269 } 1269 }
1270 CompareStub stub(cc, strict); 1270 CompareStub stub(cc, strict);
1271 Result answer = frame_->CallStub(&stub, &right_side, &left_side, 0); 1271 Result answer = frame_->CallStub(&stub, &right_side, &left_side, 0);
1272 ASSERT(answer.is_register() && answer.reg().is_byte_register());
1273 frame_->Spill(answer.reg());
1272 if (cc == equal) { 1274 if (cc == equal) {
1273 __ test(answer.reg(), Operand(answer.reg())); 1275 __ test(answer.reg(), Operand(answer.reg()));
1274 } else { 1276 } else {
1275 __ cmp(answer.reg(), 0); 1277 __ cmp(answer.reg(), 0);
1276 } 1278 }
1277 answer.Unuse(); 1279 __ setcc(cc, answer.reg());
1280 __ and_(Operand(answer.reg()), Immediate(1));
1278 // The expected frame at JumpTarget "done" is bound to the current frame. 1281 // The expected frame at JumpTarget "done" is bound to the current frame.
1279 // This current frame is spilled, due to the call to CallStub. 1282 // This current frame is spilled, due to the call to CallStub.
1280 // It would be better if the fast SMI case controlled the expected frame. 1283 // It would be better if the fast SMI case controlled the expected frame.
1281 done.Jump(); 1284 done.Jump(&answer);
1282 1285
1283 is_smi.Bind(&left_side, &right_side); 1286 is_smi.Bind(&left_side, &right_side);
1287 left_side.ToRegister();
1288 right_side.ToRegister();
1289 // Find a byte register for the flag value.
1290 if (!left_side.reg().is_byte_register() &&
1291 !right_side.reg().is_byte_register()) {
1292 // If we allocate a register here, it should be a byte register
1293 // because there is only one non-reserved register on IA32 that
1294 // does not have a byte part (namely edi) and it must be left_side
1295 // (and right_side).
1296 answer = allocator_->Allocate();
1297 ASSERT(answer.is_register() && answer.reg().is_byte_register());
1298 } else {
1299 answer = left_side.reg().is_byte_register() ? left_side : right_side;
1300 frame_->Spill(answer.reg());
1301 }
1284 __ cmp(left_side.reg(), Operand(right_side.reg())); 1302 __ cmp(left_side.reg(), Operand(right_side.reg()));
1285 right_side.Unuse(); 1303 right_side.Unuse();
1286 left_side.Unuse(); 1304 left_side.Unuse();
1287 // Fall through to |done|. 1305 __ setcc(cc, answer.reg());
1306 __ and_(Operand(answer.reg()), Immediate(1));
1288 1307
1289 done.Bind(); 1308 done.Bind(&answer);
1290 cc_reg_ = cc; 1309 answer.ToRegister();
1310 __ test(answer.reg(), Operand(answer.reg()));
1311 cc_reg_ = not_zero;
1291 } 1312 }
1292 1313
1293 1314
1294 class DeferredSmiComparison: public DeferredCode { 1315 class DeferredSmiComparison: public DeferredCode {
1295 public: 1316 public:
1296 DeferredSmiComparison(CodeGenerator* generator, 1317 DeferredSmiComparison(CodeGenerator* generator,
1297 Condition cc, 1318 Condition cc,
1298 bool strict, 1319 bool strict,
1299 int int_value) : 1320 int int_value) :
1300 DeferredCode(generator), 1321 DeferredCode(generator),
(...skipping 16 matching lines...) Expand all
1317 CompareStub stub(cc_, strict_); 1338 CompareStub stub(cc_, strict_);
1318 Result argument(cgen); 1339 Result argument(cgen);
1319 enter()->Bind(&argument); 1340 enter()->Bind(&argument);
1320 1341
1321 // Setup parameters and call stub. 1342 // Setup parameters and call stub.
1322 argument.ToRegister(edx); 1343 argument.ToRegister(edx);
1323 Result value = cgen->allocator()->Allocate(eax); 1344 Result value = cgen->allocator()->Allocate(eax);
1324 ASSERT(value.is_valid()); 1345 ASSERT(value.is_valid());
1325 __ Set(value.reg(), Immediate(Smi::FromInt(int_value_))); 1346 __ Set(value.reg(), Immediate(Smi::FromInt(int_value_)));
1326 Result result = cgen->frame()->CallStub(&stub, &argument, &value, 0); 1347 Result result = cgen->frame()->CallStub(&stub, &argument, &value, 0);
1327 ASSERT(result.is_register()); 1348 ASSERT(result.is_register() && result.reg().is_byte_register());
1349 cgen->frame()->Spill(result.reg());
1328 __ cmp(result.reg(), 0); 1350 __ cmp(result.reg(), 0);
1329 result.Unuse(); 1351 __ setcc(cc_, result.reg());
1330 // The actual result is returned in the flags. 1352 __ and_(Operand(result.reg()), Immediate(1));
1331 exit()->Jump(); 1353 exit()->Jump(&result);
1332 } 1354 }
1333 1355
1334 1356
// Emit an inline comparison of the top-of-frame value against a known
// smi constant, materializing the comparison result as a 0/1 flag in a
// byte register rather than leaving it in the processor flags (so the
// result survives CFG edges).  Non-smi operands are handled by a
// DeferredSmiComparison stub call; both paths rejoin at the deferred
// exit with the flag value in a virtual-frame register.
void CodeGenerator::SmiComparison(Condition cc,
                                  Handle<Object> value,
                                  bool strict) {
  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == equal);

  int int_value = Smi::cast(*value)->value();
  ASSERT(is_intn(int_value, kMaxSmiInlinedBits));

  DeferredSmiComparison* deferred =
      new DeferredSmiComparison(this, cc, strict, int_value);

  Result comparee = frame_->Pop();
  comparee.ToRegister();
  // Take the deferred (stub-call) path if the comparee is not a smi.
  __ test(comparee.reg(), Immediate(kSmiTagMask));
  deferred->enter()->Branch(not_zero, &comparee, not_taken);
  // Find a byte register to hold the flag value.  If the comparee is
  // a byte register we can use it, otherwise allocating will give us
  // one since there is only one non-reserved IA32 register that does
  // not have a byte part (namely edi).
  Result flag(this);
  if (comparee.reg().is_byte_register()) {
    flag = comparee;
    // Spill: the comparee register may be multiply referenced and is
    // about to be clobbered by setcc below.
    frame_->Spill(flag.reg());
  } else {
    flag = allocator_->Allocate();
    // setcc can only target a byte register (al/bl/cl/dl).
    ASSERT(flag.is_register() && flag.reg().is_byte_register());
  }
  // Test smi equality and comparison by signed int comparison.
  __ cmp(Operand(comparee.reg()), Immediate(value));
  comparee.Unuse();
  // Materialize the condition as 0/1: setcc writes only the low byte,
  // so mask the rest of the register.
  __ setcc(cc, flag.reg());
  __ and_(Operand(flag.reg()), Immediate(1));

  // The deferred code produces its 0/1 result in the same virtual-frame
  // slot, so both paths merge here with the flag in a register.
  deferred->exit()->Bind(&flag);
  flag.ToRegister();
  // Re-derive the processor flags from the materialized value for the
  // caller, which consumes cc_reg_.
  __ test(flag.reg(), Operand(flag.reg()));
  cc_reg_ = not_zero;
}
1357 1396
1358 1397
1359 class CallFunctionStub: public CodeStub { 1398 class CallFunctionStub: public CodeStub {
1360 public: 1399 public:
1361 explicit CallFunctionStub(int argc) : argc_(argc) { } 1400 explicit CallFunctionStub(int argc) : argc_(argc) { }
1362 1401
1363 void Generate(MacroAssembler* masm); 1402 void Generate(MacroAssembler* masm);
1364 1403
1365 private: 1404 private:
(...skipping 2217 matching lines...) Expand 10 before | Expand all | Expand 10 after
3583 slow_case.Bind(); 3622 slow_case.Bind();
3584 frame_->EmitPush(Immediate(Factory::undefined_value())); 3623 frame_->EmitPush(Immediate(Factory::undefined_value()));
3585 3624
3586 end.Bind(); 3625 end.Bind();
3587 } 3626 }
3588 3627
3589 3628
// Inline code for the %_IsArray intrinsic: pops its single argument and
// sets the condition register to `equal` iff the value is a JSArray.
// Clobbers eax and ecx.
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  LoadAndSpill(args->at(0));
  Label answer;
  // We need the CC bits to come out as not_equal in the case where the
  // object is a smi.  This can't be done with the usual test opcode so
  // we copy the object to ecx and do some destructive ops on it that
  // result in the right CC bits.
  frame_->EmitPop(eax);
  __ mov(ecx, Operand(eax));
  // and_ then xor_ with kSmiTagMask leaves the zero flag set (equal)
  // only for non-smis; for a smi the xor produces a nonzero result.
  __ and_(ecx, kSmiTagMask);
  __ xor_(ecx, kSmiTagMask);
  // Smi: jump straight to the answer with flags indicating not_equal.
  __ j(not_equal, &answer, not_taken);
  // It is a heap object - get map.
  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
  // Check if the object is a JS array or not.
  __ cmp(eax, JS_ARRAY_TYPE);
  __ bind(&answer);
  cc_reg_ = equal;
}
3611 3650
3612 3651
3613 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { 3652 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3614 ASSERT(args->length() == 0); 3653 ASSERT(args->length() == 0);
3615 3654
3616 // Seed the result with the formal parameters count, which will be 3655 // Seed the result with the formal parameters count, which will be
3617 // used in case no arguments adaptor frame is found below the 3656 // used in case no arguments adaptor frame is found below the
3618 // current frame. 3657 // current frame.
(...skipping 2486 matching lines...) Expand 10 before | Expand all | Expand 10 after
6105 6144
6106 // Slow-case: Go through the JavaScript implementation. 6145 // Slow-case: Go through the JavaScript implementation.
6107 __ bind(&slow); 6146 __ bind(&slow);
6108 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); 6147 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
6109 } 6148 }
6110 6149
6111 6150
6112 #undef __ 6151 #undef __
6113 6152
6114 } } // namespace v8::internal 6153 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698