Chromium Code Reviews

Side by Side Diff: src/x64/codegen-x64.cc

Issue 164480: X64: Do not use an AllocateWithoutSpill register if it is invalid. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 11 years, 4 months ago
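Summary of the change (as visible in the diff below): allocator_->AllocateWithoutSpilling() can fail and return an invalid Result when no register is free, and the old code used tmp.reg() unconditionally. The patch guards every use of tmp with tmp.is_valid(). When a register is available, the overflow and smi-tag checks are still combined through it (cmovl plus a single testl and branch to the deferred code); when it is not, the generated code branches on overflow first and then tests kSmiTagMask as an immediate, so the invalid register is never touched.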
 // Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 3066 matching lines...)
   DeferredCode* deferred = NULL;
   if (is_postfix) {
     deferred = new DeferredPostfixCountOperation(new_value.reg(),
                                                  old_value.reg(),
                                                  is_increment);
   } else {
     deferred = new DeferredPrefixCountOperation(new_value.reg(),
                                                 is_increment);
   }

+  // If we have a free register, combine the smi and overflow checks.
   Result tmp = allocator_->AllocateWithoutSpilling();
   ASSERT(kSmiTagMask == 1 && kSmiTag == 0);
-  __ movl(tmp.reg(), Immediate(kSmiTagMask));
-  // Smi test.
+  if (tmp.is_valid()) {
+    __ movl(tmp.reg(), Immediate(kSmiTagMask));
+  }
+
+  // Try incrementing or decrementing the smi.
   __ movq(kScratchRegister, new_value.reg());
   if (is_increment) {
     __ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
   } else {
     __ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
   }
-  // deferred->Branch(overflow);
-  __ cmovl(overflow, kScratchRegister, tmp.reg());
-  __ testl(kScratchRegister, tmp.reg());
-  tmp.Unuse();
-  deferred->Branch(not_zero);
+
+  // Go to the deferred case if the result overflows or is non-smi.
+  if (tmp.is_valid()) {
+    __ cmovl(overflow, kScratchRegister, tmp.reg());
+    __ testl(kScratchRegister, tmp.reg());
+    tmp.Unuse();
+    deferred->Branch(not_zero);
+  } else {
+    deferred->Branch(overflow);
+    __ testl(kScratchRegister, Immediate(kSmiTagMask));
+    deferred->Branch(not_zero);
+  }
+
   __ movq(new_value.reg(), kScratchRegister);
-
   deferred->BindExit();

-
   // Postfix: store the old value in the allocated slot under the
   // reference.
   if (is_postfix) frame_->SetElementAt(target.size(), &old_value);

   frame_->Push(&new_value);
   // Non-constant: update the reference.
   if (!is_const) target.SetValue(NOT_CONST_INIT);
 }

 // Postfix: drop the new value and use the old.
(...skipping 4577 matching lines...)
 int CompareStub::MinorKey() {
   // Encode the two parameters in a unique 16 bit value.
   ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
   return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
 }


 #undef __

 } }  // namespace v8::internal
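Note: the sketch below is a minimal, standalone C++ model of the control flow this patch adopts, not V8 source. The names AllocateWithoutSpilling and IncrementNeedsDeferredPath, the register-count parameter, and the encoding of smi 1 as the integer 2 under a one-bit tag are illustrative assumptions; only kSmiTagMask == 1, kSmiTag == 0, and the fast-path/fallback split come from the diff above.

// Standalone sketch, not V8 source: "allocate a scratch register without
// spilling; if that fails, take a path that never touches the register".
#include <cstdint>
#include <iostream>
#include <optional>

constexpr uint32_t kSmiTagMask = 1;  // from the ASSERT in the patch
constexpr uint32_t kSmiTag = 0;

// Stand-in for the allocator call: hands out a register id only when one
// is free; it never spills, so it can fail and return an "invalid" result.
std::optional<int> AllocateWithoutSpilling(int free_registers) {
  if (free_registers > 0) return free_registers - 1;
  return std::nullopt;  // the case the old code did not handle
}

// Returns true when the deferred (slow) path must run: the increment
// overflowed or the operand is not a smi. Assumes smi 1 is encoded as the
// integer 2 under a one-bit tag.
bool IncrementNeedsDeferredPath(uint32_t tagged_value, int free_registers) {
  uint32_t result = tagged_value + 2;
  bool overflow = result < tagged_value;  // unsigned wrap stands in for the CPU flag

  std::optional<int> tmp = AllocateWithoutSpilling(free_registers);
  if (tmp.has_value()) {
    // Fast path: the patch preloads kSmiTagMask into the scratch register and
    // cmov's it over the result on overflow, so one test-and-branch covers
    // both the overflow and the non-smi cases.
    uint32_t test_value = overflow ? kSmiTagMask : result;
    return (test_value & kSmiTagMask) != kSmiTag;
  }
  // Fallback: no free register, so branch on overflow first, then test the
  // tag bit against an immediate -- two branches instead of one.
  if (overflow) return true;
  return (result & kSmiTagMask) != kSmiTag;
}

int main() {
  std::cout << IncrementNeedsDeferredPath(4, 1) << "\n";  // smi 2, register free: 0
  std::cout << IncrementNeedsDeferredPath(4, 0) << "\n";  // smi 2, fallback path: 0
  std::cout << IncrementNeedsDeferredPath(5, 0) << "\n";  // tagged heap pointer: 1
}

Either path reaches the same decision; the scratch register only saves a branch, which is why it is safe to fall back when AllocateWithoutSpilling() cannot provide one.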