Chromium Code Reviews

Unified Diff: src/x64/codegen-x64.cc

Issue 1869001: Add inlined code for (constant SHL smi), ported from ia32 to x64. (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 7 months ago
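
Background for reading the patch (an illustrative sketch, not part of the CL): on x64, V8 of this era keeps a smi's 32-bit payload in the upper half of a 64-bit tagged word, with a zero tag bit at the bottom. That is why the generated code below can untag with SmiToInteger32, shift with shl_cl, and retag with Integer32ToSmi without an overflow check: every int32 result fits in an x64 smi. The plain C++ model below assumes that encoding; all names are hypothetical, not V8 APIs.

    #include <cstdint>

    // Stand-in for the GenericBinaryOpStub slow path (heap numbers etc.).
    int64_t GenericShl(int32_t lhs, int64_t rhs_tagged) {
      (void)lhs; (void)rhs_tagged;
      return 0;  // placeholder only
    }

    // Hypothetical model of the inlined fast path for (constant << smi).
    int64_t ConstantShlSmi(int32_t int_value, int64_t rhs_tagged) {
      if ((rhs_tagged & 1) != 0) {                 // CheckSmi: smi tag bit is 0
        return GenericShl(int_value, rhs_tagged);  // deferred->Branch(...)
      }
      int32_t count = static_cast<int32_t>(rhs_tagged >> 32);  // SmiToInteger32
      uint32_t result =
          static_cast<uint32_t>(int_value) << (count & 0x1f);  // shl_cl
      // Integer32ToSmi: retag; any int32 fits, so no overflow check needed.
      return static_cast<int64_t>(static_cast<uint64_t>(result) << 32);
    }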
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 175 matching lines...)

  private:
   Token::Value op_;
   Register dst_;
   Register src_;
   Smi* value_;
   OverwriteMode overwrite_mode_;
 };


+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
+class DeferredInlineSmiOperationReversed: public DeferredCode {
+ public:
+  DeferredInlineSmiOperationReversed(Token::Value op,
+                                     Register dst,
+                                     Smi* value,
+                                     Register src,
+                                     OverwriteMode overwrite_mode)
+      : op_(op),
+        dst_(dst),
+        value_(value),
+        src_(src),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiOperationReversed");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Smi* value_;
+  Register src_;
+  OverwriteMode overwrite_mode_;
+};
+
+
 class FloatingPointHelper : public AllStatic {
  public:
   // Code pattern for loading a floating point value. Input value must
   // be either a smi or a heap number object (fp value). Requirements:
   // operand on TOS+1. Returns operand as floating point number on FPU
   // stack.
   static void LoadFloatOperand(MacroAssembler* masm, Register scratch);

   // Code pattern for loading a floating point value. Input value must
   // be either a smi or a heap number object (fp value). Requirements:
(...skipping 6150 matching lines...)
   // For mod we don't generate all the Smi code inline.
   GenericBinaryOpStub stub(
       op_,
       overwrite_mode_,
       (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
   stub.GenerateCall(masm_, src_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }


+void DeferredInlineSmiOperationReversed::Generate() {
+  GenericBinaryOpStub stub(
+      op_,
+      overwrite_mode_,
+      NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, value_, src_);
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
 Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
                                                  Result* operand,
                                                  Handle<Object> value,
                                                  bool reversed,
                                                  OverwriteMode overwrite_mode) {
   // NOTE: This is an attempt to inline (a bit) more of the code for
   // some possible smi operations (like + and -) when (at least) one
   // of the operands is a constant smi.
   // Consumes the argument "operand".
   if (IsUnsafeSmi(value)) {
(...skipping 109 matching lines...)
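A note on "reversed" (a hypothetical illustration, not from the CL): the new Generate() above calls the stub as GenerateCall(masm_, value_, src_), i.e. it computes value op src, because when the constant is the left operand the order matters for non-commutative operators such as SHL:

    #include <cstdio>

    int main() {
      int x = 3;
      std::printf("%d\n", 2 << x);  // reversed case: constant << variable == 16
      std::printf("%d\n", x << 2);  // normal case:   variable << constant == 12
      return 0;
    }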
                                         operand->reg(),
                                         shift_value,
                                         deferred->entry_label());
         deferred->BindExit();
         operand->Unuse();
       }
       break;

     case Token::SHL:
       if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
+        // Move operand into rcx and also into a second register.
+        // If operand is already in a register, take advantage of that.
+        // This lets us modify rcx, but still bail out to deferred code.
+        Result right;
+        Result right_copy_in_rcx;
+        TypeInfo right_type_info = operand->type_info();
+        operand->ToRegister();
+        if (operand->reg().is(rcx)) {
+          right = allocator()->Allocate();
+          __ movq(right.reg(), rcx);
+          frame_->Spill(rcx);
+          right_copy_in_rcx = *operand;
+        } else {
+          right_copy_in_rcx = allocator()->Allocate(rcx);
+          __ movq(rcx, operand->reg());
+          right = *operand;
+        }
+        operand->Unuse();
+
+        answer = allocator()->Allocate();
+        DeferredInlineSmiOperationReversed* deferred =
+            new DeferredInlineSmiOperationReversed(op,
+                                                   answer.reg(),
+                                                   smi_value,
+                                                   right.reg(),
+                                                   overwrite_mode);
+        __ movq(answer.reg(), Immediate(int_value));
+        __ SmiToInteger32(rcx, rcx);
+        if (!right.type_info().IsSmi()) {
+          Condition is_smi = masm_->CheckSmi(right.reg());
+          deferred->Branch(NegateCondition(is_smi));
+        } else if (FLAG_debug_code) {
+          __ AbortIfNotSmi(right.reg(),
+              "Static type info claims non-smi is smi in (const SHL smi).");
+        }
+        __ shl_cl(answer.reg());
+        __ Integer32ToSmi(answer.reg(), answer.reg());
+
+        deferred->BindExit();
       } else {
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
         int shift_value = int_value & 0x1f;
         operand->ToRegister();
         if (shift_value == 0) {
           // Spill operand so it can be overwritten in the slow case.
           frame_->Spill(operand->reg());
           DeferredInlineSmiOperation* deferred =
               new DeferredInlineSmiOperation(op,
(...skipping 686 matching lines...)
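The comment above that "only the least significant 5 bits of the shift value are used" matches both the hardware behavior of shl with a cl count and ECMAScript's definition of <<, which reduces the count modulo 32. A standalone check of that equivalence (illustrative only, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 1;
      int count = 33;                       // JS: 1 << 33 evaluates to 2
      assert((x << (count & 0x1f)) == 2u);  // 33 & 0x1f == 1
      return 0;
    }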
             new DeferredReferenceSetKeyedValue(value.reg(),
                                                key.reg(),
                                                receiver.reg());

         // Check that the receiver is not a smi.
         __ JumpIfSmi(receiver.reg(), deferred->entry_label());

         // Check that the key is a smi.
         if (!key.is_smi()) {
           __ JumpIfNotSmi(key.reg(), deferred->entry_label());
-        } else {
-          if (FLAG_debug_code) {
-            __ AbortIfNotSmi(key.reg(), "Non-smi value in smi-typed value.");
-          }
+        } else if (FLAG_debug_code) {
+          __ AbortIfNotSmi(key.reg(), "Non-smi value in smi-typed value.");
         }

         // Check that the receiver is a JSArray.
         __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
         deferred->Branch(not_equal);

         // Check that the key is within bounds. Both the key and the
         // length of the JSArray are smis. Use unsigned comparison to handle
         // negative keys.
         __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
(...skipping 2736 matching lines...)
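The bounds check above relies on a standard trick: the JSArray length is a non-negative smi, so a single unsigned comparison rejects both too-large and negative keys. A minimal sketch of the idea in plain C++ (KeyInBounds is a hypothetical name, not V8 code):

    #include <cassert>
    #include <cstdint>

    // A negative key, viewed as unsigned, becomes a huge value and so
    // fails key < length in the same compare that catches key >= length.
    bool KeyInBounds(int64_t key, int64_t length) {
      return static_cast<uint64_t>(key) < static_cast<uint64_t>(length);
    }

    int main() {
      assert(KeyInBounds(0, 4));
      assert(!KeyInBounds(4, 4));
      assert(!KeyInBounds(-1, 4));  // static_cast<uint64_t>(-1) is huge
      return 0;
    }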
       // (and only if smi code is generated). This is the right moment to
       // patch to HEAP_NUMBERS state. The transition is attempted only for
       // the four basic operations. The stub stays in the DEFAULT state
       // forever for all other operations (also if smi code is skipped).
       GenerateTypeTransition(masm);
     }

     Label not_floats;
     // rax: y
     // rdx: x
-    if (static_operands_type_.IsNumber()) {
-      if (FLAG_debug_code) {
-        // Assert at runtime that inputs are only numbers.
-        __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
-        __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
-      }
+    if (static_operands_type_.IsNumber() && FLAG_debug_code) {
+      // Assert at runtime that inputs are only numbers.
+      __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
+      __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
     } else {
       FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
     }
     // Fast-case: Both operands are numbers.
     // xmm4 and xmm5 are volatile XMM registers.
     FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);

     switch (op_) {
       case Token::ADD: __ addsd(xmm4, xmm5); break;
       case Token::SUB: __ subsd(xmm4, xmm5); break;
(...skipping 1356 matching lines...)
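Once CheckNumberOperands has weeded out non-numbers, the fast case above is plain SSE2 scalar double arithmetic. The intrinsic equivalent of the addsd line, as a hypothetical illustration (not V8 code):

    #include <emmintrin.h>
    #include <cstdio>

    int main() {
      __m128d x = _mm_set_sd(1.5);     // rdx: x, loaded as a double
      __m128d y = _mm_set_sd(2.25);    // rax: y, loaded as a double
      __m128d sum = _mm_add_sd(x, y);  // addsd xmm4, xmm5
      double out;
      _mm_store_sd(&out, sum);
      std::printf("%g\n", out);        // 3.75
      return 0;
    }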
   // Call the function from C++.
   return FUNCTION_CAST<ModuloFunction>(buffer);
 }

 #endif


 #undef __

 } }  // namespace v8::internal
