Chromium Code Reviews

Side by Side Diff: src/arm/codegen-arm.cc

Issue 2122021: Make ldrd and strd instructions take two register arguments (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 6 months ago
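All of the hunks in this file follow the same pattern: the second register of the ldrd/strd pair, which the old assembler interface left implicit, is now spelled out at each call site. A representative before/after pair, taken from the hunks below (on ARM, ldrd/strd operate on two consecutive registers whose first member is even-numbered, e.g. r0/r1 or r2/r3):

    // Old form: only the first register of the pair is named; its partner is implied.
    __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
    __ strd(r0, MemOperand(sp, 2 * kPointerSize));

    // New form in this patch: both registers of the pair appear explicitly.
    __ ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
    __ strd(r0, r1, MemOperand(sp, 2 * kPointerSize));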
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1496 matching lines...)
1507 1507
1508 // Stack and frame now have 4 elements. 1508 // Stack and frame now have 4 elements.
1509 __ bind(&slow); 1509 __ bind(&slow);
1510 1510
1511 // Generic computation of x.apply(y, args) with no special optimization. 1511 // Generic computation of x.apply(y, args) with no special optimization.
1512 // Flip applicand.apply and applicand on the stack, so 1512 // Flip applicand.apply and applicand on the stack, so
1513 // applicand looks like the receiver of the applicand.apply call. 1513 // applicand looks like the receiver of the applicand.apply call.
1514 // Then process it as a normal function call. 1514 // Then process it as a normal function call.
1515 __ ldr(r0, MemOperand(sp, 3 * kPointerSize)); 1515 __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
1516 __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); 1516 __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1517 __ strd(r0, MemOperand(sp, 2 * kPointerSize)); 1517 __ strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
1518 1518
1519 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS); 1519 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
1520 frame_->CallStub(&call_function, 3); 1520 frame_->CallStub(&call_function, 3);
1521 // The function and its two arguments have been dropped. 1521 // The function and its two arguments have been dropped.
1522 frame_->Drop(); // Drop the receiver as well. 1522 frame_->Drop(); // Drop the receiver as well.
1523 frame_->EmitPush(r0); 1523 frame_->EmitPush(r0);
1524 // Stack now has 1 element: 1524 // Stack now has 1 element:
1525 // sp[0]: result 1525 // sp[0]: result
1526 __ bind(&done); 1526 __ bind(&done);
1527 1527
(...skipping 770 matching lines...)
2298 // sp[1] : array/enum cache length 2298 // sp[1] : array/enum cache length
2299 // sp[2] : array or enum cache 2299 // sp[2] : array or enum cache
2300 // sp[3] : 0 or map 2300 // sp[3] : 0 or map
2301 // sp[4] : enumerable 2301 // sp[4] : enumerable
2302 // Grab the current frame's height for the break and continue 2302 // Grab the current frame's height for the break and continue
2303 // targets only after all the state is pushed on the frame. 2303 // targets only after all the state is pushed on the frame.
2304 node->break_target()->SetExpectedHeight(); 2304 node->break_target()->SetExpectedHeight();
2305 node->continue_target()->SetExpectedHeight(); 2305 node->continue_target()->SetExpectedHeight();
2306 2306
2307 // Load the current count to r0, load the length to r1. 2307 // Load the current count to r0, load the length to r1.
2308 __ ldrd(r0, frame_->ElementAt(0)); 2308 __ ldrd(r0, r1, frame_->ElementAt(0));
2309 __ cmp(r0, r1); // compare to the array length 2309 __ cmp(r0, r1); // compare to the array length
2310 node->break_target()->Branch(hs); 2310 node->break_target()->Branch(hs);
2311 2311
2312 // Get the i'th entry of the array. 2312 // Get the i'th entry of the array.
2313 __ ldr(r2, frame_->ElementAt(2)); 2313 __ ldr(r2, frame_->ElementAt(2));
2314 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 2314 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2315 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); 2315 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
2316 2316
2317 // Get Map or 0. 2317 // Get Map or 0.
2318 __ ldr(r2, frame_->ElementAt(3)); 2318 __ ldr(r2, frame_->ElementAt(3));
(...skipping 4052 matching lines...)
6371 // Load the double from rhs, tagged HeapNumber r0, to d6. 6371 // Load the double from rhs, tagged HeapNumber r0, to d6.
6372 __ sub(r7, r0, Operand(kHeapObjectTag)); 6372 __ sub(r7, r0, Operand(kHeapObjectTag));
6373 __ vldr(d6, r7, HeapNumber::kValueOffset); 6373 __ vldr(d6, r7, HeapNumber::kValueOffset);
6374 } else { 6374 } else {
6375 __ push(lr); 6375 __ push(lr);
6376 // Convert lhs to a double in r2, r3. 6376 // Convert lhs to a double in r2, r3.
6377 __ mov(r7, Operand(r1)); 6377 __ mov(r7, Operand(r1));
6378 ConvertToDoubleStub stub1(r3, r2, r7, r6); 6378 ConvertToDoubleStub stub1(r3, r2, r7, r6);
6379 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); 6379 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
6380 // Load rhs to a double in r0, r1. 6380 // Load rhs to a double in r0, r1.
6381 __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); 6381 __ ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
6382 __ pop(lr); 6382 __ pop(lr);
6383 } 6383 }
6384 6384
6385 // We now have both loaded as doubles but we can skip the lhs nan check 6385 // We now have both loaded as doubles but we can skip the lhs nan check
6386 // since it's a smi. 6386 // since it's a smi.
6387 __ jmp(lhs_not_nan); 6387 __ jmp(lhs_not_nan);
6388 6388
6389 __ bind(&rhs_is_smi); 6389 __ bind(&rhs_is_smi);
6390 // Rhs is a smi. Check whether the non-smi lhs is a heap number. 6390 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
6391 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); 6391 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
(...skipping 14 matching lines...)
6406 CpuFeatures::Scope scope(VFP3); 6406 CpuFeatures::Scope scope(VFP3);
6407 // Load the double from lhs, tagged HeapNumber r1, to d7. 6407 // Load the double from lhs, tagged HeapNumber r1, to d7.
6408 __ sub(r7, r1, Operand(kHeapObjectTag)); 6408 __ sub(r7, r1, Operand(kHeapObjectTag));
6409 __ vldr(d7, r7, HeapNumber::kValueOffset); 6409 __ vldr(d7, r7, HeapNumber::kValueOffset);
6410 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); 6410 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
6411 __ vmov(s13, r7); 6411 __ vmov(s13, r7);
6412 __ vcvt_f64_s32(d6, s13); 6412 __ vcvt_f64_s32(d6, s13);
6413 } else { 6413 } else {
6414 __ push(lr); 6414 __ push(lr);
6415 // Load lhs to a double in r2, r3. 6415 // Load lhs to a double in r2, r3.
6416 __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); 6416 __ ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
6417 // Convert rhs to a double in r0, r1. 6417 // Convert rhs to a double in r0, r1.
6418 __ mov(r7, Operand(r0)); 6418 __ mov(r7, Operand(r0));
6419 ConvertToDoubleStub stub2(r1, r0, r7, r6); 6419 ConvertToDoubleStub stub2(r1, r0, r7, r6);
6420 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); 6420 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
6421 __ pop(lr); 6421 __ pop(lr);
6422 } 6422 }
6423 // Fall through to both_loaded_as_doubles. 6423 // Fall through to both_loaded_as_doubles.
6424 } 6424 }
6425 6425
6426 6426
(...skipping 143 matching lines...)
6570 6570
6571 // Both are heap numbers. Load them up then jump to the code we have 6571 // Both are heap numbers. Load them up then jump to the code we have
6572 // for that. 6572 // for that.
6573 if (CpuFeatures::IsSupported(VFP3)) { 6573 if (CpuFeatures::IsSupported(VFP3)) {
6574 CpuFeatures::Scope scope(VFP3); 6574 CpuFeatures::Scope scope(VFP3);
6575 __ sub(r7, r0, Operand(kHeapObjectTag)); 6575 __ sub(r7, r0, Operand(kHeapObjectTag));
6576 __ vldr(d6, r7, HeapNumber::kValueOffset); 6576 __ vldr(d6, r7, HeapNumber::kValueOffset);
6577 __ sub(r7, r1, Operand(kHeapObjectTag)); 6577 __ sub(r7, r1, Operand(kHeapObjectTag));
6578 __ vldr(d7, r7, HeapNumber::kValueOffset); 6578 __ vldr(d7, r7, HeapNumber::kValueOffset);
6579 } else { 6579 } else {
6580 __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); 6580 __ ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
6581 __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); 6581 __ ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
6582 } 6582 }
6583 __ jmp(both_loaded_as_doubles); 6583 __ jmp(both_loaded_as_doubles);
6584 } 6584 }
6585 6585
6586 6586
6587 // Fast negative check for symbol-to-symbol equality. 6587 // Fast negative check for symbol-to-symbol equality.
6588 static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) { 6588 static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
6589 // r2 is object type of r0. 6589 // r2 is object type of r0.
6590 // Ensure that no non-strings have the symbol bit set. 6590 // Ensure that no non-strings have the symbol bit set.
6591 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); 6591 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
(...skipping 356 matching lines...)
6948 if (mode_ == OVERWRITE_RIGHT) { 6948 if (mode_ == OVERWRITE_RIGHT) {
6949 __ mov(r5, Operand(r0)); // Overwrite this heap number. 6949 __ mov(r5, Operand(r0)); // Overwrite this heap number.
6950 } 6950 }
6951 if (use_fp_registers) { 6951 if (use_fp_registers) {
6952 CpuFeatures::Scope scope(VFP3); 6952 CpuFeatures::Scope scope(VFP3);
6953 // Load the double from tagged HeapNumber r0 to d7. 6953 // Load the double from tagged HeapNumber r0 to d7.
6954 __ sub(r7, r0, Operand(kHeapObjectTag)); 6954 __ sub(r7, r0, Operand(kHeapObjectTag));
6955 __ vldr(d7, r7, HeapNumber::kValueOffset); 6955 __ vldr(d7, r7, HeapNumber::kValueOffset);
6956 } else { 6956 } else {
6957 // Calling convention says that second double is in r2 and r3. 6957 // Calling convention says that second double is in r2 and r3.
6958 __ ldrd(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); 6958 __ ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
6959 } 6959 }
6960 __ jmp(&finished_loading_r0); 6960 __ jmp(&finished_loading_r0);
6961 __ bind(&r0_is_smi); 6961 __ bind(&r0_is_smi);
6962 if (mode_ == OVERWRITE_RIGHT) { 6962 if (mode_ == OVERWRITE_RIGHT) {
6963 // We can't overwrite a Smi so get address of new heap number into r5. 6963 // We can't overwrite a Smi so get address of new heap number into r5.
6964 __ AllocateHeapNumber(r5, r6, r7, &slow); 6964 __ AllocateHeapNumber(r5, r6, r7, &slow);
6965 } 6965 }
6966 6966
6967 if (use_fp_registers) { 6967 if (use_fp_registers) {
6968 CpuFeatures::Scope scope(VFP3); 6968 CpuFeatures::Scope scope(VFP3);
(...skipping 31 matching lines...)
7000 if (mode_ == OVERWRITE_LEFT) { 7000 if (mode_ == OVERWRITE_LEFT) {
7001 __ mov(r5, Operand(r1)); // Overwrite this heap number. 7001 __ mov(r5, Operand(r1)); // Overwrite this heap number.
7002 } 7002 }
7003 if (use_fp_registers) { 7003 if (use_fp_registers) {
7004 CpuFeatures::Scope scope(VFP3); 7004 CpuFeatures::Scope scope(VFP3);
7005 // Load the double from tagged HeapNumber r1 to d6. 7005 // Load the double from tagged HeapNumber r1 to d6.
7006 __ sub(r7, r1, Operand(kHeapObjectTag)); 7006 __ sub(r7, r1, Operand(kHeapObjectTag));
7007 __ vldr(d6, r7, HeapNumber::kValueOffset); 7007 __ vldr(d6, r7, HeapNumber::kValueOffset);
7008 } else { 7008 } else {
7009 // Calling convention says that first double is in r0 and r1. 7009 // Calling convention says that first double is in r0 and r1.
7010 __ ldrd(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); 7010 __ ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
7011 } 7011 }
7012 __ jmp(&finished_loading_r1); 7012 __ jmp(&finished_loading_r1);
7013 __ bind(&r1_is_smi); 7013 __ bind(&r1_is_smi);
7014 if (mode_ == OVERWRITE_LEFT) { 7014 if (mode_ == OVERWRITE_LEFT) {
7015 // We can't overwrite a Smi so get address of new heap number into r5. 7015 // We can't overwrite a Smi so get address of new heap number into r5.
7016 __ AllocateHeapNumber(r5, r6, r7, &slow); 7016 __ AllocateHeapNumber(r5, r6, r7, &slow);
7017 } 7017 }
7018 7018
7019 if (use_fp_registers) { 7019 if (use_fp_registers) {
7020 CpuFeatures::Scope scope(VFP3); 7020 CpuFeatures::Scope scope(VFP3);
(...skipping 50 matching lines...)
7071 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); 7071 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
7072 // Store answer in the overwritable heap number. 7072 // Store answer in the overwritable heap number.
7073 #if !defined(USE_ARM_EABI) 7073 #if !defined(USE_ARM_EABI)
7074 // Double returned in fp coprocessor register 0 and 1, encoded as register 7074 // Double returned in fp coprocessor register 0 and 1, encoded as register
7075 // cr8. Offsets must be divisible by 4 for coprocessor so we need to 7075 // cr8. Offsets must be divisible by 4 for coprocessor so we need to
7076 // subtract the tag from r5. 7076 // subtract the tag from r5.
7077 __ sub(r4, r5, Operand(kHeapObjectTag)); 7077 __ sub(r4, r5, Operand(kHeapObjectTag));
7078 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); 7078 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
7079 #else 7079 #else
7080 // Double returned in registers 0 and 1. 7080 // Double returned in registers 0 and 1.
7081 __ strd(r0, FieldMemOperand(r5, HeapNumber::kValueOffset)); 7081 __ strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
7082 #endif 7082 #endif
7083 __ mov(r0, Operand(r5)); 7083 __ mov(r0, Operand(r5));
7084 // And we are done. 7084 // And we are done.
7085 __ pop(pc); 7085 __ pop(pc);
7086 } 7086 }
7087 } 7087 }
7088 7088
7089 7089
7090 if (lhs.is(r0)) { 7090 if (lhs.is(r0)) {
7091 __ b(&slow); 7091 __ b(&slow);
(...skipping 2939 matching lines...)
10031 __ bind(&string_add_runtime); 10031 __ bind(&string_add_runtime);
10032 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); 10032 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
10033 } 10033 }
10034 10034
10035 10035
10036 #undef __ 10036 #undef __
10037 10037
10038 } } // namespace v8::internal 10038 } } // namespace v8::internal
10039 10039
10040 #endif // V8_TARGET_ARCH_ARM 10040 #endif // V8_TARGET_ARCH_ARM
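The interface change itself lives in src/arm/assembler-arm.cc (the previous file in this change) and is not shown here. With both registers explicit, the emitter can check the register-pair constraint at every call site. A minimal sketch of what such a check might look like; the assertions and the addrmod3 encoding call are assumptions for illustration, not the patch's actual code:

    // Sketch only: assumed shape of the two-register ldrd entry point.
    void Assembler::ldrd(Register dst1, Register dst2,
                         const MemOperand& src, Condition cond) {
      ASSERT(src.rm().is(no_reg));              // assumed: immediate-offset form only
      ASSERT_EQ(0, dst1.code() % 2);            // first register of the pair must be even
      ASSERT_EQ(dst1.code() + 1, dst2.code());  // pair must be consecutive, e.g. r0/r1
      addrmod3(cond | B7 | B6 | B4, dst1, src); // assumed addressing-mode-3 encode helper
    }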