Chromium Code Reviews

Unified Diff: src/arm/codegen-arm.cc

Issue 2019003: ldrd/strd
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 7 months ago
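This patch replaces pairs of adjacent single-word ldr/str instructions with single ldrd/strd doubleword transfers. As a rough model (alignment and addressing-mode constraints elided; ldrd_model and strd_model are illustrative names, not V8 helpers), the two instructions behave like this:

    #include <cstdint>

    // ldrd fills a consecutive register pair (Rt, Rt+1) from two
    // adjacent words in memory; strd is the mirror-image store.
    void ldrd_model(const uint32_t* addr, uint32_t* rt, uint32_t* rt2) {
      *rt  = addr[0];  // Rt   <- [addr]
      *rt2 = addr[1];  // Rt+1 <- [addr + 4]
    }

    void strd_model(uint32_t* addr, uint32_t rt, uint32_t rt2) {
      addr[0] = rt;    // [addr]     <- Rt
      addr[1] = rt2;   // [addr + 4] <- Rt+1
    }

In the ARM encodings the first register of the pair must be even-numbered, which is why the changes below always pair (r0, r1) or (r2, r3).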
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1468 matching lines...)

   // Stack and frame now have 4 elements.
   __ bind(&slow);

   // Generic computation of x.apply(y, args) with no special optimization.
   // Flip applicand.apply and applicand on the stack, so
   // applicand looks like the receiver of the applicand.apply call.
   // Then process it as a normal function call.
   __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
   __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ str(r0, MemOperand(sp, 2 * kPointerSize));
-  __ str(r1, MemOperand(sp, 3 * kPointerSize));
+  __ strd(r0, MemOperand(sp, 2 * kPointerSize));

   CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
   frame_->CallStub(&call_function, 3);
   // The function and its two arguments have been dropped.
   frame_->Drop();  // Drop the receiver as well.
   frame_->EmitPush(r0);
   // Stack now has 1 element:
   //   sp[0]: result
   __ bind(&done);

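The change above collapses the two stores of the stack swap into one strd: after the two loads, r0 holds the old sp[3] and r1 the old sp[2], and the single strd writes r0 to sp[2] and r1 to sp[3]. A minimal model of the swap (SwapApplicandPair is a hypothetical name; the real code operates on the VM stack):

    #include <cstdint>

    // r0 <- sp[3], r1 <- sp[2]; strd then writes r0 -> sp[2] and
    // r1 -> sp[3], swapping the two slots exactly as the replaced
    // str/str pair did.
    void SwapApplicandPair(uint32_t* sp) {
      uint32_t r0 = sp[3];
      uint32_t r1 = sp[2];
      sp[2] = r0;
      sp[3] = r1;
    }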
(...skipping 771 matching lines...)
   // sp[0] : index
   // sp[1] : array/enum cache length
   // sp[2] : array or enum cache
   // sp[3] : 0 or map
   // sp[4] : enumerable
   // Grab the current frame's height for the break and continue
   // targets only after all the state is pushed on the frame.
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);

-  __ ldr(r0, frame_->ElementAt(0));  // load the current count
-  __ ldr(r1, frame_->ElementAt(1));  // load the length
+  // Load the current count to r0, load the length to r1.
+  __ ldrd(r0, frame_->ElementAt(0));
   __ cmp(r0, r1);  // compare to the array length
   node->break_target()->Branch(hs);

   __ ldr(r0, frame_->ElementAt(0));

   // Get the i'th entry of the array.
   __ ldr(r2, frame_->ElementAt(2));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));

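Here the count and length live in two adjacent frame slots, so the patched code fetches both with a single ldrd into the pair (r0, r1); the new comment compensates for the instruction no longer naming the second destination register. A sketch of the access pattern (LoadCountAndLength is an illustrative name):

    #include <cstdint>

    // One doubleword load fills (r0, r1) from the two adjacent slots
    // frame_->ElementAt(0) and frame_->ElementAt(1).
    void LoadCountAndLength(const uint32_t* frame_top,
                            uint32_t* r0, uint32_t* r1) {
      *r0 = frame_top[0];  // the current count (a smi)
      *r1 = frame_top[1];  // the array / enum cache length
    }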
(...skipping 3947 matching lines...)
     // Load the double from rhs, tagged HeapNumber r0, to d6.
     __ sub(r7, r0, Operand(kHeapObjectTag));
     __ vldr(d6, r7, HeapNumber::kValueOffset);
   } else {
     __ push(lr);
     // Convert lhs to a double in r2, r3.
     __ mov(r7, Operand(r1));
     ConvertToDoubleStub stub1(r3, r2, r7, r6);
     __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
     // Load rhs to a double in r0, r1.
-    __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
-    __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ pop(lr);
   }

   // We now have both loaded as doubles but we can skip the lhs nan check
   // since it's a smi.
   __ jmp(lhs_not_nan);

   __ bind(&rhs_is_smi);
   // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
   __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
(...skipping 14 matching lines...)
     CpuFeatures::Scope scope(VFP3);
     // Load the double from lhs, tagged HeapNumber r1, to d7.
     __ sub(r7, r1, Operand(kHeapObjectTag));
     __ vldr(d7, r7, HeapNumber::kValueOffset);
     __ mov(r7, Operand(r0, ASR, kSmiTagSize));
     __ vmov(s13, r7);
     __ vcvt_f64_s32(d6, s13);
   } else {
     __ push(lr);
     // Load lhs to a double in r2, r3.
-    __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
-    __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
     // Convert rhs to a double in r0, r1.
     __ mov(r7, Operand(r0));
     ConvertToDoubleStub stub2(r1, r0, r7, r6);
     __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
     __ pop(lr);
   }
   // Fall through to both_loaded_as_doubles.
 }

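In both non-VFP3 branches above, the 64-bit payload of a HeapNumber occupies two consecutive words starting at HeapNumber::kValueOffset, so one ldrd replaces the two ldr instructions. Note also that the first chunk's old code had to defer the load into r0, its base register, until last; the single ldrd removes that ordering concern. A model of the load (assuming little-endian word order, as on typical ARM targets; LoadHeapNumberValue is an illustrative name):

    #include <cstdint>

    // The double spans two adjacent words at the value offset; one
    // ldrd pulls both into a core register pair, low word first.
    void LoadHeapNumberValue(const uint32_t* untagged_object,
                             int value_offset_in_words,
                             uint32_t* lo, uint32_t* hi) {
      *lo = untagged_object[value_offset_in_words];      // e.g. r2 (or r0)
      *hi = untagged_object[value_offset_in_words + 1];  // e.g. r3 (or r1)
    }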
(...skipping 143 matching lines...)

   // Both are heap numbers.  Load them up then jump to the code we have
   // for that.
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ sub(r7, r0, Operand(kHeapObjectTag));
     __ vldr(d6, r7, HeapNumber::kValueOffset);
     __ sub(r7, r1, Operand(kHeapObjectTag));
     __ vldr(d7, r7, HeapNumber::kValueOffset);
   } else {
-    __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
-    __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
-    __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
-    __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
   }
   __ jmp(both_loaded_as_doubles);
 }

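Note the instruction order in the new else branch: the ldrd that targets (r0, r1) uses r0 as its base and clobbers both registers, including the r1 base needed by the other load, so the load based on r1 must come first. A model of the constraint (LoadBothHeapNumbers and off are illustrative names):

    #include <cstdint>

    // regs[0..3] model r0..r3. The (r0, r1) load destroys both base
    // registers, so the (r2, r3) load from [r1 + off] must happen first.
    void LoadBothHeapNumbers(const uint32_t* r0_obj, const uint32_t* r1_obj,
                             int off, uint32_t regs[4]) {
      regs[2] = r1_obj[off];      // ldrd r2, [r1, #off]: bases still intact
      regs[3] = r1_obj[off + 1];
      regs[0] = r0_obj[off];      // ldrd r0, [r0, #off]: overwrites r0 and
      regs[1] = r0_obj[off + 1];  // r1, so no later load may use them
    }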
 // Fast negative check for symbol-to-symbol equality.
 static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
   // r2 is object type of r0.
   // Ensure that no non-strings have the symbol bit set.
   ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
(...skipping 356 matching lines...)
   if (mode_ == OVERWRITE_RIGHT) {
     __ mov(r5, Operand(r0));  // Overwrite this heap number.
   }
   if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double from tagged HeapNumber r0 to d7.
     __ sub(r7, r0, Operand(kHeapObjectTag));
     __ vldr(d7, r7, HeapNumber::kValueOffset);
   } else {
     // Calling convention says that second double is in r2 and r3.
-    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+    __ ldrd(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
   }
   __ jmp(&finished_loading_r0);
   __ bind(&r0_is_smi);
   if (mode_ == OVERWRITE_RIGHT) {
     // We can't overwrite a Smi so get address of new heap number into r5.
     __ AllocateHeapNumber(r5, r6, r7, &slow);
   }

   if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
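The calling-convention comments in this chunk and the next refer to the soft-float ABI used when VFP is unavailable: each double argument is split into two 32-bit words and passed in a core register pair, the first double in (r0, r1) and the second in (r2, r3). A self-contained sketch of that split (SplitDouble and RegPair are illustrative names, assuming little-endian word order):

    #include <cstdint>
    #include <cstring>

    struct RegPair { uint32_t lo, hi; };  // models (r0, r1) or (r2, r3)

    // Split a double into the two words the soft-float convention
    // passes in core registers: low word in the even register.
    RegPair SplitDouble(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return { static_cast<uint32_t>(bits),          // -> r0 or r2
               static_cast<uint32_t>(bits >> 32) };  // -> r1 or r3
    }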
(...skipping 31 matching lines...)
   if (mode_ == OVERWRITE_LEFT) {
     __ mov(r5, Operand(r1));  // Overwrite this heap number.
   }
   if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double from tagged HeapNumber r1 to d6.
     __ sub(r7, r1, Operand(kHeapObjectTag));
     __ vldr(d6, r7, HeapNumber::kValueOffset);
   } else {
     // Calling convention says that first double is in r0 and r1.
-    __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
-    __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+    __ ldrd(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
   }
   __ jmp(&finished_loading_r1);
   __ bind(&r1_is_smi);
   if (mode_ == OVERWRITE_LEFT) {
     // We can't overwrite a Smi so get address of new heap number into r5.
     __ AllocateHeapNumber(r5, r6, r7, &slow);
   }

   if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
(...skipping 50 matching lines...)
     __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
     // Store answer in the overwritable heap number.
 #if !defined(USE_ARM_EABI)
     // Double returned in fp coprocessor register 0 and 1, encoded as
     // register cr8.  Offsets must be divisible by 4 for coprocessor so we
     // need to subtract the tag from r5.
     __ sub(r4, r5, Operand(kHeapObjectTag));
     __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
 #else
     // Double returned in registers 0 and 1.
-    __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
-    __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
+    __ strd(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
 #endif
     __ mov(r0, Operand(r5));
     // And we are done.
     __ pop(pc);
   }
 }



   if (lhs.is(r0)) {
     __ b(&slow);
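The strd in the EABI branch above writes both result words straight through the tagged pointer in r5: FieldMemOperand folds the -kHeapObjectTag adjustment into the immediate offset, which is why the coprocessor path needs an explicit sub while this path does not. A sketch of that offset arithmetic (FieldOffset is an illustrative name; the constant matches V8's tagging scheme):

    #include <cstdint>

    constexpr int32_t kHeapObjectTag = 1;  // heap pointers carry tag bit 1

    // Mirrors FieldMemOperand: convert an untagged field offset into a
    // displacement usable directly off a tagged object pointer.
    constexpr int32_t FieldOffset(int32_t field_offset) {
      return field_offset - kHeapObjectTag;
    }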
(...skipping 2932 matching lines...)

   // Just jump to runtime to add the two strings.
   __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }


 #undef __

 } }  // namespace v8::internal
