Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(442)

Side by Side Diff: runtime/vm/intermediate_language_arm.cc

Issue 292433008: Allows unboxed doubles to be disabled. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « runtime/vm/instructions_arm.cc ('k') | runtime/vm/intrinsifier_arm.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLD | NEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 9
10 #include "vm/cpu.h" 10 #include "vm/cpu.h"
(...skipping 272 matching lines...) Expand 10 before | Expand all | Expand 10 after
283 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 283 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
284 locs->set_out(0, Location::RequiresFpuRegister()); 284 locs->set_out(0, Location::RequiresFpuRegister());
285 locs->set_temp(0, Location::RequiresRegister()); 285 locs->set_temp(0, Location::RequiresRegister());
286 return locs; 286 return locs;
287 } 287 }
288 288
289 289
290 void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 290 void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
291 // The register allocator drops constant definitions that have no uses. 291 // The register allocator drops constant definitions that have no uses.
292 if (!locs()->out(0).IsInvalid()) { 292 if (!locs()->out(0).IsInvalid()) {
293 if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0)) { 293 if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0) &&
294 TargetCPUFeatures::neon_supported()) {
294 const QRegister dst = locs()->out(0).fpu_reg(); 295 const QRegister dst = locs()->out(0).fpu_reg();
295 __ veorq(dst, dst, dst); 296 __ veorq(dst, dst, dst);
296 } else { 297 } else {
297 const DRegister dst = EvenDRegisterOf(locs()->out(0).fpu_reg()); 298 const DRegister dst = EvenDRegisterOf(locs()->out(0).fpu_reg());
298 const Register temp = locs()->temp(0).reg(); 299 const Register temp = locs()->temp(0).reg();
299 __ LoadDImmediate(dst, Double::Cast(value()).value(), temp); 300 __ LoadDImmediate(dst, Double::Cast(value()).value(), temp);
300 } 301 }
301 } 302 }
302 } 303 }
303 304
(...skipping 1765 matching lines...) Expand 10 before | Expand all | Expand 10 after
2069 __ TryAllocate(compiler->double_class(), 2070 __ TryAllocate(compiler->double_class(),
2070 slow_path->entry_label(), 2071 slow_path->entry_label(),
2071 temp, 2072 temp,
2072 temp2); 2073 temp2);
2073 __ Bind(slow_path->exit_label()); 2074 __ Bind(slow_path->exit_label());
2074 __ MoveRegister(temp2, temp); 2075 __ MoveRegister(temp2, temp);
2075 __ StoreIntoObject(instance_reg, 2076 __ StoreIntoObject(instance_reg,
2076 FieldAddress(instance_reg, offset_in_bytes_), 2077 FieldAddress(instance_reg, offset_in_bytes_),
2077 temp2); 2078 temp2);
2078 __ Bind(&copy_double); 2079 __ Bind(&copy_double);
2079 __ LoadDFromOffset(fpu_temp, 2080 __ CopyDoubleField(temp, value_reg, TMP, temp2, fpu_temp);
2080 value_reg,
2081 Double::value_offset() - kHeapObjectTag);
2082 __ StoreDToOffset(fpu_temp,
2083 temp,
2084 Double::value_offset() - kHeapObjectTag);
2085 __ b(&skip_store); 2081 __ b(&skip_store);
2086 } 2082 }
2087 2083
2088 { 2084 {
2089 __ Bind(&store_float32x4); 2085 __ Bind(&store_float32x4);
2090 Label copy_float32x4; 2086 Label copy_float32x4;
2091 StoreInstanceFieldSlowPath* slow_path = 2087 StoreInstanceFieldSlowPath* slow_path =
2092 new StoreInstanceFieldSlowPath(this, compiler->float32x4_class()); 2088 new StoreInstanceFieldSlowPath(this, compiler->float32x4_class());
2093 compiler->AddSlowPathCode(slow_path); 2089 compiler->AddSlowPathCode(slow_path);
2094 2090
2095 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes_)); 2091 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes_));
2096 __ CompareImmediate(temp, 2092 __ CompareImmediate(temp,
2097 reinterpret_cast<intptr_t>(Object::null())); 2093 reinterpret_cast<intptr_t>(Object::null()));
2098 __ b(&copy_float32x4, NE); 2094 __ b(&copy_float32x4, NE);
2099 2095
2100 __ TryAllocate(compiler->float32x4_class(), 2096 __ TryAllocate(compiler->float32x4_class(),
2101 slow_path->entry_label(), 2097 slow_path->entry_label(),
2102 temp, 2098 temp,
2103 temp2); 2099 temp2);
2104 __ Bind(slow_path->exit_label()); 2100 __ Bind(slow_path->exit_label());
2105 __ MoveRegister(temp2, temp); 2101 __ MoveRegister(temp2, temp);
2106 __ StoreIntoObject(instance_reg, 2102 __ StoreIntoObject(instance_reg,
2107 FieldAddress(instance_reg, offset_in_bytes_), 2103 FieldAddress(instance_reg, offset_in_bytes_),
2108 temp2); 2104 temp2);
2109 __ Bind(&copy_float32x4); 2105 __ Bind(&copy_float32x4);
2110 __ LoadMultipleDFromOffset(fpu_temp, 2, value_reg, 2106 __ CopyFloat32x4Field(temp, value_reg, TMP, temp2, fpu_temp);
2111 Float32x4::value_offset() - kHeapObjectTag);
2112 __ StoreMultipleDToOffset(fpu_temp, 2, temp,
2113 Float32x4::value_offset() - kHeapObjectTag);
2114 __ b(&skip_store); 2107 __ b(&skip_store);
2115 } 2108 }
2116 2109
2117 { 2110 {
2118 __ Bind(&store_float64x2); 2111 __ Bind(&store_float64x2);
2119 Label copy_float64x2; 2112 Label copy_float64x2;
2120 StoreInstanceFieldSlowPath* slow_path = 2113 StoreInstanceFieldSlowPath* slow_path =
2121 new StoreInstanceFieldSlowPath(this, compiler->float64x2_class()); 2114 new StoreInstanceFieldSlowPath(this, compiler->float64x2_class());
2122 compiler->AddSlowPathCode(slow_path); 2115 compiler->AddSlowPathCode(slow_path);
2123 2116
2124 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes_)); 2117 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes_));
2125 __ CompareImmediate(temp, 2118 __ CompareImmediate(temp,
2126 reinterpret_cast<intptr_t>(Object::null())); 2119 reinterpret_cast<intptr_t>(Object::null()));
2127 __ b(&copy_float64x2, NE); 2120 __ b(&copy_float64x2, NE);
2128 2121
2129 __ TryAllocate(compiler->float64x2_class(), 2122 __ TryAllocate(compiler->float64x2_class(),
2130 slow_path->entry_label(), 2123 slow_path->entry_label(),
2131 temp, 2124 temp,
2132 temp2); 2125 temp2);
2133 __ Bind(slow_path->exit_label()); 2126 __ Bind(slow_path->exit_label());
2134 __ MoveRegister(temp2, temp); 2127 __ MoveRegister(temp2, temp);
2135 __ StoreIntoObject(instance_reg, 2128 __ StoreIntoObject(instance_reg,
2136 FieldAddress(instance_reg, offset_in_bytes_), 2129 FieldAddress(instance_reg, offset_in_bytes_),
2137 temp2); 2130 temp2);
2138 __ Bind(&copy_float64x2); 2131 __ Bind(&copy_float64x2);
2139 __ LoadMultipleDFromOffset(fpu_temp, 2, value_reg, 2132 __ CopyFloat64x2Field(temp, value_reg, TMP, temp2, fpu_temp);
2140 Float64x2::value_offset() - kHeapObjectTag);
2141 __ StoreMultipleDToOffset(fpu_temp, 2, temp,
2142 Float64x2::value_offset() - kHeapObjectTag);
2143 __ b(&skip_store); 2133 __ b(&skip_store);
2144 } 2134 }
2145 2135
2146 __ Bind(&store_pointer); 2136 __ Bind(&store_pointer);
2147 } 2137 }
2148 2138
2149 if (ShouldEmitStoreBarrier()) { 2139 if (ShouldEmitStoreBarrier()) {
2150 Register value_reg = locs()->in(1).reg(); 2140 Register value_reg = locs()->in(1).reg();
2151 __ StoreIntoObject(instance_reg, 2141 __ StoreIntoObject(instance_reg,
2152 FieldAddress(instance_reg, offset_in_bytes_), 2142 FieldAddress(instance_reg, offset_in_bytes_),
(...skipping 321 matching lines...) Expand 10 before | Expand all | Expand 10 after
2474 : LocationSummary::kCallOnSlowPath); 2464 : LocationSummary::kCallOnSlowPath);
2475 2465
2476 locs->set_in(0, Location::RequiresRegister()); 2466 locs->set_in(0, Location::RequiresRegister());
2477 2467
2478 if (IsUnboxedLoad() && opt) { 2468 if (IsUnboxedLoad() && opt) {
2479 locs->AddTemp(Location::RequiresRegister()); 2469 locs->AddTemp(Location::RequiresRegister());
2480 } else if (IsPotentialUnboxedLoad()) { 2470 } else if (IsPotentialUnboxedLoad()) {
2481 locs->AddTemp(opt ? Location::RequiresFpuRegister() 2471 locs->AddTemp(opt ? Location::RequiresFpuRegister()
2482 : Location::FpuRegisterLocation(Q1)); 2472 : Location::FpuRegisterLocation(Q1));
2483 locs->AddTemp(Location::RequiresRegister()); 2473 locs->AddTemp(Location::RequiresRegister());
2474 locs->AddTemp(Location::RequiresRegister());
2484 } 2475 }
2485 locs->set_out(0, Location::RequiresRegister()); 2476 locs->set_out(0, Location::RequiresRegister());
2486 return locs; 2477 return locs;
2487 } 2478 }
2488 2479
2489 2480
2490 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2481 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2491 Register instance_reg = locs()->in(0).reg(); 2482 const Register instance_reg = locs()->in(0).reg();
2492 if (IsUnboxedLoad() && compiler->is_optimizing()) { 2483 if (IsUnboxedLoad() && compiler->is_optimizing()) {
2493 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg()); 2484 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
2494 const Register temp = locs()->temp(0).reg(); 2485 const Register temp = locs()->temp(0).reg();
2495 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes())); 2486 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
2496 const intptr_t cid = field()->UnboxedFieldCid(); 2487 const intptr_t cid = field()->UnboxedFieldCid();
2497 switch (cid) { 2488 switch (cid) {
2498 case kDoubleCid: 2489 case kDoubleCid:
2499 __ Comment("UnboxedDoubleLoadFieldInstr"); 2490 __ Comment("UnboxedDoubleLoadFieldInstr");
2500 __ LoadDFromOffset(result, temp, 2491 __ LoadDFromOffset(result, temp,
2501 Double::value_offset() - kHeapObjectTag); 2492 Double::value_offset() - kHeapObjectTag);
(...skipping 10 matching lines...) Expand all
2512 break; 2503 break;
2513 default: 2504 default:
2514 UNREACHABLE(); 2505 UNREACHABLE();
2515 } 2506 }
2516 return; 2507 return;
2517 } 2508 }
2518 2509
2519 Label done; 2510 Label done;
2520 Register result_reg = locs()->out(0).reg(); 2511 Register result_reg = locs()->out(0).reg();
2521 if (IsPotentialUnboxedLoad()) { 2512 if (IsPotentialUnboxedLoad()) {
2513 const DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg());
2522 const Register temp = locs()->temp(1).reg(); 2514 const Register temp = locs()->temp(1).reg();
2523 const DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg()); 2515 const Register temp2 = locs()->temp(2).reg();
2524 2516
2525 Label load_pointer; 2517 Label load_pointer;
2526 Label load_double; 2518 Label load_double;
2527 Label load_float32x4; 2519 Label load_float32x4;
2528 Label load_float64x2; 2520 Label load_float64x2;
2529 2521
2530 __ LoadObject(result_reg, Field::ZoneHandle(field()->raw())); 2522 __ LoadObject(result_reg, Field::ZoneHandle(field()->raw()));
2531 2523
2532 FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset()); 2524 FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset());
2533 FieldAddress field_nullability_operand(result_reg, 2525 FieldAddress field_nullability_operand(result_reg,
(...skipping 26 matching lines...) Expand all
2560 __ Bind(&load_double); 2552 __ Bind(&load_double);
2561 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this); 2553 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
2562 compiler->AddSlowPathCode(slow_path); 2554 compiler->AddSlowPathCode(slow_path);
2563 2555
2564 __ TryAllocate(compiler->double_class(), 2556 __ TryAllocate(compiler->double_class(),
2565 slow_path->entry_label(), 2557 slow_path->entry_label(),
2566 result_reg, 2558 result_reg,
2567 temp); 2559 temp);
2568 __ Bind(slow_path->exit_label()); 2560 __ Bind(slow_path->exit_label());
2569 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes())); 2561 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
2570 __ LoadDFromOffset(value, temp, Double::value_offset() - kHeapObjectTag); 2562 __ CopyDoubleField(result_reg, temp, TMP, temp2, value);
2571 __ StoreDToOffset(value,
2572 result_reg,
2573 Double::value_offset() - kHeapObjectTag);
2574 __ b(&done); 2563 __ b(&done);
2575 } 2564 }
2576 2565
2577 { 2566 {
2578 __ Bind(&load_float32x4); 2567 __ Bind(&load_float32x4);
2579 BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this); 2568 BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);
2580 compiler->AddSlowPathCode(slow_path); 2569 compiler->AddSlowPathCode(slow_path);
2581 2570
2582 __ TryAllocate(compiler->float32x4_class(), 2571 __ TryAllocate(compiler->float32x4_class(),
2583 slow_path->entry_label(), 2572 slow_path->entry_label(),
2584 result_reg, 2573 result_reg,
2585 temp); 2574 temp);
2586 __ Bind(slow_path->exit_label()); 2575 __ Bind(slow_path->exit_label());
2587 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes())); 2576 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
2588 __ LoadMultipleDFromOffset(value, 2, temp, 2577 __ CopyFloat32x4Field(result_reg, temp, TMP, temp2, value);
2589 Float32x4::value_offset() - kHeapObjectTag);
2590 __ StoreMultipleDToOffset(value, 2, result_reg,
2591 Float32x4::value_offset() - kHeapObjectTag);
2592 __ b(&done); 2578 __ b(&done);
2593 } 2579 }
2594 2580
2595 { 2581 {
2596 __ Bind(&load_float64x2); 2582 __ Bind(&load_float64x2);
2597 BoxFloat64x2SlowPath* slow_path = new BoxFloat64x2SlowPath(this); 2583 BoxFloat64x2SlowPath* slow_path = new BoxFloat64x2SlowPath(this);
2598 compiler->AddSlowPathCode(slow_path); 2584 compiler->AddSlowPathCode(slow_path);
2599 2585
2600 __ TryAllocate(compiler->float64x2_class(), 2586 __ TryAllocate(compiler->float64x2_class(),
2601 slow_path->entry_label(), 2587 slow_path->entry_label(),
2602 result_reg, 2588 result_reg,
2603 temp); 2589 temp);
2604 __ Bind(slow_path->exit_label()); 2590 __ Bind(slow_path->exit_label());
2605 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes())); 2591 __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
2606 __ LoadMultipleDFromOffset(value, 2, temp, 2592 __ CopyFloat64x2Field(result_reg, temp, TMP, temp2, value);
2607 Float64x2::value_offset() - kHeapObjectTag);
2608 __ StoreMultipleDToOffset(value, 2, result_reg,
2609 Float64x2::value_offset() - kHeapObjectTag);
2610 __ b(&done); 2593 __ b(&done);
2611 } 2594 }
2612 2595
2613 __ Bind(&load_pointer); 2596 __ Bind(&load_pointer);
2614 } 2597 }
2615 __ LoadFromOffset(kWord, result_reg, 2598 __ LoadFromOffset(kWord, result_reg,
2616 instance_reg, offset_in_bytes() - kHeapObjectTag); 2599 instance_reg, offset_in_bytes() - kHeapObjectTag);
2617 __ Bind(&done); 2600 __ Bind(&done);
2618 } 2601 }
2619 2602
(...skipping 3662 matching lines...) Expand 10 before | Expand all | Expand 10 after
6282 compiler->GenerateCall(token_pos(), 6265 compiler->GenerateCall(token_pos(),
6283 &label, 6266 &label,
6284 PcDescriptors::kOther, 6267 PcDescriptors::kOther,
6285 locs()); 6268 locs());
6286 __ Drop(ArgumentCount()); // Discard arguments. 6269 __ Drop(ArgumentCount()); // Discard arguments.
6287 } 6270 }
6288 6271
6289 } // namespace dart 6272 } // namespace dart
6290 6273
6291 #endif // defined TARGET_ARCH_ARM 6274 #endif // defined TARGET_ARCH_ARM
OLD | NEW
« no previous file with comments | « runtime/vm/instructions_arm.cc ('k') | runtime/vm/intrinsifier_arm.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698