Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(3)

Side by Side Diff: src/ic/ppc/stub-cache-ppc.cc

Issue 571173003: PowerPC specific sub-directories (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Updated ppc sub-dirs to current V8 code levels Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 //
3 // Copyright IBM Corp. 2012, 2013. All rights reserved.
4 //
2 // Use of this source code is governed by a BSD-style license that can be 5 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 6 // found in the LICENSE file.
4 7
5 #include "src/v8.h" 8 #include "src/v8.h"
6 9
7 #if V8_TARGET_ARCH_ARM 10 #if V8_TARGET_ARCH_PPC
8 11
9 #include "src/codegen.h" 12 #include "src/codegen.h"
10 #include "src/ic/stub-cache.h" 13 #include "src/ic/stub-cache.h"
11 14
12 namespace v8 { 15 namespace v8 {
13 namespace internal { 16 namespace internal {
14 17
15 #define __ ACCESS_MASM(masm) 18 #define __ ACCESS_MASM(masm)
16 19
17 20
// Probe one entry of the given stub-cache table (primary or secondary).
// On a hit (key, map and code flags all match) this tail-jumps into the
// cached code object and never returns; on any mismatch it falls through
// after binding the local |miss| label.
//
// Register usage:
//   receiver       - holds the receiver object (only its map is read here).
//   name           - holds the name being looked up; compared to the entry key.
//   offset         - unscaled index of the cache entry to probe.
//   scratch, scratch2, offset_scratch - clobbered; scratch becomes the
//                    entry base address, scratch2 later holds the code object.
//   ip, r0         - also clobbered as implicit scratch registers.
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
                       Code::Flags flags, bool leave_frame,
                       StubCache::Table table, Register receiver, Register name,
                       // Number of the cache entry, not scaled.
                       Register offset, Register scratch, Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));

  // uintptr_t (not uint32_t) so the casts are valid on 64-bit PPC as well.
  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
  uintptr_t value_off_addr =
      reinterpret_cast<uintptr_t>(value_offset.address());
  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());

  // Check the relative positions of the address fields: the map and value
  // columns are addressed below as small displacements off the key column,
  // so they must follow it closely enough for a MemOperand displacement.
  DCHECK(value_off_addr > key_off_addr);
  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
  DCHECK(map_off_addr > key_off_addr);
  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
  DCHECK((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  // Repurpose |scratch| as the entry base address; null it out so no later
  // code accidentally uses it under the old name.
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map):
  // offset * 3 == offset + (offset << 1).
  __ ShiftLeftImm(offset_scratch, offset, Operand(1));
  __ add(offset_scratch, offset, offset_scratch);

  // Calculate the base address of the entry:
  // key table base + (entry index * 3) * kPointerSize.
  __ mov(base_addr, Operand(key_offset));
  __ ShiftLeftImm(scratch2, offset_scratch, Operand(kPointerSizeLog2));
  __ add(base_addr, base_addr, scratch2);

  // Check that the key in the entry matches the name.
  __ LoadP(ip, MemOperand(base_addr, 0));
  __ cmp(name, ip);
  __ bne(&miss);

  // Check the map matches.
  __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ cmp(ip, scratch2);
  __ bne(&miss);

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));

  // Check that the flags match what we're looking for.  The flags word is
  // loaded with lwz, i.e. treated as a 32-bit field.
  Register flags_reg = base_addr;
  base_addr = no_reg;
  __ lwz(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));

  // Mask out the flag bits not used in the lookup, then compare against the
  // requested flags.  r0 is used as scratch for the mask and the comparand;
  // it must therefore not alias flags_reg.  cmpl is an unsigned compare
  // (only equality matters here).
  DCHECK(!r0.is(flags_reg));
  __ li(r0, Operand(Code::kFlagsNotUsedInLookup));
  __ andc(flags_reg, flags_reg, r0);
  __ mov(r0, Operand(flags));
  __ cmpl(flags_reg, r0);
  __ bne(&miss);

#ifdef DEBUG
  // Testing hooks: force a miss in one table so the other is exercised.
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ b(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ b(&miss);
  }
#endif

  if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);

  // Jump to the first instruction in the code stub.  The indirect jump goes
  // through the count register (mtctr/bctr); kHeaderSize - kHeapObjectTag
  // skips the code object header and removes the heap-object tag.
  __ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ mtctr(r0);
  __ bctr();

  // Miss: fall through.
  __ bind(&miss);
}
95 103
96 104
// Emit the megamorphic stub-cache probe sequence: hash the (name, map) pair,
// probe the primary table, and on a miss re-hash and probe the secondary
// table.  A hit tail-jumps into the cached handler (inside ProbeTable); a
// double miss falls through to the caller's miss handling after bumping the
// miss counter.  scratch/extra/extra2/extra3 are all clobbered.
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
                              bool leave_frame, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Isolate* isolate = masm->isolate();
  Label miss;

#if V8_TARGET_ARCH_PPC64
  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 24 (3 pointer-sized fields of 8 bytes each).
  DCHECK(sizeof(Entry) == 24);
#else
  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 12 (3 pointer-sized fields of 4 bytes each).
  DCHECK(sizeof(Entry) == 12);
#endif

  // Make sure the flags does not name a specific type.
  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);

  // Make sure that there are no register conflicts.
  DCHECK(!scratch.is(receiver));
  DCHECK(!scratch.is(name));
  DCHECK(!extra.is(receiver));
  DCHECK(!extra.is(name));
  DCHECK(!extra.is(scratch));
  DCHECK(!extra2.is(receiver));
  DCHECK(!extra2.is(name));
  DCHECK(!extra2.is(scratch));
  DCHECK(!extra2.is(extra));

  // Check scratch, extra and extra2 registers are valid.
  DCHECK(!scratch.is(no_reg));
  DCHECK(!extra.is(no_reg));
  DCHECK(!extra2.is(no_reg));
  DCHECK(!extra3.is(no_reg));

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                      extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  // The hash field is a 32-bit value (lwz); the map pointer is pointer-sized.
  __ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
  __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ add(scratch, scratch, ip);
#if V8_TARGET_ARCH_PPC64
  // Use only the low 32 bits of the map pointer (rldicl clears the upper
  // 32 bits) so the hash arithmetic matches the 32-bit build.
  __ rldicl(scratch, scratch, 0, 32);
#endif
  uint32_t mask = kPrimaryTableSize - 1;
  // We shift out the last two bits because they are not part of the hash and
  // they are always 01 for maps.
  __ ShiftRightImm(scratch, scratch, Operand(kCacheIndexShift));
  // Mask down the eor argument to the minimum to keep the immediate
  // encodable (xori/andi take 16-bit immediates).
  __ xori(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
  __ andi(scratch, scratch, Operand(mask));

  // Probe the primary table.
  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
             scratch, extra, extra2, extra3);

  // Primary miss: Compute hash for secondary probe.
  // The secondary hash mixes the (shifted) name pointer back out of the
  // primary hash; |extra| is free to clobber here.
  __ ShiftRightImm(extra, name, Operand(kCacheIndexShift));
  __ sub(scratch, scratch, extra);
  uint32_t mask2 = kSecondaryTableSize - 1;
  __ addi(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
  __ andi(scratch, scratch, Operand(mask2));

  // Probe the secondary table.
  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
             scratch, extra, extra2, extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
                      extra3);
}
169 188
170 189
171 #undef __ 190 #undef __
172 } 191 }
173 } // namespace v8::internal 192 } // namespace v8::internal
174 193
175 #endif // V8_TARGET_ARCH_ARM 194 #endif // V8_TARGET_ARCH_PPC
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698