Chromium Code Reviews
Unified Diff: src/a64/code-stubs-a64.h

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS (created 6 years, 10 months ago)
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
 //       with the distribution.
 //     * Neither the name of Google Inc. nor the names of its
 //       contributors may be used to endorse or promote products derived
 //       from this software without specific prior written permission.
 //
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_ARM_CODE_STUBS_ARM_H_
-#define V8_ARM_CODE_STUBS_ARM_H_
+#ifndef V8_A64_CODE_STUBS_A64_H_
+#define V8_A64_CODE_STUBS_A64_H_
 
 #include "ic-inl.h"
 
 namespace v8 {
 namespace internal {
 
 
 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
 
 
 class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
-      : save_doubles_(save_fp) {}
+      : save_doubles_(save_fp) { }
 
   void Generate(MacroAssembler* masm);
 
   static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
   virtual bool SometimesSetsUpAFrame() { return false; }
 
  private:
   SaveFPRegsMode save_doubles_;
 
   Major MajorKey() { return StoreBufferOverflow; }
   int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
 };
 
 
 class StringHelper : public AllStatic {
  public:
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharactersLong(MacroAssembler* masm,
-                                         Register dest,
-                                         Register src,
-                                         Register count,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Register scratch4,
-                                         int flags);
-
+  // TODO(all): These don't seem to be used any more. Delete them.
 
   // Generate string hash.
   static void GenerateHashInit(MacroAssembler* masm,
                                Register hash,
                                Register character);
 
   static void GenerateHashAddCharacter(MacroAssembler* masm,
                                        Register hash,
                                        Register character);
 
   static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash);
+                                  Register hash,
+                                  Register scratch);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
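The three hash helpers above emit, one instruction at a time, V8's incremental string hash: an init step, a per-character mixing step, and a finalization step. As a rough scalar model, the pattern is the classic Jenkins one-at-a-time hash; the sketch below uses those textbook constants and folds V8's hash seed into the init step, so treat the exact constants and seeding as assumptions rather than a quote of the generated code:

    #include <cstdint>

    // Scalar sketch of what the macro-assembler helpers compute.
    uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      // GenerateHashAddCharacter: mix one more character into the hash.
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashInit(uint32_t seed, uint32_t character) {
      // GenerateHashInit: start the running hash from a seed and the
      // first character.
      return HashAddCharacter(seed, character);
    }

    uint32_t HashGetHash(uint32_t hash) {
      // GenerateHashGetHash: final avalanche. (The new 'scratch'
      // parameter in the A64 signature is presumably just a
      // code-generation temporary.)
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash;
    }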
-class SubStringStub: public PlatformCodeStub {
- public:
-  SubStringStub() {}
-
- private:
-  Major MajorKey() { return SubString; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
-  StringCompareStub() { }
-
-  // Compares two flat ASCII strings and returns result in r0.
-  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                              Register left,
-                                              Register right,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              Register scratch3,
-                                              Register scratch4);
-
-  // Compares two flat ASCII strings for equality and returns result
-  // in r0.
-  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Register scratch3);
-
- private:
-  virtual Major MajorKey() { return StringCompare; }
-  virtual int MinorKey() { return 0; }
-  virtual void Generate(MacroAssembler* masm);
-
-  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register length,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* chars_not_equal);
-};
-
-
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
-  WriteInt32ToHeapNumberStub(Register the_int,
-                             Register the_heap_number,
-                             Register scratch)
-      : the_int_(the_int),
-        the_heap_number_(the_heap_number),
-        scratch_(scratch) { }
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
-  Register the_int_;
-  Register the_heap_number_;
-  Register scratch_;
-
-  // Minor key encoding in 16 bits.
-  class IntRegisterBits: public BitField<int, 0, 4> {};
-  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
-  class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
-  Major MajorKey() { return WriteInt32ToHeapNumber; }
-  int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
-    return IntRegisterBits::encode(the_int_.code())
-           | HeapNumberRegisterBits::encode(the_heap_number_.code())
-           | ScratchRegisterBits::encode(scratch_.code());
-  }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
 class RecordWriteStub: public PlatformCodeStub {
  public:
+  // Stub to record the write of 'value' at 'address' in 'object'.
+  // Typically 'address' = 'object' + <some offset>.
+  // See MacroAssembler::RecordWriteField() for an example.
   RecordWriteStub(Register object,
                   Register value,
                   Register address,
                   RememberedSetAction remembered_set_action,
                   SaveFPRegsMode fp_mode)
       : object_(object),
         value_(value),
         address_(address),
         remembered_set_action_(remembered_set_action),
         save_fp_regs_mode_(fp_mode),
         regs_(object,   // An input reg.
               address,  // An input reg.
               value) {  // One scratch reg.
   }
 
   enum Mode {
     STORE_BUFFER_ONLY,
     INCREMENTAL,
     INCREMENTAL_COMPACTION
   };
 
   virtual bool SometimesSetsUpAFrame() { return false; }
 
-  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
-    masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
-    ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
-  }
-
-  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
-    masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
-    ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
-  }
-
   static Mode GetMode(Code* stub) {
-    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
-    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
-                                                   Assembler::kInstrSize);
+    // Find the mode depending on the first two instructions.
+    Instruction* instr1 =
+        reinterpret_cast<Instruction*>(stub->instruction_start());
+    Instruction* instr2 = instr1->following();
 
-    if (Assembler::IsBranch(first_instruction)) {
+    if (instr1->IsUncondBranchImm()) {
+      ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
       return INCREMENTAL;
     }
 
-    ASSERT(Assembler::IsTstImmediate(first_instruction));
+    ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
 
-    if (Assembler::IsBranch(second_instruction)) {
+    if (instr2->IsUncondBranchImm()) {
       return INCREMENTAL_COMPACTION;
     }
 
-    ASSERT(Assembler::IsTstImmediate(second_instruction));
+    ASSERT(instr2->IsPCRelAddressing());
 
     return STORE_BUFFER_ONLY;
   }
 
+  // We patch the first two instructions of the stub back and forth between
+  // an adr and a branch when we start and stop incremental heap marking.
+  // The branch is
+  //   b label
+  // The adr is
+  //   adr xzr, label
+  // so it is effectively a nop.
   static void Patch(Code* stub, Mode mode) {
-    MacroAssembler masm(NULL,
-                        stub->instruction_start(),
-                        stub->instruction_size());
+    // We are going to patch the first two instructions of the stub.
+    PatchingAssembler patcher(
+        reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
+    Instruction* instr1 = patcher.InstructionAt(0);
+    Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
+    // Instructions must be either 'adr' or 'b'.
+    ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+    ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+    // Retrieve the offsets to the labels.
+    int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
+    int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
+
     switch (mode) {
       case STORE_BUFFER_ONLY:
         ASSERT(GetMode(stub) == INCREMENTAL ||
                GetMode(stub) == INCREMENTAL_COMPACTION);
-        PatchBranchIntoNop(&masm, 0);
-        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+        patcher.adr(xzr, offset_to_incremental_noncompacting);
+        patcher.adr(xzr, offset_to_incremental_compacting);
         break;
       case INCREMENTAL:
         ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        PatchNopIntoBranch(&masm, 0);
+        patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
+        patcher.adr(xzr, offset_to_incremental_compacting);
         break;
       case INCREMENTAL_COMPACTION:
         ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+        patcher.adr(xzr, offset_to_incremental_noncompacting);
+        patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
         break;
     }
     ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
   }
 
  private:
-  // This is a helper class for freeing up 3 scratch registers. The input is
-  // two registers that must be preserved and one scratch register provided by
-  // the caller.
+  // This is a helper class to manage the registers associated with the stub.
+  // The 'object' and 'address' registers must be preserved.
   class RegisterAllocation {
    public:
     RegisterAllocation(Register object,
                        Register address,
-                       Register scratch0)
+                       Register scratch)
         : object_(object),
           address_(address),
-          scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
-      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
+          scratch0_(scratch),
+          saved_regs_(kCallerSaved) {
+      ASSERT(!AreAliased(scratch, object, address));
+
+      // We would like to require more scratch registers for this stub,
+      // but the number of registers comes down to the ones used in
+      // FullCodeGen::SetVar(), which is architecture independent.
+      // We allocate 2 extra scratch registers that we'll save on the stack.
+      CPURegList pool_available = GetValidRegistersForAllocation();
+      CPURegList used_regs(object, address, scratch);
+      pool_available.Remove(used_regs);
+      scratch1_ = Register(pool_available.PopLowestIndex());
+      scratch2_ = Register(pool_available.PopLowestIndex());
+
+      // The SaveCallerSaveRegisters method needs to save caller-saved
+      // registers, but we don't bother saving ip0 and ip1 because they are
+      // used as scratch registers by the MacroAssembler.
+      saved_regs_.Remove(ip0);
+      saved_regs_.Remove(ip1);
+
+      // The scratch registers will be restored by other means so we don't
+      // need to save them with the other caller-saved registers.
+      saved_regs_.Remove(scratch0_);
+      saved_regs_.Remove(scratch1_);
+      saved_regs_.Remove(scratch2_);
     }
 
     void Save(MacroAssembler* masm) {
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
       // We don't have to save scratch0_ because it was given to us as
       // a scratch register.
-      masm->push(scratch1_);
+      masm->Push(scratch1_, scratch2_);
     }
 
     void Restore(MacroAssembler* masm) {
-      masm->pop(scratch1_);
+      masm->Pop(scratch2_, scratch1_);
     }
 
     // If we have to call into C then we need to save and restore all caller-
-    // saved registers that were not already preserved. The scratch registers
-    // will be restored by other means so we don't bother pushing them here.
+    // saved registers that were not already preserved.
     void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
-      masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+      // TODO(all): This can be very expensive, and it is likely that not
+      // every register will need to be preserved. Can we improve this?
+      masm->PushCPURegList(saved_regs_);
       if (mode == kSaveFPRegs) {
-        masm->SaveFPRegs(sp, scratch0_);
+        masm->PushCPURegList(kCallerSavedFP);
       }
     }
 
-    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
-                                           SaveFPRegsMode mode) {
+    void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                    SaveFPRegsMode mode) {
+      // TODO(all): This can be very expensive, and it is likely that not
+      // every register will need to be preserved. Can we improve this?
       if (mode == kSaveFPRegs) {
-        masm->RestoreFPRegs(sp, scratch0_);
+        masm->PopCPURegList(kCallerSavedFP);
       }
-      masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+      masm->PopCPURegList(saved_regs_);
     }
 
-    inline Register object() { return object_; }
-    inline Register address() { return address_; }
-    inline Register scratch0() { return scratch0_; }
-    inline Register scratch1() { return scratch1_; }
+    Register object() { return object_; }
+    Register address() { return address_; }
+    Register scratch0() { return scratch0_; }
+    Register scratch1() { return scratch1_; }
+    Register scratch2() { return scratch2_; }
 
    private:
     Register object_;
     Register address_;
     Register scratch0_;
     Register scratch1_;
+    Register scratch2_;
+    CPURegList saved_regs_;
+
+    // TODO(all): We should consider moving this somewhere else.
+    static CPURegList GetValidRegistersForAllocation() {
+      // The list of valid registers for allocation is defined as all the
+      // registers without those with a special meaning.
+      //
+      // The default list excludes registers x26 to x31 because they are
+      // reserved for the following purposes:
+      //   - x26 root register
+      //   - x27 context pointer register
+      //   - x28 jssp
+      //   - x29 frame pointer
+      //   - x30 link register (lr)
+      //   - x31 xzr/stack pointer
+      CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25);
+
+      // We also remove MacroAssembler's scratch registers.
+      list.Remove(ip0);
+      list.Remove(ip1);
+      list.Remove(x8);
+      list.Remove(x9);
+
+      return list;
+    }
 
     friend class RecordWriteStub;
   };
 
+  // A list of stub variants which are pregenerated.
+  // The variants are stored in the same format as the minor key, so
+  // MinorKeyFor() can be used to populate and check this list.
+  static const int kAheadOfTime[];
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+
   enum OnNoNeedToInformIncrementalMarker {
     kReturnOnNoNeedToInformIncrementalMarker,
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  void Generate(MacroAssembler* masm);
-  void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
       OnNoNeedToInformIncrementalMarker on_no_need,
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
 
   Major MajorKey() { return RecordWrite; }
 
   int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-           ValueBits::encode(value_.code()) |
-           AddressBits::encode(address_.code()) |
-           RememberedSetActionBits::encode(remembered_set_action_) |
-           SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+    return MinorKeyFor(object_, value_, address_, remembered_set_action_,
+                       save_fp_regs_mode_);
+  }
+
+  static int MinorKeyFor(Register object,
+                         Register value,
+                         Register address,
+                         RememberedSetAction action,
+                         SaveFPRegsMode fp_mode) {
+    ASSERT(object.Is64Bits());
+    ASSERT(value.Is64Bits());
+    ASSERT(address.Is64Bits());
+    return ObjectBits::encode(object.code()) |
+           ValueBits::encode(value.code()) |
+           AddressBits::encode(address.code()) |
+           RememberedSetActionBits::encode(action) |
+           SaveFPRegsModeBits::encode(fp_mode);
   }
 
   void Activate(Code* code) {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
-  class ObjectBits: public BitField<int, 0, 4> {};
-  class ValueBits: public BitField<int, 4, 4> {};
-  class AddressBits: public BitField<int, 8, 4> {};
-  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
-  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+  class ObjectBits: public BitField<int, 0, 5> {};
+  class ValueBits: public BitField<int, 5, 5> {};
+  class AddressBits: public BitField<int, 10, 5> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
 
   Register object_;
   Register value_;
   Register address_;
   RememberedSetAction remembered_set_action_;
   SaveFPRegsMode save_fp_regs_mode_;
   Label slow_;
   RegisterAllocation regs_;
 };
 
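The GetMode()/Patch() pair above is the core of the A64 port of this stub: the stub's current mode is encoded entirely in its first two instructions. Each slot holds either a real unconditional branch (b label) or adr xzr, label, which computes the same PC-relative offset but discards it into the zero register, so it acts as a nop. Because both encodings carry the offset to the same label, Patch() can read the offset from whichever form is present (via ImmPCOffset()) and re-emit the other form. A compact summary, with a hypothetical scalar model of the mode test (the booleans stand in for the IsUncondBranchImm()/IsPCRelAddressing() checks in the diff):

    // First two instructions                  Mode
    //   adr xzr, L_incr ; adr xzr, L_compact    STORE_BUFFER_ONLY (two nops)
    //   b   L_incr      ; adr xzr, L_compact    INCREMENTAL
    //   adr xzr, L_incr ; b   L_compact         INCREMENTAL_COMPACTION
    //   b ; b                                   never generated

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // Hypothetical model of GetMode(); 'true' means the slot holds 'b'.
    Mode GetModeModel(bool instr1_is_branch, bool instr2_is_branch) {
      if (instr1_is_branch) return INCREMENTAL;
      if (instr2_is_branch) return INCREMENTAL_COMPACTION;
      return STORE_BUFFER_ONLY;  // adr/adr: both slots are nops.
    }

Note also that the explicit CPU::FlushICache() call from the ARM version is gone; presumably the PatchingAssembler performs the required cache maintenance itself, though that is an inference from the diff rather than something it states.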
 
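A related consequence of the port shows up in the BitField widths near the bottom of the class: ARM encodes each register code in 4 bits, but A64 has 32 general-purpose registers, so ObjectBits, ValueBits and AddressBits widen to 5 bits each, and the two mode bits move up to positions 15 and 16. Splitting the encoder into the static MinorKeyFor() lets the pregenerated-variant table kAheadOfTime, which is stored in minor-key format, be populated and checked with the same function. A worked example of the packing (plain shifts standing in for V8's BitField template; x1/x2/x3 are arbitrary picks):

    #include <cstdint>

    // Layout from the diff: ObjectBits [0..4], ValueBits [5..9],
    // AddressBits [10..14], RememberedSetActionBits bit 15,
    // SaveFPRegsModeBits bit 16.
    uint32_t MinorKeyFor(uint32_t object_code, uint32_t value_code,
                         uint32_t address_code, uint32_t action,
                         uint32_t fp_mode) {
      return object_code | (value_code << 5) | (address_code << 10) |
             (action << 15) | (fp_mode << 16);
    }

    // Example: object = x1 (code 1), value = x2 (code 2),
    // address = x3 (code 3), both mode fields 0:
    //   1 | (2 << 5) | (3 << 10) = 0x0C41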
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in the memory. Currently the
-// simplest approach is to generate such stub early enough so it can never be
-// moved by GC
+// Helper to call C++ functions from generated code. The caller must prepare
+// the exit frame before doing the call with GenerateCall.
 class DirectCEntryStub: public PlatformCodeStub {
  public:
   DirectCEntryStub() {}
   void Generate(MacroAssembler* masm);
   void GenerateCall(MacroAssembler* masm, Register target);
 
  private:
   Major MajorKey() { return DirectCEntry; }
   int MinorKey() { return 0; }
 
(...skipping 15 matching lines...)
                                      Register receiver,
                                      Register properties,
                                      Handle<Name> name,
                                      Register scratch0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
                                      Label* done,
                                      Register elements,
                                      Register name,
-                                     Register r0,
-                                     Register r1);
+                                     Register scratch1,
+                                     Register scratch2);
 
   virtual bool SometimesSetsUpAFrame() { return false; }
 
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
 
   static const int kCapacityOffset =
       NameDictionary::kHeaderSize +
       NameDictionary::kCapacityIndex * kPointerSize;
 
   static const int kElementsStartOffset =
       NameDictionary::kHeaderSize +
       NameDictionary::kElementsStartIndex * kPointerSize;
 
   Major MajorKey() { return NameDictionaryLookup; }
 
   int MinorKey() {
     return LookupModeBits::encode(mode_);
   }
 
   class LookupModeBits: public BitField<LookupMode, 0, 1> {};
 
   LookupMode mode_;
 };
 
 
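For context on the constants above: the generated lookup probes a NameDictionary with open addressing. The first kInlinedProbes (4) probes are emitted inline at the use site, and the stub itself continues up to kTotalProbes (20) before giving up. Below is a hedged C++ sketch of the probe loop's shape; the triangular probe step and the three-word entry size match NameDictionary as I read the V8 sources of this era, but treat both as assumptions:

    #include <cstdint>

    const int kTotalProbes = 20;
    const int kEntrySize = 3;  // assumption: key, value, details per entry

    // Returns the element index of the matching entry, or -1 on a miss.
    // 'capacity' is a power of two (loaded from kCapacityOffset); the
    // entries themselves start at kElementsStartOffset.
    int FindEntry(uint32_t hash, uint32_t capacity,
                  bool (*key_matches)(int element_index)) {
      uint32_t mask = capacity - 1;
      uint32_t index = hash & mask;
      for (int probe = 0; probe < kTotalProbes; probe++) {
        // Probes 0..3 are emitted inline; the rest run inside the stub.
        int element_index = static_cast<int>(index) * kEntrySize;
        if (key_matches(element_index)) return element_index;
        index = (index + probe + 1) & mask;  // triangular probe offsets
      }
      return -1;
    }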
+class SubStringStub: public PlatformCodeStub {
+ public:
+  SubStringStub() {}
+
+ private:
+  Major MajorKey() { return SubString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public PlatformCodeStub {
+ public:
+  StringCompareStub() { }
+
+  // Compares two flat ASCII strings and returns the result in x0.
+  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                              Register left,
+                                              Register right,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              Register scratch4);
+
+  // Compares two flat ASCII strings for equality and returns the result
+  // in x0.
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3);
+
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register length,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Label* chars_not_equal);
+};
+
+
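SubStringStub and StringCompareStub are unchanged in substance; they simply moved to the end of the file, with the result register in the comments renamed from ARM's r0 to A64's x0. As a scalar model of what the equality fast path generates (assuming, as the Register parameters above imply, that the callers have already flattened both strings and loaded their lengths and character pointers):

    #include <cstdint>
    #include <cstring>

    // Models GenerateFlatAsciiStringEquals plus
    // GenerateAsciiCharsCompareLoop.
    bool FlatAsciiStringEquals(const uint8_t* left_chars, int left_length,
                               const uint8_t* right_chars,
                               int right_length) {
      // Different lengths: not equal, no need to inspect characters.
      if (left_length != right_length) return false;
      // Byte-wise compare; the generated loop branches to
      // 'chars_not_equal' at the first mismatch.
      return std::memcmp(left_chars, right_chars, left_length) == 0;
    }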
 struct PlatformCallInterfaceDescriptor {
   explicit PlatformCallInterfaceDescriptor(
       TargetAddressStorageMode storage_mode)
       : storage_mode_(storage_mode) { }
 
   TargetAddressStorageMode storage_mode() { return storage_mode_; }
 
  private:
   TargetAddressStorageMode storage_mode_;
 };
 
 
 } }  // namespace v8::internal
 
-#endif  // V8_ARM_CODE_STUBS_ARM_H_
+#endif  // V8_A64_CODE_STUBS_A64_H_