Chromium Code Reviews

Side by Side Diff: src/s390/code-stubs-s390.h

Issue 1725243004: S390: Initial impl of S390 asm, masm, code-stubs,... (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Updated BUILD.gn + cpu-s390.cc to addr @jochen's comments. Created 4 years, 10 months ago
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_PPC_CODE_STUBS_PPC_H_ 5 #ifndef V8_S390_CODE_STUBS_S390_H_
6 #define V8_PPC_CODE_STUBS_PPC_H_ 6 #define V8_S390_CODE_STUBS_S390_H_
7 7
8 #include "src/ppc/frames-ppc.h" 8 #include "src/s390/frames-s390.h"
9 9
10 namespace v8 { 10 namespace v8 {
11 namespace internal { 11 namespace internal {
12 12
13
14 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code); 13 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
15 14
16
17 class StringHelper : public AllStatic { 15 class StringHelper : public AllStatic {
18 public: 16 public:
19 // Generate code for copying a large number of characters. This function 17 // Generate code for copying a large number of characters. This function
20 // is allowed to spend extra time setting up conditions to make copying 18 // is allowed to spend extra time setting up conditions to make copying
21 // faster. Copying of overlapping regions is not supported. 19 // faster. Copying of overlapping regions is not supported.
22 // Dest register ends at the position after the last character written. 20 // Dest register ends at the position after the last character written.
23 static void GenerateCopyCharacters(MacroAssembler* masm, Register dest, 21 static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
24 Register src, Register count, 22 Register src, Register count,
25 Register scratch, 23 Register scratch,
26 String::Encoding encoding); 24 String::Encoding encoding);
(...skipping 14 matching lines...)
41 private: 39 private:
42 static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm, 40 static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
43 Register left, Register right, 41 Register left, Register right,
44 Register length, 42 Register length,
45 Register scratch1, 43 Register scratch1,
46 Label* chars_not_equal); 44 Label* chars_not_equal);
47 45
48 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); 46 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
49 }; 47 };
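
As a plain-C++ reference for the contract GenerateCopyCharacters documents above (no overlapping regions, and the destination register ends one position past the last character written), a minimal sketch; this helper is illustrative and not taken from the V8 sources:

  // Copies |count| characters and returns the advanced destination pointer,
  // mirroring the "dest ends after the last character written" contract.
  // Overlapping regions are not handled, matching the stub's restriction.
  // Usage: char buf[4]; char* end = CopyCharsSketch(buf, "abc", 3);  // end == buf + 3
  inline char* CopyCharsSketch(char* dest, const char* src, int count) {
    while (count-- > 0) *dest++ = *src++;
    return dest;
  }
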
50 48
51
52 class StoreRegistersStateStub : public PlatformCodeStub { 49 class StoreRegistersStateStub : public PlatformCodeStub {
53 public: 50 public:
54 explicit StoreRegistersStateStub(Isolate* isolate) 51 explicit StoreRegistersStateStub(Isolate* isolate)
55 : PlatformCodeStub(isolate) {} 52 : PlatformCodeStub(isolate) {}
56 53
57 static void GenerateAheadOfTime(Isolate* isolate); 54 static void GenerateAheadOfTime(Isolate* isolate);
58 55
59 private: 56 private:
60 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR(); 57 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
61 DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub); 58 DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
62 }; 59 };
63 60
64
65 class RestoreRegistersStateStub : public PlatformCodeStub { 61 class RestoreRegistersStateStub : public PlatformCodeStub {
66 public: 62 public:
67 explicit RestoreRegistersStateStub(Isolate* isolate) 63 explicit RestoreRegistersStateStub(Isolate* isolate)
68 : PlatformCodeStub(isolate) {} 64 : PlatformCodeStub(isolate) {}
69 65
70 static void GenerateAheadOfTime(Isolate* isolate); 66 static void GenerateAheadOfTime(Isolate* isolate);
71 67
72 private: 68 private:
73 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR(); 69 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
74 DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub); 70 DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
75 }; 71 };
76 72
77
78 class RecordWriteStub : public PlatformCodeStub { 73 class RecordWriteStub : public PlatformCodeStub {
79 public: 74 public:
80 RecordWriteStub(Isolate* isolate, Register object, Register value, 75 RecordWriteStub(Isolate* isolate, Register object, Register value,
81 Register address, RememberedSetAction remembered_set_action, 76 Register address, RememberedSetAction remembered_set_action,
82 SaveFPRegsMode fp_mode) 77 SaveFPRegsMode fp_mode)
83 : PlatformCodeStub(isolate), 78 : PlatformCodeStub(isolate),
84 regs_(object, // An input reg. 79 regs_(object, // An input reg.
85 address, // An input reg. 80 address, // An input reg.
86 value) { // One scratch reg. 81 value) { // One scratch reg.
87 minor_key_ = ObjectBits::encode(object.code()) | 82 minor_key_ = ObjectBits::encode(object.code()) |
88 ValueBits::encode(value.code()) | 83 ValueBits::encode(value.code()) |
89 AddressBits::encode(address.code()) | 84 AddressBits::encode(address.code()) |
90 RememberedSetActionBits::encode(remembered_set_action) | 85 RememberedSetActionBits::encode(remembered_set_action) |
91 SaveFPRegsModeBits::encode(fp_mode); 86 SaveFPRegsModeBits::encode(fp_mode);
92 } 87 }
93 88
94 RecordWriteStub(uint32_t key, Isolate* isolate) 89 RecordWriteStub(uint32_t key, Isolate* isolate)
95 : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {} 90 : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
96 91
97 enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION }; 92 enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
98 93
99 bool SometimesSetsUpAFrame() override { return false; } 94 bool SometimesSetsUpAFrame() override { return false; }
100 95
101 static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { 96 // Patch the condition mask of the branch at |pos| (e.g. always taken <-> NOP)
102 // Consider adding DCHECK here to catch bad patching 97 static void PatchBranchCondMask(MacroAssembler* masm, int pos, Condition c) {
103 masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BT); 98 int32_t instrLen = masm->instr_length_at(pos);
99 DCHECK(instrLen == 4 || instrLen == 6);
100
101 if (instrLen == 4) {
102 // BRC - Branch Mask @ Bits 23-20
103 FourByteInstr updatedMask = static_cast<FourByteInstr>(c) << 20;
104 masm->instr_at_put<FourByteInstr>(
105 pos, (masm->instr_at(pos) & ~kFourByteBrCondMask) | updatedMask);
106 } else {
107 // BRCL - Branch Mask @ Bits 39-36
108 SixByteInstr updatedMask = static_cast<SixByteInstr>(c) << 36;
109 masm->instr_at_put<SixByteInstr>(
110 pos, (masm->instr_at(pos) & ~kSixByteBrCondMask) | updatedMask);
111 }
104 } 112 }
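
To make the bit manipulation above easier to follow, here is a self-contained sketch of the same mask arithmetic, assuming (as the comments above state) that the 4-bit condition field sits at bits 20-23 of a 4-byte BRC and bits 36-39 of a 6-byte BRCL. The constants and the sample encoding are illustrative, not copied from the V8 headers:

  #include <cassert>
  #include <cstdint>

  using FourByteInstr = uint32_t;
  using SixByteInstr = uint64_t;

  // Illustrative masks covering the branch condition field.
  const FourByteInstr kFourByteBrCondMaskSketch = 0xFu << 20;
  const SixByteInstr kSixByteBrCondMaskSketch = static_cast<SixByteInstr>(0xF) << 36;

  // Replace the condition mask of a 4-byte BRC with |cond|.
  FourByteInstr PatchBrcCond(FourByteInstr instr, uint32_t cond) {
    return (instr & ~kFourByteBrCondMaskSketch) |
           (static_cast<FourByteInstr>(cond) << 20);
  }

  // Replace the condition mask of a 6-byte BRCL with |cond|.
  SixByteInstr PatchBrclCond(SixByteInstr instr, uint64_t cond) {
    return (instr & ~kSixByteBrCondMaskSketch) |
           (static_cast<SixByteInstr>(cond) << 36);
  }

  int main() {
    const uint32_t CC_NOP_SKETCH = 0x0;     // mask 0: branch never taken
    const uint32_t CC_ALWAYS_SKETCH = 0xF;  // mask 15: branch always taken
    FourByteInstr brc = 0xA7F40010;         // an always-taken BRC (illustrative encoding)
    brc = PatchBrcCond(brc, CC_NOP_SKETCH);
    assert((brc & kFourByteBrCondMaskSketch) == 0);  // now a NOP branch
    brc = PatchBrcCond(brc, CC_ALWAYS_SKETCH);
    assert(((brc >> 20) & 0xF) == CC_ALWAYS_SKETCH);
    return 0;
  }
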
105 113
106 static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { 114 static bool isBranchNop(SixByteInstr instr, int instrLength) {
107 // Consider adding DCHECK here to catch bad patching 115 if ((4 == instrLength && 0 == (instr & kFourByteBrCondMask)) ||
108 masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BF); 116 // BRC - Check for 0x0 mask condition.
117 (6 == instrLength && 0 == (instr & kSixByteBrCondMask))) {
118 // BRCL - Check for 0x0 mask condition
119 return true;
120 }
121 return false;
109 } 122 }
110 123
111 static Mode GetMode(Code* stub) { 124 static Mode GetMode(Code* stub) {
112 Instr first_instruction = 125 int32_t first_instr_length =
113 Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize); 126 Instruction::InstructionLength(stub->instruction_start());
114 Instr second_instruction = Assembler::instr_at(stub->instruction_start() + 127 int32_t second_instr_length = Instruction::InstructionLength(
115 (Assembler::kInstrSize * 2)); 128 stub->instruction_start() + first_instr_length);
116 129
117 // Consider adding DCHECK here to catch unexpected instruction sequence 130 uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
118 if (BF == (first_instruction & kBOfieldMask)) { 131 uint64_t second_instr =
132 Assembler::instr_at(stub->instruction_start() + first_instr_length);
133
134 DCHECK(first_instr_length == 4 || first_instr_length == 6);
135 DCHECK(second_instr_length == 4 || second_instr_length == 6);
136
137 bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
138 bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);
139
140 // STORE_BUFFER_ONLY has NOP on both branches
141 if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
142 // INCREMENTAL_COMPACTION has NOP on the first branch only.
143 else if (isFirstInstrNOP && !isSecondInstrNOP)
144 return INCREMENTAL_COMPACTION;
145 // INCREMENTAL has NOP on the second branch only.
146 else if (!isFirstInstrNOP && isSecondInstrNOP)
119 return INCREMENTAL; 147 return INCREMENTAL;
120 }
121 148
122 if (BF == (second_instruction & kBOfieldMask)) { 149 DCHECK(false);
123 return INCREMENTAL_COMPACTION;
124 }
125
126 return STORE_BUFFER_ONLY; 150 return STORE_BUFFER_ONLY;
127 } 151 }
128 152
129 static void Patch(Code* stub, Mode mode) { 153 static void Patch(Code* stub, Mode mode) {
130 MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(), 154 MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
131 stub->instruction_size(), CodeObjectRequired::kNo); 155 stub->instruction_size(), CodeObjectRequired::kNo);
156
157 // Get instruction lengths of two branches
158 int32_t first_instr_length = masm.instr_length_at(0);
159 int32_t second_instr_length = masm.instr_length_at(first_instr_length);
160
132 switch (mode) { 161 switch (mode) {
133 case STORE_BUFFER_ONLY: 162 case STORE_BUFFER_ONLY:
134 DCHECK(GetMode(stub) == INCREMENTAL || 163 DCHECK(GetMode(stub) == INCREMENTAL ||
135 GetMode(stub) == INCREMENTAL_COMPACTION); 164 GetMode(stub) == INCREMENTAL_COMPACTION);
136 165
137 PatchBranchIntoNop(&masm, Assembler::kInstrSize); 166 PatchBranchCondMask(&masm, 0, CC_NOP);
138 PatchBranchIntoNop(&masm, Assembler::kInstrSize * 2); 167 PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
139 break; 168 break;
140 case INCREMENTAL: 169 case INCREMENTAL:
141 DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); 170 DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
142 PatchNopIntoBranch(&masm, Assembler::kInstrSize); 171 PatchBranchCondMask(&masm, 0, CC_ALWAYS);
143 break; 172 break;
144 case INCREMENTAL_COMPACTION: 173 case INCREMENTAL_COMPACTION:
145 DCHECK(GetMode(stub) == STORE_BUFFER_ONLY); 174 DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
146 PatchNopIntoBranch(&masm, Assembler::kInstrSize * 2); 175 PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
147 break; 176 break;
148 } 177 }
149 DCHECK(GetMode(stub) == mode); 178 DCHECK(GetMode(stub) == mode);
150 Assembler::FlushICache(stub->GetIsolate(), 179 Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
151 stub->instruction_start() + Assembler::kInstrSize, 180 first_instr_length + second_instr_length);
152 2 * Assembler::kInstrSize);
153 } 181 }
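
Read together, GetMode and Patch implement a small protocol: the stub begins with two conditional branches, and which of them is live (non-zero condition mask) encodes the mode. A hedged standalone restatement of that protocol, with illustrative helper names:

  #include <cassert>

  enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

  // A branch whose condition mask is zero is never taken, i.e. a NOP.
  struct StubHead {
    bool first_branch_live;   // !isBranchNop(first instruction)
    bool second_branch_live;  // !isBranchNop(second instruction)
  };

  // Mirrors GetMode: which branch is live determines the mode.
  Mode InferMode(const StubHead& head) {
    if (!head.first_branch_live && !head.second_branch_live)
      return STORE_BUFFER_ONLY;        // both branches NOP'd out
    if (head.first_branch_live) return INCREMENTAL;
    return INCREMENTAL_COMPACTION;     // only the second branch is live
  }

  // Mirrors Patch: flip the masks for the requested mode; the real stub then
  // flushes the instruction cache over both patched instructions.
  void ApplyMode(StubHead* head, Mode mode) {
    head->first_branch_live = (mode == INCREMENTAL);
    head->second_branch_live = (mode == INCREMENTAL_COMPACTION);
  }

  int main() {
    StubHead head = {false, false};
    assert(InferMode(head) == STORE_BUFFER_ONLY);
    ApplyMode(&head, INCREMENTAL_COMPACTION);
    assert(InferMode(head) == INCREMENTAL_COMPACTION);
    return 0;
  }
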
154 182
155 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR(); 183 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
156 184
157 private: 185 private:
158 // This is a helper class for freeing up 3 scratch registers. The input is 186 // This is a helper class for freeing up 3 scratch registers. The input is
159 // two registers that must be preserved and one scratch register provided by 187 // two registers that must be preserved and one scratch register provided by
160 // the caller. 188 // the caller.
161 class RegisterAllocation { 189 class RegisterAllocation {
162 public: 190 public:
163 RegisterAllocation(Register object, Register address, Register scratch0) 191 RegisterAllocation(Register object, Register address, Register scratch0)
164 : object_(object), address_(address), scratch0_(scratch0) { 192 : object_(object), address_(address), scratch0_(scratch0) {
165 DCHECK(!AreAliased(scratch0, object, address, no_reg)); 193 DCHECK(!AreAliased(scratch0, object, address, no_reg));
166 scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_); 194 scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
167 } 195 }
168 196
169 void Save(MacroAssembler* masm) { 197 void Save(MacroAssembler* masm) {
170 DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_)); 198 DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
171 // We don't have to save scratch0_ because it was given to us as 199 // We don't have to save scratch0_ because it was given to us as
172 // a scratch register. 200 // a scratch register.
173 masm->push(scratch1_); 201 masm->push(scratch1_);
174 } 202 }
175 203
176 void Restore(MacroAssembler* masm) { masm->pop(scratch1_); } 204 void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
177 205
178 // If we have to call into C then we need to save and restore all caller- 206 // If we have to call into C then we need to save and restore all caller-
179 // saved registers that were not already preserved. The scratch registers 207 // saved registers that were not already preserved. The scratch registers
180 // will be restored by other means so we don't bother pushing them here. 208 // will be restored by other means so we don't bother pushing them here.
181 void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { 209 void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
182 masm->mflr(r0); 210 masm->push(r14);
183 masm->push(r0);
184 masm->MultiPush(kJSCallerSaved & ~scratch1_.bit()); 211 masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
185 if (mode == kSaveFPRegs) { 212 if (mode == kSaveFPRegs) {
186 // Save all volatile FP registers except d0. 213 // Save all volatile FP registers except d0.
187 masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit()); 214 masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
188 } 215 }
189 } 216 }
190 217
191 inline void RestoreCallerSaveRegisters(MacroAssembler* masm, 218 inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
192 SaveFPRegsMode mode) { 219 SaveFPRegsMode mode) {
193 if (mode == kSaveFPRegs) { 220 if (mode == kSaveFPRegs) {
194 // Restore all volatile FP registers except d0. 221 // Restore all volatile FP registers except d0.
195 masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit()); 222 masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
196 } 223 }
197 masm->MultiPop(kJSCallerSaved & ~scratch1_.bit()); 224 masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
198 masm->pop(r0); 225 masm->pop(r14);
199 masm->mtlr(r0);
200 } 226 }
201 227
202 inline Register object() { return object_; } 228 inline Register object() { return object_; }
203 inline Register address() { return address_; } 229 inline Register address() { return address_; }
204 inline Register scratch0() { return scratch0_; } 230 inline Register scratch0() { return scratch0_; }
205 inline Register scratch1() { return scratch1_; } 231 inline Register scratch1() { return scratch1_; }
206 232
207 private: 233 private:
208 Register object_; 234 Register object_;
209 Register address_; 235 Register address_;
(...skipping 34 matching lines...)
244 } 270 }
245 271
246 RememberedSetAction remembered_set_action() const { 272 RememberedSetAction remembered_set_action() const {
247 return RememberedSetActionBits::decode(minor_key_); 273 return RememberedSetActionBits::decode(minor_key_);
248 } 274 }
249 275
250 SaveFPRegsMode save_fp_regs_mode() const { 276 SaveFPRegsMode save_fp_regs_mode() const {
251 return SaveFPRegsModeBits::decode(minor_key_); 277 return SaveFPRegsModeBits::decode(minor_key_);
252 } 278 }
253 279
254 class ObjectBits : public BitField<int, 0, 5> {}; 280 class ObjectBits : public BitField<int, 0, 4> {};
255 class ValueBits : public BitField<int, 5, 5> {}; 281 class ValueBits : public BitField<int, 4, 4> {};
256 class AddressBits : public BitField<int, 10, 5> {}; 282 class AddressBits : public BitField<int, 8, 4> {};
257 class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> { 283 class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
258 }; 284 };
259 class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {}; 285 class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
260 286
261 Label slow_; 287 Label slow_;
262 RegisterAllocation regs_; 288 RegisterAllocation regs_;
263 289
264 DISALLOW_COPY_AND_ASSIGN(RecordWriteStub); 290 DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
265 }; 291 };
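
The constructor above packs the register codes and flags into minor_key_ through V8's BitField template; the new S390 layout uses 4 bits per register code since the architecture has 16 general-purpose registers. A simplified stand-in for BitField, shown only to illustrate the packing and not the real V8 template:

  #include <cassert>
  #include <cstdint>

  // Simplified stand-in for v8::internal::BitField<T, shift, size>.
  template <typename T, int shift, int size>
  struct BitFieldSketch {
    static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
    static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
    static T decode(uint32_t key) { return static_cast<T>((key & kMask) >> shift); }
  };

  // Same bit layout as the new S390 fields: 4 bits per register code.
  typedef BitFieldSketch<int, 0, 4> ObjectBitsSketch;
  typedef BitFieldSketch<int, 4, 4> ValueBitsSketch;
  typedef BitFieldSketch<int, 8, 4> AddressBitsSketch;

  int main() {
    uint32_t key = ObjectBitsSketch::encode(3) | ValueBitsSketch::encode(5) |
                   AddressBitsSketch::encode(9);
    assert(ObjectBitsSketch::decode(key) == 3);
    assert(ValueBitsSketch::decode(key) == 5);
    assert(AddressBitsSketch::decode(key) == 9);
    return 0;
  }
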
266 292
267
268 // Trampoline stub to call into native code. To call safely into native code 293 // Trampoline stub to call into native code. To call safely into native code
269 // in the presence of compacting GC (which can move code objects) we need to 294 // in the presence of compacting GC (which can move code objects) we need to
270 // keep the code which called into native pinned in the memory. Currently the 295 // keep the code which called into native pinned in the memory. Currently the
271 // simplest approach is to generate such stub early enough so it can never be 296 // simplest approach is to generate such stub early enough so it can never be
272 // moved by GC 297 // moved by GC
273 class DirectCEntryStub : public PlatformCodeStub { 298 class DirectCEntryStub : public PlatformCodeStub {
274 public: 299 public:
275 explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {} 300 explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
276 void GenerateCall(MacroAssembler* masm, Register target); 301 void GenerateCall(MacroAssembler* masm, Register target);
277 302
278 private: 303 private:
279 bool NeedsImmovableCode() override { return true; } 304 bool NeedsImmovableCode() override { return true; }
280 305
281 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR(); 306 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
282 DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub); 307 DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
283 }; 308 };
284 309
285
286 class NameDictionaryLookupStub : public PlatformCodeStub { 310 class NameDictionaryLookupStub : public PlatformCodeStub {
287 public: 311 public:
288 enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; 312 enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
289 313
290 NameDictionaryLookupStub(Isolate* isolate, LookupMode mode) 314 NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
291 : PlatformCodeStub(isolate) { 315 : PlatformCodeStub(isolate) {
292 minor_key_ = LookupModeBits::encode(mode); 316 minor_key_ = LookupModeBits::encode(mode);
293 } 317 }
294 318
295 static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss, 319 static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
(...skipping 19 matching lines...)
315 NameDictionary::kHeaderSize + 339 NameDictionary::kHeaderSize +
316 NameDictionary::kElementsStartIndex * kPointerSize; 340 NameDictionary::kElementsStartIndex * kPointerSize;
317 341
318 LookupMode mode() const { return LookupModeBits::decode(minor_key_); } 342 LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
319 343
320 class LookupModeBits : public BitField<LookupMode, 0, 1> {}; 344 class LookupModeBits : public BitField<LookupMode, 0, 1> {};
321 345
322 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR(); 346 DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
323 DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub); 347 DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
324 }; 348 };
349
350 class FloatingPointHelper : public AllStatic {
351 public:
352 enum Destination { kFPRegisters, kCoreRegisters };
353
354 // Loads smis from r0 and r1 (right and left in binary operations) into
355 // floating point registers. Depending on the destination, the values end up
356 // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
357 // floating point registers, VFP3 must be supported. If core registers are
358 // requested when VFP3 is supported, d6 and d7 will be scratched.
359 static void LoadSmis(MacroAssembler* masm, Register scratch1,
360 Register scratch2);
361
362 // Loads objects from r0 and r1 (right and left in binary operations) into
363 // floating point registers. Depending on the destination, the values end up
364 // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
365 // floating point registers, VFP3 must be supported. If core registers are
366 // requested when VFP3 is supported, d6 and d7 will still be scratched. If
367 // either r0 or r1 is not a number (not a smi and not a heap number object) the
368 // not_number label is jumped to with r0 and r1 intact.
369 static void LoadOperands(MacroAssembler* masm, Register heap_number_map,
370 Register scratch1, Register scratch2,
371 Label* not_number);
372
373 // Convert the smi or heap number in object to an int32 using the rules
374 // for ToInt32 as described in ECMAScript section 9.5: the value is truncated
375 // and brought into the range -2^31 .. +2^31 - 1.
376 static void ConvertNumberToInt32(MacroAssembler* masm, Register object,
377 Register dst, Register heap_number_map,
378 Register scratch1, Register scratch2,
379 Register scratch3,
380 DoubleRegister double_scratch,
381 Label* not_int32);
382
383 // Converts the integer (untagged smi) in |src| to a double, storing
384 // the result to |double_dst|
385 static void ConvertIntToDouble(MacroAssembler* masm, Register src,
386 DoubleRegister double_dst);
387
388 // Converts the unsigned integer (untagged smi) in |src| to
389 // a double, storing the result to |double_dst|
390 static void ConvertUnsignedIntToDouble(MacroAssembler* masm, Register src,
391 DoubleRegister double_dst);
392
393 // Converts the integer (untagged smi) in |src| to
394 // a float, storing the result in |dst|
395 static void ConvertIntToFloat(MacroAssembler* masm, const DoubleRegister dst,
396 const Register src);
397
398 // Load the number from object into double_dst in the double format.
399 // Control will jump to not_int32 if the value cannot be exactly represented
400 // by a 32-bit integer.
401 // Floating point values in the 32-bit integer range that are not exact
402 // integers won't be loaded.
403 static void LoadNumberAsInt32Double(MacroAssembler* masm, Register object,
404 DoubleRegister double_dst,
405 DoubleRegister double_scratch,
406 Register heap_number_map,
407 Register scratch1, Register scratch2,
408 Label* not_int32);
409
410 // Loads the number from object into dst as a 32-bit integer.
411 // Control will jump to not_int32 if the object cannot be exactly represented
412 // by a 32-bit integer.
413 // Floating point values in the 32-bit integer range that are not exact
414 // integers won't be converted.
415 // scratch3 is not used when VFP3 is supported.
416 static void LoadNumberAsInt32(MacroAssembler* masm, Register object,
417 Register dst, Register heap_number_map,
418 Register scratch1, Register scratch2,
419 Register scratch3,
420 DoubleRegister double_scratch0,
421 DoubleRegister double_scratch1,
422 Label* not_int32);
423
424 // Generate non-VFP3 code to check if a double can be exactly represented by a
425 // 32-bit integer. This does not check for 0 or -0, which need
426 // to be checked for separately.
427 // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
428 // through otherwise.
429 // src1 and src2 will be clobbered.
430 //
431 // Expected input:
432 // - src1: higher (exponent) part of the double value.
433 // - src2: lower (mantissa) part of the double value.
434 // Output status:
435 // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
436 // - src2: contains 1.
437 // - other registers are clobbered.
438 static void DoubleIs32BitInteger(MacroAssembler* masm, Register src1,
439 Register src2, Register dst,
440 Register scratch, Label* not_int32);
441
442 // Generates code to call a C function to do a double operation using core
443 // registers. (Used when VFP3 is not supported.)
444 // This code never falls through, but returns with a heap number containing
445 // the result in r0.
446 // Register heapnumber_result must be a heap number in which the
447 // result of the operation will be stored.
448 // Requires the following layout on entry:
449 // r0: Left value (least significant part of mantissa).
450 // r1: Left value (sign, exponent, top of mantissa).
451 // r2: Right value (least significant part of mantissa).
452 // r3: Right value (sign, exponent, top of mantissa).
453 static void CallCCodeForDoubleOperation(MacroAssembler* masm, Token::Value op,
454 Register heap_number_result,
455 Register scratch);
456
457 private:
458 static void LoadNumber(MacroAssembler* masm, Register object,
459 DoubleRegister dst, Register heap_number_map,
460 Register scratch1, Register scratch2,
461 Label* not_number);
462 };
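
The ToInt32 truncation referenced in ConvertNumberToInt32 and LoadNumberAsInt32 above (ECMAScript section 9.5) truncates toward zero and reduces modulo 2^32 into the signed 32-bit range. A minimal sketch of that rule, assuming a two's-complement int32_t target; the helper name is illustrative:

  #include <cassert>
  #include <cmath>
  #include <cstdint>

  // Minimal sketch of ECMAScript ToInt32: NaN/Infinity map to 0; otherwise
  // truncate toward zero and reduce modulo 2^32 into [-2^31, 2^31 - 1].
  int32_t ToInt32Sketch(double value) {
    if (std::isnan(value) || std::isinf(value)) return 0;
    double truncated = std::trunc(value);
    double modulo = std::fmod(truncated, 4294967296.0);  // 2^32
    if (modulo < 0) modulo += 4294967296.0;
    // Map [2^31, 2^32) onto [-2^31, 0) via the unsigned -> signed cast
    // (well defined on two's-complement targets).
    return static_cast<int32_t>(static_cast<uint32_t>(modulo));
  }

  int main() {
    assert(ToInt32Sketch(3.7) == 3);                   // truncated toward zero
    assert(ToInt32Sketch(-3.7) == -3);
    assert(ToInt32Sketch(4294967296.0 + 5.0) == 5);    // wraps modulo 2^32
    assert(ToInt32Sketch(2147483648.0) == INT32_MIN);  // 2^31 maps to -2^31
    return 0;
  }
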
463
325 } // namespace internal 464 } // namespace internal
326 } // namespace v8 465 } // namespace v8
327 466
328 #endif // V8_PPC_CODE_STUBS_PPC_H_ 467 #endif // V8_S390_CODE_STUBS_S390_H_