| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
| 15 // | 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #ifndef V8_ARM_CODE_STUBS_ARM_H_ | 28 #ifndef V8_A64_CODE_STUBS_A64_H_ |
| 29 #define V8_ARM_CODE_STUBS_ARM_H_ | 29 #define V8_A64_CODE_STUBS_A64_H_ |
| 30 | 30 |
| 31 #include "ic-inl.h" | 31 #include "ic-inl.h" |
| 32 | 32 |
| 33 namespace v8 { | 33 namespace v8 { |
| 34 namespace internal { | 34 namespace internal { |
| 35 | 35 |
| 36 | 36 |
| 37 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code); | 37 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code); |
| 38 | 38 |
| 39 | 39 |
| 40 // Compute a transcendental math function natively, or call the | 40 // Compute a transcendental math function natively, or call the |
| 41 // TranscendentalCache runtime function. | 41 // TranscendentalCache runtime function. |
| 42 class TranscendentalCacheStub: public PlatformCodeStub { | 42 class TranscendentalCacheStub: public PlatformCodeStub { |
| 43 public: | 43 public: |
| 44 enum ArgumentType { | 44 enum ArgumentType { |
| 45 TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits, | 45 TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits, |
| 46 UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits | 46 UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits |
| 47 }; | 47 }; |
| 48 | 48 |
| 49 TranscendentalCacheStub(TranscendentalCache::Type type, | 49 TranscendentalCacheStub(TranscendentalCache::Type type, |
| 50 ArgumentType argument_type) | 50 ArgumentType argument_type) |
| 51 : type_(type), argument_type_(argument_type) { } | 51 : type_(type), argument_type_(argument_type) { } |
| 52 void Generate(MacroAssembler* masm); | 52 void Generate(MacroAssembler* masm); |
| 53 private: | 53 private: |
| 54 TranscendentalCache::Type type_; | 54 TranscendentalCache::Type type_; |
| 55 ArgumentType argument_type_; | 55 ArgumentType argument_type_; |
| 56 void GenerateCallCFunction(MacroAssembler* masm, Register scratch); | |
| 57 | 56 |
| 58 Major MajorKey() { return TranscendentalCache; } | 57 Major MajorKey() { return TranscendentalCache; } |
| 59 int MinorKey() { return type_ | argument_type_; } | 58 int MinorKey() { return type_ | argument_type_; } |
| 59 ExternalReference CFunction(Isolate* isolate); |
| 60 Runtime::FunctionId RuntimeFunction(); | 60 Runtime::FunctionId RuntimeFunction(); |
| 61 }; | 61 }; |
| 62 | 62 |
| 63 | 63 |
| 64 class StoreBufferOverflowStub: public PlatformCodeStub { | 64 class StoreBufferOverflowStub: public PlatformCodeStub { |
| 65 public: | 65 public: |
| 66 explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) | 66 explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) |
| 67 : save_doubles_(save_fp) {} | 67 : save_doubles_(save_fp) { } |
| 68 | 68 |
| 69 void Generate(MacroAssembler* masm); | 69 void Generate(MacroAssembler* masm); |
| 70 | 70 |
| 71 virtual bool IsPregenerated() { return true; } | 71 virtual bool IsPregenerated() { return true; } |
| 72 static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); | 72 static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); |
| 73 virtual bool SometimesSetsUpAFrame() { return false; } | 73 virtual bool SometimesSetsUpAFrame() { return false; } |
| 74 | 74 |
| 75 private: | 75 private: |
| 76 SaveFPRegsMode save_doubles_; | 76 SaveFPRegsMode save_doubles_; |
| 77 | 77 |
| (...skipping 25 matching lines...) Expand all Loading... |
| 103 class OpBits: public BitField<Token::Value, 1, 7> {}; | 103 class OpBits: public BitField<Token::Value, 1, 7> {}; |
| 104 class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {}; | 104 class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {}; |
| 105 | 105 |
| 106 Major MajorKey() { return UnaryOp; } | 106 Major MajorKey() { return UnaryOp; } |
| 107 int MinorKey() { | 107 int MinorKey() { |
| 108 return ModeBits::encode(mode_) | 108 return ModeBits::encode(mode_) |
| 109 | OpBits::encode(op_) | 109 | OpBits::encode(op_) |
| 110 | OperandTypeInfoBits::encode(operand_type_); | 110 | OperandTypeInfoBits::encode(operand_type_); |
| 111 } | 111 } |
| 112 | 112 |
| 113 // Note: A lot of the helper functions below will vanish when we use virtual | |
| 114 // function instead of switch more often. | |
| 115 void Generate(MacroAssembler* masm); | 113 void Generate(MacroAssembler* masm); |
| 116 | 114 |
| 117 void GenerateTypeTransition(MacroAssembler* masm); | 115 void GenerateTypeTransition(MacroAssembler* masm); |
| 118 | 116 |
| 119 void GenerateSmiStub(MacroAssembler* masm); | 117 void GenerateSmiStub(MacroAssembler* masm); |
| 120 void GenerateSmiStubSub(MacroAssembler* masm); | 118 void GenerateSmiStubSub(MacroAssembler* masm); |
| 121 void GenerateSmiStubBitNot(MacroAssembler* masm); | 119 void GenerateSmiStubBitNot(MacroAssembler* masm); |
| 122 void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow); | 120 void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow); |
| 123 void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow); | 121 void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow); |
| 124 | 122 |
| 125 void GenerateNumberStub(MacroAssembler* masm); | 123 void GenerateNumberStub(MacroAssembler* masm); |
| 126 void GenerateNumberStubSub(MacroAssembler* masm); | 124 void GenerateNumberStubSub(MacroAssembler* masm); |
| 127 void GenerateNumberStubBitNot(MacroAssembler* masm); | 125 void GenerateNumberStubBitNot(MacroAssembler* masm); |
| 128 void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow); | 126 void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow); |
| 129 void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow); | 127 void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow); |
| 130 | 128 |
| 131 void GenerateGenericStub(MacroAssembler* masm); | 129 void GenerateGenericStub(MacroAssembler* masm); |
| 132 void GenerateGenericStubSub(MacroAssembler* masm); | |
| 133 void GenerateGenericStubBitNot(MacroAssembler* masm); | |
| 134 void GenerateGenericCodeFallback(MacroAssembler* masm); | |
| 135 | 130 |
| 136 virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; } | 131 virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; } |
| 137 | 132 |
| 138 virtual InlineCacheState GetICState() { | 133 virtual InlineCacheState GetICState() { |
| 139 return UnaryOpIC::ToState(operand_type_); | 134 return UnaryOpIC::ToState(operand_type_); |
| 140 } | 135 } |
| 141 | 136 |
| 142 virtual void FinishCode(Handle<Code> code) { | 137 virtual void FinishCode(Handle<Code> code) { |
| 143 code->set_unary_op_type(operand_type_); | 138 code->set_unary_op_type(operand_type_); |
| 144 } | 139 } |
| 145 }; | 140 }; |
| 146 | 141 |
| 147 | 142 |
| 148 class StringHelper : public AllStatic { | 143 class StringHelper : public AllStatic { |
| 149 public: | 144 public: |
| 150 // Generate code for copying characters using a simple loop. This should only | |
| 151 // be used in places where the number of characters is small and the | |
| 152 // additional setup and checking in GenerateCopyCharactersLong adds too much | |
| 153 // overhead. Copying of overlapping regions is not supported. | |
| 154 // Dest register ends at the position after the last character written. | |
| 155 static void GenerateCopyCharacters(MacroAssembler* masm, | |
| 156 Register dest, | |
| 157 Register src, | |
| 158 Register count, | |
| 159 Register scratch, | |
| 160 bool ascii); | |
| 161 | |
| 162 // Generate code for copying a large number of characters. This function | |
| 163 // is allowed to spend extra time setting up conditions to make copying | |
| 164 // faster. Copying of overlapping regions is not supported. | |
| 165 // Dest register ends at the position after the last character written. | |
| 166 static void GenerateCopyCharactersLong(MacroAssembler* masm, | |
| 167 Register dest, | |
| 168 Register src, | |
| 169 Register count, | |
| 170 Register scratch1, | |
| 171 Register scratch2, | |
| 172 Register scratch3, | |
| 173 Register scratch4, | |
| 174 Register scratch5, | |
| 175 int flags); | |
| 176 | |
| 177 | |
| 178 // Probe the string table for a two character string. If the string is | 145 // Probe the string table for a two character string. If the string is |
| 179 // not found by probing a jump to the label not_found is performed. This jump | 146 // not found by probing a jump to the label not_found is performed. This jump |
| 180 // does not guarantee that the string is not in the string table. If the | 147 // does not guarantee that the string is not in the string table. If the |
| 181 // string is found the code falls through with the string in register r0. | 148 // string is found the code falls through with the string in register x0. |
| 182 // Contents of both c1 and c2 registers are modified. At the exit c1 is | 149 // Contents of both c1 and c2 registers are modified. At the exit c1 is |
| 183 // guaranteed to contain halfword with low and high bytes equal to | 150 // guaranteed to contain halfword with low and high bytes equal to |
| 184 // initial contents of c1 and c2 respectively. | 151 // initial contents of c1 and c2 respectively. |
| 185 static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, | 152 static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, |
| 186 Register c1, | 153 Register c1, |
| 187 Register c2, | 154 Register c2, |
| 188 Register scratch1, | 155 Register scratch1, |
| 189 Register scratch2, | 156 Register scratch2, |
| 190 Register scratch3, | 157 Register scratch3, |
| 191 Register scratch4, | 158 Register scratch4, |
| 192 Register scratch5, | 159 Register scratch5, |
| 193 Label* not_found); | 160 Label* not_found); |
| 194 | 161 |
| 162 |
| 163 // Get the instance types for a pair of input strings. |
| 164 static void LoadPairInstanceTypes(MacroAssembler* masm, |
| 165 Register first_type, |
| 166 Register second_type, |
| 167 Register first_string, |
| 168 Register second_string); |
| 169 |
| 170 |
| 195 // Generate string hash. | 171 // Generate string hash. |
| 196 static void GenerateHashInit(MacroAssembler* masm, | 172 static void GenerateHashInit(MacroAssembler* masm, |
| 197 Register hash, | 173 Register hash, |
| 198 Register character); | 174 Register character); |
| 199 | 175 |
| 200 static void GenerateHashAddCharacter(MacroAssembler* masm, | 176 static void GenerateHashAddCharacter(MacroAssembler* masm, |
| 201 Register hash, | 177 Register hash, |
| 202 Register character); | 178 Register character); |
| 203 | 179 |
| 204 static void GenerateHashGetHash(MacroAssembler* masm, | 180 static void GenerateHashGetHash(MacroAssembler* masm, |
| 205 Register hash); | 181 Register hash, |
| 182 Register scratch); |
| 206 | 183 |
| 207 private: | 184 private: |
| 208 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); | 185 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); |
| 209 }; | 186 }; |
| 210 | 187 |
| 211 | 188 |
| 212 // Flag that indicates how to generate code for the stub StringAddStub. | 189 // Flag that indicates how to generate code for the stub StringAddStub. |
| 213 enum StringAddFlags { | 190 enum StringAddFlags { |
| 214 NO_STRING_ADD_FLAGS = 1 << 0, | 191 NO_STRING_ADD_FLAGS = 1 << 0, |
| 215 // Omit left string check in stub (left is definitely a string). | 192 // Omit left string check in stub (left is definitely a string.) |
| 216 NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1, | 193 NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1, |
| 217 // Omit right string check in stub (right is definitely a string). | 194 // Omit right string check in stub (right is definitely a string.) |
| 218 NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2, | 195 NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2, |
| 219 // Stub needs a frame before calling the runtime | 196 // Stub needs a frame before calling the runtime |
| 220 ERECT_FRAME = 1 << 3, | 197 ERECT_FRAME = 1 << 3, |
| 221 // Omit both string checks in stub. | 198 // Omit both string checks in stub. |
| 222 NO_STRING_CHECK_IN_STUB = | 199 NO_STRING_CHECK_IN_STUB = |
| 223 NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB | 200 NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB |
| 224 }; | 201 }; |
| 225 | 202 |
| 226 | 203 |
| 227 class StringAddStub: public PlatformCodeStub { | 204 class StringAddStub: public PlatformCodeStub { |
| 228 public: | 205 public: |
| 229 explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} | 206 explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} |
| 230 | 207 |
| 231 private: | 208 private: |
| 232 Major MajorKey() { return StringAdd; } | 209 Major MajorKey() { return StringAdd; } |
| 233 int MinorKey() { return flags_; } | 210 int MinorKey() { return flags_; } |
| 234 | 211 |
| 235 void Generate(MacroAssembler* masm); | 212 void Generate(MacroAssembler* masm); |
| 236 | 213 |
| 237 void GenerateConvertArgument(MacroAssembler* masm, | 214 void GenerateConvertArgument(MacroAssembler* masm, |
| 238 int stack_offset, | |
| 239 Register arg, | 215 Register arg, |
| 240 Register scratch1, | 216 Register scratch1, |
| 241 Register scratch2, | 217 Register scratch2, |
| 242 Register scratch3, | 218 Register scratch3, |
| 243 Register scratch4, | 219 Register scratch4, |
| 244 Label* slow); | 220 Label* slow); |
| 245 | 221 |
| 246 void GenerateRegisterArgsPush(MacroAssembler* masm); | 222 void GenerateRegisterArgsPush(MacroAssembler* masm); |
| 247 void GenerateRegisterArgsPop(MacroAssembler* masm); | 223 void GenerateRegisterArgsPop(MacroAssembler* masm); |
| 248 | 224 |
| 249 const StringAddFlags flags_; | 225 const StringAddFlags flags_; |
| 250 }; | 226 }; |
| 251 | 227 |
| 252 | 228 |
| 253 class SubStringStub: public PlatformCodeStub { | |
| 254 public: | |
| 255 SubStringStub() {} | |
| 256 | |
| 257 private: | |
| 258 Major MajorKey() { return SubString; } | |
| 259 int MinorKey() { return 0; } | |
| 260 | |
| 261 void Generate(MacroAssembler* masm); | |
| 262 }; | |
| 263 | |
| 264 | |
| 265 | |
| 266 class StringCompareStub: public PlatformCodeStub { | |
| 267 public: | |
| 268 StringCompareStub() { } | |
| 269 | |
| 270 // Compares two flat ASCII strings and returns result in r0. | |
| 271 static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | |
| 272 Register left, | |
| 273 Register right, | |
| 274 Register scratch1, | |
| 275 Register scratch2, | |
| 276 Register scratch3, | |
| 277 Register scratch4); | |
| 278 | |
| 279 // Compares two flat ASCII strings for equality and returns result | |
| 280 // in r0. | |
| 281 static void GenerateFlatAsciiStringEquals(MacroAssembler* masm, | |
| 282 Register left, | |
| 283 Register right, | |
| 284 Register scratch1, | |
| 285 Register scratch2, | |
| 286 Register scratch3); | |
| 287 | |
| 288 private: | |
| 289 virtual Major MajorKey() { return StringCompare; } | |
| 290 virtual int MinorKey() { return 0; } | |
| 291 virtual void Generate(MacroAssembler* masm); | |
| 292 | |
| 293 static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm, | |
| 294 Register left, | |
| 295 Register right, | |
| 296 Register length, | |
| 297 Register scratch1, | |
| 298 Register scratch2, | |
| 299 Label* chars_not_equal); | |
| 300 }; | |
| 301 | |
| 302 | |
| 303 // This stub can convert a signed int32 to a heap number (double). It does | |
| 304 // not work for int32s that are in Smi range! No GC occurs during this stub | |
| 305 // so you don't have to set up the frame. | |
| 306 class WriteInt32ToHeapNumberStub : public PlatformCodeStub { | |
| 307 public: | |
| 308 WriteInt32ToHeapNumberStub(Register the_int, | |
| 309 Register the_heap_number, | |
| 310 Register scratch) | |
| 311 : the_int_(the_int), | |
| 312 the_heap_number_(the_heap_number), | |
| 313 scratch_(scratch) { } | |
| 314 | |
| 315 bool IsPregenerated(); | |
| 316 static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); | |
| 317 | |
| 318 private: | |
| 319 Register the_int_; | |
| 320 Register the_heap_number_; | |
| 321 Register scratch_; | |
| 322 | |
| 323 // Minor key encoding in 16 bits. | |
| 324 class IntRegisterBits: public BitField<int, 0, 4> {}; | |
| 325 class HeapNumberRegisterBits: public BitField<int, 4, 4> {}; | |
| 326 class ScratchRegisterBits: public BitField<int, 8, 4> {}; | |
| 327 | |
| 328 Major MajorKey() { return WriteInt32ToHeapNumber; } | |
| 329 int MinorKey() { | |
| 330 // Encode the parameters in a unique 16 bit value. | |
| 331 return IntRegisterBits::encode(the_int_.code()) | |
| 332 | HeapNumberRegisterBits::encode(the_heap_number_.code()) | |
| 333 | ScratchRegisterBits::encode(scratch_.code()); | |
| 334 } | |
| 335 | |
| 336 void Generate(MacroAssembler* masm); | |
| 337 }; | |
| 338 | |
| 339 | |
| 340 class NumberToStringStub: public PlatformCodeStub { | 229 class NumberToStringStub: public PlatformCodeStub { |
| 341 public: | 230 public: |
| 342 NumberToStringStub() { } | 231 NumberToStringStub() { } |
| 343 | 232 |
| 233 enum ObjectType { |
| 234 OBJECT_IS_SMI = 0, |
| 235 OBJECT_IS_NOT_SMI = 1 |
| 236 }; |
| 237 |
| 344 // Generate code to do a lookup in the number string cache. If the number in | 238 // Generate code to do a lookup in the number string cache. If the number in |
| 345 // the register object is found in the cache the generated code falls through | 239 // the register object is found in the cache the generated code falls through |
| 346 // with the result in the result register. The object and the result register | 240 // with the result in the result register. The object and the result register |
| 347 // can be the same. If the number is not found in the cache the code jumps to | 241 // can be the same. If the number is not found in the cache the code jumps to |
| 348 // the label not_found with only the content of register object unchanged. | 242 // the label not_found with only the content of register object unchanged. |
| 349 static void GenerateLookupNumberStringCache(MacroAssembler* masm, | 243 static void GenerateLookupNumberStringCache(MacroAssembler* masm, |
| 350 Register object, | 244 Register object, |
| 351 Register result, | 245 Register result, |
| 352 Register scratch1, | 246 Register scratch1, |
| 353 Register scratch2, | 247 Register scratch2, |
| 354 Register scratch3, | 248 Register scratch3, |
| 355 bool object_is_smi, | 249 ObjectType object_type, |
| 356 Label* not_found); | 250 Label* not_found); |
| 357 | 251 |
| 358 private: | 252 private: |
| 359 Major MajorKey() { return NumberToString; } | 253 Major MajorKey() { return NumberToString; } |
| 360 int MinorKey() { return 0; } | 254 int MinorKey() { return 0; } |
| 361 | 255 |
| 362 void Generate(MacroAssembler* masm); | 256 void Generate(MacroAssembler* masm); |
| 363 }; | 257 }; |
| 364 | 258 |
| 365 | 259 |
| 366 class RecordWriteStub: public PlatformCodeStub { | 260 class RecordWriteStub: public PlatformCodeStub { |
| 367 public: | 261 public: |
| 262 // Stub to record the write of 'value' at 'address' in 'object'. |
| 263 // Typically 'address' = 'object' + <some offset>. |
| 264 // See MacroAssembler::RecordWriteField() for example. |
| 368 RecordWriteStub(Register object, | 265 RecordWriteStub(Register object, |
| 369 Register value, | 266 Register value, |
| 370 Register address, | 267 Register address, |
| 371 RememberedSetAction remembered_set_action, | 268 RememberedSetAction remembered_set_action, |
| 372 SaveFPRegsMode fp_mode) | 269 SaveFPRegsMode fp_mode) |
| 373 : object_(object), | 270 : object_(object), |
| 374 value_(value), | 271 value_(value), |
| 375 address_(address), | 272 address_(address), |
| 376 remembered_set_action_(remembered_set_action), | 273 remembered_set_action_(remembered_set_action), |
| 377 save_fp_regs_mode_(fp_mode), | 274 save_fp_regs_mode_(fp_mode), |
| 378 regs_(object, // An input reg. | 275 regs_(object, // An input reg. |
| 379 address, // An input reg. | 276 address, // An input reg. |
| 380 value) { // One scratch reg. | 277 value) { // One scratch reg. |
| 381 } | 278 } |
| 382 | 279 |
| 383 enum Mode { | 280 enum Mode { |
| 384 STORE_BUFFER_ONLY, | 281 STORE_BUFFER_ONLY, |
| 385 INCREMENTAL, | 282 INCREMENTAL, |
| 386 INCREMENTAL_COMPACTION | 283 INCREMENTAL_COMPACTION |
| 387 }; | 284 }; |
| 388 | 285 |
| 389 virtual bool IsPregenerated(); | 286 virtual bool IsPregenerated(); |
| 390 static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); | 287 static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); |
| 391 virtual bool SometimesSetsUpAFrame() { return false; } | 288 virtual bool SometimesSetsUpAFrame() { return false; } |
| 392 | 289 |
| 393 static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { | 290 static Mode GetMode(Code* stub) { |
| 394 masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20)); | 291 // Find the mode depending on the first two instructions. |
| 395 ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos))); | 292 Instruction* instr1 = |
| 396 } | 293 reinterpret_cast<Instruction*>(stub->instruction_start()); |
| 294 Instruction* instr2 = instr1->following(); |
| 397 | 295 |
| 398 static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { | 296 if (instr1->IsUncondBranchImm()) { |
| 399 masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27); | 297 ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code())); |
| 400 ASSERT(Assembler::IsBranch(masm->instr_at(pos))); | |
| 401 } | |
| 402 | |
| 403 static Mode GetMode(Code* stub) { | |
| 404 Instr first_instruction = Assembler::instr_at(stub->instruction_start()); | |
| 405 Instr second_instruction = Assembler::instr_at(stub->instruction_start() + | |
| 406 Assembler::kInstrSize); | |
| 407 | |
| 408 if (Assembler::IsBranch(first_instruction)) { | |
| 409 return INCREMENTAL; | 298 return INCREMENTAL; |
| 410 } | 299 } |
| 411 | 300 |
| 412 ASSERT(Assembler::IsTstImmediate(first_instruction)); | 301 ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code())); |
| 413 | 302 |
| 414 if (Assembler::IsBranch(second_instruction)) { | 303 if (instr2->IsUncondBranchImm()) { |
| 415 return INCREMENTAL_COMPACTION; | 304 return INCREMENTAL_COMPACTION; |
| 416 } | 305 } |
| 417 | 306 |
| 418 ASSERT(Assembler::IsTstImmediate(second_instruction)); | 307 ASSERT(instr2->IsPCRelAddressing()); |
| 419 | 308 |
| 420 return STORE_BUFFER_ONLY; | 309 return STORE_BUFFER_ONLY; |
| 421 } | 310 } |
| 422 | 311 |
| 312 // We patch the first two instructions of the stub back and forth between an |
| 313 // adr and branch when we start and stop incremental heap marking. |
| 314 // The branch is |
| 315 // b label |
| 316 // The adr is |
| 317 // adr xzr label |
| 318 // so effectively a nop. |
| 423 static void Patch(Code* stub, Mode mode) { | 319 static void Patch(Code* stub, Mode mode) { |
| 424 MacroAssembler masm(NULL, | 320 // We are going to patch the first two instructions of the stub. |
| 425 stub->instruction_start(), | 321 PatchingAssembler patcher( |
| 426 stub->instruction_size()); | 322 reinterpret_cast<Instruction*>(stub->instruction_start()), 2); |
| 323 Instruction* instr1 = patcher.InstructionAt(0); |
| 324 Instruction* instr2 = patcher.InstructionAt(kInstructionSize); |
| 325 // Instructions must be either 'adr' or 'b'. |
| 326 ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm()); |
| 327 ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm()); |
| 328 // Retrieve the offsets to the labels. |
| 329 int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset(); |
| 330 int32_t offset_to_incremental_compacting = instr2->ImmPCOffset(); |
| 331 |
| 427 switch (mode) { | 332 switch (mode) { |
| 428 case STORE_BUFFER_ONLY: | 333 case STORE_BUFFER_ONLY: |
| 429 ASSERT(GetMode(stub) == INCREMENTAL || | 334 ASSERT(GetMode(stub) == INCREMENTAL || |
| 430 GetMode(stub) == INCREMENTAL_COMPACTION); | 335 GetMode(stub) == INCREMENTAL_COMPACTION); |
| 431 PatchBranchIntoNop(&masm, 0); | 336 patcher.adr(xzr, offset_to_incremental_noncompacting); |
| 432 PatchBranchIntoNop(&masm, Assembler::kInstrSize); | 337 patcher.adr(xzr, offset_to_incremental_compacting); |
| 433 break; | 338 break; |
| 434 case INCREMENTAL: | 339 case INCREMENTAL: |
| 435 ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); | 340 ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); |
| 436 PatchNopIntoBranch(&masm, 0); | 341 patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2); |
| 342 patcher.adr(xzr, offset_to_incremental_compacting); |
| 437 break; | 343 break; |
| 438 case INCREMENTAL_COMPACTION: | 344 case INCREMENTAL_COMPACTION: |
| 439 ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); | 345 ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); |
| 440 PatchNopIntoBranch(&masm, Assembler::kInstrSize); | 346 patcher.adr(xzr, offset_to_incremental_noncompacting); |
| 347 patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2); |
| 441 break; | 348 break; |
| 442 } | 349 } |
| 443 ASSERT(GetMode(stub) == mode); | 350 ASSERT(GetMode(stub) == mode); |
| 444 CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize); | |
| 445 } | 351 } |
| 446 | 352 |
| 447 private: | 353 private: |
| 448 // This is a helper class for freeing up 3 scratch registers. The input is | 354 // This is a helper class to manage the registers associated with the stub. |
| 449 // two registers that must be preserved and one scratch register provided by | 355 // The 'object' and 'address' registers must be preserved. |
| 450 // the caller. | |
| 451 class RegisterAllocation { | 356 class RegisterAllocation { |
| 452 public: | 357 public: |
| 453 RegisterAllocation(Register object, | 358 RegisterAllocation(Register object, |
| 454 Register address, | 359 Register address, |
| 455 Register scratch0) | 360 Register scratch) |
| 456 : object_(object), | 361 : object_(object), |
| 457 address_(address), | 362 address_(address), |
| 458 scratch0_(scratch0) { | 363 scratch0_(scratch), |
| 459 ASSERT(!AreAliased(scratch0, object, address, no_reg)); | 364 saved_regs_(kCallerSaved) { |
| 460 scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_); | 365 ASSERT(!AreAliased(scratch, object, address)); |
| 366 |
| 367 // We would like to require more scratch registers for this stub, |
| 368 // but the number of registers comes down to the ones used in |
| 369 // FullCodeGen::SetVar(), which is architecture independent. |
| 370 // We allocate 2 extra scratch registers that we'll save on the stack. |
| 371 CPURegList pool_available = GetValidRegistersForAllocation(); |
| 372 CPURegList used_regs(object, address, scratch); |
| 373 pool_available.Remove(used_regs); |
| 374 scratch1_ = Register(pool_available.PopLowestIndex()); |
| 375 scratch2_ = Register(pool_available.PopLowestIndex()); |
| 376 |
| 377 // SaveCallerRegisters method needs to save caller-saved registers; however, |
| 378 // we don't bother saving x8, x9, ip0 and ip1 because they are used as |
| 379 // scratch registers by the MacroAssembler. |
| 380 saved_regs_.Remove(ip0); |
| 381 saved_regs_.Remove(ip1); |
| 382 saved_regs_.Remove(x8); |
| 383 saved_regs_.Remove(x9); |
| 384 |
| 385 // The scratch registers will be restored by other means so we don't need |
| 386 // to save them with the other caller saved registers. |
| 387 saved_regs_.Remove(scratch0_); |
| 388 saved_regs_.Remove(scratch1_); |
| 389 saved_regs_.Remove(scratch2_); |
| 461 } | 390 } |
| 462 | 391 |
| 463 void Save(MacroAssembler* masm) { | 392 void Save(MacroAssembler* masm) { |
| 464 ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); | |
| 465 // We don't have to save scratch0_ because it was given to us as | 393 // We don't have to save scratch0_ because it was given to us as |
| 466 // a scratch register. | 394 // a scratch register. |
| 467 masm->push(scratch1_); | 395 masm->Push(scratch1_, scratch2_); |
| 468 } | 396 } |
| 469 | 397 |
| 470 void Restore(MacroAssembler* masm) { | 398 void Restore(MacroAssembler* masm) { |
| 471 masm->pop(scratch1_); | 399 masm->Pop(scratch2_, scratch1_); |
| 472 } | 400 } |
| 473 | 401 |
| 474 // If we have to call into C then we need to save and restore all caller- | 402 // If we have to call into C then we need to save and restore all caller- |
| 475 // saved registers that were not already preserved. The scratch registers | 403 // saved registers that were not already preserved. |
| 476 // will be restored by other means so we don't bother pushing them here. | |
| 477 void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { | 404 void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { |
| 478 masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); | 405 // TODO(all): This can be very expensive, and it is likely that not every |
| 406 // register will need to be preserved. Can we improve this? |
| 407 masm->PushCPURegList(saved_regs_); |
| 479 if (mode == kSaveFPRegs) { | 408 if (mode == kSaveFPRegs) { |
| 480 masm->SaveFPRegs(sp, scratch0_); | 409 masm->PushCPURegList(kCallerSavedFP); |
| 481 } | 410 } |
| 482 } | 411 } |
| 483 | 412 |
| 484 inline void RestoreCallerSaveRegisters(MacroAssembler*masm, | 413 inline void RestoreCallerSaveRegisters(MacroAssembler*masm, |
| 485 SaveFPRegsMode mode) { | 414 SaveFPRegsMode mode) { |
| 415 // TODO(all): This can be very expensive, and it is likely that not every |
| 416 // register will need to be preserved. Can we improve this? |
| 486 if (mode == kSaveFPRegs) { | 417 if (mode == kSaveFPRegs) { |
| 487 masm->RestoreFPRegs(sp, scratch0_); | 418 masm->PopCPURegList(kCallerSavedFP); |
| 488 } | 419 } |
| 489 masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); | 420 masm->PopCPURegList(saved_regs_); |
| 490 } | 421 } |
| 491 | 422 |
| 492 inline Register object() { return object_; } | 423 inline Register object() { return object_; } |
| 493 inline Register address() { return address_; } | 424 inline Register address() { return address_; } |
| 494 inline Register scratch0() { return scratch0_; } | 425 inline Register scratch0() { return scratch0_; } |
| 495 inline Register scratch1() { return scratch1_; } | 426 inline Register scratch1() { return scratch1_; } |
| 427 inline Register scratch2() { return scratch2_; } |
| 496 | 428 |
| 497 private: | 429 private: |
| 498 Register object_; | 430 Register object_; |
| 499 Register address_; | 431 Register address_; |
| 500 Register scratch0_; | 432 Register scratch0_; |
| 501 Register scratch1_; | 433 Register scratch1_; |
| 434 Register scratch2_; |
| 435 CPURegList saved_regs_; |
| 502 | 436 |
| 503 Register GetRegThatIsNotOneOf(Register r1, | 437 // TODO(all): We should consider moving this somewhere else. |
| 504 Register r2, | 438 static CPURegList GetValidRegistersForAllocation() { |
| 505 Register r3) { | 439 // The list of valid registers for allocation is defined as all the |
| 506 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { | 440 // registers without those with a special meaning. |
| 507 Register candidate = Register::FromAllocationIndex(i); | 441 // |
| 508 if (candidate.is(r1)) continue; | 442 // The default list excludes registers x26 to x31 because they are |
| 509 if (candidate.is(r2)) continue; | 443 // reserved for the following purpose: |
| 510 if (candidate.is(r3)) continue; | 444 // - x26 root register |
| 511 return candidate; | 445 // - x27 context pointer register |
| 512 } | 446 // - x28 jssp |
| 513 UNREACHABLE(); | 447 // - x29 frame pointer |
| 514 return no_reg; | 448 // - x30 link register(lr) |
| 449 // - x31 xzr/stack pointer |
| 450 CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25); |
| 451 |
| 452 // We also remove MacroAssembler's scratch registers. |
| 453 list.Remove(ip0); |
| 454 list.Remove(ip1); |
| 455 list.Remove(x8); |
| 456 list.Remove(x9); |
| 457 |
| 458 return list; |
| 515 } | 459 } |
| 460 |
| 516 friend class RecordWriteStub; | 461 friend class RecordWriteStub; |
| 517 }; | 462 }; |
| 518 | 463 |
| 464 // A list of stub variants which are pregenerated. |
| 465 // The variants are stored in the same format as the minor key, so |
| 466 // MinorKeyFor() can be used to populate and check this list. |
| 467 static const int kAheadOfTime[]; |
| 468 |
| 469 void Generate(MacroAssembler* masm); |
| 470 void GenerateIncremental(MacroAssembler* masm, Mode mode); |
| 471 |
| 519 enum OnNoNeedToInformIncrementalMarker { | 472 enum OnNoNeedToInformIncrementalMarker { |
| 520 kReturnOnNoNeedToInformIncrementalMarker, | 473 kReturnOnNoNeedToInformIncrementalMarker, |
| 521 kUpdateRememberedSetOnNoNeedToInformIncrementalMarker | 474 kUpdateRememberedSetOnNoNeedToInformIncrementalMarker |
| 522 }; | 475 }; |
| 523 | 476 |
| 524 void Generate(MacroAssembler* masm); | |
| 525 void GenerateIncremental(MacroAssembler* masm, Mode mode); | |
| 526 void CheckNeedsToInformIncrementalMarker( | 477 void CheckNeedsToInformIncrementalMarker( |
| 527 MacroAssembler* masm, | 478 MacroAssembler* masm, |
| 528 OnNoNeedToInformIncrementalMarker on_no_need, | 479 OnNoNeedToInformIncrementalMarker on_no_need, |
| 529 Mode mode); | 480 Mode mode); |
| 530 void InformIncrementalMarker(MacroAssembler* masm, Mode mode); | 481 void InformIncrementalMarker(MacroAssembler* masm, Mode mode); |
| 531 | 482 |
| 532 Major MajorKey() { return RecordWrite; } | 483 Major MajorKey() { return RecordWrite; } |
| 533 | 484 |
| 534 int MinorKey() { | 485 int MinorKey() { |
| 535 return ObjectBits::encode(object_.code()) | | 486 return MinorKeyFor(object_, value_, address_, remembered_set_action_, |
| 536 ValueBits::encode(value_.code()) | | 487 save_fp_regs_mode_); |
| 537 AddressBits::encode(address_.code()) | | 488 } |
| 538 RememberedSetActionBits::encode(remembered_set_action_) | | 489 |
| 539 SaveFPRegsModeBits::encode(save_fp_regs_mode_); | 490 static int MinorKeyFor(Register object, |
| 491 Register value, |
| 492 Register address, |
| 493 RememberedSetAction action, |
| 494 SaveFPRegsMode fp_mode) { |
| 495 ASSERT(object.Is64Bits()); |
| 496 ASSERT(value.Is64Bits()); |
| 497 ASSERT(address.Is64Bits()); |
| 498 return ObjectBits::encode(object.code()) | |
| 499 ValueBits::encode(value.code()) | |
| 500 AddressBits::encode(address.code()) | |
| 501 RememberedSetActionBits::encode(action) | |
| 502 SaveFPRegsModeBits::encode(fp_mode); |
| 540 } | 503 } |
| 541 | 504 |
| 542 void Activate(Code* code) { | 505 void Activate(Code* code) { |
| 543 code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); | 506 code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); |
| 544 } | 507 } |
| 545 | 508 |
| 546 class ObjectBits: public BitField<int, 0, 4> {}; | 509 class ObjectBits: public BitField<int, 0, 5> {}; |
| 547 class ValueBits: public BitField<int, 4, 4> {}; | 510 class ValueBits: public BitField<int, 5, 5> {}; |
| 548 class AddressBits: public BitField<int, 8, 4> {}; | 511 class AddressBits: public BitField<int, 10, 5> {}; |
| 549 class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {}; | 512 class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {}; |
| 550 class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {}; | 513 class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {}; |
| 551 | 514 |
| 552 Register object_; | 515 Register object_; |
| 553 Register value_; | 516 Register value_; |
| 554 Register address_; | 517 Register address_; |
| 555 RememberedSetAction remembered_set_action_; | 518 RememberedSetAction remembered_set_action_; |
| 556 SaveFPRegsMode save_fp_regs_mode_; | 519 SaveFPRegsMode save_fp_regs_mode_; |
| 557 Label slow_; | 520 Label slow_; |
| 558 RegisterAllocation regs_; | 521 RegisterAllocation regs_; |
| 559 }; | 522 }; |
| 560 | 523 |
| 561 | 524 |
| 562 // Enter C code from generated RegExp code in a way that allows | 525 // Helper to call C++ functions from generated code. The caller must prepare |
| 563 // the C code to fix the return address in case of a GC. | 526 // the exit frame before doing the call with GenerateCall. |
| 564 // Currently only needed on ARM. | |
| 565 class RegExpCEntryStub: public PlatformCodeStub { | |
| 566 public: | |
| 567 RegExpCEntryStub() {} | |
| 568 virtual ~RegExpCEntryStub() {} | |
| 569 void Generate(MacroAssembler* masm); | |
| 570 | |
| 571 private: | |
| 572 Major MajorKey() { return RegExpCEntry; } | |
| 573 int MinorKey() { return 0; } | |
| 574 | |
| 575 bool NeedsImmovableCode() { return true; } | |
| 576 }; | |
| 577 | |
| 578 | |
| 579 // Trampoline stub to call into native code. To call safely into native code | |
| 580 // in the presence of compacting GC (which can move code objects) we need to | |
| 581 // keep the code which called into native pinned in the memory. Currently the | |
| 582 // simplest approach is to generate such stub early enough so it can never be | |
| 583 // moved by GC | |
| 584 class DirectCEntryStub: public PlatformCodeStub { | 527 class DirectCEntryStub: public PlatformCodeStub { |
| 585 public: | 528 public: |
| 586 DirectCEntryStub() {} | 529 DirectCEntryStub() {} |
| 587 void Generate(MacroAssembler* masm); | 530 void Generate(MacroAssembler* masm); |
| 588 void GenerateCall(MacroAssembler* masm, ExternalReference function); | |
| 589 void GenerateCall(MacroAssembler* masm, Register target); | 531 void GenerateCall(MacroAssembler* masm, Register target); |
| 590 | 532 |
| 591 private: | 533 private: |
| 592 Major MajorKey() { return DirectCEntry; } | 534 Major MajorKey() { return DirectCEntry; } |
| 593 int MinorKey() { return 0; } | 535 int MinorKey() { return 0; } |
| 594 | 536 |
| 595 bool NeedsImmovableCode() { return true; } | 537 bool NeedsImmovableCode() { return true; } |
| 596 }; | 538 }; |
| 597 | 539 |
| 598 | 540 |
| (...skipping 11 matching lines...) Expand all Loading... |
| 610 Register receiver, | 552 Register receiver, |
| 611 Register properties, | 553 Register properties, |
| 612 Handle<Name> name, | 554 Handle<Name> name, |
| 613 Register scratch0); | 555 Register scratch0); |
| 614 | 556 |
| 615 static void GeneratePositiveLookup(MacroAssembler* masm, | 557 static void GeneratePositiveLookup(MacroAssembler* masm, |
| 616 Label* miss, | 558 Label* miss, |
| 617 Label* done, | 559 Label* done, |
| 618 Register elements, | 560 Register elements, |
| 619 Register name, | 561 Register name, |
| 620 Register r0, | 562 Register scratch1, |
| 621 Register r1); | 563 Register scratch2); |
| 622 | 564 |
| 623 virtual bool SometimesSetsUpAFrame() { return false; } | 565 virtual bool SometimesSetsUpAFrame() { return false; } |
| 624 | 566 |
| 625 private: | 567 private: |
| 626 static const int kInlinedProbes = 4; | 568 static const int kInlinedProbes = 4; |
| 627 static const int kTotalProbes = 20; | 569 static const int kTotalProbes = 20; |
| 628 | 570 |
| 629 static const int kCapacityOffset = | 571 static const int kCapacityOffset = |
| 630 NameDictionary::kHeaderSize + | 572 NameDictionary::kHeaderSize + |
| 631 NameDictionary::kCapacityIndex * kPointerSize; | 573 NameDictionary::kCapacityIndex * kPointerSize; |
| 632 | 574 |
| 633 static const int kElementsStartOffset = | 575 static const int kElementsStartOffset = |
| 634 NameDictionary::kHeaderSize + | 576 NameDictionary::kHeaderSize + |
| 635 NameDictionary::kElementsStartIndex * kPointerSize; | 577 NameDictionary::kElementsStartIndex * kPointerSize; |
| 636 | 578 |
| 637 Major MajorKey() { return NameDictionaryLookup; } | 579 Major MajorKey() { return NameDictionaryLookup; } |
| 638 | 580 |
| 639 int MinorKey() { | 581 int MinorKey() { |
| 640 return LookupModeBits::encode(mode_); | 582 return LookupModeBits::encode(mode_); |
| 641 } | 583 } |
| 642 | 584 |
| 643 class LookupModeBits: public BitField<LookupMode, 0, 1> {}; | 585 class LookupModeBits: public BitField<LookupMode, 0, 1> {}; |
| 644 | 586 |
| 645 LookupMode mode_; | 587 LookupMode mode_; |
| 646 }; | 588 }; |
| 647 | 589 |
| 648 | 590 |
| 591 class SubStringStub: public PlatformCodeStub { |
| 592 public: |
| 593 SubStringStub() {} |
| 594 |
| 595 private: |
| 596 Major MajorKey() { return SubString; } |
| 597 int MinorKey() { return 0; } |
| 598 |
| 599 void Generate(MacroAssembler* masm); |
| 600 }; |
| 601 |
| 602 |
| 603 class StringCompareStub: public PlatformCodeStub { |
| 604 public: |
| 605 StringCompareStub() { } |
| 606 |
| 607 // Compares two flat ASCII strings and returns result in x0. |
| 608 static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
| 609 Register left, |
| 610 Register right, |
| 611 Register scratch1, |
| 612 Register scratch2, |
| 613 Register scratch3, |
| 614 Register scratch4); |
| 615 |
| 616 // Compare two flat ASCII strings for equality and returns result |
| 617 // in x0. |
| 618 static void GenerateFlatAsciiStringEquals(MacroAssembler* masm, |
| 619 Register left, |
| 620 Register right, |
| 621 Register scratch1, |
| 622 Register scratch2, |
| 623 Register scratch3); |
| 624 |
| 625 private: |
| 626 virtual Major MajorKey() { return StringCompare; } |
| 627 virtual int MinorKey() { return 0; } |
| 628 virtual void Generate(MacroAssembler* masm); |
| 629 |
| 630 static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm, |
| 631 Register left, |
| 632 Register right, |
| 633 Register length, |
| 634 Register scratch1, |
| 635 Register scratch2, |
| 636 Label* chars_not_equal); |
| 637 }; |
| 638 |
| 639 |
| 649 } } // namespace v8::internal | 640 } } // namespace v8::internal |
| 650 | 641 |
| 651 #endif // V8_ARM_CODE_STUBS_ARM_H_ | 642 #endif // V8_A64_CODE_STUBS_A64_H_ |
| OLD | NEW |