| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 17 matching lines...) |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if defined(V8_TARGET_ARCH_IA32) | 30 #if defined(V8_TARGET_ARCH_IA32) |
| 31 | 31 |
| 32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
| 33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
| 34 #include "isolate.h" | 34 #include "isolate.h" |
| 35 #include "jsregexp.h" | 35 #include "jsregexp.h" |
| 36 #include "regexp-macro-assembler.h" | 36 #include "regexp-macro-assembler.h" |
| 37 #include "stub-cache.h" | 37 #include "stub-cache.h" |
| 38 #include "codegen.h" |
| 38 | 39 |
| 39 namespace v8 { | 40 namespace v8 { |
| 40 namespace internal { | 41 namespace internal { |
| 41 | 42 |
| 42 #define __ ACCESS_MASM(masm) | 43 #define __ ACCESS_MASM(masm) |
| 43 | 44 |
| 44 void ToNumberStub::Generate(MacroAssembler* masm) { | 45 void ToNumberStub::Generate(MacroAssembler* masm) { |
| 45 // The ToNumber stub takes one argument in eax. | 46 // The ToNumber stub takes one argument in eax. |
| 46 Label check_heap_number, call_builtin; | 47 Label check_heap_number, call_builtin; |
| 47 __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear); | 48 __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear); |
| (...skipping 176 matching lines...) |
| 224 // Return and remove the on-stack parameters. | 225 // Return and remove the on-stack parameters. |
| 225 __ mov(esi, eax); | 226 __ mov(esi, eax); |
| 226 __ ret(2 * kPointerSize); | 227 __ ret(2 * kPointerSize); |
| 227 | 228 |
| 228 // Need to collect. Call into the runtime system. | 229 // Need to collect. Call into the runtime system. |
| 229 __ bind(&gc); | 230 __ bind(&gc); |
| 230 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); | 231 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); |
| 231 } | 232 } |
| 232 | 233 |
| 233 | 234 |
| 234 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { | 235 static void GenerateFastCloneShallowArrayCommon( |
| 235 // Stack layout on entry: | 236 MacroAssembler* masm, |
| 237 int length, |
| 238 FastCloneShallowArrayStub::Mode mode, |
| 239 Label* fail) { |
| 240 // Registers on entry: |
| 236 // | 241 // |
| 237 // [esp + kPointerSize]: constant elements. | 242 // ecx: boilerplate literal array. |
| 238 // [esp + (2 * kPointerSize)]: literal index. | 243 ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS); |
| 239 // [esp + (3 * kPointerSize)]: literals array. | |
| 240 | 244 |
| 241 // All sizes here are multiples of kPointerSize. | 245 // All sizes here are multiples of kPointerSize. |
| 242 int elements_size = 0; | 246 int elements_size = 0; |
| 243 if (length_ > 0) { | 247 if (length > 0) { |
| 244 elements_size = mode_ == CLONE_DOUBLE_ELEMENTS | 248 elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS |
| 245 ? FixedDoubleArray::SizeFor(length_) | 249 ? FixedDoubleArray::SizeFor(length) |
| 246 : FixedArray::SizeFor(length_); | 250 : FixedArray::SizeFor(length); |
| 247 } | 251 } |
| 248 int size = JSArray::kSize + elements_size; | 252 int size = JSArray::kSize + elements_size; |
| 249 | 253 |
| 250 // Load boilerplate object into ecx and check if we need to create a | |
| 251 // boilerplate. | |
| 252 Label slow_case; | |
| 253 __ mov(ecx, Operand(esp, 3 * kPointerSize)); | |
| 254 __ mov(eax, Operand(esp, 2 * kPointerSize)); | |
| 255 STATIC_ASSERT(kPointerSize == 4); | |
| 256 STATIC_ASSERT(kSmiTagSize == 1); | |
| 257 STATIC_ASSERT(kSmiTag == 0); | |
| 258 __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size, | |
| 259 FixedArray::kHeaderSize)); | |
| 260 Factory* factory = masm->isolate()->factory(); | |
| 261 __ cmp(ecx, factory->undefined_value()); | |
| 262 __ j(equal, &slow_case); | |
| 263 | |
| 264 if (FLAG_debug_code) { | |
| 265 const char* message; | |
| 266 Handle<Map> expected_map; | |
| 267 if (mode_ == CLONE_ELEMENTS) { | |
| 268 message = "Expected (writable) fixed array"; | |
| 269 expected_map = factory->fixed_array_map(); | |
| 270 } else if (mode_ == CLONE_DOUBLE_ELEMENTS) { | |
| 271 message = "Expected (writable) fixed double array"; | |
| 272 expected_map = factory->fixed_double_array_map(); | |
| 273 } else { | |
| 274 ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); | |
| 275 message = "Expected copy-on-write fixed array"; | |
| 276 expected_map = factory->fixed_cow_array_map(); | |
| 277 } | |
| 278 __ push(ecx); | |
| 279 __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); | |
| 280 __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map); | |
| 281 __ Assert(equal, message); | |
| 282 __ pop(ecx); | |
| 283 } | |
| 284 | |
| 285 // Allocate both the JS array and the elements array in one big | 254 // Allocate both the JS array and the elements array in one big |
| 286 // allocation. This avoids multiple limit checks. | 255 // allocation. This avoids multiple limit checks. |
| 287 __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT); | 256 __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT); |
| 288 | 257 |
| 289 // Copy the JS array part. | 258 // Copy the JS array part. |
| 290 for (int i = 0; i < JSArray::kSize; i += kPointerSize) { | 259 for (int i = 0; i < JSArray::kSize; i += kPointerSize) { |
| 291 if ((i != JSArray::kElementsOffset) || (length_ == 0)) { | 260 if ((i != JSArray::kElementsOffset) || (length == 0)) { |
| 292 __ mov(ebx, FieldOperand(ecx, i)); | 261 __ mov(ebx, FieldOperand(ecx, i)); |
| 293 __ mov(FieldOperand(eax, i), ebx); | 262 __ mov(FieldOperand(eax, i), ebx); |
| 294 } | 263 } |
| 295 } | 264 } |
| 296 | 265 |
| 297 if (length_ > 0) { | 266 if (length > 0) { |
| 298 // Get hold of the elements array of the boilerplate and set up the | 267 // Get hold of the elements array of the boilerplate and set up the |
| 299 // elements pointer in the resulting object. | 268 // elements pointer in the resulting object. |
| 300 __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); | 269 __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); |
| 301 __ lea(edx, Operand(eax, JSArray::kSize)); | 270 __ lea(edx, Operand(eax, JSArray::kSize)); |
| 302 __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx); | 271 __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx); |
| 303 | 272 |
| 304 // Copy the elements array. | 273 // Copy the elements array. |
| 305 if (mode_ == CLONE_ELEMENTS) { | 274 if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) { |
| 306 for (int i = 0; i < elements_size; i += kPointerSize) { | 275 for (int i = 0; i < elements_size; i += kPointerSize) { |
| 307 __ mov(ebx, FieldOperand(ecx, i)); | 276 __ mov(ebx, FieldOperand(ecx, i)); |
| 308 __ mov(FieldOperand(edx, i), ebx); | 277 __ mov(FieldOperand(edx, i), ebx); |
| 309 } | 278 } |
| 310 } else { | 279 } else { |
| 311 ASSERT(mode_ == CLONE_DOUBLE_ELEMENTS); | 280 ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS); |
| 312 int i; | 281 int i; |
| 313 for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) { | 282 for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) { |
| 314 __ mov(ebx, FieldOperand(ecx, i)); | 283 __ mov(ebx, FieldOperand(ecx, i)); |
| 315 __ mov(FieldOperand(edx, i), ebx); | 284 __ mov(FieldOperand(edx, i), ebx); |
| 316 } | 285 } |
| 317 while (i < elements_size) { | 286 while (i < elements_size) { |
| 318 __ fld_d(FieldOperand(ecx, i)); | 287 __ fld_d(FieldOperand(ecx, i)); |
| 319 __ fstp_d(FieldOperand(edx, i)); | 288 __ fstp_d(FieldOperand(edx, i)); |
| 320 i += kDoubleSize; | 289 i += kDoubleSize; |
| 321 } | 290 } |
| 322 ASSERT(i == elements_size); | 291 ASSERT(i == elements_size); |
| 323 } | 292 } |
| 324 } | 293 } |
| 294 } |
| 325 | 295 |
| 296 |
| 297 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { |
| 298 // Stack layout on entry: |
| 299 // |
| 300 // [esp + kPointerSize]: constant elements. |
| 301 // [esp + (2 * kPointerSize)]: literal index. |
| 302 // [esp + (3 * kPointerSize)]: literals array. |
| 303 |
| 304 // Load boilerplate object into ecx and check if we need to create a |
| 305 // boilerplate. |
| 306 __ mov(ecx, Operand(esp, 3 * kPointerSize)); |
| 307 __ mov(eax, Operand(esp, 2 * kPointerSize)); |
| 308 STATIC_ASSERT(kPointerSize == 4); |
| 309 STATIC_ASSERT(kSmiTagSize == 1); |
| 310 STATIC_ASSERT(kSmiTag == 0); |
| 311 __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size, |
| 312 FixedArray::kHeaderSize)); |
| 313 Factory* factory = masm->isolate()->factory(); |
| 314 __ cmp(ecx, factory->undefined_value()); |
| 315 Label slow_case; |
| 316 __ j(equal, &slow_case); |
| 317 |
| 318 FastCloneShallowArrayStub::Mode mode = mode_; |
| 319 // ecx is boilerplate object. |
| 320 if (mode == CLONE_ANY_ELEMENTS) { |
| 321 Label double_elements, check_fast_elements; |
| 322 __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset)); |
| 323 __ CheckMap(ebx, factory->fixed_cow_array_map(), |
| 324 &check_fast_elements, DONT_DO_SMI_CHECK); |
| 325 GenerateFastCloneShallowArrayCommon(masm, 0, |
| 326 COPY_ON_WRITE_ELEMENTS, &slow_case); |
| 327 __ ret(3 * kPointerSize); |
| 328 |
| 329 __ bind(&check_fast_elements); |
| 330 __ CheckMap(ebx, factory->fixed_array_map(), |
| 331 &double_elements, DONT_DO_SMI_CHECK); |
| 332 GenerateFastCloneShallowArrayCommon(masm, length_, |
| 333 CLONE_ELEMENTS, &slow_case); |
| 334 __ ret(3 * kPointerSize); |
| 335 |
| 336 __ bind(&double_elements); |
| 337 mode = CLONE_DOUBLE_ELEMENTS; |
| 338 // Fall through to generate the code to handle double elements. |
| 339 } |
| 340 |
| 341 if (FLAG_debug_code) { |
| 342 const char* message; |
| 343 Handle<Map> expected_map; |
| 344 if (mode == CLONE_ELEMENTS) { |
| 345 message = "Expected (writable) fixed array"; |
| 346 expected_map = factory->fixed_array_map(); |
| 347 } else if (mode == CLONE_DOUBLE_ELEMENTS) { |
| 348 message = "Expected (writable) fixed double array"; |
| 349 expected_map = factory->fixed_double_array_map(); |
| 350 } else { |
| 351 ASSERT(mode == COPY_ON_WRITE_ELEMENTS); |
| 352 message = "Expected copy-on-write fixed array"; |
| 353 expected_map = factory->fixed_cow_array_map(); |
| 354 } |
| 355 __ push(ecx); |
| 356 __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); |
| 357 __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map); |
| 358 __ Assert(equal, message); |
| 359 __ pop(ecx); |
| 360 } |
| 361 |
| 362 GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case); |
| 326 // Return and remove the on-stack parameters. | 363 // Return and remove the on-stack parameters. |
| 327 __ ret(3 * kPointerSize); | 364 __ ret(3 * kPointerSize); |
| 328 | 365 |
| 329 __ bind(&slow_case); | 366 __ bind(&slow_case); |
| 330 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); | 367 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); |
| 331 } | 368 } |
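
The refactored stub above moves the copy loops into the static helper GenerateFastCloneShallowArrayCommon and, for the new CLONE_ANY_ELEMENTS mode, picks the clone strategy at runtime by checking the boilerplate's elements map (copy-on-write first, then writable fixed array, then fixed double array). A minimal C++ sketch of that dispatch, using illustrative stand-in enums rather than V8's real types:

```cpp
#include <cassert>

// Stand-ins for the three elements maps the stub compares against.
enum class ElementsMap { kFixedCow, kFixed, kFixedDouble };
enum class CloneMode { kCopyOnWrite, kCloneElements, kCloneDoubleElements };

CloneMode DispatchCloneMode(ElementsMap map) {
  switch (map) {
    case ElementsMap::kFixedCow:
      // A COW clone shares the backing store, so the stub calls the common
      // path with length 0 and copies no elements.
      return CloneMode::kCopyOnWrite;
    case ElementsMap::kFixed:
      return CloneMode::kCloneElements;
    case ElementsMap::kFixedDouble:
      return CloneMode::kCloneDoubleElements;
  }
  assert(false && "unhandled elements map");
  return CloneMode::kCloneElements;
}
```

Keeping the helper parameterized on mode (rather than reading mode_ directly) is what lets a single stub instance serve all three specialized paths behind the CLONE_ANY_ELEMENTS checks.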
| 332 | 369 |
| 333 | 370 |
| 334 // The stub expects its argument on the stack and returns its result in tos_: | 371 // The stub expects its argument on the stack and returns its result in tos_: |
| 335 // zero for false, and a non-zero value for true. | 372 // zero for false, and a non-zero value for true. |
| (...skipping 4746 matching lines...) |
| 5082 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); | 5119 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); |
| 5083 if (!include_number_compare_) stream->Add("_NO_NUMBER"); | 5120 if (!include_number_compare_) stream->Add("_NO_NUMBER"); |
| 5084 if (!include_smi_compare_) stream->Add("_NO_SMI"); | 5121 if (!include_smi_compare_) stream->Add("_NO_SMI"); |
| 5085 } | 5122 } |
| 5086 | 5123 |
| 5087 | 5124 |
| 5088 // ------------------------------------------------------------------------- | 5125 // ------------------------------------------------------------------------- |
| 5089 // StringCharCodeAtGenerator | 5126 // StringCharCodeAtGenerator |
| 5090 | 5127 |
| 5091 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 5128 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 5092 Label flat_string; | |
| 5093 Label ascii_string; | |
| 5094 Label got_char_code; | |
| 5095 Label sliced_string; | |
| 5096 | |
| 5097 // If the receiver is a smi, trigger the non-string case. | 5129 // If the receiver is a smi, trigger the non-string case. |
| 5098 STATIC_ASSERT(kSmiTag == 0); | 5130 STATIC_ASSERT(kSmiTag == 0); |
| 5099 __ JumpIfSmi(object_, receiver_not_string_); | 5131 __ JumpIfSmi(object_, receiver_not_string_); |
| 5100 | 5132 |
| 5101 // Fetch the instance type of the receiver into result register. | 5133 // Fetch the instance type of the receiver into result register. |
| 5102 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); | 5134 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); |
| 5103 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); | 5135 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); |
| 5104 // If the receiver is not a string, trigger the non-string case. | 5136 // If the receiver is not a string, trigger the non-string case. |
| 5105 __ test(result_, Immediate(kIsNotStringMask)); | 5137 __ test(result_, Immediate(kIsNotStringMask)); |
| 5106 __ j(not_zero, receiver_not_string_); | 5138 __ j(not_zero, receiver_not_string_); |
| 5107 | 5139 |
| 5108 // If the index is non-smi, trigger the non-smi case. | 5140 // If the index is non-smi, trigger the non-smi case. |
| 5109 STATIC_ASSERT(kSmiTag == 0); | 5141 STATIC_ASSERT(kSmiTag == 0); |
| 5110 __ JumpIfNotSmi(index_, &index_not_smi_); | 5142 __ JumpIfNotSmi(index_, &index_not_smi_); |
| 5111 __ bind(&got_smi_index_); | 5143 __ bind(&got_smi_index_); |
| 5112 | 5144 |
| 5113 // Check for index out of range. | 5145 // Check for index out of range. |
| 5114 __ cmp(index_, FieldOperand(object_, String::kLengthOffset)); | 5146 __ cmp(index_, FieldOperand(object_, String::kLengthOffset)); |
| 5115 __ j(above_equal, index_out_of_range_); | 5147 __ j(above_equal, index_out_of_range_); |
| 5116 | 5148 |
| 5117 // We need special handling for non-flat strings. | 5149 __ SmiUntag(index_); |
| 5118 STATIC_ASSERT(kSeqStringTag == 0); | |
| 5119 __ test(result_, Immediate(kStringRepresentationMask)); | |
| 5120 __ j(zero, &flat_string); | |
| 5121 | 5150 |
| 5122 // Handle non-flat strings. | 5151 Factory* factory = masm->isolate()->factory(); |
| 5123 __ and_(result_, kStringRepresentationMask); | 5152 StringCharLoadGenerator::Generate( |
| 5124 STATIC_ASSERT(kConsStringTag < kExternalStringTag); | 5153 masm, factory, object_, index_, result_, &call_runtime_); |
| 5125 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); | |
| 5126 __ cmp(result_, kExternalStringTag); | |
| 5127 __ j(greater, &sliced_string, Label::kNear); | |
| 5128 __ j(equal, &call_runtime_); | |
| 5129 | 5154 |
| 5130 // ConsString. | |
| 5131 // Check whether the right hand side is the empty string (i.e. if | |
| 5132 // this is really a flat string in a cons string). If that is not | |
| 5133 // the case we would rather go to the runtime system now to flatten | |
| 5134 // the string. | |
| 5135 Label assure_seq_string; | |
| 5136 __ cmp(FieldOperand(object_, ConsString::kSecondOffset), | |
| 5137 Immediate(masm->isolate()->factory()->empty_string())); | |
| 5138 __ j(not_equal, &call_runtime_); | |
| 5139 // Get the first of the two parts. | |
| 5140 __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset)); | |
| 5141 __ jmp(&assure_seq_string, Label::kNear); | |
| 5142 | |
| 5143 // SlicedString, unpack and add offset. | |
| 5144 __ bind(&sliced_string); | |
| 5145 __ add(index_, FieldOperand(object_, SlicedString::kOffsetOffset)); | |
| 5146 __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset)); | |
| 5147 | |
| 5148 // Assure that we are dealing with a sequential string. Go to runtime if not. | |
| 5149 // Note that if the original string is a cons or slice with an external | |
| 5150 // string as underlying string, we pass that unpacked underlying string with | |
| 5151 // the adjusted index to the runtime function. | |
| 5152 __ bind(&assure_seq_string); | |
| 5153 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); | |
| 5154 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); | |
| 5155 STATIC_ASSERT(kSeqStringTag == 0); | |
| 5156 __ test(result_, Immediate(kStringRepresentationMask)); | |
| 5157 __ j(not_zero, &call_runtime_); | |
| 5158 | |
| 5159 // Check for 1-byte or 2-byte string. | |
| 5160 __ bind(&flat_string); | |
| 5161 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); | |
| 5162 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | |
| 5163 __ test(result_, Immediate(kStringEncodingMask)); | |
| 5164 __ j(not_zero, &ascii_string, Label::kNear); | |
| 5165 | |
| 5166 // 2-byte string. | |
| 5167 // Load the 2-byte character code into the result register. | |
| 5168 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | |
| 5169 __ movzx_w(result_, FieldOperand(object_, | |
| 5170 index_, times_1, // Scratch is smi-tagged. | |
| 5171 SeqTwoByteString::kHeaderSize)); | |
| 5172 __ jmp(&got_char_code, Label::kNear); | |
| 5173 | |
| 5174 // ASCII string. | |
| 5175 // Load the byte into the result register. | |
| 5176 __ bind(&ascii_string); | |
| 5177 __ SmiUntag(index_); | |
| 5178 __ movzx_b(result_, FieldOperand(object_, | |
| 5179 index_, times_1, | |
| 5180 SeqAsciiString::kHeaderSize)); | |
| 5181 __ bind(&got_char_code); | |
| 5182 __ SmiTag(result_); | 5155 __ SmiTag(result_); |
| 5183 __ bind(&exit_); | 5156 __ bind(&exit_); |
| 5184 } | 5157 } |
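
The deleted block had the cons/sliced/encoding dispatch inlined; the new code untags the index up front and delegates to StringCharLoadGenerator::Generate (presumably why codegen.h is now included at the top of the file). Below is a self-contained sketch of the kind of dispatch that generator performs, with simplified stand-in types rather than V8's heap objects; the assumption that external strings bail out to the runtime here is mine, not confirmed by this diff:

```cpp
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <string>

enum class Repr { kSeqOneByte, kSeqTwoByte, kCons, kSliced, kExternal };

struct Str {
  Repr repr;
  std::u16string chars;         // sequential backing store
  const Str* first = nullptr;   // cons: left part (right part must be empty)
  const Str* parent = nullptr;  // sliced: underlying string
  size_t offset = 0;            // sliced: start offset within the parent
};

// Unwrap indirect strings, then index the sequential backing store;
// throw where the generated code would jump to the runtime label instead.
uint16_t CharCodeAt(const Str* s, size_t index) {
  for (;;) {
    switch (s->repr) {
      case Repr::kCons:
        s = s->first;  // only flat cons strings (empty second part) qualify
        continue;
      case Repr::kSliced:
        index += s->offset;  // adjust the index into the parent string
        s = s->parent;
        continue;
      case Repr::kExternal:
        throw std::runtime_error("bail out to runtime");  // assumed behavior
      case Repr::kSeqOneByte:
      case Repr::kSeqTwoByte:
        return s->chars.at(index);
    }
  }
}
```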
| 5185 | 5158 |
| 5186 | 5159 |
| 5187 void StringCharCodeAtGenerator::GenerateSlow( | 5160 void StringCharCodeAtGenerator::GenerateSlow( |
| 5188 MacroAssembler* masm, | 5161 MacroAssembler* masm, |
| 5189 const RuntimeCallHelper& call_helper) { | 5162 const RuntimeCallHelper& call_helper) { |
| 5190 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); | 5163 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); |
| 5191 | 5164 |
| (...skipping 29 matching lines...) |
| 5221 __ JumpIfNotSmi(index_, index_out_of_range_); | 5194 __ JumpIfNotSmi(index_, index_out_of_range_); |
| 5222 // Otherwise, return to the fast path. | 5195 // Otherwise, return to the fast path. |
| 5223 __ jmp(&got_smi_index_); | 5196 __ jmp(&got_smi_index_); |
| 5224 | 5197 |
| 5225 // Call runtime. We get here when the receiver is a string and the | 5198 // Call runtime. We get here when the receiver is a string and the |
| 5226 // index is a number, but the code for getting the actual character | 5199 // index is a number, but the code for getting the actual character |
| 5227 // is too complex (e.g., when the string needs to be flattened). | 5200 // is too complex (e.g., when the string needs to be flattened). |
| 5228 __ bind(&call_runtime_); | 5201 __ bind(&call_runtime_); |
| 5229 call_helper.BeforeCall(masm); | 5202 call_helper.BeforeCall(masm); |
| 5230 __ push(object_); | 5203 __ push(object_); |
| 5204 __ SmiTag(index_); |
| 5231 __ push(index_); | 5205 __ push(index_); |
| 5232 __ CallRuntime(Runtime::kStringCharCodeAt, 2); | 5206 __ CallRuntime(Runtime::kStringCharCodeAt, 2); |
| 5233 if (!result_.is(eax)) { | 5207 if (!result_.is(eax)) { |
| 5234 __ mov(result_, eax); | 5208 __ mov(result_, eax); |
| 5235 } | 5209 } |
| 5236 call_helper.AfterCall(masm); | 5210 call_helper.AfterCall(masm); |
| 5237 __ jmp(&exit_); | 5211 __ jmp(&exit_); |
| 5238 | 5212 |
| 5239 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); | 5213 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); |
| 5240 } | 5214 } |
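
The one functional change in this slow path is the added SmiTag before pushing index_: since the fast path now untags the index before calling StringCharLoadGenerator, the slow path must re-tag it so that Runtime::kStringCharCodeAt still sees a tagged index on the stack. For reference, a sketch of 32-bit smi tagging as used here (kSmiTag == 0, kSmiTagSize == 1, payload in the upper 31 bits):

```cpp
#include <cassert>
#include <cstdint>

inline intptr_t SmiTag(int32_t value) {
  // Shift the payload up one bit; the low (tag) bit becomes 0.
  // Assumes the value fits in 31 bits, as all smis do on ia32.
  return static_cast<intptr_t>(value) << 1;
}

inline int32_t SmiUntag(intptr_t smi) {
  assert((smi & 1) == 0);  // a smi always carries a zero tag bit
  return static_cast<int32_t>(smi >> 1);
}
```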
| (...skipping 725 matching lines...) |
| 5966 // If coming from the make_two_character_string path, the string | 5940 // If coming from the make_two_character_string path, the string |
| 5967 // is too short to be sliced anyway. | 5941 // is too short to be sliced anyway. |
| 5968 STATIC_ASSERT(2 < SlicedString::kMinLength); | 5942 STATIC_ASSERT(2 < SlicedString::kMinLength); |
| 5969 __ jmp(©_routine); | 5943 __ jmp(©_routine); |
| 5970 __ bind(&result_longer_than_two); | 5944 __ bind(&result_longer_than_two); |
| 5971 | 5945 |
| 5972 // eax: string | 5946 // eax: string |
| 5973 // ebx: instance type | 5947 // ebx: instance type |
| 5974 // ecx: sub string length | 5948 // ecx: sub string length |
| 5975 // edx: from index (smi) | 5949 // edx: from index (smi) |
| 5976 Label allocate_slice, sliced_string, seq_string; | 5950 Label allocate_slice, sliced_string, seq_or_external_string; |
| 5977 __ cmp(ecx, SlicedString::kMinLength); | 5951 __ cmp(ecx, SlicedString::kMinLength); |
| 5978 // Short slice. Copy instead of slicing. | 5952 // Short slice. Copy instead of slicing. |
| 5979 __ j(less, ©_routine); | 5953 __ j(less, ©_routine); |
| 5980 STATIC_ASSERT(kSeqStringTag == 0); | 5954 // If the string is not indirect, it can only be sequential or external. |
| 5981 __ test(ebx, Immediate(kStringRepresentationMask)); | |
| 5982 __ j(zero, &seq_string, Label::kNear); | |
| 5983 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); | 5955 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); |
| 5984 STATIC_ASSERT(kIsIndirectStringMask != 0); | 5956 STATIC_ASSERT(kIsIndirectStringMask != 0); |
| 5985 __ test(ebx, Immediate(kIsIndirectStringMask)); | 5957 __ test(ebx, Immediate(kIsIndirectStringMask)); |
| 5986 // External string. Jump to runtime. | 5958 __ j(zero, &seq_or_external_string, Label::kNear); |
| 5987 __ j(zero, &runtime); | |
| 5988 | 5959 |
| 5989 Factory* factory = masm->isolate()->factory(); | 5960 Factory* factory = masm->isolate()->factory(); |
| 5990 __ test(ebx, Immediate(kSlicedNotConsMask)); | 5961 __ test(ebx, Immediate(kSlicedNotConsMask)); |
| 5991 __ j(not_zero, &sliced_string, Label::kNear); | 5962 __ j(not_zero, &sliced_string, Label::kNear); |
| 5992 // Cons string. Check whether it is flat, then fetch first part. | 5963 // Cons string. Check whether it is flat, then fetch first part. |
| 5993 __ cmp(FieldOperand(eax, ConsString::kSecondOffset), | 5964 __ cmp(FieldOperand(eax, ConsString::kSecondOffset), |
| 5994 factory->empty_string()); | 5965 factory->empty_string()); |
| 5995 __ j(not_equal, &runtime); | 5966 __ j(not_equal, &runtime); |
| 5996 __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset)); | 5967 __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset)); |
| 5997 __ jmp(&allocate_slice, Label::kNear); | 5968 __ jmp(&allocate_slice, Label::kNear); |
| 5998 | 5969 |
| 5999 __ bind(&sliced_string); | 5970 __ bind(&sliced_string); |
| 6000 // Sliced string. Fetch parent and correct start index by offset. | 5971 // Sliced string. Fetch parent and correct start index by offset. |
| 6001 __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset)); | 5972 __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset)); |
| 6002 __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset)); | 5973 __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset)); |
| 6003 __ jmp(&allocate_slice, Label::kNear); | 5974 __ jmp(&allocate_slice, Label::kNear); |
| 6004 | 5975 |
| 6005 __ bind(&seq_string); | 5976 __ bind(&seq_or_external_string); |
| 6006 // Sequential string. Just move string to the right register. | 5977 // Sequential or external string. Just move string to the correct register. |
| 6007 __ mov(edi, eax); | 5978 __ mov(edi, eax); |
| 6008 | 5979 |
| 6009 __ bind(&allocate_slice); | 5980 __ bind(&allocate_slice); |
| 6010 // edi: underlying subject string | 5981 // edi: underlying subject string |
| 6011 // ebx: instance type of original subject string | 5982 // ebx: instance type of original subject string |
| 6012 // edx: offset | 5983 // edx: offset |
| 6013 // ecx: length | 5984 // ecx: length |
| 6014 // Allocate new sliced string. At this point we do not reload the instance | 5985 // Allocate new sliced string. At this point we do not reload the instance |
| 6015 // type including the string encoding because we simply rely on the info | 5986 // type including the string encoding because we simply rely on the info |
| 6016 // provided by the original string. It does not matter if the original | 5987 // provided by the original string. It does not matter if the original |
| (...skipping 1090 matching lines...) |
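
The SubString hunk above stops bailing to the runtime for external strings: any parent that is not indirect (cons or sliced) now flows into allocate_slice. A hedged model of the sliced-string representation being allocated, with simplified fields and an assumed kMinLength value:

```cpp
#include <cstddef>
#include <memory>
#include <string>

// A slice is a (parent, offset, length) view over a sequential or external
// parent; substrings shorter than kMinLength are copied instead, matching
// the "Short slice" check in the stub.
struct SlicedString {
  static constexpr size_t kMinLength = 13;  // assumed threshold, not verified
  std::shared_ptr<const std::u16string> parent;
  size_t offset = 0;
  size_t length = 0;
};
```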
| 7107 false); | 7078 false); |
| 7108 __ pop(edx); | 7079 __ pop(edx); |
| 7109 __ ret(0); | 7080 __ ret(0); |
| 7110 } | 7081 } |
| 7111 | 7082 |
| 7112 #undef __ | 7083 #undef __ |
| 7113 | 7084 |
| 7114 } } // namespace v8::internal | 7085 } } // namespace v8::internal |
| 7115 | 7086 |
| 7116 #endif // V8_TARGET_ARCH_IA32 | 7087 #endif // V8_TARGET_ARCH_IA32 |