Chromium Code Reviews

Diff: src/x64/code-stubs-x64.cc

Issue 8551006: Version 3.7.9. (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: Created 9 years, 1 month ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 201 matching lines...)
   // Return and remove the on-stack parameter.
   __ movq(rsi, rax);
   __ ret(2 * kPointerSize);

   // Need to collect. Call into runtime system.
   __ bind(&gc);
   __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
 }


-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [rsp + kPointerSize]: constant elements.
-  // [rsp + (2 * kPointerSize)]: literal index.
-  // [rsp + (3 * kPointerSize)]: literals array.
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  //
+  //   rcx: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

   // All sizes here are multiples of kPointerSize.
   int elements_size = 0;
-  if (length_ > 0) {
-    elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length_)
-        : FixedArray::SizeFor(length_);
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
   }
   int size = JSArray::kSize + elements_size;

-  // Load boilerplate object into rcx and check if we need to create a
-  // boilerplate.
-  Label slow_case;
-  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
-  __ movq(rax, Operand(rsp, 2 * kPointerSize));
-  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
-  __ movq(rcx,
-          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
-  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &slow_case);
-
-  if (FLAG_debug_code) {
-    const char* message;
-    Heap::RootListIndex expected_map_index;
-    if (mode_ == CLONE_ELEMENTS) {
-      message = "Expected (writable) fixed array";
-      expected_map_index = Heap::kFixedArrayMapRootIndex;
-    } else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
-      message = "Expected (writable) fixed double array";
-      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
-    } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
-      message = "Expected copy-on-write fixed array";
-      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
-    }
-    __ push(rcx);
-    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
-    __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
-                   expected_map_index);
-    __ Assert(equal, message);
-    __ pop(rcx);
-  }
-
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);

   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
       __ movq(rbx, FieldOperand(rcx, i));
       __ movq(FieldOperand(rax, i), rbx);
     }
   }

-  if (length_ > 0) {
+  if (length > 0) {
     // Get hold of the elements array of the boilerplate and setup the
     // elements pointer in the resulting object.
     __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
     __ lea(rdx, Operand(rax, JSArray::kSize));
     __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);

     // Copy the elements array.
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
       for (int i = 0; i < elements_size; i += kPointerSize) {
         __ movq(rbx, FieldOperand(rcx, i));
         __ movq(FieldOperand(rdx, i), rbx);
       }
     } else {
-      ASSERT(mode_ == CLONE_DOUBLE_ELEMENTS);
+      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
       int i;
       for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
         __ movq(rbx, FieldOperand(rcx, i));
         __ movq(FieldOperand(rdx, i), rbx);
       }
       while (i < elements_size) {
         __ movsd(xmm0, FieldOperand(rcx, i));
         __ movsd(FieldOperand(rdx, i), xmm0);
         i += kDoubleSize;
       }
       ASSERT(i == elements_size);
     }
   }
+}

-  // Return and remove the on-stack parameters.
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [rsp + kPointerSize]: constant elements.
+  // [rsp + (2 * kPointerSize)]: literal index.
+  // [rsp + (3 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into rcx and check if we need to create a
+  // boilerplate.
+  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
+  __ movq(rax, Operand(rsp, 2 * kPointerSize));
+  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+  __ movq(rcx,
+          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+  Label slow_case;
+  __ j(equal, &slow_case);
+
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  // rcx is boilerplate object.
+  Factory* factory = masm->isolate()->factory();
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           factory->fixed_cow_array_map());
+    __ j(not_equal, &check_fast_elements);
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&check_fast_elements);
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           factory->fixed_array_map());
+    __ j(not_equal, &double_elements);
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
+  if (FLAG_debug_code) {
+    const char* message;
+    Heap::RootListIndex expected_map_index;
+    if (mode == CLONE_ELEMENTS) {
+      message = "Expected (writable) fixed array";
+      expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
+    } else {
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
+      message = "Expected copy-on-write fixed array";
+      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+    }
+    __ push(rcx);
+    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                   expected_map_index);
+    __ Assert(equal, message);
+    __ pop(rcx);
+  }
+
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
   __ ret(3 * kPointerSize);

   __ bind(&slow_case);
   __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
 }


 // The stub expects its argument on the stack and returns its result in tos_:
 // zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
(...skipping 4582 matching lines...)
   // If coming from the make_two_character_string path, the string
   // is too short to be sliced anyways.
   STATIC_ASSERT(2 < SlicedString::kMinLength);
   __ jmp(&copy_routine);
   __ bind(&result_longer_than_two);

   // rax: string
   // rbx: instance type
   // rcx: sub string length
   // rdx: from index (smi)
-  Label allocate_slice, sliced_string, seq_string;
+  Label allocate_slice, sliced_string, seq_or_external_string;
   __ cmpq(rcx, Immediate(SlicedString::kMinLength));
   // Short slice. Copy instead of slicing.
   __ j(less, &copy_routine);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(rbx, Immediate(kStringRepresentationMask));
-  __ j(zero, &seq_string, Label::kNear);
+  // If the string is not indirect, it can only be sequential or external.
   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
   STATIC_ASSERT(kIsIndirectStringMask != 0);
   __ testb(rbx, Immediate(kIsIndirectStringMask));
-  // External string. Jump to runtime.
-  __ j(zero, &runtime);
+  __ j(zero, &seq_or_external_string, Label::kNear);

   __ testb(rbx, Immediate(kSlicedNotConsMask));
   __ j(not_zero, &sliced_string, Label::kNear);
   // Cons string. Check whether it is flat, then fetch first part.
   __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
                  Heap::kEmptyStringRootIndex);
   __ j(not_equal, &runtime);
   __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
   __ jmp(&allocate_slice, Label::kNear);

   __ bind(&sliced_string);
   // Sliced string. Fetch parent and correct start index by offset.
   __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
   __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
   __ jmp(&allocate_slice, Label::kNear);

-  __ bind(&seq_string);
-  // Sequential string. Just move string to the right register.
+  __ bind(&seq_or_external_string);
+  // Sequential or external string. Just move string to the correct register.
   __ movq(rdi, rax);

   __ bind(&allocate_slice);
   // edi: underlying subject string
   // ebx: instance type of original subject string
   // edx: offset
   // ecx: length
   // Allocate new sliced string. At this point we do not reload the instance
   // type including the string encoding because we simply rely on the info
   // provided by the original string. It does not matter if the original
(...skipping 1072 matching lines...)
                                  xmm0,
                                  &slow_elements);
   __ ret(0);
 }

 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64
