Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(67)

Side by Side Diff: src/ia32/codegen-ia32.cc

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 9 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/ia32/code-stubs-ia32.cc ('k') | src/ia32/debug-ia32.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 21 matching lines...) Expand all
32 #include "codegen.h" 32 #include "codegen.h"
33 33
34 namespace v8 { 34 namespace v8 {
35 namespace internal { 35 namespace internal {
36 36
37 37
38 // ------------------------------------------------------------------------- 38 // -------------------------------------------------------------------------
39 // Platform-specific RuntimeCallHelper functions. 39 // Platform-specific RuntimeCallHelper functions.
40 40
41 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { 41 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
42 masm->EnterInternalFrame(); 42 masm->EnterFrame(StackFrame::INTERNAL);
43 ASSERT(!masm->has_frame());
44 masm->set_has_frame(true);
43 } 45 }
44 46
45 47
46 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { 48 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
47 masm->LeaveInternalFrame(); 49 masm->LeaveFrame(StackFrame::INTERNAL);
50 ASSERT(masm->has_frame());
51 masm->set_has_frame(false);
48 } 52 }
49 53
50 54
51 #define __ masm. 55 #define __ masm.
52 56
// Plain C fallback for OS::MemCopyFunction: simply forwards to the C
// library's memcpy. Used as the copy routine when no specialized stub
// applies. Note: like memcpy, the source and destination ranges must not
// overlap.
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
  ::memcpy(dest, src, size);
}
56 60
57 61
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
101 __ mov(dst, Operand(esp, stack_offset + kDestinationOffset)); 105 __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
102 __ mov(src, Operand(esp, stack_offset + kSourceOffset)); 106 __ mov(src, Operand(esp, stack_offset + kSourceOffset));
103 __ mov(count, Operand(esp, stack_offset + kSizeOffset)); 107 __ mov(count, Operand(esp, stack_offset + kSizeOffset));
104 108
105 109
106 __ movdqu(xmm0, Operand(src, 0)); 110 __ movdqu(xmm0, Operand(src, 0));
107 __ movdqu(Operand(dst, 0), xmm0); 111 __ movdqu(Operand(dst, 0), xmm0);
108 __ mov(edx, dst); 112 __ mov(edx, dst);
109 __ and_(edx, 0xF); 113 __ and_(edx, 0xF);
110 __ neg(edx); 114 __ neg(edx);
111 __ add(Operand(edx), Immediate(16)); 115 __ add(edx, Immediate(16));
112 __ add(dst, Operand(edx)); 116 __ add(dst, edx);
113 __ add(src, Operand(edx)); 117 __ add(src, edx);
114 __ sub(Operand(count), edx); 118 __ sub(count, edx);
115 119
116 // edi is now aligned. Check if esi is also aligned. 120 // edi is now aligned. Check if esi is also aligned.
117 Label unaligned_source; 121 Label unaligned_source;
118 __ test(Operand(src), Immediate(0x0F)); 122 __ test(src, Immediate(0x0F));
119 __ j(not_zero, &unaligned_source); 123 __ j(not_zero, &unaligned_source);
120 { 124 {
121 // Copy loop for aligned source and destination. 125 // Copy loop for aligned source and destination.
122 __ mov(edx, count); 126 __ mov(edx, count);
123 Register loop_count = ecx; 127 Register loop_count = ecx;
124 Register count = edx; 128 Register count = edx;
125 __ shr(loop_count, 5); 129 __ shr(loop_count, 5);
126 { 130 {
127 // Main copy loop. 131 // Main copy loop.
128 Label loop; 132 Label loop;
129 __ bind(&loop); 133 __ bind(&loop);
130 __ prefetch(Operand(src, 0x20), 1); 134 __ prefetch(Operand(src, 0x20), 1);
131 __ movdqa(xmm0, Operand(src, 0x00)); 135 __ movdqa(xmm0, Operand(src, 0x00));
132 __ movdqa(xmm1, Operand(src, 0x10)); 136 __ movdqa(xmm1, Operand(src, 0x10));
133 __ add(Operand(src), Immediate(0x20)); 137 __ add(src, Immediate(0x20));
134 138
135 __ movdqa(Operand(dst, 0x00), xmm0); 139 __ movdqa(Operand(dst, 0x00), xmm0);
136 __ movdqa(Operand(dst, 0x10), xmm1); 140 __ movdqa(Operand(dst, 0x10), xmm1);
137 __ add(Operand(dst), Immediate(0x20)); 141 __ add(dst, Immediate(0x20));
138 142
139 __ dec(loop_count); 143 __ dec(loop_count);
140 __ j(not_zero, &loop); 144 __ j(not_zero, &loop);
141 } 145 }
142 146
143 // At most 31 bytes to copy. 147 // At most 31 bytes to copy.
144 Label move_less_16; 148 Label move_less_16;
145 __ test(Operand(count), Immediate(0x10)); 149 __ test(count, Immediate(0x10));
146 __ j(zero, &move_less_16); 150 __ j(zero, &move_less_16);
147 __ movdqa(xmm0, Operand(src, 0)); 151 __ movdqa(xmm0, Operand(src, 0));
148 __ add(Operand(src), Immediate(0x10)); 152 __ add(src, Immediate(0x10));
149 __ movdqa(Operand(dst, 0), xmm0); 153 __ movdqa(Operand(dst, 0), xmm0);
150 __ add(Operand(dst), Immediate(0x10)); 154 __ add(dst, Immediate(0x10));
151 __ bind(&move_less_16); 155 __ bind(&move_less_16);
152 156
153 // At most 15 bytes to copy. Copy 16 bytes at end of string. 157 // At most 15 bytes to copy. Copy 16 bytes at end of string.
154 __ and_(count, 0xF); 158 __ and_(count, 0xF);
155 __ movdqu(xmm0, Operand(src, count, times_1, -0x10)); 159 __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
156 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0); 160 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
157 161
158 __ mov(eax, Operand(esp, stack_offset + kDestinationOffset)); 162 __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
159 __ pop(esi); 163 __ pop(esi);
160 __ pop(edi); 164 __ pop(edi);
161 __ ret(0); 165 __ ret(0);
162 } 166 }
163 __ Align(16); 167 __ Align(16);
164 { 168 {
165 // Copy loop for unaligned source and aligned destination. 169 // Copy loop for unaligned source and aligned destination.
166 // If source is not aligned, we can't read it as efficiently. 170 // If source is not aligned, we can't read it as efficiently.
167 __ bind(&unaligned_source); 171 __ bind(&unaligned_source);
168 __ mov(edx, ecx); 172 __ mov(edx, ecx);
169 Register loop_count = ecx; 173 Register loop_count = ecx;
170 Register count = edx; 174 Register count = edx;
171 __ shr(loop_count, 5); 175 __ shr(loop_count, 5);
172 { 176 {
173 // Main copy loop 177 // Main copy loop
174 Label loop; 178 Label loop;
175 __ bind(&loop); 179 __ bind(&loop);
176 __ prefetch(Operand(src, 0x20), 1); 180 __ prefetch(Operand(src, 0x20), 1);
177 __ movdqu(xmm0, Operand(src, 0x00)); 181 __ movdqu(xmm0, Operand(src, 0x00));
178 __ movdqu(xmm1, Operand(src, 0x10)); 182 __ movdqu(xmm1, Operand(src, 0x10));
179 __ add(Operand(src), Immediate(0x20)); 183 __ add(src, Immediate(0x20));
180 184
181 __ movdqa(Operand(dst, 0x00), xmm0); 185 __ movdqa(Operand(dst, 0x00), xmm0);
182 __ movdqa(Operand(dst, 0x10), xmm1); 186 __ movdqa(Operand(dst, 0x10), xmm1);
183 __ add(Operand(dst), Immediate(0x20)); 187 __ add(dst, Immediate(0x20));
184 188
185 __ dec(loop_count); 189 __ dec(loop_count);
186 __ j(not_zero, &loop); 190 __ j(not_zero, &loop);
187 } 191 }
188 192
189 // At most 31 bytes to copy. 193 // At most 31 bytes to copy.
190 Label move_less_16; 194 Label move_less_16;
191 __ test(Operand(count), Immediate(0x10)); 195 __ test(count, Immediate(0x10));
192 __ j(zero, &move_less_16); 196 __ j(zero, &move_less_16);
193 __ movdqu(xmm0, Operand(src, 0)); 197 __ movdqu(xmm0, Operand(src, 0));
194 __ add(Operand(src), Immediate(0x10)); 198 __ add(src, Immediate(0x10));
195 __ movdqa(Operand(dst, 0), xmm0); 199 __ movdqa(Operand(dst, 0), xmm0);
196 __ add(Operand(dst), Immediate(0x10)); 200 __ add(dst, Immediate(0x10));
197 __ bind(&move_less_16); 201 __ bind(&move_less_16);
198 202
199 // At most 15 bytes to copy. Copy 16 bytes at end of string. 203 // At most 15 bytes to copy. Copy 16 bytes at end of string.
200 __ and_(count, 0x0F); 204 __ and_(count, 0x0F);
201 __ movdqu(xmm0, Operand(src, count, times_1, -0x10)); 205 __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
202 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0); 206 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
203 207
204 __ mov(eax, Operand(esp, stack_offset + kDestinationOffset)); 208 __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
205 __ pop(esi); 209 __ pop(esi);
206 __ pop(edi); 210 __ pop(edi);
(...skipping 14 matching lines...) Expand all
221 __ mov(count, Operand(esp, stack_offset + kSizeOffset)); 225 __ mov(count, Operand(esp, stack_offset + kSizeOffset));
222 226
223 // Copy the first word. 227 // Copy the first word.
224 __ mov(eax, Operand(src, 0)); 228 __ mov(eax, Operand(src, 0));
225 __ mov(Operand(dst, 0), eax); 229 __ mov(Operand(dst, 0), eax);
226 230
227 // Increment src, dst so that dst is aligned. 231 // Increment src, dst so that dst is aligned.
228 __ mov(edx, dst); 232 __ mov(edx, dst);
229 __ and_(edx, 0x03); 233 __ and_(edx, 0x03);
230 __ neg(edx); 234 __ neg(edx);
231 __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3) 235 __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
232 __ add(dst, Operand(edx)); 236 __ add(dst, edx);
233 __ add(src, Operand(edx)); 237 __ add(src, edx);
234 __ sub(Operand(count), edx); 238 __ sub(count, edx);
235 // edi is now aligned, ecx holds number of remaining bytes to copy. 239 // edi is now aligned, ecx holds number of remaining bytes to copy.
236 240
237 __ mov(edx, count); 241 __ mov(edx, count);
238 count = edx; 242 count = edx;
239 __ shr(ecx, 2); // Make word count instead of byte count. 243 __ shr(ecx, 2); // Make word count instead of byte count.
240 __ rep_movs(); 244 __ rep_movs();
241 245
242 // At most 3 bytes left to copy. Copy 4 bytes at end of string. 246 // At most 3 bytes left to copy. Copy 4 bytes at end of string.
243 __ and_(count, 3); 247 __ and_(count, 3);
244 __ mov(eax, Operand(src, count, times_1, -4)); 248 __ mov(eax, Operand(src, count, times_1, -4));
(...skipping 12 matching lines...) Expand all
257 CPU::FlushICache(buffer, actual_size); 261 CPU::FlushICache(buffer, actual_size);
258 OS::ProtectCode(buffer, actual_size); 262 OS::ProtectCode(buffer, actual_size);
259 return FUNCTION_CAST<OS::MemCopyFunction>(buffer); 263 return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
260 } 264 }
261 265
262 #undef __ 266 #undef __
263 267
264 } } // namespace v8::internal 268 } } // namespace v8::internal
265 269
266 #endif // V8_TARGET_ARCH_IA32 270 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« no previous file with comments | « src/ia32/code-stubs-ia32.cc ('k') | src/ia32/debug-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698