OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/mips64/codegen-mips64.h" | 5 #include "src/mips64/codegen-mips64.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
8 | 8 |
9 #include <memory> | 9 #include <memory> |
10 | 10 |
(...skipping 108 matching lines...)
119 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk)); | 119 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk)); |
120 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk)); | 120 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk)); |
121 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); | 121 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); |
122 | 122 |
123 if (pref_hint_store != kPrefHintPrepareForStore) { | 123 if (pref_hint_store != kPrefHintPrepareForStore) { |
124 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk)); | 124 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk)); |
125 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk)); | 125 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk)); |
126 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk)); | 126 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk)); |
127 } | 127 } |
128 __ bind(&loop16w); | 128 __ bind(&loop16w); |
129 __ lw(a4, MemOperand(a1)); | 129 __ Lw(a4, MemOperand(a1)); |
130 | 130 |
131 if (pref_hint_store == kPrefHintPrepareForStore) { | 131 if (pref_hint_store == kPrefHintPrepareForStore) { |
132 __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch. | 132 __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch. |
133 __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg)); | 133 __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg)); |
134 } | 134 } |
135 __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot. | 135 __ Lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot. |
136 | 136 |
137 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); | 137 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); |
138 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); | 138 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); |
139 | 139 |
140 __ bind(&skip_pref); | 140 __ bind(&skip_pref); |
141 __ lw(a6, MemOperand(a1, 2, loadstore_chunk)); | 141 __ Lw(a6, MemOperand(a1, 2, loadstore_chunk)); |
142 __ lw(a7, MemOperand(a1, 3, loadstore_chunk)); | 142 __ Lw(a7, MemOperand(a1, 3, loadstore_chunk)); |
143 __ lw(t0, MemOperand(a1, 4, loadstore_chunk)); | 143 __ Lw(t0, MemOperand(a1, 4, loadstore_chunk)); |
144 __ lw(t1, MemOperand(a1, 5, loadstore_chunk)); | 144 __ Lw(t1, MemOperand(a1, 5, loadstore_chunk)); |
145 __ lw(t2, MemOperand(a1, 6, loadstore_chunk)); | 145 __ Lw(t2, MemOperand(a1, 6, loadstore_chunk)); |
146 __ lw(t3, MemOperand(a1, 7, loadstore_chunk)); | 146 __ Lw(t3, MemOperand(a1, 7, loadstore_chunk)); |
147 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); | 147 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); |
148 | 148 |
149 __ sw(a4, MemOperand(a0)); | 149 __ Sw(a4, MemOperand(a0)); |
150 __ sw(a5, MemOperand(a0, 1, loadstore_chunk)); | 150 __ Sw(a5, MemOperand(a0, 1, loadstore_chunk)); |
151 __ sw(a6, MemOperand(a0, 2, loadstore_chunk)); | 151 __ Sw(a6, MemOperand(a0, 2, loadstore_chunk)); |
152 __ sw(a7, MemOperand(a0, 3, loadstore_chunk)); | 152 __ Sw(a7, MemOperand(a0, 3, loadstore_chunk)); |
153 __ sw(t0, MemOperand(a0, 4, loadstore_chunk)); | 153 __ Sw(t0, MemOperand(a0, 4, loadstore_chunk)); |
154 __ sw(t1, MemOperand(a0, 5, loadstore_chunk)); | 154 __ Sw(t1, MemOperand(a0, 5, loadstore_chunk)); |
155 __ sw(t2, MemOperand(a0, 6, loadstore_chunk)); | 155 __ Sw(t2, MemOperand(a0, 6, loadstore_chunk)); |
156 __ sw(t3, MemOperand(a0, 7, loadstore_chunk)); | 156 __ Sw(t3, MemOperand(a0, 7, loadstore_chunk)); |
157 | 157 |
158 __ lw(a4, MemOperand(a1, 8, loadstore_chunk)); | 158 __ Lw(a4, MemOperand(a1, 8, loadstore_chunk)); |
159 __ lw(a5, MemOperand(a1, 9, loadstore_chunk)); | 159 __ Lw(a5, MemOperand(a1, 9, loadstore_chunk)); |
160 __ lw(a6, MemOperand(a1, 10, loadstore_chunk)); | 160 __ Lw(a6, MemOperand(a1, 10, loadstore_chunk)); |
161 __ lw(a7, MemOperand(a1, 11, loadstore_chunk)); | 161 __ Lw(a7, MemOperand(a1, 11, loadstore_chunk)); |
162 __ lw(t0, MemOperand(a1, 12, loadstore_chunk)); | 162 __ Lw(t0, MemOperand(a1, 12, loadstore_chunk)); |
163 __ lw(t1, MemOperand(a1, 13, loadstore_chunk)); | 163 __ Lw(t1, MemOperand(a1, 13, loadstore_chunk)); |
164 __ lw(t2, MemOperand(a1, 14, loadstore_chunk)); | 164 __ Lw(t2, MemOperand(a1, 14, loadstore_chunk)); |
165 __ lw(t3, MemOperand(a1, 15, loadstore_chunk)); | 165 __ Lw(t3, MemOperand(a1, 15, loadstore_chunk)); |
166 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); | 166 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); |
167 | 167 |
168 __ sw(a4, MemOperand(a0, 8, loadstore_chunk)); | 168 __ Sw(a4, MemOperand(a0, 8, loadstore_chunk)); |
169 __ sw(a5, MemOperand(a0, 9, loadstore_chunk)); | 169 __ Sw(a5, MemOperand(a0, 9, loadstore_chunk)); |
170 __ sw(a6, MemOperand(a0, 10, loadstore_chunk)); | 170 __ Sw(a6, MemOperand(a0, 10, loadstore_chunk)); |
171 __ sw(a7, MemOperand(a0, 11, loadstore_chunk)); | 171 __ Sw(a7, MemOperand(a0, 11, loadstore_chunk)); |
172 __ sw(t0, MemOperand(a0, 12, loadstore_chunk)); | 172 __ Sw(t0, MemOperand(a0, 12, loadstore_chunk)); |
173 __ sw(t1, MemOperand(a0, 13, loadstore_chunk)); | 173 __ Sw(t1, MemOperand(a0, 13, loadstore_chunk)); |
174 __ sw(t2, MemOperand(a0, 14, loadstore_chunk)); | 174 __ Sw(t2, MemOperand(a0, 14, loadstore_chunk)); |
175 __ sw(t3, MemOperand(a0, 15, loadstore_chunk)); | 175 __ Sw(t3, MemOperand(a0, 15, loadstore_chunk)); |
176 __ addiu(a0, a0, 16 * loadstore_chunk); | 176 __ addiu(a0, a0, 16 * loadstore_chunk); |
177 __ bne(a0, a3, &loop16w); | 177 __ bne(a0, a3, &loop16w); |
178 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. | 178 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. |
179 __ mov(a2, t8); | 179 __ mov(a2, t8); |
180 | 180 |
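The loop above is the aligned fast path: each iteration moves 16 words (64 bytes) while the Pref instructions keep load and store prefetches running a few chunks ahead of a1 and a0. A minimal C++ sketch of that shape (illustrative only, not the generated code; the prefetch step is left as a comment):

    #include <cstddef>
    #include <cstdint>

    // Aligned bulk copy: 16 words per iteration, mirroring the Lw/Sw pairs above.
    static void CopyAlignedBulk(uint32_t* dst, const uint32_t* src, size_t words) {
      size_t blocks = words / 16;
      for (size_t b = 0; b < blocks; ++b) {
        // A real implementation would issue prefetches a few blocks ahead here,
        // as the Pref(pref_hint_load / pref_hint_store, ...) calls do in the stub.
        for (int i = 0; i < 16; ++i) dst[i] = src[i];
        dst += 16;
        src += 16;
      }
    }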
181 // Here we have src and dest word-aligned but less than 64-bytes to go. | 181 // Here we have src and dest word-aligned but less than 64-bytes to go. |
182 // Check for a 32 bytes chunk and copy if there is one. Otherwise jump | 182 // Check for a 32 bytes chunk and copy if there is one. Otherwise jump |
183 // down to chk1w to handle the tail end of the copy. | 183 // down to chk1w to handle the tail end of the copy. |
184 __ bind(&chkw); | 184 __ bind(&chkw); |
185 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); | 185 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); |
186 __ andi(t8, a2, 0x1f); | 186 __ andi(t8, a2, 0x1f); |
187 __ beq(a2, t8, &chk1w); // Less than 32? | 187 __ beq(a2, t8, &chk1w); // Less than 32? |
188 __ nop(); // In delay slot. | 188 __ nop(); // In delay slot. |
189 __ lw(a4, MemOperand(a1)); | 189 __ Lw(a4, MemOperand(a1)); |
190 __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); | 190 __ Lw(a5, MemOperand(a1, 1, loadstore_chunk)); |
191 __ lw(a6, MemOperand(a1, 2, loadstore_chunk)); | 191 __ Lw(a6, MemOperand(a1, 2, loadstore_chunk)); |
192 __ lw(a7, MemOperand(a1, 3, loadstore_chunk)); | 192 __ Lw(a7, MemOperand(a1, 3, loadstore_chunk)); |
193 __ lw(t0, MemOperand(a1, 4, loadstore_chunk)); | 193 __ Lw(t0, MemOperand(a1, 4, loadstore_chunk)); |
194 __ lw(t1, MemOperand(a1, 5, loadstore_chunk)); | 194 __ Lw(t1, MemOperand(a1, 5, loadstore_chunk)); |
195 __ lw(t2, MemOperand(a1, 6, loadstore_chunk)); | 195 __ Lw(t2, MemOperand(a1, 6, loadstore_chunk)); |
196 __ lw(t3, MemOperand(a1, 7, loadstore_chunk)); | 196 __ Lw(t3, MemOperand(a1, 7, loadstore_chunk)); |
197 __ addiu(a1, a1, 8 * loadstore_chunk); | 197 __ addiu(a1, a1, 8 * loadstore_chunk); |
198 __ sw(a4, MemOperand(a0)); | 198 __ Sw(a4, MemOperand(a0)); |
199 __ sw(a5, MemOperand(a0, 1, loadstore_chunk)); | 199 __ Sw(a5, MemOperand(a0, 1, loadstore_chunk)); |
200 __ sw(a6, MemOperand(a0, 2, loadstore_chunk)); | 200 __ Sw(a6, MemOperand(a0, 2, loadstore_chunk)); |
201 __ sw(a7, MemOperand(a0, 3, loadstore_chunk)); | 201 __ Sw(a7, MemOperand(a0, 3, loadstore_chunk)); |
202 __ sw(t0, MemOperand(a0, 4, loadstore_chunk)); | 202 __ Sw(t0, MemOperand(a0, 4, loadstore_chunk)); |
203 __ sw(t1, MemOperand(a0, 5, loadstore_chunk)); | 203 __ Sw(t1, MemOperand(a0, 5, loadstore_chunk)); |
204 __ sw(t2, MemOperand(a0, 6, loadstore_chunk)); | 204 __ Sw(t2, MemOperand(a0, 6, loadstore_chunk)); |
205 __ sw(t3, MemOperand(a0, 7, loadstore_chunk)); | 205 __ Sw(t3, MemOperand(a0, 7, loadstore_chunk)); |
206 __ addiu(a0, a0, 8 * loadstore_chunk); | 206 __ addiu(a0, a0, 8 * loadstore_chunk); |
207 | 207 |
208 // Here we have less than 32 bytes to copy. Set up for a loop to copy | 208 // Here we have less than 32 bytes to copy. Set up for a loop to copy |
209 // one word at a time. Set a2 to count how many bytes we have to copy | 209 // one word at a time. Set a2 to count how many bytes we have to copy |
210 // after all the word chunks are copied and a3 to the dst pointer after | 210 // after all the word chunks are copied and a3 to the dst pointer after |
211 // all the word chunks have been copied. We will loop, incrementing a0 | 211 // all the word chunks have been copied. We will loop, incrementing a0 |
212 // and a1 until a0 equals a3. | 212 // and a1 until a0 equals a3. |
213 __ bind(&chk1w); | 213 __ bind(&chk1w); |
214 __ andi(a2, t8, loadstore_chunk - 1); | 214 __ andi(a2, t8, loadstore_chunk - 1); |
215 __ beq(a2, t8, &lastb); | 215 __ beq(a2, t8, &lastb); |
216 __ subu(a3, t8, a2); // In delay slot. | 216 __ subu(a3, t8, a2); // In delay slot. |
217 __ addu(a3, a0, a3); | 217 __ addu(a3, a0, a3); |
218 | 218 |
219 __ bind(&wordCopy_loop); | 219 __ bind(&wordCopy_loop); |
220 __ lw(a7, MemOperand(a1)); | 220 __ Lw(a7, MemOperand(a1)); |
221 __ addiu(a0, a0, loadstore_chunk); | 221 __ addiu(a0, a0, loadstore_chunk); |
222 __ addiu(a1, a1, loadstore_chunk); | 222 __ addiu(a1, a1, loadstore_chunk); |
223 __ bne(a0, a3, &wordCopy_loop); | 223 __ bne(a0, a3, &wordCopy_loop); |
224 __ sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. | 224 __ Sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. |
225 | 225 |
226 __ bind(&lastb); | 226 __ bind(&lastb); |
227 __ Branch(&leave, le, a2, Operand(zero_reg)); | 227 __ Branch(&leave, le, a2, Operand(zero_reg)); |
228 __ addu(a3, a0, a2); | 228 __ addu(a3, a0, a2); |
229 | 229 |
230 __ bind(&lastbloop); | 230 __ bind(&lastbloop); |
231 __ lb(v1, MemOperand(a1)); | 231 __ Lb(v1, MemOperand(a1)); |
232 __ addiu(a0, a0, 1); | 232 __ addiu(a0, a0, 1); |
233 __ addiu(a1, a1, 1); | 233 __ addiu(a1, a1, 1); |
234 __ bne(a0, a3, &lastbloop); | 234 __ bne(a0, a3, &lastbloop); |
235 __ sb(v1, MemOperand(a0, -1)); // In delay slot. | 235 __ Sb(v1, MemOperand(a0, -1)); // In delay slot. |
236 | 236 |
237 __ bind(&leave); | 237 __ bind(&leave); |
238 __ jr(ra); | 238 __ jr(ra); |
239 __ nop(); | 239 __ nop(); |
240 | 240 |
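Before the unaligned case below, the aligned tail (chkw, chk1w, lastb) peels an optional 8-word block, then copies full words, then single bytes. A rough C++ equivalent, assuming word-aligned src/dst as on this path (a sketch, not the emitted code):

    #include <cstddef>
    #include <cstdint>

    static void CopyAlignedTail(uint8_t* dst, const uint8_t* src, size_t bytes) {
      if (bytes >= 32) {  // chkw: one 8-word block if it fits
        for (int i = 0; i < 8; ++i)
          reinterpret_cast<uint32_t*>(dst)[i] =
              reinterpret_cast<const uint32_t*>(src)[i];
        dst += 32; src += 32; bytes -= 32;
      }
      while (bytes >= 4) {  // wordCopy_loop: one word at a time
        *reinterpret_cast<uint32_t*>(dst) = *reinterpret_cast<const uint32_t*>(src);
        dst += 4; src += 4; bytes -= 4;
      }
      while (bytes > 0) { *dst++ = *src++; --bytes; }  // lastbloop: remaining bytes
    }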
241 // Unaligned case. Only the dst gets aligned so we need to do partial | 241 // Unaligned case. Only the dst gets aligned so we need to do partial |
242 // loads of the source followed by normal stores to the dst (once we | 242 // loads of the source followed by normal stores to the dst (once we |
243 // have aligned the destination). | 243 // have aligned the destination). |
244 __ bind(&unaligned); | 244 __ bind(&unaligned); |
245 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1. | 245 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1. |
(...skipping 109 matching lines...)
355 __ lwr(t0, | 355 __ lwr(t0, |
356 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); | 356 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); |
357 __ lwr(t1, | 357 __ lwr(t1, |
358 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); | 358 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); |
359 __ lwr(t2, | 359 __ lwr(t2, |
360 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); | 360 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); |
361 __ lwr(t3, | 361 __ lwr(t3, |
362 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); | 362 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); |
363 } | 363 } |
364 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); | 364 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); |
365 __ sw(a4, MemOperand(a0)); | 365 __ Sw(a4, MemOperand(a0)); |
366 __ sw(a5, MemOperand(a0, 1, loadstore_chunk)); | 366 __ Sw(a5, MemOperand(a0, 1, loadstore_chunk)); |
367 __ sw(a6, MemOperand(a0, 2, loadstore_chunk)); | 367 __ Sw(a6, MemOperand(a0, 2, loadstore_chunk)); |
368 __ sw(a7, MemOperand(a0, 3, loadstore_chunk)); | 368 __ Sw(a7, MemOperand(a0, 3, loadstore_chunk)); |
369 __ sw(t0, MemOperand(a0, 4, loadstore_chunk)); | 369 __ Sw(t0, MemOperand(a0, 4, loadstore_chunk)); |
370 __ sw(t1, MemOperand(a0, 5, loadstore_chunk)); | 370 __ Sw(t1, MemOperand(a0, 5, loadstore_chunk)); |
371 __ sw(t2, MemOperand(a0, 6, loadstore_chunk)); | 371 __ Sw(t2, MemOperand(a0, 6, loadstore_chunk)); |
372 __ sw(t3, MemOperand(a0, 7, loadstore_chunk)); | 372 __ Sw(t3, MemOperand(a0, 7, loadstore_chunk)); |
373 if (kArchEndian == kLittle) { | 373 if (kArchEndian == kLittle) { |
374 __ lwr(a4, MemOperand(a1, 8, loadstore_chunk)); | 374 __ lwr(a4, MemOperand(a1, 8, loadstore_chunk)); |
375 __ lwr(a5, MemOperand(a1, 9, loadstore_chunk)); | 375 __ lwr(a5, MemOperand(a1, 9, loadstore_chunk)); |
376 __ lwr(a6, MemOperand(a1, 10, loadstore_chunk)); | 376 __ lwr(a6, MemOperand(a1, 10, loadstore_chunk)); |
377 __ lwr(a7, MemOperand(a1, 11, loadstore_chunk)); | 377 __ lwr(a7, MemOperand(a1, 11, loadstore_chunk)); |
378 __ lwr(t0, MemOperand(a1, 12, loadstore_chunk)); | 378 __ lwr(t0, MemOperand(a1, 12, loadstore_chunk)); |
379 __ lwr(t1, MemOperand(a1, 13, loadstore_chunk)); | 379 __ lwr(t1, MemOperand(a1, 13, loadstore_chunk)); |
380 __ lwr(t2, MemOperand(a1, 14, loadstore_chunk)); | 380 __ lwr(t2, MemOperand(a1, 14, loadstore_chunk)); |
381 __ lwr(t3, MemOperand(a1, 15, loadstore_chunk)); | 381 __ lwr(t3, MemOperand(a1, 15, loadstore_chunk)); |
382 __ lwl(a4, | 382 __ lwl(a4, |
(...skipping 32 matching lines...)
415 __ lwr(t0, | 415 __ lwr(t0, |
416 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); | 416 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); |
417 __ lwr(t1, | 417 __ lwr(t1, |
418 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); | 418 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); |
419 __ lwr(t2, | 419 __ lwr(t2, |
420 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); | 420 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); |
421 __ lwr(t3, | 421 __ lwr(t3, |
422 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); | 422 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); |
423 } | 423 } |
424 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); | 424 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); |
425 __ sw(a4, MemOperand(a0, 8, loadstore_chunk)); | 425 __ Sw(a4, MemOperand(a0, 8, loadstore_chunk)); |
426 __ sw(a5, MemOperand(a0, 9, loadstore_chunk)); | 426 __ Sw(a5, MemOperand(a0, 9, loadstore_chunk)); |
427 __ sw(a6, MemOperand(a0, 10, loadstore_chunk)); | 427 __ Sw(a6, MemOperand(a0, 10, loadstore_chunk)); |
428 __ sw(a7, MemOperand(a0, 11, loadstore_chunk)); | 428 __ Sw(a7, MemOperand(a0, 11, loadstore_chunk)); |
429 __ sw(t0, MemOperand(a0, 12, loadstore_chunk)); | 429 __ Sw(t0, MemOperand(a0, 12, loadstore_chunk)); |
430 __ sw(t1, MemOperand(a0, 13, loadstore_chunk)); | 430 __ Sw(t1, MemOperand(a0, 13, loadstore_chunk)); |
431 __ sw(t2, MemOperand(a0, 14, loadstore_chunk)); | 431 __ Sw(t2, MemOperand(a0, 14, loadstore_chunk)); |
432 __ sw(t3, MemOperand(a0, 15, loadstore_chunk)); | 432 __ Sw(t3, MemOperand(a0, 15, loadstore_chunk)); |
433 __ addiu(a0, a0, 16 * loadstore_chunk); | 433 __ addiu(a0, a0, 16 * loadstore_chunk); |
434 __ bne(a0, a3, &ua_loop16w); | 434 __ bne(a0, a3, &ua_loop16w); |
435 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. | 435 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. |
436 __ mov(a2, t8); | 436 __ mov(a2, t8); |
437 | 437 |
438 // Here less than 64-bytes. Check for | 438 // Here less than 64-bytes. Check for |
439 // a 32 byte chunk and copy if there is one. Otherwise jump down to | 439 // a 32 byte chunk and copy if there is one. Otherwise jump down to |
440 // ua_chk1w to handle the tail end of the copy. | 440 // ua_chk1w to handle the tail end of the copy. |
441 __ bind(&ua_chkw); | 441 __ bind(&ua_chkw); |
442 __ Pref(pref_hint_load, MemOperand(a1)); | 442 __ Pref(pref_hint_load, MemOperand(a1)); |
(...skipping 46 matching lines...)
489 __ lwr(t0, | 489 __ lwr(t0, |
490 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); | 490 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); |
491 __ lwr(t1, | 491 __ lwr(t1, |
492 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); | 492 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); |
493 __ lwr(t2, | 493 __ lwr(t2, |
494 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); | 494 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); |
495 __ lwr(t3, | 495 __ lwr(t3, |
496 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); | 496 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); |
497 } | 497 } |
498 __ addiu(a1, a1, 8 * loadstore_chunk); | 498 __ addiu(a1, a1, 8 * loadstore_chunk); |
499 __ sw(a4, MemOperand(a0)); | 499 __ Sw(a4, MemOperand(a0)); |
500 __ sw(a5, MemOperand(a0, 1, loadstore_chunk)); | 500 __ Sw(a5, MemOperand(a0, 1, loadstore_chunk)); |
501 __ sw(a6, MemOperand(a0, 2, loadstore_chunk)); | 501 __ Sw(a6, MemOperand(a0, 2, loadstore_chunk)); |
502 __ sw(a7, MemOperand(a0, 3, loadstore_chunk)); | 502 __ Sw(a7, MemOperand(a0, 3, loadstore_chunk)); |
503 __ sw(t0, MemOperand(a0, 4, loadstore_chunk)); | 503 __ Sw(t0, MemOperand(a0, 4, loadstore_chunk)); |
504 __ sw(t1, MemOperand(a0, 5, loadstore_chunk)); | 504 __ Sw(t1, MemOperand(a0, 5, loadstore_chunk)); |
505 __ sw(t2, MemOperand(a0, 6, loadstore_chunk)); | 505 __ Sw(t2, MemOperand(a0, 6, loadstore_chunk)); |
506 __ sw(t3, MemOperand(a0, 7, loadstore_chunk)); | 506 __ Sw(t3, MemOperand(a0, 7, loadstore_chunk)); |
507 __ addiu(a0, a0, 8 * loadstore_chunk); | 507 __ addiu(a0, a0, 8 * loadstore_chunk); |
508 | 508 |
509 // Less than 32 bytes to copy. Set up for a loop to | 509 // Less than 32 bytes to copy. Set up for a loop to |
510 // copy one word at a time. | 510 // copy one word at a time. |
511 __ bind(&ua_chk1w); | 511 __ bind(&ua_chk1w); |
512 __ andi(a2, t8, loadstore_chunk - 1); | 512 __ andi(a2, t8, loadstore_chunk - 1); |
513 __ beq(a2, t8, &ua_smallCopy); | 513 __ beq(a2, t8, &ua_smallCopy); |
514 __ subu(a3, t8, a2); // In delay slot. | 514 __ subu(a3, t8, a2); // In delay slot. |
515 __ addu(a3, a0, a3); | 515 __ addu(a3, a0, a3); |
516 | 516 |
517 __ bind(&ua_wordCopy_loop); | 517 __ bind(&ua_wordCopy_loop); |
518 if (kArchEndian == kLittle) { | 518 if (kArchEndian == kLittle) { |
519 __ lwr(v1, MemOperand(a1)); | 519 __ lwr(v1, MemOperand(a1)); |
520 __ lwl(v1, | 520 __ lwl(v1, |
521 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); | 521 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); |
522 } else { | 522 } else { |
523 __ lwl(v1, MemOperand(a1)); | 523 __ lwl(v1, MemOperand(a1)); |
524 __ lwr(v1, | 524 __ lwr(v1, |
525 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); | 525 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); |
526 } | 526 } |
527 __ addiu(a0, a0, loadstore_chunk); | 527 __ addiu(a0, a0, loadstore_chunk); |
528 __ addiu(a1, a1, loadstore_chunk); | 528 __ addiu(a1, a1, loadstore_chunk); |
529 __ bne(a0, a3, &ua_wordCopy_loop); | 529 __ bne(a0, a3, &ua_wordCopy_loop); |
530 __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. | 530 __ Sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. |
531 | 531 |
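In this unaligned path, each lwr/lwl pair assembles one 32-bit word from a misaligned source address, with the two halves swapped between the little- and big-endian variants shown above. The portable C++ equivalent of what the pair accomplishes (a sketch; compilers lower the memcpy to the appropriate instructions):

    #include <cstdint>
    #include <cstring>

    static inline uint32_t LoadUnalignedWord(const uint8_t* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));  // one misaligned 32-bit load
      return v;
    }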
532 // Copy the last 8 bytes. | 532 // Copy the last 8 bytes. |
533 __ bind(&ua_smallCopy); | 533 __ bind(&ua_smallCopy); |
534 __ beq(a2, zero_reg, &leave); | 534 __ beq(a2, zero_reg, &leave); |
535 __ addu(a3, a0, a2); // In delay slot. | 535 __ addu(a3, a0, a2); // In delay slot. |
536 | 536 |
537 __ bind(&ua_smallCopy_loop); | 537 __ bind(&ua_smallCopy_loop); |
538 __ lb(v1, MemOperand(a1)); | 538 __ Lb(v1, MemOperand(a1)); |
539 __ addiu(a0, a0, 1); | 539 __ addiu(a0, a0, 1); |
540 __ addiu(a1, a1, 1); | 540 __ addiu(a1, a1, 1); |
541 __ bne(a0, a3, &ua_smallCopy_loop); | 541 __ bne(a0, a3, &ua_smallCopy_loop); |
542 __ sb(v1, MemOperand(a0, -1)); // In delay slot. | 542 __ Sb(v1, MemOperand(a0, -1)); // In delay slot. |
543 | 543 |
544 __ jr(ra); | 544 __ jr(ra); |
545 __ nop(); | 545 __ nop(); |
546 } | 546 } |
547 CodeDesc desc; | 547 CodeDesc desc; |
548 masm.GetCode(&desc); | 548 masm.GetCode(&desc); |
549 DCHECK(!RelocInfo::RequiresRelocation(isolate, desc)); | 549 DCHECK(!RelocInfo::RequiresRelocation(isolate, desc)); |
550 | 550 |
551 Assembler::FlushICache(isolate, buffer, actual_size); | 551 Assembler::FlushICache(isolate, buffer, actual_size); |
552 base::OS::ProtectCode(buffer, actual_size); | 552 base::OS::ProtectCode(buffer, actual_size); |
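Once the stub has been assembled into the raw buffer, the instruction cache is flushed for that range and the buffer is switched to read+execute. Roughly what those two calls do on a POSIX system (a sketch; V8's base::OS wrappers handle the platform details, and the buffer is assumed to be page-aligned):

    #include <cstddef>
    #include <sys/mman.h>

    static void SealGeneratedCode(void* buf, size_t size) {
      char* p = static_cast<char*>(buf);
      __builtin___clear_cache(p, p + size);        // cf. Assembler::FlushICache
      mprotect(buf, size, PROT_READ | PROT_EXEC);  // cf. base::OS::ProtectCode
    }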
(...skipping 56 matching lines...)
609 | 609 |
610 void StringCharLoadGenerator::Generate(MacroAssembler* masm, | 610 void StringCharLoadGenerator::Generate(MacroAssembler* masm, |
611 Register string, | 611 Register string, |
612 Register index, | 612 Register index, |
613 Register result, | 613 Register result, |
614 Label* call_runtime) { | 614 Label* call_runtime) { |
615 Label indirect_string_loaded; | 615 Label indirect_string_loaded; |
616 __ bind(&indirect_string_loaded); | 616 __ bind(&indirect_string_loaded); |
617 | 617 |
618 // Fetch the instance type of the receiver into result register. | 618 // Fetch the instance type of the receiver into result register. |
619 __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset)); | 619 __ Ld(result, FieldMemOperand(string, HeapObject::kMapOffset)); |
620 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); | 620 __ Lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); |
621 | 621 |
622 // We need special handling for indirect strings. | 622 // We need special handling for indirect strings. |
623 Label check_sequential; | 623 Label check_sequential; |
624 __ And(at, result, Operand(kIsIndirectStringMask)); | 624 __ And(at, result, Operand(kIsIndirectStringMask)); |
625 __ Branch(&check_sequential, eq, at, Operand(zero_reg)); | 625 __ Branch(&check_sequential, eq, at, Operand(zero_reg)); |
626 | 626 |
627 // Dispatch on the indirect string shape: slice or cons. | 627 // Dispatch on the indirect string shape: slice or cons. |
628 Label cons_string, thin_string; | 628 Label cons_string, thin_string; |
629 __ And(at, result, Operand(kStringRepresentationMask)); | 629 __ And(at, result, Operand(kStringRepresentationMask)); |
630 __ Branch(&cons_string, eq, at, Operand(kConsStringTag)); | 630 __ Branch(&cons_string, eq, at, Operand(kConsStringTag)); |
631 __ Branch(&thin_string, eq, at, Operand(kThinStringTag)); | 631 __ Branch(&thin_string, eq, at, Operand(kThinStringTag)); |
632 | 632 |
633 // Handle slices. | 633 // Handle slices. |
634 __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset)); | 634 __ Ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset)); |
635 __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset)); | 635 __ Ld(string, FieldMemOperand(string, SlicedString::kParentOffset)); |
636 __ dsra32(at, result, 0); | 636 __ dsra32(at, result, 0); |
637 __ Daddu(index, index, at); | 637 __ Daddu(index, index, at); |
638 __ jmp(&indirect_string_loaded); | 638 __ jmp(&indirect_string_loaded); |
639 | 639 |
640 // Handle thin strings. | 640 // Handle thin strings. |
641 __ bind(&thin_string); | 641 __ bind(&thin_string); |
642 __ ld(string, FieldMemOperand(string, ThinString::kActualOffset)); | 642 __ Ld(string, FieldMemOperand(string, ThinString::kActualOffset)); |
643 __ jmp(&indirect_string_loaded); | 643 __ jmp(&indirect_string_loaded); |
644 | 644 |
645 // Handle cons strings. | 645 // Handle cons strings. |
646 // Check whether the right hand side is the empty string (i.e. if | 646 // Check whether the right hand side is the empty string (i.e. if |
647 // this is really a flat string in a cons string). If that is not | 647 // this is really a flat string in a cons string). If that is not |
648 // the case we would rather go to the runtime system now to flatten | 648 // the case we would rather go to the runtime system now to flatten |
649 // the string. | 649 // the string. |
650 __ bind(&cons_string); | 650 __ bind(&cons_string); |
651 __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset)); | 651 __ Ld(result, FieldMemOperand(string, ConsString::kSecondOffset)); |
652 __ LoadRoot(at, Heap::kempty_stringRootIndex); | 652 __ LoadRoot(at, Heap::kempty_stringRootIndex); |
653 __ Branch(call_runtime, ne, result, Operand(at)); | 653 __ Branch(call_runtime, ne, result, Operand(at)); |
654 // Get the first of the two strings and load its instance type. | 654 // Get the first of the two strings and load its instance type. |
655 __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset)); | 655 __ Ld(string, FieldMemOperand(string, ConsString::kFirstOffset)); |
656 __ jmp(&indirect_string_loaded); | 656 __ jmp(&indirect_string_loaded); |
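The block from indirect_string_loaded down to here is a small fixed-point loop: a sliced string adds its offset to the index and continues with its parent, a thin string continues with the actual string, and a cons string continues with its first child only when the second child is the empty string (otherwise control goes to the runtime to flatten it). A hypothetical, simplified C++ model of that dispatch (the types and helpers are assumptions, not V8's real classes):

    #include <cstdint>

    enum class Shape { kSliced, kThin, kCons, kSequential, kExternal };
    struct Str {
      Shape shape;
      Str* parent = nullptr;   // sliced: underlying string
      Str* actual = nullptr;   // thin: canonical string
      Str* first = nullptr;    // cons: left child
      Str* second = nullptr;   // cons: right child (empty if already flat)
      uint32_t offset = 0;     // sliced: start offset into parent
      bool is_empty = false;
    };

    // Returns false when the runtime must flatten the string first.
    static bool UnwrapIndirect(Str*& s, uint32_t& index) {
      for (;;) {
        switch (s->shape) {
          case Shape::kSliced: index += s->offset; s = s->parent; break;
          case Shape::kThin:   s = s->actual; break;
          case Shape::kCons:
            if (!s->second->is_empty) return false;  // not flat: call_runtime
            s = s->first;
            break;
          default:
            return true;  // sequential or external: ready to load the character
        }
      }
    }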
657 | 657 |
658 // Distinguish sequential and external strings. Only these two string | 658 // Distinguish sequential and external strings. Only these two string |
659 // representations can reach here (slices and flat cons strings have been | 659 // representations can reach here (slices and flat cons strings have been |
660 // reduced to the underlying sequential or external string). | 660 // reduced to the underlying sequential or external string). |
661 Label external_string, check_encoding; | 661 Label external_string, check_encoding; |
662 __ bind(&check_sequential); | 662 __ bind(&check_sequential); |
663 STATIC_ASSERT(kSeqStringTag == 0); | 663 STATIC_ASSERT(kSeqStringTag == 0); |
664 __ And(at, result, Operand(kStringRepresentationMask)); | 664 __ And(at, result, Operand(kStringRepresentationMask)); |
665 __ Branch(&external_string, ne, at, Operand(zero_reg)); | 665 __ Branch(&external_string, ne, at, Operand(zero_reg)); |
(...skipping 11 matching lines...)
677 // Assert that we do not have a cons or slice (indirect strings) here. | 677 // Assert that we do not have a cons or slice (indirect strings) here. |
678 // Sequential strings have already been ruled out. | 678 // Sequential strings have already been ruled out. |
679 __ And(at, result, Operand(kIsIndirectStringMask)); | 679 __ And(at, result, Operand(kIsIndirectStringMask)); |
680 __ Assert(eq, kExternalStringExpectedButNotFound, | 680 __ Assert(eq, kExternalStringExpectedButNotFound, |
681 at, Operand(zero_reg)); | 681 at, Operand(zero_reg)); |
682 } | 682 } |
683 // Rule out short external strings. | 683 // Rule out short external strings. |
684 STATIC_ASSERT(kShortExternalStringTag != 0); | 684 STATIC_ASSERT(kShortExternalStringTag != 0); |
685 __ And(at, result, Operand(kShortExternalStringMask)); | 685 __ And(at, result, Operand(kShortExternalStringMask)); |
686 __ Branch(call_runtime, ne, at, Operand(zero_reg)); | 686 __ Branch(call_runtime, ne, at, Operand(zero_reg)); |
687 __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset)); | 687 __ Ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset)); |
688 | 688 |
689 Label one_byte, done; | 689 Label one_byte, done; |
690 __ bind(&check_encoding); | 690 __ bind(&check_encoding); |
691 STATIC_ASSERT(kTwoByteStringTag == 0); | 691 STATIC_ASSERT(kTwoByteStringTag == 0); |
692 __ And(at, result, Operand(kStringEncodingMask)); | 692 __ And(at, result, Operand(kStringEncodingMask)); |
693 __ Branch(&one_byte, ne, at, Operand(zero_reg)); | 693 __ Branch(&one_byte, ne, at, Operand(zero_reg)); |
694 // Two-byte string. | 694 // Two-byte string. |
695 __ Dlsa(at, string, index, 1); | 695 __ Dlsa(at, string, index, 1); |
696 __ lhu(result, MemOperand(at)); | 696 __ Lhu(result, MemOperand(at)); |
697 __ jmp(&done); | 697 __ jmp(&done); |
698 __ bind(&one_byte); | 698 __ bind(&one_byte); |
699 // One_byte string. | 699 // One_byte string. |
700 __ Daddu(at, string, index); | 700 __ Daddu(at, string, index); |
701 __ lbu(result, MemOperand(at)); | 701 __ Lbu(result, MemOperand(at)); |
702 __ bind(&done); | 702 __ bind(&done); |
703 } | 703 } |
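Once the string is sequential or external and the encoding is known, the character is fetched with a scaled index: two-byte strings use string + (index << 1) and a 16-bit load (Dlsa/Lhu), one-byte strings use string + index and an 8-bit load (Daddu/Lbu). A sketch of that last step, where `data` is assumed to point at the character payload (the FieldMemOperand header offsets handled in the skipped lines are omitted):

    #include <cstdint>

    static uint32_t CharAt(const uint8_t* data, uint32_t index, bool two_byte) {
      if (two_byte)
        return *reinterpret_cast<const uint16_t*>(data + (index << 1));  // Lhu
      return data[index];                                                // Lbu
    }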
704 | 704 |
705 #ifdef DEBUG | 705 #ifdef DEBUG |
706 // nop(CODE_AGE_MARKER_NOP) | 706 // nop(CODE_AGE_MARKER_NOP) |
707 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; | 707 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; |
708 #endif | 708 #endif |
709 | 709 |
710 | 710 |
711 CodeAgingHelper::CodeAgingHelper(Isolate* isolate) { | 711 CodeAgingHelper::CodeAgingHelper(Isolate* isolate) { |
(...skipping 62 matching lines...)
774 } | 774 } |
775 } | 775 } |
776 | 776 |
777 | 777 |
778 #undef __ | 778 #undef __ |
779 | 779 |
780 } // namespace internal | 780 } // namespace internal |
781 } // namespace v8 | 781 } // namespace v8 |
782 | 782 |
783 #endif // V8_TARGET_ARCH_MIPS64 | 783 #endif // V8_TARGET_ARCH_MIPS64 |