| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 13 matching lines...) |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if defined(V8_TARGET_ARCH_MIPS) | 30 #if defined(V8_TARGET_ARCH_MIPS) |
| 31 | 31 |
| 32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
| 33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
| 34 #include "codegen-inl.h" | 34 #include "codegen.h" |
| 35 #include "regexp-macro-assembler.h" | 35 #include "regexp-macro-assembler.h" |
| 36 | 36 |
| 37 namespace v8 { | 37 namespace v8 { |
| 38 namespace internal { | 38 namespace internal { |
| 39 | 39 |
| 40 | 40 |
| 41 #define __ ACCESS_MASM(masm) | 41 #define __ ACCESS_MASM(masm) |
| 42 | 42 |
| 43 | 43 |
| 44 void ToNumberStub::Generate(MacroAssembler* masm) { | 44 void ToNumberStub::Generate(MacroAssembler* masm) { |
| (...skipping 59 matching lines...) |
| 104 void Print() { PrintF("ConvertToDoubleStub\n"); } | 104 void Print() { PrintF("ConvertToDoubleStub\n"); } |
| 105 #endif | 105 #endif |
| 106 }; | 106 }; |
| 107 | 107 |
| 108 | 108 |
| 109 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | 109 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { |
| 110 UNIMPLEMENTED_MIPS(); | 110 UNIMPLEMENTED_MIPS(); |
| 111 } | 111 } |
| 112 | 112 |
| 113 | 113 |
| 114 class FloatingPointHelper : public AllStatic { | |
| 115 public: | |
| 116 | |
| 117 enum Destination { | |
| 118 kFPURegisters, | |
| 119 kCoreRegisters | |
| 120 }; | |
| 121 | |
| 122 | |
| 123 // Loads smis from a0 and a1 (right and left in binary operations) into |
| 124 // floating point registers. Depending on the destination, the values end up |
| 125 // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the |
| 126 // destination is floating point registers, FPU must be supported. If core |
| 127 // registers are requested when FPU is supported, f12 and f14 will be scratched. |
| 128 static void LoadSmis(MacroAssembler* masm, | |
| 129 Destination destination, | |
| 130 Register scratch1, | |
| 131 Register scratch2); | |
| 132 | |
| 133 // Loads objects from a0 and a1 (right and left in binary operations) into |
| 134 // floating point registers. Depending on the destination, the values end up |
| 135 // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the |
| 136 // destination is floating point registers, FPU must be supported. If core |
| 137 // registers are requested when FPU is supported, f12 and f14 will still be |
| 138 // scratched. If either a0 or a1 is not a number (neither a smi nor a heap |
| 139 // number object), the not_number label is jumped to with a0 and a1 intact. |
| 140 static void LoadOperands(MacroAssembler* masm, | |
| 141 FloatingPointHelper::Destination destination, | |
| 142 Register heap_number_map, | |
| 143 Register scratch1, | |
| 144 Register scratch2, | |
| 145 Label* not_number); | |
| 146 // Loads the number from object into dst as a 32-bit integer if possible. If |
| 147 // the object is not a 32-bit integer, control continues at the label |
| 148 // not_int32. If FPU is supported, double_scratch is used but not scratch2. |
| 149 static void LoadNumberAsInteger(MacroAssembler* masm, | |
| 150 Register object, | |
| 151 Register dst, | |
| 152 Register heap_number_map, | |
| 153 Register scratch1, | |
| 154 Register scratch2, | |
| 155 FPURegister double_scratch, | |
| 156 Label* not_int32); | |
| 157 private: | |
| 158 static void LoadNumber(MacroAssembler* masm, | |
| 159 FloatingPointHelper::Destination destination, | |
| 160 Register object, | |
| 161 FPURegister dst, | |
| 162 Register dst1, | |
| 163 Register dst2, | |
| 164 Register heap_number_map, | |
| 165 Register scratch1, | |
| 166 Register scratch2, | |
| 167 Label* not_number); | |
| 168 }; | |
| 169 | |
| 170 | |
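The LoadSmis definition below is still a stub. For orientation only, here is a hedged sketch of the FPU path that the class comment above describes; the exact MacroAssembler emission (sra/mtc1/cvt_d_w and the Move back to core registers) is an assumption modeled on the ARM port, not the committed MIPS code.

```cpp
// Hypothetical sketch of LoadSmis (FPU path), assuming the register
// contract from the class comment: smis arrive in a0 (right) and a1
// (left); doubles leave in f14/f12 or, for kCoreRegisters, in a2/a3
// and a0/a1.
void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  __ sra(scratch1, a0, kSmiTagSize);  // Untag the right operand.
  __ mtc1(scratch1, f14);             // Move the int32 into the FPU...
  __ cvt_d_w(f14, f14);               // ...and convert word to double.
  __ sra(scratch1, a1, kSmiTagSize);  // Same for the left operand.
  __ mtc1(scratch1, f12);
  __ cvt_d_w(f12, f12);
  if (destination == kCoreRegisters) {
    // Hand the raw double bits back in core registers; f12/f14 are
    // scratched, exactly as the class comment warns.
    __ Move(a2, a3, f14);
    __ Move(a0, a1, f12);
  }
}
```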
| 171 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | 114 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
| 172 FloatingPointHelper::Destination destination, | 115 FloatingPointHelper::Destination destination, |
| 173 Register scratch1, | 116 Register scratch1, |
| 174 Register scratch2) { | 117 Register scratch2) { |
| 175 UNIMPLEMENTED_MIPS(); | 118 UNIMPLEMENTED_MIPS(); |
| 176 } | 119 } |
| 177 | 120 |
| 178 | 121 |
| 179 void FloatingPointHelper::LoadOperands( | 122 void FloatingPointHelper::LoadOperands( |
| 180 MacroAssembler* masm, | 123 MacroAssembler* masm, |
| (...skipping 13 matching lines...) |
| 194 Register dst1, | 137 Register dst1, |
| 195 Register dst2, | 138 Register dst2, |
| 196 Register heap_number_map, | 139 Register heap_number_map, |
| 197 Register scratch1, | 140 Register scratch1, |
| 198 Register scratch2, | 141 Register scratch2, |
| 199 Label* not_number) { | 142 Label* not_number) { |
| 200 UNIMPLEMENTED_MIPS(); | 143 UNIMPLEMENTED_MIPS(); |
| 201 } | 144 } |
| 202 | 145 |
| 203 | 146 |
| 204 void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm, | 147 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, |
| 205 Register object, | 148 Register object, |
| 206 Register dst, | 149 Register dst, |
| 207 Register heap_number_map, | 150 Register heap_number_map, |
| 208 Register scratch1, | 151 Register scratch1, |
| 209 Register scratch2, | 152 Register scratch2, |
| 210 FPURegister double_scratch, | 153 Register scratch3, |
| 211 Label* not_int32) { | 154 FPURegister double_scratch, |
| 155 Label* not_number) { |
| 212 UNIMPLEMENTED_MIPS(); | 156 UNIMPLEMENTED_MIPS(); |
| 213 } | 157 } |
| 214 | 158 |
| 159 |
| 160 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, |
| 161 Register int_scratch, |
| 162 Destination destination, |
| 163 FPURegister double_dst, |
| 164 Register dst1, |
| 165 Register dst2, |
| 166 Register scratch2, |
| 167 FPURegister single_scratch) { |
| 168 UNIMPLEMENTED_MIPS(); |
| 169 } |
| 170 |
| 171 |
| 172 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, |
| 173 Register object, |
| 174 Destination destination, |
| 175 FPURegister double_dst, |
| 176 Register dst1, |
| 177 Register dst2, |
| 178 Register heap_number_map, |
| 179 Register scratch1, |
| 180 Register scratch2, |
| 181 FPURegister single_scratch, |
| 182 Label* not_int32) { |
| 183 UNIMPLEMENTED_MIPS(); |
| 184 } |
| 185 |
| 186 |
| 187 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, |
| 188 Register object, |
| 189 Register dst, |
| 190 Register heap_number_map, |
| 191 Register scratch1, |
| 192 Register scratch2, |
| 193 Register scratch3, |
| 194 FPURegister double_scratch, |
| 195 Label* not_int32) { |
| 196 UNIMPLEMENTED_MIPS(); |
| 197 } |
| 198 |
| 199 |
| 200 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, |
| 201 Register src1, |
| 202 Register src2, |
| 203 Register dst, |
| 204 Register scratch, |
| 205 Label* not_int32) { |
| 206 UNIMPLEMENTED_MIPS(); |
| 207 } |
| 208 |
| 209 |
| 210 void FloatingPointHelper::CallCCodeForDoubleOperation( |
| 211 MacroAssembler* masm, |
| 212 Token::Value op, |
| 213 Register heap_number_result, |
| 214 Register scratch) { |
| 215 UNIMPLEMENTED_MIPS(); |
| 216 } |
| 217 |
| 215 | 218 |
| 216 // See comment for class; this does NOT work for int32s that are in Smi range. | 219 // See comment for class; this does NOT work for int32s that are in Smi range. |
| 217 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { | 220 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { |
| 218 UNIMPLEMENTED_MIPS(); | 221 UNIMPLEMENTED_MIPS(); |
| 219 } | 222 } |
| 220 | 223 |
| 221 | 224 |
| 222 void EmitNanCheck(MacroAssembler* masm, Condition cc) { | 225 void EmitNanCheck(MacroAssembler* masm, Condition cc) { |
| 223 UNIMPLEMENTED_MIPS(); | 226 UNIMPLEMENTED_MIPS(); |
| 224 } | 227 } |
| (...skipping 24 matching lines...) |
| 249 } | 252 } |
| 250 | 253 |
| 251 | 254 |
| 252 // This stub does not handle the inlined cases (Smis, Booleans, undefined). | 255 // This stub does not handle the inlined cases (Smis, Booleans, undefined). |
| 253 // The stub returns zero for false, and a non-zero value for true. | 256 // The stub returns zero for false, and a non-zero value for true. |
| 254 void ToBooleanStub::Generate(MacroAssembler* masm) { | 257 void ToBooleanStub::Generate(MacroAssembler* masm) { |
| 255 UNIMPLEMENTED_MIPS(); | 258 UNIMPLEMENTED_MIPS(); |
| 256 } | 259 } |
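Given that contract (zero in v0 means false, anything else true), a caller can branch on the stub's result directly. An illustrative call-site fragment, with the stub instance and labels assumed:

```cpp
// Hypothetical call site for ToBooleanStub: v0 holds the stub's result,
// so a single branch against zero_reg distinguishes true from false.
__ CallStub(&to_boolean_stub);                   // assumed stub instance
__ Branch(&if_true, ne, v0, Operand(zero_reg));  // non-zero => true
// Falls through here when the value converted to false (v0 == 0).
```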
| 257 | 260 |
| 258 | 261 |
| 259 // We fall into this code if the operands were Smis, but the result was | 262 Handle<Code> GetTypeRecordingUnaryOpStub(int key, |
| 260 // not (e.g. overflow). We branch into this code (to the not_smi label) if | 263 TRUnaryOpIC::TypeInfo type_info) { |
| 261 // the operands were not both Smi. The operands are in lhs and rhs. | 264 TypeRecordingUnaryOpStub stub(key, type_info); |
| 262 // To call the C-implemented binary fp operation routines we need to end up | 265 return stub.GetCode(); |
| 263 // with the double precision floating point operands in a0 and a1 (for the | 266 } |
| 264 // value in a1) and a2 and a3 (for the value in a0). | 267 |
| 265 void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm, | 268 |
| 266 Label* not_smi, | 269 const char* TypeRecordingUnaryOpStub::GetName() { |
| 267 Register lhs, | 270 UNIMPLEMENTED_MIPS(); |
| 268 Register rhs, | 271 return NULL; |
| 269 const Builtins::JavaScript& builtin) { | 272 } |
| 273 |
| 274 |
| 275 // TODO(svenpanne): Use virtual functions instead of switch. |
| 276 void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) { |
| 270 UNIMPLEMENTED_MIPS(); | 277 UNIMPLEMENTED_MIPS(); |
| 271 } | 278 } |
| 272 | 279 |
| 273 | 280 |
| 274 // For bitwise ops where the inputs are not both Smis, here we try to determine | 281 void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 275 // whether both inputs are either Smis or at least heap numbers that can be | |
| 276 // represented by a 32-bit signed value. We truncate towards zero as required |
| 277 // by the ES spec. If this is the case we do the bitwise op and see if the | |
| 278 // result is a Smi. If so, great, otherwise we try to find a heap number to | |
| 279 // write the answer into (either by allocating or by overwriting). | |
| 280 // On entry the operands are in lhs (x) and rhs (y). (Result = x op y). | |
| 281 // On exit the result is in v0. | |
| 282 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, | |
| 283 Register lhs, | |
| 284 Register rhs) { | |
| 285 UNIMPLEMENTED_MIPS(); | 282 UNIMPLEMENTED_MIPS(); |
| 286 } | 283 } |
| 287 | 284 |
| 288 | 285 |
| 289 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 286 // TODO(svenpanne): Use virtual functions instead of switch. |
| 287 void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 290 UNIMPLEMENTED_MIPS(); | 288 UNIMPLEMENTED_MIPS(); |
| 291 } | 289 } |
| 292 | 290 |
| 293 | 291 |
| 294 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 292 void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) { |
| 295 UNIMPLEMENTED_MIPS(); | 293 UNIMPLEMENTED_MIPS(); |
| 296 } | 294 } |
| 297 | 295 |
| 298 | 296 |
| 299 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { | 297 void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) { |
| 300 GenericBinaryOpStub stub(key, type_info); | 298 UNIMPLEMENTED_MIPS(); |
| 301 return stub.GetCode(); | |
| 302 } | 299 } |
| 303 | 300 |
| 304 | 301 |
| 302 void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, |
| 303 Label* non_smi, |
| 304 Label* slow) { |
| 305 UNIMPLEMENTED_MIPS(); |
| 306 } |
| 307 |
| 308 |
| 309 void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, |
| 310 Label* non_smi) { |
| 311 UNIMPLEMENTED_MIPS(); |
| 312 } |
| 313 |
| 314 |
| 315 // TODO(svenpanne): Use virtual functions instead of switch. |
| 316 void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
| 317 UNIMPLEMENTED_MIPS(); |
| 318 } |
| 319 |
| 320 |
| 321 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { |
| 322 UNIMPLEMENTED_MIPS(); |
| 323 } |
| 324 |
| 325 |
| 326 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot( |
| 327 MacroAssembler* masm) { |
| 328 UNIMPLEMENTED_MIPS(); |
| 329 } |
| 330 |
| 331 |
| 332 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, |
| 333 Label* slow) { |
| 334 UNIMPLEMENTED_MIPS(); |
| 335 } |
| 336 |
| 337 |
| 338 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot( |
| 339 MacroAssembler* masm, Label* slow) { |
| 340 UNIMPLEMENTED_MIPS(); |
| 341 } |
| 342 |
| 343 |
| 344 // TODO(svenpanne): Use virtual functions instead of switch. |
| 345 void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { |
| 346 UNIMPLEMENTED_MIPS(); |
| 347 } |
| 348 |
| 349 |
| 350 void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) { |
| 351 UNIMPLEMENTED_MIPS(); |
| 352 } |
| 353 |
| 354 |
| 355 void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) { |
| 356 UNIMPLEMENTED_MIPS(); |
| 357 } |
| 358 |
| 359 |
| 360 void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback( |
| 361 MacroAssembler* masm) { |
| 362 UNIMPLEMENTED_MIPS(); |
| 363 } |
| 364 |
| 365 |
| 305 Handle<Code> GetTypeRecordingBinaryOpStub(int key, | 366 Handle<Code> GetTypeRecordingBinaryOpStub(int key, |
| 306 TRBinaryOpIC::TypeInfo type_info, | 367 TRBinaryOpIC::TypeInfo type_info, |
| 307 TRBinaryOpIC::TypeInfo result_type_info) { | 368 TRBinaryOpIC::TypeInfo result_type_info) { |
| 308 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); | 369 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); |
| 309 return stub.GetCode(); | 370 return stub.GetCode(); |
| 310 } | 371 } |
| 311 | 372 |
| 312 | 373 |
| 313 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 374 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 314 UNIMPLEMENTED_MIPS(); | 375 UNIMPLEMENTED_MIPS(); |
| (...skipping 30 matching lines...) |
| 345 Label* gc_required) { | 406 Label* gc_required) { |
| 346 UNIMPLEMENTED_MIPS(); | 407 UNIMPLEMENTED_MIPS(); |
| 347 } | 408 } |
| 348 | 409 |
| 349 | 410 |
| 346 | 409 |
| 347 | 410 |
| 350 // Generate the smi code. If the operation on smis is successful this return | 411 // Generate the smi code. If the operation on smis is successful this return |
| 351 // is generated. If the result is not a smi and heap number allocation is not | 412 // is generated. If the result is not a smi and heap number allocation is not |
| 352 // requested the code falls through. If number allocation is requested but a | 413 // requested the code falls through. If number allocation is requested but a |
| 353 // heap number cannot be allocated the code jumps to the label gc_required. | 414 // heap number cannot be allocated the code jumps to the label gc_required. |
| 354 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 415 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
| 416 Label* use_runtime, |
| 355 Label* gc_required, | 417 Label* gc_required, |
| 356 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 418 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
| 357 UNIMPLEMENTED_MIPS(); | 419 UNIMPLEMENTED_MIPS(); |
| 358 } | 420 } |
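As a concrete instance of what this smi path must emit, consider addition: tagged smis (value << 1) can be added as plain integers, with overflow detected from the sign bits. This fragment is a sketch; the register roles (a0 right, a1 left, v0 result) and the labels are assumptions:

```cpp
// Hypothetical smi ADD fragment. Overflow occurred iff both
// (sum ^ left) and (sum ^ right) have their sign bits set, so the
// xor/and sequence leaves a negative value in scratch1 on overflow.
__ addu(v0, a1, a0);               // Tagged sum (may have overflowed).
__ xor_(scratch1, v0, a1);         // Sign changed vs. left operand?
__ xor_(scratch2, v0, a0);         // Sign changed vs. right operand?
__ and_(scratch1, scratch1, scratch2);
__ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
__ Ret();                          // Smi result is already in v0.
```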
| 359 | 421 |
| 360 | 422 |
| 361 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 423 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 362 UNIMPLEMENTED_MIPS(); | 424 UNIMPLEMENTED_MIPS(); |
| 363 } | 425 } |
| 364 | 426 |
| (...skipping 54 matching lines...) |
| 419 UNIMPLEMENTED_MIPS(); | 481 UNIMPLEMENTED_MIPS(); |
| 420 return Runtime::kAbort; | 482 return Runtime::kAbort; |
| 421 } | 483 } |
| 422 | 484 |
| 423 | 485 |
| 424 void StackCheckStub::Generate(MacroAssembler* masm) { | 486 void StackCheckStub::Generate(MacroAssembler* masm) { |
| 425 UNIMPLEMENTED_MIPS(); | 487 UNIMPLEMENTED_MIPS(); |
| 426 } | 488 } |
| 427 | 489 |
| 428 | 490 |
| 491 void MathPowStub::Generate(MacroAssembler* masm) { |
| 492 UNIMPLEMENTED_MIPS(); |
| 493 } |
| 494 |
| 495 |
| 429 bool CEntryStub::NeedsImmovableCode() { | 496 bool CEntryStub::NeedsImmovableCode() { |
| 430 return true; | 497 return true; |
| 431 } | 498 } |
| 432 | 499 |
| 433 | 500 |
| 434 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { | 501 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { |
| 435 UNIMPLEMENTED_MIPS(); | 502 UNIMPLEMENTED_MIPS(); |
| 436 } | 503 } |
| 437 | 504 |
| 438 | 505 |
| (...skipping 64 matching lines...) |
| 503 return name_; | 570 return name_; |
| 504 } | 571 } |
| 505 | 572 |
| 506 | 573 |
| 507 int CompareStub::MinorKey() { | 574 int CompareStub::MinorKey() { |
| 508 UNIMPLEMENTED_MIPS(); | 575 UNIMPLEMENTED_MIPS(); |
| 509 return 0; | 576 return 0; |
| 510 } | 577 } |
| 511 | 578 |
| 512 | 579 |
| 513 // StringCharCodeAtGenerator | 580 // StringCharCodeAtGenerator. |
| 514 | |
| 515 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 581 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 516 UNIMPLEMENTED_MIPS(); | 582 UNIMPLEMENTED_MIPS(); |
| 517 } | 583 } |
| 518 | 584 |
| 519 | 585 |
| 520 void StringCharCodeAtGenerator::GenerateSlow( | 586 void StringCharCodeAtGenerator::GenerateSlow( |
| 521 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | 587 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { |
| 522 UNIMPLEMENTED_MIPS(); | 588 UNIMPLEMENTED_MIPS(); |
| 523 } | 589 } |
| 524 | 590 |
| (...skipping 152 matching lines...) |
| 677 UNIMPLEMENTED_MIPS(); | 743 UNIMPLEMENTED_MIPS(); |
| 678 } | 744 } |
| 679 | 745 |
| 680 | 746 |
| 681 void SubStringStub::Generate(MacroAssembler* masm) { | 747 void SubStringStub::Generate(MacroAssembler* masm) { |
| 682 UNIMPLEMENTED_MIPS(); | 748 UNIMPLEMENTED_MIPS(); |
| 683 } | 749 } |
| 684 | 750 |
| 685 | 751 |
| 686 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | 752 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
| 753 Register left, |
| 687 Register right, | 754 Register right, |
| 688 Register left, | |
| 689 Register scratch1, | 755 Register scratch1, |
| 690 Register scratch2, | 756 Register scratch2, |
| 691 Register scratch3, | 757 Register scratch3, |
| 692 Register scratch4) { | 758 Register scratch4) { |
| 693 UNIMPLEMENTED_MIPS(); | 759 UNIMPLEMENTED_MIPS(); |
| 694 } | 760 } |
| 695 | 761 |
| 696 | 762 |
| 697 void StringCompareStub::Generate(MacroAssembler* masm) { | 763 void StringCompareStub::Generate(MacroAssembler* masm) { |
| 698 UNIMPLEMENTED_MIPS(); | 764 UNIMPLEMENTED_MIPS(); |
| 699 } | 765 } |
| 700 | 766 |
| 701 | 767 |
| 702 void StringAddStub::Generate(MacroAssembler* masm) { | 768 void StringAddStub::Generate(MacroAssembler* masm) { |
| 703 UNIMPLEMENTED_MIPS(); | 769 UNIMPLEMENTED_MIPS(); |
| 704 } | 770 } |
| 705 | 771 |
| 706 | 772 |
| 707 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 773 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| 708 UNIMPLEMENTED_MIPS(); | 774 UNIMPLEMENTED_MIPS(); |
| 709 } | 775 } |
| 710 | 776 |
| 711 | 777 |
| 712 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { | 778 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
| 713 UNIMPLEMENTED_MIPS(); | 779 UNIMPLEMENTED_MIPS(); |
| 714 } | 780 } |
| 715 | 781 |
| 716 | 782 |
| 783 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
| 784 UNIMPLEMENTED_MIPS(); |
| 785 } |
| 786 |
| 787 |
| 717 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 788 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
| 718 UNIMPLEMENTED_MIPS(); | 789 UNIMPLEMENTED_MIPS(); |
| 719 } | 790 } |
| 720 | 791 |
| 721 | 792 |
| 722 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { | 793 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { |
| 723 UNIMPLEMENTED_MIPS(); | 794 UNIMPLEMENTED_MIPS(); |
| 724 } | 795 } |
| 725 | 796 |
| 726 | 797 |
| (...skipping 11 matching lines...) |
| 738 UNIMPLEMENTED_MIPS(); | 809 UNIMPLEMENTED_MIPS(); |
| 739 } | 810 } |
| 740 | 811 |
| 741 | 812 |
| 742 #undef __ | 813 #undef __ |
| 743 | 814 |
| 744 } } // namespace v8::internal | 815 } } // namespace v8::internal |
| 745 | 816 |
| 746 #endif // V8_TARGET_ARCH_MIPS | 817 #endif // V8_TARGET_ARCH_MIPS |
| 747 | 818 |