Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(5)

Side by Side Diff: src/x64/macro-assembler-x64.cc

Issue 7060010: Merge bleeding edge into the GC branch up to 7948. The asserts (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/x64/macro-assembler-x64.h ('k') | src/x64/regexp-macro-assembler-x64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 184 matching lines...) Expand 10 before | Expand all | Expand 10 after
195 cmpq(with, kScratchRegister); 195 cmpq(with, kScratchRegister);
196 } 196 }
197 197
198 198
// Records a write of the slot |addr| inside |object| into the store buffer.
// |scratch| is clobbered.  Assumes the object is NOT in new space (checked
// under --debug-code); if the store buffer fills up, the overflow stub is
// called to process it.
void MacroAssembler::RecordWriteHelper(Register object,
                                       Register addr,
                                       Register scratch,
                                       SaveFPRegsMode save_fp) {
  if (emit_debug_code()) {
    // Check that the object is not in new space.
    Label not_in_new_space;
    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
    Abort("new-space object passed to RecordWriteHelper");
    bind(&not_in_new_space);
  }

  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movq(Operand(scratch, 0), addr);
  // Increment buffer top.
  addq(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  Label no_overflow;
  // Check for end of buffer: the overflow bit is set in the top pointer
  // once the buffer limit is reached.
  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  j(equal, &no_overflow, Label::kNear);
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(save_fp);
  CallStub(&store_buffer_overflow);
  bind(&no_overflow);
}
229 229
230 230
// Jumps to |branch| if (cc == equal) the object is in new space, or
// (cc == not_equal) it is not.  |scratch| is clobbered; kScratchRegister is
// also clobbered.  |near_jump| selects a short jump encoding when the target
// is known to be close.
void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch,
                                Label::Distance near_jump) {
  if (Serializer::enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address. We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      // Object doubles as scratch: mask it in place via kScratchRegister.
      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      and_(scratch, kScratchRegister);
    } else {
      movq(scratch, ExternalReference::new_space_mask(isolate()));
      and_(scratch, object);
    }
    // Masked address equals new-space start iff the object is in new space.
    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpq(scratch, kScratchRegister);
    j(cc, branch, near_jump);
  } else {
    // Snapshot-free build: new-space start/mask are compile-time-known
    // immediates, so fold (object - start) & mask == 0 into two instructions.
    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
    if (scratch.is(object)) {
      addq(scratch, kScratchRegister);
    } else {
      // lea computes object + (-start) without clobbering object.
      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
    j(cc, branch, near_jump);
  }
}
265
266
231 void MacroAssembler::RecordWrite(Register object, 267 void MacroAssembler::RecordWrite(Register object,
232 int offset, 268 int offset,
233 Register value, 269 Register value,
234 Register index, 270 Register index,
235 SaveFPRegsMode save_fp) { 271 SaveFPRegsMode save_fp) {
236 // The compiled code assumes that record write doesn't change the 272 // The compiled code assumes that record write doesn't change the
237 // context register, so we check that none of the clobbered 273 // context register, so we check that none of the clobbered
238 // registers are rsi. 274 // registers are rsi.
239 ASSERT(!value.is(rsi) && !index.is(rsi)); 275 ASSERT(!value.is(rsi) && !index.is(rsi));
240 276
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
288 324
289 325
290 void MacroAssembler::RecordWriteNonSmi(Register object, 326 void MacroAssembler::RecordWriteNonSmi(Register object,
291 int offset, 327 int offset,
292 Register scratch, 328 Register scratch,
293 Register index, 329 Register index,
294 SaveFPRegsMode save_fp) { 330 SaveFPRegsMode save_fp) {
295 Label done; 331 Label done;
296 332
297 if (emit_debug_code()) { 333 if (emit_debug_code()) {
298 NearLabel okay; 334 Label okay;
299 JumpIfNotSmi(object, &okay); 335 JumpIfNotSmi(object, &okay, Label::kNear);
300 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); 336 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
301 bind(&okay); 337 bind(&okay);
302 338
303 if (offset == 0) { 339 if (offset == 0) {
304 // index must be int32. 340 // index must be int32.
305 Register tmp = index.is(rax) ? rbx : rax; 341 Register tmp = index.is(rax) ? rbx : rax;
306 push(tmp); 342 push(tmp);
307 movl(tmp, index); 343 movl(tmp, index);
308 cmpq(tmp, index); 344 cmpq(tmp, index);
309 Check(equal, "Index register for RecordWrite must be untagged int32."); 345 Check(equal, "Index register for RecordWrite must be untagged int32.");
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
345 } 381 }
346 382
347 383
// Emits a runtime check that |cc| holds, aborting with |msg| if not.
// Only emitted under --debug-code; a no-op in release-configured code.
void MacroAssembler::Assert(Condition cc, const char* msg) {
  if (emit_debug_code()) Check(cc, msg);
}
351 387
352 388
// Debug-mode check that |elements| has a fast-elements backing store map
// (FixedArray or copy-on-write FixedArray); aborts otherwise.
void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    // Accept a plain FixedArray map ...
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    // ... or a copy-on-write FixedArray map.
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
  }
}
366 402
367 403
// Unconditionally-emitted check: aborts with |msg| unless |cc| holds.
void MacroAssembler::Check(Condition cc, const char* msg) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(msg);
  // Control never reaches here: Abort does not return.
  bind(&L);
}
375 411
376 412
// Emits a check that rsp satisfies the OS activation-frame alignment
// (e.g. 16 bytes on most x64 ABIs); traps with int3 on misalignment.
void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    ASSERT(IsPowerOf2(frame_alignment));
    Label alignment_as_expected;
    // Low bits of rsp must be zero when properly aligned.
    testq(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}
390 426
391 427
// Jumps to |then_label| when |result| is zero AND |op| is negative, i.e.
// when an integer operation produced what would be -0 in doubles.
void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok, Label::kNear);  // Non-zero result can't be negative zero.
  testl(op, op);
  j(sign, then_label);  // Result is 0 and operand is negative: -0 case.
  bind(&ok);
}
402 438
403 439
404 void MacroAssembler::Abort(const char* msg) { 440 void MacroAssembler::Abort(const char* msg) {
405 // We want to pass the msg string like a smi to avoid GC 441 // We want to pass the msg string like a smi to avoid GC
406 // problems, however msg is not guaranteed to be aligned 442 // problems, however msg is not guaranteed to be aligned
407 // properly. Instead, we pass an aligned pointer that is 443 // properly. Instead, we pass an aligned pointer that is
(...skipping 19 matching lines...) Expand all
427 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))), 463 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
428 RelocInfo::NONE); 464 RelocInfo::NONE);
429 push(kScratchRegister); 465 push(kScratchRegister);
430 CallRuntime(Runtime::kAbort, 2); 466 CallRuntime(Runtime::kAbort, 2);
431 // will not return here 467 // will not return here
432 int3(); 468 int3();
433 } 469 }
434 470
435 471
// Calls the given code stub.  The allow_stub_calls() assert is temporarily
// disabled on this (GC experimental) branch; re-enable once stub-call
// restrictions are sorted out.
// ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
// TODO(gc): Fix this!
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
440 478
441 479
442 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) { 480 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
443 ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. 481 ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
444 MaybeObject* result = stub->TryGetCode(); 482 MaybeObject* result = stub->TryGetCode();
445 if (!result->IsFailure()) { 483 if (!result->IsFailure()) {
446 call(Handle<Code>(Code::cast(result->ToObjectUnchecked())), 484 call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
447 RelocInfo::CODE_TARGET); 485 RelocInfo::CODE_TARGET);
(...skipping 384 matching lines...) Expand 10 before | Expand all | Expand 10 after
832 870
833 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) { 871 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
834 if (emit_debug_code()) { 872 if (emit_debug_code()) {
835 movq(dst, 873 movq(dst,
836 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), 874 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
837 RelocInfo::NONE); 875 RelocInfo::NONE);
838 cmpq(dst, kSmiConstantRegister); 876 cmpq(dst, kSmiConstantRegister);
839 if (allow_stub_calls()) { 877 if (allow_stub_calls()) {
840 Assert(equal, "Uninitialized kSmiConstantRegister"); 878 Assert(equal, "Uninitialized kSmiConstantRegister");
841 } else { 879 } else {
842 NearLabel ok; 880 Label ok;
843 j(equal, &ok); 881 j(equal, &ok, Label::kNear);
844 int3(); 882 int3();
845 bind(&ok); 883 bind(&ok);
846 } 884 }
847 } 885 }
848 int value = source->value(); 886 int value = source->value();
849 if (value == 0) { 887 if (value == 0) {
850 xorl(dst, dst); 888 xorl(dst, dst);
851 return; 889 return;
852 } 890 }
853 bool negative = value < 0; 891 bool negative = value < 0;
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
895 if (!dst.is(src)) { 933 if (!dst.is(src)) {
896 movl(dst, src); 934 movl(dst, src);
897 } 935 }
898 shl(dst, Immediate(kSmiShift)); 936 shl(dst, Immediate(kSmiShift));
899 } 937 }
900 938
901 939
// Writes the 32-bit integer |src| into the high half of the smi field at
// |dst| (smis store their value in the upper 32 bits; tag bits stay 0).
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    // Verify the destination currently holds a smi (tag bit clear) before
    // overwriting only its value half.
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    if (allow_stub_calls()) {
      Abort("Integer32ToSmiField writing to non-smi location");
    } else {
      // Can't call the abort runtime from here; trap instead.
      int3();
    }
    bind(&ok);
  }
  // Store directly into the value half of the field; requires the smi shift
  // to be a whole number of bytes.
  ASSERT(kSmiShift % kBitsPerByte == 0);
  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
}
(...skipping 136 matching lines...) Expand 10 before | Expand all | Expand 10 after
1053 int power) { 1091 int power) {
1054 ASSERT((0 <= power) && (power < 32)); 1092 ASSERT((0 <= power) && (power < 32));
1055 if (dst.is(src)) { 1093 if (dst.is(src)) {
1056 shr(dst, Immediate(power + kSmiShift)); 1094 shr(dst, Immediate(power + kSmiShift));
1057 } else { 1095 } else {
1058 UNIMPLEMENTED(); // Not used. 1096 UNIMPLEMENTED(); // Not used.
1059 } 1097 }
1060 } 1098 }
1061 1099
1062 1100
// dst = src1 | src2, but only if both are smis; otherwise jumps to
// |on_not_smis| without clobbering src1/src2.  (The OR of two smis has a
// clear tag bit iff both tags are clear, so one smi check suffices.)
void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    // Compute into kScratchRegister first so a bailout leaves the source
    // registers untouched.
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    movq(kScratchRegister, src1);
    or_(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    or_(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}
1117
1118
// Tests the smi tag bit of |src|; returns the condition (zero) that holds
// when src is a smi, for use with a following j(...).
Condition MacroAssembler::CheckSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}
1068 1124
1069 1125
1070 Condition MacroAssembler::CheckSmi(const Operand& src) { 1126 Condition MacroAssembler::CheckSmi(const Operand& src) {
1071 ASSERT_EQ(0, kSmiTag); 1127 ASSERT_EQ(0, kSmiTag);
1072 testb(src, Immediate(kSmiTagMask)); 1128 testb(src, Immediate(kSmiTagMask));
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after
1163 if (!(src.AddressUsesRegister(dst))) { 1219 if (!(src.AddressUsesRegister(dst))) {
1164 movl(dst, Immediate(kSmiTagMask)); 1220 movl(dst, Immediate(kSmiTagMask));
1165 andl(dst, src); 1221 andl(dst, src);
1166 } else { 1222 } else {
1167 movl(dst, src); 1223 movl(dst, src);
1168 andl(dst, Immediate(kSmiTagMask)); 1224 andl(dst, Immediate(kSmiTagMask));
1169 } 1225 }
1170 } 1226 }
1171 1227
1172 1228
// Jumps to |on_invalid| if the signed 32-bit value in |src| does not fit in
// a smi.
void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label* on_invalid,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}
1235
1236
// Jumps to |on_invalid| if the unsigned 32-bit value in |src| does not fit
// in a smi.
void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label* on_invalid,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}
1243
1244
// Jumps to |on_smi| if |src| holds a smi.
void MacroAssembler::JumpIfSmi(Register src,
                               Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}
1251
1252
// Jumps to |on_not_smi| if |src| does not hold a smi.
void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}
1259
1260
// Jumps to |on_not_smi_or_negative| unless |src| is a smi with a
// non-negative value.
void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
}
1267
1268
// Jumps to |on_equals| if the smi in |src| equals the smi |constant|.
void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             Label* on_equals,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);
}
1276
1277
// Jumps to |on_not_both_smi| unless both |src1| and |src2| are smis.
void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}
1285
1286
// Jumps to |on_not_both_smi| unless both |src1| and |src2| are smis with
// non-negative values.
void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Register src2,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}
1294
1295
// dst = src + constant if src is a smi and the sum does not overflow;
// otherwise jumps to |on_not_smi_result|.  Unlike SmiAddConstant, src is
// NOT assumed to be a smi on entry.
void MacroAssembler::SmiTryAddConstant(Register dst,
                                       Register src,
                                       Smi* constant,
                                       Label* on_not_smi_result,
                                       Label::Distance near_jump) {
  // Does not assume that src is a smi.
  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
  ASSERT_EQ(0, kSmiTag);
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));

  JumpIfNotSmi(src, on_not_smi_result, near_jump);
  // Add into a temp so src survives an overflow bailout when dst aliases it.
  Register tmp = (dst.is(src) ? kScratchRegister : dst);
  LoadSmiConstant(tmp, constant);
  addq(tmp, src);
  j(overflow, on_not_smi_result, near_jump);
  if (dst.is(src)) {
    movq(dst, tmp);
  }
}
1316
1317
1173 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { 1318 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1174 if (constant->value() == 0) { 1319 if (constant->value() == 0) {
1175 if (!dst.is(src)) { 1320 if (!dst.is(src)) {
1176 movq(dst, src); 1321 movq(dst, src);
1177 } 1322 }
1178 return; 1323 return;
1179 } else if (dst.is(src)) { 1324 } else if (dst.is(src)) {
1180 ASSERT(!dst.is(kScratchRegister)); 1325 ASSERT(!dst.is(kScratchRegister));
1181 switch (constant->value()) { 1326 switch (constant->value()) {
1182 case 1: 1327 case 1:
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
1219 } 1364 }
1220 1365
1221 1366
// Adds the smi |constant| to the smi stored at memory operand |dst|,
// in place, with no overflow check.  Operates on the value half of the
// smi word only (kSmiShift / kBitsPerByte byte offset).
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
  }
}
1227 1372
1228 1373
// dst = src + constant (both smis), jumping to |on_not_smi_result| on
// overflow.  When dst aliases src, the addition goes through
// kScratchRegister so src is preserved on the bailout path.
void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    // Adding zero can neither overflow nor change the value.
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));

    LoadSmiConstant(kScratchRegister, constant);
    addq(kScratchRegister, src);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    LoadSmiConstant(dst, constant);
    addq(dst, src);
    j(overflow, on_not_smi_result, near_jump);
  }
}
1396
1397
// dst = src - constant (both smis), with NO overflow check.  Use only when
// overflow is impossible.
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subq(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      // -kMinValue is not representable, so the negation trick below can't
      // be used; load the constant and add instead.
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result, it only
      // differs on the overflow bit, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addq(dst, src);
    }
  }
}
1251 1420
1252 1421
// dst = src - constant (both smis), jumping to |on_not_smi_result| when the
// result is not a valid smi.  Sources are preserved on the bailout path.
void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    // Subtracting zero cannot fail.
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result, near_jump);
      LoadSmiConstant(kScratchRegister, constant);
      subq(dst, kScratchRegister);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
      addq(kScratchRegister, dst);
      j(overflow, on_not_smi_result, near_jump);
      movq(dst, kScratchRegister);
    }
  } else {
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result, near_jump);
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result, it only
      // differs on the overflow bit, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addq(dst, src);
      j(overflow, on_not_smi_result, near_jump);
    }
  }
}
1465
1466
1467 void MacroAssembler::SmiNeg(Register dst,
1468 Register src,
1469 Label* on_smi_result,
1470 Label::Distance near_jump) {
1471 if (dst.is(src)) {
1472 ASSERT(!dst.is(kScratchRegister));
1473 movq(kScratchRegister, src);
1474 neg(dst); // Low 32 bits are retained as zero by negation.
1475 // Test if result is zero or Smi::kMinValue.
1476 cmpq(dst, kScratchRegister);
1477 j(not_equal, on_smi_result, near_jump);
1478 movq(src, kScratchRegister);
1479 } else {
1480 movq(dst, src);
1481 neg(dst);
1482 cmpq(dst, src);
1483 // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1484 j(not_equal, on_smi_result, near_jump);
1485 }
1486 }
1487
1488
// dst = src1 + src2 (both smis), jumping to |on_not_smi_result| on
// overflow.  When dst aliases src1 the sum is formed in kScratchRegister
// first so src1 survives the bailout.
void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  if (dst.is(src1)) {
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}
1507
1508
// dst = src1 + memory operand src2 (both smis), jumping to
// |on_not_smi_result| on overflow.  src1 is preserved on the bailout path
// when dst aliases it.
void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  if (dst.is(src1)) {
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    // dst must not be part of src2's address, or the add would read a
    // partially-updated address.
    ASSERT(!src2.AddressUsesRegister(dst));
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}
1527
1528
// dst = src1 + src2 (both smis), with NO overflow bailout.  Use only when
// overflow is impossible; under --debug-code it still verifies that.
void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      // Do a trial add in the scratch register to assert no overflow.
      movq(kScratchRegister, src1);
      addq(kScratchRegister, src2);
      Check(no_overflow, "Smi addition overflow");
    }
    // lea computes the sum without touching flags or the sources.
    lea(dst, Operand(src1, src2, times_1, 0));
  } else {
    addq(dst, src2);
    Assert(no_overflow, "Smi addition overflow");
  }
}
1270 1546
1271 1547
// dst = src1 - src2 (both smis), jumping to |on_not_smi_result| on
// overflow.  When dst aliases src1, cmpq (a non-destructive subtraction)
// detects overflow before the real subq clobbers dst.
void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  if (dst.is(src1)) {
    cmpq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    subq(dst, src2);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}
1565
1566
// dst = src1 - src2 (both smis), with NO overflow bailout; asserts
// no-overflow under --debug-code.
void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  subq(dst, src2);
  Assert(no_overflow, "Smi subtraction overflow");
}
1282 1577
1283 1578
// dst = src1 - memory operand src2 (both smis), jumping to
// |on_not_smi_result| on overflow.  When dst aliases src1, a cmpq against
// the loaded operand detects overflow before the destructive subq.
void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  if (dst.is(src1)) {
    movq(kScratchRegister, src2);
    cmpq(src1, kScratchRegister);
    j(overflow, on_not_smi_result, near_jump);
    subq(src1, kScratchRegister);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}
1596
1597
// dst = src1 - memory operand src2 (both smis), with NO overflow bailout;
// asserts no-overflow under --debug-code.
void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  subq(dst, src2);
  Assert(no_overflow, "Smi subtraction overflow");
}
1295 1609
1296 1610
// dst = src1 * src2 (both smis), jumping to |on_not_smi_result| on overflow
// or when the result would be negative zero (product 0 with one negative
// operand).  One operand is untagged first so the product stays smi-tagged.
void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(src2));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movq(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result, Label::kNear);

    // Product is zero: sign of src1 XOR src2 tells whether one factor
    // was negative (sign bit set => negative zero).
    movq(dst, kScratchRegister);
    xor_(dst, src2);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero, the check whether the other is
    // negative.
    movq(kScratchRegister, src1);
    xor_(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
  }
}
1664
1665
// dst = src1 / src2 (both smis), jumping to |on_not_smi_result| when the
// division cannot produce a smi: zero divisor, non-zero remainder, the
// negative-zero case, or Smi::kMinValue / -1 (which would trap in idiv).
// Uses rax/rdx for the idivl; src1 is restored on all bailout paths.
void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testq(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    // idivl will clobber rax; keep a backup so bailouts can restore src1.
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  // rax is 0 or kMinValue iff no bits below the sign bit are set.
  testl(rax, Immediate(0x7fffffff));
  j(not_zero, &safe_div, Label::kNear);
  testq(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div, Label::kNear);
    movq(src1, kScratchRegister);  // Restore src1 before bailing out.
    jmp(on_not_smi_result, near_jump);
  } else {
    j(negative, on_not_smi_result, near_jump);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);  // Re-tag the divisor (it was untagged above).
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result, Label::kNear);
    movq(src1, kScratchRegister);  // Restore src1 before bailing out.
    jmp(on_not_smi_result, near_jump);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result, near_jump);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    // Quotient is in rax; restore src1 before writing dst elsewhere.
    movq(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}
1727
1728
1729 void MacroAssembler::SmiMod(Register dst,
1730 Register src1,
1731 Register src2,
1732 Label* on_not_smi_result,
1733 Label::Distance near_jump) {
1734 ASSERT(!dst.is(kScratchRegister));
1735 ASSERT(!src1.is(kScratchRegister));
1736 ASSERT(!src2.is(kScratchRegister));
1737 ASSERT(!src2.is(rax));
1738 ASSERT(!src2.is(rdx));
1739 ASSERT(!src1.is(rdx));
1740 ASSERT(!src1.is(src2));
1741
1742 testq(src2, src2);
1743 j(zero, on_not_smi_result, near_jump);
1744
1745 if (src1.is(rax)) {
1746 movq(kScratchRegister, src1);
1747 }
1748 SmiToInteger32(rax, src1);
1749 SmiToInteger32(src2, src2);
1750
1751 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1752 Label safe_div;
1753 cmpl(rax, Immediate(Smi::kMinValue));
1754 j(not_equal, &safe_div, Label::kNear);
1755 cmpl(src2, Immediate(-1));
1756 j(not_equal, &safe_div, Label::kNear);
1757 // Retag inputs and go slow case.
1758 Integer32ToSmi(src2, src2);
1759 if (src1.is(rax)) {
1760 movq(src1, kScratchRegister);
1761 }
1762 jmp(on_not_smi_result, near_jump);
1763 bind(&safe_div);
1764
1765 // Sign extend eax into edx:eax.
1766 cdq();
1767 idivl(src2);
1768 // Restore smi tags on inputs.
1769 Integer32ToSmi(src2, src2);
1770 if (src1.is(rax)) {
1771 movq(src1, kScratchRegister);
1772 }
1773 // Check for a negative zero result. If the result is zero, and the
1774 // dividend is negative, go slow to return a floating point negative zero.
1775 Label smi_result;
1776 testl(rdx, rdx);
1777 j(not_zero, &smi_result, Label::kNear);
1778 testq(src1, src1);
1779 j(negative, on_not_smi_result, near_jump);
1780 bind(&smi_result);
1781 Integer32ToSmi(dst, rdx);
1782 }
1783
1784
1297 void MacroAssembler::SmiNot(Register dst, Register src) { 1785 void MacroAssembler::SmiNot(Register dst, Register src) {
1298 ASSERT(!dst.is(kScratchRegister)); 1786 ASSERT(!dst.is(kScratchRegister));
1299 ASSERT(!src.is(kScratchRegister)); 1787 ASSERT(!src.is(kScratchRegister));
1300 // Set tag and padding bits before negating, so that they are zero afterwards. 1788 // Set tag and padding bits before negating, so that they are zero afterwards.
1301 movl(kScratchRegister, Immediate(~0)); 1789 movl(kScratchRegister, Immediate(~0));
1302 if (dst.is(src)) { 1790 if (dst.is(src)) {
1303 xor_(dst, kScratchRegister); 1791 xor_(dst, kScratchRegister);
1304 } else { 1792 } else {
1305 lea(dst, Operand(src, kScratchRegister, times_1, 0)); 1793 lea(dst, Operand(src, kScratchRegister, times_1, 0));
1306 } 1794 }
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after
1393 int shift_value) { 1881 int shift_value) {
1394 if (!dst.is(src)) { 1882 if (!dst.is(src)) {
1395 movq(dst, src); 1883 movq(dst, src);
1396 } 1884 }
1397 if (shift_value > 0) { 1885 if (shift_value > 0) {
1398 shl(dst, Immediate(shift_value)); 1886 shl(dst, Immediate(shift_value));
1399 } 1887 }
1400 } 1888 }
1401 1889
1402 1890
1891 void MacroAssembler::SmiShiftLogicalRightConstant(
1892 Register dst, Register src, int shift_value,
1893 Label* on_not_smi_result, Label::Distance near_jump) {
1894 // Logic right shift interprets its result as an *unsigned* number.
1895 if (dst.is(src)) {
1896 UNIMPLEMENTED(); // Not used.
1897 } else {
1898 movq(dst, src);
1899 if (shift_value == 0) {
1900 testq(dst, dst);
1901 j(negative, on_not_smi_result, near_jump);
1902 }
1903 shr(dst, Immediate(shift_value + kSmiShift));
1904 shl(dst, Immediate(kSmiShift));
1905 }
1906 }
1907
1908
1403 void MacroAssembler::SmiShiftLeft(Register dst, 1909 void MacroAssembler::SmiShiftLeft(Register dst,
1404 Register src1, 1910 Register src1,
1405 Register src2) { 1911 Register src2) {
1406 ASSERT(!dst.is(rcx)); 1912 ASSERT(!dst.is(rcx));
1407 NearLabel result_ok;
1408 // Untag shift amount. 1913 // Untag shift amount.
1409 if (!dst.is(src1)) { 1914 if (!dst.is(src1)) {
1410 movq(dst, src1); 1915 movq(dst, src1);
1411 } 1916 }
1412 SmiToInteger32(rcx, src2); 1917 SmiToInteger32(rcx, src2);
1413 // Shift amount specified by lower 5 bits, not six as the shl opcode. 1918 // Shift amount specified by lower 5 bits, not six as the shl opcode.
1414 and_(rcx, Immediate(0x1f)); 1919 and_(rcx, Immediate(0x1f));
1415 shl_cl(dst); 1920 shl_cl(dst);
1416 } 1921 }
1417 1922
1418 1923
1924 void MacroAssembler::SmiShiftLogicalRight(Register dst,
1925 Register src1,
1926 Register src2,
1927 Label* on_not_smi_result,
1928 Label::Distance near_jump) {
1929 ASSERT(!dst.is(kScratchRegister));
1930 ASSERT(!src1.is(kScratchRegister));
1931 ASSERT(!src2.is(kScratchRegister));
1932 ASSERT(!dst.is(rcx));
1933 // dst and src1 can be the same, because the one case that bails out
1934 // is a shift by 0, which leaves dst, and therefore src1, unchanged.
1935 if (src1.is(rcx) || src2.is(rcx)) {
1936 movq(kScratchRegister, rcx);
1937 }
1938 if (!dst.is(src1)) {
1939 movq(dst, src1);
1940 }
1941 SmiToInteger32(rcx, src2);
1942 orl(rcx, Immediate(kSmiShift));
1943 shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
1944 shl(dst, Immediate(kSmiShift));
1945 testq(dst, dst);
1946 if (src1.is(rcx) || src2.is(rcx)) {
1947 Label positive_result;
1948 j(positive, &positive_result, Label::kNear);
1949 if (src1.is(rcx)) {
1950 movq(src1, kScratchRegister);
1951 } else {
1952 movq(src2, kScratchRegister);
1953 }
1954 jmp(on_not_smi_result, near_jump);
1955 bind(&positive_result);
1956 } else {
1957 // src2 was zero and src1 negative.
1958 j(negative, on_not_smi_result, near_jump);
1959 }
1960 }
1961
1962
1419 void MacroAssembler::SmiShiftArithmeticRight(Register dst, 1963 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
1420 Register src1, 1964 Register src1,
1421 Register src2) { 1965 Register src2) {
1422 ASSERT(!dst.is(kScratchRegister)); 1966 ASSERT(!dst.is(kScratchRegister));
1423 ASSERT(!src1.is(kScratchRegister)); 1967 ASSERT(!src1.is(kScratchRegister));
1424 ASSERT(!src2.is(kScratchRegister)); 1968 ASSERT(!src2.is(kScratchRegister));
1425 ASSERT(!dst.is(rcx)); 1969 ASSERT(!dst.is(rcx));
1426 if (src1.is(rcx)) { 1970 if (src1.is(rcx)) {
1427 movq(kScratchRegister, src1); 1971 movq(kScratchRegister, src1);
1428 } else if (src2.is(rcx)) { 1972 } else if (src2.is(rcx)) {
1429 movq(kScratchRegister, src2); 1973 movq(kScratchRegister, src2);
1430 } 1974 }
1431 if (!dst.is(src1)) { 1975 if (!dst.is(src1)) {
1432 movq(dst, src1); 1976 movq(dst, src1);
1433 } 1977 }
1434 SmiToInteger32(rcx, src2); 1978 SmiToInteger32(rcx, src2);
1435 orl(rcx, Immediate(kSmiShift)); 1979 orl(rcx, Immediate(kSmiShift));
1436 sar_cl(dst); // Shift 32 + original rcx & 0x1f. 1980 sar_cl(dst); // Shift 32 + original rcx & 0x1f.
1437 shl(dst, Immediate(kSmiShift)); 1981 shl(dst, Immediate(kSmiShift));
1438 if (src1.is(rcx)) { 1982 if (src1.is(rcx)) {
1439 movq(src1, kScratchRegister); 1983 movq(src1, kScratchRegister);
1440 } else if (src2.is(rcx)) { 1984 } else if (src2.is(rcx)) {
1441 movq(src2, kScratchRegister); 1985 movq(src2, kScratchRegister);
1442 } 1986 }
1443 } 1987 }
1444 1988
1445 1989
1990 void MacroAssembler::SelectNonSmi(Register dst,
1991 Register src1,
1992 Register src2,
1993 Label* on_not_smis,
1994 Label::Distance near_jump) {
1995 ASSERT(!dst.is(kScratchRegister));
1996 ASSERT(!src1.is(kScratchRegister));
1997 ASSERT(!src2.is(kScratchRegister));
1998 ASSERT(!dst.is(src1));
1999 ASSERT(!dst.is(src2));
2000 // Both operands must not be smis.
2001 #ifdef DEBUG
2002 if (allow_stub_calls()) { // Check contains a stub call.
2003 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2004 Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
2005 }
2006 #endif
2007 ASSERT_EQ(0, kSmiTag);
2008 ASSERT_EQ(0, Smi::FromInt(0));
2009 movl(kScratchRegister, Immediate(kSmiTagMask));
2010 and_(kScratchRegister, src1);
2011 testl(kScratchRegister, src2);
2012 // If non-zero then both are smis.
2013 j(not_zero, on_not_smis, near_jump);
2014
2015 // Exactly one operand is a smi.
2016 ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
2017 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
2018 subq(kScratchRegister, Immediate(1));
2019 // If src1 is a smi, then scratch register all 1s, else it is all 0s.
2020 movq(dst, src1);
2021 xor_(dst, src2);
2022 and_(dst, kScratchRegister);
2023 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2024 xor_(dst, src1);
2025 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2026 }
2027
2028
1446 SmiIndex MacroAssembler::SmiToIndex(Register dst, 2029 SmiIndex MacroAssembler::SmiToIndex(Register dst,
1447 Register src, 2030 Register src,
1448 int shift) { 2031 int shift) {
1449 ASSERT(is_uint6(shift)); 2032 ASSERT(is_uint6(shift));
1450 // There is a possible optimization if shift is in the range 60-63, but that 2033 // There is a possible optimization if shift is in the range 60-63, but that
1451 // will (and must) never happen. 2034 // will (and must) never happen.
1452 if (!dst.is(src)) { 2035 if (!dst.is(src)) {
1453 movq(dst, src); 2036 movq(dst, src);
1454 } 2037 }
1455 if (shift < kSmiShift) { 2038 if (shift < kSmiShift) {
(...skipping 21 matching lines...) Expand all
1477 return SmiIndex(dst, times_1); 2060 return SmiIndex(dst, times_1);
1478 } 2061 }
1479 2062
1480 2063
1481 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { 2064 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
1482 ASSERT_EQ(0, kSmiShift % kBitsPerByte); 2065 ASSERT_EQ(0, kSmiShift % kBitsPerByte);
1483 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); 2066 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
1484 } 2067 }
1485 2068
1486 2069
2070 void MacroAssembler::JumpIfNotString(Register object,
2071 Register object_map,
2072 Label* not_string,
2073 Label::Distance near_jump) {
2074 Condition is_smi = CheckSmi(object);
2075 j(is_smi, not_string, near_jump);
2076 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2077 j(above_equal, not_string, near_jump);
2078 }
2079
2080
2081 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2082 Register first_object,
2083 Register second_object,
2084 Register scratch1,
2085 Register scratch2,
2086 Label* on_fail,
2087 Label::Distance near_jump) {
2088 // Check that both objects are not smis.
2089 Condition either_smi = CheckEitherSmi(first_object, second_object);
2090 j(either_smi, on_fail, near_jump);
2091
2092 // Load instance type for both strings.
2093 movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2094 movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2095 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2096 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2097
2098 // Check that both are flat ascii strings.
2099 ASSERT(kNotStringTag != 0);
2100 const int kFlatAsciiStringMask =
2101 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2102 const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2103
2104 andl(scratch1, Immediate(kFlatAsciiStringMask));
2105 andl(scratch2, Immediate(kFlatAsciiStringMask));
2106 // Interleave the bits to check both scratch1 and scratch2 in one test.
2107 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2108 lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2109 cmpl(scratch1,
2110 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2111 j(not_equal, on_fail, near_jump);
2112 }
2113
2114
2115 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2116 Register instance_type,
2117 Register scratch,
2118 Label* failure,
2119 Label::Distance near_jump) {
2120 if (!scratch.is(instance_type)) {
2121 movl(scratch, instance_type);
2122 }
2123
2124 const int kFlatAsciiStringMask =
2125 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2126
2127 andl(scratch, Immediate(kFlatAsciiStringMask));
2128 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
2129 j(not_equal, failure, near_jump);
2130 }
2131
2132
2133 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2134 Register first_object_instance_type,
2135 Register second_object_instance_type,
2136 Register scratch1,
2137 Register scratch2,
2138 Label* on_fail,
2139 Label::Distance near_jump) {
2140 // Load instance type for both strings.
2141 movq(scratch1, first_object_instance_type);
2142 movq(scratch2, second_object_instance_type);
2143
2144 // Check that both are flat ascii strings.
2145 ASSERT(kNotStringTag != 0);
2146 const int kFlatAsciiStringMask =
2147 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2148 const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2149
2150 andl(scratch1, Immediate(kFlatAsciiStringMask));
2151 andl(scratch2, Immediate(kFlatAsciiStringMask));
2152 // Interleave the bits to check both scratch1 and scratch2 in one test.
2153 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2154 lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2155 cmpl(scratch1,
2156 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2157 j(not_equal, on_fail, near_jump);
2158 }
2159
2160
1487 2161
1488 void MacroAssembler::Move(Register dst, Register src) { 2162 void MacroAssembler::Move(Register dst, Register src) {
1489 if (!dst.is(src)) { 2163 if (!dst.is(src)) {
1490 movq(dst, src); 2164 movq(dst, src);
1491 } 2165 }
1492 } 2166 }
1493 2167
1494 2168
1495 void MacroAssembler::Move(Register dst, Handle<Object> source) { 2169 void MacroAssembler::Move(Register dst, Handle<Object> source) {
1496 ASSERT(!source->IsFailure()); 2170 ASSERT(!source->IsFailure());
(...skipping 285 matching lines...) Expand 10 before | Expand all | Expand 10 after
1782 Operand handler_operand = ExternalOperand(handler_address); 2456 Operand handler_operand = ExternalOperand(handler_address);
1783 movq(rsp, handler_operand); 2457 movq(rsp, handler_operand);
1784 // get next in chain 2458 // get next in chain
1785 pop(handler_operand); 2459 pop(handler_operand);
1786 pop(rbp); // pop frame pointer 2460 pop(rbp); // pop frame pointer
1787 pop(rdx); // remove state 2461 pop(rdx); // remove state
1788 2462
1789 // Before returning we restore the context from the frame pointer if not NULL. 2463 // Before returning we restore the context from the frame pointer if not NULL.
1790 // The frame pointer is NULL in the exception handler of a JS entry frame. 2464 // The frame pointer is NULL in the exception handler of a JS entry frame.
1791 Set(rsi, 0); // Tentatively set context pointer to NULL 2465 Set(rsi, 0); // Tentatively set context pointer to NULL
1792 NearLabel skip; 2466 Label skip;
1793 cmpq(rbp, Immediate(0)); 2467 cmpq(rbp, Immediate(0));
1794 j(equal, &skip); 2468 j(equal, &skip, Label::kNear);
1795 movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); 2469 movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
1796 bind(&skip); 2470 bind(&skip);
1797 ret(0); 2471 ret(0);
1798 } 2472 }
1799 2473
1800 2474
1801 void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, 2475 void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
1802 Register value) { 2476 Register value) {
1803 // Keep thrown value in rax. 2477 // Keep thrown value in rax.
1804 if (!value.is(rax)) { 2478 if (!value.is(rax)) {
1805 movq(rax, value); 2479 movq(rax, value);
1806 } 2480 }
1807 // Fetch top stack handler. 2481 // Fetch top stack handler.
1808 ExternalReference handler_address(Isolate::k_handler_address, isolate()); 2482 ExternalReference handler_address(Isolate::k_handler_address, isolate());
1809 Load(rsp, handler_address); 2483 Load(rsp, handler_address);
1810 2484
1811 // Unwind the handlers until the ENTRY handler is found. 2485 // Unwind the handlers until the ENTRY handler is found.
1812 NearLabel loop, done; 2486 Label loop, done;
1813 bind(&loop); 2487 bind(&loop);
1814 // Load the type of the current stack handler. 2488 // Load the type of the current stack handler.
1815 const int kStateOffset = StackHandlerConstants::kStateOffset; 2489 const int kStateOffset = StackHandlerConstants::kStateOffset;
1816 cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY)); 2490 cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
1817 j(equal, &done); 2491 j(equal, &done, Label::kNear);
1818 // Fetch the next handler in the list. 2492 // Fetch the next handler in the list.
1819 const int kNextOffset = StackHandlerConstants::kNextOffset; 2493 const int kNextOffset = StackHandlerConstants::kNextOffset;
1820 movq(rsp, Operand(rsp, kNextOffset)); 2494 movq(rsp, Operand(rsp, kNextOffset));
1821 jmp(&loop); 2495 jmp(&loop);
1822 bind(&done); 2496 bind(&done);
1823 2497
1824 // Set the top handler address to next handler past the current ENTRY handler. 2498 // Set the top handler address to next handler past the current ENTRY handler.
1825 Operand handler_operand = ExternalOperand(handler_address); 2499 Operand handler_operand = ExternalOperand(handler_address);
1826 pop(handler_operand); 2500 pop(handler_operand);
1827 2501
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
1889 2563
1890 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { 2564 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
1891 cmpb(FieldOperand(map, Map::kInstanceTypeOffset), 2565 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
1892 Immediate(static_cast<int8_t>(type))); 2566 Immediate(static_cast<int8_t>(type)));
1893 } 2567 }
1894 2568
1895 2569
1896 void MacroAssembler::CheckMap(Register obj, 2570 void MacroAssembler::CheckMap(Register obj,
1897 Handle<Map> map, 2571 Handle<Map> map,
1898 Label* fail, 2572 Label* fail,
1899 bool is_heap_object) { 2573 SmiCheckType smi_check_type) {
1900 if (!is_heap_object) { 2574 if (smi_check_type == DO_SMI_CHECK) {
1901 JumpIfSmi(obj, fail); 2575 JumpIfSmi(obj, fail);
1902 } 2576 }
1903 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map); 2577 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
1904 j(not_equal, fail); 2578 j(not_equal, fail);
1905 } 2579 }
1906 2580
1907 2581
2582 void MacroAssembler::ClampUint8(Register reg) {
2583 Label done;
2584 testl(reg, Immediate(0xFFFFFF00));
2585 j(zero, &done, Label::kNear);
2586 setcc(negative, reg); // 1 if negative, 0 if positive.
2587 decb(reg); // 0 if negative, 255 if positive.
2588 bind(&done);
2589 }
2590
2591
2592 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
2593 XMMRegister temp_xmm_reg,
2594 Register result_reg,
2595 Register temp_reg) {
2596 Label done;
2597 Set(result_reg, 0);
2598 xorps(temp_xmm_reg, temp_xmm_reg);
2599 ucomisd(input_reg, temp_xmm_reg);
2600 j(below, &done, Label::kNear);
2601 uint64_t one_half = BitCast<uint64_t, double>(0.5);
2602 Set(temp_reg, one_half);
2603 movq(temp_xmm_reg, temp_reg);
2604 addsd(temp_xmm_reg, input_reg);
2605 cvttsd2si(result_reg, temp_xmm_reg);
2606 testl(result_reg, Immediate(0xFFFFFF00));
2607 j(zero, &done, Label::kNear);
2608 Set(result_reg, 255);
2609 bind(&done);
2610 }
2611
2612
2613 void MacroAssembler::DispatchMap(Register obj,
2614 Handle<Map> map,
2615 Handle<Code> success,
2616 SmiCheckType smi_check_type) {
2617 Label fail;
2618 if (smi_check_type == DO_SMI_CHECK) {
2619 JumpIfSmi(obj, &fail);
2620 }
2621 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
2622 j(equal, success, RelocInfo::CODE_TARGET);
2623
2624 bind(&fail);
2625 }
2626
2627
1908 void MacroAssembler::AbortIfNotNumber(Register object) { 2628 void MacroAssembler::AbortIfNotNumber(Register object) {
1909 NearLabel ok; 2629 Label ok;
1910 Condition is_smi = CheckSmi(object); 2630 Condition is_smi = CheckSmi(object);
1911 j(is_smi, &ok); 2631 j(is_smi, &ok, Label::kNear);
1912 Cmp(FieldOperand(object, HeapObject::kMapOffset), 2632 Cmp(FieldOperand(object, HeapObject::kMapOffset),
1913 isolate()->factory()->heap_number_map()); 2633 isolate()->factory()->heap_number_map());
1914 Assert(equal, "Operand not a number"); 2634 Assert(equal, "Operand not a number");
1915 bind(&ok); 2635 bind(&ok);
1916 } 2636 }
1917 2637
1918 2638
1919 void MacroAssembler::AbortIfSmi(Register object) { 2639 void MacroAssembler::AbortIfSmi(Register object) {
1920 NearLabel ok;
1921 Condition is_smi = CheckSmi(object); 2640 Condition is_smi = CheckSmi(object);
1922 Assert(NegateCondition(is_smi), "Operand is a smi"); 2641 Assert(NegateCondition(is_smi), "Operand is a smi");
1923 } 2642 }
1924 2643
1925 2644
1926 void MacroAssembler::AbortIfNotSmi(Register object) { 2645 void MacroAssembler::AbortIfNotSmi(Register object) {
1927 Condition is_smi = CheckSmi(object); 2646 Condition is_smi = CheckSmi(object);
1928 Assert(is_smi, "Operand is not a smi"); 2647 Assert(is_smi, "Operand is not a smi");
1929 } 2648 }
1930 2649
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
1973 Label* miss) { 2692 Label* miss) {
1974 // Check that the receiver isn't a smi. 2693 // Check that the receiver isn't a smi.
1975 testl(function, Immediate(kSmiTagMask)); 2694 testl(function, Immediate(kSmiTagMask));
1976 j(zero, miss); 2695 j(zero, miss);
1977 2696
1978 // Check that the function really is a function. 2697 // Check that the function really is a function.
1979 CmpObjectType(function, JS_FUNCTION_TYPE, result); 2698 CmpObjectType(function, JS_FUNCTION_TYPE, result);
1980 j(not_equal, miss); 2699 j(not_equal, miss);
1981 2700
1982 // Make sure that the function has an instance prototype. 2701 // Make sure that the function has an instance prototype.
1983 NearLabel non_instance; 2702 Label non_instance;
1984 testb(FieldOperand(result, Map::kBitFieldOffset), 2703 testb(FieldOperand(result, Map::kBitFieldOffset),
1985 Immediate(1 << Map::kHasNonInstancePrototype)); 2704 Immediate(1 << Map::kHasNonInstancePrototype));
1986 j(not_zero, &non_instance); 2705 j(not_zero, &non_instance, Label::kNear);
1987 2706
1988 // Get the prototype or initial map from the function. 2707 // Get the prototype or initial map from the function.
1989 movq(result, 2708 movq(result,
1990 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2709 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1991 2710
1992 // If the prototype or initial map is the hole, don't return it and 2711 // If the prototype or initial map is the hole, don't return it and
1993 // simply miss the cache instead. This will allow us to allocate a 2712 // simply miss the cache instead. This will allow us to allocate a
1994 // prototype object on-demand in the runtime system. 2713 // prototype object on-demand in the runtime system.
1995 CompareRoot(result, Heap::kTheHoleValueRootIndex); 2714 CompareRoot(result, Heap::kTheHoleValueRootIndex);
1996 j(equal, miss); 2715 j(equal, miss);
1997 2716
1998 // If the function does not have an initial map, we're done. 2717 // If the function does not have an initial map, we're done.
1999 NearLabel done; 2718 Label done;
2000 CmpObjectType(result, MAP_TYPE, kScratchRegister); 2719 CmpObjectType(result, MAP_TYPE, kScratchRegister);
2001 j(not_equal, &done); 2720 j(not_equal, &done, Label::kNear);
2002 2721
2003 // Get the prototype from the initial map. 2722 // Get the prototype from the initial map.
2004 movq(result, FieldOperand(result, Map::kPrototypeOffset)); 2723 movq(result, FieldOperand(result, Map::kPrototypeOffset));
2005 jmp(&done); 2724 jmp(&done, Label::kNear);
2006 2725
2007 // Non-instance prototype: Fetch prototype from constructor field 2726 // Non-instance prototype: Fetch prototype from constructor field
2008 // in initial map. 2727 // in initial map.
2009 bind(&non_instance); 2728 bind(&non_instance);
2010 movq(result, FieldOperand(result, Map::kConstructorOffset)); 2729 movq(result, FieldOperand(result, Map::kConstructorOffset));
2011 2730
2012 // All done. 2731 // All done.
2013 bind(&done); 2732 bind(&done);
2014 } 2733 }
2015 2734
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
2057 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 2776 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2058 } 2777 }
2059 #endif // ENABLE_DEBUGGER_SUPPORT 2778 #endif // ENABLE_DEBUGGER_SUPPORT
2060 2779
2061 2780
2062 void MacroAssembler::InvokeCode(Register code, 2781 void MacroAssembler::InvokeCode(Register code,
2063 const ParameterCount& expected, 2782 const ParameterCount& expected,
2064 const ParameterCount& actual, 2783 const ParameterCount& actual,
2065 InvokeFlag flag, 2784 InvokeFlag flag,
2066 const CallWrapper& call_wrapper) { 2785 const CallWrapper& call_wrapper) {
2067 NearLabel done; 2786 Label done;
2068 InvokePrologue(expected, 2787 InvokePrologue(expected,
2069 actual, 2788 actual,
2070 Handle<Code>::null(), 2789 Handle<Code>::null(),
2071 code, 2790 code,
2072 &done, 2791 &done,
2073 flag, 2792 flag,
2074 call_wrapper); 2793 call_wrapper,
2794 Label::kNear);
2075 if (flag == CALL_FUNCTION) { 2795 if (flag == CALL_FUNCTION) {
2076 call_wrapper.BeforeCall(CallSize(code)); 2796 call_wrapper.BeforeCall(CallSize(code));
2077 call(code); 2797 call(code);
2078 call_wrapper.AfterCall(); 2798 call_wrapper.AfterCall();
2079 } else { 2799 } else {
2080 ASSERT(flag == JUMP_FUNCTION); 2800 ASSERT(flag == JUMP_FUNCTION);
2081 jmp(code); 2801 jmp(code);
2082 } 2802 }
2083 bind(&done); 2803 bind(&done);
2084 } 2804 }
2085 2805
2086 2806
2087 void MacroAssembler::InvokeCode(Handle<Code> code, 2807 void MacroAssembler::InvokeCode(Handle<Code> code,
2088 const ParameterCount& expected, 2808 const ParameterCount& expected,
2089 const ParameterCount& actual, 2809 const ParameterCount& actual,
2090 RelocInfo::Mode rmode, 2810 RelocInfo::Mode rmode,
2091 InvokeFlag flag, 2811 InvokeFlag flag,
2092 const CallWrapper& call_wrapper) { 2812 const CallWrapper& call_wrapper) {
2093 NearLabel done; 2813 Label done;
2094 Register dummy = rax; 2814 Register dummy = rax;
2095 InvokePrologue(expected, 2815 InvokePrologue(expected,
2096 actual, 2816 actual,
2097 code, 2817 code,
2098 dummy, 2818 dummy,
2099 &done, 2819 &done,
2100 flag, 2820 flag,
2101 call_wrapper); 2821 call_wrapper,
2822 Label::kNear);
2102 if (flag == CALL_FUNCTION) { 2823 if (flag == CALL_FUNCTION) {
2103 call_wrapper.BeforeCall(CallSize(code)); 2824 call_wrapper.BeforeCall(CallSize(code));
2104 Call(code, rmode); 2825 Call(code, rmode);
2105 call_wrapper.AfterCall(); 2826 call_wrapper.AfterCall();
2106 } else { 2827 } else {
2107 ASSERT(flag == JUMP_FUNCTION); 2828 ASSERT(flag == JUMP_FUNCTION);
2108 Jump(code, rmode); 2829 Jump(code, rmode);
2109 } 2830 }
2110 bind(&done); 2831 bind(&done);
2111 } 2832 }
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
2151 InvokeCode(code, 2872 InvokeCode(code,
2152 expected, 2873 expected,
2153 actual, 2874 actual,
2154 RelocInfo::CODE_TARGET, 2875 RelocInfo::CODE_TARGET,
2155 flag, 2876 flag,
2156 call_wrapper); 2877 call_wrapper);
2157 } 2878 }
2158 } 2879 }
2159 2880
2160 2881
2882 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2883 const ParameterCount& actual,
2884 Handle<Code> code_constant,
2885 Register code_register,
2886 Label* done,
2887 InvokeFlag flag,
2888 const CallWrapper& call_wrapper,
2889 Label::Distance near_jump) {
2890 bool definitely_matches = false;
2891 Label invoke;
2892 if (expected.is_immediate()) {
2893 ASSERT(actual.is_immediate());
2894 if (expected.immediate() == actual.immediate()) {
2895 definitely_matches = true;
2896 } else {
2897 Set(rax, actual.immediate());
2898 if (expected.immediate() ==
2899 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2900 // Don't worry about adapting arguments for built-ins that
2901 // don't want that done. Skip adaption code by making it look
2902 // like we have a match between expected and actual number of
2903 // arguments.
2904 definitely_matches = true;
2905 } else {
2906 Set(rbx, expected.immediate());
2907 }
2908 }
2909 } else {
2910 if (actual.is_immediate()) {
2911 // Expected is in register, actual is immediate. This is the
2912 // case when we invoke function values without going through the
2913 // IC mechanism.
2914 cmpq(expected.reg(), Immediate(actual.immediate()));
2915 j(equal, &invoke, Label::kNear);
2916 ASSERT(expected.reg().is(rbx));
2917 Set(rax, actual.immediate());
2918 } else if (!expected.reg().is(actual.reg())) {
2919 // Both expected and actual are in (different) registers. This
2920 // is the case when we invoke functions using call and apply.
2921 cmpq(expected.reg(), actual.reg());
2922 j(equal, &invoke, Label::kNear);
2923 ASSERT(actual.reg().is(rax));
2924 ASSERT(expected.reg().is(rbx));
2925 }
2926 }
2927
2928 if (!definitely_matches) {
2929 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
2930 if (!code_constant.is_null()) {
2931 movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
2932 addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
2933 } else if (!code_register.is(rdx)) {
2934 movq(rdx, code_register);
2935 }
2936
2937 if (flag == CALL_FUNCTION) {
2938 call_wrapper.BeforeCall(CallSize(adaptor));
2939 Call(adaptor, RelocInfo::CODE_TARGET);
2940 call_wrapper.AfterCall();
2941 jmp(done, near_jump);
2942 } else {
2943 Jump(adaptor, RelocInfo::CODE_TARGET);
2944 }
2945 bind(&invoke);
2946 }
2947 }
2948
2949
2161 void MacroAssembler::EnterFrame(StackFrame::Type type) { 2950 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2162 push(rbp); 2951 push(rbp);
2163 movq(rbp, rsp); 2952 movq(rbp, rsp);
2164 push(rsi); // Context. 2953 push(rsi); // Context.
2165 Push(Smi::FromInt(type)); 2954 Push(Smi::FromInt(type));
2166 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); 2955 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
2167 push(kScratchRegister); 2956 push(kScratchRegister);
2168 if (emit_debug_code()) { 2957 if (emit_debug_code()) {
2169 movq(kScratchRegister, 2958 movq(kScratchRegister,
2170 isolate()->factory()->undefined_value(), 2959 isolate()->factory()->undefined_value(),
(...skipping 659 matching lines...) Expand 10 before | Expand all | Expand 10 after
2830 movq(function, Operand(function, Context::SlotOffset(index))); 3619 movq(function, Operand(function, Context::SlotOffset(index)));
2831 } 3620 }
2832 3621
2833 3622
2834 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, 3623 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2835 Register map) { 3624 Register map) {
2836 // Load the initial map. The global functions all have initial maps. 3625 // Load the initial map. The global functions all have initial maps.
2837 movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3626 movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2838 if (emit_debug_code()) { 3627 if (emit_debug_code()) {
2839 Label ok, fail; 3628 Label ok, fail;
2840 CheckMap(map, isolate()->factory()->meta_map(), &fail, false); 3629 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
2841 jmp(&ok); 3630 jmp(&ok);
2842 bind(&fail); 3631 bind(&fail);
2843 Abort("Global functions must have initial map"); 3632 Abort("Global functions must have initial map");
2844 bind(&ok); 3633 bind(&ok);
2845 } 3634 }
2846 } 3635 }
2847 3636
2848 3637
2849 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { 3638 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2850 // On Windows 64 stack slots are reserved by the caller for all arguments 3639 // On Windows 64 stack slots are reserved by the caller for all arguments
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after
2919 CPU::FlushICache(address_, size_); 3708 CPU::FlushICache(address_, size_);
2920 3709
2921 // Check that the code was patched as expected. 3710 // Check that the code was patched as expected.
2922 ASSERT(masm_.pc_ == address_ + size_); 3711 ASSERT(masm_.pc_ == address_ + size_);
2923 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 3712 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2924 } 3713 }
2925 3714
2926 } } // namespace v8::internal 3715 } } // namespace v8::internal
2927 3716
2928 #endif // V8_TARGET_ARCH_X64 3717 #endif // V8_TARGET_ARCH_X64
OLDNEW
« no previous file with comments | « src/x64/macro-assembler-x64.h ('k') | src/x64/regexp-macro-assembler-x64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698