Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(6)

Side by Side Diff: src/x64/macro-assembler-x64.cc

Issue 240473009: Update SafeMove, SafePush, SmiToIndex and SmiToNegativeIndex for x32 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 980 matching lines...) Expand 10 before | Expand all | Expand 10 after
991 // Smi tagging, untagging and tag detection. 991 // Smi tagging, untagging and tag detection.
992 992
993 bool MacroAssembler::IsUnsafeInt(const int32_t x) { 993 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
994 static const int kMaxBits = 17; 994 static const int kMaxBits = 17;
995 return !is_intn(x, kMaxBits); 995 return !is_intn(x, kMaxBits);
996 } 996 }
997 997
998 998
999 void MacroAssembler::SafeMove(Register dst, Smi* src) { 999 void MacroAssembler::SafeMove(Register dst, Smi* src) {
1000 ASSERT(!dst.is(kScratchRegister)); 1000 ASSERT(!dst.is(kScratchRegister));
1001 ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
1002 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { 1001 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1003 Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); 1002 if (SmiValuesAre32Bits()) {
1004 Move(kScratchRegister, Smi::FromInt(jit_cookie())); 1003 // JIT cookie can be converted to Smi.
1005 xorq(dst, kScratchRegister); 1004 Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
1005 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1006 xorp(dst, kScratchRegister);
1007 } else {
1008 ASSERT(SmiValuesAre31Bits());
1009 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1010 movp(dst, Immediate(value ^ jit_cookie()));
1011 xorp(dst, Immediate(jit_cookie()));
1012 }
1006 } else { 1013 } else {
1007 Move(dst, src); 1014 Move(dst, src);
1008 } 1015 }
1009 } 1016 }
1010 1017
1011 1018
1012 void MacroAssembler::SafePush(Smi* src) { 1019 void MacroAssembler::SafePush(Smi* src) {
1013 ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
1014 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { 1020 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1015 Push(Smi::FromInt(src->value() ^ jit_cookie())); 1021 if (SmiValuesAre32Bits()) {
1016 Move(kScratchRegister, Smi::FromInt(jit_cookie())); 1022 // JIT cookie can be converted to Smi.
1017 xorq(Operand(rsp, 0), kScratchRegister); 1023 Push(Smi::FromInt(src->value() ^ jit_cookie()));
1024 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1025 xorp(Operand(rsp, 0), kScratchRegister);
1026 } else {
1027 ASSERT(SmiValuesAre31Bits());
1028 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1029 Push(Immediate(value ^ jit_cookie()));
1030 xorp(Operand(rsp, 0), Immediate(jit_cookie()));
1031 }
1018 } else { 1032 } else {
1019 Push(src); 1033 Push(src);
1020 } 1034 }
1021 } 1035 }
1022 1036
1023 1037
1024 Register MacroAssembler::GetSmiConstant(Smi* source) { 1038 Register MacroAssembler::GetSmiConstant(Smi* source) {
1025 int value = source->value(); 1039 int value = source->value();
1026 if (value == 0) { 1040 if (value == 0) {
1027 xorl(kScratchRegister, kScratchRegister); 1041 xorl(kScratchRegister, kScratchRegister);
(...skipping 1199 matching lines...) Expand 10 before | Expand all | Expand 10 after
2227 andp(dst, kScratchRegister); 2241 andp(dst, kScratchRegister);
2228 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. 2242 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2229 xorp(dst, src1); 2243 xorp(dst, src1);
2230 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. 2244 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2231 } 2245 }
2232 2246
2233 2247
2234 SmiIndex MacroAssembler::SmiToIndex(Register dst, 2248 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2235 Register src, 2249 Register src,
2236 int shift) { 2250 int shift) {
2237 ASSERT(is_uint6(shift)); 2251 if (SmiValuesAre32Bits()) {
2238 // There is a possible optimization if shift is in the range 60-63, but that 2252 ASSERT(is_uint6(shift));
2239 // will (and must) never happen. 2253 // There is a possible optimization if shift is in the range 60-63, but that
2240 if (!dst.is(src)) { 2254 // will (and must) never happen.
2241 movq(dst, src); 2255 if (!dst.is(src)) {
2256 movp(dst, src);
2257 }
2258 if (shift < kSmiShift) {
2259 sarp(dst, Immediate(kSmiShift - shift));
2260 } else {
2261 shlp(dst, Immediate(shift - kSmiShift));
2262 }
2263 return SmiIndex(dst, times_1);
2264 } else {
2265 ASSERT(SmiValuesAre31Bits());
2266 ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2267 if (!dst.is(src)) {
2268 movp(dst, src);
2269 }
2270 // We have to sign extend the index register to 64-bit as the SMI might
2271 // be negative.
2272 movsxlq(dst, dst);
2273 if (shift == times_1) {
2274 sarq(dst, Immediate(kSmiShift));
2275 return SmiIndex(dst, times_1);
2276 }
2277 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2242 } 2278 }
2243 if (shift < kSmiShift) {
2244 sarq(dst, Immediate(kSmiShift - shift));
2245 } else {
2246 shlq(dst, Immediate(shift - kSmiShift));
2247 }
2248 return SmiIndex(dst, times_1);
2249 } 2279 }
2250 2280
2281
2251 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, 2282 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2252 Register src, 2283 Register src,
2253 int shift) { 2284 int shift) {
2254 // Register src holds a positive smi. 2285 if (SmiValuesAre32Bits()) {
2255 ASSERT(is_uint6(shift)); 2286 // Register src holds a positive smi.
2256 if (!dst.is(src)) { 2287 ASSERT(is_uint6(shift));
2257 movq(dst, src); 2288 if (!dst.is(src)) {
2289 movp(dst, src);
2290 }
2291 negp(dst);
2292 if (shift < kSmiShift) {
2293 sarp(dst, Immediate(kSmiShift - shift));
2294 } else {
2295 shlp(dst, Immediate(shift - kSmiShift));
2296 }
2297 return SmiIndex(dst, times_1);
2298 } else {
2299 ASSERT(SmiValuesAre31Bits());
2300 ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2301 if (!dst.is(src)) {
2302 movp(dst, src);
2303 }
2304 negq(dst);
2305 if (shift == times_1) {
2306 sarq(dst, Immediate(kSmiShift));
2307 return SmiIndex(dst, times_1);
2308 }
2309 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2258 } 2310 }
2259 negq(dst);
2260 if (shift < kSmiShift) {
2261 sarq(dst, Immediate(kSmiShift - shift));
2262 } else {
2263 shlq(dst, Immediate(shift - kSmiShift));
2264 }
2265 return SmiIndex(dst, times_1);
2266 } 2311 }
2267 2312
2268 2313
2269 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { 2314 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2270 ASSERT_EQ(0, kSmiShift % kBitsPerByte); 2315 ASSERT_EQ(0, kSmiShift % kBitsPerByte);
2271 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); 2316 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2272 } 2317 }
2273 2318
2274 2319
2275 void MacroAssembler::Push(Smi* source) { 2320 void MacroAssembler::Push(Smi* source) {
(...skipping 2879 matching lines...) Expand 10 before | Expand all | Expand 10 after
5155 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift())); 5200 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
5156 movl(rax, dividend); 5201 movl(rax, dividend);
5157 shrl(rax, Immediate(31)); 5202 shrl(rax, Immediate(31));
5158 addl(rdx, rax); 5203 addl(rdx, rax);
5159 } 5204 }
5160 5205
5161 5206
5162 } } // namespace v8::internal 5207 } } // namespace v8::internal
5163 5208
5164 #endif // V8_TARGET_ARCH_X64 5209 #endif // V8_TARGET_ARCH_X64
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698