Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 293023: Added infrastructure for optimizing new CanvasArray types in WebGL... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years, 2 months ago
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 562 matching lines...)
  return equal;
}


Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  // A 32-bit integer value can always be converted to a smi.
  return always;
}


+Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
+  // An unsigned 32-bit integer value is valid as long as the high bit
+  // is not set.
+  testq(src, Immediate(0x80000000));
+  return zero;
+}
+
+
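The new check relies on the smi layout used on x64 in this branch: the 32-bit payload sits in the upper half of a 64-bit word (kSmiShift == 32, low half zero), so every signed int32 is representable, while an unsigned value with bit 31 set would read back as a different, negative number. A standalone sketch of that reasoning (not V8 code; the layout assumption is stated in the comments):

// Sketch of the smi round trip, assuming this branch's x64 layout:
// payload in the upper 32 bits (kSmiShift == 32), lower 32 bits zero.
#include <cassert>
#include <cstdint>

const int kSmiShift = 32;

int64_t SmiTag(int32_t value) {
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShift);
}

int32_t SmiUntag(int64_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);
}

int main() {
  // Every int32 round-trips, which is why CheckInteger32ValidSmiValue
  // can simply return 'always'.
  assert(SmiUntag(SmiTag(-1)) == -1);
  assert(SmiUntag(SmiTag(0x7FFFFFFF)) == 0x7FFFFFFF);
  // A uint32 with bit 31 set would untag as a negative number, which is
  // exactly what testq(src, Immediate(0x80000000)) rejects.
  uint32_t big = 0x80000001u;
  assert(SmiUntag(SmiTag(static_cast<int32_t>(big))) < 0);
  return 0;
}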
void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    movq(kScratchRegister, src);
    neg(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpq(dst, kScratchRegister);
    j(not_equal, on_smi_result);
    movq(src, kScratchRegister);
  } else {
(...skipping 643 matching lines...)
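A step worth spelling out in SmiNeg above: after neg(dst), the code compares the result against the original and jumps to on_smi_result only when they differ. In two's complement, the only 64-bit words that negate to themselves are 0 and the minimum value; under this layout those are exactly smi 0, whose negation is JavaScript's -0 and must become a heap number, and Smi::kMinValue, whose negation overflows the 32-bit payload. A standalone sketch (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  // The two fixed points of two's-complement negation, as tagged words:
  uint64_t zero = 0;
  uint64_t min = 0x8000000000000000ull;  // Smi::kMinValue in this layout
  assert(0 - zero == zero);  // smi 0: the result -0 needs a heap number
  assert(0 - min == min);    // kMinValue: -kMinValue overflows the payload
  // Every other smi negates to a distinct word, so 'cmpq' followed by
  // j(not_equal, on_smi_result) accepts precisely the representable results.
  return 0;
}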
  j(equal, on_equals);
}


void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid);
}


+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+                                                Label* on_invalid) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid);
+}
+
+
void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
                                      Label* on_not_both_smi) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi);
}


void MacroAssembler::Move(Register dst, Handle<Object> source) {
  ASSERT(!source->IsFailure());
  if (source->IsSmi()) {
(...skipping 950 matching lines...)
  and_(object, Immediate(~kHeapObjectTagMask));
  movq(kScratchRegister, new_space_allocation_top);
#ifdef DEBUG
  cmpq(object, Operand(kScratchRegister, 0));
  Check(below, "Undo allocation of non allocated memory");
#endif
  movq(Operand(kScratchRegister, 0), object);
}


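The context above is the tail of the undo-allocation helper: the and_ first clears the tag bits from the object pointer, and since new space is a bump-pointer allocator, undoing the most recent allocation is just resetting the allocation top back to the object's start, which is what the final movq does. A sketch of that scheme with hypothetical names (not V8 code):

// Bump-pointer allocation and its undo, modeled in plain C++.
#include <cassert>
#include <cstddef>
#include <cstdint>

struct NewSpace {
  uintptr_t top;    // the allocation top: next free address
  uintptr_t limit;  // end of the space
};

// Allocation bumps 'top'; the object occupies [old_top, old_top + size).
uintptr_t AllocateRaw(NewSpace* space, size_t size) {
  if (space->top + size > space->limit) return 0;  // caller's gc_required path
  uintptr_t object = space->top;
  space->top += size;
  return object;
}

// Undo is only valid for the most recent allocation: reset 'top' to the
// object's start, as the movq above does.
void UndoAllocation(NewSpace* space, uintptr_t object) {
  assert(object < space->top);  // mirrors Check(below, ...)
  space->top = object;
}

int main() {
  unsigned char arena[64];
  NewSpace space = {reinterpret_cast<uintptr_t>(arena),
                    reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
  uintptr_t obj = AllocateRaw(&space, 16);
  UndoAllocation(&space, obj);
  assert(space.top == obj);
  return 0;
}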
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch,
+                                        Label* gc_required) {
+  // Allocate heap number in new space.
+  AllocateInNewSpace(HeapNumber::kSize,
+                     result,
+                     scratch,
+                     no_reg,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map.
+  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
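AllocateHeapNumber sets the map but leaves the payload uninitialized; a caller would follow with a store of the IEEE double through FieldOperand. A standalone sketch of the resulting layout and of the tag adjustment FieldOperand performs (not V8 code; the offsets assume the 64-bit layout of this era, an 8-byte map word followed by the 8-byte value):

#include <cassert>
#include <cstdint>

const uintptr_t kHeapObjectTag = 1;  // TAG_OBJECT leaves this bit set
const int kValueOffset = 8;          // assumed HeapNumber::kValueOffset on x64

// FieldOperand(obj, off) addresses obj + off - kHeapObjectTag, compensating
// for the tag bit carried in every heap object pointer.
double* ValueField(uintptr_t tagged) {
  return reinterpret_cast<double*>(tagged + kValueOffset - kHeapObjectTag);
}

int main() {
  alignas(8) unsigned char object[16] = {0};  // HeapNumber::kSize: map + value
  uintptr_t tagged = reinterpret_cast<uintptr_t>(object) | kHeapObjectTag;
  *ValueField(tagged) = 3.5;  // what a movsd through FieldOperand would do
  assert(*reinterpret_cast<double*>(object + 8) == 3.5);
  return 0;
}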
CodePatcher::CodePatcher(byte* address, int size)
    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

} }  // namespace v8::internal
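Typical CodePatcher usage, as a hedged sketch (it assumes the class exposes its MacroAssembler through a masm() accessor, which is not shown in this diff): construct the patcher over the bytes to rewrite, emit exactly 'size' bytes, and let the destructor flush the instruction cache.

// Replace the first byte at 'address' with a breakpoint (int3 is one byte).
{
  CodePatcher patcher(address, 1);
  patcher.masm()->int3();  // emits 0xCC at 'address'
}  // ~CodePatcher: FlushICache, then ASSERT exactly one byte was emitted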
