| OLD | NEW |
| 1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 60 matching lines...) |
| 71 XMMRegister xmm13 = { 13 }; | 71 XMMRegister xmm13 = { 13 }; |
| 72 XMMRegister xmm14 = { 14 }; | 72 XMMRegister xmm14 = { 14 }; |
| 73 XMMRegister xmm15 = { 15 }; | 73 XMMRegister xmm15 = { 15 }; |
| 74 | 74 |
| 75 | 75 |
| 76 // ----------------------------------------------------------------------------- | 76 // ----------------------------------------------------------------------------- |
| 77 // Implementation of CpuFeatures | 77 // Implementation of CpuFeatures |
| 78 | 78 |
| 79 // The required user mode extensions in X64 are (from AMD64 ABI Table A.1): | 79 // The required user mode extensions in X64 are (from AMD64 ABI Table A.1): |
| 80 // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall | 80 // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall |
| 81 uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures; | 81 AssemblerData::AssemblerData() |
| 82 uint64_t CpuFeatures::enabled_ = 0; | 82 : BasicAssemblerData(), |
| 83 uint64_t CpuFeatures::found_by_runtime_probing_ = 0; | 83 spare_buffer_(NULL), |
| 84 supported_(CpuFeatures::kDefaultCpuFeatures), |
| 85 enabled_(0), |
| 86 found_by_runtime_probing_(0) { |
| 87 } |
| 84 | 88 |
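Note on the pattern above: the three `CpuFeatures` statics (and, further down, `Assembler::spare_buffer_`) move into a per-context `AssemblerData` struct reached through `v8_context()`. The header-side declarations are not part of this diff; below is a minimal sketch of what the usage in this file implies. `BasicAssemblerData`, `V8Context`, and the `v8_context()` accessor are assumptions reconstructed from the call sites, not the actual headers.

```cpp
// Sketch only: reconstructed from the usage in this file, not from the
// real headers. BasicAssemblerData, V8Context and v8_context() are
// assumed names; the actual declarations live outside this diff.
struct AssemblerData : public BasicAssemblerData {
  AssemblerData();

  byte* spare_buffer_;                 // single cached minimal-size buffer
  uint64_t supported_;                 // feature bits queried by IsSupported()
  uint64_t enabled_;                   // features force-enabled via Scope
  uint64_t found_by_runtime_probing_;  // probed bits not implied by defaults/OS
};

struct V8Context {
  AssemblerData* assembler_data_;  // owned; see PostConstruct()/PreDestroy()
  // ... other per-context subsystem data ...
};

V8Context* v8_context();  // accessor for the current context
```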
| 85 void CpuFeatures::Probe() { | 89 void CpuFeatures::Probe() { |
| 90 AssemblerData* data = v8_context()->assembler_data_; |
| 86 ASSERT(Heap::HasBeenSetup()); | 91 ASSERT(Heap::HasBeenSetup()); |
| 87 ASSERT(supported_ == kDefaultCpuFeatures); | 92 ASSERT(data->supported_ == kDefaultCpuFeatures); |
| 88 if (Serializer::enabled()) { | 93 if (Serializer::enabled()) { |
| 89 supported_ |= OS::CpuFeaturesImpliedByPlatform(); | 94 data->supported_ |= OS::CpuFeaturesImpliedByPlatform(); |
| 90 return; // No features if we might serialize. | 95 return; // No features if we might serialize. |
| 91 } | 96 } |
| 92 | 97 |
| 93 Assembler assm(NULL, 0); | 98 Assembler assm(NULL, 0); |
| 94 Label cpuid, done; | 99 Label cpuid, done; |
| 95 #define __ assm. | 100 #define __ assm. |
| 96 // Save old rsp, since we are going to modify the stack. | 101 // Save old rsp, since we are going to modify the stack. |
| 97 __ push(rbp); | 102 __ push(rbp); |
| 98 __ pushfq(); | 103 __ pushfq(); |
| 99 __ push(rcx); | 104 __ push(rcx); |
| (...skipping 14 matching lines...) |
| 114 | 119 |
| 115 // CPUID not supported. Clear the supported features in edx:eax. | 120 // CPUID not supported. Clear the supported features in edx:eax. |
| 116 __ xor_(rax, rax); | 121 __ xor_(rax, rax); |
| 117 __ jmp(&done); | 122 __ jmp(&done); |
| 118 | 123 |
| 119 // Invoke CPUID with 1 in eax to get feature information in | 124 // Invoke CPUID with 1 in eax to get feature information in |
| 120 // ecx:edx. Temporarily enable CPUID support because we know it's | 125 // ecx:edx. Temporarily enable CPUID support because we know it's |
| 121 // safe here. | 126 // safe here. |
| 122 __ bind(&cpuid); | 127 __ bind(&cpuid); |
| 123 __ movq(rax, Immediate(1)); | 128 __ movq(rax, Immediate(1)); |
| 124 supported_ = kDefaultCpuFeatures | (1 << CPUID); | 129 data->supported_ = kDefaultCpuFeatures | (1 << CPUID); |
| 125 { Scope fscope(CPUID); | 130 { Scope fscope(CPUID); |
| 126 __ cpuid(); | 131 __ cpuid(); |
| 127 // Move the result from ecx:edx to rdi. | 132 // Move the result from ecx:edx to rdi. |
| 128 __ movl(rdi, rdx); // Zero-extended to 64 bits. | 133 __ movl(rdi, rdx); // Zero-extended to 64 bits. |
| 129 __ shl(rcx, Immediate(32)); | 134 __ shl(rcx, Immediate(32)); |
| 130 __ or_(rdi, rcx); | 135 __ or_(rdi, rcx); |
| 131 | 136 |
| 132 // Get the sahf supported flag, from CPUID(0x80000001) | 137 // Get the sahf supported flag, from CPUID(0x80000001) |
| 133 __ movq(rax, 0x80000001, RelocInfo::NONE); | 138 __ movq(rax, 0x80000001, RelocInfo::NONE); |
| 134 __ cpuid(); | 139 __ cpuid(); |
| 135 } | 140 } |
| 136 supported_ = kDefaultCpuFeatures; | 141 data->supported_ = kDefaultCpuFeatures; |
| 137 | 142 |
| 138 // Put the CPU flags in rax. | 143 // Put the CPU flags in rax. |
| 139 // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID). | 144 // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID). |
| 140 __ movl(rax, Immediate(1)); | 145 __ movl(rax, Immediate(1)); |
| 141 __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported. | 146 __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported. |
| 142 __ not_(rax); | 147 __ not_(rax); |
| 143 __ and_(rax, rdi); | 148 __ and_(rax, rdi); |
| 144 __ or_(rax, rcx); | 149 __ or_(rax, rcx); |
| 145 __ or_(rax, Immediate(1 << CPUID)); | 150 __ or_(rax, Immediate(1 << CPUID)); |
| 146 | 151 |
| 147 // Done. | 152 // Done. |
| 148 __ bind(&done); | 153 __ bind(&done); |
| 149 __ movq(rsp, rbp); | 154 __ movq(rsp, rbp); |
| 150 __ pop(rbx); | 155 __ pop(rbx); |
| 151 __ pop(rcx); | 156 __ pop(rcx); |
| 152 __ popfq(); | 157 __ popfq(); |
| 153 __ pop(rbp); | 158 __ pop(rbp); |
| 154 __ ret(0); | 159 __ ret(0); |
| 155 #undef __ | 160 #undef __ |
| 156 | 161 |
| 157 CodeDesc desc; | 162 CodeDesc desc; |
| 158 assm.GetCode(&desc); | 163 assm.GetCode(&desc); |
| 159 Object* code = | 164 Object* code = |
| 160 Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL); | 165 Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL); |
| 161 if (!code->IsCode()) return; | 166 if (!code->IsCode()) return; |
| 162 LOG(CodeCreateEvent(Logger::BUILTIN_TAG, | 167 LOG(CodeCreateEvent(Logger::BUILTIN_TAG, |
| 163 Code::cast(code), "CpuFeatures::Probe")); | 168 Code::cast(code), "CpuFeatures::Probe")); |
| 164 typedef uint64_t (*F0)(); | 169 typedef uint64_t (*F0)(); |
| 165 F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry()); | 170 F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry()); |
| 166 supported_ = probe(); | 171 data->supported_ = probe(); |
| 167 found_by_runtime_probing_ = supported_; | 172 data->found_by_runtime_probing_ = data->supported_; |
| 168 found_by_runtime_probing_ &= ~kDefaultCpuFeatures; | 173 data->found_by_runtime_probing_ &= ~kDefaultCpuFeatures; |
| 169 uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform(); | 174 uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform(); |
| 170 supported_ |= os_guarantees; | 175 data->supported_ |= os_guarantees; |
| 171 found_by_runtime_probing_ &= ~os_guarantees; | 176 data->found_by_runtime_probing_ &= ~os_guarantees; |
| 172 // SSE2 and CMOV must be available on an X64 CPU. | 177 // SSE2 and CMOV must be available on an X64 CPU. |
| 173 ASSERT(IsSupported(CPUID)); | 178 ASSERT(IsSupported(CPUID)); |
| 174 ASSERT(IsSupported(SSE2)); | 179 ASSERT(IsSupported(SSE2)); |
| 175 ASSERT(IsSupported(CMOV)); | 180 ASSERT(IsSupported(CMOV)); |
| 176 } | 181 } |
| 177 | 182 |
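To recap the flow of `Probe()`: it assembles a stub that tests whether CPUID is available, runs CPUID leaves 1 and 0x80000001, folds the SAHF bit into the mask, and returns the result in rax; the stub is then invoked through `FUNCTION_CAST` and the returned bits land in `data->supported_`. Only the stores are visible in this diff, so the following sketch of the matching per-context reads is an assumption about the accessor bodies.

```cpp
// Sketch: per-context reads matching the per-context writes above.
// Exact signatures are assumptions; only the stores appear in this diff.
bool CpuFeatures::IsSupported(Feature f) {
  uint64_t supported = v8_context()->assembler_data_->supported_;
  return (supported & (uint64_t(1) << f)) != 0;
}

bool CpuFeatures::IsEnabled(Feature f) {
  uint64_t enabled = v8_context()->assembler_data_->enabled_;
  return (enabled & (uint64_t(1) << f)) != 0;
}
```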
| 178 | 183 |
| 179 // ----------------------------------------------------------------------------- | 184 // ----------------------------------------------------------------------------- |
| 180 // Implementation of RelocInfo | 185 // Implementation of RelocInfo |
| 181 | 186 |
| (...skipping 82 matching lines...) |
| 264 } | 269 } |
| 265 | 270 |
| 266 | 271 |
| 267 // ----------------------------------------------------------------------------- | 272 // ----------------------------------------------------------------------------- |
| 268 // Implementation of Assembler | 273 // Implementation of Assembler |
| 269 | 274 |
| 270 #ifdef GENERATED_CODE_COVERAGE | 275 #ifdef GENERATED_CODE_COVERAGE |
| 271 static void InitCoverageLog(); | 276 static void InitCoverageLog(); |
| 272 #endif | 277 #endif |
| 273 | 278 |
| 274 byte* Assembler::spare_buffer_ = NULL; | |
| 275 | |
| 276 Assembler::Assembler(void* buffer, int buffer_size) | 279 Assembler::Assembler(void* buffer, int buffer_size) |
| 277 : code_targets_(100) { | 280 : code_targets_(100) { |
| 278 if (buffer == NULL) { | 281 if (buffer == NULL) { |
| 279 // do our own buffer management | 282 // do our own buffer management |
| 280 if (buffer_size <= kMinimalBufferSize) { | 283 if (buffer_size <= kMinimalBufferSize) { |
| 281 buffer_size = kMinimalBufferSize; | 284 buffer_size = kMinimalBufferSize; |
| 282 | 285 AssemblerData* const data = v8_context()->assembler_data_; |
| 283 if (spare_buffer_ != NULL) { | 286 if (data->spare_buffer_ != NULL) { |
| 284 buffer = spare_buffer_; | 287 buffer = data->spare_buffer_; |
| 285 spare_buffer_ = NULL; | 288 data->spare_buffer_ = NULL; |
| 286 } | 289 } |
| 287 } | 290 } |
| 288 if (buffer == NULL) { | 291 if (buffer == NULL) { |
| 289 buffer_ = NewArray<byte>(buffer_size); | 292 buffer_ = NewArray<byte>(buffer_size); |
| 290 } else { | 293 } else { |
| 291 buffer_ = static_cast<byte*>(buffer); | 294 buffer_ = static_cast<byte*>(buffer); |
| 292 } | 295 } |
| 293 buffer_size_ = buffer_size; | 296 buffer_size_ = buffer_size; |
| 294 own_buffer_ = true; | 297 own_buffer_ = true; |
| 295 } else { | 298 } else { |
| (...skipping 24 matching lines...) |
| 320 written_statement_position_ = current_statement_position_; | 323 written_statement_position_ = current_statement_position_; |
| 321 written_position_ = current_position_; | 324 written_position_ = current_position_; |
| 322 #ifdef GENERATED_CODE_COVERAGE | 325 #ifdef GENERATED_CODE_COVERAGE |
| 323 InitCoverageLog(); | 326 InitCoverageLog(); |
| 324 #endif | 327 #endif |
| 325 } | 328 } |
| 326 | 329 |
| 327 | 330 |
| 328 Assembler::~Assembler() { | 331 Assembler::~Assembler() { |
| 329 if (own_buffer_) { | 332 if (own_buffer_) { |
| 330 if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { | 333 AssemblerData* const data = v8_context()->assembler_data_; |
| 331 spare_buffer_ = buffer_; | 334 if (data->spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { |
| 335 data->spare_buffer_ = buffer_; |
| 332 } else { | 336 } else { |
| 333 DeleteArray(buffer_); | 337 DeleteArray(buffer_); |
| 334 } | 338 } |
| 335 } | 339 } |
| 336 } | 340 } |
| 337 | 341 |
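The constructor/destructor pair above implements a single-slot buffer cache: the first minimal-size buffer released is parked in `data->spare_buffer_` rather than freed, and the next minimal-size `Assembler` reuses it, skipping a heap round trip. Here is the same idea in isolation; the names and the 4 KB size are illustrative, not taken from the diff.

```cpp
// Standalone sketch of the single-slot cache (illustrative, not V8 code).
struct BufferCache {
  static const int kMinimalBufferSize = 4 * 1024;
  unsigned char* spare = nullptr;  // at most one parked buffer

  unsigned char* Acquire(int size) {
    if (size <= kMinimalBufferSize && spare != nullptr) {
      unsigned char* buf = spare;  // reuse instead of allocating
      spare = nullptr;
      return buf;
    }
    return new unsigned char[size < kMinimalBufferSize ? kMinimalBufferSize
                                                       : size];
  }

  void Release(unsigned char* buf, int size) {
    if (spare == nullptr && size == kMinimalBufferSize) {
      spare = buf;  // park for the next minimal-size user
    } else {
      delete[] buf;
    }
  }
};
```

Moving the slot into per-context data keeps the optimization while removing the process-wide static; the parked buffer must still be freed when the context goes away (see `PreDestroy()` below).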
| 338 | 342 |
| 339 void Assembler::GetCode(CodeDesc* desc) { | 343 void Assembler::GetCode(CodeDesc* desc) { |
| 340 // finalize code | 344 // finalize code |
| 341 // (at this point overflow() may be true, but the gap ensures that | 345 // (at this point overflow() may be true, but the gap ensures that |
| 342 // we are still not overlapping instructions and relocation info) | 346 // we are still not overlapping instructions and relocation info) |
| 343 ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap | 347 ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap |
| 344 // setup desc | 348 // setup desc |
| 345 desc->buffer = buffer_; | 349 desc->buffer = buffer_; |
| 346 desc->buffer_size = buffer_size_; | 350 desc->buffer_size = buffer_size_; |
| 347 desc->instr_size = pc_offset(); | 351 desc->instr_size = pc_offset(); |
| 348 ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system. | 352 ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system. |
| 349 desc->reloc_size = | 353 desc->reloc_size = |
| 350 static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos()); | 354 static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos()); |
| 351 desc->origin = this; | 355 desc->origin = this; |
| 352 | 356 |
| 353 Counters::reloc_info_size.Increment(desc->reloc_size); | 357 INCREMENT_COUNTER(reloc_info_size, desc->reloc_size); |
| 354 } | 358 } |
| 355 | 359 |
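The `reloc_size` computation in `GetCode()` follows from the buffer layout: instructions are emitted forward from `buffer_` while relocation info is written backward from `buffer_ + buffer_size_`, and the earlier ASSERT checks that the two regions never meet. Schematically:

```cpp
// Layout of the single assembler buffer (schematic):
//
//   buffer_            pc_                  reloc_info_writer.pos()
//     |<---- code ---->|<---- free gap ---->|<---- reloc info ---->|
//     0           instr_size                                buffer_size_
//
// hence:
//   desc->instr_size = pc_offset();  // bytes of emitted code
//   desc->reloc_size =
//       (buffer_ + buffer_size_) - reloc_info_writer.pos();  // tail bytes
```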
| 356 | 360 |
| 357 void Assembler::Align(int m) { | 361 void Assembler::Align(int m) { |
| 358 ASSERT(IsPowerOf2(m)); | 362 ASSERT(IsPowerOf2(m)); |
| 359 while ((pc_offset() & (m - 1)) != 0) { | 363 while ((pc_offset() & (m - 1)) != 0) { |
| 360 nop(); | 364 nop(); |
| 361 } | 365 } |
| 362 } | 366 } |
| 363 | 367 |
| (...skipping 57 matching lines...) |
| 421 | 425 |
| 422 // copy the data | 426 // copy the data |
| 423 intptr_t pc_delta = desc.buffer - buffer_; | 427 intptr_t pc_delta = desc.buffer - buffer_; |
| 424 intptr_t rc_delta = (desc.buffer + desc.buffer_size) - | 428 intptr_t rc_delta = (desc.buffer + desc.buffer_size) - |
| 425 (buffer_ + buffer_size_); | 429 (buffer_ + buffer_size_); |
| 426 memmove(desc.buffer, buffer_, desc.instr_size); | 430 memmove(desc.buffer, buffer_, desc.instr_size); |
| 427 memmove(rc_delta + reloc_info_writer.pos(), | 431 memmove(rc_delta + reloc_info_writer.pos(), |
| 428 reloc_info_writer.pos(), desc.reloc_size); | 432 reloc_info_writer.pos(), desc.reloc_size); |
| 429 | 433 |
| 430 // switch buffers | 434 // switch buffers |
| 431 if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { | 435 AssemblerData* const data = v8_context()->assembler_data_; |
| 432 spare_buffer_ = buffer_; | 436 if (data->spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { |
| 437 data->spare_buffer_ = buffer_; |
| 433 } else { | 438 } else { |
| 434 DeleteArray(buffer_); | 439 DeleteArray(buffer_); |
| 435 } | 440 } |
| 436 buffer_ = desc.buffer; | 441 buffer_ = desc.buffer; |
| 437 buffer_size_ = desc.buffer_size; | 442 buffer_size_ = desc.buffer_size; |
| 438 pc_ += pc_delta; | 443 pc_ += pc_delta; |
| 439 if (last_pc_ != NULL) { | 444 if (last_pc_ != NULL) { |
| 440 last_pc_ += pc_delta; | 445 last_pc_ += pc_delta; |
| 441 } | 446 } |
| 442 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, | 447 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
| (...skipping 2086 matching lines...) |
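In the `GrowBuffer()` excerpt above, the two deltas move the two ends independently: `pc_delta` shifts the code, which is anchored at the buffer start, while `rc_delta` shifts the relocation info, which is anchored at the buffer end. A worked example with assumed addresses:

```cpp
// Illustrative numbers only (not from the diff):
// old buffer at 0x1000, 4 KB; new buffer at 0x8000, 8 KB.
//
//   pc_delta = desc.buffer - buffer_
//            = 0x8000 - 0x1000 = 0x7000      // start-anchored shift
//   rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_)
//            = 0xA000 - 0x2000 = 0x8000      // end-anchored shift
//
// memmove() copies instr_size bytes to the new start and reloc_size bytes
// to the new end; pc_, last_pc_ and the reloc writer are then rebased by
// the matching delta.
```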
| 2529 RecordRelocInfo(RelocInfo::POSITION, current_position_); | 2534 RecordRelocInfo(RelocInfo::POSITION, current_position_); |
| 2530 written_position_ = current_position_; | 2535 written_position_ = current_position_; |
| 2531 } | 2536 } |
| 2532 } | 2537 } |
| 2533 | 2538 |
| 2534 | 2539 |
| 2535 const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask | | 2540 const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask | |
| 2536 1 << RelocInfo::INTERNAL_REFERENCE | | 2541 1 << RelocInfo::INTERNAL_REFERENCE | |
| 2537 1 << RelocInfo::JS_RETURN; | 2542 1 << RelocInfo::JS_RETURN; |
| 2538 | 2543 |
| 2544 void Assembler::PostConstruct() { |
| 2545 v8_context()->assembler_data_ = new AssemblerData(); |
| 2546 } |
| 2547 |
| 2548 void Assembler::PreDestroy() { |
| 2549 delete v8_context()->assembler_data_; |
| 2550 } |
| 2551 |
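`PostConstruct()`/`PreDestroy()` tie the `AssemblerData` lifetime to the context. The call sites are not in this diff; the sketch below shows the assumed ordering, with hypothetical function names.

```cpp
// Sketch: assumed lifecycle around the new hooks (names hypothetical).
void BootstrapContext() {
  Assembler::PostConstruct();  // allocates v8_context()->assembler_data_
  // ... heap setup ...
  CpuFeatures::Probe();        // fills data->supported_ (needs the heap)
}

void TearDownContext() {
  // ... other teardown ...
  Assembler::PreDestroy();     // deletes v8_context()->assembler_data_
}
```

One thing worth double-checking in the actual patch: if `spare_buffer_` is still parked when `PreDestroy()` runs, `~AssemblerData` (not shown here) needs to release it, otherwise the cached buffer leaks.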
| 2539 } } // namespace v8::internal | 2552 } } // namespace v8::internal |