OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrVkGpu.h" | 8 #include "GrVkGpu.h" |
9 | 9 |
10 #include "GrContextOptions.h" | 10 #include "GrContextOptions.h" |
(...skipping 16 matching lines...) |
27 #include "GrVkRenderPass.h" | 27 #include "GrVkRenderPass.h" |
28 #include "GrVkResourceProvider.h" | 28 #include "GrVkResourceProvider.h" |
29 #include "GrVkTexture.h" | 29 #include "GrVkTexture.h" |
30 #include "GrVkTextureRenderTarget.h" | 30 #include "GrVkTextureRenderTarget.h" |
31 #include "GrVkTransferBuffer.h" | 31 #include "GrVkTransferBuffer.h" |
32 #include "GrVkVertexBuffer.h" | 32 #include "GrVkVertexBuffer.h" |
33 | 33 |
34 #include "SkConfig8888.h" | 34 #include "SkConfig8888.h" |
35 | 35 |
36 #include "vk/GrVkInterface.h" | 36 #include "vk/GrVkInterface.h" |
| 37 #include "vk/GrVkTypes.h" |
37 | 38 |
38 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X) | 39 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X) |
39 #define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X) | 40 #define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X) |
40 #define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X) | 41 #define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X) |
41 | 42 |
42 //////////////////////////////////////////////////////////////////////////////// | 43 //////////////////////////////////////////////////////////////////////////////// |
43 // Stuff used to set up a GrVkGpu secretly for now. | 44 // Stuff used to set up a GrVkGpu secretly for now. |
44 | 45 |
45 // For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of | 46 // For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of |
46 // hiding this code from official skia. In the end the VkGpuCreate will not take a GrBackendContext | 47 // hiding this code from official skia. In the end the VkGpuCreate will not take a GrBackendContext |
(...skipping 141 matching lines...) |
188 VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps)); | 189 VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps)); |
189 | 190 |
190 } | 191 } |
191 | 192 |
192 GrVkGpu::~GrVkGpu() { | 193 GrVkGpu::~GrVkGpu() { |
193 shaderc_compiler_release(fCompiler); | 194 shaderc_compiler_release(fCompiler); |
194 fCurrentCmdBuffer->end(this); | 195 fCurrentCmdBuffer->end(this); |
195 fCurrentCmdBuffer->unref(this); | 196 fCurrentCmdBuffer->unref(this); |
196 | 197 |
197 // wait for all commands to finish | 198 // wait for all commands to finish |
198 VK_CALL(QueueWaitIdle(fQueue)); | 199 VkResult res = VK_CALL(QueueWaitIdle(fQueue)); |
| 200 SkASSERT(res == VK_SUCCESS); |
199 | 201 |
200 // must call this just before we destroy the VkDevice | 202 // must call this just before we destroy the VkDevice |
201 fResourceProvider.destroyResources(); | 203 fResourceProvider.destroyResources(); |
202 | 204 |
203 VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr)); | 205 VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr)); |
204 VK_CALL(DestroyDevice(fDevice, nullptr)); | 206 VK_CALL(DestroyDevice(fDevice, nullptr)); |
205 VK_CALL(DestroyInstance(fVkInstance, nullptr)); | 207 VK_CALL(DestroyInstance(fVkInstance, nullptr)); |
206 } | 208 } |
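A minimal sketch of the teardown order the destructor above now enforces, written against raw Vulkan rather than the GrVkInterface wrappers behind VK_CALL; the parameter names simply mirror the fQueue/fDevice/fCmdPool/fVkInstance members and the helper name is illustrative:

    #include <vulkan/vulkan.h>
    #include <cassert>

    // Drain all submitted work first, then destroy children before parents:
    // command pool -> device -> instance.
    void teardown(VkQueue queue, VkDevice device, VkCommandPool cmdPool, VkInstance instance) {
        VkResult res = vkQueueWaitIdle(queue);
        assert(VK_SUCCESS == res);   // mirrors the new assert on QueueWaitIdle above

        vkDestroyCommandPool(device, cmdPool, nullptr);
        vkDestroyDevice(device, nullptr);
        vkDestroyInstance(instance, nullptr);
    }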
207 | 209 |
208 /////////////////////////////////////////////////////////////////////////////// | 210 /////////////////////////////////////////////////////////////////////////////// |
(...skipping 366 matching lines...) |
575 | 577 |
576 if (0 == desc.fTextureHandle) { | 578 if (0 == desc.fTextureHandle) { |
577 return nullptr; | 579 return nullptr; |
578 } | 580 } |
579 | 581 |
580 int maxSize = this->caps()->maxTextureSize(); | 582 int maxSize = this->caps()->maxTextureSize(); |
581 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { | 583 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { |
582 return nullptr; | 584 return nullptr; |
583 } | 585 } |
584 | 586 |
585 // TODO: determine what format Chrome will actually send us and turn it into a Resource | 587 const GrVkTextureInfo* info = reinterpret_cast<const GrVkTextureInfo*>(desc.fTextureHandle); |
586 GrVkImage::Resource* imageRsrc = reinterpret_cast<GrVkImage::Resource*>(desc.fTextureHandle); | 588 if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) { |
| 589 return nullptr; |
| 590 } |
587 | 591 |
588 GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership) | 592 GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership) |
589 ? GrGpuResource::kAdopted_LifeCycle | 593 ? GrGpuResource::kAdopted_LifeCycle |
590 : GrGpuResource::kBorrowed_LifeCycle; | 594 : GrGpuResource::kBorrowed_LifeCycle; |
591 | 595 |
592 GrSurfaceDesc surfDesc; | 596 GrSurfaceDesc surfDesc; |
593 // next line relies on GrBackendTextureDesc's flags matching GrTexture's | 597 // next line relies on GrBackendTextureDesc's flags matching GrTexture's |
594 surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags; | 598 surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags; |
595 surfDesc.fWidth = desc.fWidth; | 599 surfDesc.fWidth = desc.fWidth; |
596 surfDesc.fHeight = desc.fHeight; | 600 surfDesc.fHeight = desc.fHeight; |
597 surfDesc.fConfig = desc.fConfig; | 601 surfDesc.fConfig = desc.fConfig; |
598 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount()); | 602 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount()); |
599 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag); | 603 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag); |
600 // In GL, Chrome assumes all textures are BottomLeft | 604 // In GL, Chrome assumes all textures are BottomLeft |
601 // In VK, we don't have this restriction | 605 // In VK, we don't have this restriction |
602 surfDesc.fOrigin = resolve_origin(desc.fOrigin); | 606 surfDesc.fOrigin = resolve_origin(desc.fOrigin); |
603 | 607 |
604 GrVkTexture* texture = nullptr; | 608 GrVkTexture* texture = nullptr; |
605 if (renderTarget) { | 609 if (renderTarget) { |
606 texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc, | 610 texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc, |
607 lifeCycle, format, | 611 lifeCycle, format, |
608 imageRsrc); | 612 info); |
609 } else { | 613 } else { |
610 texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, imageRsrc); | 614 texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, |
| 615 info); |
611 } | 616 } |
612 if (!texture) { | 617 if (!texture) { |
613 return nullptr; | 618 return nullptr; |
614 } | 619 } |
615 | 620 |
616 return texture; | 621 return texture; |
617 } | 622 } |
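For context, a hedged sketch of what a client now hands to onWrapBackendTexture(): a pointer to a GrVkTextureInfo (from vk/GrVkTypes.h) carrying the image, its memory, tiling, and layout, passed through fTextureHandle. The concrete values below are placeholders, not part of this change:

    // Assumes the client has already created `image` and bound `alloc` to it.
    GrVkTextureInfo info;
    info.fImage = image;                           // VkImage owned by the client
    info.fAlloc = alloc;                           // VkDeviceMemory backing the image
    info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    GrBackendTextureDesc desc;
    desc.fWidth = 256;
    desc.fHeight = 256;
    desc.fConfig = kRGBA_8888_GrPixelConfig;
    desc.fTextureHandle = reinterpret_cast<GrBackendObject>(&info);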
618 | 623 |
619 GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc, | 624 GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc, |
620 GrWrapOwnership ownership) { | 625 GrWrapOwnership ownership) { |
621 | 626 |
622 // TODO: determine what format Chrome will actually send us and turn it into a Resource | 627 const GrVkTextureInfo* info = |
623 GrVkImage::Resource* imageRsrc = | 628 reinterpret_cast<const GrVkTextureInfo*>(wrapDesc.fRenderTargetHandle); |
624 reinterpret_cast<GrVkImage::Resource*>(wrapDesc.fRenderTargetHandle); | 629 if (VK_NULL_HANDLE == info->fImage || |
 | 630 (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) { |
| 631 return nullptr; |
| 632 } |
625 | 633 |
626 GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership) | 634 GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership) |
627 ? GrGpuResource::kAdopted_LifeCycle | 635 ? GrGpuResource::kAdopted_LifeCycle |
628 : GrGpuResource::kBorrowed_LifeCycle; | 636 : GrGpuResource::kBorrowed_LifeCycle; |
629 | 637 |
630 GrSurfaceDesc desc; | 638 GrSurfaceDesc desc; |
631 desc.fConfig = wrapDesc.fConfig; | 639 desc.fConfig = wrapDesc.fConfig; |
632 desc.fFlags = kCheckAllocation_GrSurfaceFlag; | 640 desc.fFlags = kCheckAllocation_GrSurfaceFlag; |
633 desc.fWidth = wrapDesc.fWidth; | 641 desc.fWidth = wrapDesc.fWidth; |
634 desc.fHeight = wrapDesc.fHeight; | 642 desc.fHeight = wrapDesc.fHeight; |
635 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount()); | 643 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount()); |
636 | 644 |
637 desc.fOrigin = resolve_origin(wrapDesc.fOrigin); | 645 desc.fOrigin = resolve_origin(wrapDesc.fOrigin); |
638 | 646 |
639 GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc, | 647 GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc, |
640 lifeCycle, imageRsrc); | 648 lifeCycle, |
| 649 info); |
641 if (tgt && wrapDesc.fStencilBits) { | 650 if (tgt && wrapDesc.fStencilBits) { |
642 if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) { | 651 if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) { |
643 tgt->unref(); | 652 tgt->unref(); |
644 return nullptr; | 653 return nullptr; |
645 } | 654 } |
646 } | 655 } |
647 return tgt; | 656 return tgt; |
648 } | 657 } |
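The null-handle checks added in both wrap paths boil down to one rule: a wrapped object must always carry a VkImage, and only the render-target path above tolerates a missing VkDeviceMemory, and only when borrowed, since adopting means Skia will later free that memory. Restated as a small helper purely for illustration (not part of the patch):

    static bool can_wrap_render_target(const GrVkTextureInfo& info, GrWrapOwnership ownership) {
        if (VK_NULL_HANDLE == info.fImage) {
            return false;                  // nothing to wrap
        }
        if (VK_NULL_HANDLE == info.fAlloc && kAdopt_GrWrapOwnership == ownership) {
            return false;                  // can't adopt memory we don't have
        }
        return true;
    }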
649 | 658 |
650 //////////////////////////////////////////////////////////////////////////////// | 659 //////////////////////////////////////////////////////////////////////////////// |
(...skipping 78 matching lines...) |
729 return 0; | 738 return 0; |
730 } | 739 } |
731 | 740 |
732 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT; | 741 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT; |
733 usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; | 742 usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; |
734 usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; | 743 usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; |
735 | 744 |
736 VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT : | 745 VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT : |
737 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; | 746 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; |
738 | 747 |
739 // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is | 748 VkImage image = VK_NULL_HANDLE; |
740 // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set | 749 VkDeviceMemory alloc = VK_NULL_HANDLE; |
741 // to 1. | |
742 GrVkImage::ImageDesc imageDesc; | |
743 imageDesc.fImageType = VK_IMAGE_TYPE_2D; | |
744 imageDesc.fFormat = pixelFormat; | |
745 imageDesc.fWidth = w; | |
746 imageDesc.fHeight = h; | |
747 imageDesc.fLevels = 1; | |
748 imageDesc.fSamples = 1; | |
749 imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL; | |
750 imageDesc.fUsageFlags = usageFlags; | |
751 imageDesc.fMemProps = memProps; | |
752 | 750 |
753 const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(this, imageDesc); | 751 VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL; |
754 if (!imageRsrc) { | 752 VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling) |
| 753 ? VK_IMAGE_LAYOUT_PREINITIALIZED |
| 754 : VK_IMAGE_LAYOUT_UNDEFINED; |
| 755 |
| 756 // Create Image |
| 757 VkSampleCountFlagBits vkSamples; |
| 758 if (!GrSampleCountToVkSampleCount(1, &vkSamples)) { |
755 return 0; | 759 return 0; |
756 } | 760 } |
757 | 761 |
| 762 const VkImageCreateInfo imageCreateInfo = { |
| 763 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType |
| 764 NULL, // pNext |
| 765 0, // VkImageCreateFlags |
| 766 VK_IMAGE_TYPE_2D, // VkImageType |
| 767 pixelFormat, // VkFormat |
| 768 { w, h, 1 }, // VkExtent3D |
| 769 1, // mipLevels |
| 770 1, // arrayLayers |
| 771 vkSamples, // samples |
| 772 imageTiling, // VkImageTiling |
| 773 usageFlags, // VkImageUsageFlags |
| 774 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode |
| 775 0, // queueFamilyCount |
| 776 0, // pQueueFamilyIndices |
| 777 initialLayout // initialLayout |
| 778 }; |
| 779 |
 | 780 GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo, nullptr, &image)); |
| 781 |
| 782 if (!GrVkMemory::AllocAndBindImageMemory(this, image, memProps, &alloc)) { |
| 783 VK_CALL(DestroyImage(this->device(), image, nullptr)); |
| 784 return 0; |
| 785 } |
| 786 |
758 if (srcData) { | 787 if (srcData) { |
759 if (linearTiling) { | 788 if (linearTiling) { |
760 const VkImageSubresource subres = { | 789 const VkImageSubresource subres = { |
761 VK_IMAGE_ASPECT_COLOR_BIT, | 790 VK_IMAGE_ASPECT_COLOR_BIT, |
762 0, // mipLevel | 791 0, // mipLevel |
763 0, // arraySlice | 792 0, // arraySlice |
764 }; | 793 }; |
765 VkSubresourceLayout layout; | 794 VkSubresourceLayout layout; |
766 VkResult err; | 795 VkResult err; |
767 | 796 |
768 const GrVkInterface* interface = this->vkInterface(); | 797 VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout)); |
769 | |
770 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice, | |
771 imageRsrc->fImage, | |
772 &subres, | |
773 &layout)); | |
774 | 798 |
775 void* mapPtr; | 799 void* mapPtr; |
776 err = GR_VK_CALL(interface, MapMemory(fDevice, | 800 err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr)); |
777 imageRsrc->fAlloc, | |
778 0, | |
779 layout.rowPitch * h, | |
780 0, | |
781 &mapPtr)); | |
782 if (err) { | 801 if (err) { |
783 imageRsrc->unref(this); | 802 VK_CALL(FreeMemory(this->device(), alloc, nullptr)); |
| 803 VK_CALL(DestroyImage(this->device(), image, nullptr)); |
784 return 0; | 804 return 0; |
785 } | 805 } |
786 | 806 |
787 size_t bpp = GrBytesPerPixel(config); | 807 size_t bpp = GrBytesPerPixel(config); |
788 size_t rowCopyBytes = bpp * w; | 808 size_t rowCopyBytes = bpp * w; |
789 // If there is no padding on dst (layout.rowPitch) we can do a singl
e memcopy. | 809 // If there is no padding on dst (layout.rowPitch) we can do a singl
e memcopy. |
790 // This assumes the srcData comes in with no padding. | 810 // This assumes the srcData comes in with no padding. |
791 if (rowCopyBytes == layout.rowPitch) { | 811 if (rowCopyBytes == layout.rowPitch) { |
792 memcpy(mapPtr, srcData, rowCopyBytes * h); | 812 memcpy(mapPtr, srcData, rowCopyBytes * h); |
793 } else { | 813 } else { |
794 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, w, rowCopyBytes, | 814 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes, |
795 h); | 815 rowCopyBytes, h); |
796 } | 816 } |
797 GR_VK_CALL(interface, UnmapMemory(fDevice, imageRsrc->fAlloc)); | 817 VK_CALL(UnmapMemory(fDevice, alloc)); |
798 } else { | 818 } else { |
799 // TODO: Add support for copying to optimal tiling | 819 // TODO: Add support for copying to optimal tiling |
800 SkASSERT(false); | 820 SkASSERT(false); |
801 } | 821 } |
802 } | 822 } |
803 | 823 |
804 return (GrBackendObject)imageRsrc; | 824 GrVkTextureInfo* info = new GrVkTextureInfo; |
| 825 info->fImage = image; |
| 826 info->fAlloc = alloc; |
| 827 info->fImageTiling = imageTiling; |
| 828 info->fImageLayout = initialLayout; |
| 829 |
| 830 return (GrBackendObject)info; |
805 } | 831 } |
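A condensed sketch of the linear-tiling upload this function now performs after vkCreateImage and memory binding, written with raw Vulkan calls instead of VK_CALL/GrVkMemory; the helper name and the trimmed error handling are assumptions made for brevity:

    #include <vulkan/vulkan.h>
    #include <cstring>

    bool upload_linear(VkDevice device, VkImage image, VkDeviceMemory alloc,
                       const void* srcData, size_t rowBytes, uint32_t h) {
        // Ask the driver how the linear image is laid out in memory.
        VkImageSubresource subres = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0 };
        VkSubresourceLayout layout;
        vkGetImageSubresourceLayout(device, image, &subres, &layout);

        void* mapPtr = nullptr;
        if (vkMapMemory(device, alloc, 0, layout.rowPitch * h, 0, &mapPtr) != VK_SUCCESS) {
            return false;
        }

        const char* src = static_cast<const char*>(srcData);
        char* dst = static_cast<char*>(mapPtr);
        if (rowBytes == layout.rowPitch) {
            memcpy(dst, src, rowBytes * h);              // no padding: one big copy
        } else {
            for (uint32_t y = 0; y < h; ++y) {           // honor the driver's row pitch
                memcpy(dst + y * layout.rowPitch, src + y * rowBytes, rowBytes);
            }
        }
        vkUnmapMemory(device, alloc);
        return true;
    }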
806 | 832 |
807 bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const { | 833 bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const { |
808 GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id); | 834 const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id); |
809 | 835 |
810 if (backend && backend->fImage && backend->fAlloc) { | 836 if (backend && backend->fImage && backend->fAlloc) { |
811 VkMemoryRequirements req; | 837 VkMemoryRequirements req; |
812 memset(&req, 0, sizeof(req)); | 838 memset(&req, 0, sizeof(req)); |
813 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice, | 839 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice, |
814 backend->fImage, | 840 backend->fImage, |
815 &req)); | 841 &req)); |
816 // TODO: find a better check | 842 // TODO: find a better check |
817 // This will probably fail with a different driver | 843 // This will probably fail with a different driver |
818 return (req.size > 0) && (req.size <= 8192 * 8192); | 844 return (req.size > 0) && (req.size <= 8192 * 8192); |
819 } | 845 } |
820 | 846 |
821 return false; | 847 return false; |
822 } | 848 } |
823 | 849 |
824 void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) { | 850 void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) { |
825 GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id); | 851 const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id); |
826 | 852 |
827 if (backend) { | 853 if (backend) { |
828 if (!abandon) { | 854 if (!abandon) { |
829 backend->unref(this); | 855 // something in the command buffer may still be using this, so force submit |
830 } else { | 856 this->submitCommandBuffer(kForce_SyncQueue); |
831 backend->unrefAndAbandon(); | 857 |
| 858 VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr)); |
| 859 VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr)); |
832 } | 860 } |
| 861 delete backend; |
833 } | 862 } |
834 } | 863 } |
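A sketch of the adopted-image teardown the new delete path performs. The real code forces a command buffer submit (kForce_SyncQueue) so nothing in flight still references the image; a plain vkQueueWaitIdle() stands in for that here, and the helper name is illustrative:

    void delete_backend_texture(VkDevice device, VkQueue queue,
                                GrVkTextureInfo* backend, bool abandon) {
        if (!backend) {
            return;
        }
        if (!abandon) {
            vkQueueWaitIdle(queue);                              // drain pending work first
            vkFreeMemory(device, backend->fAlloc, nullptr);      // same order as above: memory,
            vkDestroyImage(device, backend->fImage, nullptr);    // then the image
        }
        delete backend;                                          // the info struct itself
    }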
835 | 864 |
836 //////////////////////////////////////////////////////////////////////////////// | 865 //////////////////////////////////////////////////////////////////////////////// |
837 | 866 |
838 void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask, | 867 void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask, |
839 VkPipelineStageFlags dstStageMask, | 868 VkPipelineStageFlags dstStageMask, |
840 bool byRegion, | 869 bool byRegion, |
841 VkMemoryBarrier* barrier) const { | 870 VkMemoryBarrier* barrier) const { |
842 SkASSERT(fCurrentCmdBuffer); | 871 SkASSERT(fCurrentCmdBuffer); |
(...skipping 621 matching lines...) |
1464 int set_a_break_pt_here = 9; | 1493 int set_a_break_pt_here = 9; |
1465 aglSwapBuffers(aglGetCurrentContext()); | 1494 aglSwapBuffers(aglGetCurrentContext()); |
1466 #elif defined(SK_BUILD_FOR_WIN32) | 1495 #elif defined(SK_BUILD_FOR_WIN32) |
1467 SwapBuf(); | 1496 SwapBuf(); |
1468 int set_a_break_pt_here = 9; | 1497 int set_a_break_pt_here = 9; |
1469 SwapBuf(); | 1498 SwapBuf(); |
1470 #endif | 1499 #endif |
1471 #endif | 1500 #endif |
1472 } | 1501 } |
1473 | 1502 |