| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2010 Google Inc. | 2 * Copyright 2010 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 | 8 |
| 9 #include "GrGpu.h" | 9 #include "GrGpu.h" |
| 10 | 10 |
| (...skipping 28 matching lines...) |
| 39 fIndexBuffer.reset(di.indexBuffer()); | 39 fIndexBuffer.reset(di.indexBuffer()); |
| 40 | 40 |
| 41 return *this; | 41 return *this; |
| 42 } | 42 } |
| 43 | 43 |
| 44 //////////////////////////////////////////////////////////////////////////////// | 44 //////////////////////////////////////////////////////////////////////////////// |
| 45 | 45 |
| 46 GrGpu::GrGpu(GrContext* context) | 46 GrGpu::GrGpu(GrContext* context) |
| 47 : fResetTimestamp(kExpiredTimestamp+1) | 47 : fResetTimestamp(kExpiredTimestamp+1) |
| 48 , fResetBits(kAll_GrBackendState) | 48 , fResetBits(kAll_GrBackendState) |
| | 49 , fMultisampleSpecsAllocator(1) |
| 49 , fContext(context) { | 50 , fContext(context) { |
| 50 fMultisampleSpecs.emplace_back(0, 0, nullptr); // Index 0 is an invalid unique id. | |
| 51 } | 51 } |
| 52 | 52 |
| 53 GrGpu::~GrGpu() {} | 53 GrGpu::~GrGpu() {} |
| 54 | 54 |
| 55 void GrGpu::disconnect(DisconnectType) {} | 55 void GrGpu::disconnect(DisconnectType) {} |
| 56 | 56 |
| 57 //////////////////////////////////////////////////////////////////////////////// | 57 //////////////////////////////////////////////////////////////////////////////// |
| 58 | 58 |
| 59 bool GrGpu::makeCopyForTextureParams(int width, int height, const GrTextureParams& textureParams, | 59 bool GrGpu::makeCopyForTextureParams(int width, int height, const GrTextureParams& textureParams, |
| 60                                      GrTextureProducer::CopyParams* copyParams) const { | 60                                      GrTextureProducer::CopyParams* copyParams) const { |
| (...skipping 357 matching lines...) |
| 418 if (GrRenderTarget* target = surface->asRenderTarget()) { | 418 if (GrRenderTarget* target = surface->asRenderTarget()) { |
| 419 target->flagAsNeedingResolve(bounds); | 419 target->flagAsNeedingResolve(bounds); |
| 420 } | 420 } |
| 421 GrTexture* texture = surface->asTexture(); | 421 GrTexture* texture = surface->asTexture(); |
| 422 if (texture && 1 == mipLevels) { | 422 if (texture && 1 == mipLevels) { |
| 423 texture->texturePriv().dirtyMipMaps(true); | 423 texture->texturePriv().dirtyMipMaps(true); |
| 424 } | 424 } |
| 425 } | 425 } |
| 426 } | 426 } |
| 427 | 427 |
| | 428 inline static uint8_t multisample_specs_id(uint8_t numSamples, GrSurfaceOrigin origin, |
| | 429 const GrCaps& caps) { |
| | 430 if (!caps.sampleLocationsSupport()) { |
| | 431 return numSamples; |
| | 432 } |
| | 433 |
| | 434 SkASSERT(numSamples < 128); |
| | 435 SkASSERT(kTopLeft_GrSurfaceOrigin == origin || kBottomLeft_GrSurfaceOrigin == origin); |
| | 436 return (numSamples << 1) | (origin - 1); |
| | 437 |
| | 438 GR_STATIC_ASSERT(1 == kTopLeft_GrSurfaceOrigin); |
| | 439 GR_STATIC_ASSERT(2 == kBottomLeft_GrSurfaceOrigin); |
| | 440 } |
| | 441 |
| 428 const GrGpu::MultisampleSpecs& GrGpu::getMultisampleSpecs(GrRenderTarget* rt, | 442 const GrGpu::MultisampleSpecs& GrGpu::getMultisampleSpecs(GrRenderTarget* rt, |
| 429                                                           const GrStencilSettings& stencil) { | 443                                                           const GrStencilSettings& stencil) { |
| 430 SkASSERT(rt->desc().fSampleCnt > 1); | 444 const GrSurfaceDesc& desc = rt->desc(); |
| 431 | 445 uint8_t surfDescKey = multisample_specs_id(desc.fSampleCnt, desc.fOrigin, *this->caps()); |
| 432 #ifndef SK_DEBUG | 446 if (fMultisampleSpecsMap.count() > surfDescKey && fMultisampleSpecsMap[surfDescKey]) { |
| 433 // In debug mode we query the multisample info every time to verify the caching is correct. | 447 #if !defined(SK_DEBUG) |
| 434 if (uint8_t id = rt->renderTargetPriv().accessMultisampleSpecsID()) { | 448 // In debug mode we query the multisample info every time and verify the caching is correct. |
| 435 SkASSERT(id > 0 && id < fMultisampleSpecs.count()); | 449 return *fMultisampleSpecsMap[surfDescKey]; |
| 436 return fMultisampleSpecs[id]; | 450 #endif |
| 437 } | 451 } |
| 438 #endif | |
| 439 | |
| 440 int effectiveSampleCnt; | 452 int effectiveSampleCnt; |
| 441 SkSTArray<16, SkPoint, true> pattern; | 453 SkAutoTDeleteArray<SkPoint> locations(nullptr); |
| 442 this->onGetMultisampleSpecs(rt, stencil, &effectiveSampleCnt, &pattern); | 454 this->onGetMultisampleSpecs(rt, stencil, &effectiveSampleCnt, &locations); |
| 443 SkASSERT(effectiveSampleCnt >= rt->desc().fSampleCnt); | 455 SkASSERT(effectiveSampleCnt && effectiveSampleCnt >= desc.fSampleCnt); |
| 444 | 456 uint8_t effectiveKey = multisample_specs_id(effectiveSampleCnt, desc.fOrigin, *this->caps()); |
| 445 uint8_t id; | 457 if (fMultisampleSpecsMap.count() > effectiveKey && fMultisampleSpecsMap[effectiveKey]) { |
| 446 if (this->caps()->sampleLocationsSupport()) { | 458 const MultisampleSpecs& specs = *fMultisampleSpecsMap[effectiveKey]; |
| 447 SkASSERT(pattern.count() == effectiveSampleCnt); | 459 SkASSERT(effectiveKey == specs.fUniqueID); |
| 448 const auto& emplaceResult = | 460 SkASSERT(effectiveSampleCnt == specs.fEffectiveSampleCnt); |
| 449 fMultisampleSpecsIdMap.emplace(pattern, SkTMin(fMultisampleSpecs.count(), 255)); | 461 SkASSERT(!this->caps()->sampleLocationsSupport() || |
| 450 id = emplaceResult.first->second; | 462 !memcmp(locations.get(), specs.fSampleLocations.get(), |
| 451 if (emplaceResult.second) { | 463 effectiveSampleCnt * sizeof(SkPoint))); |
| 452 // This means the emplace did not find the pattern in the map already, and therefore an | 464 SkASSERT(surfDescKey <= effectiveKey); |
| 453 // actual insertion took place. (We don't expect to see many unique sample patterns.) | 465 SkASSERT(!fMultisampleSpecsMap[surfDescKey] || fMultisampleSpecsMap[surfDescKey] == &specs); |
| 454 const SkPoint* sampleLocations = emplaceResult.first->first.begin(); | 466 fMultisampleSpecsMap[surfDescKey] = &specs; |
| 455 SkASSERT(id == fMultisampleSpecs.count()); | 467 return specs; |
| 456 fMultisampleSpecs.emplace_back(id, effectiveSampleCnt, sampleLocations); | |
| 457 } | |
| 458 } else { | |
| 459 id = effectiveSampleCnt; | |
| 460 for (int i = fMultisampleSpecs.count(); i <= id; ++i) { | |
| 461 fMultisampleSpecs.emplace_back(i, i, nullptr); | |
| 462 } | |
| 463 } | 468 } |
| 464 SkASSERT(id > 0); | 469 const MultisampleSpecs& specs = *new (&fMultisampleSpecsAllocator) |
| 465 SkASSERT(!rt->renderTargetPriv().accessMultisampleSpecsID() || | 470 MultisampleSpecs{effectiveKey, effectiveSampleCnt, locations.release()}; |
| 466 rt->renderTargetPriv().accessMultisampleSpecsID() == id); | 471 if (fMultisampleSpecsMap.count() <= effectiveKey) { |
| 467 | 472 int n = 1 + effectiveKey - fMultisampleSpecsMap.count(); |
| 468 rt->renderTargetPriv().accessMultisampleSpecsID() = id; | 473 fMultisampleSpecsMap.push_back_n(n, (const MultisampleSpecs*) nullptr); |
| 469 return fMultisampleSpecs[id]; | 474 } |
| | 475 fMultisampleSpecsMap[effectiveKey] = &specs; |
| | 476 if (effectiveSampleCnt != desc.fSampleCnt) { |
| | 477 SkASSERT(surfDescKey < effectiveKey); |
| | 478 fMultisampleSpecsMap[surfDescKey] = &specs; |
| | 479 } |
| | 480 return specs; |
| 470 } | 481 } |
| 471 | 482 |
| 472 bool GrGpu::SamplePatternComparator::operator()(const SamplePattern& a, | |
| 473 const SamplePattern& b) const { | |
| 474 if (a.count() != b.count()) { | |
| 475 return a.count() < b.count(); | |
| 476 } | |
| 477 for (int i = 0; i < a.count(); ++i) { | |
| 478 // This doesn't have geometric meaning. We just need to define an ordering for std::map. | |
| 479 if (a[i].x() != b[i].x()) { | |
| 480 return a[i].x() < b[i].x(); | |
| 481 } | |
| 482 if (a[i].y() != b[i].y()) { | |
| 483 return a[i].y() < b[i].y(); | |
| 484 } | |
| 485 } | |
| 486 return false; // Equal. | |
| 487 } | |
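For context on what the NEW side is doing: the added `multisample_specs_id()` packs the sample count (and, when sample locations are supported, the surface origin) into a small integer key, and `getMultisampleSpecs()` uses that key to index a flat array of cached `MultisampleSpecs` pointers instead of the old `std::map` keyed by the full sample pattern. Below is a minimal standalone sketch of that keyed-caching idea; `Origin`, `Caps`, `Specs`, and `SpecsCache` are simplified placeholders rather than Skia types, since the GrGpu headers are not part of this diff.

```cpp
// Minimal sketch of the keyed multisample-specs cache used on the NEW side above.
// Placeholder types only; not the actual Skia API.
#include <cassert>
#include <cstdint>
#include <memory>
#include <vector>

enum Origin { kTopLeft = 1, kBottomLeft = 2 };

struct Caps { bool sampleLocationsSupport; };

struct Specs {
    uint8_t uniqueID;
    int     effectiveSampleCnt;
};

// Mirrors multisample_specs_id(): without sample-location support the key is just
// the sample count; with it, the surface origin is folded into the low bit.
static uint8_t specs_key(uint8_t numSamples, Origin origin, const Caps& caps) {
    if (!caps.sampleLocationsSupport) {
        return numSamples;
    }
    assert(numSamples < 128);
    return (numSamples << 1) | (origin - 1);
}

class SpecsCache {
public:
    // Returns the cached entry for (numSamples, origin), creating it on first use.
    const Specs& get(uint8_t numSamples, Origin origin, const Caps& caps) {
        uint8_t key = specs_key(numSamples, origin, caps);
        if (key >= fMap.size()) {
            fMap.resize(key + 1);  // grow the flat map, analogous to push_back_n
        }
        if (!fMap[key]) {
            fMap[key] = std::make_unique<Specs>(Specs{key, numSamples});
        }
        return *fMap[key];
    }

private:
    // Sparse array indexed by key; the CL stores raw pointers into an allocator instead.
    std::vector<std::unique_ptr<Specs>> fMap;
};
```

With this scheme, two render targets that share a sample count and origin resolve to the same cached entry after a single array lookup, which is the effect the new `fMultisampleSpecsMap` plus `fMultisampleSpecsAllocator` pairing in the CL achieves while dropping the per-pattern `std::map` and its `SamplePatternComparator`.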