Chromium Code Reviews

Diff: src/gpu/GrContext.cpp

Issue 22850006: Replace uses of GrAssert by SkASSERT. (Closed) Base URL: https://skia.googlecode.com/svn/trunk
Patch Set: rebase Created 7 years, 4 months ago
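
The change is mechanical: each GrAssert(expression) becomes SkASSERT(expression), with equivalent debug-only behavior. As a rough, self-contained sketch of the pattern being applied (the helper below is hypothetical and not taken from this patch; it assumes SkASSERT is available via SkTypes.h, as it is in Skia):

    #include "SkTypes.h"   // SkASSERT: checks the condition in debug builds, compiles away in release

    // Hypothetical illustration of the rewrite; not code from GrContext.cpp.
    static void set_gpu(void* gpu) {
        // Before this CL:  GrAssert(NULL != gpu);
        // After this CL:
        SkASSERT(NULL != gpu);
    }

The diff below shows the same substitution applied throughout GrContext.cpp.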
 
 /*
  * Copyright 2011 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
 
 #include "GrContext.h"
(...skipping 44 matching lines...)
 
 static const size_t MAX_TEXTURE_CACHE_COUNT = 2048;
 static const size_t MAX_TEXTURE_CACHE_BYTES = GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT * 1024 * 1024;
 
 static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
 static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
 
 static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
 static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
 
-#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)
+#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
 
 // Glorified typedef to avoid including GrDrawState.h in GrContext.h
 class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
 
 GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
     GrContext* context = SkNEW(GrContext);
     if (context->init(backend, backendContext)) {
         return context;
     } else {
         context->unref();
(...skipping 24 matching lines...)
     fDrawBuffer = NULL;
     fDrawBufferVBAllocPool = NULL;
     fDrawBufferIBAllocPool = NULL;
     fAARectRenderer = NULL;
     fOvalRenderer = NULL;
     fViewMatrix.reset();
     fMaxTextureSizeOverride = 1 << 20;
 }
 
 bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
-    GrAssert(NULL == fGpu);
+    SkASSERT(NULL == fGpu);
 
     fGpu = GrGpu::Create(backend, backendContext, this);
     if (NULL == fGpu) {
         return false;
     }
 
     fDrawState = SkNEW(GrDrawState);
     fGpu->setDrawState(fDrawState);
 
     fTextureCache = SkNEW_ARGS(GrResourceCache,
(...skipping 239 matching lines...)
         rtDesc.fHeight = GrNextPow2(desc.fHeight);
         int bpp = GrBytesPerPixel(desc.fConfig);
         SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
         stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                      srcData, desc.fWidth, desc.fHeight, bpp);
 
         size_t stretchedRowBytes = rtDesc.fWidth * bpp;
 
         SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(),
                                                               stretchedRowBytes);
-        GrAssert(NULL != texture);
+        SkASSERT(NULL != texture);
     }
 
     return texture;
 }
 
 GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                     const GrTextureDesc& desc,
                                     const GrCacheID& cacheID,
                                     void* srcData,
                                     size_t rowBytes) {
(...skipping 30 matching lines...)
         // necessary space before adding it.
         textureCache->purgeAsNeeded(1, texture->sizeInBytes());
         // Make the resource exclusive so future 'find' calls don't return it
         textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
     }
     return texture;
 }
 
 GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
 
-    GrAssert((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
+    SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
              !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
 
     // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
-    GrAssert(this->isConfigRenderable(kAlpha_8_GrPixelConfig) ||
+    SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig) ||
              !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
              (inDesc.fConfig != kAlpha_8_GrPixelConfig));
 
     if (!fGpu->caps()->reuseScratchTextures()) {
         // If we're never recycling scratch textures we can
         // always make them the right size
         return create_scratch_texture(fGpu, fTextureCache, inDesc);
     }
 
     GrTextureDesc desc = inDesc;
(...skipping 44 matching lines...)
 }
 
 void GrContext::addExistingTextureToCache(GrTexture* texture) {
 
     if (NULL == texture) {
         return;
     }
 
     // This texture should already have a cache entry since it was once
     // attached
-    GrAssert(NULL != texture->getCacheEntry());
+    SkASSERT(NULL != texture->getCacheEntry());
 
     // Conceptually, the cache entry is going to assume responsibility
     // for the creation ref.
-    GrAssert(texture->unique());
+    SkASSERT(texture->unique());
 
     // Since this texture came from an AutoScratchTexture it should
     // still be in the exclusive pile
     fTextureCache->makeNonExclusive(texture->getCacheEntry());
 
     if (fGpu->caps()->reuseScratchTextures()) {
         this->purgeCache();
     } else {
         // When we aren't reusing textures we know this scratch texture
         // will never be reused and would be just wasting time in the cache
         fTextureCache->deleteResource(texture->getCacheEntry());
     }
 }
 
 
 void GrContext::unlockScratchTexture(GrTexture* texture) {
     ASSERT_OWNED_RESOURCE(texture);
-    GrAssert(NULL != texture->getCacheEntry());
+    SkASSERT(NULL != texture->getCacheEntry());
 
     // If this is a scratch texture we detached it from the cache
     // while it was locked (to avoid two callers simultaneously getting
     // the same texture).
     if (texture->getCacheEntry()->key().isScratch()) {
         fTextureCache->makeNonExclusive(texture->getCacheEntry());
         this->purgeCache();
     }
 }
 
 void GrContext::purgeCache() {
     if (NULL != fTextureCache) {
         fTextureCache->purgeAsNeeded();
     }
 }
 
 bool GrContext::OverbudgetCB(void* data) {
-    GrAssert(NULL != data);
+    SkASSERT(NULL != data);
 
     GrContext* context = reinterpret_cast<GrContext*>(data);
 
     // Flush the InOrderDrawBuffer to possibly free up some textures
     context->flush();
 
     // TODO: actually track flush's behavior rather than always just
     // returning true.
     return true;
 }
(...skipping 821 matching lines...)
                                                      GrConfigConversionEffect::kNone_PMConversion,
                                                      textureMatrix));
                 }
                 swapRAndB = false; // we will handle the swap in the draw.
 
                 // We protect the existing geometry here since it may not be
                 // clear to the caller that a draw operation (i.e., drawSimpleRect)
                 // can be invoked in this method
                 GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
                 GrDrawState* drawState = fGpu->drawState();
-                GrAssert(effect);
+                SkASSERT(effect);
                 drawState->addColorEffect(effect);
 
                 drawState->setRenderTarget(texture->asRenderTarget());
                 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
                 fGpu->drawSimpleRect(rect, NULL);
                 // we want to read back from the scratch's origin
                 left = 0;
                 top = 0;
                 target = texture->asRenderTarget();
             }
         }
     }
     if (!fGpu->readPixels(target,
                           left, top, width, height,
                           readConfig, buffer, rowBytes)) {
         return false;
     }
     // Perform any conversions we weren't able to perform using a scratch texture.
     if (unpremul || swapRAndB) {
         // These are initialized to suppress a warning
         SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
         SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;
 
         SkDEBUGCODE(bool c8888IsValid =) grconfig_to_config8888(dstConfig, false, &srcC8888);
         grconfig_to_config8888(dstConfig, unpremul, &dstC8888);
 
         if (swapRAndB) {
-            GrAssert(c8888IsValid); // we should only do r/b swap on 8888 configs
+            SkASSERT(c8888IsValid); // we should only do r/b swap on 8888 configs
             srcC8888 = swap_config8888_red_and_blue(srcC8888);
         }
-        GrAssert(c8888IsValid);
+        SkASSERT(c8888IsValid);
         uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
         SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
                                   b32, rowBytes, srcC8888,
                                   width, height);
     }
     return true;
 }
 
 void GrContext::resolveRenderTarget(GrRenderTarget* target) {
-    GrAssert(target);
+    SkASSERT(target);
     ASSERT_OWNED_RESOURCE(target);
     // In the future we may track whether there are any pending draws to this
     // target. We don't today so we always perform a flush. We don't promise
     // this to our clients, though.
     this->flush();
     fGpu->resolveRenderTarget(target);
 }
 
 void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
     if (NULL == src || NULL == dst) {
(...skipping 97 matching lines...)
     if (kUnpremul_PixelOpsFlag & flags) {
         if (!GrPixelConfigIs8888(srcConfig)) {
             return false;
         }
         effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
         // handle the unpremul step on the CPU if we couldn't create an effect to do it.
         if (NULL == effect) {
             SkCanvas::Config8888 srcConfig8888, dstConfig8888;
             GR_DEBUGCODE(bool success = )
                 grconfig_to_config8888(srcConfig, true, &srcConfig8888);
-            GrAssert(success);
+            SkASSERT(success);
             GR_DEBUGCODE(success = )
                 grconfig_to_config8888(srcConfig, false, &dstConfig8888);
-            GrAssert(success);
+            SkASSERT(success);
             const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
             tmpPixels.reset(width * height);
             SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                       src, rowBytes, srcConfig8888,
                                       width, height);
             buffer = tmpPixels.get();
             rowBytes = 4 * width;
         }
     }
     if (NULL == effect) {
(...skipping 10 matching lines...)
         return false;
     }
 
     // writeRenderTargetPixels can be called in the midst of drawing another
     // object (e.g., when uploading a SW path rendering to the gpu while
     // drawing a rect) so preserve the current geometry.
     SkMatrix matrix;
     matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
     GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
     GrDrawState* drawState = fGpu->drawState();
-    GrAssert(effect);
+    SkASSERT(effect);
     drawState->addColorEffect(effect);
 
     drawState->setRenderTarget(target);
 
     fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
     return true;
 }
 ////////////////////////////////////////////////////////////////////////////////
 
 GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
                                        BufferedDraw buffered,
                                        AutoRestoreEffects* are) {
     // All users of this draw state should be freeing up all effects when they're done.
     // Otherwise effects that own resources may keep those resources alive indefinitely.
-    GrAssert(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
+    SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
 
     if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
         fDrawBuffer->flush();
         fLastDrawWasBuffered = kNo_BufferedDraw;
     }
     ASSERT_OWNED_RESOURCE(fRenderTarget.get());
     if (NULL != paint) {
-        GrAssert(NULL != are);
+        SkASSERT(NULL != are);
         are->set(fDrawState);
         fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
 #if GR_DEBUG_PARTIAL_COVERAGE_CHECK
         if ((paint->hasMask() || 0xff != paint->fCoverage) &&
             !fGpu->canApplyCoverage()) {
             GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
         }
 #endif
     } else {
         fDrawState->reset(fViewMatrix);
         fDrawState->setRenderTarget(fRenderTarget.get());
     }
     GrDrawTarget* target;
     if (kYes_BufferedDraw == buffered) {
         fLastDrawWasBuffered = kYes_BufferedDraw;
         target = fDrawBuffer;
     } else {
-        GrAssert(kNo_BufferedDraw == buffered);
+        SkASSERT(kNo_BufferedDraw == buffered);
         fLastDrawWasBuffered = kNo_BufferedDraw;
         target = fGpu;
     }
     fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
                                                       !fClip->fClipStack->isWideOpen());
     target->setClip(fClip);
-    GrAssert(fDrawState == target->drawState());
+    SkASSERT(fDrawState == target->drawState());
     return target;
 }
 
 /*
  * This method finds a path renderer that can draw the specified path on
  * the provided target.
  * Due to its expense, the software path renderer has split out so it can
  * can be individually allowed/disallowed via the "allowSW" boolean.
  */
 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
(...skipping 33 matching lines...)
     intptr_t mask = 1 << shift;
     if (pred) {
         bits |= mask;
     } else {
         bits &= ~mask;
     }
     return bits;
 }
 
 void GrContext::setupDrawBuffer() {
-
-    GrAssert(NULL == fDrawBuffer);
-    GrAssert(NULL == fDrawBufferVBAllocPool);
-    GrAssert(NULL == fDrawBufferIBAllocPool);
+    SkASSERT(NULL == fDrawBuffer);
+    SkASSERT(NULL == fDrawBufferVBAllocPool);
+    SkASSERT(NULL == fDrawBufferIBAllocPool);
 
     fDrawBufferVBAllocPool =
         SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
                                              DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                              DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
     fDrawBufferIBAllocPool =
         SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
                                             DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                             DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
 
(...skipping 53 matching lines...)
         return NULL;
     }
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 #if GR_CACHE_STATS
 void GrContext::printCacheStats() const {
     fTextureCache->printStats();
 }
 #endif
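
One idiom visible in the patched code (the fGpu->createTexture call in an early hunk, and the grconfig_to_config8888 calls in the pixel read/write paths) is worth noting: because SkASSERT, like GrAssert before it, evaluates to nothing outside of debug builds, a value that exists only to be asserted on is captured inside SkDEBUGCODE or GR_DEBUGCODE. The side-effecting call still runs in release builds, while the otherwise-unused variable exists only when the assert does. A minimal sketch of that idiom, assuming SkTypes.h provides both macros (the make_resource function is hypothetical and used only for illustration):

    #include "SkTypes.h"   // SkASSERT and SkDEBUGCODE

    // Hypothetical stand-in for a call such as fGpu->createTexture(...).
    static void* make_resource() { return reinterpret_cast<void*>(1); }

    static void use_resource() {
        // In release builds this collapses to just "make_resource();", so the call's
        // side effects are kept without leaving an unused 'res' variable behind.
        SkDEBUGCODE(void* res = ) make_resource();
        SkASSERT(NULL != res);   // only compiled when SK_DEBUG is defined
    }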