| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkNormalBevelSource.h" | 8 #include "SkNormalBevelSource.h" |
| 9 | 9 |
| 10 #include "SkNormalSource.h" | 10 #include "SkNormalSource.h" |
| (...skipping 227 matching lines...) |
| 238 } | 238 } |
| 239 | 239 |
| 240 SkNormalSource::BevelType fBevelType; | 240 SkNormalSource::BevelType fBevelType; |
| 241 SkScalar fBevelWidth; | 241 SkScalar fBevelWidth; |
| 242 SkScalar fBevelHeight; | 242 SkScalar fBevelHeight; |
| 243 }; | 243 }; |
| 244 | 244 |
| 245 sk_sp<GrFragmentProcessor> SkNormalBevelSourceImpl::asFragmentProcessor( | 245 sk_sp<GrFragmentProcessor> SkNormalBevelSourceImpl::asFragmentProcessor( |
| 246 const SkShader::AsFPArgs& args) const { | 246 const SkShader::AsFPArgs& args) const { |
| 247 | 247 |
| | 248 // This assumes a uniform scale. Anisotropic scaling might not be handled gracefully. |
| 248 SkScalar maxScale = args.fViewMatrix->getMaxScale(); | 249 SkScalar maxScale = args.fViewMatrix->getMaxScale(); |
| 249 | 250 |
| 250 // Providing device-space width and height | 251 // Providing device-space width and height |
| 251 return sk_make_sp<NormalBevelFP>(fType, maxScale * fWidth, maxScale * fHeight); | 252 return sk_make_sp<NormalBevelFP>(fType, maxScale * fWidth, maxScale * fHeight); |
| 252 } | 253 } |
| 253 | 254 |
| 254 #endif // SK_SUPPORT_GPU | 255 #endif // SK_SUPPORT_GPU |
| 255 | 256 |
| 256 //////////////////////////////////////////////////////////////////////////// | 257 //////////////////////////////////////////////////////////////////////////// |
| 257 | 258 |
| 258 SkNormalBevelSourceImpl::Provider::Provider() {} | 259 SkNormalBevelSourceImpl::Provider::Provider() {} |
| 259 | 260 |
| 260 SkNormalBevelSourceImpl::Provider::~Provider() {} | 261 SkNormalBevelSourceImpl::Provider::~Provider() {} |
| 261 | 262 |
| 262 SkNormalSource::Provider* SkNormalBevelSourceImpl::asProvider(const SkShader::ContextRec &rec, | 263 SkNormalSource::Provider* SkNormalBevelSourceImpl::asProvider(const SkShader::ContextRec &rec, |
| 263 void *storage) const { | 264 void *storage) const { |
| 264 return new (storage) Provider(); | 265 return new (storage) Provider(); |
| 265 } | 266 } |
| 266 | 267 |
| 267 size_t SkNormalBevelSourceImpl::providerSize(const SkShader::ContextRec&) const { | 268 size_t SkNormalBevelSourceImpl::providerSize(const SkShader::ContextRec&) const { |
| 268 return sizeof(Provider); | 269 return sizeof(Provider); |
| 269 } | 270 } |
| 270 | 271 |
| | 272 // TODO Implement feature for the CPU pipeline |
| 271 void SkNormalBevelSourceImpl::Provider::fillScanLine(int x, int y, SkPoint3 output[], | 273 void SkNormalBevelSourceImpl::Provider::fillScanLine(int x, int y, SkPoint3 output[], |
| 272 int count) const { | 274 int count) const { |
| 273 for (int i = 0; i < count; i++) { | 275 for (int i = 0; i < count; i++) { |
| 274 output[i] = {0.0f, 0.0f, 1.0f}; | 276 output[i] = {0.0f, 0.0f, 1.0f}; |
| 275 } | 277 } |
| 276 } | 278 } |
| 277 | 279 |
| 278 //////////////////////////////////////////////////////////////////////////////// | 280 //////////////////////////////////////////////////////////////////////////////// |
| 279 | 281 |
| 280 sk_sp<SkFlattenable> SkNormalBevelSourceImpl::CreateProc(SkReadBuffer& buf) { | 282 sk_sp<SkFlattenable> SkNormalBevelSourceImpl::CreateProc(SkReadBuffer& buf) { |
| 281 | 283 |
| 282 auto type = static_cast<SkNormalSource::BevelType>(buf.readInt()); | 284 auto type = static_cast<SkNormalSource::BevelType>(buf.readInt()); |
| 283 SkScalar width = buf.readScalar(); | 285 SkScalar width = buf.readScalar(); |
| 284 SkScalar height = buf.readScalar(); | 286 SkScalar height = buf.readScalar(); |
| 285 | 287 |
| 286 return sk_make_sp<SkNormalBevelSourceImpl>(type, width, height); | 288 return sk_make_sp<SkNormalBevelSourceImpl>(type, width, height); |
| 287 } | 289 } |
| 288 | 290 |
| 289 void SkNormalBevelSourceImpl::flatten(SkWriteBuffer& buf) const { | 291 void SkNormalBevelSourceImpl::flatten(SkWriteBuffer& buf) const { |
| 290 this->INHERITED::flatten(buf); | 292 this->INHERITED::flatten(buf); |
| 291 | 293 |
| 292 buf.writeInt(static_cast<int>(fType)); | 294 buf.writeInt(static_cast<int>(fType)); |
| 293 buf.writeScalar(fWidth); | 295 buf.writeScalar(fWidth); |
| 294 buf.writeScalar(fHeight); | 296 buf.writeScalar(fHeight); |
| 295 } | 297 } |
| 296 | 298 |
| 297 //////////////////////////////////////////////////////////////////////////// | 299 //////////////////////////////////////////////////////////////////////////// |
| 298 | 300 |
| 299 sk_sp<SkNormalSource> SkNormalSource::MakeBevel(BevelType type, SkScalar width, SkScalar height) { | 301 sk_sp<SkNormalSource> SkNormalSource::MakeBevel(BevelType type, SkScalar width, SkScalar height) { |
| 300 /* TODO make sure this checks are tolerant enough to account for loss of conversion when GPUs | 302 /* TODO make sure these checks are tolerant enough to account for loss of conversion when GPUs |
| 301 use 16-bit float types. We don't want to assume stuff is non-zero on the GPU and be wrong.*/ | 303 use 16-bit float types. We don't want to assume stuff is non-zero on the GPU and be wrong.*/ |
| 302 SkASSERT(width > 0.0f && !SkScalarNearlyZero(width)); | 304 SkASSERT(width > 0.0f && !SkScalarNearlyZero(width)); |
| 303 if (SkScalarNearlyZero(height)) { | 305 if (SkScalarNearlyZero(height)) { |
| 304 return SkNormalSource::MakeFlat(); | 306 return SkNormalSource::MakeFlat(); |
| 305 } | 307 } |
| 306 | 308 |
| 307 return sk_make_sp<SkNormalBevelSourceImpl>(type, width, height); | 309 return sk_make_sp<SkNormalBevelSourceImpl>(type, width, height); |
| 308 } | 310 } |
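
The comment added at new line 248 flags that converting the bevel width and height to device space via getMaxScale() assumes a uniform scale. Below is a minimal sketch of why that assumption matters, using only SkMatrix::getMaxScale() and getMinScale(); the matrices and the bevel width are illustrative values, not taken from this patch:

    #include "SkMatrix.h"

    // Sketch: getMaxScale() is an exact device-space factor only when the view
    // matrix scales uniformly. Under anisotropic scaling, the max and min scales
    // disagree, so one factor cannot convert both bevel dimensions correctly.
    static void deviceSpaceBevelSketch() {
        SkMatrix uniform = SkMatrix::MakeScale(2.0f, 2.0f);
        SkMatrix aniso   = SkMatrix::MakeScale(2.0f, 0.5f);
        const SkScalar bevelWidth = 4.0f;  // hypothetical local-space width

        // Uniform case: max and min scale agree, so the conversion is exact.
        SkASSERT(SkScalarNearlyEqual(uniform.getMaxScale(), uniform.getMinScale()));
        SkScalar exactDeviceWidth = uniform.getMaxScale() * bevelWidth;   // 8.0f

        // Anisotropic case: getMaxScale() picks the larger axis, so the bevel
        // ends up over-sized along the axis that was scaled down by 0.5.
        SkScalar overEstimate  = aniso.getMaxScale() * bevelWidth;        // 8.0f
        SkScalar minorAxisSize = aniso.getMinScale() * bevelWidth;        // 2.0f

        SkDebugf("uniform %f, over-estimate %f, minor axis %f\n",
                 exactDeviceWidth, overEstimate, minorAxisSize);
    }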
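
The MakeBevel factory at new lines 301-310 asserts on a near-zero width and degrades a near-zero height to the flat normal source. A hedged usage sketch of that fallback follows; BevelType::kLinear is assumed here as an illustrative enum value and does not appear in this diff:

    #include "SkNormalSource.h"

    // Sketch of the degenerate-height fallback: a bevel whose height is nearly
    // zero looks flat, so the factory returns the flat normal source instead of
    // constructing an SkNormalBevelSourceImpl.
    static sk_sp<SkNormalSource> makeBevelOrFlat(SkScalar width, SkScalar height) {
        // width must be positive and not nearly zero, or the SkASSERT at new
        // line 304 fires.
        return SkNormalSource::MakeBevel(SkNormalSource::BevelType::kLinear,  // assumed enum value
                                         width, height);
    }

    // makeBevelOrFlat(5.0f, 1e-7f) behaves like SkNormalSource::MakeFlat(): the
    // provider fills every normal with (0, 0, 1).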