Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(33)

Unified Diff: ppapi/tests/test_video_decoder.cc

Issue 6961018: Pepper Video Decoder API tester plugin. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Simple lint fixes. Created 9 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« ppapi/tests/test_video_decoder.h ('K') | « ppapi/tests/test_video_decoder.h ('k') | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: ppapi/tests/test_video_decoder.cc
diff --git a/ppapi/tests/test_video_decoder.cc b/ppapi/tests/test_video_decoder.cc
index 4279894f1c1bfd65cd070bbe1eccf59ecdb70605..d0a0bbb91f2d1cd1d9afb10c2f6680fd93a1aca4 100644
--- a/ppapi/tests/test_video_decoder.cc
+++ b/ppapi/tests/test_video_decoder.cc
@@ -4,34 +4,585 @@
#include "ppapi/tests/test_video_decoder.h"

#include <cassert>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <string>
#include <vector>

#include "ppapi/c/dev/pp_graphics_3d_dev.h"
#include "ppapi/c/dev/ppb_buffer_dev.h"
#include "ppapi/c/dev/ppb_testing_dev.h"
#include "ppapi/c/pp_errors.h"
#include "ppapi/cpp/dev/context_3d_dev.h"
#include "ppapi/cpp/dev/surface_3d_dev.h"
#include "ppapi/cpp/dev/video_decoder_dev.h"
#include "ppapi/lib/gl/include/GLES2/gl2.h"
#include "ppapi/tests/testing_instance.h"
REGISTER_TEST_CASE(VideoDecoder);

// Test-case initialization. Only the generic testing interface is required
// here; decoder/display interfaces are acquired by the helper classes below.
bool TestVideoDecoder::Init() {
  return InitTestingInterface();
}
// Runs every sub-test and logs one pass/fail line per test.
void TestVideoDecoder::RunTest() {
  instance_->LogTest("Configurations", TestConfigurations());
  instance_->LogTest("H264", TestH264());
}
// Quits the testing message loop for this plugin instance.
void TestVideoDecoder::QuitMessageLoop() {
  testing_interface_->QuitMessageLoop(instance_->pp_instance());
}
-std::string TestVideoDecoder::TestCreate() {
- PP_Resource decoder = video_decoder_interface_->Create(
- instance_->pp_instance(), NULL);
- if (decoder == 0) {
- return "Error creating the decoder";
- }
+std::string TestVideoDecoder::TestConfigurations() {
+ std::vector<uint32_t> empty_config;
+ std::vector<uint32_t> configs;
+ configs = pp::VideoDecoder::GetConfigs(instance_, empty_config);
PASS();
}
+
// Placeholder for the H.264 decode test; currently passes unconditionally.
std::string TestVideoDecoder::TestH264() {
  PASS();
}
+
// Pull-based video source to read video data from a file.
//
// Open() loads the whole file into memory; each subsequent Read() returns
// one Annex-B style unit delimited by 00 00 00 01 start codes.
class TestVideoSource {
 public:
  TestVideoSource()
      : file_length_(0),
        offset_(0),
        mem_(NULL) {}  // BUG FIX: |mem_| was left uninitialized.

  // BUG FIX: the backing buffer was never released (memory leak).
  ~TestVideoSource() { delete[] mem_; }

  // Reads the entire file at |url| into an internal buffer. Returns false if
  // the file cannot be opened. Safe to call again; any earlier buffer is
  // released first and the read position is reset.
  bool Open(const std::string& url) {
    // TODO(vmr): Use file_util::ReadFileToString or equivalent to read the
    // file if one-shot reading is used.
    std::ifstream file(url.c_str(),
                       std::ios::in | std::ios::binary | std::ios::ate);
    if (!file.good())
      return false;
    // Opened with std::ios::ate, so the get pointer is already at the end;
    // tellg() yields the file size directly.
    uint32_t length = static_cast<uint32_t>(file.tellg());
    file.seekg(0, std::ios::beg);
    delete[] mem_;  // BUG FIX: don't leak the buffer from a previous Open().
    mem_ = new uint8_t[length];
    file.read(reinterpret_cast<char*>(mem_), length);
    file_length_ = length;
    offset_ = 0;
    return true;
  }

  // Reads next packet from the input stream.
  // Returns number of read bytes on success, 0 on when there was no valid data
  // to be read and -1 if user gave NULL or too small buffer.
  // TODO(vmr): Modify to differentiate between errors and EOF.
  int32_t Read(uint8_t* target_mem, uint32_t size) {
    if (!target_mem)
      return -1;
    uint8_t* unit_begin = NULL;
    uint8_t* unit_end = NULL;
    uint8_t* ptr = mem_ + offset_;
    // Scan for two consecutive 00 00 00 01 start codes; the unit spans from
    // the first start code up to (not including) the second.
    while (offset_ + 4 < file_length_) {
      if (ptr[0] == 0 && ptr[1] == 0 && ptr[2] == 0 && ptr[3] == 1) {
        // Start code found.
        if (!unit_begin) {
          unit_begin = ptr;
        } else {
          // Second start code marks the end of the current unit.
          unit_end = ptr;
          break;
        }
      }
      ptr++;
      offset_++;
    }
    if (unit_begin && offset_ + 4 == file_length_) {
      // Last unit. Set the unit_end to point to the last byte.
      unit_end = ptr + 4;
      offset_ += 4;
    } else if (!unit_begin || !unit_end) {
      // No unit start codes found in buffer.
      return 0;
    }
    if (static_cast<int32_t>(size) >= unit_end - unit_begin) {
      memcpy(target_mem, unit_begin, unit_end - unit_begin);
      return unit_end - unit_begin;
    }
    // Rewind to the beginning start code if there is one as it should be
    // returned with next Read().
    offset_ = unit_begin - mem_;
    return -1;
  }

 private:
  // Disallow copying: |mem_| is an owning raw pointer (pre-C++11 idiom).
  TestVideoSource(const TestVideoSource&);
  TestVideoSource& operator=(const TestVideoSource&);

  uint32_t file_length_;  // Total size of the loaded file in bytes.
  uint32_t offset_;       // Current scan position within |mem_|.
  uint8_t* mem_;          // Owned copy of the whole file; NULL until Open().
};
+
// Wraps a TestVideoSource around the file at |filename|; the file is opened
// lazily on the first GetBitstreamUnit() call.
// |filename| is taken by value to match the declaration in the header.
LocalVideoBitstreamSource::LocalVideoBitstreamSource(std::string filename)
    : file_(filename),
      video_source_(new TestVideoSource()),
      video_source_open_(false) {
}
+
// Releases the owned TestVideoSource.
LocalVideoBitstreamSource::~LocalVideoBitstreamSource() {
  delete video_source_;
}
+
+bool LocalVideoBitstreamSource::GetBitstreamUnit(
+ void* target_mem,
+ uint32_t target_mem_size_in_bytes,
+ int32_t* unit_size_in_bytes) {
+ if (!video_source_open_) {
+ if (!video_source_->Open(file_)) {
+ return false;
+ }
+ video_source_open_ = true;
+ }
+ int32_t read_bytes = video_source_->Read(static_cast<uint8_t*>(target_mem),
+ target_mem_size_in_bytes);
+ if (read_bytes <= 0) {
+ return false;
+ }
+ *unit_size_in_bytes = read_bytes;
+ return true;
+}
+
// Constants used by VideoDecoderClient.
// Number of bitstream buffers cycled between the source and the decoder.
static const int32_t kBitstreamBufferCount = 3;
// Size of each bitstream buffer.
// NOTE(review): 256 * 1024 * 1024 is 256 MB *per buffer* (768 MB total for
// the pool) — presumably 256 KB (256 * 1024) was intended; confirm.
static const int32_t kBitstreamBufferSize = 256 * 1024 * 1024;
// Display dimensions passed to the display in Initialize().
static const int32_t kDefaultWidth = 640;
static const int32_t kDefaultHeight = 480;
+
// Sets up the bitstream buffer pool, the display and the decoder, then moves
// to kInitialized. Returns false if any step fails.
bool VideoDecoderClient::Initialize() {
  // Default implementation just assumes everything is set up.
  if (!InitializeVideoBitstreamInterface()) {
    return false;
  }
  if (!display_->Initialize(kDefaultWidth, kDefaultHeight)) {
    return false;
  }
  video_decoder_ = new pp::VideoDecoder(instance_, decoder_config_, this);
  // NOTE(review): operator new throws rather than returning NULL, so this
  // check can never fire; harmless but dead code.
  if (!video_decoder_) {
    return false;
  }
  ChangeState(kInitialized);
  return true;
}
+
+bool VideoDecoderClient::Run() {
+ assert(state_ == kInitialized);
+ // Start the streaming by dispatching the first buffers one by one.
+ for (std::map<int32_t, PP_VideoBitstreamBuffer_Dev>::iterator it =
+ bitstream_buffers_.begin();
+ it == bitstream_buffers_.end();
+ it++) {
+ ReadAndDispatchBitstreamUnit((*it).first);
+ }
+ // Once streaming has been started, we're running.
+ ChangeState(kRunning);
+ return true;
+}
+
// Stops playback. Only the state is updated here; in-flight decode
// callbacks are not cancelled. Must be called in state kRunning.
bool VideoDecoderClient::Stop() {
  assert(state_ == kRunning);
  // Stop the playback.
  ChangeState(kInitialized);
  return true;
}
+
// Requests a user-initiated flush. Must be called in state kRunning.
bool VideoDecoderClient::Flush() {
  assert(state_ == kRunning);
  // Issue the flush request. NOTE: the current |state_| (kRunning) is bound
  // into the callback *before* ChangeState below so that OnUserFlushDone can
  // restore it when the flush completes — do not reorder these statements.
  video_decoder_->Flush(cb_factory_.NewCallback(
      &VideoDecoderClient::OnUserFlushDone, state_));
  ChangeState(kFlushing);
  return true;
}
+
// Tears the client down. Currently only the state is updated; no resources
// are released here. Must be called in state kInitialized.
bool VideoDecoderClient::Teardown() {
  assert(state_ == kInitialized);
  // Teardown the resources.
  ChangeState(kCreated);
  return true;
}
+
+void VideoDecoderClient::ProvidePictureBuffers(
+ uint32_t requested_num_of_buffers,
+ const std::vector<uint32_t>& buffer_properties) {
+ std::vector<PP_GLESBuffer_Dev> buffers;
+ for (uint32_t i = 0; i < requested_num_of_buffers; i++) {
+ PP_GLESBuffer_Dev gles_buffer;
+ if (!display_->ProvideGLESPictureBuffer(buffer_properties, &gles_buffer)) {
+ // TODO(vmr): Handle error properly.
+ return;
+ }
+ buffers.push_back(gles_buffer);
+ }
+ video_decoder_->AssignGLESBuffers(buffers.size(), buffers);
+}
+
// Decoder callback: forwards the dismissal to the display, which owns the
// GL resources backing the picture buffer.
void VideoDecoderClient::DismissPictureBuffer(int32_t picture_buffer_id) {
  if (!display_->DismissPictureBuffer(picture_buffer_id)) {
    // TODO(vmr): Handle error properly.
    return;
  }
}
+
// Decoder callback: a picture is ready; draw it and recycle its buffer from
// OnDrawPictureDone once the draw (and swap) completes.
void VideoDecoderClient::PictureReady(const PP_Picture_Dev& picture) {
  display_->DrawPicture(picture, cb_factory_.NewCallback(
      &VideoDecoderClient::OnDrawPictureDone, picture.picture_buffer_id));
}
+
// Decoder callback: end of stream reached. Record EOS (OnEOSFlushDone
// asserts on it) and flush out any pictures still buffered in the decoder.
void VideoDecoderClient::NotifyEndOfStream() {
  end_of_stream_ = true;
  video_decoder_->Flush(cb_factory_.NewCallback(
      &VideoDecoderClient::OnEOSFlushDone));
}
+
// Decoder callback: a decode error occurred. |error| is currently ignored;
// any error triggers the same flush-then-abort path as end-of-stream.
// NOTE(review): OnEOSFlushDone asserts |end_of_stream_|, which only
// NotifyEndOfStream sets — an error without EOS would trip that assert in
// debug builds. TODO confirm intended behavior.
void VideoDecoderClient::NotifyError(PP_VideoDecodeError_Dev error) {
  video_decoder_->Flush(cb_factory_.NewCallback(
      &VideoDecoderClient::OnEOSFlushDone));
}
+
+int32_t VideoDecoderClient::GetUniqueId() {
+ // Not exactly unique in the current form but close enough for use case.
+ return next_id_++;
+}
+
// Decoder callback: all resources have been acquired.
void VideoDecoderClient::OnResourcesAcquired() {
  // We're running normally.
  ChangeState(kRunning);
}
+
// Completion callback for Decode(). |result| is ignored; the buffer is
// refilled and redispatched regardless.
void VideoDecoderClient::OnBitstreamBufferProcessed(
    int32_t result,
    int32_t bitstream_buffer_id) {
  // Reuse each bitstream buffer that has been processed by reading data into it
  // as long as there is more and pass that for decoding.
  ReadAndDispatchBitstreamUnit(bitstream_buffer_id);
}
+
// Completion callback for DrawPicture(). |result| is ignored; the picture
// buffer is handed back to the decoder for reuse.
void VideoDecoderClient::OnDrawPictureDone(int32_t result,
                                           int32_t picture_buffer_id) {
  video_decoder_->ReusePictureBuffer(picture_buffer_id);
}
+
// Completion callback for a user-initiated Flush(). |target_state| is the
// state that was current when Flush() was issued (bound into the callback);
// restore it now that the flush has finished.
void VideoDecoderClient::OnUserFlushDone(int32_t result,
                                         State target_state) {
  assert(state_ == kFlushing);
  // It was a Flush request, return to the running state.
  ChangeState(target_state);
}
+
// Completion callback for the end-of-stream flush. |result| is ignored;
// the decoder is shut down via Abort().
void VideoDecoderClient::OnEOSFlushDone(int32_t result) {
  assert(end_of_stream_);
  // It was end of stream flush.
  video_decoder_->Abort(cb_factory_.NewCallback(
      &VideoDecoderClient::OnAbortDone));
}
+
// Completion callback for Abort(); intentionally a no-op.
void VideoDecoderClient::OnAbortDone(int32_t result) {
  // We're done.
}
+
+bool VideoDecoderClient::InitializeVideoBitstreamInterface() {
+ buffer_if_ = static_cast<const struct PPB_Buffer_Dev*>(
+ pp::Module::Get()->GetBrowserInterface(PPB_BUFFER_DEV_INTERFACE));
+ if (!buffer_if_) {
+ return false;
+ }
+ // Allocate |kBitstreamBufferCount| bitstream buffers of
+ // |kBitstreamBufferSize| bytes.
+ for (int32_t i = 0; i < kBitstreamBufferCount; i++) {
+ PP_VideoBitstreamBuffer_Dev bitstream_buffer;
+ bitstream_buffer.data = buffer_if_->Create(instance_->pp_instance(),
+ kBitstreamBufferSize);
+ if (bitstream_buffer.data == 0) {
+ return false;
+ }
+ bitstream_buffer.size = 0;
+ bitstream_buffer.id = GetUniqueId();
+ bitstream_buffers_[bitstream_buffer.id] = bitstream_buffer;
+ }
+ return true;
+}
+
+bool VideoDecoderClient::ReadAndDispatchBitstreamUnit(
+ int32_t bitstream_buffer_id) {
+ // Get the target memory and read the bitstream unit into it.
+ if (bitstream_buffers_.find(bitstream_buffer_id) ==
+ bitstream_buffers_.end()) {
+ return false;
+ }
+ PP_VideoBitstreamBuffer_Dev bitstream_buffer =
+ bitstream_buffers_[bitstream_buffer_id];
+ void* target_mem = buffer_if_->Map(bitstream_buffer.data);
+ if (target_mem == NULL) {
+ return false;
+ }
+ uint32_t size_in_bytes = 0;
+ if (!buffer_if_->Describe(bitstream_buffer.data, &size_in_bytes)) {
+ return false;
+ }
+ bool success = video_source_->GetBitstreamUnit(target_mem, size_in_bytes,
+ &bitstream_buffer.size);
+ if (!success) {
+ return false;
+ }
+ // Dispatch the bitstream unit to the decoder.
+ success = video_decoder_->Decode(
+ bitstream_buffers_[bitstream_buffer_id],
+ cb_factory_.NewCallback(
+ &VideoDecoderClient::OnBitstreamBufferProcessed,
+ bitstream_buffer_id));
+ return success;
+}
+
// Records the new state unconditionally; transition validity is asserted by
// the callers, not checked here.
void VideoDecoderClient::ChangeState(State to_state) {
  state_ = to_state;
}
+
// Shared counter backing GetUniqueId(); ids start from 1.
int32_t VideoDecoderClient::next_id_ = 1;
+
// Pass-through vertex shader: forwards the position unchanged and hands the
// texture coordinate to the fragment stage via |interp_tc|.
static const char kVertexShader[] =
    "precision highp float; precision highp int;\n"
    "varying vec2 interp_tc;\n"
    "\n"
    "attribute vec4 in_pos;\n"
    "attribute vec2 in_tc;\n"
    "\n"
    "void main() {\n"
    "  interp_tc = in_tc;\n"
    "  gl_Position = in_pos;\n"
    "}\n";

// Color shader for EGLImage: samples the bound texture at the interpolated
// coordinate.
static const char kFragmentShaderEgl[] =
    "precision mediump float;\n"
    "precision mediump int;\n"
    "varying vec2 interp_tc;\n"
    "\n"
    "uniform sampler2D tex;\n"
    "\n"
    "void main() {\n"
    "  gl_FragColor = texture2D(tex, interp_tc);\n"
    "}\n";

// Buffer size for shader compile/link error logs.
static const unsigned int kShaderErrorSize = 4096;
+
// Context-lost handler: context loss is unrecoverable for this tester, so
// fail hard (the assert condition is always false).
void GLES2Display::Graphics3DContextLost() {
  assert(!"GLES2: Unexpectedly lost graphics context");
}
+
+bool GLES2Display::Initialize(int32_t width, int32_t height) {
+ if (!InitGL(640, 480)) {
+ return false;
+ }
+ ProgramShaders();
+ return true;
+}
+
// Backs a decoder picture buffer with a newly generated GLES texture and
// records it in |gles_buffers_|. Always reports success.
// |buffer_properties| is currently unused; the display's own dimensions are
// reported instead.
bool GLES2Display::ProvideGLESPictureBuffer(
    const std::vector<uint32_t>& buffer_properties,
    PP_GLESBuffer_Dev* picture_buffer) {
  GLuint texture;
  // Generate texture and bind (effectively allocate) it.
  // NOTE(review): no TexImage2D is issued, so no storage is defined for the
  // texture here — presumably the decoder allocates/fills it; confirm.
  gles2_if_->GenTextures(context_->pp_resource(), 1, &texture);
  gles2_if_->BindTexture(context_->pp_resource(), GL_TEXTURE_2D, texture);
  picture_buffer->context = 0;  // TODO(vmr): Get proper context id.
  picture_buffer->texture_id = texture;
  picture_buffer->info.id = VideoDecoderClient::GetUniqueId();
  picture_buffer->info.size.width = width_;
  picture_buffer->info.size.height = height_;
  // Store the values into the map for GLES buffers.
  gles_buffers_[picture_buffer->info.id] = *picture_buffer;
  assertNoGLError();
  return true;
}
+
+bool GLES2Display::DismissPictureBuffer(int32_t picture_buffer_id) {
+ gles2_if_->DeleteTextures(context_->pp_resource(), 1,
+ &gles_buffers_[picture_buffer_id].texture_id);
+ gles_buffers_.erase(picture_buffer_id);
+ return true;
+}
+
// Draws the decoded |picture| (already resident in its texture) onto the
// color buffer and swaps surfaces. |completion_callback| fires when the swap
// completes. Returns false if SwapBuffers reports an error.
bool GLES2Display::DrawPicture(const PP_Picture_Dev& picture,
                               pp::CompletionCallback completion_callback) {
  // Decoder has finished decoding picture into the texture, we'll have to just
  // draw the texture to the color buffer and swap the surfaces.
  // Clear the color buffer.
  gles2_if_->Clear(context_->pp_resource(), GL_COLOR_BUFFER_BIT |
                   GL_DEPTH_BUFFER_BIT);
  // Load the texture into texture unit 0.
  gles2_if_->ActiveTexture(context_->pp_resource(), GL_TEXTURE0);
  gles2_if_->BindTexture(context_->pp_resource(), GL_TEXTURE_2D,
                         gles_buffers_[picture.picture_buffer_id].texture_id);
  // Draw the texture as a full-screen quad (4 vertices set up by
  // ProgramShaders).
  gles2_if_->DrawArrays(context_->pp_resource(), GL_TRIANGLE_STRIP, 0, 4);
  // Force the execution of pending commands.
  // TODO(vmr): Do we have to do this? Can we rely command buffer to execute the
  // commands without Finish call?
  gles2_if_->Finish(context_->pp_resource());
  assertNoGLError();

  int32_t error = surface_->SwapBuffers(completion_callback);
  if (error != PP_OK) {
    return false;
  }
  assertNoGLError();
  return true;
}
+
// Debug helper: asserts that the GL context has no pending error.
// No-op in release builds (assert compiled out).
void GLES2Display::assertNoGLError() {
  assert(!gles2_if_->GetError(context_->pp_resource()));
}
+
+bool GLES2Display::InitGL(int width, int height) {
+ width_ = width;
+ height_ = height;
+ assert(width_ && height_);
+ gles2_if_ = static_cast<const struct PPB_OpenGLES2_Dev*>(
+ pp::Module::Get()->GetBrowserInterface(PPB_OPENGLES2_DEV_INTERFACE));
+ // Firstly, we need OpenGL ES context associated with the display our plugin
+ // is rendering to.
+ if (context_) delete(context_);
+ context_ = new pp::Context3D_Dev(*instance_, 0, pp::Context3D_Dev(), NULL);
+ assert(!context_->is_null());
+ // Then we need surface bound to our fresh context. We'll be actually drawing
+ // on this surface and swapping that surface to refresh the displayable data
+ // of the plugin.
+ int32_t surface_attributes[] = {
+ PP_GRAPHICS3DATTRIB_WIDTH, width_,
+ PP_GRAPHICS3DATTRIB_HEIGHT, height_,
+ PP_GRAPHICS3DATTRIB_NONE
+ };
+ if (surface_) delete(surface_);
+ surface_ = new pp::Surface3D_Dev(*instance_, 0, surface_attributes);
+ assert(!surface_->is_null());
+ int32_t bind_error = context_->BindSurfaces(*surface_, *surface_);
+ if (!bind_error) {
+ assert(bind_error);
+ }
+ assertNoGLError();
+
+ bool success = instance_->BindGraphics(*surface_);
+ if (!success) {
+ assert(success);
+ }
+ // Clear the color buffer with opaque white for starters.
+ gles2_if_->ClearColor(context_->pp_resource(), 1.0, 1.0, 1.0, 0.0);
+ gles2_if_->Clear(context_->pp_resource(), GL_COLOR_BUFFER_BIT);
+ // Set the viewport to match the whole GL window.
+ gles2_if_->Viewport(context_->pp_resource(), 0, 0, width_, height_);
+ assertNoGLError();
+ return true;
+}
+
+void GLES2Display::CreateShader(GLuint program, GLenum type,
+ const char* source,
+ int size) {
+ GLuint shader = gles2_if_->CreateShader(context_->pp_resource(), type);
+ gles2_if_->ShaderSource(
+ context_->pp_resource(), shader, 1, &source, &size);
+ gles2_if_->CompileShader(context_->pp_resource(), shader);
+
+ int result = GL_FALSE;
+ gles2_if_->GetShaderiv(
+ context_->pp_resource(), shader, GL_COMPILE_STATUS, &result);
+ if (!result) {
+ char log[kShaderErrorSize];
+ int len = 0;
+ gles2_if_->GetShaderInfoLog(context_->pp_resource(), shader,
+ kShaderErrorSize - 1, &len, log);
+ log[len] = 0;
+ assert(result);
+ }
+ gles2_if_->AttachShader(context_->pp_resource(), program, shader);
+ gles2_if_->DeleteShader(context_->pp_resource(), shader);
+}
+
+void GLES2Display::LinkProgram(const PPB_OpenGLES2_Dev* gles2_if_ ) {
+ gles2_if_->LinkProgram(context_->pp_resource(), program_);
+ int result = GL_FALSE;
+ gles2_if_->GetProgramiv(context_->pp_resource(), program_, GL_LINK_STATUS,
+ &result);
+ if (!result) {
+ char log[kShaderErrorSize];
+ int len = 0;
+ gles2_if_->GetProgramInfoLog(context_->pp_resource(), program_,
+ kShaderErrorSize - 1, &len, log);
+ log[len] = 0;
+ assert(result);
+ }
+ gles2_if_->UseProgram(context_->pp_resource(), program_);
+}
+
+void GLES2Display::ProgramShaders() {
+ // Vertices for a full screen quad.
+ static const float kVertices[] = {
+ -1.f, 1.f,
+ -1.f, -1.f,
+ 1.f, 1.f,
+ 1.f, -1.f,
+ };
+
+ // Texture Coordinates mapping the entire texture for EGL image.
+ static const float kTextureCoordsEgl[] = {
+ 0, 1,
+ 0, 0,
+ 1, 1,
+ 1, 0,
+ };
+ program_ = gles2_if_->CreateProgram(context_->pp_resource());
+
+ // Create shader for EGL image
+ CreateShader(program_, GL_VERTEX_SHADER,
+ kVertexShader, sizeof(kVertexShader));
+ CreateShader(program_, GL_FRAGMENT_SHADER,
+ kFragmentShaderEgl, sizeof(kFragmentShaderEgl));
+ LinkProgram(gles2_if_);
+
+ assertNoGLError();
+ // Bind parameters.
+ gles2_if_->Uniform1i(context_->pp_resource(), gles2_if_->
+ GetUniformLocation(context_->pp_resource(), program_,
+ "tex"), 0);
+ gles2_if_->GenBuffers(context_->pp_resource(), 1, &vertex_);
+ gles2_if_->BindBuffer(context_->pp_resource(), GL_ARRAY_BUFFER,
+ vertex_);
+ gles2_if_->BufferData(context_->pp_resource(), GL_ARRAY_BUFFER,
+ 8 * sizeof(kVertices[0]), kVertices, GL_STREAM_DRAW);
+
+ assertNoGLError();
+ int pos_location = gles2_if_->GetAttribLocation(context_->pp_resource(),
+ program_, "in_pos");
+ gles2_if_->EnableVertexAttribArray(context_->pp_resource(), pos_location);
+ gles2_if_->VertexAttribPointer(context_->pp_resource(), pos_location, 2,
+ GL_FLOAT, GL_FALSE, 0, 0);
+
+ assertNoGLError();
+ gles2_if_->GenBuffers(context_->pp_resource(), 1, &fragment_);
+ gles2_if_->BindBuffer(context_->pp_resource(), GL_ARRAY_BUFFER,
+ fragment_);
+ gles2_if_->BufferData(context_->pp_resource(), GL_ARRAY_BUFFER,
+ 8 * sizeof(kTextureCoordsEgl[0]),
+ kTextureCoordsEgl, GL_STREAM_DRAW);
+ assertNoGLError();
+ int tc_location = gles2_if_->GetAttribLocation(context_->pp_resource(),
+ program_, "in_tc");
+ gles2_if_->EnableVertexAttribArray(context_->pp_resource(), tc_location);
+ gles2_if_->VertexAttribPointer(context_->pp_resource(), tc_location, 2,
+ GL_FLOAT, GL_FALSE, 0, kTextureCoordsEgl);
+ gles2_if_->VertexAttribPointer(context_->pp_resource(), tc_location, 2,
+ GL_FLOAT, GL_FALSE, 0, 0);
+ gles2_if_->Enable(context_->pp_resource(), GL_DEPTH_TEST);
+ assertNoGLError();
+}
+
« ppapi/tests/test_video_decoder.h ('K') | « ppapi/tests/test_video_decoder.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698