Chromium Code Reviews

Unified Diff: ppapi/examples/video_decoder/video_decoder_session.cc

Issue 6961018: Pepper Video Decoder API tester plugin. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Rebase & compilation fixes related to it. Created 9 years, 6 months ago
Index: ppapi/examples/video_decoder/video_decoder_session.cc
diff --git a/ppapi/examples/video_decoder/video_decoder_session.cc b/ppapi/examples/video_decoder/video_decoder_session.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4fc4278a86f6f095240b81b545f2521725c70ef1
--- /dev/null
+++ b/ppapi/examples/video_decoder/video_decoder_session.cc
@@ -0,0 +1,629 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ppapi/examples/video_decoder/video_decoder_session.h"
+
+#include <cassert>
+#include <cstring>
+#include <fstream>
+
+#include "ppapi/c/dev/pp_graphics_3d_dev.h"
+#include "ppapi/c/dev/ppb_buffer_dev.h"
+#include "ppapi/c/pp_errors.h"
+#include "ppapi/cpp/dev/context_3d_dev.h"
+#include "ppapi/cpp/dev/surface_3d_dev.h"
+#include "ppapi/cpp/dev/video_decoder_dev.h"
+#include "ppapi/lib/gl/include/GLES2/gl2.h"
+
+// Pull-based video source to read video data from a file.
+class TestVideoSource {
+ public:
+  TestVideoSource()
+      : file_length_(0),
+        offset_(0),
+        mem_(NULL) {}
+
+  ~TestVideoSource() {
+    delete[] mem_;
+  }
+
+  bool Open(const std::string& url) {
+    // TODO(vmr): Use file_util::ReadFileToString or equivalent to read the
+    // file if one-shot reading is used.
+    std::ifstream file(url.c_str(),
+                       std::ios::in | std::ios::binary | std::ios::ate);
+    if (!file.good())
+      return false;
+    // Opened with std::ios::ate, so the get pointer is already at the end.
+    uint32_t length = file.tellg();
+    file.seekg(0, std::ios::beg);
+    mem_ = new uint8_t[length];
+    file.read(reinterpret_cast<char*>(mem_), length);
+    file_length_ = length;
+    file.close();
+    return true;
+  }
+
+  // Reads the next unit from the input stream.
+  // Returns the number of bytes read on success, 0 when there was no valid
+  // data to be read, and -1 if the caller gave a NULL or too small buffer.
+  // TODO(vmr): Modify to differentiate between errors and EOF.
+  int32_t Read(uint8_t* target_mem, uint32_t size) {
+    if (!target_mem)
+      return -1;
+    uint8_t* unit_begin = NULL;
+    uint8_t* unit_end = NULL;
+    uint8_t* ptr = mem_ + offset_;
+    // Parse H.264 access units from the file. Units are delimited by the
+    // four-byte start code 00 00 00 01 specified in ISO 14496-10 Annex B.
+    // The returned data will look like:
+    //   Unit #1: 00 00 00 01 <data>
+    //   Unit #2: 00 00 00 01 <data>
+    //   ...
+    //   Unit #N: 00 00 00 01 <data>
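+    // For example, given a (hypothetical) stream starting
+    //   00 00 00 01 67 42 ... 00 00 00 01 68 CE ...
+    // the first Read() returns everything from the first start code up to,
+    // but not including, the second one; the next Read() resumes there.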
+ while (offset_ + 4 < file_length_) {
+ if (ptr[0] == 0 && ptr[1] == 0 && ptr[2] == 0 && ptr[3] == 1) {
+ // Start code found, store the location.
+ if (!unit_begin) {
+ unit_begin = ptr;
+ } else {
+          // A second start code marks the end of the current unit.
+ unit_end = ptr;
+ break;
+ }
+ }
+ ptr++;
+ offset_++;
+ }
+ if (unit_begin && offset_ + 4 == file_length_) {
+      // Last unit in the stream. Consume the remaining bytes, which are too
+      // few to contain another start code.
+ unit_end = ptr + 4;
+ offset_ += 4;
+ } else if (!unit_begin || !unit_end) {
+ // No unit start codes found in buffer.
+ return 0;
+ }
+ if (static_cast<int32_t>(size) >= unit_end - unit_begin) {
+ memcpy(target_mem, unit_begin, unit_end - unit_begin);
+ return unit_end - unit_begin;
+ }
+    // The caller's buffer was too small; rewind to the unit's start code so
+    // the same unit is returned by the next Read().
+ offset_ = unit_begin - mem_;
+ return -1;
+ }
+
+ private:
+ uint32_t file_length_;
+ uint32_t offset_;
+ uint8_t* mem_;
+};
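+
+// Example use of TestVideoSource (hypothetical file name):
+//   TestVideoSource source;
+//   if (source.Open("test_video.h264")) {
+//     uint8_t buffer[64 * 1024];
+//     int32_t bytes = source.Read(buffer, sizeof(buffer));
+//     // bytes > 0: one Annex B unit, 0: no data/EOF, -1: buffer too small.
+//   }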
+
+LocalVideoBitstreamSource::LocalVideoBitstreamSource(std::string filename)
+ : file_(filename),
+ video_source_(new TestVideoSource()),
+ video_source_open_(false) {
+}
+
+LocalVideoBitstreamSource::~LocalVideoBitstreamSource() {
+ delete video_source_;
+}
+
+bool LocalVideoBitstreamSource::GetBitstreamUnit(
+ void* target_mem,
+ uint32_t target_mem_size_in_bytes,
+ int32_t* unit_size_in_bytes) {
+ if (!video_source_open_) {
+ if (!video_source_->Open(file_))
+ return false;
+ video_source_open_ = true;
+ }
+ int32_t read_bytes = video_source_->Read(static_cast<uint8_t*>(target_mem),
+ target_mem_size_in_bytes);
+ if (read_bytes <= 0)
+ return false;
+
+ *unit_size_in_bytes = read_bytes;
+ return true;
+}
+
+VideoDecoderSessionClient::~VideoDecoderSessionClient() {
+}
+
+// Constants used by VideoDecoderSession.
+static const int32_t kBitstreamBufferCount = 3;
+static const int32_t kBitstreamBufferSize = 256 * 1024;  // 256 KB per buffer.
+static const int32_t kDefaultWidth = 640;
+static const int32_t kDefaultHeight = 480;
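+
+// VideoDecoderSession drives a simple state machine (see the methods below):
+//   kCreated     --Initialize()--> kInitialized
+//   kInitialized --Run()---------> kRunning
+//   kRunning     --Flush()-------> kFlushing --OnUserFlushDone--> kRunning
+//   kRunning     --Stop()--------> kInitialized
+//   kInitialized --Teardown()----> kCreated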
+
+VideoDecoderSession::VideoDecoderSession(
+ pp::Instance* instance,
+ VideoDecoderSessionClient* client,
+ VideoBitstreamInterface* video_bitstream_if,
+ DisplayInterface* display_if)
+ : cb_factory_(this),
+ instance_(instance),
+ client_(client),
+ video_source_(video_bitstream_if),
+ display_(display_if),
+ end_of_stream_(false),
+ state_(kCreated),
+ next_id_(1) {
+ buffer_if_ = static_cast<const struct PPB_Buffer_Dev*>(
+ pp::Module::Get()->GetBrowserInterface(PPB_BUFFER_DEV_INTERFACE));
+ assert(video_source_ && display_ && buffer_if_);
+}
+
+VideoDecoderSession::~VideoDecoderSession() {}
+
+bool VideoDecoderSession::Initialize(
+ const PP_VideoConfigElement* decoder_config,
+ pp::CompletionCallback completion_callback) {
+  // This implementation assumes the given decoder config is usable as-is.
+ if (!AllocateInputBuffers())
+ return false;
+
+ pp::CompletionCallback cb = cb_factory_.NewCallback(
+ &VideoDecoderSession::OnInitializeDone, completion_callback);
+ video_decoder_ = new pp::VideoDecoder(instance_, decoder_config, cb, this);
+ if (!video_decoder_)
+ return false;
+
+ return true;
+}
+
+bool VideoDecoderSession::Run(pp::CompletionCallback completion_callback) {
+ assert(state_ == kInitialized);
+ // Start the streaming by dispatching the first buffers one by one.
+  for (std::map<int32_t, PP_VideoBitstreamBuffer_Dev>::iterator it =
+           bitstream_buffers_.begin();
+       it != bitstream_buffers_.end();
+       ++it) {
+ if (!ReadAndDispatchBitstreamUnit((*it).first))
+ return false;
+ }
+ // Once streaming has been started, we're running.
+ ChangeState(kRunning);
+ completion_callback.Run(PP_OK);
+ return true;
+}
+
+bool VideoDecoderSession::Stop(pp::CompletionCallback completion_callback) {
+  assert(state_ == kRunning);
+  // Stop the playback.
+  ChangeState(kInitialized);
+  completion_callback.Run(PP_OK);
+  return true;
+}
+
+bool VideoDecoderSession::Flush(pp::CompletionCallback completion_callback) {
+  assert(state_ == kRunning);
+  // Issue the flush request. Capture the current state before changing it so
+  // that OnUserFlushDone can restore it once the flush completes.
+  State saved_state = state_;
+  ChangeState(kFlushing);
+  video_decoder_->Flush(cb_factory_.NewCallback(
+      &VideoDecoderSession::OnUserFlushDone, saved_state,
+      completion_callback));
+  return true;
+}
+
+bool VideoDecoderSession::Teardown(pp::CompletionCallback completion_callback) {
+ assert(state_ == kInitialized);
+ // Teardown the resources.
+ FreeInputBuffers();
+ ChangeState(kCreated);
+ completion_callback.Run(PP_OK);
+ return true;
+}
+
+void VideoDecoderSession::ProvidePictureBuffers(
+ uint32_t req_num_of_bufs,
+ PP_Size dimensions,
+ enum PP_PictureBufferType_Dev type) {
+ // Currently we support only GLES buffer allocation.
+ if (type == PP_PICTUREBUFFERTYPE_GLESTEXTURE) {
+ std::vector<PP_GLESBuffer_Dev> buffers;
+ if (!display_->ProvideGLESPictureBuffers(req_num_of_bufs, dimensions,
+ buffers)) {
+ video_decoder_->Abort(cb_factory_.NewCallback(
+ &VideoDecoderSession::OnAbortDone));
+ return;
+ }
+ video_decoder_->AssignGLESBuffers(buffers);
+ } else {
+ assert(!"VideoDecoderSession does not support this type of pic buffers");
+ }
+}
+
+void VideoDecoderSession::DismissPictureBuffer(int32_t picture_buffer_id) {
+ if (!display_->DismissPictureBuffer(picture_buffer_id)) {
+ assert(!"Failed to dismiss picture buffer properly");
+ return;
+ }
+}
+
+void VideoDecoderSession::PictureReady(const PP_Picture_Dev& picture) {
+ display_->DrawPicture(picture, cb_factory_.NewCallback(
+ &VideoDecoderSession::OnDrawPictureDone, picture.picture_buffer_id));
+}
+
+void VideoDecoderSession::EndOfStream() {
+ end_of_stream_ = true;
+ video_decoder_->Flush(cb_factory_.NewCallback(
+ &VideoDecoderSession::OnInternalFlushDone));
+}
+
+void VideoDecoderSession::NotifyError(PP_VideoDecodeError_Dev error) {
+ video_decoder_->Flush(cb_factory_.NewCallback(
+ &VideoDecoderSession::OnInternalFlushDone));
+}
+
+void VideoDecoderSession::OnInitializeDone(int32_t result,
+                                           pp::CompletionCallback callback) {
+  if (state_ != kCreated) {
+    callback.Run(PP_ERROR_ABORTED);
+    return;
+  }
+  if (result != PP_OK) {
+    callback.Run(result);
+    return;
+  }
+  ChangeState(kInitialized);
+  callback.Run(PP_OK);
+}
+
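+// Together with Run(), the callback below forms the streaming loop: Run()
+// primes every bitstream buffer, and each Decode() completion refills the
+// processed buffer until the source has no more data.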
+void VideoDecoderSession::OnBitstreamBufferProcessed(
+ int32_t result,
+ int32_t bitstream_buffer_id) {
+  // Refill the processed bitstream buffer from the source and pass it back
+  // for decoding as long as there is more data.
+ ReadAndDispatchBitstreamUnit(bitstream_buffer_id);
+}
+
+void VideoDecoderSession::OnDrawPictureDone(int32_t result,
+ int32_t picture_buffer_id) {
+ video_decoder_->ReusePictureBuffer(picture_buffer_id);
+}
+
+void VideoDecoderSession::OnUserFlushDone(int32_t result,
+ State target_state,
+ pp::CompletionCallback callback) {
+ assert(state_ == kFlushing);
+ // It was a Flush request, return to the state where we started.
+ ChangeState(target_state);
+ callback.Run(result);
+}
+
+void VideoDecoderSession::OnInternalFlushDone(int32_t result) {
+ if (end_of_stream_) {
+ // It was end of stream flush.
+ video_decoder_->Abort(cb_factory_.NewCallback(
+ &VideoDecoderSession::OnAbortDone));
+ } else {
+ assert(!"Unhandled flush completion!");
+ }
+}
+
+void VideoDecoderSession::OnAbortDone(int32_t result) {
+ client_->OnSessionCompleted(result);
+}
+
+bool VideoDecoderSession::AllocateInputBuffers() {
+ // Allocate |kBitstreamBufferCount| bitstream buffers of
+ // |kBitstreamBufferSize| bytes.
+ for (int32_t i = 0; i < kBitstreamBufferCount; i++) {
+ PP_VideoBitstreamBuffer_Dev bitstream_buffer;
+ bitstream_buffer.data = buffer_if_->Create(instance_->pp_instance(),
+ kBitstreamBufferSize);
+ if (bitstream_buffer.data == 0)
+ return false;
+ bitstream_buffer.size = 0;
+ bitstream_buffer.id = GetUniqueId();
+ bitstream_buffers_[bitstream_buffer.id] = bitstream_buffer;
+ }
+ return true;
+}
+
+void VideoDecoderSession::FreeInputBuffers() {
+  // Release each buffer resource. Clear the map only after the loop so the
+  // iterator stays valid throughout.
+  std::map<int32_t, PP_VideoBitstreamBuffer_Dev>::iterator it;
+  for (it = bitstream_buffers_.begin(); it != bitstream_buffers_.end(); ++it)
+    pp::Module::Get()->core()->ReleaseResource(it->second.data);
+  bitstream_buffers_.clear();
+}
+
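+// A bitstream buffer wraps a PPB_Buffer_Dev shared-memory resource. The
+// calls below follow the usual PPB_Buffer_Dev pattern: Map() to get a
+// plugin-side pointer, Describe() to query the size, fill the memory, and
+// Unmap() once the data has been handed to the decoder.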
+bool VideoDecoderSession::ReadAndDispatchBitstreamUnit(
+ int32_t bitstream_buffer_id) {
+ // Get the target memory and read the bitstream unit into it.
+ if (bitstream_buffers_.find(bitstream_buffer_id) ==
+ bitstream_buffers_.end())
+ return false;
+
+ PP_VideoBitstreamBuffer_Dev& bitstream_buffer =
+ bitstream_buffers_[bitstream_buffer_id];
+ void* target_mem = buffer_if_->Map(bitstream_buffer.data);
+ if (target_mem == NULL)
+ return false;
+
+  uint32_t size_in_bytes = 0;
+  if (!buffer_if_->Describe(bitstream_buffer.data, &size_in_bytes)) {
+    buffer_if_->Unmap(bitstream_buffer.data);
+    return false;
+  }
+
+  bool success = video_source_->GetBitstreamUnit(target_mem, size_in_bytes,
+                                                 &bitstream_buffer.size);
+  if (!success) {
+    // No more data (or a source error); unmap without dispatching.
+    buffer_if_->Unmap(bitstream_buffer.data);
+    return false;
+  }
+
+ // Dispatch the bitstream unit to the decoder.
+ success = video_decoder_->Decode(
+ bitstream_buffer,
+ cb_factory_.NewCallback(
+ &VideoDecoderSession::OnBitstreamBufferProcessed,
+ bitstream_buffer_id));
+ // Finally unmap the buffer for this round.
+ buffer_if_->Unmap(bitstream_buffer.data);
+ return success;
+}
+
+void VideoDecoderSession::ChangeState(State to_state) {
+ state_ = to_state;
+}
+
+int32_t VideoDecoderSession::GetUniqueId() {
+  // Not globally unique, but close enough for this use case.
+ return next_id_++;
+}
+
+// Pass-through vertex shader.
+static const char kVertexShader[] =
+ "varying vec2 interp_tc;\n"
+ "\n"
+ "attribute vec4 in_pos;\n"
+ "attribute vec2 in_tc;\n"
+ "\n"
+ "void main() {\n"
+ " interp_tc = in_tc;\n"
+ " gl_Position = in_pos;\n"
+ "}\n";
+
+// Color shader for EGLImage.
+static const char kFragmentShaderEgl[] =
+ "varying vec2 interp_tc;\n"
+ "\n"
+ "uniform sampler2D tex;\n"
+ "\n"
+ "void main() {\n"
+ " gl_FragColor = texture2D(tex, interp_tc);\n"
+ "}\n";
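+
+// The in_pos and in_tc attributes above are fed from the vertex and
+// texture-coordinate buffers set up in GLES2Display::ProgramShaders() below;
+// tex is bound to texture unit 0, where DrawPicture() binds each decoded
+// picture's texture.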
+
+// Buffer size for compile errors.
+static const unsigned int kShaderErrorSize = 4096;
+
+GLES2Display::GLES2Display(pp::Instance* instance, PP_Size size)
+    : pp::Graphics3DClient_Dev(instance),
+      instance_(instance),
+      surface_size_(size),
+      context_(NULL),
+      surface_(NULL),
+      next_id_(1) {}
+
+GLES2Display::~GLES2Display() {}
+
+void GLES2Display::Graphics3DContextLost() {
+ assert(!"GLES2: Unexpectedly lost graphics context");
+}
+
+bool GLES2Display::Initialize() {
+ if (!InitGL(surface_size_.width, surface_size_.height))
+ return false;
+ ProgramShaders();
+ return true;
+}
+
+bool GLES2Display::ProvideGLESPictureBuffers(
+ uint32_t req_num_of_bufs,
+ PP_Size dimensions,
+ std::vector<PP_GLESBuffer_Dev>& gles_buffers) {
+ GLuint texture;
+ for (uint32_t i = 0; i < req_num_of_bufs; i++) {
+ PP_GLESBuffer_Dev picture_buffer;
+    // Generate a texture and bind (effectively allocate) it.
+    gles2_if_->GenTextures(context_->pp_resource(), 1, &texture);
+    gles2_if_->BindTexture(context_->pp_resource(), GL_TEXTURE_2D, texture);
+    picture_buffer.context = 0;  // TODO(vmr): Get proper context id.
+    picture_buffer.texture_id = texture;
+    picture_buffer.info.id = GetUniqueId();
+    picture_buffer.info.size.width = dimensions.width;
+    picture_buffer.info.size.height = dimensions.height;
+ // Add to output vector and store the values into the map for GLES buffers.
+ gles_buffers.push_back(picture_buffer);
+ gles_buffers_[picture_buffer.info.id] = picture_buffer;
+ AssertNoGLError();
+ }
+ return true;
+}
+
+bool GLES2Display::DismissPictureBuffer(int32_t picture_buffer_id) {
+ gles2_if_->DeleteTextures(context_->pp_resource(), 1,
+ &gles_buffers_[picture_buffer_id].texture_id);
+ gles_buffers_.erase(picture_buffer_id);
+ return true;
+}
+
+bool GLES2Display::DrawPicture(const PP_Picture_Dev& picture,
+ pp::CompletionCallback completion_callback) {
+  // The decoder has finished decoding a picture into the texture; now we just
+  // draw that texture to the color buffer and swap the surfaces.
+  // Clear the color buffer.
+ gles2_if_->Clear(context_->pp_resource(), GL_COLOR_BUFFER_BIT |
+ GL_DEPTH_BUFFER_BIT);
+ // Load the texture into texture unit 0.
+ gles2_if_->ActiveTexture(context_->pp_resource(), GL_TEXTURE0);
+ gles2_if_->BindTexture(context_->pp_resource(), GL_TEXTURE_2D,
+ gles_buffers_[picture.picture_buffer_id].texture_id);
+ // Draw the texture.
+ gles2_if_->DrawArrays(context_->pp_resource(), GL_TRIANGLE_STRIP, 0, 4);
+ // Force the execution of pending commands.
+  // TODO(vmr): Do we have to do this? Can we rely on the command buffer to
+  // execute the commands without a Finish call?
+ gles2_if_->Finish(context_->pp_resource());
+ AssertNoGLError();
+
+ int32_t error = surface_->SwapBuffers(completion_callback);
+ if (error != PP_OK)
+ return false;
+
+ AssertNoGLError();
+ return true;
+}
+
+void GLES2Display::AssertNoGLError() {
+ assert(!gles2_if_->GetError(context_->pp_resource()));
+}
+
+bool GLES2Display::InitGL(int width, int height) {
+ assert(width && height);
+ gles2_if_ = static_cast<const struct PPB_OpenGLES2_Dev*>(
+ pp::Module::Get()->GetBrowserInterface(PPB_OPENGLES2_DEV_INTERFACE));
+  // First we need an OpenGL ES context associated with the display the
+  // plugin is rendering to.
+  delete context_;
+  context_ = new pp::Context3D_Dev(*instance_, 0, pp::Context3D_Dev(), NULL);
+  assert(!context_->is_null());
+  // Then we need a surface bound to our fresh context. We draw into this
+  // surface and swap it to refresh the plugin's visible output.
+ int32_t surface_attributes[] = {
+ PP_GRAPHICS3DATTRIB_WIDTH, surface_size_.width,
+ PP_GRAPHICS3DATTRIB_HEIGHT, surface_size_.height,
+ PP_GRAPHICS3DATTRIB_NONE
+ };
+  delete surface_;
+ surface_ = new pp::Surface3D_Dev(*instance_, 0, surface_attributes);
+ assert(!surface_->is_null());
+  int32_t bind_error = context_->BindSurfaces(*surface_, *surface_);
+  if (bind_error != PP_OK) {
+    assert(!"GLES2: Failed to bind surfaces to the context");
+    return false;
+  }
+ AssertNoGLError();
+
+  if (!instance_->BindGraphics(*surface_)) {
+    assert(!"GLES2: Failed to bind the surface to the plugin instance");
+    return false;
+  }
+ // Clear the color buffer with opaque white for starters.
+  gles2_if_->ClearColor(context_->pp_resource(), 1.0, 1.0, 1.0, 1.0);
+ gles2_if_->Clear(context_->pp_resource(), GL_COLOR_BUFFER_BIT);
+ // Set the viewport to match the whole GL window.
+ gles2_if_->Viewport(context_->pp_resource(), 0, 0, surface_size_.width,
+ surface_size_.height);
+ AssertNoGLError();
+ return true;
+}
+
+void GLES2Display::CreateShader(GLuint program, GLenum type,
+ const char* source,
+ int size) {
+ GLuint shader = gles2_if_->CreateShader(context_->pp_resource(), type);
+ gles2_if_->ShaderSource(
+ context_->pp_resource(), shader, 1, &source, &size);
+ gles2_if_->CompileShader(context_->pp_resource(), shader);
+
+ int result = GL_FALSE;
+ gles2_if_->GetShaderiv(
+ context_->pp_resource(), shader, GL_COMPILE_STATUS, &result);
+ if (!result) {
+ char log[kShaderErrorSize];
+ int len = 0;
+ gles2_if_->GetShaderInfoLog(context_->pp_resource(), shader,
+ kShaderErrorSize - 1, &len, log);
+ log[len] = 0;
+ assert(result);
+ }
+ gles2_if_->AttachShader(context_->pp_resource(), program, shader);
+ gles2_if_->DeleteShader(context_->pp_resource(), shader);
+}
+
+void GLES2Display::LinkProgram(const PPB_OpenGLES2_Dev* gles2_if) {
+  gles2_if->LinkProgram(context_->pp_resource(), program_);
+  int result = GL_FALSE;
+  gles2_if->GetProgramiv(context_->pp_resource(), program_, GL_LINK_STATUS,
+                         &result);
+  if (!result) {
+    char log[kShaderErrorSize];
+    int len = 0;
+    gles2_if->GetProgramInfoLog(context_->pp_resource(), program_,
+                                kShaderErrorSize - 1, &len, log);
+    log[len] = 0;
+    assert(result);
+  }
+  gles2_if->UseProgram(context_->pp_resource(), program_);
+}
+
+void GLES2Display::ProgramShaders() {
+ // Vertices for a full screen quad.
+ static const float kVertices[] = {
+ -1.f, 1.f,
+ -1.f, -1.f,
+ 1.f, 1.f,
+ 1.f, -1.f,
+ };
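+  // Drawn as a GL_TRIANGLE_STRIP, the four vertices cover the viewport:
+  //   v0 (-1,  1) ---- v2 (1,  1)
+  //       |         /       |
+  //   v1 (-1, -1) ---- v3 (1, -1)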
+
+ // Texture Coordinates mapping the entire texture for EGL image.
+ static const float kTextureCoordsEgl[] = {
+ 0, 1,
+ 0, 0,
+ 1, 1,
+ 1, 0,
+ };
+ program_ = gles2_if_->CreateProgram(context_->pp_resource());
+
+  // Create the vertex and fragment shaders for the EGL image program.
+ CreateShader(program_, GL_VERTEX_SHADER,
+ kVertexShader, sizeof(kVertexShader));
+ CreateShader(program_, GL_FRAGMENT_SHADER,
+ kFragmentShaderEgl, sizeof(kFragmentShaderEgl));
+ LinkProgram(gles2_if_);
+
+ AssertNoGLError();
+  // Bind parameters: point the "tex" sampler at texture unit 0.
+  gles2_if_->Uniform1i(
+      context_->pp_resource(),
+      gles2_if_->GetUniformLocation(context_->pp_resource(), program_, "tex"),
+      0);
+ gles2_if_->GenBuffers(context_->pp_resource(), 1, &vertex_buffer_);
+ gles2_if_->BindBuffer(context_->pp_resource(), GL_ARRAY_BUFFER,
+ vertex_buffer_);
+ gles2_if_->BufferData(context_->pp_resource(), GL_ARRAY_BUFFER,
+ 8 * sizeof(kVertices[0]), kVertices, GL_STATIC_DRAW);
+
+ AssertNoGLError();
+ int pos_location = gles2_if_->GetAttribLocation(context_->pp_resource(),
+ program_, "in_pos");
+ gles2_if_->EnableVertexAttribArray(context_->pp_resource(), pos_location);
+ gles2_if_->VertexAttribPointer(context_->pp_resource(), pos_location, 2,
+ GL_FLOAT, GL_FALSE, 0, 0);
+
+ AssertNoGLError();
+ gles2_if_->GenBuffers(context_->pp_resource(), 1, &fragment_buffer_);
+ gles2_if_->BindBuffer(context_->pp_resource(), GL_ARRAY_BUFFER,
+ fragment_buffer_);
+ gles2_if_->BufferData(context_->pp_resource(), GL_ARRAY_BUFFER,
+ 8 * sizeof(kTextureCoordsEgl[0]),
+ kTextureCoordsEgl, GL_STATIC_DRAW);
+ AssertNoGLError();
+ int tc_location = gles2_if_->GetAttribLocation(context_->pp_resource(),
+ program_, "in_tc");
+ gles2_if_->EnableVertexAttribArray(context_->pp_resource(), tc_location);
+  // With a buffer bound to GL_ARRAY_BUFFER, the last argument is an offset
+  // into that buffer, not a client-side pointer.
+  gles2_if_->VertexAttribPointer(context_->pp_resource(), tc_location, 2,
+                                 GL_FLOAT, GL_FALSE, 0, 0);
+ AssertNoGLError();
+}
+
+int32_t GLES2Display::GetUniqueId() {
+  // Not globally unique, but close enough for this use case.
+ return next_id_++;
+}
+