// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/gl/gl_context_cgl.h"

#include <OpenGL/CGLRenderers.h>
#include <OpenGL/CGLTypes.h>
#include <vector>

#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_switching_manager.h"

namespace gfx {

namespace {

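// True when the pixel format chosen below permits moving a context between
// renderers (GPUs); set as a side effect of GetPixelFormat().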
bool g_support_renderer_switching;

struct CGLRendererInfoObjDeleter {
  void operator()(CGLRendererInfoObj* x) {
    if (x)
      CGLDestroyRendererInfo(*x);
  }
};

}  // namespace

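// Returns the pixel format shared by every context, creating and caching it
// on first use. Returns NULL if CGLChoosePixelFormat fails.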
static CGLPixelFormatObj GetPixelFormat() {
  static CGLPixelFormatObj format;
  if (format)
    return format;
  std::vector<CGLPixelFormatAttribute> attribs;
  // If the system supports dual GPUs, then allow offline renderers for every
  // context, so that they can all be in the same share group.
  if (ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus()) {
    attribs.push_back(kCGLPFAAllowOfflineRenderers);
    g_support_renderer_switching = true;
  }
  if (GetGLImplementation() == kGLImplementationAppleGL) {
    attribs.push_back(kCGLPFARendererID);
    attribs.push_back((CGLPixelFormatAttribute) kCGLRendererGenericFloatID);
    g_support_renderer_switching = false;
  }
  attribs.push_back((CGLPixelFormatAttribute) 0);

  GLint num_virtual_screens;
  if (CGLChoosePixelFormat(&attribs.front(),
                           &format,
                           &num_virtual_screens) != kCGLNoError) {
    LOG(ERROR) << "Error choosing pixel format.";
    return NULL;
  }
  if (!format) {
    LOG(ERROR) << "CGLChoosePixelFormat returned a null pixel format.";
    return NULL;
  }
  DCHECK_NE(num_virtual_screens, 0);
  return format;
}

GLContextCGL::GLContextCGL(GLShareGroup* share_group)
    : GLContextReal(share_group),
      context_(NULL),
      gpu_preference_(PreferIntegratedGpu),
      discrete_pixelformat_(NULL),
      screen_(-1),
      renderer_id_(-1),
      safe_to_force_gpu_switch_(false) {
}

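// Creates the underlying CGL context, sharing objects with the share group's
// existing context when there is one. When the discrete GPU is required, a
// pixel format requiring that GPU is created first and held until Destroy().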
bool GLContextCGL::Initialize(GLSurface* compatible_surface,
                              GpuPreference gpu_preference) {
  DCHECK(compatible_surface);

  gpu_preference = ui::GpuSwitchingManager::GetInstance()->AdjustGpuPreference(
      gpu_preference);

  GLContextCGL* share_context = share_group() ?
      static_cast<GLContextCGL*>(share_group()->GetContext()) : NULL;

  CGLPixelFormatObj format = GetPixelFormat();
  if (!format)
    return false;

  // If using the discrete gpu, create a pixel format requiring it before we
  // create the context.
  if (!ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus() ||
      gpu_preference == PreferDiscreteGpu) {
    std::vector<CGLPixelFormatAttribute> discrete_attribs;
    discrete_attribs.push_back((CGLPixelFormatAttribute) 0);
    GLint num_pixel_formats;
    if (CGLChoosePixelFormat(&discrete_attribs.front(),
                             &discrete_pixelformat_,
                             &num_pixel_formats) != kCGLNoError) {
      LOG(ERROR) << "Error choosing pixel format.";
      return false;
    }
    // The renderer might be switched after this, so ignore the saved ID.
    share_group()->SetRendererID(-1);
  }

  CGLError res = CGLCreateContext(
      format,
      share_context ?
          static_cast<CGLContextObj>(share_context->GetHandle()) : NULL,
      reinterpret_cast<CGLContextObj*>(&context_));
  if (res != kCGLNoError) {
    LOG(ERROR) << "Error creating context.";
    Destroy();
    return false;
  }

  gpu_preference_ = gpu_preference;
  return true;
}

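// Destroys the CGL context and releases any discrete pixel format created in
// Initialize(); the release may be deferred (see the comment below).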
void GLContextCGL::Destroy() {
  if (discrete_pixelformat_) {
    if (base::MessageLoop::current() != NULL) {
      // Delay releasing the pixel format for 10 seconds to reduce the number
      // of unnecessary GPU switches.
      base::MessageLoop::current()->PostDelayedTask(
          FROM_HERE,
          base::Bind(&CGLReleasePixelFormat, discrete_pixelformat_),
          base::TimeDelta::FromSeconds(10));
    } else {
      CGLReleasePixelFormat(discrete_pixelformat_);
    }
    discrete_pixelformat_ = NULL;
  }
  if (context_) {
    CGLDestroyContext(static_cast<CGLContextObj>(context_));
    context_ = NULL;
  }
}

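// Moves the context to the virtual screen whose renderer matches the ID
// recorded in the share group, when renderer switching is supported and the
// context is not pinned to the discrete GPU. Returns false only when the
// pixel format cannot be queried.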
bool GLContextCGL::ForceGpuSwitchIfNeeded() {
  DCHECK(context_);

  // The call to CGLSetVirtualScreen can hang on some AMD drivers:
  // http://crbug.com/227228
  if (safe_to_force_gpu_switch_) {
    int renderer_id = share_group()->GetRendererID();
    int screen;
    CGLGetVirtualScreen(static_cast<CGLContextObj>(context_), &screen);

    if (g_support_renderer_switching &&
        !discrete_pixelformat_ && renderer_id != -1 &&
        (screen != screen_ || renderer_id != renderer_id_)) {
      // Attempt to find a virtual screen that's using the requested renderer,
      // and switch the context to use that screen. Don't attempt to switch if
      // the context requires the discrete GPU.
      CGLPixelFormatObj format = GetPixelFormat();
      int virtual_screen_count;
      if (CGLDescribePixelFormat(format, 0, kCGLPFAVirtualScreenCount,
                                 &virtual_screen_count) != kCGLNoError)
        return false;

      for (int i = 0; i < virtual_screen_count; ++i) {
        int screen_renderer_id;
        if (CGLDescribePixelFormat(format, i, kCGLPFARendererID,
                                   &screen_renderer_id) != kCGLNoError)
          return false;

        screen_renderer_id &= kCGLRendererIDMatchingMask;
        if (screen_renderer_id == renderer_id) {
          CGLSetVirtualScreen(static_cast<CGLContextObj>(context_), i);
          screen_ = i;
          break;
        }
      }
      renderer_id_ = renderer_id;
    }
  }
  return true;
}

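// Makes this context current against |surface|, forcing a GPU switch first
// if one is pending. If any step fails after CGLSetCurrentContext succeeds,
// |release_current| releases the context again so a partially initialized
// context is never left current.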
bool GLContextCGL::MakeCurrent(GLSurface* surface) {
  DCHECK(context_);

  if (!ForceGpuSwitchIfNeeded())
    return false;

  if (IsCurrent(surface))
    return true;

  ScopedReleaseCurrent release_current;
  TRACE_EVENT0("gpu", "GLContextCGL::MakeCurrent");

  if (CGLSetCurrentContext(
      static_cast<CGLContextObj>(context_)) != kCGLNoError) {
    LOG(ERROR) << "Unable to make gl context current.";
    return false;
  }

  // Set this as soon as the context is current, since we might call into GL.
  SetRealGLApi();

  SetCurrent(surface);
  if (!InitializeDynamicBindings()) {
    return false;
  }

  if (!surface->OnMakeCurrent(this)) {
    LOG(ERROR) << "Unable to make gl surface current.";
    return false;
  }

  release_current.Cancel();
  return true;
}

void GLContextCGL::ReleaseCurrent(GLSurface* surface) {
  if (!IsCurrent(surface))
    return;

  SetCurrent(NULL);
  CGLSetCurrentContext(NULL);
}

bool GLContextCGL::IsCurrent(GLSurface* surface) {
  bool native_context_is_current = CGLGetCurrentContext() == context_;

  // If our context is current then our notion of which GLContext is
  // current must be correct. On the other hand, third-party code
  // using OpenGL might change the current context.
  DCHECK(!native_context_is_current || (GetRealCurrent() == this));

  return native_context_is_current;
}

void* GLContextCGL::GetHandle() {
  return context_;
}

void GLContextCGL::OnSetSwapInterval(int interval) {
  DCHECK(IsCurrent(NULL));
}

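// Looks up the video memory of the renderer this context currently runs on:
// the current renderer ID is matched against the renderer list returned by
// CGLQueryRendererInfo, and that renderer's kCGLRPVideoMemory is reported.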
bool GLContextCGL::GetTotalGpuMemory(size_t* bytes) {
  DCHECK(bytes);
  *bytes = 0;

  CGLContextObj context = static_cast<CGLContextObj>(context_);
  if (!context)
    return false;

  // Retrieve the current renderer ID.
  GLint current_renderer_id = 0;
  if (CGLGetParameter(context,
                      kCGLCPCurrentRendererID,
                      &current_renderer_id) != kCGLNoError)
    return false;

  // Iterate through the list of all renderers.
  GLuint display_mask = static_cast<GLuint>(-1);
  CGLRendererInfoObj renderer_info = NULL;
  GLint num_renderers = 0;
  if (CGLQueryRendererInfo(display_mask,
                           &renderer_info,
                           &num_renderers) != kCGLNoError)
    return false;

  scoped_ptr<CGLRendererInfoObj,
             CGLRendererInfoObjDeleter> scoper(&renderer_info);

  for (GLint renderer_index = 0;
       renderer_index < num_renderers;
       ++renderer_index) {
    // Skip renderers other than the current one.
    GLint renderer_id = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPRendererID,
                            &renderer_id) != kCGLNoError)
      continue;
    if (renderer_id != current_renderer_id)
      continue;
    // Retrieve the video memory for the renderer.
    GLint video_memory = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPVideoMemory,
                            &video_memory) != kCGLNoError)
      continue;
    *bytes = video_memory;
    return true;
  }

  return false;
}

void GLContextCGL::SetSafeToForceGpuSwitch() {
  safe_to_force_gpu_switch_ = true;
}

GLContextCGL::~GLContextCGL() {
  Destroy();
}

GpuPreference GLContextCGL::GetGpuPreference() {
  return gpu_preference_;
}

}  // namespace gfx