Note This document describes multiview support in Oculus Browser starting with version 6.0. For previous versions of the Oculus Browser refer to this document.
Warning: Multiview is an experimental feature and you may see behavior that is different from what is described in this document.
To render VR content, you need to draw the same 3D scene twice; once for the left eye, and again for the right eye.
There is usually only a slight difference between the two rendered views, but the difference is what enables the stereoscopic effect that makes VR work.
With stock WebGL the only option available to a developer is to render to the two eye buffers sequentially — essentially incurring double the application and driver overhead — despite the GPU command streams and render states typically being almost identical.
The OpenGL/WebGL multiview extension addresses this inefficiency by enabling simultaneous rendering to multiple elements of a 2D texture array.
Note: Only CPU-bound experiences will benefit from multiview. Typically a CPU usage reduction of 25% - 50% is possible.
With the multiview extension draw calls are instanced into each corresponding element of the texture array. The vertex program uses a new ViewID
variable to compute per-view values — typically the vertex position and view-dependent variables like reflection.
The formulation of the multiview extension is purposely high-level to allow implementation freedom. On existing hardware, applications and drivers can realize the benefits of a single scene traversal, even if all GPU work is fully duplicated per view.
In WebGL, multiview is exposed via the OVR_multiview2 extension. Only WebGL 2.0 supports this extension; WebGL 1.0 cannot use multiview.
WebGL 2.0 allows developers to explicitly create a framebuffer and attach user’s texture 2D arrays as render targets. Your JavaScript code will be responsible for allocating texture 2D arrays, and creating the multiview framebuffer with proper attachments.
The current definition of the OVR_multiview2 extension lacks one fundamental feature: anti-aliasing (multisampling).
To address this flaw, Oculus Browser also implements its own extension - OCULUS_multiview
. The differences from the OVR_multiview2
are as follows:
OCULUS_multiview
is available out-of-the-box since version 6.0, while the OVR_multiview2
extension was behind a flag (chrome://flags) until version 6.2. Since the Oculus Browser 6.2 release, both extensions are enabled by default (but they can still be disabled via flags, if necessary);OCULUS_multiview
extension includes all the functionality of the OVR_multiview2
, plus multisampling support: void framebufferTextureMultisampleMultiviewOVR(GLenum target, GLenum attachment, WebGLTexture? texture, GLint level, GLsizei samples, GLint baseViewIndex, GLsizei numViews);
A WebGL app can be relatively easily modified to benefit from the extension. First of all, the OCULUS_multiview
or OVR_multiview2
extension should be requested:
// Feature-detect multiview support. Prefer OCULUS_multiview (multiview plus
// multisampling); fall back to OVR_multiview2 (multiview only).
// BUGFIX: the original `var is_multiview, is_multisampled = false;` only
// initialized the second variable — `is_multiview` started out undefined.
var is_multiview = false;
var is_multisampled = false;
var ext = gl.getExtension('OCULUS_multiview');
if (ext) {
  console.log("OCULUS_multiview extension is supported");
  is_multiview = true;
  is_multisampled = true;
} else {
  console.log("OCULUS_multiview extension is NOT supported");
  ext = gl.getExtension('OVR_multiview2');
  if (ext) {
    console.log("OVR_multiview2 extension is supported");
    is_multiview = true;
  } else {
    console.log("Neither OCULUS_multiview nor OVR_multiview2 extensions are supported");
    is_multiview = false;
  }
}
Secondly, you need to allocate the texture 2D arrays (for color and depth buffers), create a framebuffer and attach the texture arrays to it:
// Save the default (back) framebuffer so rendering can be presented later,
// then build a multiview framebuffer whose color and depth/stencil attachments
// are 2-layer texture 2D arrays (layer 0 = left eye, layer 1 = right eye).
var backFbo = gl.getParameter(gl.FRAMEBUFFER_BINDING);
var fbo = null;
if (ext) {
  fbo = gl.createFramebuffer();
  gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, fbo);
  // color texture / attachment
  var colorTexture = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D_ARRAY, colorTexture);
  // width/height are per-eye dimensions; 2 layers, one per view.
  gl.texStorage3D(gl.TEXTURE_2D_ARRAY, 1, gl.RGBA8, width, height, 2);
  // OCULUS_multiview adds the multisampled variant; OVR_multiview2 only has
  // the non-multisampled entry point.
  if (!is_multisampled)
    ext.framebufferTextureMultiviewOVR(gl.DRAW_FRAMEBUFFER, gl.COLOR_ATTACHMENT0, colorTexture, 0, 0, 2);
  else
    ext.framebufferTextureMultisampleMultiviewOVR(gl.DRAW_FRAMEBUFFER, gl.COLOR_ATTACHMENT0, colorTexture, 0, samples, 0, 2);
  // depth texture / attachment
  var depthStencilTex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D_ARRAY, depthStencilTex);
  gl.texStorage3D(gl.TEXTURE_2D_ARRAY, 1, gl.DEPTH32F_STENCIL8, width, height, 2);
  if (!is_multisampled)
    ext.framebufferTextureMultiviewOVR(gl.DRAW_FRAMEBUFFER, gl.DEPTH_STENCIL_ATTACHMENT, depthStencilTex, 0, 0, 2);
  else
    ext.framebufferTextureMultisampleMultiviewOVR(gl.DRAW_FRAMEBUFFER, gl.DEPTH_STENCIL_ATTACHMENT, depthStencilTex, 0, samples, 0, 2);
}
Note, the width
and height
here are the dimensions of one eye buffer. They can be calculated as follows:
// Query per-eye render-target sizes from the VRDisplay. With multiview each
// texture-array layer holds one eye, so width covers a single eye; without
// multiview both eyes share one surface side by side, hence the factor of 2.
// BUGFIX: leftEye/rightEye were assigned without a declaration, creating
// implicit globals (a ReferenceError in strict mode).
let leftEye = vrDisplay.getEyeParameters("left");
let rightEye = vrDisplay.getEyeParameters("right");
let width = Math.max(leftEye.renderWidth, rightEye.renderWidth) * ((is_multiview) ? 1 : 2);
let height = Math.max(leftEye.renderHeight, rightEye.renderHeight);
To render into the created multiview framebuffer the user’s code may do the following:
// Per-frame render path while presenting to the headset. Multiview path: one
// instanced pass into the layered FBO, then a blit of both layers into the
// back buffer as side-by-side stereo. Fallback path: two sequential passes,
// one half-width viewport per eye.
// NOTE(review): this snippet is truncated — the body of the final else branch
// (regular 2D rendering) and its closing brace are not shown.
if (vrDisplay.isPresenting) {
  gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, fbo);
  gl.enable(gl.SCISSOR_TEST);
  if (is_multiview) {
    let projections = [frameData.leftProjectionMatrix, frameData.rightProjectionMatrix];
    let viewMats = [frameData.leftViewMatrix, frameData.rightViewMatrix];
    // Per-eye dimensions (one texture-array layer per eye).
    let width = Math.max(leftEye.renderWidth, rightEye.renderWidth);
    let height = Math.max(leftEye.renderHeight, rightEye.renderHeight);
    gl.viewport(0, 0, width, height);
    gl.scissor(0, 0, width, height);
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
    cubeSea.render(projections, viewMats, stats, /*multiview*/ true);
    // Now we need to copy rendering from the texture2D array into the actual back
    // buffer to present it on the device
    gl.invalidateFramebuffer(gl.DRAW_FRAMEBUFFER, [ gl.DEPTH_STENCIL_ATTACHMENT ]);
    gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, backFbo);
    // This function just copies two layers of the texture2D array as side-by-side
    // stereo into the back buffer.
    stereoUtil.blit(colorTexture, 0, 0, 1, 1, width*2, height);
  } else {
    // no multiview, regular side-by-side stereo rendering....
    gl.disable(gl.SCISSOR_TEST);
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
    // Note that the viewports use the eyeWidth/height rather than the
    // canvas width and height.
    gl.viewport(0, 0, webglCanvas.width * 0.5, webglCanvas.height);
    cubeSea.render(frameData.leftProjectionMatrix, frameData.leftViewMatrix, stats);
    gl.viewport(webglCanvas.width * 0.5, 0, webglCanvas.width * 0.5, webglCanvas.height);
    cubeSea.render(frameData.rightProjectionMatrix, frameData.rightViewMatrix, stats);
  }
  gl.disable(gl.SCISSOR_TEST);
  // If we're currently presenting to the VRDisplay we need to
  // explicitly indicate we're done rendering.
  vrDisplay.submitFrame();
} else {
  //.... regular 2D rendering if not presenting .....
Refer to the Code sample section for fully functional multiview example(s).
Note, the multiview extension may be used without WebVR / WebXR; all that is necessary is to provide correct view and projection matrices and set up proper viewports.
If you are converting WebGL 1.0 to WebGL 2.0, you should use ES 3.00 shaders: only those support multiview.
The following changes might be necessary for vertex shaders in a multiview-enabled experience:
An example WebGL 1.0 vertex shader...
// WebGL 1.0 (ES 1.00) vertex shader: single view, one projection/model-view pair.
uniform mat4 projectionMat;
uniform mat4 modelViewMat;
attribute vec3 position;
attribute vec2 texCoord;
varying vec2 vTexCoord;
void main() {
  vTexCoord = texCoord;
  gl_Position = projectionMat * modelViewMat * vec4( position, 1.0 );
}
...and the equivalent multiview ES 3.00 shader:
// ES 3.00 multiview vertex shader: compiled once, executed for both views in a
// single instanced draw. gl_ViewID_OVR selects the per-view matrices
// (0u = left, 1u = right). Comments before #version are permitted by the spec.
#version 300 es
#extension GL_OVR_multiview2 : require
layout(num_views=2) in;
uniform mat4 leftProjectionMat;
uniform mat4 leftModelViewMat;
uniform mat4 rightProjectionMat;
uniform mat4 rightModelViewMat;
in vec3 position;
in vec2 texCoord;
out vec2 vTexCoord;
void main() {
  vTexCoord = texCoord;
  // Pick the matrix pair for the view currently being rendered.
  mat4 m = gl_ViewID_OVR == 0u ? (leftProjectionMat * leftModelViewMat) : (rightProjectionMat * rightModelViewMat);
  gl_Position = m * vec4( position, 1.0 );
}
The fragment (pixel) shader should be modified to comply with ES 3.00 spec as well, even though the shader’s logic remains untouched. (Both vertex and fragment shaders must be written using the same specification, otherwise shaders won’t link.)
The main difference is the absence of gl_FragColor
and necessity to use in
and out
modifiers. Use explicit out
declaration instead of gl_FragColor
.
An example WebGL 1.0 fragment shader...
// WebGL 1.0 (ES 1.00) fragment shader: writes to the built-in gl_FragColor.
precision mediump float;
uniform sampler2D diffuse;
varying vec2 vTexCoord;
void main() {
  vec4 color = texture2D(diffuse, vTexCoord);
  gl_FragColor = color;
}
...and the equivalent multiview ES 3.00 shader:
// ES 3.00 fragment shader: gl_FragColor is gone — the result goes to an
// explicit 'out' variable, and texture2D() is replaced by overloaded texture().
#version 300 es
precision mediump float;
uniform sampler2D diffuse;
in vec2 vTexCoord;
out vec4 color;
void main() {
  color = texture(diffuse, vTexCoord);
}
Hint: After the conversion, please see console output in the browser developer tools: there will be a detailed error message if the converted shaders have issues.
Cubes (WebGL 2.0) - Source code
// VRStereoUtil: helper that blits the two layers of a multiview texture 2D
// array into the back buffer as side-by-side stereo (layer 0 = left half,
// layer 1 = right half). Exposed as window.VRStereoUtil.
window.VRStereoUtil = (function () {
  "use strict";

  // Plain (non-multiview) vertex shader. NOTE(review): VS and FS below are
  // defined but never compiled by StereoUtil — presumably kept for the
  // non-multiview fallback/testing path; confirm before removing.
  var VS = [
    "uniform mat4 projectionMat;",
    "uniform mat4 modelViewMat;",
    "attribute vec3 position;",
    "attribute vec2 texCoord;",
    "varying vec2 vTexCoord;",
    "void main() {",
    " vTexCoord = texCoord;",
    " gl_Position = projectionMat * modelViewMat * vec4( position, 1.0 );",
    "}",
  ].join("\n");

  // Multiview blit vertex shader: generates two textured quads (12 vertices)
  // purely from gl_VertexID — no vertex attributes are needed.
  var VSMultiview = [
    "#version 300 es",
    "uniform vec2 u_offset;",
    "uniform vec2 u_scale;",
    "out mediump vec3 v_texcoord;",
    "void main() {",
    // offset of eye quad in -1..1 space
    " const float eye_offset_x[12] = float[12] (",
    " 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,",
    " 1.0, 1.0, 1.0, 1.0, 1.0, 1.0",
    " );",
    // xy - coords of the quad, normalized to 0..1
    // xy - UV of the source texture coordinate.
    // z - texture layer (eye) index - 0 or 1.
    " const vec3 quad_positions[12] = vec3[12]",
    " (",
    " vec3(0.0, 0.0, 0.0),",
    " vec3(1.0, 0.0, 0.0),",
    " vec3(0.0, 1.0, 0.0),",
    " vec3(0.0, 1.0, 0.0),",
    " vec3(1.0, 0.0, 0.0),",
    " vec3(1.0, 1.0, 0.0),",
    " vec3(0.0, 0.0, 1.0),",
    " vec3(1.0, 0.0, 1.0),",
    " vec3(0.0, 1.0, 1.0),",
    " vec3(0.0, 1.0, 1.0),",
    " vec3(1.0, 0.0, 1.0),",
    " vec3(1.0, 1.0, 1.0)",
    " );",
    " const vec2 pos_scale = vec2(0.5, 1.0);",
    " vec2 eye_offset = vec2(eye_offset_x[gl_VertexID], 0.0);",
    " gl_Position = vec4(((quad_positions[gl_VertexID].xy * u_scale + u_offset) * pos_scale * 2.0) - 1.0 + eye_offset, 0.0, 1.0);",
    " v_texcoord = vec3(quad_positions[gl_VertexID].xy * u_scale + u_offset, quad_positions[gl_VertexID].z);",
    "}",
  ].join("\n");

  // Plain fragment shader (unused by StereoUtil, see note on VS above).
  var FS = [
    "precision mediump float;",
    "uniform sampler2D diffuse;",
    "varying vec2 vTexCoord;",
    "void main() {",
    " vec4 color = texture2D(diffuse, vTexCoord);",
    " color.r = 1.0; color.g *= 0.8; color.b *= 0.7;",
    // indicate that Multiview is not in use, for testing
    " gl_FragColor = color;",
    "}",
  ].join("\n");

  // Multiview blit fragment shader: samples the requested layer of the
  // texture 2D array (layer index comes in v_texcoord.z).
  var FSMultiview = [
    "#version 300 es",
    "uniform mediump sampler2DArray u_source_texture;",
    "in mediump vec3 v_texcoord;",
    "out mediump vec4 output_color;",
    "void main()",
    "{",
    " output_color = texture(u_source_texture, v_texcoord);",
    "}",
  ].join("\n");

  /**
   * Compiles and links the multiview blit program.
   * @param {WebGL2RenderingContext} gl
   */
  var StereoUtil = function (gl) {
    this.gl = gl;
    // Empty VAO: the blit shader derives all geometry from gl_VertexID.
    this.vao = gl.createVertexArray();
    console.log("compiling multiview shader");
    this.program_multiview = new WGLUProgram(gl);
    this.program_multiview.attachShaderSource(VSMultiview, gl.VERTEX_SHADER);
    this.program_multiview.attachShaderSource(FSMultiview, gl.FRAGMENT_SHADER);
    // NOTE(review): v_texcoord is an `out` varying, not a vertex attribute;
    // binding it here looks like a leftover and is harmless — confirm.
    this.program_multiview.bindAttribLocation({
      v_texcoord: 0,
    });
    this.program_multiview.link();
  };

  /**
   * Copies the two layers of a texture 2D array into the currently bound draw
   * framebuffer as side-by-side stereo.
   * @param source_texture       TEXTURE_2D_ARRAY with 2 layers (one per eye).
   * @param source_rect_uv_*     sub-rect of each layer to sample, in 0..1 UV.
   * @param dest_surface_*       destination surface dimensions in pixels.
   */
  StereoUtil.prototype.blit = function (
      source_texture,
      source_rect_uv_x,
      source_rect_uv_y,
      source_rect_uv_width,
      source_rect_uv_height,
      dest_surface_width,
      dest_surface_height) {
    let gl = this.gl;
    // BUGFIX: was `let program = this.program;` — a property that is never
    // assigned — before being overwritten below. Use the multiview program
    // directly.
    let program = this.program_multiview;

    gl.activeTexture(gl.TEXTURE0);
    gl.bindTexture(gl.TEXTURE_2D_ARRAY, source_texture);
    program.use();

    // Render to the destination texture, sampling from the scratch texture
    gl.disable(gl.SCISSOR_TEST);
    gl.disable(gl.DEPTH_TEST);
    gl.disable(gl.STENCIL_TEST);
    gl.colorMask(true, true, true, true);
    gl.depthMask(false);
    gl.viewport(0, 0, dest_surface_width, dest_surface_height);

    gl.uniform2f(program.uniform.u_scale, source_rect_uv_width, source_rect_uv_height);
    gl.uniform2f(program.uniform.u_offset, source_rect_uv_x, source_rect_uv_y);
    gl.uniform1i(program.uniform.u_source_texture, 0);

    // Start setting up VAO
    gl.bindVertexArray(this.vao);
    gl.drawArrays(gl.TRIANGLES, 0, 12);

    // Restore the depth state touched above.
    gl.enable(gl.DEPTH_TEST);
    gl.depthMask(true);
  };

  return StereoUtil;
})();