Oculus Quest Development

All Oculus Quest developers MUST PASS the concept review prior to gaining publishing access to the Quest Store and additional resources. Submit a concept document for review as early in your Quest application development cycle as possible. For additional information and context, please see Submitting Your App to the Oculus Quest Store.

Multiview WebGL Rendering

Warning: Multiview is an experimental feature and you may see behavior that is different from what is described in this document.

To render VR content, you need to draw the same 3D scene twice; once for the left eye, and again for the right eye. There is usually only a slight difference between the two rendered views, but the difference is what enables the stereoscopic effect that makes VR work. By default in WebGL, the only option available is to render to the two eye buffers sequentially — essentially incurring double the application and driver overhead — despite the GPU command streams and render states typically being almost identical.

The WebGL multiview extension addresses this inefficiency by enabling simultaneous rendering to multiple elements of a 2D texture array.

Note: Only CPU-bound experiences will benefit from multiview. Often, a CPU usage reduction of 25% - 50% is possible.

Multiview Design

With the multiview extension draw calls are instanced into each corresponding element of the texture array. The vertex program uses a new ViewID variable to compute per-view values — typically the vertex position and view-dependent variables like reflection.

The formulation of the multiview extension is purposely high-level to allow implementation freedom. On existing hardware, applications and drivers can realize the benefits of a single scene traversal, even if all GPU work is fully duplicated per view.

In WebGL, multiview is exposed via the OVR_multiview2 extension. Only WebGL 2.0 supports this extension; WebGL 1.0 cannot use multiview.

Using Multiview in WebGL 2.0

WebGL 2.0 allows developers to explicitly create a framebuffer and attach the user's texture 2D arrays as render targets. Your JavaScript code will be responsible for allocating the texture 2D arrays and creating the multiview framebuffer with the proper attachments.

The current definition of the OVR_multiview2 extension lacks one fundamental feature: anti-aliasing (multisampling).

To address this flaw, Oculus Browser also implements its own extension - OCULUS_multiview. The differences from the OVR_multiview2 are as follows:

  • The OCULUS_multiview is available out-of-the-box, while the OVR_multiview2 extension is behind the flag (chrome://flags). The latter extension is not enabled by default because it is in the ‘Draft’ state;
  • The OCULUS_multiview extension includes all the functionality of the OVR_multiview2, plus multisampling support:
      void framebufferTextureMultisampleMultiviewOVR(GLenum target, GLenum attachment,
                                            WebGLTexture? texture, GLint level,
                                            GLsizei samples,
                                            GLint baseViewIndex,
                                            GLsizei numViews);

A WebGL app can be modified relatively easily to benefit from the extension. First of all, the OCULUS_multiview or OVR_multiview2 extension should be requested:

// Feature detection: prefer OCULUS_multiview (multiview + multisampling),
// fall back to the draft OVR_multiview2 (multiview only).
// Note: `var a, b = false;` would leave `a` undefined, so each flag is
// initialized explicitly.
var is_multiview = false;
var is_multisampled = false;

var ext = gl.getExtension('OCULUS_multiview');
if (ext) {
  console.log("OCULUS_multiview extension is supported");
  is_multiview = true;
  is_multisampled = true;
} else {
  console.log("OCULUS_multiview extension is NOT supported");
  ext = gl.getExtension('OVR_multiview2');
  if (ext) {
    console.log("OVR_multiview2 extension is supported");
    is_multiview = true;
  } else {
    console.log("Neither OCULUS_multiview nor OVR_multiview2 extensions are supported");
    is_multiview = false;
  }
}

Secondly, you need to allocate the texture 2D arrays (for the color and depth buffers), create a framebuffer, and attach the texture arrays to it:

// Remember the default (back) framebuffer so we can blit to it later,
// then create the multiview framebuffer with two-layer texture 2D
// arrays (one layer per eye) as color and depth/stencil attachments.
var backFbo = gl.getParameter(gl.FRAMEBUFFER_BINDING);
var fbo = null;
if (ext) {
  fbo = gl.createFramebuffer();
  gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, fbo);

  // color texture / attachment
  var colorTexture = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D_ARRAY, colorTexture);
  gl.texStorage3D(gl.TEXTURE_2D_ARRAY, 1, gl.RGBA8, width, height, 2);
  if (!is_multisampled) {
    // OVR_multiview2 path: no multisampling available.
    ext.framebufferTextureMultiviewOVR(gl.DRAW_FRAMEBUFFER, gl.COLOR_ATTACHMENT0, colorTexture, 0, 0, 2);
  } else {
    // OCULUS_multiview path: attach with `samples` for anti-aliasing.
    ext.framebufferTextureMultisampleMultiviewOVR(gl.DRAW_FRAMEBUFFER, gl.COLOR_ATTACHMENT0, colorTexture, 0, samples, 0, 2);
  }

  // depth texture / attachment
  var depthStencilTex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D_ARRAY, depthStencilTex);
  gl.texStorage3D(gl.TEXTURE_2D_ARRAY, 1, gl.DEPTH32F_STENCIL8, width, height, 2);
  if (!is_multisampled) {
    ext.framebufferTextureMultiviewOVR(gl.DRAW_FRAMEBUFFER, gl.DEPTH_STENCIL_ATTACHMENT, depthStencilTex, 0, 0, 2);
  } else {
    ext.framebufferTextureMultisampleMultiviewOVR(gl.DRAW_FRAMEBUFFER, gl.DEPTH_STENCIL_ATTACHMENT, depthStencilTex, 0, samples, 0, 2);
  }
}

Note: the width and height here are the dimensions of one eye buffer. They can be calculated as follows:

// Query per-eye render target sizes from the VR display. With
// multiview each texture array layer holds one eye, so the buffer is
// single-eye width; without multiview both eyes share one side-by-side
// buffer, so the width is doubled.
const leftEye = vrDisplay.getEyeParameters("left");
const rightEye = vrDisplay.getEyeParameters("right");

let width = Math.max(leftEye.renderWidth, rightEye.renderWidth) * ((is_multiview) ? 1 : 2);
let height = Math.max(leftEye.renderHeight, rightEye.renderHeight);

Render loop

To render into the created multiview framebuffer the user’s code may do the following:

if (vrDisplay.isPresenting) {

  gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, fbo);

  if (is_multiview) {
    // Both eyes' matrices are passed together; the vertex shader picks
    // the correct pair via gl_ViewID_OVR.
    let projections = [frameData.leftProjectionMatrix, frameData.rightProjectionMatrix];
    let viewMats = [frameData.leftViewMatrix, frameData.rightViewMatrix];
    let width = Math.max(leftEye.renderWidth, rightEye.renderWidth);
    let height = Math.max(leftEye.renderHeight, rightEye.renderHeight);
    gl.viewport(0, 0, width, height);
    gl.scissor(0, 0, width, height);
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    cubeSea.render(projections, viewMats, stats, /*multiview*/ true);

    // Now we need to copy rendering from the texture2D array into the actual back
    // buffer to present it on the device. Depth is no longer needed, so
    // let the driver discard it rather than resolve it.
    gl.invalidateFramebuffer(gl.DRAW_FRAMEBUFFER, [ gl.DEPTH_STENCIL_ATTACHMENT ]);

    gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, backFbo);

    // This function just copies two layers of the texture2D array as side-by-side
    // stereo into the back buffer.
    stereoUtil.blit(colorTexture, 0, 0, 1, 1, width * 2, height);

  } else {
    // no multiview, regular side-by-side stereo rendering....
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
    // Note that the viewports use the eyeWidth/height rather than the
    // canvas width and height.
    gl.viewport(0, 0, webglCanvas.width * 0.5, webglCanvas.height);
    cubeSea.render(frameData.leftProjectionMatrix, frameData.leftViewMatrix, stats);

    gl.viewport(webglCanvas.width * 0.5, 0, webglCanvas.width * 0.5, webglCanvas.height);
    cubeSea.render(frameData.rightProjectionMatrix, frameData.rightViewMatrix, stats);
  }

  // If we're currently presenting to the VRDisplay we need to
  // explicitly indicate we're done rendering.
  vrDisplay.submitFrame();
} else {
  //.... regular 2D rendering if not presenting .....
}

Refer to the Code sample section for fully functional multiview example(s).

Note, the multiview extension may be used without WebXR; you just need to provide correct view and projection matrices and set up proper viewports.

Changes in shaders

If you are converting WebGL 1.0 to WebGL 2.0, you should use ES 3.00 shaders: only those support multiview.

The following changes might be necessary for vertex shaders in a multiview-enabled experience:

  • #version 300 es should be added at the top of the shader code;
  • GL_OVR_multiview extension should be requested on the second line: #extension GL_OVR_multiview : require
  • layout(num_views=2) in; must be provided on the following line;
  • in order to convert a WebGL 1.0 shader to ES 3.00, all attribute entries must be changed to in, and all varying entries to out:
    • in vec3 position;
    • in vec2 texCoord;
    • out vec2 vTexCoord;
  • Both left and right projection / model matrices must be provided as uniforms:
    • uniform mat4 leftProjectionMat;
    • uniform mat4 leftModelViewMat;
    • uniform mat4 rightProjectionMat;
    • uniform mat4 rightModelViewMat;
  • A built-in view identifier - gl_ViewID_OVR - should be used to determine which matrix set - left or right to use:
    • mat4 m = gl_ViewID_OVR == 0u ? (leftProjectionMat * leftModelViewMat) : (rightProjectionMat * rightModelViewMat);
    • The gl_ViewID_OVR is of unsigned int type.

An example WebGL 1.0 vertex shader...

// Basic WebGL 1.0 (ES 1.00) vertex shader: single view, one
// projection/model-view matrix pair.
uniform mat4 projectionMat;
uniform mat4 modelViewMat;
attribute vec3 position;
attribute vec2 texCoord;
varying vec2 vTexCoord;

void main() {
  vTexCoord = texCoord;
  gl_Position = projectionMat * modelViewMat * vec4( position, 1.0 );
}

...and the equivalent multiview ES 3.00 shader:

#version 300 es
#extension GL_OVR_multiview : require
// Render two views (left/right eye) per draw call.
layout(num_views=2) in;
uniform mat4 leftProjectionMat;
uniform mat4 leftModelViewMat;
uniform mat4 rightProjectionMat;
uniform mat4 rightModelViewMat;
in vec3 position;
in vec2 texCoord;
out vec2 vTexCoord;

void main() {
  vTexCoord = texCoord;
  // gl_ViewID_OVR (unsigned int) selects the per-eye matrix set:
  // 0u = left view, 1u = right view.
  mat4 m = gl_ViewID_OVR == 0u ? (leftProjectionMat * leftModelViewMat) :
                                 (rightProjectionMat * rightModelViewMat);
  gl_Position = m * vec4( position, 1.0 );
}

The fragment (pixel) shader should be modified to comply with ES 3.00 spec as well, even though the shader’s logic remains untouched. (Both vertex and fragment shaders must be written using the same specification, otherwise shaders won’t link.)

The main difference is absence of gl_FragColor and necessity to use in and out modifiers. Use explicit out declaration instead of gl_FragColor.

An example WebGL 1.0 fragment shader...

// Basic WebGL 1.0 (ES 1.00) fragment shader: samples a 2D texture and
// writes it via gl_FragColor.
precision mediump float;
uniform sampler2D diffuse;
varying vec2 vTexCoord;

void main() {
  vec4 color = texture2D(diffuse, vTexCoord);
  gl_FragColor = color;
}

...and the equivalent multiview ES 3.00 shader:

#version 300 es
precision mediump float;
uniform sampler2D diffuse;
in vec2 vTexCoord;
// ES 3.00 has no gl_FragColor; an explicit `out` variable is required.
out vec4 color;

void main() {
  // texture() replaces the ES 1.00 texture2D() built-in.
  color = texture(diffuse, vTexCoord);
}

Note: After the conversion, please see console output in the browser developer tools: there will be a detailed error message if the converted shaders have issues.

Multi-view WebVR code example

Cubes (WebGL 2.0) - Source code


Source code for blitting texture2D array into the side-by-side framebuffer

// VRStereoUtil: blits the two layers of a texture2D array into the
// back buffer as side-by-side stereo.
//
// NOTE(review): this listing appears truncated by extraction — the
// shader-source arrays are missing their closing "].join()" lines,
// several bodies are missing closing braces, and blit()'s parameter
// list shows only its final parameter. Judging from the call site
// earlier in this document — stereoUtil.blit(colorTexture, 0, 0, 1, 1,
// width*2, height) — the full signature is presumably
// (source_texture, source_rect_uv_x, source_rect_uv_y,
//  source_rect_uv_width, source_rect_uv_height, dest_surface_width,
//  dest_surface_height) — confirm against the linked
// "Cubes (WebGL 2.0)" sample source.
window.VRStereoUtil = (function () {
  "use strict";

  // WebGL 1.0-style passthrough vertex shader (non-multiview path).
  var VS = [
    "uniform mat4 projectionMat;",
    "uniform mat4 modelViewMat;",
    "attribute vec3 position;",
    "attribute vec2 texCoord;",
    "varying vec2 vTexCoord;",

    "void main() {",
    "  vTexCoord = texCoord;",
    "  gl_Position = projectionMat * modelViewMat * vec4( position, 1.0 );",

  // ES 3.00 vertex shader for the multiview blit. It draws 12 vertices
  // (two quads, one per eye) without any vertex buffers, deriving both
  // position and texture-array coordinate from gl_VertexID.
  var VSMultiview = [
      "#version 300 es",
      "uniform vec2 u_offset;",
      "uniform vec2 u_scale;",
      "out mediump vec3 v_texcoord;",

      "void main() {",
      // offset of eye quad in -1..1 space
      "    const float eye_offset_x[12] = float[12] (",
      "        0.0, 0.0, 0.0, 0.0, 0.0, 0.0,",
      "        1.0, 1.0, 1.0, 1.0, 1.0, 1.0",
      "    );",
      //  xy - coords of the quad, normalized to 0..1
      //  xy  - UV of the source texture coordinate.
      //  z   - texture layer (eye) index - 0 or 1.
      "    const vec3 quad_positions[12] = vec3[12]",
      "    (",
      "        vec3(0.0, 0.0, 0.0),",
      "        vec3(1.0, 0.0, 0.0),",
      "        vec3(0.0, 1.0, 0.0),",

      "        vec3(0.0, 1.0, 0.0),",
      "        vec3(1.0, 0.0, 0.0),",
      "        vec3(1.0, 1.0, 0.0),",

      "        vec3(0.0, 0.0, 1.0),",
      "        vec3(1.0, 0.0, 1.0),",
      "        vec3(0.0, 1.0, 1.0),",

      "        vec3(0.0, 1.0, 1.0),",
      "        vec3(1.0, 0.0, 1.0),",
      "        vec3(1.0, 1.0, 1.0)",
      "    );",

      // pos_scale halves the x extent so each eye quad occupies one
      // half of the side-by-side destination surface.
      "    const vec2 pos_scale = vec2(0.5, 1.0);",
      "    vec2 eye_offset = vec2(eye_offset_x[gl_VertexID], 0.0);",
      "    gl_Position = vec4(((quad_positions[gl_VertexID].xy * u_scale + u_offset) * pos_scale * 2.0) - 1.0 + eye_offset, 0.0, 1.0);",
      "    v_texcoord = vec3(quad_positions[gl_VertexID].xy * u_scale + u_offset, quad_positions[gl_VertexID].z);",

  // WebGL 1.0-style fragment shader (non-multiview path); forces the
  // red channel to 1.0 as a visual marker.
  var FS = [
    "precision mediump float;",
    "uniform sampler2D diffuse;",
    "varying vec2 vTexCoord;",

    "void main() {",
    "  vec4 color = texture2D(diffuse, vTexCoord);",
    "  color.r = 1.0; color.g *= 0.8; color.b *= 0.7;", // indicate that Multiview is not in use, for testing
    "  gl_FragColor = color;",

  // ES 3.00 fragment shader: v_texcoord.z selects which layer (eye) of
  // the texture2D array to sample.
  var FSMultiview = [
    "#version 300 es",
    "uniform mediump sampler2DArray u_source_texture;",
    "in mediump vec3 v_texcoord;",
    "out mediump vec4 output_color;",

    "void main()",
    "    output_color = texture(u_source_texture, v_texcoord);",

  // Constructor: creates a VAO and compiles/links the multiview blit
  // program via the WGLUProgram helper (defined outside this listing).
  var StereoUtil = function (gl) {
    this.gl = gl;

    this.vao = gl.createVertexArray();

    console.log("compiling multiview shader");
    this.program_multiview = new WGLUProgram(gl);
    this.program_multiview.attachShaderSource(VSMultiview, gl.VERTEX_SHADER);
    this.program_multiview.attachShaderSource(FSMultiview, gl.FRAGMENT_SHADER);
      // NOTE(review): this attribute-binding map looks orphaned by the
      // truncation — presumably an argument to a link/bind call.
      v_texcoord: 0,

  // Copies both layers of source_texture into the currently bound
  // framebuffer as side-by-side stereo. u_offset/u_scale select the
  // source sub-rectangle in 0..1 UV space; the viewport sizes the
  // destination.
  StereoUtil.prototype.blit = function (
    dest_surface_height) {
    let gl = this.gl;
    let program = this.program;

    gl.bindTexture(gl.TEXTURE_2D_ARRAY, source_texture);

    program = this.program_multiview;

    // Render to the destination texture, sampling from the scratch texture
    gl.colorMask(true, true, true, true);

    gl.viewport(0, 0, dest_surface_width, dest_surface_height);

    gl.uniform2f(program.uniform.u_scale, source_rect_uv_width, source_rect_uv_height);
    gl.uniform2f(program.uniform.u_offset, source_rect_uv_x, source_rect_uv_y);
    gl.uniform1i(program.uniform.u_source_texture, 0);

    // Start setting up VAO
    // Two triangles per eye quad = 12 vertices, generated in the
    // vertex shader from gl_VertexID (no attribute buffers).
    gl.drawArrays(gl.TRIANGLES, 0, 12);


  return StereoUtil;