WebGL2 render Uint16Array to canvas as an image


Problem description


I am attempting to render a Uint16Array to an image in the browser using WebGL2 textures. I have a working example fiddle using a Uint8Array, and I am struggling with the upgrade to 16-bit, as WebGL has a steep learning curve.

Working 8-bit fiddle, Uint8Array (identical to the snippet below): http://jsfiddle.net/njxvftc9/2/
Non-working 16-bit attempt, Uint16Array: http://jsfiddle.net/njxvftc9/3/

// image data
var w = 128;
var h = 128;
var size = w * h * 4;
var img = new Uint8Array(size); // need Uint16Array
for (var i = 0; i < img.length; i += 4) {
    img[i + 0] = 255; // r
    img[i + 1] = i/64; // g
    img[i + 2] = 0; // b
    img[i + 3] = 255; // a
}

// program
var canvas = document.getElementById('cv');
var gl = canvas.getContext('webgl2');

var program = gl.createProgram();
//var color_buffer_float_16ui = gl.getExtension('EXT_color_buffer_float'); // add for 16-bit

// texture
var tex = gl.createTexture(); // create empty texture
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texImage2D(
    gl.TEXTURE_2D, // target
    0, // mip level
    gl.RGBA, // internal format -> gl.RGBA16UI
    w, h, // width and height
    0, // border
    gl.RGBA, // format -> gl.RGBA_INTEGER
    gl.UNSIGNED_BYTE, // type -> gl.UNSIGNED_SHORT
    img // texture data
);

// buffer
var buffer = gl.createBuffer();
var bufferData =  new Float32Array([
    -1, -1,
    1, -1,
    1, 1,               
    1, 1,
    -1, 1,
    -1, -1
]);
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, bufferData, gl.STATIC_DRAW);

// shaders
program.vs = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(program.vs,
                "attribute vec4 vertex;\n" + // incoming pixel input?
                "varying vec2 pixelCoordinate;\n" + // variable used to pass position to fragment shader
                "void main(){\n" +
                " gl_Position = vertex;\n" + // set pixel output position to incoming position (pass through)
                " pixelCoordinate = vertex.xy*0.5+0.5;\n" + // set coordinate for fragment shader
                "}\n");

program.fs = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(program.fs,
                "precision highp float;\n" + // ?
                "uniform sampler2D tex;\n" + // ?
                "varying vec2 pixelCoordinate;\n" + // receive pixel position from vertex shader
                "void main(){\n" +
                " gl_FragColor = texture2D(tex, pixelCoordinate);\n" + // lookup color in texture image at coordinate position and set color to
                "}\n");

gl.compileShader(program.vs);
gl.compileShader(program.fs);

gl.attachShader(program,program.vs);
gl.attachShader(program,program.fs);

gl.deleteShader(program.vs);
gl.deleteShader(program.fs);

// program
gl.bindAttribLocation(program, 0, "vertex");
gl.linkProgram(program);
gl.useProgram(program);
gl.enableVertexAttribArray(0);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawArrays(gl.TRIANGLES, 0, 6); // execute program

<canvas id="cv" width="100" height="100"></canvas>

I have tried many other techniques, referenced the spec, tried converting to floating point in the shaders, and have tried to combine methods seen here: Render to 16bits unsigned integer 2D texture in WebGL2, with no success.

I prefer vanilla JS but am open to using libraries such as twgl or three.js, as long as the input is a Uint16Array; the shaders can output any format (float, etc.).

Can anyone with more experience in WebGL2 point me in the right direction or provide a working sample fiddle here? Is there a simple mistake in my code, am I missing a larger concept, or is this even possible? Any help is greatly appreciated!

Solution

If you want to put 16-bit data into a texture you need to choose a 16-bit texture internal format. Looking at the list of internal formats, your choices for RGBA are

gl.RGBA16I    // 16bit integers
gl.RGBA16UI   // 16bit unsigned integers
gl.RGBA16F    // 16bit half floats

It looks like you chose gl.RGBA16UI.
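
For reference, here is a minimal sketch of the matching upload call; the same parameters appear in the full example further down. With an integer internal format such as gl.RGBA16UI, the format must be gl.RGBA_INTEGER and the type must match the Uint16Array data, i.e. gl.UNSIGNED_SHORT.

gl.texImage2D(
    gl.TEXTURE_2D,     // target
    0,                 // mip level
    gl.RGBA16UI,       // 16-bit unsigned integer internal format
    w, h,              // width and height
    0,                 // border
    gl.RGBA_INTEGER,   // integer internal formats require the _INTEGER pixel format
    gl.UNSIGNED_SHORT, // type matching the Uint16Array
    img                // texture data (a Uint16Array)
);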

To access that texture in the shader this line

uniform sampler2D tex;

has to change to this

uniform usampler2D tex;

That syntax is only available in GLSL 3.00 es, so you need to add #version 300 es to the top of both shaders.

Switching to #version 300 es also means other syntax changes: attribute becomes in; varying becomes out in the vertex shader and in in the fragment shader; gl_FragColor disappears and you have to declare your own output, for example out vec4 fooColor;. Also, texture2D becomes just texture.
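
As a minimal sketch of those syntax changes, the question's pass-through vertex shader rewritten for GLSL 3.00 es looks like this (same logic, only the syntax is updated; it matches the vertex shader in the full example below):

#version 300 es
in vec4 vertex;            // was: attribute vec4 vertex;
out vec2 pixelCoordinate;  // was: varying vec2 pixelCoordinate;
void main() {
  gl_Position = vertex;
  pixelCoordinate = vertex.xy * 0.5 + 0.5;
}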

You cannot use filtering on integer textures, so these two lines

gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);

need to change to

gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);

And when you read data from the texture it will be unsigned integers, so this line

gl_FragColor = texture2D(tex, pixelCoordinate);

would end up changing to something like

out vec4 fooColor;  // named this fooColor to make it clear you pick the name

void main() {
  uvec4 unsignedIntValues = texture(tex, pixelCoordinate);
  vec4 floatValues0To65535 = vec4(unsignedIntValues);  
  vec4 colorValues0To1 = floatValues0To65535 / 65535.0;
  fooColor = colorValues0To1;
}

Of course you can shorten that math to a single line

  fooColor = vec4(texture(tex, pixelCoordinate)) / 65535.0;

Example:

// image data
var w = 128;
var h = 128;
var size = w * h * 4;
var img = new Uint16Array(size); // need Uint16Array
for (var i = 0; i < img.length; i += 4) {
    img[i + 0] = 65535; // r
    img[i + 1] = i/64 * 256; // g
    img[i + 2] = 0; // b
    img[i + 3] = 65535; // a
}

// program
var canvas = document.getElementById('cv');
var gl = canvas.getContext('webgl2');

var program = gl.createProgram();
//var color_buffer_float_16ui = gl.getExtension('EXT_color_buffer_float'); // add for 16-bit

// texture
var tex = gl.createTexture(); // create empty texture
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texImage2D(
    gl.TEXTURE_2D, // target
    0, // mip level
    gl.RGBA16UI, // internal format -> gl.RGBA16UI
    w, h, // width and height
    0, // border
    gl.RGBA_INTEGER, // format -> gl.RGBA_INTEGER
    gl.UNSIGNED_SHORT, // type -> gl.UNSIGNED_SHORT
    img // texture data
);

// buffer
var buffer = gl.createBuffer();
var bufferData =  new Float32Array([
    -1, -1,
    1, -1,
    1, 1,               
    1, 1,
    -1, 1,
    -1, -1
]);
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, bufferData, gl.STATIC_DRAW);

// shaders
program.vs = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(program.vs, `#version 300 es
  in vec4 vertex; // incoming vertex position
  out vec2 pixelCoordinate; // variable used to pass position to fragment shader
  void main(){
     gl_Position = vertex;  // set pixel output position to incoming position (pass through)
     pixelCoordinate = vertex.xy*0.5+0.5; // set coordinate for fragment shader
  }
`);

program.fs = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(program.fs, `#version 300 es
  precision highp float; // default float precision for this fragment shader
  uniform highp usampler2D tex; // unsigned-integer sampler to match the RGBA16UI texture
  in vec2 pixelCoordinate; // receive pixel position from vertex shader
  out vec4 fooColor;
  void main() {
     uvec4 unsignedIntValues = texture(tex, pixelCoordinate);
     vec4 floatValues0To65535 = vec4(unsignedIntValues);
     vec4 colorValues0To1 = floatValues0To65535 / 65535.0;
     fooColor = colorValues0To1;
  }
`);

gl.compileShader(program.vs);
checkCompileError(program.vs);
gl.compileShader(program.fs);
checkCompileError(program.fs);

function checkCompileError(s) {
  if (!gl.getShaderParameter(s, gl.COMPILE_STATUS)) {
   console.error(gl.getShaderInfoLog(s));
  }
}

gl.attachShader(program,program.vs);
gl.attachShader(program,program.fs);

gl.deleteShader(program.vs);
gl.deleteShader(program.fs);

// program
gl.bindAttribLocation(program, 0, "vertex");
gl.linkProgram(program);
gl.useProgram(program);
gl.enableVertexAttribArray(0);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawArrays(gl.TRIANGLES, 0, 6); // execute program

<canvas id="cv" width="100" height="100"></canvas>
