In a recent project of mine, I procedurally create fragment shaders that look like this (but possibly larger), which I display using WebGL:
precision mediump float;
// Window size in pixels; used to normalize gl_FragCoord.
uniform vec2 u_windowSize;
void main() {
// Scale so the shorter window dimension spans [-1, 1].
float s = 2.0 / min(u_windowSize.x, u_windowSize.y);
// Fragment position centered on the window, in normalized units.
vec2 pos0 = s * (gl_FragCoord.xy - 0.5 * u_windowSize);
// Outside the unit disc: output fully transparent black and stop.
if (length(pos0) > 1.0) { gl_FragColor = vec4(0,0,0,0); return; }
// Rescale so the inner disc of radius 0.8 maps onto the unit disc.
vec2 pos1 = pos0/0.8;
// Radial warp: the new length is (1 - length(pos1)), keeping the direction of pos1.
vec2 pos2 = ((1.0-length(pos1))/length(pos1)) * pos1;
vec3 col2 = vec3(1.0,1.0,1.0); // white
vec2 pos3 = pos2; // generator artifact: copied but never used below
vec3 col3 = vec3(1.0,0.0,0.0); // red
// (1/sqrt(2)) * mat2(1,1,-1,1) is a 45-degree rotation (column-major);
// the factor 6.0 sets the checkerboard cell frequency.
vec2 tmp2 = 6.0*(1.0/sqrt(2.0)) * mat2(1.0,1.0,-1.0,1.0) * pos2;
vec3 col4;
// Checkerboard: '<' binds tighter than '!=', so this is
// (parity of x) XOR (parity of y), choosing white or red per cell.
if (mod(tmp2.x, 2.0) < 1.0 != mod(tmp2.y, 2.0) < 1.0) {
col4 = col2;
} else {
col4 = col3;
};
vec2 pos5 = pos0; // generator artifact: copied but never used below
vec3 col5 = vec3(0.0,1.0,1.0); // cyan
vec3 col6;
// Inner disc (radius < 0.8): warped checkerboard; outer ring: cyan.
if (length(pos0) < 0.8) {
col6 = col4;
} else {
col6 = col5;
};
gl_FragColor = vec4(col6, 1.0);
}
Obviously there is some redundancy here that you would not write by hand – copying pos2 into pos3 is pointless, for example. But since I generate this code, this is convenient.
Before I prematurely start optimizing and make my generator emit (hopefully) more efficient code, I’d like to know:
Do browsers and/or graphics drivers already optimize such things (so I don't have to)?