vadym@vadym-HP-ProBook-640-G1:~/mesa/mesa-shader-db$ INTEL_DEBUG=vs ~/mesa/apitrace/build/apitrace replay ~/Downloads/citra-qt.trace

GLSL IR for native vertex shader 3:
(
(declare (location=0 shader_out ) vec4 gl_Position)
(declare (location=31 component=0 shader_out ) vec2 frag_tex_coord)
(declare (location=16 shader_in ) vec2 vert_position)
(declare (location=17 shader_in ) vec2 vert_tex_coord)
(declare (location=0 uniform ) mat3x2 modelview_matrix)
( function main
  (signature void
    (parameters
    )
    (
      (declare (temporary ) mat2 mat_ctor)
      (assign (xy) (array_ref (var_ref mat_ctor) (constant uint (0)) ) (array_ref (var_ref modelview_matrix) (constant uint (0)) ) )
      (assign (xy) (array_ref (var_ref mat_ctor) (constant uint (1)) ) (array_ref (var_ref modelview_matrix) (constant uint (1)) ) )
      (declare (temporary ) vec4 vec_ctor)
      (assign (zw) (var_ref vec_ctor) (constant vec2 (0.000000 1.000000)) )
      (declare (temporary ) vec2 flattening_tmp)
      (assign (xy) (var_ref flattening_tmp) (expression vec2 + (expression vec2 * (array_ref (var_ref mat_ctor) (constant int (0)) ) (swiz x (var_ref vert_position) )) (expression vec2 * (array_ref (var_ref mat_ctor) (constant int (1)) ) (swiz y (var_ref vert_position) )) ) )
      (assign (xy) (var_ref vec_ctor) (expression vec2 + (var_ref flattening_tmp) (array_ref (var_ref modelview_matrix) (constant int (2)) ) ) )
      (assign (xyzw) (var_ref gl_Position) (var_ref vec_ctor) )
      (assign (xy) (var_ref frag_tex_coord) (var_ref vert_tex_coord) )
    ))
)
)

1617: message: shader compiler issue 1: FS SIMD8 shader: 4 inst, 0 loops, 222 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 64 to 48 bytes.
1617: message: shader compiler issue 2: FS SIMD16 shader: 4 inst, 0 loops, 230 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 64 to 48 bytes.
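For reference, a minimal GLSL source that would produce the IR above. This is a reconstruction inferred from the declarations and assignments in the dump, not the application's literal shader; only the identifiers that appear in the IR are taken from it, and the #version is an assumption:

    #version 330 core
    in vec2 vert_position;
    in vec2 vert_tex_coord;
    out vec2 frag_tex_coord;
    uniform mat3x2 modelview_matrix;

    void main() {
        // mat2(modelview_matrix) keeps the first two columns (mat_ctor in the IR);
        // modelview_matrix[2] is the translation column added afterwards.
        gl_Position = vec4(mat2(modelview_matrix) * vert_position + modelview_matrix[2], 0.0, 1.0);
        frag_tex_coord = vert_tex_coord;
    }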
NIR (SSA form) for vertex shader: shader: MESA_SHADER_VERTEX name: GLSL3 inputs: 0 outputs: 0 uniforms: 48 shared: 0 decl_var uniform INTERP_MODE_NONE mat3x2 modelview_matrix (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_position (VERT_ATTRIB_GENERIC0.xy, 16, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_tex_coord (VERT_ATTRIB_GENERIC1.xy, 17, 0) decl_var shader_out INTERP_MODE_NONE vec4 gl_Position (VARYING_SLOT_POS, 0, 0) decl_var shader_out INTERP_MODE_NONE vec2 frag_tex_coord (VARYING_SLOT_VAR0.xy, 31, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec2 32 ssa_0 = load_const (0x00000000 /* 0.000000 */, 0x3f800000 /* 1.000000 */) vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_2 = intrinsic load_uniform (ssa_1) () (0, 48) /* base=0 */ /* range=48 */ /* modelview_matrix */ vec1 32 ssa_3 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_4 = intrinsic load_uniform (ssa_3) () (0, 48) /* base=0 */ /* range=48 */ /* modelview_matrix */ vec2 32 ssa_5 = intrinsic load_input (ssa_1) () (0, 0) /* base=0 */ /* component=0 */ vec2 32 ssa_6 = fmul ssa_4, ssa_5.yy vec2 32 ssa_7 = ffma ssa_2, ssa_5.xx, ssa_6 vec1 32 ssa_8 = load_const (0x00000020 /* 0.000000 */) vec2 32 ssa_9 = intrinsic load_uniform (ssa_8) () (0, 48) /* base=0 */ /* range=48 */ /* modelview_matrix */ vec2 32 ssa_10 = fadd ssa_7, ssa_9 vec4 32 ssa_11 = vec4 ssa_10.x, ssa_10.y, ssa_0.x, ssa_0.y vec2 32 ssa_12 = intrinsic load_input (ssa_1) () (1, 0) /* base=1 */ /* component=0 */ intrinsic store_output (ssa_11, ssa_1) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* gl_Position */ intrinsic store_output (ssa_12, ssa_1) () (31, 3, 0) /* base=31 */ /* wrmask=xy */ /* component=0 */ /* frag_tex_coord */ /* succs: block_0 */ block block_0: } NIR (final form) for vertex shader: shader: MESA_SHADER_VERTEX name: GLSL3 inputs: 0 outputs: 0 uniforms: 48 shared: 0 decl_var uniform INTERP_MODE_NONE mat3x2 modelview_matrix (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_position (VERT_ATTRIB_GENERIC0.xy, 16, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_tex_coord (VERT_ATTRIB_GENERIC1.xy, 17, 0) decl_var shader_out INTERP_MODE_NONE vec4 gl_Position (VARYING_SLOT_POS, 0, 0) decl_var shader_out INTERP_MODE_NONE vec2 frag_tex_coord (VARYING_SLOT_VAR0.xy, 31, 0) decl_function main returning void impl main { decl_reg vec4 32 r0 block block_0: /* preds: */ vec2 32 ssa_0 = load_const (0x00000000 /* 0.000000 */, 0x3f800000 /* 1.000000 */) vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_2 = intrinsic load_uniform (ssa_1) () (0, 48) /* base=0 */ /* range=48 */ /* modelview_matrix */ vec1 32 ssa_3 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_4 = intrinsic load_uniform (ssa_3) () (0, 48) /* base=0 */ /* range=48 */ /* modelview_matrix */ vec2 32 ssa_5 = intrinsic load_input (ssa_1) () (0, 0) /* base=0 */ /* component=0 */ vec2 32 ssa_6 = fmul ssa_4, ssa_5.yy vec2 32 ssa_7 = ffma ssa_2, ssa_5.xx, ssa_6 vec1 32 ssa_8 = load_const (0x00000020 /* 0.000000 */) vec2 32 ssa_9 = intrinsic load_uniform (ssa_8) () (0, 48) /* base=0 */ /* range=48 */ /* modelview_matrix */ vec2 32 ssa_10 = fadd ssa_7, ssa_9 r0.xy = imov ssa_10 r0.zw = imov ssa_0.xy vec2 32 ssa_12 = intrinsic load_input (ssa_1) () (1, 0) /* base=1 */ /* component=0 */ intrinsic store_output (r0, ssa_1) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* gl_Position */ intrinsic store_output (ssa_12, ssa_1) () (31, 3, 0) /* base=31 */ /* wrmask=xy */ /* component=0 */ /* 
frag_tex_coord */
/* succs: block_0 */
block block_0:
}

VS Output VUE map (3 slots, non-SSO)
  [0] VARYING_SLOT_PSIZ
  [1] VARYING_SLOT_POS
  [2] VARYING_SLOT_VAR0

Native code for unnamed vertex shader GLSL3:
VS vec4 shader: 10 instructions. 0 loops. 56 cycles. 0:0 spills:fills. Compacted 160 to 160 bytes (0%)
START B0 (56 cycles)
mov(8) g115<1>.zwF [0F, 0F, 0F, 1F]VF { align16 NoDDClr 1Q };
mov(8) g7<1>.xyUD g1<0>.xyyyUD { align16 1Q };
mul(8) g11<1>.xyF g1<0>.zwwwF g2<4>.yF { align16 1Q };
mov(8) g114<1>UD 0x00000000UD { align16 1Q compacted };
mov(8) g116<1>.xyF g3<4>.xyyyF { align16 1Q };
mad(8) g12<1>.xyF g11<4,4,1>.xyyyF g2<4,4,1>.xF g7<4,4,1>.xyyyF { align16 1Q };
add(8) g115<1>.xyF g12<4>.xyyyF g1.4<0>.xyyyF { align16 NoDDChk 1Q };
mov(8) g113<1>UD g0<4>UD { align16 WE_all 1Q };
or(1) g113.5<1>UD g0.5<0,1,0>UD 0x0000ff00UD { align1 WE_all 1N };
send(8) null<1>F g113<4>F urb 0 write HWord interleave complete mlen 5 rlen 0 { align16 1Q EOT };
END B0

1617: message: shader compiler issue 3: VS vec4 shader: 10 inst, 0 loops, 56 cycles, 0:0 spills:fills, compacted 160 to 160 bytes.

GLSL IR for native vertex shader 6:
(
(declare (location=0 shader_out ) vec4 gl_Position)
(declare (location=3 uniform ) (array vec2 4) constarray_0_0)
(declare (location=13 sys ) int gl_BaseVertex)
(declare (location=12 sys ) int gl_VertexIDMESA)
(declare (temporary ) int __VertexID)
( function main
  (signature void
    (parameters
    )
    (
      (assign (x) (var_ref __VertexID) (expression int + (var_ref gl_VertexIDMESA) (var_ref gl_BaseVertex) ) )
      (declare (temporary ) vec4 vec_ctor)
      (assign (zw) (var_ref vec_ctor) (constant vec2 (0.000000 1.000000)) )
      (assign (xy) (var_ref vec_ctor) (array_ref (var_ref constarray_0_0) (var_ref __VertexID) ) )
      (assign (xyzw) (var_ref gl_Position) (var_ref vec_ctor) )
    ))
)
)

1675: message: shader compiler issue 4: FS SIMD8 shader: 27 inst, 0 loops, 410 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 432 to 320 bytes.
1675: message: shader compiler issue 5: FS SIMD16 shader: 28 inst, 0 loops, 448 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 448 to 336 bytes.
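Shader 6 is a vertex-ID driven quad: the IR indexes a 4-element vec2 constant array with gl_VertexID (lowered here to gl_VertexIDMESA + gl_BaseVertex), and the NIR dump below shows the array initializer as the four corners (-1,-1), (1,-1), (-1,1), (1,1). A hypothetical GLSL source that would compile to this IR (array name and #version are assumptions):

    #version 330 core
    const vec2 corners[4] = vec2[](vec2(-1.0, -1.0), vec2( 1.0, -1.0),
                                   vec2(-1.0,  1.0), vec2( 1.0,  1.0));

    void main() {
        gl_Position = vec4(corners[gl_VertexID], 0.0, 1.0);
    }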
NIR (SSA form) for vertex shader:
shader: MESA_SHADER_VERTEX
name: GLSL6
inputs: 0
outputs: 0
uniforms: 64
shared: 0
decl_var uniform INTERP_MODE_NONE vec2[4] constarray_0_0 (3, 0, 0) = { { -1.000000, -1.000000 }, { 1.000000, -1.000000 }, { -1.000000, 1.000000 }, { 1.000000, 1.000000 } }
decl_var shader_out INTERP_MODE_NONE vec4 gl_Position (VARYING_SLOT_POS, 0, 0)
decl_function main returning void

impl main {
    block block_0:
    /* preds: */
    vec2 32 ssa_0 = load_const (0x00000000 /* 0.000000 */, 0x3f800000 /* 1.000000 */)
    vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */)
    vec1 32 ssa_2 = intrinsic load_input (ssa_1) () (0, 2) /* base=0 */ /* component=2 */
    vec1 32 ssa_3 = intrinsic load_input (ssa_1) () (0, 0) /* base=0 */ /* component=0 */
    vec1 32 ssa_4 = iadd ssa_2, ssa_3
    vec1 32 ssa_5 = load_const (0x00000004 /* 0.000000 */)
    vec1 32 ssa_6 = ishl ssa_4, ssa_5
    vec2 32 ssa_7 = intrinsic load_uniform (ssa_6) () (0, 64) /* base=0 */ /* range=64 */ /* constarray_0_0 */
    vec4 32 ssa_8 = vec4 ssa_7.x, ssa_7.y, ssa_0.x, ssa_0.y
    intrinsic store_output (ssa_8, ssa_1) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* gl_Position */
    /* succs: block_0 */
    block block_0:
}

NIR (final form) for vertex shader:
shader: MESA_SHADER_VERTEX
name: GLSL6
inputs: 0
outputs: 0
uniforms: 64
shared: 0
decl_var uniform INTERP_MODE_NONE vec2[4] constarray_0_0 (3, 0, 0) = { { -1.000000, -1.000000 }, { 1.000000, -1.000000 }, { -1.000000, 1.000000 }, { 1.000000, 1.000000 } }
decl_var shader_out INTERP_MODE_NONE vec4 gl_Position (VARYING_SLOT_POS, 0, 0)
decl_function main returning void

impl main {
    decl_reg vec4 32 r0
    block block_0:
    /* preds: */
    vec2 32 ssa_0 = load_const (0x00000000 /* 0.000000 */, 0x3f800000 /* 1.000000 */)
    vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */)
    vec1 32 ssa_2 = intrinsic load_input (ssa_1) () (0, 2) /* base=0 */ /* component=2 */
    vec1 32 ssa_3 = intrinsic load_input (ssa_1) () (0, 0) /* base=0 */ /* component=0 */
    vec1 32 ssa_4 = iadd ssa_2, ssa_3
    vec1 32 ssa_5 = load_const (0x00000004 /* 0.000000 */)
    vec1 32 ssa_6 = ishl ssa_4, ssa_5
    vec2 32 ssa_7 = intrinsic load_uniform (ssa_6) () (0, 64) /* base=0 */ /* range=64 */ /* constarray_0_0 */
    r0.xy = imov ssa_7
    r0.zw = imov ssa_0.xy
    intrinsic store_output (r0, ssa_1) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* gl_Position */
    /* succs: block_0 */
    block block_0:
}

VS Output VUE map (2 slots, non-SSO)
  [0] VARYING_SLOT_PSIZ
  [1] VARYING_SLOT_POS

Native code for unnamed vertex shader GLSL6:
VS vec4 shader: 9 instructions. 0 loops. 68 cycles. 0:0 spills:fills. Compacted 144 to 144 bytes (0%)
START B0 (68 cycles)
mov(8) g115<1>.zwF [0F, 0F, 0F, 1F]VF { align16 1Q };
add(8) g7<1>.xD g1<4>.zD g1<4>.xD { align16 1Q };
mov(8) g114<1>UD 0x00000000UD { align16 1Q compacted };
shl(8) g12<1>.xD g7<4>.xD 0x00000004UD { align16 1Q };
send(8) g10<1>UD g12<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q };
mov(8) g115<1>.xyD g10<4>.xyyyD { align16 1Q };
mov(8) g113<1>UD g0<4>UD { align16 WE_all 1Q };
or(1) g113.5<1>UD g0.5<0,1,0>UD 0x0000ff00UD { align1 WE_all 1N };
send(8) null<1>F g113<4>F urb 0 write HWord interleave complete mlen 3 rlen 0 { align16 1Q EOT };
END B0

1675: message: shader compiler issue 6: VS vec4 shader: 9 inst, 0 loops, 68 cycles, 0:0 spills:fills, compacted 144 to 144 bytes.
GLSL IR for native vertex shader 8: ( (declare (location=16 shader_in ) vec4 vert_position) (declare (location=17 shader_in ) vec4 vert_color) (declare (location=18 shader_in ) vec2 vert_texcoord0) (declare (location=19 shader_in ) vec2 vert_texcoord1) (declare (location=20 shader_in ) vec2 vert_texcoord2) (declare (location=21 shader_in ) float vert_texcoord0_w) (declare (location=22 shader_in ) vec4 vert_normquat) (declare (location=23 shader_in ) vec3 vert_view) (declare (location=0 shader_out ) vec4 gl_Position) (declare (location=17 shader_out ) (array float 2) gl_ClipDistance) (declare (location=32 shader_out ) vec4 primary_color) (declare (location=33 shader_out ) vec2 texcoord0) (declare (location=34 shader_out ) vec2 texcoord1) (declare (location=35 shader_out ) vec2 texcoord2) (declare (location=36 shader_out ) float texcoord0_w) (declare (location=37 shader_out ) vec4 normquat) (declare (location=38 shader_out ) vec3 view) (declare (location=79 uniform ) vec4 clip_coef) (declare (location=78 uniform ) vec4 tev_combiner_buffer_color) (declare (location=77 uniform ) (array vec4 6) const_color) (declare (location=13 uniform ) (array LightSrc@0x11f95c0 8) light_src) (declare (location=12 uniform ) vec3 lighting_global_ambient) (declare (location=11 uniform ) vec2 proctex_noise_p) (declare (location=10 uniform ) vec2 proctex_noise_a) (declare (location=9 uniform ) vec2 proctex_noise_f) (declare (location=8 uniform ) vec3 fog_color) (declare (location=7 uniform ) int scissor_y2) (declare (location=6 uniform ) int scissor_x2) (declare (location=5 uniform ) int scissor_y1) (declare (location=4 uniform ) int scissor_x1) (declare (location=3 uniform ) float depth_offset) (declare (location=2 uniform ) float depth_scale) (declare (location=1 uniform ) int alphatest_ref) (declare (location=0 uniform ) int framebuffer_scale) ( function main (signature void (parameters ) ( (assign (xyzw) (var_ref primary_color) (var_ref vert_color) ) (assign (xy) (var_ref texcoord0) (var_ref vert_texcoord0) ) (assign (xy) (var_ref texcoord1) (var_ref vert_texcoord1) ) (assign (xy) (var_ref texcoord2) (var_ref vert_texcoord2) ) (assign (x) (var_ref texcoord0_w) (var_ref vert_texcoord0_w) ) (assign (xyzw) (var_ref normquat) (var_ref vert_normquat) ) (assign (xyz) (var_ref view) (var_ref vert_view) ) (assign (xyzw) (var_ref gl_Position) (var_ref vert_position) ) (assign (x) (array_ref (var_ref gl_ClipDistance) (constant int (0)) ) (expression float neg (swiz z (var_ref vert_position) )) ) (declare (temporary ) vec4 ubo_load_temp) (declare (temporary ) uint ubo_load_temp_offset) (assign (x) (var_ref ubo_load_temp_offset) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset) (constant uint (1104)) ) ) ) (assign (x) (array_ref (var_ref gl_ClipDistance) (constant int (1)) ) (expression float dot (var_ref ubo_load_temp) (var_ref vert_position) ) ) )) ) ) NIR (SSA form) for vertex shader: shader: MESA_SHADER_VERTEX name: GLSL8 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE vec4 clip_coef (79, 0, 0) decl_var uniform INTERP_MODE_NONE vec4 tev_combiner_buffer_color (78, 0, 0) decl_var uniform INTERP_MODE_NONE vec4[6] const_color (77, 0, 0) decl_var uniform INTERP_MODE_NONE LightSrc[8] light_src (13, 0, 0) decl_var uniform INTERP_MODE_NONE vec3 lighting_global_ambient (12, 0, 0) decl_var uniform INTERP_MODE_NONE vec2 proctex_noise_p (11, 0, 0) decl_var uniform INTERP_MODE_NONE vec2 
proctex_noise_a (10, 0, 0) decl_var uniform INTERP_MODE_NONE vec2 proctex_noise_f (9, 0, 0) decl_var uniform INTERP_MODE_NONE vec3 fog_color (8, 0, 0) decl_var uniform INTERP_MODE_NONE int scissor_y2 (7, 0, 0) decl_var uniform INTERP_MODE_NONE int scissor_x2 (6, 0, 0) decl_var uniform INTERP_MODE_NONE int scissor_y1 (5, 0, 0) decl_var uniform INTERP_MODE_NONE int scissor_x1 (4, 0, 0) decl_var uniform INTERP_MODE_NONE float depth_offset (3, 0, 0) decl_var uniform INTERP_MODE_NONE float depth_scale (2, 0, 0) decl_var uniform INTERP_MODE_NONE int alphatest_ref (1, 0, 0) decl_var uniform INTERP_MODE_NONE int framebuffer_scale (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 vert_position (VERT_ATTRIB_GENERIC0, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 vert_color (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_texcoord0 (VERT_ATTRIB_GENERIC2.xy, 18, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_texcoord1 (VERT_ATTRIB_GENERIC3.xy, 19, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_texcoord2 (VERT_ATTRIB_GENERIC4.xy, 20, 0) decl_var shader_in INTERP_MODE_NONE float vert_texcoord0_w (VERT_ATTRIB_GENERIC5.x, 21, 0) decl_var shader_in INTERP_MODE_NONE vec4 vert_normquat (VERT_ATTRIB_GENERIC6, 22, 0) decl_var shader_in INTERP_MODE_NONE vec3 vert_view (VERT_ATTRIB_GENERIC7.xyz, 23, 0) decl_var shader_out INTERP_MODE_NONE vec4 gl_Position (VARYING_SLOT_POS, 0, 0) decl_var shader_out INTERP_MODE_NONE float[2] gl_ClipDistance (VARYING_SLOT_CLIP_DIST0.x, 17, 0) compact decl_var shader_out INTERP_MODE_NONE vec4 primary_color (VARYING_SLOT_VAR1, 32, 0) decl_var shader_out INTERP_MODE_NONE vec2 texcoord0 (VARYING_SLOT_VAR2.xy, 33, 0) decl_var shader_out INTERP_MODE_NONE vec2 texcoord1 (VARYING_SLOT_VAR3.xy, 34, 0) decl_var shader_out INTERP_MODE_NONE vec2 texcoord2 (VARYING_SLOT_VAR4.xy, 35, 0) decl_var shader_out INTERP_MODE_NONE float texcoord0_w (VARYING_SLOT_VAR5.x, 36, 0) decl_var shader_out INTERP_MODE_NONE vec4 normquat (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec3 view (VARYING_SLOT_VAR7.xyz, 38, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_1 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_2 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec2 32 ssa_3 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec2 32 ssa_4 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec1 32 ssa_5 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec4 32 ssa_6 = intrinsic load_input (ssa_0) () (6, 0) /* base=6 */ /* component=0 */ vec3 32 ssa_7 = intrinsic load_input (ssa_0) () (7, 0) /* base=7 */ /* component=0 */ vec4 32 ssa_8 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec1 32 ssa_9 = fmov -ssa_8.z vec1 32 ssa_10 = load_const (0x00000450 /* 0.000000 */) vec4 32 ssa_11 = intrinsic load_ubo (ssa_0, ssa_10) () () vec4 32 ssa_12 = fdot_replicated4 ssa_11, ssa_8 vec1 32 ssa_13 = imov ssa_12.x intrinsic store_output (ssa_8, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* gl_Position */ intrinsic store_output (ssa_9, ssa_0) () (17, 1, 0) /* base=17 */ /* wrmask=x */ /* component=0 */ /* gl_ClipDistance */ intrinsic store_output (ssa_13, ssa_0) () (17, 1, 1) /* base=17 */ /* wrmask=x */ /* component=1 */ intrinsic store_output (ssa_1, ssa_0) () (32, 15, 0) /* base=32 */ /* wrmask=xyzw */ 
/* component=0 */ /* primary_color */ intrinsic store_output (ssa_2, ssa_0) () (33, 3, 0) /* base=33 */ /* wrmask=xy */ /* component=0 */ /* texcoord0 */ intrinsic store_output (ssa_3, ssa_0) () (34, 3, 0) /* base=34 */ /* wrmask=xy */ /* component=0 */ /* texcoord1 */ intrinsic store_output (ssa_4, ssa_0) () (35, 3, 0) /* base=35 */ /* wrmask=xy */ /* component=0 */ /* texcoord2 */ intrinsic store_output (ssa_5, ssa_0) () (36, 1, 0) /* base=36 */ /* wrmask=x */ /* component=0 */ /* texcoord0_w */ intrinsic store_output (ssa_6, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ /* normquat */ intrinsic store_output (ssa_7, ssa_0) () (38, 7, 0) /* base=38 */ /* wrmask=xyz */ /* component=0 */ /* view */ /* succs: block_0 */ block block_0: } NIR (final form) for vertex shader: shader: MESA_SHADER_VERTEX name: GLSL8 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE vec4 clip_coef (79, 0, 0) decl_var uniform INTERP_MODE_NONE vec4 tev_combiner_buffer_color (78, 0, 0) decl_var uniform INTERP_MODE_NONE vec4[6] const_color (77, 0, 0) decl_var uniform INTERP_MODE_NONE LightSrc[8] light_src (13, 0, 0) decl_var uniform INTERP_MODE_NONE vec3 lighting_global_ambient (12, 0, 0) decl_var uniform INTERP_MODE_NONE vec2 proctex_noise_p (11, 0, 0) decl_var uniform INTERP_MODE_NONE vec2 proctex_noise_a (10, 0, 0) decl_var uniform INTERP_MODE_NONE vec2 proctex_noise_f (9, 0, 0) decl_var uniform INTERP_MODE_NONE vec3 fog_color (8, 0, 0) decl_var uniform INTERP_MODE_NONE int scissor_y2 (7, 0, 0) decl_var uniform INTERP_MODE_NONE int scissor_x2 (6, 0, 0) decl_var uniform INTERP_MODE_NONE int scissor_y1 (5, 0, 0) decl_var uniform INTERP_MODE_NONE int scissor_x1 (4, 0, 0) decl_var uniform INTERP_MODE_NONE float depth_offset (3, 0, 0) decl_var uniform INTERP_MODE_NONE float depth_scale (2, 0, 0) decl_var uniform INTERP_MODE_NONE int alphatest_ref (1, 0, 0) decl_var uniform INTERP_MODE_NONE int framebuffer_scale (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 vert_position (VERT_ATTRIB_GENERIC0, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 vert_color (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_texcoord0 (VERT_ATTRIB_GENERIC2.xy, 18, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_texcoord1 (VERT_ATTRIB_GENERIC3.xy, 19, 0) decl_var shader_in INTERP_MODE_NONE vec2 vert_texcoord2 (VERT_ATTRIB_GENERIC4.xy, 20, 0) decl_var shader_in INTERP_MODE_NONE float vert_texcoord0_w (VERT_ATTRIB_GENERIC5.x, 21, 0) decl_var shader_in INTERP_MODE_NONE vec4 vert_normquat (VERT_ATTRIB_GENERIC6, 22, 0) decl_var shader_in INTERP_MODE_NONE vec3 vert_view (VERT_ATTRIB_GENERIC7.xyz, 23, 0) decl_var shader_out INTERP_MODE_NONE vec4 gl_Position (VARYING_SLOT_POS, 0, 0) decl_var shader_out INTERP_MODE_NONE float[2] gl_ClipDistance (VARYING_SLOT_CLIP_DIST0.x, 17, 0) compact decl_var shader_out INTERP_MODE_NONE vec4 primary_color (VARYING_SLOT_VAR1, 32, 0) decl_var shader_out INTERP_MODE_NONE vec2 texcoord0 (VARYING_SLOT_VAR2.xy, 33, 0) decl_var shader_out INTERP_MODE_NONE vec2 texcoord1 (VARYING_SLOT_VAR3.xy, 34, 0) decl_var shader_out INTERP_MODE_NONE vec2 texcoord2 (VARYING_SLOT_VAR4.xy, 35, 0) decl_var shader_out INTERP_MODE_NONE float texcoord0_w (VARYING_SLOT_VAR5.x, 36, 0) decl_var shader_out INTERP_MODE_NONE vec4 normquat (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec3 view (VARYING_SLOT_VAR7.xyz, 38, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) 
vec4 32 ssa_1 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_2 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec2 32 ssa_3 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec2 32 ssa_4 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec1 32 ssa_5 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec4 32 ssa_6 = intrinsic load_input (ssa_0) () (6, 0) /* base=6 */ /* component=0 */ vec3 32 ssa_7 = intrinsic load_input (ssa_0) () (7, 0) /* base=7 */ /* component=0 */ vec4 32 ssa_8 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec1 32 ssa_9 = fmov -ssa_8.z vec1 32 ssa_10 = load_const (0x00000450 /* 0.000000 */) vec4 32 ssa_11 = intrinsic load_ubo (ssa_0, ssa_10) () () vec4 32 ssa_12 = fdot_replicated4 ssa_11, ssa_8 vec1 32 ssa_13 = imov ssa_12.x intrinsic store_output (ssa_8, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* gl_Position */ intrinsic store_output (ssa_9, ssa_0) () (17, 1, 0) /* base=17 */ /* wrmask=x */ /* component=0 */ /* gl_ClipDistance */ intrinsic store_output (ssa_13, ssa_0) () (17, 1, 1) /* base=17 */ /* wrmask=x */ /* component=1 */ intrinsic store_output (ssa_1, ssa_0) () (32, 15, 0) /* base=32 */ /* wrmask=xyzw */ /* component=0 */ /* primary_color */ intrinsic store_output (ssa_2, ssa_0) () (33, 3, 0) /* base=33 */ /* wrmask=xy */ /* component=0 */ /* texcoord0 */ intrinsic store_output (ssa_3, ssa_0) () (34, 3, 0) /* base=34 */ /* wrmask=xy */ /* component=0 */ /* texcoord1 */ intrinsic store_output (ssa_4, ssa_0) () (35, 3, 0) /* base=35 */ /* wrmask=xy */ /* component=0 */ /* texcoord2 */ intrinsic store_output (ssa_5, ssa_0) () (36, 1, 0) /* base=36 */ /* wrmask=x */ /* component=0 */ /* texcoord0_w */ intrinsic store_output (ssa_6, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ /* normquat */ intrinsic store_output (ssa_7, ssa_0) () (38, 7, 0) /* base=38 */ /* wrmask=xyz */ /* component=0 */ /* view */ /* succs: block_0 */ block block_0: } VS Output VUE map (12 slots, SSO) [0] VARYING_SLOT_PSIZ [1] VARYING_SLOT_POS [2] VARYING_SLOT_CLIP_DIST0 [3] VARYING_SLOT_CLIP_DIST1 [4] BRW_VARYING_SLOT_PAD [5] VARYING_SLOT_VAR1 [6] VARYING_SLOT_VAR2 [7] VARYING_SLOT_VAR3 [8] VARYING_SLOT_VAR4 [9] VARYING_SLOT_VAR5 [10] VARYING_SLOT_VAR6 [11] VARYING_SLOT_VAR7 Native code for unnamed vertex shader GLSL8: VS vec4 shader: 16 instructions. 0 loops. 70 cycles. 0:0 spills:fills. 
Compacted 256 to 224 bytes (12%)
START B0 (70 cycles)
mov(8) g22<1>.xUD 0x00000450UD { align16 1Q compacted };
mov(8) g114<1>UD 0x00000000UD { align16 1Q compacted };
mov(8) g115<1>F g1<4>F { align16 1Q };
mov(8) g119<1>F g2<4>F { align16 1Q };
mov(8) g120<1>.xyF g3<4>.xyyyF { align16 1Q };
mov(8) g121<1>.xyF g4<4>.xyyyF { align16 1Q };
mov(8) g122<1>.xyF g5<4>.xyyyF { align16 1Q };
mov(8) g123<1>.xF g6<4>.xF { align16 1Q compacted };
mov(8) g124<1>F g7<4>F { align16 1Q };
mov(8) g125<1>.xyzF g8<4>.xyzzF { align16 1Q };
send(8) g21<1>F g22<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q };
dp4(8) g116<1>.yF g21<4>F g1<4>F { align16 NoDDClr 1Q compacted };
mov(8) g116<1>.xF -g1<4>.zF { align16 NoDDChk 1Q };
mov(8) g113<1>UD g0<4>UD { align16 WE_all 1Q };
or(1) g113.5<1>UD g0.5<0,1,0>UD 0x0000ff00UD { align1 WE_all 1N };
send(8) null<1>F g113<4>F urb 0 write HWord interleave complete mlen 13 rlen 0 { align16 1Q EOT };
END B0

1999: message: shader compiler issue 7: VS vec4 shader: 16 inst, 0 loops, 70 cycles, 0:0 spills:fills, compacted 256 to 224 bytes.
2242: message: shader compiler issue 8: FS SIMD16 shader: 2 inst, 0 loops, 0 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 32 to 32 bytes.
2319: message: shader compiler issue 9: FS SIMD8 shader: 16 inst, 0 loops, 290 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 256 to 208 bytes.
2319: message: shader compiler issue 10: FS SIMD16 shader: 17 inst, 0 loops, 318 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 272 to 224 bytes.
6866: message: shader compiler issue 11: FS SIMD8 shader: 5 inst, 0 loops, 24 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 80 to 48 bytes.
6866: message: shader compiler issue 12: FS SIMD16 shader: 5 inst, 0 loops, 34 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 80 to 48 bytes.
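Shader 8 (the "GLSL IR for native vertex shader 8" dump above) is essentially a pass-through vertex shader plus two user clip distances. A hedged reconstruction of its source follows; in the real shader clip_coef sits in a uniform block (hence the load_ubo at byte offset 1104 / 0x450 in the NIR), so the plain-uniform declaration and #version here are assumptions:

    #version 330 core
    in vec4 vert_position;
    in vec4 vert_color;
    in vec2 vert_texcoord0;
    in vec2 vert_texcoord1;
    in vec2 vert_texcoord2;
    in float vert_texcoord0_w;
    in vec4 vert_normquat;
    in vec3 vert_view;
    out vec4 primary_color;
    out vec2 texcoord0;
    out vec2 texcoord1;
    out vec2 texcoord2;
    out float texcoord0_w;
    out vec4 normquat;
    out vec3 view;
    uniform vec4 clip_coef;   // a UBO member in the actual dump

    void main() {
        primary_color = vert_color;
        texcoord0 = vert_texcoord0;
        texcoord1 = vert_texcoord1;
        texcoord2 = vert_texcoord2;
        texcoord0_w = vert_texcoord0_w;
        normquat = vert_normquat;
        view = vert_view;
        gl_Position = vert_position;
        gl_ClipDistance[0] = -vert_position.z;               // as in the IR
        gl_ClipDistance[1] = dot(clip_coef, vert_position);
    }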
ARB_vertex_program 0 ir for native vertex shader # Vertex Program/Shader 0 0: DP4 OUTPUT[0].x, INPUT[0], STATE[0]; 1: DP4 OUTPUT[0].y, INPUT[0], STATE[1]; 2: DP4 OUTPUT[0].z, INPUT[0], STATE[2]; 3: DP4 OUTPUT[0].w, INPUT[0], STATE[3]; 4: MOV OUTPUT[1], INPUT[2]; 5: MOV OUTPUT[12].x, INPUT[15]; 6: END NIR (SSA form) for vertex shader: shader: MESA_SHADER_VERTEX name: ARB0 inputs: 0 outputs: 0 uniforms: 64 shared: 0 decl_var uniform INTERP_MODE_NONE vec4[4] parameters (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 in_0 (VERT_ATTRIB_POS, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 in_2 (VERT_ATTRIB_COLOR0, 2, 0) decl_var shader_in INTERP_MODE_NONE vec4 in_15 (VERT_ATTRIB_POINT_SIZE, 15, 0) decl_var shader_out INTERP_MODE_NONE vec4 out_0 (VARYING_SLOT_POS, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 out_1 (VARYING_SLOT_COL0, 1, 0) decl_var shader_out INTERP_MODE_NONE vec4 out_12 (VARYING_SLOT_PSIZ, 12, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec4 32 ssa_0 = undefined vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2 = intrinsic load_input (ssa_1) () (0, 0) /* base=0 */ /* component=0 */ /* in_0 */ vec4 32 ssa_3 = intrinsic load_uniform (ssa_1) () (0, 64) /* base=0 */ /* range=64 */ /* parameters */ vec4 32 ssa_4 = fdot_replicated4 ssa_2, ssa_3 vec1 32 ssa_5 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_6 = intrinsic load_uniform (ssa_5) () (0, 64) /* base=0 */ /* range=64 */ /* parameters */ vec4 32 ssa_7 = fdot_replicated4 ssa_2, ssa_6 vec1 32 ssa_8 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_9 = intrinsic load_uniform (ssa_8) () (0, 64) /* base=0 */ /* range=64 */ /* parameters */ vec4 32 ssa_10 = fdot_replicated4 ssa_2, ssa_9 vec1 32 ssa_11 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_12 = intrinsic load_uniform (ssa_11) () (0, 64) /* base=0 */ /* range=64 */ /* parameters */ vec4 32 ssa_13 = fdot_replicated4 ssa_2, ssa_12 vec4 32 ssa_14 = vec4 ssa_4.x, ssa_7.x, ssa_10.x, ssa_13.x vec4 32 ssa_15 = intrinsic load_input (ssa_1) () (1, 0) /* base=1 */ /* component=0 */ vec4 32 ssa_16 = intrinsic load_input (ssa_1) () (2, 0) /* base=2 */ /* component=0 */ /* in_2 */ vec4 32 ssa_17 = vec4 ssa_16.x, ssa_0.y, ssa_0.z, ssa_0.w intrinsic store_output (ssa_14, ssa_1) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* out_0 */ intrinsic store_output (ssa_15, ssa_1) () (1, 15, 0) /* base=1 */ /* wrmask=xyzw */ /* component=0 */ /* out_1 */ intrinsic store_output (ssa_17, ssa_1) () (12, 15, 0) /* base=12 */ /* wrmask=xyzw */ /* component=0 */ /* out_12 */ /* succs: block_0 */ block block_0: } NIR (final form) for vertex shader: shader: MESA_SHADER_VERTEX name: ARB0 inputs: 0 outputs: 0 uniforms: 64 shared: 0 decl_var uniform INTERP_MODE_NONE vec4[4] parameters (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 in_0 (VERT_ATTRIB_POS, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 in_2 (VERT_ATTRIB_COLOR0, 2, 0) decl_var shader_in INTERP_MODE_NONE vec4 in_15 (VERT_ATTRIB_POINT_SIZE, 15, 0) decl_var shader_out INTERP_MODE_NONE vec4 out_0 (VARYING_SLOT_POS, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 out_1 (VARYING_SLOT_COL0, 1, 0) decl_var shader_out INTERP_MODE_NONE vec4 out_12 (VARYING_SLOT_PSIZ, 12, 0) decl_function main returning void impl main { decl_reg vec4 32 r4 decl_reg vec4 32 r5 block block_0: /* preds: */ vec4 32 ssa_0 = undefined vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2 = intrinsic load_input (ssa_1) () (0, 0) /* base=0 */ /* component=0 */ /* in_0 */ vec4 
32 ssa_3 = intrinsic load_uniform (ssa_1) () (0, 64) /* base=0 */ /* range=64 */ /* parameters */
    vec4 32 ssa_4 = fdot_replicated4 ssa_2, ssa_3
    vec1 32 ssa_5 = load_const (0x00000010 /* 0.000000 */)
    vec4 32 ssa_6 = intrinsic load_uniform (ssa_5) () (0, 64) /* base=0 */ /* range=64 */ /* parameters */
    vec4 32 ssa_7 = fdot_replicated4 ssa_2, ssa_6
    vec1 32 ssa_8 = load_const (0x00000020 /* 0.000000 */)
    vec4 32 ssa_9 = intrinsic load_uniform (ssa_8) () (0, 64) /* base=0 */ /* range=64 */ /* parameters */
    vec4 32 ssa_10 = fdot_replicated4 ssa_2, ssa_9
    vec1 32 ssa_11 = load_const (0x00000030 /* 0.000000 */)
    vec4 32 ssa_12 = intrinsic load_uniform (ssa_11) () (0, 64) /* base=0 */ /* range=64 */ /* parameters */
    vec4 32 ssa_13 = fdot_replicated4 ssa_2, ssa_12
    r4.x = imov ssa_4.x
    r4.y = imov ssa_7.x
    r4.z = imov ssa_10.x
    r4.w = imov ssa_13.x
    vec4 32 ssa_15 = intrinsic load_input (ssa_1) () (1, 0) /* base=1 */ /* component=0 */
    vec4 32 ssa_16 = intrinsic load_input (ssa_1) () (2, 0) /* base=2 */ /* component=0 */ /* in_2 */
    r5.x = imov ssa_16.x
    r5.yzw = imov ssa_0.yzw
    intrinsic store_output (r4, ssa_1) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* out_0 */
    intrinsic store_output (ssa_15, ssa_1) () (1, 15, 0) /* base=1 */ /* wrmask=xyzw */ /* component=0 */ /* out_1 */
    intrinsic store_output (r5, ssa_1) () (12, 15, 0) /* base=12 */ /* wrmask=xyzw */ /* component=0 */ /* out_12 */
    /* succs: block_0 */
    block block_0:
}

VS Output VUE map (3 slots, non-SSO)
  [0] VARYING_SLOT_PSIZ
  [1] VARYING_SLOT_POS
  [2] VARYING_SLOT_COL0

Native code for unnamed vertex shader ARB0:
VS vec4 shader: 10 instructions. 0 loops. 66 cycles. 0:0 spills:fills. Compacted 160 to 128 bytes (20%)
START B0 (66 cycles)
dp4(8) g115<1>.xF g3<4>F g1<0>F { align16 NoDDClr 1Q compacted };
mov(8) g114<1>UD 0x00000000UD { align16 1Q compacted };
mov.sat(8) g116<1>F g4<4>F { align16 1Q };
dp4(8) g115<1>.yF g3<4>F g1.4<0>F { align16 NoDDClr,NoDDChk 1Q };
mov(8) g114<1>.wF g5<4>.xF { align16 1Q compacted };
dp4(8) g115<1>.zF g3<4>F g2<0>F { align16 NoDDClr,NoDDChk 1Q compacted };
dp4(8) g115<1>.wF g3<4>F g2.4<0>F { align16 NoDDChk 1Q };
mov(8) g113<1>UD g0<4>UD { align16 WE_all 1Q };
or(1) g113.5<1>UD g0.5<0,1,0>UD 0x0000ff00UD { align1 WE_all 1N };
send(8) null<1>F g113<4>F urb 0 write HWord interleave complete mlen 5 rlen 0 { align16 1Q EOT };
END B0

6866: message: shader compiler issue 13: VS vec4 shader: 10 inst, 0 loops, 66 cycles, 0:0 spills:fills, compacted 160 to 128 bytes.
6899: message: shader compiler issue 14: FS SIMD8 shader: 45 inst, 0 loops, 254 cycles, 0:0 spills:fills, Promoted 2 constants, compacted 720 to 512 bytes.
6899: message: shader compiler issue 15: FS SIMD16 shader: 46 inst, 0 loops, 318 cycles, 0:0 spills:fills, Promoted 2 constants, compacted 736 to 528 bytes.
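ARB0 is not a GLSL shader but an ARB_vertex_program-style program generated internally by Mesa (most likely the fixed-function vertex path). Read back into compatibility-profile GLSL it is roughly the sketch below; the attribute and uniform names are purely illustrative, and STATE[0]..STATE[3] stand for whatever state vectors the program references (e.g. rows of the modelview-projection matrix):

    // Rough, hypothetical GLSL equivalent of the ARB0 listing.
    attribute vec4 position;               // INPUT[0]
    attribute vec4 color;                  // INPUT[2]
    attribute float point_size;            // INPUT[15]
    uniform vec4 row0, row1, row2, row3;   // STATE[0]..STATE[3]

    void main() {
        gl_Position   = vec4(dot(position, row0), dot(position, row1),
                             dot(position, row2), dot(position, row3));
        gl_FrontColor = color;             // MOV OUTPUT[1], INPUT[2]
        gl_PointSize  = point_size;        // MOV OUTPUT[12].x, INPUT[15]
    }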
GLSL IR for native vertex shader 12: ( (declare (location=16 shader_in ) vec4 vs_in_reg0) (declare (location=17 shader_in ) vec4 vs_in_reg1) (declare (location=18 shader_in ) vec4 vs_in_reg2) (declare (location=31 shader_out ) vec4 vs_out_attr0) (declare (location=32 shader_out ) vec4 vs_out_attr1) (declare (location=33 shader_out ) vec4 vs_out_attr2) (declare (location=0 uniform ) pica_uniforms@0x7f4fbc0490a0 uniforms) (declare () vec4 reg_tmp11) (declare () vec4 reg_tmp12) (declare () vec4 reg_tmp14) (declare () vec4 reg_tmp15) ( function main (signature void (parameters ) ( (assign (zw) (var_ref reg_tmp11) (constant vec2 (0.000000 1.000000)) ) (declare (temporary ) vec4 ubo_load_temp) (declare (temporary ) uint ubo_load_temp_offset) (assign (x) (var_ref ubo_load_temp_offset) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset) (constant uint (1648)) ) ) ) (assign (w) (var_ref reg_tmp14) (swiz z (var_ref ubo_load_temp) )) (assign (xyz) (var_ref reg_tmp14) (swiz xyz (var_ref vs_in_reg0) )) (declare (temporary ) vec4 lhs) (declare (temporary ) vec4 ubo_load_temp@2) (declare (temporary ) uint ubo_load_temp_offset@3) (assign (x) (var_ref ubo_load_temp_offset@3) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@2) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@3) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs) (var_ref ubo_load_temp@2) ) (declare (temporary ) vec4 assignment_tmp) (assign (xyzw) (var_ref assignment_tmp) (expression vec4 * (var_ref lhs) (var_ref reg_tmp14) ) ) (assign (x) (var_ref reg_tmp15) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp) (var_ref assignment_tmp) ) (expression vec4 csel (expression bvec4 != (var_ref lhs) (var_ref lhs) ) (var_ref assignment_tmp) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp14) (var_ref reg_tmp14) ) (var_ref assignment_tmp) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@4) (declare (temporary ) vec4 ubo_load_temp@5) (declare (temporary ) uint ubo_load_temp_offset@6) (assign (x) (var_ref ubo_load_temp_offset@6) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@5) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@6) (constant uint (336)) ) ) ) (assign (xyzw) (var_ref lhs@4) (var_ref ubo_load_temp@5) ) (declare (temporary ) vec4 assignment_tmp@7) (assign (xyzw) (var_ref assignment_tmp@7) (expression vec4 * (var_ref lhs@4) (var_ref reg_tmp14) ) ) (assign (y) (var_ref reg_tmp15) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@7) (var_ref assignment_tmp@7) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@4) (var_ref lhs@4) ) (var_ref assignment_tmp@7) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp14) (var_ref reg_tmp14) ) (var_ref assignment_tmp@7) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@7) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@8) (declare (temporary ) vec4 ubo_load_temp@9) (declare (temporary ) uint ubo_load_temp_offset@10) (assign (x) (var_ref ubo_load_temp_offset@10) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@9) (expression vec4 ubo_load (constant uint (0)) (expression uint + 
(var_ref ubo_load_temp_offset@10) (constant uint (352)) ) ) ) (assign (xyzw) (var_ref lhs@8) (var_ref ubo_load_temp@9) ) (declare (temporary ) vec4 assignment_tmp@11) (assign (xyzw) (var_ref assignment_tmp@11) (expression vec4 * (var_ref lhs@8) (var_ref reg_tmp14) ) ) (assign (z) (var_ref reg_tmp15) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@11) (var_ref assignment_tmp@11) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@8) (var_ref lhs@8) ) (var_ref assignment_tmp@11) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp14) (var_ref reg_tmp14) ) (var_ref assignment_tmp@11) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@11) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@12) (declare (temporary ) vec4 ubo_load_temp@13) (declare (temporary ) uint ubo_load_temp_offset@14) (assign (x) (var_ref ubo_load_temp_offset@14) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@13) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@14) (constant uint (368)) ) ) ) (assign (xyzw) (var_ref lhs@12) (var_ref ubo_load_temp@13) ) (declare (temporary ) vec4 assignment_tmp@15) (assign (xyzw) (var_ref assignment_tmp@15) (expression vec4 * (var_ref lhs@12) (var_ref reg_tmp14) ) ) (assign (w) (var_ref reg_tmp15) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@15) (var_ref assignment_tmp@15) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@12) (var_ref lhs@12) ) (var_ref assignment_tmp@15) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp14) (var_ref reg_tmp14) ) (var_ref assignment_tmp@15) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@15) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@16) (declare (temporary ) vec4 ubo_load_temp@17) (declare (temporary ) uint ubo_load_temp_offset@18) (assign (x) (var_ref ubo_load_temp_offset@18) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@17) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@18) (constant uint (384)) ) ) ) (assign (xyzw) (var_ref lhs@16) (var_ref ubo_load_temp@17) ) (declare (temporary ) vec4 assignment_tmp@19) (assign (xyzw) (var_ref assignment_tmp@19) (expression vec4 * (var_ref lhs@16) (var_ref reg_tmp15) ) ) (assign (x) (var_ref reg_tmp14) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@19) (var_ref assignment_tmp@19) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@16) (var_ref lhs@16) ) (var_ref assignment_tmp@19) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp15) (var_ref reg_tmp15) ) (var_ref assignment_tmp@19) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@19) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@20) (declare (temporary ) vec4 ubo_load_temp@21) (declare (temporary ) uint ubo_load_temp_offset@22) (assign (x) (var_ref ubo_load_temp_offset@22) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@21) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@22) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@20) (var_ref ubo_load_temp@21) ) (declare (temporary ) vec4 assignment_tmp@23) (assign (xyzw) (var_ref assignment_tmp@23) (expression vec4 * (var_ref lhs@20) (var_ref 
reg_tmp15) ) ) (assign (y) (var_ref reg_tmp14) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@23) (var_ref assignment_tmp@23) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@20) (var_ref lhs@20) ) (var_ref assignment_tmp@23) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp15) (var_ref reg_tmp15) ) (var_ref assignment_tmp@23) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@23) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@24) (declare (temporary ) vec4 ubo_load_temp@25) (declare (temporary ) uint ubo_load_temp_offset@26) (assign (x) (var_ref ubo_load_temp_offset@26) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@25) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@26) (constant uint (416)) ) ) ) (assign (xyzw) (var_ref lhs@24) (var_ref ubo_load_temp@25) ) (declare (temporary ) vec4 assignment_tmp@27) (assign (xyzw) (var_ref assignment_tmp@27) (expression vec4 * (var_ref lhs@24) (var_ref reg_tmp15) ) ) (assign (z) (var_ref reg_tmp14) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@27) (var_ref assignment_tmp@27) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@24) (var_ref lhs@24) ) (var_ref assignment_tmp@27) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp15) (var_ref reg_tmp15) ) (var_ref assignment_tmp@27) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@27) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@28) (declare (temporary ) vec4 ubo_load_temp@29) (declare (temporary ) uint ubo_load_temp_offset@30) (assign (x) (var_ref ubo_load_temp_offset@30) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@29) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@30) (constant uint (432)) ) ) ) (assign (xyzw) (var_ref lhs@28) (var_ref ubo_load_temp@29) ) (declare (temporary ) vec4 assignment_tmp@31) (assign (xyzw) (var_ref assignment_tmp@31) (expression vec4 * (var_ref lhs@28) (var_ref reg_tmp15) ) ) (assign (w) (var_ref reg_tmp14) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@31) (var_ref assignment_tmp@31) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@28) (var_ref lhs@28) ) (var_ref assignment_tmp@31) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp15) (var_ref reg_tmp15) ) (var_ref assignment_tmp@31) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@31) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (assign (xyzw) (var_ref vs_out_attr0) (var_ref reg_tmp14) ) (declare (temporary ) vec4 lhs@32) (declare (temporary ) vec4 ubo_load_temp@33) (declare (temporary ) uint ubo_load_temp_offset@34) (assign (x) (var_ref ubo_load_temp_offset@34) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@33) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@34) (constant uint (464)) ) ) ) (assign (xyzw) (var_ref lhs@32) (var_ref ubo_load_temp@33) ) (declare (temporary ) vec4 assignment_tmp@35) (assign (xyzw) (var_ref assignment_tmp@35) (expression vec4 * (var_ref lhs@32) (swiz xxxx (var_ref vs_in_reg2) )) ) (declare (temporary ) vec4 ubo_load_temp@36) (declare (temporary ) uint ubo_load_temp_offset@37) (assign (x) (var_ref ubo_load_temp_offset@37) (constant uint (0)) ) 
(assign (xyzw) (var_ref ubo_load_temp@36) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@37) (constant uint (1648)) ) ) ) (assign (xyzw) (var_ref reg_tmp12) (expression vec4 + (swiz zzzz (var_ref ubo_load_temp@36) )(expression vec4 neg (swiz xxxx (var_ref vs_in_reg2) )) ) ) (declare (temporary ) vec4 rhs) (declare (temporary ) vec4 ubo_load_temp@38) (declare (temporary ) uint ubo_load_temp_offset@39) (assign (x) (var_ref ubo_load_temp_offset@39) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@38) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@39) (constant uint (448)) ) ) ) (assign (xyzw) (var_ref rhs) (var_ref ubo_load_temp@38) ) (declare (temporary ) vec4 assignment_tmp@40) (assign (xyzw) (var_ref assignment_tmp@40) (expression vec4 * (swiz xxxx (var_ref reg_tmp12) )(var_ref rhs) ) ) (assign (xyzw) (var_ref vs_out_attr1) (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@40) (var_ref assignment_tmp@40) ) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref reg_tmp12) )) (var_ref assignment_tmp@40) (expression vec4 csel (expression bvec4 != (var_ref rhs) (var_ref rhs) ) (var_ref assignment_tmp@40) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@40) ) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@35) (var_ref assignment_tmp@35) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@32) (var_ref lhs@32) ) (var_ref assignment_tmp@35) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref vs_in_reg2) )(swiz xxxx (var_ref vs_in_reg2) )) (var_ref assignment_tmp@35) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@35) ) ) ) (declare (temporary ) vec4 lhs@41) (declare (temporary ) vec4 ubo_load_temp@42) (declare (temporary ) uint ubo_load_temp_offset@43) (assign (x) (var_ref ubo_load_temp_offset@43) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@42) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@43) (constant uint (496)) ) ) ) (assign (xyzw) (var_ref lhs@41) (swiz xyyy (var_ref ubo_load_temp@42) )) (declare (temporary ) vec4 assignment_tmp@44) (assign (xyzw) (var_ref assignment_tmp@44) (expression vec4 * (var_ref lhs@41) (swiz xyyy (var_ref vs_in_reg1) )) ) (declare (temporary ) vec4 ubo_load_temp@45) (declare (temporary ) uint ubo_load_temp_offset@46) (assign (x) (var_ref ubo_load_temp_offset@46) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@45) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@46) (constant uint (480)) ) ) ) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 + (swiz xyyy (var_ref ubo_load_temp@45) )(swiz xyyy (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@44) (var_ref assignment_tmp@44) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@41) (var_ref lhs@41) ) (var_ref assignment_tmp@44) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref vs_in_reg1) )(swiz xyyy (var_ref vs_in_reg1) )) (var_ref assignment_tmp@44) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@44) ) )) )) (assign (xyzw) (var_ref vs_out_attr2) (swiz xyyy (var_ref reg_tmp11) )) )) ) ) NIR (SSA form) for vertex shader: shader: MESA_SHADER_VERTEX name: GLSL12 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE 
pica_uniforms uniforms (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 vs_in_reg0 (VERT_ATTRIB_GENERIC0, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 vs_in_reg1 (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec4 vs_in_reg2 (VERT_ATTRIB_GENERIC2, 18, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr0 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr1 (VARYING_SLOT_VAR1, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr2 (VARYING_SLOT_VAR2, 33, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_1 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_2 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec1 32 ssa_3 = load_const (0x00000670 /* 0.000000 */) vec4 32 ssa_4 = intrinsic load_ubo (ssa_0, ssa_3) () () vec4 32 ssa_5 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_6 = vec4 ssa_5.x, ssa_5.y, ssa_5.z, ssa_4.z vec1 32 ssa_7 = load_const (0x00000140 /* 0.000000 */) vec4 32 ssa_8 = intrinsic load_ubo (ssa_0, ssa_7) () () vec4 32 ssa_9 = fmul ssa_8, ssa_6 vec4 32 ssa_10 = fne ssa_6, ssa_6 vec4 32 ssa_11 = bcsel ssa_10, ssa_9, ssa_1 vec4 32 ssa_12 = fne ssa_8, ssa_8 vec4 32 ssa_13 = bcsel ssa_12, ssa_9, ssa_11 vec4 32 ssa_14 = fne ssa_9, ssa_9 vec4 32 ssa_15 = bcsel ssa_14, ssa_13, ssa_9 vec4 32 ssa_16 = fdot_replicated4 ssa_15, ssa_2 vec1 32 ssa_17 = load_const (0x00000150 /* 0.000000 */) vec4 32 ssa_18 = intrinsic load_ubo (ssa_0, ssa_17) () () vec4 32 ssa_19 = fmul ssa_18, ssa_6 vec4 32 ssa_20 = bcsel ssa_10, ssa_19, ssa_1 vec4 32 ssa_21 = fne ssa_18, ssa_18 vec4 32 ssa_22 = bcsel ssa_21, ssa_19, ssa_20 vec4 32 ssa_23 = fne ssa_19, ssa_19 vec4 32 ssa_24 = bcsel ssa_23, ssa_22, ssa_19 vec4 32 ssa_25 = fdot_replicated4 ssa_24, ssa_2 vec1 32 ssa_26 = load_const (0x00000160 /* 0.000000 */) vec4 32 ssa_27 = intrinsic load_ubo (ssa_0, ssa_26) () () vec4 32 ssa_28 = fmul ssa_27, ssa_6 vec4 32 ssa_29 = bcsel ssa_10, ssa_28, ssa_1 vec4 32 ssa_30 = fne ssa_27, ssa_27 vec4 32 ssa_31 = bcsel ssa_30, ssa_28, ssa_29 vec4 32 ssa_32 = fne ssa_28, ssa_28 vec4 32 ssa_33 = bcsel ssa_32, ssa_31, ssa_28 vec4 32 ssa_34 = fdot_replicated4 ssa_33, ssa_2 vec1 32 ssa_35 = load_const (0x00000170 /* 0.000000 */) vec4 32 ssa_36 = intrinsic load_ubo (ssa_0, ssa_35) () () vec4 32 ssa_37 = fmul ssa_36, ssa_6 vec4 32 ssa_38 = bcsel ssa_10, ssa_37, ssa_1 vec4 32 ssa_39 = fne ssa_36, ssa_36 vec4 32 ssa_40 = bcsel ssa_39, ssa_37, ssa_38 vec4 32 ssa_41 = fne ssa_37, ssa_37 vec4 32 ssa_42 = bcsel ssa_41, ssa_40, ssa_37 vec4 32 ssa_43 = fdot_replicated4 ssa_42, ssa_2 vec4 32 ssa_44 = vec4 ssa_16.x, ssa_25.x, ssa_34.x, ssa_43.x vec1 32 ssa_45 = load_const (0x00000180 /* 0.000000 */) vec4 32 ssa_46 = intrinsic load_ubo (ssa_0, ssa_45) () () vec4 32 ssa_47 = fmul ssa_46, ssa_44 vec4 32 ssa_48 = fne ssa_44, ssa_44 vec4 32 ssa_49 = bcsel ssa_48, ssa_47, ssa_1 vec4 32 ssa_50 = fne ssa_46, ssa_46 vec4 32 ssa_51 = bcsel ssa_50, ssa_47, ssa_49 vec4 32 ssa_52 = fne ssa_47, ssa_47 vec4 32 ssa_53 = bcsel ssa_52, ssa_51, ssa_47 vec4 32 ssa_54 = fdot_replicated4 ssa_53, ssa_2 vec1 32 ssa_55 = load_const (0x00000190 /* 0.000000 */) vec4 32 ssa_56 = intrinsic load_ubo (ssa_0, ssa_55) () () vec4 32 ssa_57 = fmul ssa_56, ssa_44 vec4 32 ssa_58 = bcsel ssa_48, ssa_57, ssa_1 vec4 32 ssa_59 = fne 
ssa_56, ssa_56 vec4 32 ssa_60 = bcsel ssa_59, ssa_57, ssa_58 vec4 32 ssa_61 = fne ssa_57, ssa_57 vec4 32 ssa_62 = bcsel ssa_61, ssa_60, ssa_57 vec4 32 ssa_63 = fdot_replicated4 ssa_62, ssa_2 vec1 32 ssa_64 = load_const (0x000001a0 /* 0.000000 */) vec4 32 ssa_65 = intrinsic load_ubo (ssa_0, ssa_64) () () vec4 32 ssa_66 = fmul ssa_65, ssa_44 vec4 32 ssa_67 = bcsel ssa_48, ssa_66, ssa_1 vec4 32 ssa_68 = fne ssa_65, ssa_65 vec4 32 ssa_69 = bcsel ssa_68, ssa_66, ssa_67 vec4 32 ssa_70 = fne ssa_66, ssa_66 vec4 32 ssa_71 = bcsel ssa_70, ssa_69, ssa_66 vec4 32 ssa_72 = fdot_replicated4 ssa_71, ssa_2 vec1 32 ssa_73 = load_const (0x000001b0 /* 0.000000 */) vec4 32 ssa_74 = intrinsic load_ubo (ssa_0, ssa_73) () () vec4 32 ssa_75 = fmul ssa_74, ssa_44 vec4 32 ssa_76 = bcsel ssa_48, ssa_75, ssa_1 vec4 32 ssa_77 = fne ssa_74, ssa_74 vec4 32 ssa_78 = bcsel ssa_77, ssa_75, ssa_76 vec4 32 ssa_79 = fne ssa_75, ssa_75 vec4 32 ssa_80 = bcsel ssa_79, ssa_78, ssa_75 vec4 32 ssa_81 = fdot_replicated4 ssa_80, ssa_2 vec4 32 ssa_82 = vec4 ssa_54.x, ssa_63.x, ssa_72.x, ssa_81.x vec1 32 ssa_83 = load_const (0x000001d0 /* 0.000000 */) vec4 32 ssa_84 = intrinsic load_ubo (ssa_0, ssa_83) () () vec4 32 ssa_85 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_86 = fmul ssa_84, ssa_85.xxxx vec4 32 ssa_87 = fadd ssa_4.zzzz, -ssa_85.xxxx vec1 32 ssa_88 = load_const (0x000001c0 /* 0.000000 */) vec4 32 ssa_89 = intrinsic load_ubo (ssa_0, ssa_88) () () vec4 32 ssa_90 = fmul ssa_87.xxxx, ssa_89 vec4 32 ssa_91 = fne ssa_89, ssa_89 vec4 32 ssa_92 = bcsel ssa_91, ssa_90, ssa_1 vec4 32 ssa_93 = fne ssa_87.xxxx, ssa_87.xxxx vec4 32 ssa_94 = bcsel ssa_93, ssa_90, ssa_92 vec4 32 ssa_95 = fne ssa_90, ssa_90 vec4 32 ssa_96 = bcsel ssa_95, ssa_94, ssa_90 vec4 32 ssa_97 = fne ssa_85.xxxx, ssa_85.xxxx vec4 32 ssa_98 = bcsel ssa_97, ssa_86, ssa_1 vec4 32 ssa_99 = fne ssa_84, ssa_84 vec4 32 ssa_100 = bcsel ssa_99, ssa_86, ssa_98 vec4 32 ssa_101 = fne ssa_86, ssa_86 vec4 32 ssa_102 = bcsel ssa_101, ssa_100, ssa_86 vec4 32 ssa_103 = fadd ssa_96, ssa_102 vec1 32 ssa_104 = load_const (0x000001f0 /* 0.000000 */) vec4 32 ssa_105 = intrinsic load_ubo (ssa_0, ssa_104) () () vec4 32 ssa_106 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec4 32 ssa_107 = fmul ssa_105.xyyy, ssa_106.xyyy vec1 32 ssa_108 = load_const (0x000001e0 /* 0.000000 */) vec4 32 ssa_109 = intrinsic load_ubo (ssa_0, ssa_108) () () vec4 32 ssa_110 = fne ssa_106.xyyy, ssa_106.xyyy vec4 32 ssa_111 = bcsel ssa_110, ssa_107, ssa_1 vec4 32 ssa_112 = fne ssa_105.xyyy, ssa_105.xyyy vec4 32 ssa_113 = bcsel ssa_112, ssa_107, ssa_111 vec4 32 ssa_114 = fne ssa_107, ssa_107 vec4 32 ssa_115 = bcsel ssa_114, ssa_113, ssa_107 vec4 32 ssa_116 = fadd ssa_109.xyyy, ssa_115.xyyy intrinsic store_output (ssa_82, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr0 */ intrinsic store_output (ssa_103, ssa_0) () (32, 15, 0) /* base=32 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr1 */ vec4 32 ssa_117 = imov ssa_116.xyyy intrinsic store_output (ssa_117, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr2 */ /* succs: block_0 */ block block_0: } NIR (final form) for vertex shader: shader: MESA_SHADER_VERTEX name: GLSL12 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE pica_uniforms uniforms (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 vs_in_reg0 (VERT_ATTRIB_GENERIC0, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 vs_in_reg1 
(VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec4 vs_in_reg2 (VERT_ATTRIB_GENERIC2, 18, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr0 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr1 (VARYING_SLOT_VAR1, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr2 (VARYING_SLOT_VAR2, 33, 0) decl_function main returning void impl main { decl_reg vec4 32 r0 decl_reg vec4 32 r1 decl_reg vec4 32 r2 block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_1 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_2 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec1 32 ssa_3 = load_const (0x00000670 /* 0.000000 */) vec4 32 ssa_4 = intrinsic load_ubo (ssa_0, ssa_3) () () vec4 32 ssa_5 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ r0.xyz = imov ssa_5.xyz r0.w = imov ssa_4.z vec1 32 ssa_7 = load_const (0x00000140 /* 0.000000 */) vec4 32 ssa_8 = intrinsic load_ubo (ssa_0, ssa_7) () () vec4 32 ssa_9 = fmul ssa_8, r0 vec4 32 ssa_10 = fne r0, r0 vec4 32 ssa_11 = bcsel ssa_10, ssa_9, ssa_1 vec4 32 ssa_12 = fne ssa_8, ssa_8 vec4 32 ssa_13 = bcsel ssa_12, ssa_9, ssa_11 vec4 32 ssa_14 = fne ssa_9, ssa_9 vec4 32 ssa_15 = bcsel ssa_14, ssa_13, ssa_9 vec4 32 ssa_16 = fdot_replicated4 ssa_15, ssa_2 vec1 32 ssa_17 = load_const (0x00000150 /* 0.000000 */) vec4 32 ssa_18 = intrinsic load_ubo (ssa_0, ssa_17) () () vec4 32 ssa_19 = fmul ssa_18, r0 vec4 32 ssa_20 = bcsel ssa_10, ssa_19, ssa_1 vec4 32 ssa_21 = fne ssa_18, ssa_18 vec4 32 ssa_22 = bcsel ssa_21, ssa_19, ssa_20 vec4 32 ssa_23 = fne ssa_19, ssa_19 vec4 32 ssa_24 = bcsel ssa_23, ssa_22, ssa_19 vec4 32 ssa_25 = fdot_replicated4 ssa_24, ssa_2 vec1 32 ssa_26 = load_const (0x00000160 /* 0.000000 */) vec4 32 ssa_27 = intrinsic load_ubo (ssa_0, ssa_26) () () vec4 32 ssa_28 = fmul ssa_27, r0 vec4 32 ssa_29 = bcsel ssa_10, ssa_28, ssa_1 vec4 32 ssa_30 = fne ssa_27, ssa_27 vec4 32 ssa_31 = bcsel ssa_30, ssa_28, ssa_29 vec4 32 ssa_32 = fne ssa_28, ssa_28 vec4 32 ssa_33 = bcsel ssa_32, ssa_31, ssa_28 vec4 32 ssa_34 = fdot_replicated4 ssa_33, ssa_2 vec1 32 ssa_35 = load_const (0x00000170 /* 0.000000 */) vec4 32 ssa_36 = intrinsic load_ubo (ssa_0, ssa_35) () () vec4 32 ssa_37 = fmul ssa_36, r0 vec4 32 ssa_38 = bcsel ssa_10, ssa_37, ssa_1 vec4 32 ssa_39 = fne ssa_36, ssa_36 vec4 32 ssa_40 = bcsel ssa_39, ssa_37, ssa_38 vec4 32 ssa_41 = fne ssa_37, ssa_37 vec4 32 ssa_42 = bcsel ssa_41, ssa_40, ssa_37 vec4 32 ssa_43 = fdot_replicated4 ssa_42, ssa_2 r1.x = imov ssa_16.x r1.y = imov ssa_25.x r1.z = imov ssa_34.x r1.w = imov ssa_43.x vec1 32 ssa_45 = load_const (0x00000180 /* 0.000000 */) vec4 32 ssa_46 = intrinsic load_ubo (ssa_0, ssa_45) () () vec4 32 ssa_47 = fmul ssa_46, r1 vec4 32 ssa_48 = fne r1, r1 vec4 32 ssa_49 = bcsel ssa_48, ssa_47, ssa_1 vec4 32 ssa_50 = fne ssa_46, ssa_46 vec4 32 ssa_51 = bcsel ssa_50, ssa_47, ssa_49 vec4 32 ssa_52 = fne ssa_47, ssa_47 vec4 32 ssa_53 = bcsel ssa_52, ssa_51, ssa_47 vec4 32 ssa_54 = fdot_replicated4 ssa_53, ssa_2 vec1 32 ssa_55 = load_const (0x00000190 /* 0.000000 */) vec4 32 ssa_56 = intrinsic load_ubo (ssa_0, ssa_55) () () vec4 32 ssa_57 = fmul ssa_56, r1 vec4 32 ssa_58 = bcsel ssa_48, ssa_57, ssa_1 vec4 32 ssa_59 = fne ssa_56, ssa_56 vec4 32 ssa_60 = bcsel ssa_59, ssa_57, ssa_58 vec4 32 ssa_61 = fne ssa_57, ssa_57 vec4 32 ssa_62 = bcsel ssa_61, 
ssa_60, ssa_57 vec4 32 ssa_63 = fdot_replicated4 ssa_62, ssa_2 vec1 32 ssa_64 = load_const (0x000001a0 /* 0.000000 */) vec4 32 ssa_65 = intrinsic load_ubo (ssa_0, ssa_64) () () vec4 32 ssa_66 = fmul ssa_65, r1 vec4 32 ssa_67 = bcsel ssa_48, ssa_66, ssa_1 vec4 32 ssa_68 = fne ssa_65, ssa_65 vec4 32 ssa_69 = bcsel ssa_68, ssa_66, ssa_67 vec4 32 ssa_70 = fne ssa_66, ssa_66 vec4 32 ssa_71 = bcsel ssa_70, ssa_69, ssa_66 vec4 32 ssa_72 = fdot_replicated4 ssa_71, ssa_2 vec1 32 ssa_73 = load_const (0x000001b0 /* 0.000000 */) vec4 32 ssa_74 = intrinsic load_ubo (ssa_0, ssa_73) () () vec4 32 ssa_75 = fmul ssa_74, r1 vec4 32 ssa_76 = bcsel ssa_48, ssa_75, ssa_1 vec4 32 ssa_77 = fne ssa_74, ssa_74 vec4 32 ssa_78 = bcsel ssa_77, ssa_75, ssa_76 vec4 32 ssa_79 = fne ssa_75, ssa_75 vec4 32 ssa_80 = bcsel ssa_79, ssa_78, ssa_75 vec4 32 ssa_81 = fdot_replicated4 ssa_80, ssa_2 r2.x = imov ssa_54.x r2.y = imov ssa_63.x r2.z = imov ssa_72.x r2.w = imov ssa_81.x vec1 32 ssa_83 = load_const (0x000001d0 /* 0.000000 */) vec4 32 ssa_84 = intrinsic load_ubo (ssa_0, ssa_83) () () vec4 32 ssa_85 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_86 = fmul ssa_84, ssa_85.xxxx vec4 32 ssa_87 = fadd r0.wwww, -ssa_85.xxxx vec1 32 ssa_88 = load_const (0x000001c0 /* 0.000000 */) vec4 32 ssa_89 = intrinsic load_ubo (ssa_0, ssa_88) () () vec4 32 ssa_90 = fmul ssa_87.xxxx, ssa_89 vec4 32 ssa_91 = fne ssa_89, ssa_89 vec4 32 ssa_92 = bcsel ssa_91, ssa_90, ssa_1 vec4 32 ssa_93 = fne ssa_87.xxxx, ssa_87.xxxx vec4 32 ssa_94 = bcsel ssa_93, ssa_90, ssa_92 vec4 32 ssa_95 = fne ssa_90, ssa_90 vec4 32 ssa_96 = bcsel ssa_95, ssa_94, ssa_90 vec4 32 ssa_97 = fne ssa_85.xxxx, ssa_85.xxxx vec4 32 ssa_98 = bcsel ssa_97, ssa_86, ssa_1 vec4 32 ssa_99 = fne ssa_84, ssa_84 vec4 32 ssa_100 = bcsel ssa_99, ssa_86, ssa_98 vec4 32 ssa_101 = fne ssa_86, ssa_86 vec4 32 ssa_102 = bcsel ssa_101, ssa_100, ssa_86 vec4 32 ssa_103 = fadd ssa_96, ssa_102 vec1 32 ssa_104 = load_const (0x000001f0 /* 0.000000 */) vec4 32 ssa_105 = intrinsic load_ubo (ssa_0, ssa_104) () () vec4 32 ssa_106 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec4 32 ssa_107 = fmul ssa_105.xyyy, ssa_106.xyyy vec1 32 ssa_108 = load_const (0x000001e0 /* 0.000000 */) vec4 32 ssa_109 = intrinsic load_ubo (ssa_0, ssa_108) () () vec4 32 ssa_110 = fne ssa_106.xyyy, ssa_106.xyyy vec4 32 ssa_111 = bcsel ssa_110, ssa_107, ssa_1 vec4 32 ssa_112 = fne ssa_105.xyyy, ssa_105.xyyy vec4 32 ssa_113 = bcsel ssa_112, ssa_107, ssa_111 vec4 32 ssa_114 = fne ssa_107, ssa_107 vec4 32 ssa_115 = bcsel ssa_114, ssa_113, ssa_107 vec4 32 ssa_116 = fadd ssa_109.xyyy, ssa_115.xyyy intrinsic store_output (r2, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr0 */ intrinsic store_output (ssa_103, ssa_0) () (32, 15, 0) /* base=32 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr1 */ vec4 32 ssa_117 = imov ssa_116.xyyy intrinsic store_output (ssa_117, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr2 */ /* succs: block_0 */ block block_0: } VS Output VUE map (7 slots, SSO) [0] VARYING_SLOT_PSIZ [1] VARYING_SLOT_POS [2] VARYING_SLOT_CLIP_DIST0 [3] VARYING_SLOT_CLIP_DIST1 [4] VARYING_SLOT_VAR0 [5] VARYING_SLOT_VAR1 [6] VARYING_SLOT_VAR2 Native code for unnamed vertex shader GLSL12: VS vec4 shader: 120 instructions. 0 loops. 1176 cycles. 0:0 spills:fills. 
Compacted 1920 to 1712 bytes (11%) START B0 (1176 cycles) mov(8) g13<1>.xUD 0x00000670UD { align16 1Q compacted }; mov(8) g4<1>.xyzD g1<4>.xyzzD { align16 1Q }; mov(8) g18<1>.xUD 0x00000140UD { align16 1Q compacted }; mov(8) g30<1>.xUD 0x00000150UD { align16 1Q compacted }; mov(8) g41<1>.xUD 0x00000160UD { align16 1Q compacted }; mov(8) g52<1>.xUD 0x00000170UD { align16 1Q compacted }; mov(8) g63<1>.xUD 0x00000180UD { align16 1Q compacted }; mov(8) g75<1>.xUD 0x00000190UD { align16 1Q compacted }; mov(8) g86<1>.xUD 0x000001a0UD { align16 1Q compacted }; mov(8) g97<1>.xUD 0x000001b0UD { align16 1Q compacted }; mov(8) g108<1>.xUD 0x000001d0UD { align16 1Q compacted }; mov(8) g7<1>.xUD 0x000001c0UD { align16 1Q compacted }; mov(8) g114<1>UD 0x00000000UD { align16 1Q compacted }; send(8) g12<1>F g13<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g17<1>F g18<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g29<1>F g30<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g40<1>F g41<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g51<1>F g52<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g62<1>F g63<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g74<1>F g75<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g85<1>F g86<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g96<1>F g97<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g107<1>F g108<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g6<1>F g7<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g4<1>.wD g12<4>.zD { align16 1Q }; add(8) g111<1>.xF g12<4>.zF -g3<4>.xF { align16 1Q }; mul(8) g110<1>F g107<4>F g3<4>.xF { align16 1Q }; mul(8) g19<1>F g17<4>F g4<4>F { align16 1Q compacted }; cmp.nz.f0(8) g20<1>F g4<4>F g4<4>F { align16 1Q compacted }; mul(8) g31<1>F g29<4>F g4<4>F { align16 1Q compacted }; mul(8) g42<1>F g40<4>F g4<4>F { align16 1Q compacted }; mul(8) g53<1>F g51<4>F g4<4>F { align16 1Q compacted }; mul(8) g8<1>F g111<4>.xF g6<4>F { align16 1Q }; (+f0) sel(8) g21<1>UD g19<4>UD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g17<4>F g17<4>F { align16 1Q switch }; (+f0) sel(8) g23<1>UD g19<4>UD g21<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g19<4>F g19<4>F { align16 1Q switch }; (+f0) sel(8) g25<1>UD g23<4>UD g19<4>UD { align16 1Q }; dp4(8) g5<1>.xF g25<4>F 1F { align16 1Q }; cmp.nz.f0(8) null<1>D g20<4>D 0D { align16 1Q switch }; mov(8) g25<1>.xUD 0x000001f0UD { align16 1Q compacted }; (+f0) sel(8) g32<1>UD g31<4>UD 0x00000000UD { align16 1Q }; send(8) g24<1>F g25<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; cmp.nz.f0(8) null<1>F g29<4>F g29<4>F { align16 1Q switch }; mul(8) g27<1>F g24<4>.xyyyF g2<4>.xyyyF { align16 1Q compacted }; (+f0) sel(8) g34<1>UD g31<4>UD g32<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g31<4>F g31<4>F { align16 1Q switch }; (+f0) sel(8) g36<1>UD g34<4>UD g31<4>UD { align16 1Q }; dp4(8) g5<1>.yF g36<4>F 1F { align16 1Q }; mov(8) g31<1>.xUD 0x000001e0UD { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g20<4>D 0D { align16 1Q switch }; send(8) g30<1>F g31<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; (+f0) sel(8) g43<1>UD 
g42<4>UD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g40<4>F g40<4>F { align16 1Q switch }; (+f0) sel(8) g45<1>UD g42<4>UD g43<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g42<4>F g42<4>F { align16 1Q switch }; (+f0) sel(8) g47<1>UD g45<4>UD g42<4>UD { align16 1Q }; dp4(8) g5<1>.zF g47<4>F 1F { align16 1Q }; cmp.nz.f0(8) null<1>D g20<4>D 0D { align16 1Q switch }; (+f0) sel(8) g54<1>UD g53<4>UD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g51<4>F g51<4>F { align16 1Q switch }; (+f0) sel(8) g56<1>UD g53<4>UD g54<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g53<4>F g53<4>F { align16 1Q switch }; (+f0) sel(8) g58<1>UD g56<4>UD g53<4>UD { align16 1Q }; dp4(8) g5<1>.wF g58<4>F 1F { align16 1Q }; mul(8) g64<1>F g62<4>F g5<4>F { align16 1Q compacted }; cmp.nz.f0(8) g65<1>F g5<4>F g5<4>F { align16 1Q compacted }; mul(8) g76<1>F g74<4>F g5<4>F { align16 1Q compacted }; mul(8) g87<1>F g85<4>F g5<4>F { align16 1Q compacted }; mul(8) g98<1>F g96<4>F g5<4>F { align16 1Q compacted }; (+f0) sel(8) g66<1>UD g64<4>UD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g62<4>F g62<4>F { align16 1Q switch }; (+f0) sel(8) g68<1>UD g64<4>UD g66<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g64<4>F g64<4>F { align16 1Q switch }; (+f0) sel(8) g70<1>UD g68<4>UD g64<4>UD { align16 1Q }; dp4(8) g118<1>.xF g70<4>F 1F { align16 1Q }; cmp.nz.f0(8) null<1>D g65<4>D 0D { align16 1Q switch }; (+f0) sel(8) g77<1>UD g76<4>UD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g74<4>F g74<4>F { align16 1Q switch }; (+f0) sel(8) g79<1>UD g76<4>UD g77<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g76<4>F g76<4>F { align16 1Q switch }; (+f0) sel(8) g81<1>UD g79<4>UD g76<4>UD { align16 1Q }; dp4(8) g118<1>.yF g81<4>F 1F { align16 1Q }; cmp.nz.f0(8) null<1>D g65<4>D 0D { align16 1Q switch }; (+f0) sel(8) g88<1>UD g87<4>UD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g85<4>F g85<4>F { align16 1Q switch }; (+f0) sel(8) g90<1>UD g87<4>UD g88<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g87<4>F g87<4>F { align16 1Q switch }; (+f0) sel(8) g92<1>UD g90<4>UD g87<4>UD { align16 1Q }; dp4(8) g118<1>.zF g92<4>F 1F { align16 1Q }; cmp.nz.f0(8) null<1>D g65<4>D 0D { align16 1Q switch }; (+f0) sel(8) g99<1>UD g98<4>UD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g96<4>F g96<4>F { align16 1Q switch }; (+f0) sel(8) g101<1>UD g98<4>UD g99<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g98<4>F g98<4>F { align16 1Q switch }; (+f0) sel(8) g103<1>UD g101<4>UD g98<4>UD { align16 1Q }; dp4(8) g118<1>.wF g103<4>F 1F { align16 1Q }; cmp.nz.f0(8) null<1>F g6<4>F g6<4>F { align16 1Q switch }; (+f0) sel(8) g10<1>UD g8<4>UD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g111<4>.xF g111<4>.xF { align16 1Q switch }; (+f0) sel(8) g12<1>UD g8<4>UD g10<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g8<4>F g8<4>F { align16 1Q switch }; (+f0) sel(8) g14<1>UD g12<4>UD g8<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g3<4>.xF g3<4>.xF { align16 1Q switch }; (+f0) sel(8) g16<1>UD g110<4>UD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g107<4>F g107<4>F { align16 1Q switch }; (+f0) sel(8) g18<1>UD g110<4>UD g16<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g110<4>F g110<4>F { align16 1Q switch }; (+f0) sel(8) g20<1>UD g18<4>UD g110<4>UD { align16 1Q }; add(8) g119<1>F g14<4>F g20<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g2<4>.xyyyF g2<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g33<1>.xyUD g27<4>.xyyyUD 0x00000000UD { align16 1Q }; cmp.nz.f0(8) null<1>F g24<4>.xyyyF g24<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g35<1>.xyUD g27<4>.xyyyUD 
g33<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g27<4>F g27<4>F { align16 1Q switch }; (+f0) sel(8) g37<1>.xyUD g35<4>.xyyyUD g27<4>.xyyyUD { align16 1Q }; add(8) g120<1>F g30<4>.xyyyF g37<4>.xyyyF { align16 1Q compacted }; mov(8) g113<1>UD g0<4>UD { align16 WE_all 1Q }; or(1) g113.5<1>UD g0.5<0,1,0>UD 0x0000ff00UD { align1 WE_all 1N }; send(8) null<1>F g113<4>F urb 0 write HWord interleave complete mlen 9 rlen 0 { align16 1Q EOT }; END B0 6943: message: shader compiler issue 16: VS vec4 shader: 120 inst, 0 loops, 1176 cycles, 0:0 spills:fills, compacted 1920 to 1712 bytes. 6962: message: shader compiler issue 17: GS vec4 shader: 60 inst, 0 loops, 224 cycles, 0:0 spills:fills, compacted 960 to 832 bytes. 6977: message: shader compiler issue 18: FS SIMD8 shader: 19 inst, 0 loops, 306 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 304 to 224 bytes. 6977: message: shader compiler issue 19: FS SIMD16 shader: 20 inst, 0 loops, 340 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 320 to 240 bytes. 7028: message: shader compiler issue 20: FS SIMD8 shader: 19 inst, 0 loops, 306 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 304 to 224 bytes. 7028: message: shader compiler issue 21: FS SIMD16 shader: 20 inst, 0 loops, 340 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 320 to 240 bytes. 7067: message: api performance issue 22: Failed to fast clear 240x400 depth because of scissors. Possible 5% performance win if avoided. 7216: message: api performance issue 23: using glBufferSubData(buffer 4, offset 0, size 1120) to update a GL_STATIC_DRAW buffer 7227: message: shader compiler issue 24: FS SIMD8 shader: 19 inst, 0 loops, 306 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 304 to 224 bytes. 7227: message: shader compiler issue 25: FS SIMD16 shader: 20 inst, 0 loops, 340 cycles, 0:0 spills:fills, Promoted 0 constants, compacted 320 to 240 bytes. 7267: message: shader compiler issue 26: FS SIMD8 shader: 64 inst, 0 loops, 466 cycles, 0:0 spills:fills, Promoted 2 constants, compacted 1024 to 736 bytes. 7267: message: shader compiler issue 27: FS SIMD16 shader: 65 inst, 0 loops, 536 cycles, 0:0 spills:fills, Promoted 2 constants, compacted 1040 to 752 bytes. 
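
Annotation (not part of the trace output): the vec4 code above is dominated by fmul/fne/bcsel chains that are then collapsed with fdot_replicated4 against vec4(1.0). Reading the csel chains in the IR, each multiply appears to be NaN-sanitized: a NaN product is kept only when one of the operands was already NaN, and is forced to 0 otherwise, so that 0 * inf contributes 0; the dp4 against 1.0 then just sums the four sanitized products, which looks like one row of a matrix multiply. Below is a minimal C sketch of that semantics only; the names sanitized_mul and sanitized_dot4 are illustrative and do not come from the trace.

#include <math.h>
#include <stdio.h>

/* Sketch of the fmul/fne/bcsel pattern in the dump above.
 * "p != p" is the NaN test that the fne / cmp.nz instructions perform. */
static float sanitized_mul(float a, float b)
{
    float p = a * b;
    if (p != p) {                 /* product is NaN */
        if (a != a || b != b)     /* an operand was already NaN: keep it */
            return p;
        return 0.0f;              /* NaN produced by 0 * inf: force to 0 */
    }
    return p;
}

/* The fdot_replicated4 against vec4(1.0) sums the sanitized products,
 * i.e. one row of the matrix-vector multiply in the shader above. */
static float sanitized_dot4(const float a[4], const float b[4])
{
    float sum = 0.0f;
    for (int i = 0; i < 4; i++)
        sum += sanitized_mul(a[i], b[i]);
    return sum;
}

int main(void)
{
    float row[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
    float v[4]   = { INFINITY, 1.0f, 1.0f, 1.0f };
    /* 0 * inf contributes 0 instead of poisoning the dot product with NaN. */
    printf("%f\n", sanitized_dot4(row, v));
    return 0;
}
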
7293: message: api performance issue 23: using glBufferSubData(buffer 4, offset 0, size 1120) to update a GL_STATIC_DRAW buffer GLSL IR for native vertex shader 18: ( (declare (location=16 shader_in ) vec4 vs_in_reg0) (declare (location=31 shader_out ) vec4 vs_out_attr0) (declare (location=32 shader_out ) vec4 vs_out_attr1) (declare (location=33 shader_out ) vec4 vs_out_attr2) (declare (location=34 shader_out ) vec4 vs_out_attr3) (declare (location=35 shader_out ) vec4 vs_out_attr4) (declare (location=0 uniform ) pica_uniforms@0x7f4fbc0490a0 uniforms) (declare () bvec2 conditional_code) (declare () ivec3 address_registers) (declare () vec4 reg_tmp0) (declare () vec4 reg_tmp1) (declare () vec4 reg_tmp2) (declare () vec4 reg_tmp3) (declare () vec4 reg_tmp4) (declare () vec4 reg_tmp7) (declare () vec4 reg_tmp8) (declare () vec4 reg_tmp9) (declare () vec4 reg_tmp10) (declare () vec4 reg_tmp11) (declare () vec4 reg_tmp12) (declare () vec4 reg_tmp13) (declare () vec4 reg_tmp14) ( function main (signature void (parameters ) ( (assign (xy) (var_ref conditional_code) (constant bvec2 (0 0)) ) (assign (xyz) (var_ref address_registers) (constant ivec3 (0 0 0)) ) (assign (xyzw) (var_ref reg_tmp0) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp1) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp2) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp3) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp4) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp7) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp8) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp9) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp10) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp11) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp12) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp13) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref reg_tmp14) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref vs_out_attr0) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref vs_out_attr1) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref vs_out_attr2) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref vs_out_attr3) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (assign (xyzw) (var_ref vs_out_attr4) (constant vec4 (0.000000 0.000000 0.000000 1.000000)) ) (declare (temporary ) bool return_flag) (assign (x) (var_ref return_flag) (constant bool (0)) ) (declare () uint jmp_to) (assign (x) (var_ref jmp_to) (constant uint (109)) ) (loop ( (declare (temporary ) bool switch_is_fallthru_tmp) (assign (x) (var_ref switch_is_fallthru_tmp) (constant bool (0)) ) (loop ( (declare (temporary ) uint switch_test_tmp) (assign (x) (var_ref switch_test_tmp) (var_ref jmp_to) ) (assign (x) (var_ref switch_is_fallthru_tmp) (expression bool || (var_ref switch_is_fallthru_tmp) (expression bool == (constant uint (109)) (var_ref jmp_to) ) ) ) (if (var_ref switch_is_fallthru_tmp) ( (assign (x) (var_ref address_registers) (swiz x (expression ivec4 f2i (swiz xxxx (var_ref vs_in_reg0) )) )) (declare (temporary ) 
vec4 ubo_load_temp) (declare (temporary ) uint ubo_load_temp_offset) (assign (x) (var_ref ubo_load_temp_offset) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (9)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp0) (swiz wzyx (var_ref ubo_load_temp) )) (assign (xy) (var_ref reg_tmp1) (swiz zw (var_ref vs_in_reg0) )) (declare (temporary ) vec4 ubo_load_temp@47) (declare (temporary ) uint ubo_load_temp_offset@48) (assign (x) (var_ref ubo_load_temp_offset@48) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@47) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@48) (constant uint (400)) ) ) ) (assign (zw) (var_ref reg_tmp1) (swiz xy (var_ref ubo_load_temp@47) )) (assign (xy) (var_ref address_registers) (swiz xy (expression ivec4 f2i (swiz xyyy (var_ref reg_tmp0) )) )) (declare (temporary ) vec4 ubo_load_temp@49) (declare (temporary ) uint ubo_load_temp_offset@50) (assign (x) (var_ref ubo_load_temp_offset@50) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@49) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@50) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp2) (swiz wzyx (var_ref ubo_load_temp@49) )) (declare (temporary ) bool ubo_load_temp@51) (declare (temporary ) uint ubo_load_temp_offset@52) (assign (x) (var_ref ubo_load_temp_offset@52) (constant uint (0)) ) (assign (x) (var_ref ubo_load_temp@51) (expression bool ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@52) (constant uint (0)) ) ) ) (if (var_ref ubo_load_temp@51) ( (assign (x) (var_ref jmp_to) (constant uint (191)) ) break ) ()) (declare (temporary ) vec4 ubo_load_temp@53) (declare (temporary ) uint ubo_load_temp_offset@54) (assign (x) (var_ref ubo_load_temp_offset@54) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (31)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@53) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@54) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp4) (swiz wzyx (var_ref ubo_load_temp@53) )) (declare (temporary ) vec4 assignment_tmp) (assign (xyzw) (var_ref assignment_tmp) (expression vec4 * (swiz zwww (var_ref vs_in_reg0) )(swiz xyyy (var_ref reg_tmp2) )) ) (assign (xy) (var_ref reg_tmp1) (swiz xy (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp) (var_ref assignment_tmp) ) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref vs_in_reg0) )(swiz zwww (var_ref vs_in_reg0) )) (var_ref assignment_tmp) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp2) )(swiz xyyy (var_ref reg_tmp2) )) (var_ref assignment_tmp) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp) ) (swiz zwww (var_ref reg_tmp2) )) )) (declare (temporary ) vec4 lhs) (declare (temporary ) vec4 ubo_load_temp@55) (declare (temporary ) uint ubo_load_temp_offset@56) (assign (x) (var_ref 
ubo_load_temp_offset@56) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@55) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@56) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs) (swiz wzyx (var_ref ubo_load_temp@55) )) (declare (temporary ) vec4 assignment_tmp@57) (assign (xyzw) (var_ref assignment_tmp@57) (expression vec4 * (var_ref lhs) (var_ref reg_tmp1) ) ) (assign (x) (var_ref reg_tmp3) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@57) (var_ref assignment_tmp@57) ) (expression vec4 csel (expression bvec4 != (var_ref lhs) (var_ref lhs) ) (var_ref assignment_tmp@57) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@57) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@57) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@58) (declare (temporary ) vec4 ubo_load_temp@59) (declare (temporary ) uint ubo_load_temp_offset@60) (assign (x) (var_ref ubo_load_temp_offset@60) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@59) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@60) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@58) (swiz wzyx (var_ref ubo_load_temp@59) )) (declare (temporary ) vec4 assignment_tmp@61) (assign (xyzw) (var_ref assignment_tmp@61) (expression vec4 * (var_ref lhs@58) (var_ref reg_tmp1) ) ) (assign (y) (var_ref reg_tmp3) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@61) (var_ref assignment_tmp@61) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@58) (var_ref lhs@58) ) (var_ref assignment_tmp@61) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@61) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@61) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@62) (declare (temporary ) vec4 ubo_load_temp@63) (declare (temporary ) uint ubo_load_temp_offset@64) (assign (x) (var_ref ubo_load_temp_offset@64) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (34)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@63) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@64) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@62) (swiz wzyx (var_ref ubo_load_temp@63) )) (declare (temporary ) vec4 assignment_tmp@65) (assign (xyzw) (var_ref assignment_tmp@65) (expression vec4 * (var_ref lhs@62) (var_ref reg_tmp1) ) ) (assign (z) (var_ref reg_tmp3) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@65) (var_ref assignment_tmp@65) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@62) (var_ref lhs@62) ) (var_ref assignment_tmp@65) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@65) (constant vec4 (0.000000 0.000000 0.000000 
0.000000)) ) ) (var_ref assignment_tmp@65) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (assign (w) (var_ref reg_tmp3) (swiz w (var_ref reg_tmp1) )) (declare (temporary ) vec4 ubo_load_temp@66) (declare (temporary ) uint ubo_load_temp_offset@67) (assign (x) (var_ref ubo_load_temp_offset@67) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@66) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@67) (constant uint (384)) ) ) ) (assign (xyzw) (var_ref reg_tmp11) (swiz wzyx (var_ref ubo_load_temp@66) )) (declare (temporary ) vec4 ubo_load_temp@68) (declare (temporary ) uint ubo_load_temp_offset@69) (assign (x) (var_ref ubo_load_temp_offset@69) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (34)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@68) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@69) (constant uint (320)) ) ) ) (assign (z) (var_ref reg_tmp11) (swiz z (expression vec4 + (expression vec4 neg (swiz xxxx (var_ref ubo_load_temp@68) )) (swiz zzzz (var_ref reg_tmp11) )) )) (declare (temporary ) vec4 ubo_load_temp@70) (declare (temporary ) uint ubo_load_temp_offset@71) (assign (x) (var_ref ubo_load_temp_offset@71) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@70) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@71) (constant uint (400)) ) ) ) (assign (x) (var_ref conditional_code) (expression bool != (swiz x (var_ref ubo_load_temp@70) )(swiz x (var_ref reg_tmp11) )) ) (declare (temporary ) vec4 ubo_load_temp@72) (declare (temporary ) uint ubo_load_temp_offset@73) (assign (x) (var_ref ubo_load_temp_offset@73) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@72) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@73) (constant uint (400)) ) ) ) (assign (y) (var_ref conditional_code) (expression bool < (swiz x (var_ref ubo_load_temp@72) )(swiz z (var_ref reg_tmp11) )) ) (if (expression bool all_equal (expression bvec2 ! 
(var_ref conditional_code) ) (constant bvec2 (0 0)) ) ( (assign (z) (var_ref reg_tmp11) (expression float rcp (swiz z (var_ref reg_tmp11) )) ) (assign (x) (var_ref reg_tmp3) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp3) )(swiz xxxx (var_ref reg_tmp11) )) )) (declare (temporary ) vec4 lhs@74) (assign (xyzw) (var_ref lhs@74) (expression vec4 neg (swiz yyyy (var_ref reg_tmp11) )) ) (declare (temporary ) vec4 assignment_tmp@75) (assign (xyzw) (var_ref assignment_tmp@75) (expression vec4 * (var_ref lhs@74) (swiz zzzz (var_ref reg_tmp11) )) ) (assign (x) (var_ref reg_tmp3) (swiz x (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@75) (var_ref assignment_tmp@75) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@74) (var_ref lhs@74) ) (var_ref assignment_tmp@75) (expression vec4 csel (expression bvec4 != (swiz zzzz (var_ref reg_tmp11) )(swiz zzzz (var_ref reg_tmp11) )) (var_ref assignment_tmp@75) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@75) ) (swiz xxxx (var_ref reg_tmp3) )) )) ) ()) (declare (temporary ) vec4 lhs@76) (declare (temporary ) vec4 ubo_load_temp@77) (declare (temporary ) uint ubo_load_temp_offset@78) (assign (x) (var_ref ubo_load_temp_offset@78) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@77) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@78) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@76) (swiz wzyx (var_ref ubo_load_temp@77) )) (declare (temporary ) vec4 assignment_tmp@79) (assign (xyzw) (var_ref assignment_tmp@79) (expression vec4 * (var_ref lhs@76) (var_ref reg_tmp3) ) ) (assign (x) (var_ref vs_out_attr0) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@79) (var_ref assignment_tmp@79) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@76) (var_ref lhs@76) ) (var_ref assignment_tmp@79) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp3) (var_ref reg_tmp3) ) (var_ref assignment_tmp@79) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@79) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@80) (declare (temporary ) vec4 ubo_load_temp@81) (declare (temporary ) uint ubo_load_temp_offset@82) (assign (x) (var_ref ubo_load_temp_offset@82) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@81) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@82) (constant uint (336)) ) ) ) (assign (xyzw) (var_ref lhs@80) (swiz wzyx (var_ref ubo_load_temp@81) )) (declare (temporary ) vec4 assignment_tmp@83) (assign (xyzw) (var_ref assignment_tmp@83) (expression vec4 * (var_ref lhs@80) (var_ref reg_tmp3) ) ) (assign (y) (var_ref vs_out_attr0) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@83) (var_ref assignment_tmp@83) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@80) (var_ref lhs@80) ) (var_ref assignment_tmp@83) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp3) (var_ref reg_tmp3) ) (var_ref assignment_tmp@83) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@83) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@84) (declare (temporary ) vec4 ubo_load_temp@85) (declare (temporary ) uint ubo_load_temp_offset@86) (assign (x) (var_ref ubo_load_temp_offset@86) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@85) 
(expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@86) (constant uint (352)) ) ) ) (assign (xyzw) (var_ref lhs@84) (swiz wzyx (var_ref ubo_load_temp@85) )) (declare (temporary ) vec4 assignment_tmp@87) (assign (xyzw) (var_ref assignment_tmp@87) (expression vec4 * (var_ref lhs@84) (var_ref reg_tmp3) ) ) (assign (z) (var_ref vs_out_attr0) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@87) (var_ref assignment_tmp@87) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@84) (var_ref lhs@84) ) (var_ref assignment_tmp@87) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp3) (var_ref reg_tmp3) ) (var_ref assignment_tmp@87) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@87) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@88) (declare (temporary ) vec4 ubo_load_temp@89) (declare (temporary ) uint ubo_load_temp_offset@90) (assign (x) (var_ref ubo_load_temp_offset@90) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@89) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@90) (constant uint (368)) ) ) ) (assign (xyzw) (var_ref lhs@88) (swiz wzyx (var_ref ubo_load_temp@89) )) (declare (temporary ) vec4 assignment_tmp@91) (assign (xyzw) (var_ref assignment_tmp@91) (expression vec4 * (var_ref lhs@88) (var_ref reg_tmp3) ) ) (assign (w) (var_ref vs_out_attr0) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@91) (var_ref assignment_tmp@91) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@88) (var_ref lhs@88) ) (var_ref assignment_tmp@91) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp3) (var_ref reg_tmp3) ) (var_ref assignment_tmp@91) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@91) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) bvec2 greaterThanEqual_retval) (declare (temporary ) vec4 ubo_load_temp@92) (declare (temporary ) uint ubo_load_temp_offset@93) (assign (x) (var_ref ubo_load_temp_offset@93) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@92) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@93) (constant uint (400)) ) ) ) (assign (xy) (var_ref greaterThanEqual_retval) (expression bvec2 >= (swiz yy (var_ref ubo_load_temp@92) )(swiz ww (var_ref reg_tmp0) )) ) (assign (xy) (var_ref conditional_code) (var_ref greaterThanEqual_retval) ) (if (expression bool all_equal (var_ref greaterThanEqual_retval) (constant bvec2 (1 1)) ) ( (declare (temporary ) vec4 ubo_load_temp@94) (declare (temporary ) uint ubo_load_temp_offset@95) (assign (x) (var_ref ubo_load_temp_offset@95) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@94) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@95) (constant uint (400)) ) ) ) (assign (xyz) (var_ref vs_out_attr1) (swiz yyy (var_ref ubo_load_temp@94) )) (assign (w) (var_ref vs_out_attr1) (swiz w (var_ref reg_tmp0) )) ) ( (assign (y) (var_ref address_registers) (swiz y (expression ivec4 f2i (swiz wwww (var_ref reg_tmp0) )) )) (declare (temporary ) vec4 ubo_load_temp@96) (declare (temporary ) uint ubo_load_temp_offset@97) (assign (x) (var_ref ubo_load_temp_offset@97) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref 
address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@96) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@97) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp7) (swiz wzyx (var_ref ubo_load_temp@96) )) (declare (temporary ) vec4 ubo_load_temp@98) (declare (temporary ) uint ubo_load_temp_offset@99) (assign (x) (var_ref ubo_load_temp_offset@99) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@98) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@99) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp8) (swiz wzyx (var_ref ubo_load_temp@98) )) (declare (temporary ) vec4 ubo_load_temp@100) (declare (temporary ) uint ubo_load_temp_offset@101) (assign (x) (var_ref ubo_load_temp_offset@101) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (34)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@100) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@101) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp9) (swiz wzyx (var_ref ubo_load_temp@100) )) (declare (temporary ) vec4 ubo_load_temp@102) (declare (temporary ) uint ubo_load_temp_offset@103) (assign (x) (var_ref ubo_load_temp_offset@103) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (35)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@102) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@103) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp10) (swiz wzyx (var_ref ubo_load_temp@102) )) (assign (xy) (var_ref reg_tmp11) (swiz zw (var_ref vs_in_reg0) )) (assign (x) (var_ref reg_tmp14) (swiz x (expression vec4 floor (swiz yyyy (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp14) (swiz x (expression vec4 + (swiz yyyy (var_ref reg_tmp0) )(expression vec4 neg (swiz xxxx (var_ref reg_tmp14) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval) (declare (temporary ) vec4 ubo_load_temp@104) (declare (temporary ) uint ubo_load_temp_offset@105) (assign (x) (var_ref ubo_load_temp_offset@105) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@104) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@105) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval) (expression bvec2 >= (swiz xx (var_ref reg_tmp14) )(swiz ww (var_ref ubo_load_temp@104) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval) ) (if (swiz x (var_ref lessThanEqual_retval) )( (assign (z) (var_ref reg_tmp11) (expression float rcp (swiz x (var_ref reg_tmp4) )) ) (assign (w) (var_ref reg_tmp11) (expression float rcp (swiz y (var_ref reg_tmp4) )) ) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 + (swiz xyyy (var_ref reg_tmp1) )(expression vec4 neg (swiz zwww (var_ref reg_tmp4) )) ) )) (declare (temporary ) vec4 assignment_tmp@106) (assign (xyzw) (var_ref assignment_tmp@106) (expression vec4 * (swiz xyyy (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp11) )) ) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 csel 
(expression bvec4 != (var_ref assignment_tmp@106) (var_ref assignment_tmp@106) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp11) )(swiz xyyy (var_ref reg_tmp11) )) (var_ref assignment_tmp@106) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp11) )) (var_ref assignment_tmp@106) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@106) ) )) ) ()) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 csel (expression bvec4 < (expression vec4 neg (swiz xyyy (var_ref reg_tmp11) )) (swiz xyyy (var_ref reg_tmp11) )) (swiz xyyy (var_ref reg_tmp11) )(expression vec4 neg (swiz xyyy (var_ref reg_tmp11) )) ) )) (assign (xyzw) (var_ref reg_tmp8) (expression vec4 + (var_ref reg_tmp8) (expression vec4 neg (var_ref reg_tmp7) ) ) ) (declare (temporary ) vec4 assignment_tmp@107) (assign (xyzw) (var_ref assignment_tmp@107) (expression vec4 * (var_ref reg_tmp8) (swiz xxxx (var_ref reg_tmp11) )) ) (assign (xyzw) (var_ref reg_tmp8) (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@107) (var_ref assignment_tmp@107) ) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp8) (var_ref reg_tmp8) ) (var_ref assignment_tmp@107) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp11) )(swiz xxxx (var_ref reg_tmp11) )) (var_ref assignment_tmp@107) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@107) ) (var_ref reg_tmp7) ) ) (assign (xyzw) (var_ref reg_tmp10) (expression vec4 + (var_ref reg_tmp10) (expression vec4 neg (var_ref reg_tmp9) ) ) ) (declare (temporary ) vec4 assignment_tmp@108) (assign (xyzw) (var_ref assignment_tmp@108) (expression vec4 * (var_ref reg_tmp10) (swiz xxxx (var_ref reg_tmp11) )) ) (assign (xyzw) (var_ref reg_tmp10) (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@108) (var_ref assignment_tmp@108) ) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp10) (var_ref reg_tmp10) ) (var_ref assignment_tmp@108) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp11) )(swiz xxxx (var_ref reg_tmp11) )) (var_ref assignment_tmp@108) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@108) ) (var_ref reg_tmp9) ) ) (assign (xyzw) (var_ref reg_tmp10) (expression vec4 + (var_ref reg_tmp10) (expression vec4 neg (var_ref reg_tmp8) ) ) ) (declare (temporary ) vec4 assignment_tmp@109) (assign (xyzw) (var_ref assignment_tmp@109) (expression vec4 * (var_ref reg_tmp10) (swiz yyyy (var_ref reg_tmp11) )) ) (assign (xyzw) (var_ref reg_tmp10) (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@109) (var_ref assignment_tmp@109) ) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp10) (var_ref reg_tmp10) ) (var_ref assignment_tmp@109) (expression vec4 csel (expression bvec4 != (swiz yyyy (var_ref reg_tmp11) )(swiz yyyy (var_ref reg_tmp11) )) (var_ref assignment_tmp@109) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@109) ) (var_ref reg_tmp8) ) ) (assign (xyzw) (var_ref vs_out_attr1) (var_ref reg_tmp10) ) )) (assign (z) (var_ref reg_tmp11) (swiz z (var_ref reg_tmp0) )) (declare (temporary ) vec4 lhs@110) (declare (temporary ) vec4 ubo_load_temp@111) (declare (temporary ) uint ubo_load_temp_offset@112) (assign (x) (var_ref ubo_load_temp_offset@112) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@111) (expression vec4 ubo_load (constant uint (0)) 
(expression uint + (var_ref ubo_load_temp_offset@112) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@110) (swiz zyzy (var_ref ubo_load_temp@111) )) (declare (temporary ) vec4 assignment_tmp@113) (assign (xyzw) (var_ref assignment_tmp@113) (expression vec4 * (var_ref lhs@110) (swiz zzzz (var_ref reg_tmp0) )) ) (declare (temporary ) vec4 mix_retval) (assign (xyzw) (var_ref mix_retval) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@113) (var_ref assignment_tmp@113) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@110) (var_ref lhs@110) ) (var_ref assignment_tmp@113) (expression vec4 csel (expression bvec4 != (swiz zzzz (var_ref reg_tmp0) )(swiz zzzz (var_ref reg_tmp0) )) (var_ref assignment_tmp@113) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@113) ) ) (assign (zw) (var_ref reg_tmp9) (swiz zw (var_ref mix_retval) )) (assign (xy) (var_ref reg_tmp9) (swiz xy (expression vec4 floor (var_ref mix_retval) ) )) (assign (xy) (var_ref reg_tmp9) (swiz xy (expression vec4 + (swiz zwww (var_ref mix_retval) )(expression vec4 neg (swiz xyyy (var_ref reg_tmp9) )) ) )) (declare (temporary ) vec4 lhs@114) (declare (temporary ) vec4 ubo_load_temp@115) (declare (temporary ) uint ubo_load_temp_offset@116) (assign (x) (var_ref ubo_load_temp_offset@116) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@115) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@116) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@114) (swiz zzzz (var_ref ubo_load_temp@115) )) (declare (temporary ) vec4 assignment_tmp@117) (assign (xyzw) (var_ref assignment_tmp@117) (expression vec4 * (var_ref lhs@114) (var_ref reg_tmp9) ) ) (declare (temporary ) vec4 mix_retval@118) (assign (xyzw) (var_ref mix_retval@118) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@117) (var_ref assignment_tmp@117) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@114) (var_ref lhs@114) ) (var_ref assignment_tmp@117) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp9) (var_ref reg_tmp9) ) (var_ref assignment_tmp@117) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@117) ) ) (assign (xyzw) (var_ref reg_tmp9) (var_ref mix_retval@118) ) (declare (temporary ) vec4 ubo_load_temp@119) (declare (temporary ) uint ubo_load_temp_offset@120) (assign (x) (var_ref ubo_load_temp_offset@120) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@119) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@120) (constant uint (416)) ) ) ) (assign (xyzw) (var_ref reg_tmp14) (swiz wzyx (var_ref ubo_load_temp@119) )) (assign (xy) (var_ref reg_tmp11) (swiz xy (var_ref mix_retval@118) )) (assign (y) (var_ref address_registers) (swiz y (expression ivec4 f2i (swiz zzzz (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 floor (swiz xxxx (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp0) )(expression vec4 neg (var_ref reg_tmp13) ) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@121) (declare (temporary ) vec4 ubo_load_temp@122) (declare (temporary ) uint ubo_load_temp_offset@123) (assign (x) (var_ref ubo_load_temp_offset@123) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@122) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@123) (constant uint (400)) ) ) ) (assign (xy) (var_ref 
lessThanEqual_retval@121) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@122) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@121) ) (if (swiz x (var_ref lessThanEqual_retval@121) )( (declare (temporary ) vec4 ubo_load_temp@124) (declare (temporary ) uint ubo_load_temp_offset@125) (assign (x) (var_ref ubo_load_temp_offset@125) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@124) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@125) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp12) (swiz xy (expression vec4 + (swiz xyyy (var_ref ubo_load_temp@124) )(swiz zwww (var_ref vs_in_reg0) )) )) (declare (temporary ) vec4 ubo_load_temp@126) (declare (temporary ) uint ubo_load_temp_offset@127) (assign (x) (var_ref ubo_load_temp_offset@127) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@126) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@127) (constant uint (416)) ) ) ) (assign (xy) (var_ref reg_tmp14) (swiz wz (var_ref ubo_load_temp@126) )) (declare (temporary ) vec4 lhs@128) (declare (temporary ) vec4 ubo_load_temp@129) (declare (temporary ) uint ubo_load_temp_offset@130) (assign (x) (var_ref ubo_load_temp_offset@130) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@129) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@130) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@128) (swiz zzzz (var_ref ubo_load_temp@129) )) (declare (temporary ) vec4 assignment_tmp@131) (assign (xyzw) (var_ref assignment_tmp@131) (expression vec4 * (var_ref lhs@128) (swiz xxxx (var_ref reg_tmp0) )) ) (declare (temporary ) vec4 mix_retval@132) (assign (xyzw) (var_ref mix_retval@132) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@131) (var_ref assignment_tmp@131) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@128) (var_ref lhs@128) ) (var_ref assignment_tmp@131) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp0) )(swiz xxxx (var_ref reg_tmp0) )) (var_ref assignment_tmp@131) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@131) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@132) )) (assign (y) (var_ref reg_tmp13) (swiz y (expression vec4 floor (var_ref reg_tmp13) ) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref mix_retval@132) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@133) (declare (temporary ) vec4 ubo_load_temp@134) (declare (temporary ) uint ubo_load_temp_offset@135) (assign (x) (var_ref ubo_load_temp_offset@135) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@134) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@135) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@133) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@134) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@133) ) (if (swiz x (var_ref lessThanEqual_retval@133) )( (assign (xy) (var_ref reg_tmp14) (swiz yx (var_ref reg_tmp14) )) ) ()) (declare (temporary ) vec4 assignment_tmp@136) (assign (xyzw) (var_ref assignment_tmp@136) (expression vec4 * (var_ref reg_tmp14) (var_ref reg_tmp2) ) ) (declare (temporary ) vec4 mix_retval@137) (assign (xyzw) (var_ref mix_retval@137) 
(expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@136) (var_ref assignment_tmp@136) ) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp14) (var_ref reg_tmp14) ) (var_ref assignment_tmp@136) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp2) (var_ref reg_tmp2) ) (var_ref assignment_tmp@136) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@136) ) ) (assign (xy) (var_ref reg_tmp14) (swiz xy (var_ref mix_retval@137) )) (declare (temporary ) vec4 lhs@138) (declare (temporary ) vec4 ubo_load_temp@139) (declare (temporary ) uint ubo_load_temp_offset@140) (assign (x) (var_ref ubo_load_temp_offset@140) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@139) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@140) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@138) (swiz zzzz (var_ref ubo_load_temp@139) )) (declare (temporary ) vec4 assignment_tmp@141) (assign (xyzw) (var_ref assignment_tmp@141) (expression vec4 * (var_ref lhs@138) (swiz xxxx (var_ref reg_tmp13) )) ) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@141) (var_ref assignment_tmp@141) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@138) (var_ref lhs@138) ) (var_ref assignment_tmp@141) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@141) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@141) ) )) (declare (temporary ) vec4 lhs@142) (declare (temporary ) vec4 ubo_load_temp@143) (declare (temporary ) uint ubo_load_temp_offset@144) (assign (x) (var_ref ubo_load_temp_offset@144) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@143) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@144) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@142) (swiz zyzy (var_ref ubo_load_temp@143) )) (declare (temporary ) vec4 assignment_tmp@145) (assign (xyzw) (var_ref assignment_tmp@145) (expression vec4 * (var_ref lhs@142) (swiz xxxx (var_ref reg_tmp13) )) ) (declare (temporary ) vec4 mix_retval@146) (assign (xyzw) (var_ref mix_retval@146) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@145) (var_ref assignment_tmp@145) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@142) (var_ref lhs@142) ) (var_ref assignment_tmp@145) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@145) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@145) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@146) )) (assign (zw) (var_ref reg_tmp13) (swiz zw (expression vec4 floor (var_ref mix_retval@146) ) )) (assign (xy) (var_ref reg_tmp13) (swiz xy (expression vec4 + (swiz xyyy (var_ref mix_retval@146) )(expression vec4 neg (swiz zwww (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@147) (declare (temporary ) vec4 ubo_load_temp@148) (declare (temporary ) uint ubo_load_temp_offset@149) (assign (x) (var_ref ubo_load_temp_offset@149) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@148) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@149) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@147) (expression bvec2 >= (swiz xy (var_ref reg_tmp13) )(swiz ww 
(var_ref ubo_load_temp@148) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@147) ) (if (swiz y (var_ref lessThanEqual_retval@147) )( (declare (temporary ) vec4 assignment_tmp@150) (assign (xyzw) (var_ref assignment_tmp@150) (expression vec4 * (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref mix_retval@137) )) ) (assign (x) (var_ref reg_tmp12) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@150) (var_ref assignment_tmp@150) ) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref reg_tmp12) )) (var_ref assignment_tmp@150) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref mix_retval@137) )(swiz xxxx (var_ref mix_retval@137) )) (var_ref assignment_tmp@150) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@150) ) )) ) ()) (if (swiz x (var_ref lessThanEqual_retval@147) )( (declare (temporary ) vec4 assignment_tmp@151) (assign (xyzw) (var_ref assignment_tmp@151) (expression vec4 * (swiz yyyy (var_ref reg_tmp12) )(swiz yyyy (var_ref mix_retval@137) )) ) (declare (temporary ) vec4 ubo_load_temp@152) (declare (temporary ) uint ubo_load_temp_offset@153) (assign (x) (var_ref ubo_load_temp_offset@153) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@152) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@153) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@151) (var_ref assignment_tmp@151) ) (expression vec4 csel (expression bvec4 != (swiz yyyy (var_ref reg_tmp12) )(swiz yyyy (var_ref reg_tmp12) )) (var_ref assignment_tmp@151) (expression vec4 csel (expression bvec4 != (swiz yyyy (var_ref mix_retval@137) )(swiz yyyy (var_ref mix_retval@137) )) (var_ref assignment_tmp@151) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@151) ) (swiz yyyy (var_ref ubo_load_temp@152) )) )) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref reg_tmp12) )(expression vec4 neg (swiz yyyy (var_ref mix_retval@137) )) ) )) ) ()) (declare (temporary ) vec4 ubo_load_temp@154) (declare (temporary ) uint ubo_load_temp_offset@155) (assign (x) (var_ref ubo_load_temp_offset@155) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@154) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@155) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp14) (swiz xy (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@154) )(expression vec4 neg (swiz xyyy (var_ref mix_retval@137) )) ) )) (declare (temporary ) vec4 lhs@156) (declare (temporary ) vec4 ubo_load_temp@157) (declare (temporary ) uint ubo_load_temp_offset@158) (assign (x) (var_ref ubo_load_temp_offset@158) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@157) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@158) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@156) (swiz zzzz (var_ref ubo_load_temp@157) )) (declare (temporary ) vec4 assignment_tmp@159) (assign (xyzw) (var_ref assignment_tmp@159) (expression vec4 * (var_ref lhs@156) (swiz xxxx (var_ref reg_tmp13) )) ) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@159) (var_ref assignment_tmp@159) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@156) (var_ref lhs@156) ) (var_ref 
assignment_tmp@159) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@159) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@159) ) )) (declare (temporary ) vec4 lhs@160) (declare (temporary ) vec4 ubo_load_temp@161) (declare (temporary ) uint ubo_load_temp_offset@162) (assign (x) (var_ref ubo_load_temp_offset@162) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@161) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@162) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@160) (swiz zyzy (var_ref ubo_load_temp@161) )) (declare (temporary ) vec4 assignment_tmp@163) (assign (xyzw) (var_ref assignment_tmp@163) (expression vec4 * (var_ref lhs@160) (swiz xxxx (var_ref reg_tmp13) )) ) (declare (temporary ) vec4 mix_retval@164) (assign (xyzw) (var_ref mix_retval@164) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@163) (var_ref assignment_tmp@163) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@160) (var_ref lhs@160) ) (var_ref assignment_tmp@163) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@163) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@163) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@164) )) (assign (zw) (var_ref reg_tmp13) (swiz zw (expression vec4 floor (var_ref mix_retval@164) ) )) (assign (xy) (var_ref reg_tmp13) (swiz xy (expression vec4 + (swiz xyyy (var_ref mix_retval@164) )(expression vec4 neg (swiz zwww (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@165) (declare (temporary ) vec4 ubo_load_temp@166) (declare (temporary ) uint ubo_load_temp_offset@167) (assign (x) (var_ref ubo_load_temp_offset@167) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@166) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@167) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@165) (expression bvec2 >= (swiz xy (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@166) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@165) ) (if (swiz y (var_ref lessThanEqual_retval@165) )( (assign (x) (var_ref reg_tmp12) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref reg_tmp14) )) )) ) ()) (if (swiz x (var_ref lessThanEqual_retval@165) )( (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref reg_tmp12) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp14) )) ) )) ) ()) (declare (temporary ) vec4 lhs@168) (declare (temporary ) vec4 ubo_load_temp@169) (declare (temporary ) uint ubo_load_temp_offset@170) (assign (x) (var_ref ubo_load_temp_offset@170) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@169) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@170) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@168) (swiz zzzz (var_ref ubo_load_temp@169) )) (declare (temporary ) vec4 assignment_tmp@171) (assign (xyzw) (var_ref assignment_tmp@171) (expression vec4 * (var_ref lhs@168) (swiz xxxx (var_ref reg_tmp13) )) ) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@171) (var_ref assignment_tmp@171) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@168) (var_ref lhs@168) ) 
(var_ref assignment_tmp@171) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@171) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@171) ) )) (declare (temporary ) vec4 lhs@172) (declare (temporary ) vec4 ubo_load_temp@173) (declare (temporary ) uint ubo_load_temp_offset@174) (assign (x) (var_ref ubo_load_temp_offset@174) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@173) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@174) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@172) (swiz zyzy (var_ref ubo_load_temp@173) )) (declare (temporary ) vec4 assignment_tmp@175) (assign (xyzw) (var_ref assignment_tmp@175) (expression vec4 * (var_ref lhs@172) (swiz xxxx (var_ref reg_tmp13) )) ) (declare (temporary ) vec4 mix_retval@176) (assign (xyzw) (var_ref mix_retval@176) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@175) (var_ref assignment_tmp@175) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@172) (var_ref lhs@172) ) (var_ref assignment_tmp@175) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@175) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@175) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@176) )) (assign (zw) (var_ref reg_tmp13) (swiz zw (expression vec4 floor (var_ref mix_retval@176) ) )) (assign (xy) (var_ref reg_tmp13) (swiz xy (expression vec4 + (swiz xyyy (var_ref mix_retval@176) )(expression vec4 neg (swiz zwww (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@177) (declare (temporary ) vec4 ubo_load_temp@178) (declare (temporary ) uint ubo_load_temp_offset@179) (assign (x) (var_ref ubo_load_temp_offset@179) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@178) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@179) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@177) (expression bvec2 >= (swiz xy (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@178) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@177) ) (if (swiz y (var_ref lessThanEqual_retval@177) )( (declare (temporary ) vec4 ubo_load_temp@180) (declare (temporary ) uint ubo_load_temp_offset@181) (assign (x) (var_ref ubo_load_temp_offset@181) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@180) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@181) (constant uint (400)) ) ) ) (assign (x) (var_ref reg_tmp12) (swiz x (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@180) )(expression vec4 neg (swiz xxxx (var_ref reg_tmp12) )) ) )) ) ()) (if (swiz x (var_ref lessThanEqual_retval@177) )( (declare (temporary ) vec4 ubo_load_temp@182) (declare (temporary ) uint ubo_load_temp_offset@183) (assign (x) (var_ref ubo_load_temp_offset@183) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@182) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@183) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@182) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp12) )) ) )) ) ()) (declare (temporary ) vec4 lhs@184) (declare (temporary ) vec4 ubo_load_temp@185) (declare (temporary ) uint 
ubo_load_temp_offset@186) (assign (x) (var_ref ubo_load_temp_offset@186) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@185) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@186) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@184) (swiz zzzz (var_ref ubo_load_temp@185) )) (declare (temporary ) vec4 assignment_tmp@187) (assign (xyzw) (var_ref assignment_tmp@187) (expression vec4 * (var_ref lhs@184) (swiz xxxx (var_ref reg_tmp0) )) ) (declare (temporary ) vec4 mix_retval@188) (assign (xyzw) (var_ref mix_retval@188) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@187) (var_ref assignment_tmp@187) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@184) (var_ref lhs@184) ) (var_ref assignment_tmp@187) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp0) )(swiz xxxx (var_ref reg_tmp0) )) (var_ref assignment_tmp@187) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@187) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@188) )) (assign (y) (var_ref reg_tmp13) (swiz y (expression vec4 floor (var_ref reg_tmp13) ) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref mix_retval@188) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@189) (declare (temporary ) vec4 ubo_load_temp@190) (declare (temporary ) uint ubo_load_temp_offset@191) (assign (x) (var_ref ubo_load_temp_offset@191) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@190) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@191) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@189) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@190) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@189) ) (if (swiz x (var_ref lessThanEqual_retval@189) )( (declare (temporary ) vec4 ubo_load_temp@192) (declare (temporary ) uint ubo_load_temp_offset@193) (assign (x) (var_ref ubo_load_temp_offset@193) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@192) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@193) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp12) (swiz xy (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@192) )(expression vec4 neg (swiz yxxx (var_ref reg_tmp12) )) ) )) ) ()) (declare (temporary ) vec4 ubo_load_temp@194) (declare (temporary ) uint ubo_load_temp_offset@195) (assign (x) (var_ref ubo_load_temp_offset@195) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@194) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@195) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@194) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp12) )) ) )) ) ()) (declare (temporary ) bvec2 lessThanEqual_retval@196) (declare (temporary ) vec4 ubo_load_temp@197) (declare (temporary ) uint ubo_load_temp_offset@198) (assign (x) (var_ref ubo_load_temp_offset@198) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@197) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@198) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@196) (expression bvec2 >= (swiz xy (var_ref mix_retval@118) )(swiz yy (var_ref ubo_load_temp@197) )) ) 
(assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@196) ) (if (expression bool ! (swiz y (var_ref lessThanEqual_retval@196) )) ( (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 floor (swiz xxxx (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp0) )(expression vec4 neg (var_ref reg_tmp13) ) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@199) (declare (temporary ) vec4 ubo_load_temp@200) (declare (temporary ) uint ubo_load_temp_offset@201) (assign (x) (var_ref ubo_load_temp_offset@201) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@200) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@201) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@199) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@200) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@199) ) (declare (temporary ) vec4 ubo_load_temp@202) (declare (temporary ) uint ubo_load_temp_offset@203) (assign (x) (var_ref ubo_load_temp_offset@203) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@202) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@203) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp13) (swiz wzyx (var_ref ubo_load_temp@202) )) (if (swiz x (var_ref lessThanEqual_retval@199) )( (declare (temporary ) vec4 assignment_tmp@204) (assign (xyzw) (var_ref assignment_tmp@204) (expression vec4 * (swiz xyyy (var_ref reg_tmp12) )(swiz xyyy (var_ref reg_tmp13) )) ) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@204) (var_ref assignment_tmp@204) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp12) )(swiz xyyy (var_ref reg_tmp12) )) (var_ref assignment_tmp@204) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp13) )(swiz xyyy (var_ref reg_tmp13) )) (var_ref assignment_tmp@204) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@204) ) (swiz zwww (var_ref reg_tmp13) )) )) (declare (temporary ) vec4 assignment_tmp@205) (assign (xyzw) (var_ref assignment_tmp@205) (expression vec4 * (swiz xyyy (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp14) )) ) (declare (temporary ) vec4 mix_retval@206) (assign (xyzw) (var_ref mix_retval@206) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@205) (var_ref assignment_tmp@205) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp11) )(swiz xyyy (var_ref reg_tmp11) )) (var_ref assignment_tmp@205) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref reg_tmp14) )(swiz zwww (var_ref reg_tmp14) )) (var_ref assignment_tmp@205) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@205) ) ) (assign (x) (var_ref reg_tmp11) (swiz x (var_ref mix_retval@206) )) (declare (temporary ) vec4 ubo_load_temp@207) (declare (temporary ) uint ubo_load_temp_offset@208) (assign (x) (var_ref ubo_load_temp_offset@208) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@207) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@208) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp11) (swiz y 
(expression vec4 + (swiz yyyy (var_ref ubo_load_temp@207) )(expression vec4 neg (swiz yyyy (var_ref mix_retval@206) )) ) )) ) ( (declare (temporary ) bvec2 notEqual_retval) (declare (temporary ) vec4 ubo_load_temp@209) (declare (temporary ) uint ubo_load_temp_offset@210) (assign (x) (var_ref ubo_load_temp_offset@210) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@209) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@210) (constant uint (400)) ) ) ) (assign (xy) (var_ref notEqual_retval) (expression bvec2 != (swiz xx (var_ref ubo_load_temp@209) )(swiz zw (var_ref vs_in_reg0) )) ) (assign (xy) (var_ref conditional_code) (var_ref notEqual_retval) ) (if (expression bool ! (swiz x (var_ref notEqual_retval) )) ( (assign (x) (var_ref reg_tmp11) (swiz x (var_ref reg_tmp13) )) ) ( (assign (x) (var_ref reg_tmp11) (swiz z (var_ref reg_tmp13) )) )) (if (expression bool ! (swiz y (var_ref notEqual_retval) )) ( (assign (y) (var_ref reg_tmp11) (swiz y (var_ref reg_tmp13) )) ) ( (assign (y) (var_ref reg_tmp11) (swiz w (var_ref reg_tmp13) )) )) )) (declare (temporary ) vec4 ubo_load_temp@211) (declare (temporary ) uint ubo_load_temp_offset@212) (assign (x) (var_ref ubo_load_temp_offset@212) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@211) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@212) (constant uint (400)) ) ) ) (assign (z) (var_ref reg_tmp11) (swiz z (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@211) )(swiz zzzz (var_ref reg_tmp0) )) )) ) ( (if (expression bool ! (swiz x (var_ref conditional_code) )) ( (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 floor (swiz xxxx (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp0) )(expression vec4 neg (var_ref reg_tmp13) ) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@213) (declare (temporary ) vec4 ubo_load_temp@214) (declare (temporary ) uint ubo_load_temp_offset@215) (assign (x) (var_ref ubo_load_temp_offset@215) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@214) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@215) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@213) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@214) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@213) ) (if (swiz x (var_ref lessThanEqual_retval@213) )( (declare (temporary ) vec4 ubo_load_temp@216) (declare (temporary ) uint ubo_load_temp_offset@217) (assign (x) (var_ref ubo_load_temp_offset@217) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@216) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@217) (constant uint (400)) ) ) ) (assign (zw) (var_ref reg_tmp12) (swiz xy (var_ref ubo_load_temp@216) )) (declare (temporary ) vec4 lhs@218) (declare (temporary ) vec4 ubo_load_temp@219) (declare (temporary ) uint ubo_load_temp_offset@220) (assign (x) (var_ref ubo_load_temp_offset@220) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@219) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@220) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@218) (swiz wzyx (var_ref 
ubo_load_temp@219) )) (declare (temporary ) vec4 assignment_tmp@221) (assign (xyzw) (var_ref assignment_tmp@221) (expression vec4 * (var_ref lhs@218) (var_ref reg_tmp12) ) ) (assign (x) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@221) (var_ref assignment_tmp@221) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@218) (var_ref lhs@218) ) (var_ref assignment_tmp@221) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp12) (var_ref reg_tmp12) ) (var_ref assignment_tmp@221) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@221) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@222) (declare (temporary ) vec4 ubo_load_temp@223) (declare (temporary ) uint ubo_load_temp_offset@224) (assign (x) (var_ref ubo_load_temp_offset@224) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@223) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@224) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@222) (swiz wzyx (var_ref ubo_load_temp@223) )) (declare (temporary ) vec4 assignment_tmp@225) (assign (xyzw) (var_ref assignment_tmp@225) (expression vec4 * (var_ref lhs@222) (var_ref reg_tmp12) ) ) (assign (y) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@225) (var_ref assignment_tmp@225) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@222) (var_ref lhs@222) ) (var_ref assignment_tmp@225) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp12) (var_ref reg_tmp12) ) (var_ref assignment_tmp@225) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@225) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 assignment_tmp@226) (assign (xyzw) (var_ref assignment_tmp@226) (expression vec4 * (swiz xyyy (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp14) )) ) (declare (temporary ) vec4 mix_retval@227) (assign (xyzw) (var_ref mix_retval@227) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@226) (var_ref assignment_tmp@226) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp11) )(swiz xyyy (var_ref reg_tmp11) )) (var_ref assignment_tmp@226) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref reg_tmp14) )(swiz zwww (var_ref reg_tmp14) )) (var_ref assignment_tmp@226) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@226) ) ) (assign (x) (var_ref reg_tmp11) (swiz x (var_ref mix_retval@227) )) (declare (temporary ) vec4 ubo_load_temp@228) (declare (temporary ) uint ubo_load_temp_offset@229) (assign (x) (var_ref ubo_load_temp_offset@229) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@228) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@229) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp11) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@228) )(expression vec4 neg (swiz yyyy (var_ref mix_retval@227) )) ) )) ) ( (declare (temporary ) vec4 ubo_load_temp@230) (declare (temporary ) uint ubo_load_temp_offset@231) (assign (x) (var_ref ubo_load_temp_offset@231) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant 
int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@230) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@231) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp14) (swiz wzyx (var_ref ubo_load_temp@230) )) (declare (temporary ) vec4 ubo_load_temp@232) (declare (temporary ) uint ubo_load_temp_offset@233) (assign (x) (var_ref ubo_load_temp_offset@233) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@232) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@233) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp13) (swiz wzyx (var_ref ubo_load_temp@232) )) (declare (temporary ) bvec2 notEqual_retval@234) (declare (temporary ) vec4 ubo_load_temp@235) (declare (temporary ) uint ubo_load_temp_offset@236) (assign (x) (var_ref ubo_load_temp_offset@236) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@235) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@236) (constant uint (400)) ) ) ) (assign (xy) (var_ref notEqual_retval@234) (expression bvec2 != (swiz xx (var_ref ubo_load_temp@235) )(swiz zw (var_ref vs_in_reg0) )) ) (assign (xy) (var_ref conditional_code) (var_ref notEqual_retval@234) ) (if (expression bool ! (swiz y (var_ref notEqual_retval@234) )) ( (if (expression bool ! (swiz x (var_ref notEqual_retval@234) )) ( (assign (xy) (var_ref reg_tmp11) (swiz xy (var_ref reg_tmp14) )) ) ( (assign (xy) (var_ref reg_tmp11) (swiz zw (var_ref reg_tmp13) )) )) ) ( (if (expression bool ! 
(swiz x (var_ref notEqual_retval@234) )) ( (assign (xy) (var_ref reg_tmp11) (swiz xy (var_ref reg_tmp13) )) ) ( (assign (xy) (var_ref reg_tmp11) (swiz zw (var_ref reg_tmp14) )) )) )) )) ) ( (declare (temporary ) vec4 lhs@237) (declare (temporary ) vec4 ubo_load_temp@238) (declare (temporary ) uint ubo_load_temp_offset@239) (assign (x) (var_ref ubo_load_temp_offset@239) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@238) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@239) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@237) (swiz wzyx (var_ref ubo_load_temp@238) )) (declare (temporary ) vec4 assignment_tmp@240) (assign (xyzw) (var_ref assignment_tmp@240) (expression vec4 * (var_ref lhs@237) (var_ref reg_tmp1) ) ) (assign (x) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@240) (var_ref assignment_tmp@240) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@237) (var_ref lhs@237) ) (var_ref assignment_tmp@240) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@240) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@240) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@241) (declare (temporary ) vec4 ubo_load_temp@242) (declare (temporary ) uint ubo_load_temp_offset@243) (assign (x) (var_ref ubo_load_temp_offset@243) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@242) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@243) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@241) (swiz wzyx (var_ref ubo_load_temp@242) )) (declare (temporary ) vec4 assignment_tmp@244) (assign (xyzw) (var_ref assignment_tmp@244) (expression vec4 * (var_ref lhs@241) (var_ref reg_tmp1) ) ) (assign (y) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@244) (var_ref assignment_tmp@244) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@241) (var_ref lhs@241) ) (var_ref assignment_tmp@244) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@244) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@244) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) )) (declare (temporary ) vec4 ubo_load_temp@245) (declare (temporary ) uint ubo_load_temp_offset@246) (assign (x) (var_ref ubo_load_temp_offset@246) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@245) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@246) (constant uint (400)) ) ) ) (assign (z) (var_ref reg_tmp11) (swiz z (expression vec4 + (swiz zzzz (var_ref ubo_load_temp@245) )(swiz zzzz (var_ref reg_tmp11) )) )) )) (declare (temporary ) bool ubo_load_temp@247) (declare (temporary ) uint ubo_load_temp_offset@248) (assign (x) (var_ref ubo_load_temp_offset@248) (constant uint (0)) ) (assign (x) (var_ref ubo_load_temp@247) (expression bool ubo_load (constant uint (0)) (expression uint + (var_ref 
ubo_load_temp_offset@248) (constant uint (16)) ) ) ) (if (var_ref ubo_load_temp@247) ( (assign (xy) (var_ref reg_tmp11) (swiz yx (var_ref reg_tmp11) )) (declare (temporary ) vec4 ubo_load_temp@249) (declare (temporary ) uint ubo_load_temp_offset@250) (assign (x) (var_ref ubo_load_temp_offset@250) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@249) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@250) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp11) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@249) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp11) )) ) )) ) ()) (declare (temporary ) bool ubo_load_temp@251) (declare (temporary ) uint ubo_load_temp_offset@252) (assign (x) (var_ref ubo_load_temp_offset@252) (constant uint (0)) ) (assign (x) (var_ref ubo_load_temp@251) (expression bool ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@252) (constant uint (32)) ) ) ) (if (var_ref ubo_load_temp@251) ( (declare (temporary ) vec4 ubo_load_temp@253) (declare (temporary ) uint ubo_load_temp_offset@254) (assign (x) (var_ref ubo_load_temp_offset@254) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@253) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@254) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@253) )(expression vec4 neg (swiz yxxx (var_ref reg_tmp11) )) ) )) ) ()) (assign (xyzw) (var_ref vs_out_attr2) (swiz xyyy (var_ref reg_tmp11) )) (declare (temporary ) vec4 lhs@255) (declare (temporary ) vec4 ubo_load_temp@256) (declare (temporary ) uint ubo_load_temp_offset@257) (assign (x) (var_ref ubo_load_temp_offset@257) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@256) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@257) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@255) (swiz zyzy (var_ref ubo_load_temp@256) )) (declare (temporary ) vec4 assignment_tmp@258) (assign (xyzw) (var_ref assignment_tmp@258) (expression vec4 * (var_ref lhs@255) (swiz xxxx (var_ref mix_retval@118) )) ) (declare (temporary ) vec4 mix_retval@259) (assign (xyzw) (var_ref mix_retval@259) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@258) (var_ref assignment_tmp@258) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@255) (var_ref lhs@255) ) (var_ref assignment_tmp@258) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref mix_retval@118) )(swiz xxxx (var_ref mix_retval@118) )) (var_ref assignment_tmp@258) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@258) ) ) (assign (zw) (var_ref reg_tmp9) (swiz zw (var_ref mix_retval@259) )) (assign (xy) (var_ref reg_tmp9) (swiz xy (expression vec4 floor (var_ref mix_retval@259) ) )) (assign (xy) (var_ref reg_tmp9) (swiz xy (expression vec4 + (swiz zwww (var_ref mix_retval@259) )(expression vec4 neg (swiz xyyy (var_ref reg_tmp9) )) ) )) (declare (temporary ) vec4 lhs@260) (declare (temporary ) vec4 ubo_load_temp@261) (declare (temporary ) uint ubo_load_temp_offset@262) (assign (x) (var_ref ubo_load_temp_offset@262) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@261) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@262) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@260) (swiz zzzz (var_ref ubo_load_temp@261) )) (declare (temporary ) vec4 
assignment_tmp@263) (assign (xyzw) (var_ref assignment_tmp@263) (expression vec4 * (var_ref lhs@260) (var_ref reg_tmp9) ) ) (declare (temporary ) vec4 mix_retval@264) (assign (xyzw) (var_ref mix_retval@264) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@263) (var_ref assignment_tmp@263) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@260) (var_ref lhs@260) ) (var_ref assignment_tmp@263) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp9) (var_ref reg_tmp9) ) (var_ref assignment_tmp@263) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@263) ) ) (assign (xyzw) (var_ref reg_tmp9) (var_ref mix_retval@264) ) (declare (temporary ) vec4 ubo_load_temp@265) (declare (temporary ) uint ubo_load_temp_offset@266) (assign (x) (var_ref ubo_load_temp_offset@266) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@265) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@266) (constant uint (432)) ) ) ) (assign (xyzw) (var_ref reg_tmp14) (swiz wzyx (var_ref ubo_load_temp@265) )) (assign (xy) (var_ref reg_tmp11) (swiz xy (var_ref mix_retval@264) )) (assign (y) (var_ref address_registers) (swiz y (expression ivec4 f2i (swiz zzzz (var_ref reg_tmp11) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 floor (swiz xxxx (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp0) )(expression vec4 neg (var_ref reg_tmp13) ) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@267) (declare (temporary ) vec4 ubo_load_temp@268) (declare (temporary ) uint ubo_load_temp_offset@269) (assign (x) (var_ref ubo_load_temp_offset@269) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@268) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@269) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@267) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@268) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@267) ) (if (swiz x (var_ref lessThanEqual_retval@267) )( (declare (temporary ) vec4 ubo_load_temp@270) (declare (temporary ) uint ubo_load_temp_offset@271) (assign (x) (var_ref ubo_load_temp_offset@271) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@270) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@271) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp12) (swiz xy (expression vec4 + (swiz xyyy (var_ref ubo_load_temp@270) )(swiz zwww (var_ref vs_in_reg0) )) )) (declare (temporary ) vec4 ubo_load_temp@272) (declare (temporary ) uint ubo_load_temp_offset@273) (assign (x) (var_ref ubo_load_temp_offset@273) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@272) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@273) (constant uint (416)) ) ) ) (assign (xy) (var_ref reg_tmp14) (swiz wz (var_ref ubo_load_temp@272) )) (declare (temporary ) vec4 lhs@274) (declare (temporary ) vec4 ubo_load_temp@275) (declare (temporary ) uint ubo_load_temp_offset@276) (assign (x) (var_ref ubo_load_temp_offset@276) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@275) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@276) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@274) (swiz zzzz (var_ref ubo_load_temp@275) )) (declare (temporary ) vec4 
assignment_tmp@277) (assign (xyzw) (var_ref assignment_tmp@277) (expression vec4 * (var_ref lhs@274) (swiz xxxx (var_ref reg_tmp0) )) ) (declare (temporary ) vec4 mix_retval@278) (assign (xyzw) (var_ref mix_retval@278) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@277) (var_ref assignment_tmp@277) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@274) (var_ref lhs@274) ) (var_ref assignment_tmp@277) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp0) )(swiz xxxx (var_ref reg_tmp0) )) (var_ref assignment_tmp@277) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@277) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@278) )) (assign (y) (var_ref reg_tmp13) (swiz y (expression vec4 floor (var_ref reg_tmp13) ) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref mix_retval@278) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@279) (declare (temporary ) vec4 ubo_load_temp@280) (declare (temporary ) uint ubo_load_temp_offset@281) (assign (x) (var_ref ubo_load_temp_offset@281) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@280) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@281) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@279) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@280) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@279) ) (if (swiz x (var_ref lessThanEqual_retval@279) )( (assign (xy) (var_ref reg_tmp14) (swiz yx (var_ref reg_tmp14) )) ) ()) (declare (temporary ) vec4 assignment_tmp@282) (assign (xyzw) (var_ref assignment_tmp@282) (expression vec4 * (var_ref reg_tmp14) (var_ref reg_tmp2) ) ) (declare (temporary ) vec4 mix_retval@283) (assign (xyzw) (var_ref mix_retval@283) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@282) (var_ref assignment_tmp@282) ) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp14) (var_ref reg_tmp14) ) (var_ref assignment_tmp@282) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp2) (var_ref reg_tmp2) ) (var_ref assignment_tmp@282) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@282) ) ) (assign (xy) (var_ref reg_tmp14) (swiz xy (var_ref mix_retval@283) )) (declare (temporary ) vec4 lhs@284) (declare (temporary ) vec4 ubo_load_temp@285) (declare (temporary ) uint ubo_load_temp_offset@286) (assign (x) (var_ref ubo_load_temp_offset@286) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@285) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@286) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@284) (swiz zzzz (var_ref ubo_load_temp@285) )) (declare (temporary ) vec4 assignment_tmp@287) (assign (xyzw) (var_ref assignment_tmp@287) (expression vec4 * (var_ref lhs@284) (swiz xxxx (var_ref reg_tmp13) )) ) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@287) (var_ref assignment_tmp@287) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@284) (var_ref lhs@284) ) (var_ref assignment_tmp@287) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@287) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@287) ) )) (declare (temporary ) vec4 
lhs@288) (declare (temporary ) vec4 ubo_load_temp@289) (declare (temporary ) uint ubo_load_temp_offset@290) (assign (x) (var_ref ubo_load_temp_offset@290) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@289) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@290) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@288) (swiz zyzy (var_ref ubo_load_temp@289) )) (declare (temporary ) vec4 assignment_tmp@291) (assign (xyzw) (var_ref assignment_tmp@291) (expression vec4 * (var_ref lhs@288) (swiz xxxx (var_ref reg_tmp13) )) ) (declare (temporary ) vec4 mix_retval@292) (assign (xyzw) (var_ref mix_retval@292) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@291) (var_ref assignment_tmp@291) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@288) (var_ref lhs@288) ) (var_ref assignment_tmp@291) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@291) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@291) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@292) )) (assign (zw) (var_ref reg_tmp13) (swiz zw (expression vec4 floor (var_ref mix_retval@292) ) )) (assign (xy) (var_ref reg_tmp13) (swiz xy (expression vec4 + (swiz xyyy (var_ref mix_retval@292) )(expression vec4 neg (swiz zwww (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@293) (declare (temporary ) vec4 ubo_load_temp@294) (declare (temporary ) uint ubo_load_temp_offset@295) (assign (x) (var_ref ubo_load_temp_offset@295) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@294) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@295) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@293) (expression bvec2 >= (swiz xy (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@294) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@293) ) (if (swiz y (var_ref lessThanEqual_retval@293) )( (declare (temporary ) vec4 assignment_tmp@296) (assign (xyzw) (var_ref assignment_tmp@296) (expression vec4 * (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref mix_retval@283) )) ) (assign (x) (var_ref reg_tmp12) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@296) (var_ref assignment_tmp@296) ) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref reg_tmp12) )) (var_ref assignment_tmp@296) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref mix_retval@283) )(swiz xxxx (var_ref mix_retval@283) )) (var_ref assignment_tmp@296) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@296) ) )) ) ()) (if (swiz x (var_ref lessThanEqual_retval@293) )( (declare (temporary ) vec4 assignment_tmp@297) (assign (xyzw) (var_ref assignment_tmp@297) (expression vec4 * (swiz yyyy (var_ref reg_tmp12) )(swiz yyyy (var_ref mix_retval@283) )) ) (declare (temporary ) vec4 ubo_load_temp@298) (declare (temporary ) uint ubo_load_temp_offset@299) (assign (x) (var_ref ubo_load_temp_offset@299) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@298) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@299) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@297) (var_ref assignment_tmp@297) ) (expression 
vec4 csel (expression bvec4 != (swiz yyyy (var_ref reg_tmp12) )(swiz yyyy (var_ref reg_tmp12) )) (var_ref assignment_tmp@297) (expression vec4 csel (expression bvec4 != (swiz yyyy (var_ref mix_retval@283) )(swiz yyyy (var_ref mix_retval@283) )) (var_ref assignment_tmp@297) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@297) ) (swiz yyyy (var_ref ubo_load_temp@298) )) )) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref reg_tmp12) )(expression vec4 neg (swiz yyyy (var_ref mix_retval@283) )) ) )) ) ()) (declare (temporary ) vec4 ubo_load_temp@300) (declare (temporary ) uint ubo_load_temp_offset@301) (assign (x) (var_ref ubo_load_temp_offset@301) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@300) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@301) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp14) (swiz xy (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@300) )(expression vec4 neg (swiz xyyy (var_ref mix_retval@283) )) ) )) (declare (temporary ) vec4 lhs@302) (declare (temporary ) vec4 ubo_load_temp@303) (declare (temporary ) uint ubo_load_temp_offset@304) (assign (x) (var_ref ubo_load_temp_offset@304) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@303) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@304) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@302) (swiz zzzz (var_ref ubo_load_temp@303) )) (declare (temporary ) vec4 assignment_tmp@305) (assign (xyzw) (var_ref assignment_tmp@305) (expression vec4 * (var_ref lhs@302) (swiz xxxx (var_ref reg_tmp13) )) ) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@305) (var_ref assignment_tmp@305) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@302) (var_ref lhs@302) ) (var_ref assignment_tmp@305) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@305) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@305) ) )) (declare (temporary ) vec4 lhs@306) (declare (temporary ) vec4 ubo_load_temp@307) (declare (temporary ) uint ubo_load_temp_offset@308) (assign (x) (var_ref ubo_load_temp_offset@308) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@307) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@308) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@306) (swiz zyzy (var_ref ubo_load_temp@307) )) (declare (temporary ) vec4 assignment_tmp@309) (assign (xyzw) (var_ref assignment_tmp@309) (expression vec4 * (var_ref lhs@306) (swiz xxxx (var_ref reg_tmp13) )) ) (declare (temporary ) vec4 mix_retval@310) (assign (xyzw) (var_ref mix_retval@310) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@309) (var_ref assignment_tmp@309) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@306) (var_ref lhs@306) ) (var_ref assignment_tmp@309) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@309) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@309) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@310) )) (assign (zw) (var_ref reg_tmp13) (swiz zw (expression vec4 floor (var_ref mix_retval@310) ) )) (assign (xy) (var_ref reg_tmp13) (swiz xy (expression vec4 + (swiz xyyy 
(var_ref mix_retval@310) )(expression vec4 neg (swiz zwww (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@311) (declare (temporary ) vec4 ubo_load_temp@312) (declare (temporary ) uint ubo_load_temp_offset@313) (assign (x) (var_ref ubo_load_temp_offset@313) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@312) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@313) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@311) (expression bvec2 >= (swiz xy (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@312) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@311) ) (if (swiz y (var_ref lessThanEqual_retval@311) )( (assign (x) (var_ref reg_tmp12) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref reg_tmp14) )) )) ) ()) (if (swiz x (var_ref lessThanEqual_retval@311) )( (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref reg_tmp12) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp14) )) ) )) ) ()) (declare (temporary ) vec4 lhs@314) (declare (temporary ) vec4 ubo_load_temp@315) (declare (temporary ) uint ubo_load_temp_offset@316) (assign (x) (var_ref ubo_load_temp_offset@316) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@315) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@316) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@314) (swiz zzzz (var_ref ubo_load_temp@315) )) (declare (temporary ) vec4 assignment_tmp@317) (assign (xyzw) (var_ref assignment_tmp@317) (expression vec4 * (var_ref lhs@314) (swiz xxxx (var_ref reg_tmp13) )) ) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@317) (var_ref assignment_tmp@317) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@314) (var_ref lhs@314) ) (var_ref assignment_tmp@317) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@317) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@317) ) )) (declare (temporary ) vec4 lhs@318) (declare (temporary ) vec4 ubo_load_temp@319) (declare (temporary ) uint ubo_load_temp_offset@320) (assign (x) (var_ref ubo_load_temp_offset@320) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@319) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@320) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@318) (swiz zyzy (var_ref ubo_load_temp@319) )) (declare (temporary ) vec4 assignment_tmp@321) (assign (xyzw) (var_ref assignment_tmp@321) (expression vec4 * (var_ref lhs@318) (swiz xxxx (var_ref reg_tmp13) )) ) (declare (temporary ) vec4 mix_retval@322) (assign (xyzw) (var_ref mix_retval@322) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@321) (var_ref assignment_tmp@321) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@318) (var_ref lhs@318) ) (var_ref assignment_tmp@321) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@321) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@321) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@322) )) (assign (zw) (var_ref reg_tmp13) (swiz zw (expression vec4 floor (var_ref mix_retval@322) ) )) (assign (xy) (var_ref reg_tmp13) (swiz xy (expression vec4 + 
(swiz xyyy (var_ref mix_retval@322) )(expression vec4 neg (swiz zwww (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@323) (declare (temporary ) vec4 ubo_load_temp@324) (declare (temporary ) uint ubo_load_temp_offset@325) (assign (x) (var_ref ubo_load_temp_offset@325) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@324) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@325) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@323) (expression bvec2 >= (swiz xy (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@324) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@323) ) (if (swiz y (var_ref lessThanEqual_retval@323) )( (declare (temporary ) vec4 ubo_load_temp@326) (declare (temporary ) uint ubo_load_temp_offset@327) (assign (x) (var_ref ubo_load_temp_offset@327) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@326) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@327) (constant uint (400)) ) ) ) (assign (x) (var_ref reg_tmp12) (swiz x (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@326) )(expression vec4 neg (swiz xxxx (var_ref reg_tmp12) )) ) )) ) ()) (if (swiz x (var_ref lessThanEqual_retval@323) )( (declare (temporary ) vec4 ubo_load_temp@328) (declare (temporary ) uint ubo_load_temp_offset@329) (assign (x) (var_ref ubo_load_temp_offset@329) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@328) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@329) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@328) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp12) )) ) )) ) ()) (declare (temporary ) vec4 lhs@330) (declare (temporary ) vec4 ubo_load_temp@331) (declare (temporary ) uint ubo_load_temp_offset@332) (assign (x) (var_ref ubo_load_temp_offset@332) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@331) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@332) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@330) (swiz zzzz (var_ref ubo_load_temp@331) )) (declare (temporary ) vec4 assignment_tmp@333) (assign (xyzw) (var_ref assignment_tmp@333) (expression vec4 * (var_ref lhs@330) (swiz xxxx (var_ref reg_tmp0) )) ) (declare (temporary ) vec4 mix_retval@334) (assign (xyzw) (var_ref mix_retval@334) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@333) (var_ref assignment_tmp@333) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@330) (var_ref lhs@330) ) (var_ref assignment_tmp@333) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp0) )(swiz xxxx (var_ref reg_tmp0) )) (var_ref assignment_tmp@333) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@333) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@334) )) (assign (y) (var_ref reg_tmp13) (swiz y (expression vec4 floor (var_ref reg_tmp13) ) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref mix_retval@334) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@335) (declare (temporary ) vec4 ubo_load_temp@336) (declare (temporary ) uint ubo_load_temp_offset@337) (assign (x) (var_ref ubo_load_temp_offset@337) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@336) (expression vec4 
ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@337) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@335) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@336) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@335) ) (if (swiz x (var_ref lessThanEqual_retval@335) )( (declare (temporary ) vec4 ubo_load_temp@338) (declare (temporary ) uint ubo_load_temp_offset@339) (assign (x) (var_ref ubo_load_temp_offset@339) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@338) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@339) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp12) (swiz xy (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@338) )(expression vec4 neg (swiz yxxx (var_ref reg_tmp12) )) ) )) ) ()) (declare (temporary ) vec4 ubo_load_temp@340) (declare (temporary ) uint ubo_load_temp_offset@341) (assign (x) (var_ref ubo_load_temp_offset@341) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@340) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@341) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@340) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp12) )) ) )) ) ()) (declare (temporary ) bvec2 lessThanEqual_retval@342) (declare (temporary ) vec4 ubo_load_temp@343) (declare (temporary ) uint ubo_load_temp_offset@344) (assign (x) (var_ref ubo_load_temp_offset@344) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@343) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@344) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@342) (expression bvec2 >= (swiz xy (var_ref mix_retval@264) )(swiz yy (var_ref ubo_load_temp@343) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@342) ) (if (expression bool ! 
(swiz y (var_ref lessThanEqual_retval@342) )) ( (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 floor (swiz xxxx (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp0) )(expression vec4 neg (var_ref reg_tmp13) ) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@345) (declare (temporary ) vec4 ubo_load_temp@346) (declare (temporary ) uint ubo_load_temp_offset@347) (assign (x) (var_ref ubo_load_temp_offset@347) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@346) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@347) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@345) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@346) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@345) ) (declare (temporary ) vec4 ubo_load_temp@348) (declare (temporary ) uint ubo_load_temp_offset@349) (assign (x) (var_ref ubo_load_temp_offset@349) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@348) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@349) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp13) (swiz wzyx (var_ref ubo_load_temp@348) )) (if (swiz x (var_ref lessThanEqual_retval@345) )( (declare (temporary ) vec4 assignment_tmp@350) (assign (xyzw) (var_ref assignment_tmp@350) (expression vec4 * (swiz xyyy (var_ref reg_tmp12) )(swiz xyyy (var_ref reg_tmp13) )) ) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@350) (var_ref assignment_tmp@350) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp12) )(swiz xyyy (var_ref reg_tmp12) )) (var_ref assignment_tmp@350) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp13) )(swiz xyyy (var_ref reg_tmp13) )) (var_ref assignment_tmp@350) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@350) ) (swiz zwww (var_ref reg_tmp13) )) )) (declare (temporary ) vec4 assignment_tmp@351) (assign (xyzw) (var_ref assignment_tmp@351) (expression vec4 * (swiz xyyy (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp14) )) ) (declare (temporary ) vec4 mix_retval@352) (assign (xyzw) (var_ref mix_retval@352) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@351) (var_ref assignment_tmp@351) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp11) )(swiz xyyy (var_ref reg_tmp11) )) (var_ref assignment_tmp@351) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref reg_tmp14) )(swiz zwww (var_ref reg_tmp14) )) (var_ref assignment_tmp@351) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@351) ) ) (assign (x) (var_ref reg_tmp11) (swiz x (var_ref mix_retval@352) )) (declare (temporary ) vec4 ubo_load_temp@353) (declare (temporary ) uint ubo_load_temp_offset@354) (assign (x) (var_ref ubo_load_temp_offset@354) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@353) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@354) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp11) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@353) )(expression vec4 neg (swiz yyyy (var_ref 
mix_retval@352) )) ) )) ) ( (declare (temporary ) bvec2 notEqual_retval@355) (declare (temporary ) vec4 ubo_load_temp@356) (declare (temporary ) uint ubo_load_temp_offset@357) (assign (x) (var_ref ubo_load_temp_offset@357) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@356) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@357) (constant uint (400)) ) ) ) (assign (xy) (var_ref notEqual_retval@355) (expression bvec2 != (swiz xx (var_ref ubo_load_temp@356) )(swiz zw (var_ref vs_in_reg0) )) ) (assign (xy) (var_ref conditional_code) (var_ref notEqual_retval@355) ) (if (expression bool ! (swiz x (var_ref notEqual_retval@355) )) ( (assign (x) (var_ref reg_tmp11) (swiz x (var_ref reg_tmp13) )) ) ( (assign (x) (var_ref reg_tmp11) (swiz z (var_ref reg_tmp13) )) )) (if (expression bool ! (swiz y (var_ref notEqual_retval@355) )) ( (assign (y) (var_ref reg_tmp11) (swiz y (var_ref reg_tmp13) )) ) ( (assign (y) (var_ref reg_tmp11) (swiz w (var_ref reg_tmp13) )) )) )) (declare (temporary ) vec4 ubo_load_temp@358) (declare (temporary ) uint ubo_load_temp_offset@359) (assign (x) (var_ref ubo_load_temp_offset@359) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@358) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@359) (constant uint (400)) ) ) ) (assign (z) (var_ref reg_tmp11) (swiz z (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@358) )(swiz zzzz (var_ref reg_tmp11) )) )) ) ( (if (expression bool ! (swiz x (var_ref conditional_code) )) ( (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 floor (swiz xxxx (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp0) )(expression vec4 neg (var_ref reg_tmp13) ) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@360) (declare (temporary ) vec4 ubo_load_temp@361) (declare (temporary ) uint ubo_load_temp_offset@362) (assign (x) (var_ref ubo_load_temp_offset@362) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@361) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@362) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@360) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@361) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@360) ) (if (swiz x (var_ref lessThanEqual_retval@360) )( (declare (temporary ) vec4 ubo_load_temp@363) (declare (temporary ) uint ubo_load_temp_offset@364) (assign (x) (var_ref ubo_load_temp_offset@364) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@363) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@364) (constant uint (400)) ) ) ) (assign (zw) (var_ref reg_tmp12) (swiz xy (var_ref ubo_load_temp@363) )) (declare (temporary ) vec4 lhs@365) (declare (temporary ) vec4 ubo_load_temp@366) (declare (temporary ) uint ubo_load_temp_offset@367) (assign (x) (var_ref ubo_load_temp_offset@367) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@366) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@367) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@365) (swiz wzyx (var_ref ubo_load_temp@366) )) (declare (temporary ) vec4 assignment_tmp@368) (assign (xyzw) 
(var_ref assignment_tmp@368) (expression vec4 * (var_ref lhs@365) (var_ref reg_tmp12) ) ) (assign (x) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@368) (var_ref assignment_tmp@368) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@365) (var_ref lhs@365) ) (var_ref assignment_tmp@368) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp12) (var_ref reg_tmp12) ) (var_ref assignment_tmp@368) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@368) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@369) (declare (temporary ) vec4 ubo_load_temp@370) (declare (temporary ) uint ubo_load_temp_offset@371) (assign (x) (var_ref ubo_load_temp_offset@371) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@370) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@371) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@369) (swiz wzyx (var_ref ubo_load_temp@370) )) (declare (temporary ) vec4 assignment_tmp@372) (assign (xyzw) (var_ref assignment_tmp@372) (expression vec4 * (var_ref lhs@369) (var_ref reg_tmp12) ) ) (assign (y) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@372) (var_ref assignment_tmp@372) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@369) (var_ref lhs@369) ) (var_ref assignment_tmp@372) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp12) (var_ref reg_tmp12) ) (var_ref assignment_tmp@372) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@372) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 assignment_tmp@373) (assign (xyzw) (var_ref assignment_tmp@373) (expression vec4 * (swiz xyyy (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp14) )) ) (declare (temporary ) vec4 mix_retval@374) (assign (xyzw) (var_ref mix_retval@374) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@373) (var_ref assignment_tmp@373) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp11) )(swiz xyyy (var_ref reg_tmp11) )) (var_ref assignment_tmp@373) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref reg_tmp14) )(swiz zwww (var_ref reg_tmp14) )) (var_ref assignment_tmp@373) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@373) ) ) (assign (x) (var_ref reg_tmp11) (swiz x (var_ref mix_retval@374) )) (declare (temporary ) vec4 ubo_load_temp@375) (declare (temporary ) uint ubo_load_temp_offset@376) (assign (x) (var_ref ubo_load_temp_offset@376) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@375) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@376) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp11) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@375) )(expression vec4 neg (swiz yyyy (var_ref mix_retval@374) )) ) )) ) ( (declare (temporary ) vec4 ubo_load_temp@377) (declare (temporary ) uint ubo_load_temp_offset@378) (assign (x) (var_ref ubo_load_temp_offset@378) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) 
(assign (xyzw) (var_ref ubo_load_temp@377) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@378) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp14) (swiz wzyx (var_ref ubo_load_temp@377) )) (declare (temporary ) vec4 ubo_load_temp@379) (declare (temporary ) uint ubo_load_temp_offset@380) (assign (x) (var_ref ubo_load_temp_offset@380) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@379) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@380) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp13) (swiz wzyx (var_ref ubo_load_temp@379) )) (declare (temporary ) bvec2 notEqual_retval@381) (declare (temporary ) vec4 ubo_load_temp@382) (declare (temporary ) uint ubo_load_temp_offset@383) (assign (x) (var_ref ubo_load_temp_offset@383) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@382) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@383) (constant uint (400)) ) ) ) (assign (xy) (var_ref notEqual_retval@381) (expression bvec2 != (swiz xx (var_ref ubo_load_temp@382) )(swiz zw (var_ref vs_in_reg0) )) ) (assign (xy) (var_ref conditional_code) (var_ref notEqual_retval@381) ) (if (expression bool ! (swiz y (var_ref notEqual_retval@381) )) ( (if (expression bool ! (swiz x (var_ref notEqual_retval@381) )) ( (assign (xy) (var_ref reg_tmp11) (swiz xy (var_ref reg_tmp14) )) ) ( (assign (xy) (var_ref reg_tmp11) (swiz zw (var_ref reg_tmp13) )) )) ) ( (if (expression bool ! (swiz x (var_ref notEqual_retval@381) )) ( (assign (xy) (var_ref reg_tmp11) (swiz xy (var_ref reg_tmp13) )) ) ( (assign (xy) (var_ref reg_tmp11) (swiz zw (var_ref reg_tmp14) )) )) )) )) ) ( (declare (temporary ) vec4 lhs@384) (declare (temporary ) vec4 ubo_load_temp@385) (declare (temporary ) uint ubo_load_temp_offset@386) (assign (x) (var_ref ubo_load_temp_offset@386) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@385) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@386) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@384) (swiz wzyx (var_ref ubo_load_temp@385) )) (declare (temporary ) vec4 assignment_tmp@387) (assign (xyzw) (var_ref assignment_tmp@387) (expression vec4 * (var_ref lhs@384) (var_ref reg_tmp1) ) ) (assign (x) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@387) (var_ref assignment_tmp@387) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@384) (var_ref lhs@384) ) (var_ref assignment_tmp@387) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@387) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@387) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@388) (declare (temporary ) vec4 ubo_load_temp@389) (declare (temporary ) uint ubo_load_temp_offset@390) (assign (x) (var_ref ubo_load_temp_offset@390) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint 
(16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@389) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@390) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@388) (swiz wzyx (var_ref ubo_load_temp@389) )) (declare (temporary ) vec4 assignment_tmp@391) (assign (xyzw) (var_ref assignment_tmp@391) (expression vec4 * (var_ref lhs@388) (var_ref reg_tmp1) ) ) (assign (y) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@391) (var_ref assignment_tmp@391) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@388) (var_ref lhs@388) ) (var_ref assignment_tmp@391) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@391) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@391) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) )) (declare (temporary ) vec4 ubo_load_temp@392) (declare (temporary ) uint ubo_load_temp_offset@393) (assign (x) (var_ref ubo_load_temp_offset@393) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@392) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@393) (constant uint (400)) ) ) ) (assign (z) (var_ref reg_tmp11) (swiz z (expression vec4 + (swiz zzzz (var_ref ubo_load_temp@392) )(swiz zzzz (var_ref reg_tmp11) )) )) )) (declare (temporary ) bool ubo_load_temp@394) (declare (temporary ) uint ubo_load_temp_offset@395) (assign (x) (var_ref ubo_load_temp_offset@395) (constant uint (0)) ) (assign (x) (var_ref ubo_load_temp@394) (expression bool ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@395) (constant uint (48)) ) ) ) (if (var_ref ubo_load_temp@394) ( (assign (xy) (var_ref reg_tmp11) (swiz yx (var_ref reg_tmp11) )) (declare (temporary ) vec4 ubo_load_temp@396) (declare (temporary ) uint ubo_load_temp_offset@397) (assign (x) (var_ref ubo_load_temp_offset@397) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@396) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@397) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp11) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@396) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp11) )) ) )) ) ()) (declare (temporary ) bool ubo_load_temp@398) (declare (temporary ) uint ubo_load_temp_offset@399) (assign (x) (var_ref ubo_load_temp_offset@399) (constant uint (0)) ) (assign (x) (var_ref ubo_load_temp@398) (expression bool ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@399) (constant uint (64)) ) ) ) (if (var_ref ubo_load_temp@398) ( (declare (temporary ) vec4 ubo_load_temp@400) (declare (temporary ) uint ubo_load_temp_offset@401) (assign (x) (var_ref ubo_load_temp_offset@401) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@400) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@401) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@400) )(expression vec4 neg (swiz yxxx (var_ref reg_tmp11) )) ) )) ) ()) (assign (xyzw) (var_ref vs_out_attr3) (swiz xyyy (var_ref reg_tmp11) )) (declare (temporary ) vec4 lhs@402) (declare (temporary ) vec4 ubo_load_temp@403) (declare (temporary ) uint ubo_load_temp_offset@404) (assign (x) (var_ref ubo_load_temp_offset@404) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@403) 
(expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@404) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@402) (swiz zyzy (var_ref ubo_load_temp@403) )) (declare (temporary ) vec4 assignment_tmp@405) (assign (xyzw) (var_ref assignment_tmp@405) (expression vec4 * (var_ref lhs@402) (swiz xxxx (var_ref mix_retval@264) )) ) (declare (temporary ) vec4 mix_retval@406) (assign (xyzw) (var_ref mix_retval@406) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@405) (var_ref assignment_tmp@405) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@402) (var_ref lhs@402) ) (var_ref assignment_tmp@405) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref mix_retval@264) )(swiz xxxx (var_ref mix_retval@264) )) (var_ref assignment_tmp@405) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@405) ) ) (assign (zw) (var_ref reg_tmp9) (swiz zw (var_ref mix_retval@406) )) (assign (xy) (var_ref reg_tmp9) (swiz xy (expression vec4 floor (var_ref mix_retval@406) ) )) (assign (xy) (var_ref reg_tmp9) (swiz xy (expression vec4 + (swiz zwww (var_ref mix_retval@406) )(expression vec4 neg (swiz xyyy (var_ref reg_tmp9) )) ) )) (declare (temporary ) vec4 lhs@407) (declare (temporary ) vec4 ubo_load_temp@408) (declare (temporary ) uint ubo_load_temp_offset@409) (assign (x) (var_ref ubo_load_temp_offset@409) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@408) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@409) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@407) (swiz zzzz (var_ref ubo_load_temp@408) )) (declare (temporary ) vec4 assignment_tmp@410) (assign (xyzw) (var_ref assignment_tmp@410) (expression vec4 * (var_ref lhs@407) (var_ref reg_tmp9) ) ) (declare (temporary ) vec4 mix_retval@411) (assign (xyzw) (var_ref mix_retval@411) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@410) (var_ref assignment_tmp@410) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@407) (var_ref lhs@407) ) (var_ref assignment_tmp@410) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp9) (var_ref reg_tmp9) ) (var_ref assignment_tmp@410) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@410) ) ) (assign (xyzw) (var_ref reg_tmp9) (var_ref mix_retval@411) ) (declare (temporary ) vec4 ubo_load_temp@412) (declare (temporary ) uint ubo_load_temp_offset@413) (assign (x) (var_ref ubo_load_temp_offset@413) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@412) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@413) (constant uint (448)) ) ) ) (assign (xyzw) (var_ref reg_tmp14) (swiz wzyx (var_ref ubo_load_temp@412) )) (assign (xy) (var_ref reg_tmp11) (swiz xy (var_ref mix_retval@411) )) (assign (y) (var_ref address_registers) (swiz y (expression ivec4 f2i (swiz zzzz (var_ref reg_tmp11) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 floor (swiz xxxx (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp0) )(expression vec4 neg (var_ref reg_tmp13) ) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@414) (declare (temporary ) vec4 ubo_load_temp@415) (declare (temporary ) uint ubo_load_temp_offset@416) (assign (x) (var_ref ubo_load_temp_offset@416) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@415) (expression vec4 ubo_load (constant uint (0)) (expression uint + 
(var_ref ubo_load_temp_offset@416) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@414) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@415) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@414) ) (if (swiz x (var_ref lessThanEqual_retval@414) )( (declare (temporary ) vec4 ubo_load_temp@417) (declare (temporary ) uint ubo_load_temp_offset@418) (assign (x) (var_ref ubo_load_temp_offset@418) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@417) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@418) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp12) (swiz xy (expression vec4 + (swiz xyyy (var_ref ubo_load_temp@417) )(swiz zwww (var_ref vs_in_reg0) )) )) (declare (temporary ) vec4 ubo_load_temp@419) (declare (temporary ) uint ubo_load_temp_offset@420) (assign (x) (var_ref ubo_load_temp_offset@420) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@419) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@420) (constant uint (416)) ) ) ) (assign (xy) (var_ref reg_tmp14) (swiz wz (var_ref ubo_load_temp@419) )) (declare (temporary ) vec4 lhs@421) (declare (temporary ) vec4 ubo_load_temp@422) (declare (temporary ) uint ubo_load_temp_offset@423) (assign (x) (var_ref ubo_load_temp_offset@423) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@422) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@423) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@421) (swiz zzzz (var_ref ubo_load_temp@422) )) (declare (temporary ) vec4 assignment_tmp@424) (assign (xyzw) (var_ref assignment_tmp@424) (expression vec4 * (var_ref lhs@421) (swiz xxxx (var_ref reg_tmp0) )) ) (declare (temporary ) vec4 mix_retval@425) (assign (xyzw) (var_ref mix_retval@425) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@424) (var_ref assignment_tmp@424) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@421) (var_ref lhs@421) ) (var_ref assignment_tmp@424) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp0) )(swiz xxxx (var_ref reg_tmp0) )) (var_ref assignment_tmp@424) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@424) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@425) )) (assign (y) (var_ref reg_tmp13) (swiz y (expression vec4 floor (var_ref reg_tmp13) ) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref mix_retval@425) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@426) (declare (temporary ) vec4 ubo_load_temp@427) (declare (temporary ) uint ubo_load_temp_offset@428) (assign (x) (var_ref ubo_load_temp_offset@428) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@427) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@428) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@426) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@427) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@426) ) (if (swiz x (var_ref lessThanEqual_retval@426) )( (assign (xy) (var_ref reg_tmp14) (swiz yx (var_ref reg_tmp14) )) ) ()) (declare (temporary ) vec4 assignment_tmp@429) (assign (xyzw) (var_ref assignment_tmp@429) (expression vec4 * (var_ref reg_tmp14) (var_ref reg_tmp2) ) 
) (declare (temporary ) vec4 mix_retval@430) (assign (xyzw) (var_ref mix_retval@430) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@429) (var_ref assignment_tmp@429) ) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp14) (var_ref reg_tmp14) ) (var_ref assignment_tmp@429) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp2) (var_ref reg_tmp2) ) (var_ref assignment_tmp@429) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@429) ) ) (assign (xy) (var_ref reg_tmp14) (swiz xy (var_ref mix_retval@430) )) (declare (temporary ) vec4 lhs@431) (declare (temporary ) vec4 ubo_load_temp@432) (declare (temporary ) uint ubo_load_temp_offset@433) (assign (x) (var_ref ubo_load_temp_offset@433) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@432) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@433) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@431) (swiz zzzz (var_ref ubo_load_temp@432) )) (declare (temporary ) vec4 assignment_tmp@434) (assign (xyzw) (var_ref assignment_tmp@434) (expression vec4 * (var_ref lhs@431) (swiz xxxx (var_ref reg_tmp13) )) ) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@434) (var_ref assignment_tmp@434) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@431) (var_ref lhs@431) ) (var_ref assignment_tmp@434) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@434) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@434) ) )) (declare (temporary ) vec4 lhs@435) (declare (temporary ) vec4 ubo_load_temp@436) (declare (temporary ) uint ubo_load_temp_offset@437) (assign (x) (var_ref ubo_load_temp_offset@437) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@436) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@437) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@435) (swiz zyzy (var_ref ubo_load_temp@436) )) (declare (temporary ) vec4 assignment_tmp@438) (assign (xyzw) (var_ref assignment_tmp@438) (expression vec4 * (var_ref lhs@435) (swiz xxxx (var_ref reg_tmp13) )) ) (declare (temporary ) vec4 mix_retval@439) (assign (xyzw) (var_ref mix_retval@439) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@438) (var_ref assignment_tmp@438) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@435) (var_ref lhs@435) ) (var_ref assignment_tmp@438) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@438) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@438) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@439) )) (assign (zw) (var_ref reg_tmp13) (swiz zw (expression vec4 floor (var_ref mix_retval@439) ) )) (assign (xy) (var_ref reg_tmp13) (swiz xy (expression vec4 + (swiz xyyy (var_ref mix_retval@439) )(expression vec4 neg (swiz zwww (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@440) (declare (temporary ) vec4 ubo_load_temp@441) (declare (temporary ) uint ubo_load_temp_offset@442) (assign (x) (var_ref ubo_load_temp_offset@442) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@441) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@442) (constant uint (400)) ) ) ) (assign (xy) (var_ref 
lessThanEqual_retval@440) (expression bvec2 >= (swiz xy (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@441) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@440) ) (if (swiz y (var_ref lessThanEqual_retval@440) )( (declare (temporary ) vec4 assignment_tmp@443) (assign (xyzw) (var_ref assignment_tmp@443) (expression vec4 * (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref mix_retval@430) )) ) (assign (x) (var_ref reg_tmp12) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@443) (var_ref assignment_tmp@443) ) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref reg_tmp12) )) (var_ref assignment_tmp@443) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref mix_retval@430) )(swiz xxxx (var_ref mix_retval@430) )) (var_ref assignment_tmp@443) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@443) ) )) ) ()) (if (swiz x (var_ref lessThanEqual_retval@440) )( (declare (temporary ) vec4 assignment_tmp@444) (assign (xyzw) (var_ref assignment_tmp@444) (expression vec4 * (swiz yyyy (var_ref reg_tmp12) )(swiz yyyy (var_ref mix_retval@430) )) ) (declare (temporary ) vec4 ubo_load_temp@445) (declare (temporary ) uint ubo_load_temp_offset@446) (assign (x) (var_ref ubo_load_temp_offset@446) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@445) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@446) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@444) (var_ref assignment_tmp@444) ) (expression vec4 csel (expression bvec4 != (swiz yyyy (var_ref reg_tmp12) )(swiz yyyy (var_ref reg_tmp12) )) (var_ref assignment_tmp@444) (expression vec4 csel (expression bvec4 != (swiz yyyy (var_ref mix_retval@430) )(swiz yyyy (var_ref mix_retval@430) )) (var_ref assignment_tmp@444) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@444) ) (swiz yyyy (var_ref ubo_load_temp@445) )) )) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref reg_tmp12) )(expression vec4 neg (swiz yyyy (var_ref mix_retval@430) )) ) )) ) ()) (declare (temporary ) vec4 ubo_load_temp@447) (declare (temporary ) uint ubo_load_temp_offset@448) (assign (x) (var_ref ubo_load_temp_offset@448) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@447) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@448) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp14) (swiz xy (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@447) )(expression vec4 neg (swiz xyyy (var_ref mix_retval@430) )) ) )) (declare (temporary ) vec4 lhs@449) (declare (temporary ) vec4 ubo_load_temp@450) (declare (temporary ) uint ubo_load_temp_offset@451) (assign (x) (var_ref ubo_load_temp_offset@451) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@450) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@451) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@449) (swiz zzzz (var_ref ubo_load_temp@450) )) (declare (temporary ) vec4 assignment_tmp@452) (assign (xyzw) (var_ref assignment_tmp@452) (expression vec4 * (var_ref lhs@449) (swiz xxxx (var_ref reg_tmp13) )) ) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@452) (var_ref assignment_tmp@452) ) (expression vec4 
csel (expression bvec4 != (var_ref lhs@449) (var_ref lhs@449) ) (var_ref assignment_tmp@452) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@452) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@452) ) )) (declare (temporary ) vec4 lhs@453) (declare (temporary ) vec4 ubo_load_temp@454) (declare (temporary ) uint ubo_load_temp_offset@455) (assign (x) (var_ref ubo_load_temp_offset@455) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@454) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@455) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@453) (swiz zyzy (var_ref ubo_load_temp@454) )) (declare (temporary ) vec4 assignment_tmp@456) (assign (xyzw) (var_ref assignment_tmp@456) (expression vec4 * (var_ref lhs@453) (swiz xxxx (var_ref reg_tmp13) )) ) (declare (temporary ) vec4 mix_retval@457) (assign (xyzw) (var_ref mix_retval@457) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@456) (var_ref assignment_tmp@456) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@453) (var_ref lhs@453) ) (var_ref assignment_tmp@456) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@456) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@456) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@457) )) (assign (zw) (var_ref reg_tmp13) (swiz zw (expression vec4 floor (var_ref mix_retval@457) ) )) (assign (xy) (var_ref reg_tmp13) (swiz xy (expression vec4 + (swiz xyyy (var_ref mix_retval@457) )(expression vec4 neg (swiz zwww (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@458) (declare (temporary ) vec4 ubo_load_temp@459) (declare (temporary ) uint ubo_load_temp_offset@460) (assign (x) (var_ref ubo_load_temp_offset@460) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@459) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@460) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@458) (expression bvec2 >= (swiz xy (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@459) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@458) ) (if (swiz y (var_ref lessThanEqual_retval@458) )( (assign (x) (var_ref reg_tmp12) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp12) )(swiz xxxx (var_ref reg_tmp14) )) )) ) ()) (if (swiz x (var_ref lessThanEqual_retval@458) )( (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref reg_tmp12) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp14) )) ) )) ) ()) (declare (temporary ) vec4 lhs@461) (declare (temporary ) vec4 ubo_load_temp@462) (declare (temporary ) uint ubo_load_temp_offset@463) (assign (x) (var_ref ubo_load_temp_offset@463) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@462) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@463) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@461) (swiz zzzz (var_ref ubo_load_temp@462) )) (declare (temporary ) vec4 assignment_tmp@464) (assign (xyzw) (var_ref assignment_tmp@464) (expression vec4 * (var_ref lhs@461) (swiz xxxx (var_ref reg_tmp13) )) ) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@464) (var_ref assignment_tmp@464) ) 
(expression vec4 csel (expression bvec4 != (var_ref lhs@461) (var_ref lhs@461) ) (var_ref assignment_tmp@464) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@464) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@464) ) )) (declare (temporary ) vec4 lhs@465) (declare (temporary ) vec4 ubo_load_temp@466) (declare (temporary ) uint ubo_load_temp_offset@467) (assign (x) (var_ref ubo_load_temp_offset@467) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@466) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@467) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@465) (swiz zyzy (var_ref ubo_load_temp@466) )) (declare (temporary ) vec4 assignment_tmp@468) (assign (xyzw) (var_ref assignment_tmp@468) (expression vec4 * (var_ref lhs@465) (swiz xxxx (var_ref reg_tmp13) )) ) (declare (temporary ) vec4 mix_retval@469) (assign (xyzw) (var_ref mix_retval@469) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@468) (var_ref assignment_tmp@468) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@465) (var_ref lhs@465) ) (var_ref assignment_tmp@468) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp13) )(swiz xxxx (var_ref reg_tmp13) )) (var_ref assignment_tmp@468) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@468) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@469) )) (assign (zw) (var_ref reg_tmp13) (swiz zw (expression vec4 floor (var_ref mix_retval@469) ) )) (assign (xy) (var_ref reg_tmp13) (swiz xy (expression vec4 + (swiz xyyy (var_ref mix_retval@469) )(expression vec4 neg (swiz zwww (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@470) (declare (temporary ) vec4 ubo_load_temp@471) (declare (temporary ) uint ubo_load_temp_offset@472) (assign (x) (var_ref ubo_load_temp_offset@472) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@471) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@472) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@470) (expression bvec2 >= (swiz xy (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@471) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@470) ) (if (swiz y (var_ref lessThanEqual_retval@470) )( (declare (temporary ) vec4 ubo_load_temp@473) (declare (temporary ) uint ubo_load_temp_offset@474) (assign (x) (var_ref ubo_load_temp_offset@474) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@473) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@474) (constant uint (400)) ) ) ) (assign (x) (var_ref reg_tmp12) (swiz x (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@473) )(expression vec4 neg (swiz xxxx (var_ref reg_tmp12) )) ) )) ) ()) (if (swiz x (var_ref lessThanEqual_retval@470) )( (declare (temporary ) vec4 ubo_load_temp@475) (declare (temporary ) uint ubo_load_temp_offset@476) (assign (x) (var_ref ubo_load_temp_offset@476) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@475) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@476) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@475) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp12) )) ) )) ) ()) (declare (temporary ) vec4 
lhs@477) (declare (temporary ) vec4 ubo_load_temp@478) (declare (temporary ) uint ubo_load_temp_offset@479) (assign (x) (var_ref ubo_load_temp_offset@479) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@478) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@479) (constant uint (400)) ) ) ) (assign (xyzw) (var_ref lhs@477) (swiz zzzz (var_ref ubo_load_temp@478) )) (declare (temporary ) vec4 assignment_tmp@480) (assign (xyzw) (var_ref assignment_tmp@480) (expression vec4 * (var_ref lhs@477) (swiz xxxx (var_ref reg_tmp0) )) ) (declare (temporary ) vec4 mix_retval@481) (assign (xyzw) (var_ref mix_retval@481) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@480) (var_ref assignment_tmp@480) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@477) (var_ref lhs@477) ) (var_ref assignment_tmp@480) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp0) )(swiz xxxx (var_ref reg_tmp0) )) (var_ref assignment_tmp@480) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@480) ) ) (assign (xy) (var_ref reg_tmp13) (swiz xy (var_ref mix_retval@481) )) (assign (y) (var_ref reg_tmp13) (swiz y (expression vec4 floor (var_ref reg_tmp13) ) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref mix_retval@481) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp13) )) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@482) (declare (temporary ) vec4 ubo_load_temp@483) (declare (temporary ) uint ubo_load_temp_offset@484) (assign (x) (var_ref ubo_load_temp_offset@484) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@483) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@484) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@482) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@483) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@482) ) (if (swiz x (var_ref lessThanEqual_retval@482) )( (declare (temporary ) vec4 ubo_load_temp@485) (declare (temporary ) uint ubo_load_temp_offset@486) (assign (x) (var_ref ubo_load_temp_offset@486) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@485) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@486) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp12) (swiz xy (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@485) )(expression vec4 neg (swiz yxxx (var_ref reg_tmp12) )) ) )) ) ()) (declare (temporary ) vec4 ubo_load_temp@487) (declare (temporary ) uint ubo_load_temp_offset@488) (assign (x) (var_ref ubo_load_temp_offset@488) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@487) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@488) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp12) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@487) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp12) )) ) )) ) ()) (declare (temporary ) bvec2 lessThanEqual_retval@489) (declare (temporary ) vec4 ubo_load_temp@490) (declare (temporary ) uint ubo_load_temp_offset@491) (assign (x) (var_ref ubo_load_temp_offset@491) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@490) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@491) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@489) (expression bvec2 
>= (swiz xy (var_ref mix_retval@411) )(swiz yy (var_ref ubo_load_temp@490) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@489) ) (if (expression bool ! (swiz y (var_ref lessThanEqual_retval@489) )) ( (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 floor (swiz xxxx (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp0) )(expression vec4 neg (var_ref reg_tmp13) ) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@492) (declare (temporary ) vec4 ubo_load_temp@493) (declare (temporary ) uint ubo_load_temp_offset@494) (assign (x) (var_ref ubo_load_temp_offset@494) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@493) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@494) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@492) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@493) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@492) ) (declare (temporary ) vec4 ubo_load_temp@495) (declare (temporary ) uint ubo_load_temp_offset@496) (assign (x) (var_ref ubo_load_temp_offset@496) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@495) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@496) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp13) (swiz wzyx (var_ref ubo_load_temp@495) )) (if (swiz x (var_ref lessThanEqual_retval@492) )( (declare (temporary ) vec4 assignment_tmp@497) (assign (xyzw) (var_ref assignment_tmp@497) (expression vec4 * (swiz xyyy (var_ref reg_tmp12) )(swiz xyyy (var_ref reg_tmp13) )) ) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@497) (var_ref assignment_tmp@497) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp12) )(swiz xyyy (var_ref reg_tmp12) )) (var_ref assignment_tmp@497) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp13) )(swiz xyyy (var_ref reg_tmp13) )) (var_ref assignment_tmp@497) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@497) ) (swiz zwww (var_ref reg_tmp13) )) )) (declare (temporary ) vec4 assignment_tmp@498) (assign (xyzw) (var_ref assignment_tmp@498) (expression vec4 * (swiz xyyy (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp14) )) ) (declare (temporary ) vec4 mix_retval@499) (assign (xyzw) (var_ref mix_retval@499) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@498) (var_ref assignment_tmp@498) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp11) )(swiz xyyy (var_ref reg_tmp11) )) (var_ref assignment_tmp@498) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref reg_tmp14) )(swiz zwww (var_ref reg_tmp14) )) (var_ref assignment_tmp@498) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@498) ) ) (assign (x) (var_ref reg_tmp11) (swiz x (var_ref mix_retval@499) )) (declare (temporary ) vec4 ubo_load_temp@500) (declare (temporary ) uint ubo_load_temp_offset@501) (assign (x) (var_ref ubo_load_temp_offset@501) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@500) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref 
ubo_load_temp_offset@501) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp11) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@500) )(expression vec4 neg (swiz yyyy (var_ref mix_retval@499) )) ) )) ) ( (declare (temporary ) bvec2 notEqual_retval@502) (declare (temporary ) vec4 ubo_load_temp@503) (declare (temporary ) uint ubo_load_temp_offset@504) (assign (x) (var_ref ubo_load_temp_offset@504) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@503) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@504) (constant uint (400)) ) ) ) (assign (xy) (var_ref notEqual_retval@502) (expression bvec2 != (swiz xx (var_ref ubo_load_temp@503) )(swiz zw (var_ref vs_in_reg0) )) ) (assign (xy) (var_ref conditional_code) (var_ref notEqual_retval@502) ) (if (expression bool ! (swiz x (var_ref notEqual_retval@502) )) ( (assign (x) (var_ref reg_tmp11) (swiz x (var_ref reg_tmp13) )) ) ( (assign (x) (var_ref reg_tmp11) (swiz z (var_ref reg_tmp13) )) )) (if (expression bool ! (swiz y (var_ref notEqual_retval@502) )) ( (assign (y) (var_ref reg_tmp11) (swiz y (var_ref reg_tmp13) )) ) ( (assign (y) (var_ref reg_tmp11) (swiz w (var_ref reg_tmp13) )) )) )) (declare (temporary ) vec4 ubo_load_temp@505) (declare (temporary ) uint ubo_load_temp_offset@506) (assign (x) (var_ref ubo_load_temp_offset@506) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@505) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@506) (constant uint (400)) ) ) ) (assign (z) (var_ref reg_tmp11) (swiz z (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@505) )(swiz zzzz (var_ref reg_tmp11) )) )) ) ( (if (expression bool ! (swiz x (var_ref conditional_code) )) ( (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 floor (swiz xxxx (var_ref reg_tmp0) )) )) (assign (x) (var_ref reg_tmp13) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp0) )(expression vec4 neg (var_ref reg_tmp13) ) ) )) (declare (temporary ) bvec2 lessThanEqual_retval@507) (declare (temporary ) vec4 ubo_load_temp@508) (declare (temporary ) uint ubo_load_temp_offset@509) (assign (x) (var_ref ubo_load_temp_offset@509) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@508) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@509) (constant uint (400)) ) ) ) (assign (xy) (var_ref lessThanEqual_retval@507) (expression bvec2 >= (swiz xx (var_ref reg_tmp13) )(swiz ww (var_ref ubo_load_temp@508) )) ) (assign (xy) (var_ref conditional_code) (var_ref lessThanEqual_retval@507) ) (if (swiz x (var_ref lessThanEqual_retval@507) )( (declare (temporary ) vec4 ubo_load_temp@510) (declare (temporary ) uint ubo_load_temp_offset@511) (assign (x) (var_ref ubo_load_temp_offset@511) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@510) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@511) (constant uint (400)) ) ) ) (assign (zw) (var_ref reg_tmp12) (swiz xy (var_ref ubo_load_temp@510) )) (declare (temporary ) vec4 lhs@512) (declare (temporary ) vec4 ubo_load_temp@513) (declare (temporary ) uint ubo_load_temp_offset@514) (assign (x) (var_ref ubo_load_temp_offset@514) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@513) (expression vec4 ubo_load (constant uint (0)) (expression uint + 
(var_ref ubo_load_temp_offset@514) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@512) (swiz wzyx (var_ref ubo_load_temp@513) )) (declare (temporary ) vec4 assignment_tmp@515) (assign (xyzw) (var_ref assignment_tmp@515) (expression vec4 * (var_ref lhs@512) (var_ref reg_tmp12) ) ) (assign (x) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@515) (var_ref assignment_tmp@515) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@512) (var_ref lhs@512) ) (var_ref assignment_tmp@515) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp12) (var_ref reg_tmp12) ) (var_ref assignment_tmp@515) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@515) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@516) (declare (temporary ) vec4 ubo_load_temp@517) (declare (temporary ) uint ubo_load_temp_offset@518) (assign (x) (var_ref ubo_load_temp_offset@518) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@517) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@518) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@516) (swiz wzyx (var_ref ubo_load_temp@517) )) (declare (temporary ) vec4 assignment_tmp@519) (assign (xyzw) (var_ref assignment_tmp@519) (expression vec4 * (var_ref lhs@516) (var_ref reg_tmp12) ) ) (assign (y) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@519) (var_ref assignment_tmp@519) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@516) (var_ref lhs@516) ) (var_ref assignment_tmp@519) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp12) (var_ref reg_tmp12) ) (var_ref assignment_tmp@519) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@519) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 assignment_tmp@520) (assign (xyzw) (var_ref assignment_tmp@520) (expression vec4 * (swiz xyyy (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp14) )) ) (declare (temporary ) vec4 mix_retval@521) (assign (xyzw) (var_ref mix_retval@521) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@520) (var_ref assignment_tmp@520) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp11) )(swiz xyyy (var_ref reg_tmp11) )) (var_ref assignment_tmp@520) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref reg_tmp14) )(swiz zwww (var_ref reg_tmp14) )) (var_ref assignment_tmp@520) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@520) ) ) (assign (x) (var_ref reg_tmp11) (swiz x (var_ref mix_retval@521) )) (declare (temporary ) vec4 ubo_load_temp@522) (declare (temporary ) uint ubo_load_temp_offset@523) (assign (x) (var_ref ubo_load_temp_offset@523) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@522) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@523) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp11) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@522) )(expression vec4 neg (swiz yyyy (var_ref mix_retval@521) )) ) )) ) ( (declare (temporary ) vec4 ubo_load_temp@524) (declare (temporary ) uint ubo_load_temp_offset@525) (assign (x) (var_ref 
ubo_load_temp_offset@525) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@524) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@525) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp14) (swiz wzyx (var_ref ubo_load_temp@524) )) (declare (temporary ) vec4 ubo_load_temp@526) (declare (temporary ) uint ubo_load_temp_offset@527) (assign (x) (var_ref ubo_load_temp_offset@527) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@526) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@527) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp13) (swiz wzyx (var_ref ubo_load_temp@526) )) (declare (temporary ) bvec2 notEqual_retval@528) (declare (temporary ) vec4 ubo_load_temp@529) (declare (temporary ) uint ubo_load_temp_offset@530) (assign (x) (var_ref ubo_load_temp_offset@530) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@529) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@530) (constant uint (400)) ) ) ) (assign (xy) (var_ref notEqual_retval@528) (expression bvec2 != (swiz xx (var_ref ubo_load_temp@529) )(swiz zw (var_ref vs_in_reg0) )) ) (assign (xy) (var_ref conditional_code) (var_ref notEqual_retval@528) ) (if (expression bool ! (swiz y (var_ref notEqual_retval@528) )) ( (if (expression bool ! (swiz x (var_ref notEqual_retval@528) )) ( (assign (xy) (var_ref reg_tmp11) (swiz xy (var_ref reg_tmp14) )) ) ( (assign (xy) (var_ref reg_tmp11) (swiz zw (var_ref reg_tmp13) )) )) ) ( (if (expression bool ! 
(swiz x (var_ref notEqual_retval@528) )) ( (assign (xy) (var_ref reg_tmp11) (swiz xy (var_ref reg_tmp13) )) ) ( (assign (xy) (var_ref reg_tmp11) (swiz zw (var_ref reg_tmp14) )) )) )) )) ) ( (declare (temporary ) vec4 lhs@531) (declare (temporary ) vec4 ubo_load_temp@532) (declare (temporary ) uint ubo_load_temp_offset@533) (assign (x) (var_ref ubo_load_temp_offset@533) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@532) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@533) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@531) (swiz wzyx (var_ref ubo_load_temp@532) )) (declare (temporary ) vec4 assignment_tmp@534) (assign (xyzw) (var_ref assignment_tmp@534) (expression vec4 * (var_ref lhs@531) (var_ref reg_tmp1) ) ) (assign (x) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@534) (var_ref assignment_tmp@534) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@531) (var_ref lhs@531) ) (var_ref assignment_tmp@534) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@534) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@534) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@535) (declare (temporary ) vec4 ubo_load_temp@536) (declare (temporary ) uint ubo_load_temp_offset@537) (assign (x) (var_ref ubo_load_temp_offset@537) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@536) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@537) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@535) (swiz wzyx (var_ref ubo_load_temp@536) )) (declare (temporary ) vec4 assignment_tmp@538) (assign (xyzw) (var_ref assignment_tmp@538) (expression vec4 * (var_ref lhs@535) (var_ref reg_tmp1) ) ) (assign (y) (var_ref reg_tmp11) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@538) (var_ref assignment_tmp@538) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@535) (var_ref lhs@535) ) (var_ref assignment_tmp@538) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@538) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@538) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) )) (declare (temporary ) vec4 ubo_load_temp@539) (declare (temporary ) uint ubo_load_temp_offset@540) (assign (x) (var_ref ubo_load_temp_offset@540) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@539) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@540) (constant uint (400)) ) ) ) (assign (z) (var_ref reg_tmp11) (swiz z (expression vec4 + (swiz zzzz (var_ref ubo_load_temp@539) )(swiz zzzz (var_ref reg_tmp11) )) )) )) (declare (temporary ) bool ubo_load_temp@541) (declare (temporary ) uint ubo_load_temp_offset@542) (assign (x) (var_ref ubo_load_temp_offset@542) (constant uint (0)) ) (assign (x) (var_ref ubo_load_temp@541) (expression bool ubo_load (constant uint (0)) (expression uint + (var_ref 
ubo_load_temp_offset@542) (constant uint (80)) ) ) ) (if (var_ref ubo_load_temp@541) ( (assign (xy) (var_ref reg_tmp11) (swiz yx (var_ref reg_tmp11) )) (declare (temporary ) vec4 ubo_load_temp@543) (declare (temporary ) uint ubo_load_temp_offset@544) (assign (x) (var_ref ubo_load_temp_offset@544) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@543) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@544) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp11) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@543) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp11) )) ) )) ) ()) (declare (temporary ) bool ubo_load_temp@545) (declare (temporary ) uint ubo_load_temp_offset@546) (assign (x) (var_ref ubo_load_temp_offset@546) (constant uint (0)) ) (assign (x) (var_ref ubo_load_temp@545) (expression bool ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@546) (constant uint (96)) ) ) ) (if (var_ref ubo_load_temp@545) ( (declare (temporary ) vec4 ubo_load_temp@547) (declare (temporary ) uint ubo_load_temp_offset@548) (assign (x) (var_ref ubo_load_temp_offset@548) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@547) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@548) (constant uint (400)) ) ) ) (assign (xy) (var_ref reg_tmp11) (swiz xy (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@547) )(expression vec4 neg (swiz yxxx (var_ref reg_tmp11) )) ) )) ) ()) (assign (xyzw) (var_ref vs_out_attr4) (swiz xyyy (var_ref reg_tmp11) )) (assign (x) (var_ref return_flag) (constant bool (1)) ) break ) ()) (assign (x) (var_ref switch_is_fallthru_tmp) (expression bool || (var_ref switch_is_fallthru_tmp) (expression bool == (constant uint (191)) (var_ref switch_test_tmp) ) ) ) (if (var_ref switch_is_fallthru_tmp) ( (declare (temporary ) vec4 lhs@549) (declare (temporary ) vec4 ubo_load_temp@550) (declare (temporary ) uint ubo_load_temp_offset@551) (assign (x) (var_ref ubo_load_temp_offset@551) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (36)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@550) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@551) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@549) (swiz wzzz (var_ref ubo_load_temp@550) )) (declare (temporary ) vec4 assignment_tmp@552) (assign (xyzw) (var_ref assignment_tmp@552) (expression vec4 * (var_ref lhs@549) (swiz xyyy (var_ref reg_tmp1) )) ) (declare (temporary ) vec4 mix_retval@553) (assign (xyzw) (var_ref mix_retval@553) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@552) (var_ref assignment_tmp@552) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@549) (var_ref lhs@549) ) (var_ref assignment_tmp@552) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp1) )(swiz xyyy (var_ref reg_tmp1) )) (var_ref assignment_tmp@552) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@552) ) ) (declare (temporary ) vec4 ubo_load_temp@554) (declare (temporary ) uint ubo_load_temp_offset@555) (assign (x) (var_ref ubo_load_temp_offset@555) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (35)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@554) 
(expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@555) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp14) (swiz wzyx (var_ref ubo_load_temp@554) )) (declare (temporary ) vec4 assignment_tmp@556) (assign (xyzw) (var_ref assignment_tmp@556) (expression vec4 * (swiz xyyy (var_ref mix_retval@553) )(swiz xyyy (var_ref reg_tmp2) )) ) (assign (xy) (var_ref reg_tmp1) (swiz xy (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@556) (var_ref assignment_tmp@556) ) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref mix_retval@553) )(swiz xyyy (var_ref mix_retval@553) )) (var_ref assignment_tmp@556) (expression vec4 csel (expression bvec4 != (swiz xyyy (var_ref reg_tmp2) )(swiz xyyy (var_ref reg_tmp2) )) (var_ref assignment_tmp@556) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@556) ) (swiz zwww (var_ref reg_tmp2) )) )) (declare (temporary ) vec4 assignment_tmp@557) (assign (xyzw) (var_ref assignment_tmp@557) (expression vec4 * (swiz xxxx (var_ref reg_tmp14) )(swiz wwww (var_ref vs_in_reg0) )) ) (assign (x) (var_ref reg_tmp14) (swiz x (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@557) (var_ref assignment_tmp@557) ) (expression vec4 csel (expression bvec4 != (swiz xxxx (var_ref reg_tmp14) )(swiz xxxx (var_ref reg_tmp14) )) (var_ref assignment_tmp@557) (expression vec4 csel (expression bvec4 != (swiz wwww (var_ref vs_in_reg0) )(swiz wwww (var_ref vs_in_reg0) )) (var_ref assignment_tmp@557) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@557) ) (swiz xxxx (var_ref reg_tmp14) )) )) (declare (temporary ) vec4 ubo_load_temp@558) (declare (temporary ) uint ubo_load_temp_offset@559) (assign (x) (var_ref ubo_load_temp_offset@559) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (36)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@558) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@559) (constant uint (320)) ) ) ) (assign (xy) (var_ref reg_tmp1) (swiz xy (expression vec4 + (swiz yxxx (var_ref ubo_load_temp@558) )(swiz xyyy (var_ref reg_tmp1) )) )) (declare (temporary ) vec4 rhs) (declare (temporary ) vec4 ubo_load_temp@560) (declare (temporary ) uint ubo_load_temp_offset@561) (assign (x) (var_ref ubo_load_temp_offset@561) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (36)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@560) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@561) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref rhs) (swiz zzzz (var_ref ubo_load_temp@560) )) (declare (temporary ) vec4 assignment_tmp@562) (assign (xyzw) (var_ref assignment_tmp@562) (expression vec4 * (swiz yyyy (var_ref reg_tmp2) )(var_ref rhs) ) ) (assign (y) (var_ref reg_tmp11) (swiz y (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@562) (var_ref assignment_tmp@562) ) (expression vec4 csel (expression bvec4 != (swiz yyyy (var_ref reg_tmp2) )(swiz yyyy (var_ref reg_tmp2) )) (var_ref assignment_tmp@562) (expression vec4 csel (expression bvec4 != (var_ref rhs) (var_ref rhs) ) (var_ref assignment_tmp@562) (constant vec4 (0.000000 0.000000 0.000000 
0.000000)) ) ) (var_ref assignment_tmp@562) ) (expression vec4 neg (swiz yyyy (var_ref reg_tmp2) )) ) )) (assign (x) (var_ref reg_tmp1) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp1) )(swiz xxxx (var_ref reg_tmp14) )) )) (assign (y) (var_ref reg_tmp1) (swiz y (expression vec4 + (swiz yyyy (var_ref reg_tmp1) )(swiz yyyy (var_ref reg_tmp11) )) )) (declare (temporary ) vec4 lhs@563) (declare (temporary ) vec4 ubo_load_temp@564) (declare (temporary ) uint ubo_load_temp_offset@565) (assign (x) (var_ref ubo_load_temp_offset@565) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@564) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@565) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@563) (swiz wzyx (var_ref ubo_load_temp@564) )) (declare (temporary ) vec4 assignment_tmp@566) (assign (xyzw) (var_ref assignment_tmp@566) (expression vec4 * (var_ref lhs@563) (var_ref reg_tmp1) ) ) (assign (x) (var_ref reg_tmp3) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@566) (var_ref assignment_tmp@566) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@563) (var_ref lhs@563) ) (var_ref assignment_tmp@566) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@566) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@566) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@567) (declare (temporary ) vec4 ubo_load_temp@568) (declare (temporary ) uint ubo_load_temp_offset@569) (assign (x) (var_ref ubo_load_temp_offset@569) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@568) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@569) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@567) (swiz wzyx (var_ref ubo_load_temp@568) )) (declare (temporary ) vec4 assignment_tmp@570) (assign (xyzw) (var_ref assignment_tmp@570) (expression vec4 * (var_ref lhs@567) (var_ref reg_tmp1) ) ) (assign (y) (var_ref reg_tmp3) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@570) (var_ref assignment_tmp@570) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@567) (var_ref lhs@567) ) (var_ref assignment_tmp@570) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@570) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@570) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@571) (declare (temporary ) vec4 ubo_load_temp@572) (declare (temporary ) uint ubo_load_temp_offset@573) (assign (x) (var_ref ubo_load_temp_offset@573) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (34)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@572) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@573) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@571) (swiz wzyx (var_ref ubo_load_temp@572) )) 
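The assignment_tmp@... blocks above and below all repeat one idiom: a component-wise multiply followed by a csel chain keyed on "x != x" (a NaN test), so a NaN product is flushed to zero unless one of the operands was already NaN, while the wzyx swizzles re-order each value fetched by ubo_load before it is used. This multiply guard is consistent with the hardware behaviour the trace emulates (the uniform block is declared as pica_uniforms in the NIR dump below), where 0 times infinity is expected to yield 0. A minimal C sketch of the per-component behaviour; the helper name is illustrative and does not appear in the dump:

#include <math.h>

/* Keep the product, but flush it to 0.0 when it is NaN even though
 * neither operand was NaN (i.e. it came from 0 * infinity), matching
 * the csel (bvec4 != x x) pattern in the IR above. */
static float sanitized_mul(float lhs, float rhs)
{
    float product = lhs * rhs;
    if (isnan(product) && !isnan(lhs) && !isnan(rhs))
        return 0.0f;
    return product;
}

The same guarded product also feeds the dot products that follow: the IR multiplies two vec4s this way and then takes a dot against the constant (1.000000 1.000000 1.000000 1.000000).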
(declare (temporary ) vec4 assignment_tmp@574) (assign (xyzw) (var_ref assignment_tmp@574) (expression vec4 * (var_ref lhs@571) (var_ref reg_tmp1) ) ) (assign (z) (var_ref reg_tmp3) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@574) (var_ref assignment_tmp@574) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@571) (var_ref lhs@571) ) (var_ref assignment_tmp@574) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp1) (var_ref reg_tmp1) ) (var_ref assignment_tmp@574) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@574) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (assign (w) (var_ref reg_tmp3) (swiz w (var_ref reg_tmp1) )) (declare (temporary ) vec4 ubo_load_temp@575) (declare (temporary ) uint ubo_load_temp_offset@576) (assign (x) (var_ref ubo_load_temp_offset@576) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@575) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@576) (constant uint (384)) ) ) ) (assign (xyzw) (var_ref reg_tmp11) (swiz wzyx (var_ref ubo_load_temp@575) )) (declare (temporary ) vec4 ubo_load_temp@577) (declare (temporary ) uint ubo_load_temp_offset@578) (assign (x) (var_ref ubo_load_temp_offset@578) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (34)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@577) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@578) (constant uint (320)) ) ) ) (assign (z) (var_ref reg_tmp11) (swiz z (expression vec4 + (expression vec4 neg (swiz xxxx (var_ref ubo_load_temp@577) )) (swiz zzzz (var_ref reg_tmp11) )) )) (declare (temporary ) vec4 ubo_load_temp@579) (declare (temporary ) uint ubo_load_temp_offset@580) (assign (x) (var_ref ubo_load_temp_offset@580) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@579) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@580) (constant uint (400)) ) ) ) (assign (x) (var_ref conditional_code) (expression bool != (swiz x (var_ref ubo_load_temp@579) )(swiz x (var_ref reg_tmp11) )) ) (declare (temporary ) vec4 ubo_load_temp@581) (declare (temporary ) uint ubo_load_temp_offset@582) (assign (x) (var_ref ubo_load_temp_offset@582) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@581) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@582) (constant uint (400)) ) ) ) (assign (y) (var_ref conditional_code) (expression bool < (swiz x (var_ref ubo_load_temp@581) )(swiz z (var_ref reg_tmp11) )) ) (if (expression bool all_equal (expression bvec2 ! 
(var_ref conditional_code) ) (constant bvec2 (0 0)) ) ( (assign (z) (var_ref reg_tmp11) (expression float rcp (swiz z (var_ref reg_tmp11) )) ) (assign (x) (var_ref reg_tmp3) (swiz x (expression vec4 + (swiz xxxx (var_ref reg_tmp3) )(swiz xxxx (var_ref reg_tmp11) )) )) (declare (temporary ) vec4 lhs@583) (assign (xyzw) (var_ref lhs@583) (expression vec4 neg (swiz yyyy (var_ref reg_tmp11) )) ) (declare (temporary ) vec4 assignment_tmp@584) (assign (xyzw) (var_ref assignment_tmp@584) (expression vec4 * (var_ref lhs@583) (swiz zzzz (var_ref reg_tmp11) )) ) (assign (x) (var_ref reg_tmp3) (swiz x (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@584) (var_ref assignment_tmp@584) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@583) (var_ref lhs@583) ) (var_ref assignment_tmp@584) (expression vec4 csel (expression bvec4 != (swiz zzzz (var_ref reg_tmp11) )(swiz zzzz (var_ref reg_tmp11) )) (var_ref assignment_tmp@584) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@584) ) (swiz xxxx (var_ref reg_tmp3) )) )) ) ()) (declare (temporary ) vec4 lhs@585) (declare (temporary ) vec4 ubo_load_temp@586) (declare (temporary ) uint ubo_load_temp_offset@587) (assign (x) (var_ref ubo_load_temp_offset@587) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@586) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@587) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@585) (swiz wzyx (var_ref ubo_load_temp@586) )) (declare (temporary ) vec4 assignment_tmp@588) (assign (xyzw) (var_ref assignment_tmp@588) (expression vec4 * (var_ref lhs@585) (var_ref reg_tmp3) ) ) (assign (x) (var_ref vs_out_attr0) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@588) (var_ref assignment_tmp@588) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@585) (var_ref lhs@585) ) (var_ref assignment_tmp@588) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp3) (var_ref reg_tmp3) ) (var_ref assignment_tmp@588) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@588) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@589) (declare (temporary ) vec4 ubo_load_temp@590) (declare (temporary ) uint ubo_load_temp_offset@591) (assign (x) (var_ref ubo_load_temp_offset@591) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@590) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@591) (constant uint (336)) ) ) ) (assign (xyzw) (var_ref lhs@589) (swiz wzyx (var_ref ubo_load_temp@590) )) (declare (temporary ) vec4 assignment_tmp@592) (assign (xyzw) (var_ref assignment_tmp@592) (expression vec4 * (var_ref lhs@589) (var_ref reg_tmp3) ) ) (assign (y) (var_ref vs_out_attr0) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@592) (var_ref assignment_tmp@592) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@589) (var_ref lhs@589) ) (var_ref assignment_tmp@592) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp3) (var_ref reg_tmp3) ) (var_ref assignment_tmp@592) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@592) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@593) (declare (temporary ) vec4 ubo_load_temp@594) (declare (temporary ) uint ubo_load_temp_offset@595) (assign (x) (var_ref ubo_load_temp_offset@595) (constant 
uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@594) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@595) (constant uint (352)) ) ) ) (assign (xyzw) (var_ref lhs@593) (swiz wzyx (var_ref ubo_load_temp@594) )) (declare (temporary ) vec4 assignment_tmp@596) (assign (xyzw) (var_ref assignment_tmp@596) (expression vec4 * (var_ref lhs@593) (var_ref reg_tmp3) ) ) (assign (z) (var_ref vs_out_attr0) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@596) (var_ref assignment_tmp@596) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@593) (var_ref lhs@593) ) (var_ref assignment_tmp@596) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp3) (var_ref reg_tmp3) ) (var_ref assignment_tmp@596) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@596) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) vec4 lhs@597) (declare (temporary ) vec4 ubo_load_temp@598) (declare (temporary ) uint ubo_load_temp_offset@599) (assign (x) (var_ref ubo_load_temp_offset@599) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@598) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@599) (constant uint (368)) ) ) ) (assign (xyzw) (var_ref lhs@597) (swiz wzyx (var_ref ubo_load_temp@598) )) (declare (temporary ) vec4 assignment_tmp@600) (assign (xyzw) (var_ref assignment_tmp@600) (expression vec4 * (var_ref lhs@597) (var_ref reg_tmp3) ) ) (assign (w) (var_ref vs_out_attr0) (expression float dot (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@600) (var_ref assignment_tmp@600) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@597) (var_ref lhs@597) ) (var_ref assignment_tmp@600) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp3) (var_ref reg_tmp3) ) (var_ref assignment_tmp@600) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@600) ) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) ) ) (declare (temporary ) bool ubo_load_temp@601) (declare (temporary ) uint ubo_load_temp_offset@602) (assign (x) (var_ref ubo_load_temp_offset@602) (constant uint (0)) ) (assign (x) (var_ref ubo_load_temp@601) (expression bool ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@602) (constant uint (32)) ) ) ) (if (var_ref ubo_load_temp@601) ( (assign (xyzw) (var_ref reg_tmp11) (expression vec4 csel (expression bvec4 < (expression vec4 neg (swiz zwzw (var_ref vs_in_reg0) )) (swiz zwzw (var_ref vs_in_reg0) )) (swiz zwzw (var_ref vs_in_reg0) )(expression vec4 neg (swiz zwzw (var_ref vs_in_reg0) )) ) ) (assign (xy) (var_ref address_registers) (swiz xy (expression ivec4 f2i (swiz zxxx (var_ref reg_tmp0) )) )) (declare (temporary ) vec4 lhs@603) (declare (temporary ) vec4 ubo_load_temp@604) (declare (temporary ) uint ubo_load_temp_offset@605) (assign (x) (var_ref ubo_load_temp_offset@605) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@604) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@605) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@603) (swiz yxxx (var_ref ubo_load_temp@604) )) (declare (temporary ) vec4 assignment_tmp@606) (assign (xyzw) (var_ref assignment_tmp@606) (expression vec4 * (var_ref lhs@603) 
(var_ref reg_tmp11) ) ) (declare (temporary ) vec4 mix_retval@607) (assign (xyzw) (var_ref mix_retval@607) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@606) (var_ref assignment_tmp@606) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@603) (var_ref lhs@603) ) (var_ref assignment_tmp@606) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp11) (var_ref reg_tmp11) ) (var_ref assignment_tmp@606) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@606) ) ) (declare (temporary ) vec4 ubo_load_temp@608) (declare (temporary ) uint ubo_load_temp_offset@609) (assign (x) (var_ref ubo_load_temp_offset@609) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@608) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@609) (constant uint (400)) ) ) ) (assign (zw) (var_ref reg_tmp11) (swiz zw (expression vec4 csel (expression bvec4 < (var_ref reg_tmp11) (swiz yyyy (var_ref ubo_load_temp@608) )) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) )) (declare (temporary ) vec4 ubo_load_temp@610) (declare (temporary ) uint ubo_load_temp_offset@611) (assign (x) (var_ref ubo_load_temp_offset@611) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (37)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@610) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@611) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp7) (swiz wzyx (var_ref ubo_load_temp@610) )) (declare (temporary ) vec4 rhs@612) (declare (temporary ) vec4 ubo_load_temp@613) (declare (temporary ) uint ubo_load_temp_offset@614) (assign (x) (var_ref ubo_load_temp_offset@614) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@613) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@614) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref rhs@612) (swiz wzzz (var_ref ubo_load_temp@613) )) (declare (temporary ) vec4 assignment_tmp@615) (assign (xyzw) (var_ref assignment_tmp@615) (expression vec4 * (swiz zwww (var_ref reg_tmp11) )(var_ref rhs@612) ) ) (assign (xy) (var_ref reg_tmp9) (swiz xy (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@615) (var_ref assignment_tmp@615) ) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp11) )) (var_ref assignment_tmp@615) (expression vec4 csel (expression bvec4 != (var_ref rhs@612) (var_ref rhs@612) ) (var_ref assignment_tmp@615) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@615) ) (swiz xyyy (var_ref mix_retval@607) )) )) (declare (temporary ) vec4 ubo_load_temp@616) (declare (temporary ) uint ubo_load_temp_offset@617) (assign (x) (var_ref ubo_load_temp_offset@617) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (38)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@616) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@617) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref 
reg_tmp8) (swiz wzyx (var_ref ubo_load_temp@616) )) (declare (temporary ) vec4 ubo_load_temp@618) (declare (temporary ) uint ubo_load_temp_offset@619) (assign (x) (var_ref ubo_load_temp_offset@619) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@618) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@619) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp9) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@618) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp9) )) ) )) ) ( (assign (xyzw) (var_ref reg_tmp11) (expression vec4 csel (expression bvec4 < (expression vec4 neg (swiz zwzw (var_ref vs_in_reg0) )) (swiz zwzw (var_ref vs_in_reg0) )) (swiz zwzw (var_ref vs_in_reg0) )(expression vec4 neg (swiz zwzw (var_ref vs_in_reg0) )) ) ) (assign (xy) (var_ref address_registers) (swiz xy (expression ivec4 f2i (swiz zwww (var_ref reg_tmp0) )) )) (declare (temporary ) vec4 lhs@620) (declare (temporary ) vec4 ubo_load_temp@621) (declare (temporary ) uint ubo_load_temp_offset@622) (assign (x) (var_ref ubo_load_temp_offset@622) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@621) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@622) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref lhs@620) (swiz yxxx (var_ref ubo_load_temp@621) )) (declare (temporary ) vec4 assignment_tmp@623) (assign (xyzw) (var_ref assignment_tmp@623) (expression vec4 * (var_ref lhs@620) (var_ref reg_tmp11) ) ) (declare (temporary ) vec4 mix_retval@624) (assign (xyzw) (var_ref mix_retval@624) (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@623) (var_ref assignment_tmp@623) ) (expression vec4 csel (expression bvec4 != (var_ref lhs@620) (var_ref lhs@620) ) (var_ref assignment_tmp@623) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp11) (var_ref reg_tmp11) ) (var_ref assignment_tmp@623) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@623) ) ) (declare (temporary ) vec4 ubo_load_temp@625) (declare (temporary ) uint ubo_load_temp_offset@626) (assign (x) (var_ref ubo_load_temp_offset@626) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@625) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@626) (constant uint (400)) ) ) ) (assign (zw) (var_ref reg_tmp11) (swiz zw (expression vec4 csel (expression bvec4 < (var_ref reg_tmp11) (swiz yyyy (var_ref ubo_load_temp@625) )) (constant vec4 (1.000000 1.000000 1.000000 1.000000)) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) )) (declare (temporary ) vec4 ubo_load_temp@627) (declare (temporary ) uint ubo_load_temp_offset@628) (assign (x) (var_ref ubo_load_temp_offset@628) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@627) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@628) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp7) (swiz wzyx (var_ref ubo_load_temp@627) )) (declare (temporary ) vec4 rhs@629) (declare (temporary ) vec4 ubo_load_temp@630) (declare (temporary ) uint ubo_load_temp_offset@631) (assign (x) (var_ref ubo_load_temp_offset@631) 
(expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (swiz x (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@630) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@631) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref rhs@629) (swiz wzzz (var_ref ubo_load_temp@630) )) (declare (temporary ) vec4 assignment_tmp@632) (assign (xyzw) (var_ref assignment_tmp@632) (expression vec4 * (swiz zwww (var_ref reg_tmp11) )(var_ref rhs@629) ) ) (assign (xy) (var_ref reg_tmp9) (swiz xy (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@632) (var_ref assignment_tmp@632) ) (expression vec4 csel (expression bvec4 != (swiz zwww (var_ref reg_tmp11) )(swiz zwww (var_ref reg_tmp11) )) (var_ref assignment_tmp@632) (expression vec4 csel (expression bvec4 != (var_ref rhs@629) (var_ref rhs@629) ) (var_ref assignment_tmp@632) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@632) ) (swiz xyyy (var_ref mix_retval@624) )) )) (declare (temporary ) vec4 ubo_load_temp@633) (declare (temporary ) uint ubo_load_temp_offset@634) (assign (x) (var_ref ubo_load_temp_offset@634) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (33)) (swiz y (var_ref address_registers) )) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref ubo_load_temp@633) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@634) (constant uint (320)) ) ) ) (assign (xyzw) (var_ref reg_tmp8) (swiz wzyx (var_ref ubo_load_temp@633) )) (declare (temporary ) vec4 ubo_load_temp@635) (declare (temporary ) uint ubo_load_temp_offset@636) (assign (x) (var_ref ubo_load_temp_offset@636) (constant uint (0)) ) (assign (xyzw) (var_ref ubo_load_temp@635) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@636) (constant uint (400)) ) ) ) (assign (y) (var_ref reg_tmp9) (swiz y (expression vec4 + (swiz yyyy (var_ref ubo_load_temp@635) )(expression vec4 neg (swiz yyyy (var_ref reg_tmp9) )) ) )) )) (assign (xyzw) (var_ref vs_out_attr2) (var_ref reg_tmp9) ) (assign (xyzw) (var_ref reg_tmp8) (expression vec4 + (var_ref reg_tmp8) (expression vec4 neg (var_ref reg_tmp7) ) ) ) (assign (xyzw) (var_ref vs_out_attr3) (var_ref reg_tmp9) ) (declare (temporary ) vec4 assignment_tmp@637) (assign (xyzw) (var_ref assignment_tmp@637) (expression vec4 * (var_ref reg_tmp8) (swiz yyyy (var_ref reg_tmp11) )) ) (assign (xyzw) (var_ref vs_out_attr1) (expression vec4 + (expression vec4 csel (expression bvec4 != (var_ref assignment_tmp@637) (var_ref assignment_tmp@637) ) (expression vec4 csel (expression bvec4 != (var_ref reg_tmp8) (var_ref reg_tmp8) ) (var_ref assignment_tmp@637) (expression vec4 csel (expression bvec4 != (swiz yyyy (var_ref reg_tmp11) )(swiz yyyy (var_ref reg_tmp11) )) (var_ref assignment_tmp@637) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) ) (var_ref assignment_tmp@637) ) (var_ref reg_tmp7) ) ) (assign (xyzw) (var_ref vs_out_attr4) (var_ref reg_tmp9) ) (assign (x) (var_ref return_flag) (constant bool (1)) ) break ) ()) (assign (x) (var_ref switch_is_fallthru_tmp) (constant bool (1)) ) (assign (x) (var_ref return_flag) (constant bool (1)) ) break )) (if (var_ref return_flag) ( break ) ()) )) (if (var_ref return_flag) ( (assign (x) (var_ref return_flag) (constant bool (1)) ) ) ( (assign (x) (var_ref 
return_flag) (constant bool (1)) ) )) )) ) ) NIR (SSA form) for vertex shader: shader: MESA_SHADER_VERTEX name: GLSL18 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE pica_uniforms uniforms (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 vs_in_reg0 (VERT_ATTRIB_GENERIC0, 16, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr0 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr1 (VARYING_SLOT_VAR1, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr2 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr3 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr4 (VARYING_SLOT_VAR4, 35, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec3 32 ssa_0 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_1 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x3f800000 /* 1.000000 */) vec1 32 ssa_2 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x0000006d /* 0.000000 */) vec1 32 ssa_4 = load_const (0x00000009 /* 0.000000 */) vec1 32 ssa_5 = load_const (0x00000020 /* 0.000000 */) vec1 32 ssa_6 = load_const (0x000000bf /* 0.000000 */) vec1 32 ssa_7 = load_const (0x0000001f /* 0.000000 */) vec4 32 ssa_8 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_9 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec1 32 ssa_10 = load_const (0x00000021 /* 0.000000 */) vec1 32 ssa_11 = load_const (0x00000022 /* 0.000000 */) vec2 32 ssa_12 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_13 = load_const (0xffffffff /* -nan */, 0xffffffff /* -nan */) vec1 32 ssa_14 = load_const (0x00000023 /* 0.000000 */) vec1 32 ssa_15 = load_const (0xffffffff /* -nan */) vec1 32 ssa_16 = load_const (0x00000024 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000025 /* 0.000000 */) vec1 32 ssa_18 = load_const (0x00000026 /* 0.000000 */) /* succs: block_1 */ loop { block block_1: /* preds: block_0 block_165 */ vec1 32 ssa_19 = phi block_0: ssa_2, block_165: ssa_1329 vec4 32 ssa_20 = phi block_0: ssa_1, block_165: ssa_1330 vec4 32 ssa_21 = phi block_0: ssa_1, block_165: ssa_1331 vec4 32 ssa_22 = phi block_0: ssa_1, block_165: ssa_1332 vec4 32 ssa_23 = phi block_0: ssa_1, block_165: ssa_1333 vec4 32 ssa_24 = phi block_0: ssa_1, block_165: ssa_1334 vec4 32 ssa_25 = phi block_0: ssa_1, block_165: ssa_1335 vec4 32 ssa_26 = phi block_0: ssa_1, block_165: ssa_1336 vec4 32 ssa_27 = phi block_0: ssa_1, block_165: ssa_1337 vec4 32 ssa_28 = phi block_0: ssa_1, block_165: ssa_1338 vec4 32 ssa_29 = phi block_0: ssa_1, block_165: ssa_1339 vec4 32 ssa_30 = phi block_0: ssa_1, block_165: ssa_1340 vec3 32 ssa_31 = phi block_0: ssa_0, block_165: ssa_1341 vec1 32 ssa_32 = phi block_0: ssa_3, block_165: ssa_1342 /* succs: block_2 */ loop { block block_2: /* preds: block_1 */ vec1 32 ssa_33 = ieq ssa_3, ssa_32 /* succs: block_3 block_151 */ if ssa_33 { block block_3: /* preds: block_2 */ vec4 32 ssa_34 = intrinsic load_input (ssa_2) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_35 = f2i32 ssa_34.xxxx vec1 32 ssa_36 = iadd ssa_4, ssa_35.x vec1 32 ssa_37 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_38 = ishl ssa_36, ssa_37 vec1 32 ssa_39 = load_const (0x00000140 /* 0.000000 */) vec1 32 
ssa_40 = iadd ssa_39, ssa_38 vec4 32 ssa_41 = intrinsic load_ubo (ssa_2, ssa_40) () () vec4 32 ssa_42 = imov ssa_41.wzyx vec1 32 ssa_43 = load_const (0x00000190 /* 0.000000 */) vec4 32 ssa_44 = intrinsic load_ubo (ssa_2, ssa_43) () () vec4 32 ssa_45 = vec4 ssa_34.z, ssa_34.w, ssa_44.x, ssa_44.y vec4 32 ssa_46 = f2i32 ssa_41.wzzz vec3 32 ssa_47 = vec3 ssa_46.x, ssa_46.y, ssa_31.z vec1 32 ssa_48 = iadd ssa_5, ssa_46.y vec1 32 ssa_49 = ishl ssa_48, ssa_37 vec1 32 ssa_50 = iadd ssa_39, ssa_49 vec4 32 ssa_51 = intrinsic load_ubo (ssa_2, ssa_50) () () vec4 32 ssa_52 = imov ssa_51.wzyx vec1 32 ssa_53 = intrinsic load_ubo (ssa_2, ssa_2) () () vec1 32 ssa_54 = ine ssa_53, ssa_2 /* succs: block_4 block_5 */ if ssa_54 { block block_4: /* preds: block_3 */ break /* succs: block_162 */ } else { block block_5: /* preds: block_3 */ /* succs: block_6 */ } block block_6: /* preds: block_5 */ vec1 32 ssa_55 = iadd ssa_7, ssa_46.x vec1 32 ssa_56 = ishl ssa_55, ssa_37 vec1 32 ssa_57 = iadd ssa_39, ssa_56 vec4 32 ssa_58 = intrinsic load_ubo (ssa_2, ssa_57) () () vec4 32 ssa_59 = fmul ssa_34.zwww, ssa_51.wzzz vec4 32 ssa_60 = fne ssa_51.wzzz, ssa_51.wzzz vec4 32 ssa_61 = bcsel ssa_60, ssa_59, ssa_8 vec4 32 ssa_62 = fne ssa_34.zwww, ssa_34.zwww vec4 32 ssa_63 = bcsel ssa_62, ssa_59, ssa_61 vec4 32 ssa_64 = fne ssa_59, ssa_59 vec4 32 ssa_65 = bcsel ssa_64, ssa_63, ssa_59 vec4 32 ssa_66 = fadd ssa_65, ssa_51.yxxx vec4 32 ssa_67 = vec4 ssa_66.x, ssa_66.y, ssa_44.x, ssa_44.y vec1 32 ssa_68 = iadd ssa_5, ssa_46.x vec1 32 ssa_69 = ishl ssa_68, ssa_37 vec1 32 ssa_70 = iadd ssa_39, ssa_69 vec4 32 ssa_71 = intrinsic load_ubo (ssa_2, ssa_70) () () vec4 32 ssa_72 = fmul ssa_71.wzyx, ssa_67 vec4 32 ssa_73 = fne ssa_67, ssa_67 vec4 32 ssa_74 = bcsel ssa_73, ssa_72, ssa_8 vec4 32 ssa_75 = fne ssa_71.wzyx, ssa_71.wzyx vec4 32 ssa_76 = bcsel ssa_75, ssa_72, ssa_74 vec4 32 ssa_77 = fne ssa_72, ssa_72 vec4 32 ssa_78 = bcsel ssa_77, ssa_76, ssa_72 vec4 32 ssa_79 = fdot_replicated4 ssa_78, ssa_9 vec1 32 ssa_80 = iadd ssa_10, ssa_46.x vec1 32 ssa_81 = ishl ssa_80, ssa_37 vec1 32 ssa_82 = iadd ssa_39, ssa_81 vec4 32 ssa_83 = intrinsic load_ubo (ssa_2, ssa_82) () () vec4 32 ssa_84 = fmul ssa_83.wzyx, ssa_67 vec4 32 ssa_85 = bcsel ssa_73, ssa_84, ssa_8 vec4 32 ssa_86 = fne ssa_83.wzyx, ssa_83.wzyx vec4 32 ssa_87 = bcsel ssa_86, ssa_84, ssa_85 vec4 32 ssa_88 = fne ssa_84, ssa_84 vec4 32 ssa_89 = bcsel ssa_88, ssa_87, ssa_84 vec4 32 ssa_90 = fdot_replicated4 ssa_89, ssa_9 vec1 32 ssa_91 = iadd ssa_11, ssa_46.x vec1 32 ssa_92 = ishl ssa_91, ssa_37 vec1 32 ssa_93 = iadd ssa_39, ssa_92 vec4 32 ssa_94 = intrinsic load_ubo (ssa_2, ssa_93) () () vec4 32 ssa_95 = fmul ssa_94.wzyx, ssa_67 vec4 32 ssa_96 = bcsel ssa_73, ssa_95, ssa_8 vec4 32 ssa_97 = fne ssa_94.wzyx, ssa_94.wzyx vec4 32 ssa_98 = bcsel ssa_97, ssa_95, ssa_96 vec4 32 ssa_99 = fne ssa_95, ssa_95 vec4 32 ssa_100 = bcsel ssa_99, ssa_98, ssa_95 vec4 32 ssa_101 = fdot_replicated4 ssa_100, ssa_9 vec4 32 ssa_102 = vec4 ssa_79.x, ssa_90.x, ssa_101.x, ssa_44.y vec1 32 ssa_103 = load_const (0x00000180 /* 0.000000 */) vec4 32 ssa_104 = intrinsic load_ubo (ssa_2, ssa_103) () () vec4 32 ssa_105 = fadd -ssa_94.xxxx, ssa_104.yyyy vec4 32 ssa_106 = vec4 ssa_104.w, ssa_104.z, ssa_105.z, ssa_104.x vec1 32 ssa_107 = flt ssa_44.x, ssa_105.z vec1 32 ssa_108 = fne ssa_44.x, ssa_104.w vec2 32 ssa_109 = vec2 ssa_108, ssa_107 vec2 32 ssa_110 = inot ssa_109 vec1 32 ssa_111 = ball_iequal2 ssa_110, ssa_12 /* succs: block_7 block_8 */ if ssa_111 { block block_7: /* preds: block_6 */ vec1 32 ssa_112 = frcp 
ssa_105.z vec4 32 ssa_113 = vec4 ssa_104.w, ssa_104.z, ssa_112, ssa_104.x vec4 32 ssa_114 = fadd ssa_79.xxxx, ssa_104.wwww vec4 32 ssa_115 = fmul ssa_104.zzzz, ssa_112.xxxx vec4 32 ssa_116 = fmov -ssa_115 vec4 32 ssa_117 = fne ssa_112.xxxx, ssa_112.xxxx vec4 32 ssa_118 = bcsel ssa_117, ssa_116, ssa_8 vec4 32 ssa_119 = fne -ssa_104.zzzz, -ssa_104.zzzz vec4 32 ssa_120 = bcsel ssa_119, ssa_116, ssa_118 vec4 32 ssa_121 = fne -ssa_115, -ssa_115 vec4 32 ssa_122 = bcsel ssa_121, ssa_120, ssa_116 vec4 32 ssa_123 = fadd ssa_122, ssa_114.xxxx vec4 32 ssa_124 = vec4 ssa_123.x, ssa_90.x, ssa_101.x, ssa_44.y /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ vec4 32 ssa_125 = phi block_7: ssa_113, block_8: ssa_106 vec4 32 ssa_126 = phi block_7: ssa_124, block_8: ssa_102 vec4 32 ssa_127 = intrinsic load_ubo (ssa_2, ssa_39) () () vec4 32 ssa_128 = fmul ssa_127.wzyx, ssa_126 vec4 32 ssa_129 = fne ssa_126, ssa_126 vec4 32 ssa_130 = bcsel ssa_129, ssa_128, ssa_8 vec4 32 ssa_131 = fne ssa_127.wzyx, ssa_127.wzyx vec4 32 ssa_132 = bcsel ssa_131, ssa_128, ssa_130 vec4 32 ssa_133 = fne ssa_128, ssa_128 vec4 32 ssa_134 = bcsel ssa_133, ssa_132, ssa_128 vec4 32 ssa_135 = fdot_replicated4 ssa_134, ssa_9 vec1 32 ssa_136 = load_const (0x00000150 /* 0.000000 */) vec4 32 ssa_137 = intrinsic load_ubo (ssa_2, ssa_136) () () vec4 32 ssa_138 = fmul ssa_137.wzyx, ssa_126 vec4 32 ssa_139 = bcsel ssa_129, ssa_138, ssa_8 vec4 32 ssa_140 = fne ssa_137.wzyx, ssa_137.wzyx vec4 32 ssa_141 = bcsel ssa_140, ssa_138, ssa_139 vec4 32 ssa_142 = fne ssa_138, ssa_138 vec4 32 ssa_143 = bcsel ssa_142, ssa_141, ssa_138 vec4 32 ssa_144 = fdot_replicated4 ssa_143, ssa_9 vec1 32 ssa_145 = load_const (0x00000160 /* 0.000000 */) vec4 32 ssa_146 = intrinsic load_ubo (ssa_2, ssa_145) () () vec4 32 ssa_147 = fmul ssa_146.wzyx, ssa_126 vec4 32 ssa_148 = bcsel ssa_129, ssa_147, ssa_8 vec4 32 ssa_149 = fne ssa_146.wzyx, ssa_146.wzyx vec4 32 ssa_150 = bcsel ssa_149, ssa_147, ssa_148 vec4 32 ssa_151 = fne ssa_147, ssa_147 vec4 32 ssa_152 = bcsel ssa_151, ssa_150, ssa_147 vec4 32 ssa_153 = fdot_replicated4 ssa_152, ssa_9 vec1 32 ssa_154 = load_const (0x00000170 /* 0.000000 */) vec4 32 ssa_155 = intrinsic load_ubo (ssa_2, ssa_154) () () vec4 32 ssa_156 = fmul ssa_155.wzyx, ssa_126 vec4 32 ssa_157 = bcsel ssa_129, ssa_156, ssa_8 vec4 32 ssa_158 = fne ssa_155.wzyx, ssa_155.wzyx vec4 32 ssa_159 = bcsel ssa_158, ssa_156, ssa_157 vec4 32 ssa_160 = fne ssa_156, ssa_156 vec4 32 ssa_161 = bcsel ssa_160, ssa_159, ssa_156 vec4 32 ssa_162 = fdot_replicated4 ssa_161, ssa_9 vec4 32 ssa_163 = vec4 ssa_135.x, ssa_144.x, ssa_153.x, ssa_162.x vec2 32 ssa_164 = fge ssa_44.yy, ssa_41.xx vec1 32 ssa_165 = ball_iequal2 ssa_164, ssa_13 /* succs: block_10 block_11 */ if ssa_165 { block block_10: /* preds: block_9 */ vec4 32 ssa_166 = vec4 ssa_44.y, ssa_44.y, ssa_44.y, ssa_41.x /* succs: block_15 */ } else { block block_11: /* preds: block_9 */ vec4 32 ssa_167 = f2i32 ssa_41.xxxx vec3 32 ssa_168 = vec3 ssa_46.x, ssa_167.y, ssa_31.z vec1 32 ssa_169 = iadd ssa_5, ssa_167.y vec1 32 ssa_170 = ishl ssa_169, ssa_37 vec1 32 ssa_171 = iadd ssa_39, ssa_170 vec4 32 ssa_172 = intrinsic load_ubo (ssa_2, ssa_171) () () vec1 32 ssa_173 = iadd ssa_10, ssa_167.y vec1 32 ssa_174 = ishl ssa_173, ssa_37 vec1 32 ssa_175 = iadd ssa_39, ssa_174 vec4 32 ssa_176 = intrinsic load_ubo (ssa_2, ssa_175) () () vec1 32 ssa_177 = iadd ssa_11, ssa_167.y vec1 32 ssa_178 = ishl ssa_177, ssa_37 vec1 32 ssa_179 = iadd ssa_39, ssa_178 
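Every load_ubo in this NIR is fed by the same address computation: a small constant vec4 slot index (0x20 in ssa_5, 0x21 in ssa_10, 0x22 in ssa_11, and so on) is added to an index derived from the input via f2i32, shifted left by 4 (ssa_37) to turn vec4 slots into bytes, and added to 0x140 (ssa_39), i.e. the uniform array appears to start 320 bytes into UBO binding 0; 320 is also the constant the GLSL IR above adds to its ubo_load offsets. The ssa_177/ssa_178/ssa_179 sequence immediately above, consumed by the load_ubo that follows, is one instance. A short C sketch of that arithmetic, with illustrative names that are not taken from the dump:

#include <stdint.h>

/* Byte offset of vec4 slot (base_slot + rel_index) in UBO binding 0,
 * assuming the vec4 array starts at byte 320 as the iadd/ishl/iadd
 * sequences around each load_ubo suggest. */
static uint32_t uniform_byte_offset(uint32_t base_slot, int32_t rel_index)
{
    uint32_t slot = base_slot + (uint32_t)rel_index; /* e.g. 0x20 + index.y */
    return 320u + (slot << 4);                       /* 16 bytes per vec4 */
}

The dot products here follow the shape sketched earlier: a guarded element-wise fmul/fne/bcsel run reduced by fdot_replicated4 against ssa_9, the all-ones constant.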
vec4 32 ssa_180 = intrinsic load_ubo (ssa_2, ssa_179) () () vec1 32 ssa_181 = iadd ssa_14, ssa_167.y vec1 32 ssa_182 = ishl ssa_181, ssa_37 vec1 32 ssa_183 = iadd ssa_39, ssa_182 vec4 32 ssa_184 = intrinsic load_ubo (ssa_2, ssa_183) () () vec4 32 ssa_185 = vec4 ssa_34.z, ssa_34.w, ssa_125.z, ssa_125.w vec4 32 ssa_186 = ffloor ssa_41.zzzz vec4 32 ssa_187 = fadd ssa_41.zzzz, -ssa_186.xxxx vec2 32 ssa_188 = fge ssa_187.xx, ssa_44.ww vec1 32 ssa_189 = imov ssa_188.x /* succs: block_12 block_13 */ if ssa_189 { block block_12: /* preds: block_11 */ vec1 32 ssa_190 = frcp ssa_58.w vec1 32 ssa_191 = frcp ssa_58.z vec4 32 ssa_192 = fadd ssa_66.xyyy, -ssa_58.yxxx vec4 32 ssa_193 = vec4 ssa_192.x, ssa_192.y, ssa_190, ssa_191 vec4 32 ssa_194 = fmul ssa_192.xyyy, ssa_193.zwww vec4 32 ssa_195 = fne ssa_193.zwww, ssa_193.zwww vec4 32 ssa_196 = bcsel ssa_195, ssa_194, ssa_8 vec4 32 ssa_197 = fne ssa_192.xyyy, ssa_192.xyyy vec4 32 ssa_198 = bcsel ssa_197, ssa_194, ssa_196 vec4 32 ssa_199 = fne ssa_194, ssa_194 vec4 32 ssa_200 = bcsel ssa_199, ssa_198, ssa_194 vec4 32 ssa_201 = vec4 ssa_200.x, ssa_200.y, ssa_190, ssa_191 /* succs: block_14 */ } else { block block_13: /* preds: block_11 */ /* succs: block_14 */ } block block_14: /* preds: block_12 block_13 */ vec4 32 ssa_202 = phi block_12: ssa_201, block_13: ssa_185 vec4 32 ssa_203 = fmax -ssa_202.xyyy, ssa_202.xyyy vec4 32 ssa_204 = vec4 ssa_203.x, ssa_203.y, ssa_202.z, ssa_202.w vec4 32 ssa_205 = fadd ssa_176.wzyx, -ssa_172.wzyx vec4 32 ssa_206 = fmul ssa_205, ssa_203.xxxx vec4 32 ssa_207 = fne ssa_203.xxxx, ssa_203.xxxx vec4 32 ssa_208 = bcsel ssa_207, ssa_206, ssa_8 vec4 32 ssa_209 = fne ssa_205, ssa_205 vec4 32 ssa_210 = bcsel ssa_209, ssa_206, ssa_208 vec4 32 ssa_211 = fne ssa_206, ssa_206 vec4 32 ssa_212 = bcsel ssa_211, ssa_210, ssa_206 vec4 32 ssa_213 = fadd ssa_212, ssa_172.wzyx vec4 32 ssa_214 = fadd ssa_184.wzyx, -ssa_180.wzyx vec4 32 ssa_215 = fmul ssa_214, ssa_203.xxxx vec4 32 ssa_216 = bcsel ssa_207, ssa_215, ssa_8 vec4 32 ssa_217 = fne ssa_214, ssa_214 vec4 32 ssa_218 = bcsel ssa_217, ssa_215, ssa_216 vec4 32 ssa_219 = fne ssa_215, ssa_215 vec4 32 ssa_220 = bcsel ssa_219, ssa_218, ssa_215 vec4 32 ssa_221 = fadd ssa_220, ssa_180.wzyx vec4 32 ssa_222 = fadd ssa_221, -ssa_213 vec4 32 ssa_223 = fmul ssa_222, ssa_203.yyyy vec4 32 ssa_224 = fne ssa_203.yyyy, ssa_203.yyyy vec4 32 ssa_225 = bcsel ssa_224, ssa_223, ssa_8 vec4 32 ssa_226 = fne ssa_222, ssa_222 vec4 32 ssa_227 = bcsel ssa_226, ssa_223, ssa_225 vec4 32 ssa_228 = fne ssa_223, ssa_223 vec4 32 ssa_229 = bcsel ssa_228, ssa_227, ssa_223 vec4 32 ssa_230 = fadd ssa_229, ssa_213 /* succs: block_15 */ } block block_15: /* preds: block_10 block_14 */ vec4 32 ssa_231 = phi block_10: ssa_166, block_14: ssa_230 vec4 32 ssa_232 = phi block_10: ssa_125, block_14: ssa_204 vec3 32 ssa_233 = phi block_10: ssa_47, block_14: ssa_168 vec4 32 ssa_234 = fmul ssa_44.zyzy, ssa_41.yyyy vec4 32 ssa_235 = fne ssa_41.yyyy, ssa_41.yyyy vec4 32 ssa_236 = bcsel ssa_235, ssa_234, ssa_8 vec4 32 ssa_237 = fne ssa_44.zyzy, ssa_44.zyzy vec4 32 ssa_238 = bcsel ssa_237, ssa_234, ssa_236 vec4 32 ssa_239 = fne ssa_234, ssa_234 vec4 32 ssa_240 = bcsel ssa_239, ssa_238, ssa_234 vec4 32 ssa_241 = ffloor ssa_240 vec4 32 ssa_242 = fadd ssa_240.zwww, -ssa_241.xyyy vec4 32 ssa_243 = vec4 ssa_242.x, ssa_242.y, ssa_240.z, ssa_240.w vec4 32 ssa_244 = fmul ssa_44.zzzz, ssa_243 vec4 32 ssa_245 = fne ssa_243, ssa_243 vec4 32 ssa_246 = bcsel ssa_245, ssa_244, ssa_8 vec4 32 ssa_247 = fne ssa_44.zzzz, ssa_44.zzzz vec4 32 ssa_248 = bcsel 
ssa_247, ssa_244, ssa_246 vec4 32 ssa_249 = fne ssa_244, ssa_244 vec4 32 ssa_250 = bcsel ssa_249, ssa_248, ssa_244 vec1 32 ssa_251 = load_const (0x000001a0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_2, ssa_251) () () vec4 32 ssa_253 = imov ssa_252.wzyx vec4 32 ssa_254 = f2i32 ssa_41.yyyy vec4 32 ssa_255 = ffloor ssa_41.wwww vec4 32 ssa_256 = vec4 ssa_255.x, ssa_25.y, ssa_25.z, ssa_25.w vec4 32 ssa_257 = fadd ssa_41.wwww, -ssa_256 vec4 32 ssa_258 = vec4 ssa_257.x, ssa_25.y, ssa_25.z, ssa_25.w vec2 32 ssa_259 = fge ssa_257.xx, ssa_44.ww vec1 32 ssa_260 = imov ssa_259.x /* succs: block_16 block_38 */ if ssa_260 { block block_16: /* preds: block_15 */ vec4 32 ssa_261 = fadd ssa_44.xyyy, ssa_34.zwww vec4 32 ssa_262 = vec4 ssa_261.x, ssa_261.y, ssa_26.z, ssa_26.w vec4 32 ssa_263 = fmul ssa_44.zzzz, ssa_41.wwww vec4 32 ssa_264 = fne ssa_41.wwww, ssa_41.wwww vec4 32 ssa_265 = bcsel ssa_264, ssa_263, ssa_8 vec4 32 ssa_266 = bcsel ssa_247, ssa_263, ssa_265 vec4 32 ssa_267 = fne ssa_263, ssa_263 vec4 32 ssa_268 = bcsel ssa_267, ssa_266, ssa_263 vec4 32 ssa_269 = vec4 ssa_268.x, ssa_268.y, ssa_25.z, ssa_25.w vec4 32 ssa_270 = ffloor ssa_269 vec4 32 ssa_271 = fadd ssa_268.xxxx, -ssa_270.yyyy vec2 32 ssa_272 = fge ssa_271.xx, ssa_44.ww vec4 32 ssa_273 = bcsel ssa_272.xxxx, ssa_252.zwyx, ssa_252.wzyx vec4 32 ssa_274 = fmul ssa_273, ssa_51.wzyx vec4 32 ssa_275 = fne ssa_51.wzyx, ssa_51.wzyx vec4 32 ssa_276 = bcsel ssa_275, ssa_274, ssa_8 vec4 32 ssa_277 = fne ssa_273, ssa_273 vec4 32 ssa_278 = bcsel ssa_277, ssa_274, ssa_276 vec4 32 ssa_279 = fne ssa_274, ssa_274 vec4 32 ssa_280 = bcsel ssa_279, ssa_278, ssa_274 vec4 32 ssa_281 = fmul ssa_44.zzzz, ssa_271.xxxx vec4 32 ssa_282 = fne ssa_271.xxxx, ssa_271.xxxx vec4 32 ssa_283 = bcsel ssa_282, ssa_281, ssa_8 vec4 32 ssa_284 = bcsel ssa_247, ssa_281, ssa_283 vec4 32 ssa_285 = fne ssa_281, ssa_281 vec4 32 ssa_286 = bcsel ssa_285, ssa_284, ssa_281 vec4 32 ssa_287 = fmul ssa_44.zyzy, ssa_286.xxxx vec4 32 ssa_288 = fne ssa_286.xxxx, ssa_286.xxxx vec4 32 ssa_289 = bcsel ssa_288, ssa_287, ssa_8 vec4 32 ssa_290 = bcsel ssa_237, ssa_287, ssa_289 vec4 32 ssa_291 = fne ssa_287, ssa_287 vec4 32 ssa_292 = bcsel ssa_291, ssa_290, ssa_287 vec4 32 ssa_293 = ffloor ssa_292 vec4 32 ssa_294 = fadd ssa_292.xyyy, -ssa_293.zwww vec2 32 ssa_295 = fge ssa_294.xy, ssa_44.ww vec1 32 ssa_296 = imov ssa_295.y /* succs: block_17 block_18 */ if ssa_296 { block block_17: /* preds: block_16 */ vec4 32 ssa_297 = fmul ssa_261.xxxx, ssa_280.xxxx vec4 32 ssa_298 = fne ssa_280.xxxx, ssa_280.xxxx vec4 32 ssa_299 = bcsel ssa_298, ssa_297, ssa_8 vec4 32 ssa_300 = fne ssa_261.xxxx, ssa_261.xxxx vec4 32 ssa_301 = bcsel ssa_300, ssa_297, ssa_299 vec4 32 ssa_302 = fne ssa_297, ssa_297 vec4 32 ssa_303 = bcsel ssa_302, ssa_301, ssa_297 vec4 32 ssa_304 = vec4 ssa_303.x, ssa_261.y, ssa_26.z, ssa_26.w /* succs: block_19 */ } else { block block_18: /* preds: block_16 */ /* succs: block_19 */ } block block_19: /* preds: block_17 block_18 */ vec4 32 ssa_305 = phi block_17: ssa_304, block_18: ssa_262 vec1 32 ssa_306 = imov ssa_295.x /* succs: block_20 block_21 */ if ssa_306 { block block_20: /* preds: block_19 */ vec4 32 ssa_307 = fmul ssa_305.yyyy, ssa_280.yyyy vec4 32 ssa_308 = fne ssa_280.yyyy, ssa_280.yyyy vec4 32 ssa_309 = bcsel ssa_308, ssa_307, ssa_8 vec4 32 ssa_310 = fne ssa_305.yyyy, ssa_305.yyyy vec4 32 ssa_311 = bcsel ssa_310, ssa_307, ssa_309 vec4 32 ssa_312 = fne ssa_307, ssa_307 vec4 32 ssa_313 = bcsel ssa_312, ssa_311, ssa_307 vec4 32 ssa_314 = fadd ssa_313, ssa_44.yyyy vec4 32 
ssa_315 = fadd ssa_314.yyyy, -ssa_280.yyyy vec4 32 ssa_316 = vec4 ssa_305.x, ssa_315.y, ssa_305.z, ssa_305.w /* succs: block_22 */ } else { block block_21: /* preds: block_19 */ /* succs: block_22 */ } block block_22: /* preds: block_20 block_21 */ vec4 32 ssa_317 = phi block_20: ssa_316, block_21: ssa_305 vec4 32 ssa_318 = fadd ssa_44.yyyy, -ssa_280.xyyy vec4 32 ssa_319 = vec4 ssa_318.x, ssa_318.y, ssa_273.z, ssa_273.w vec4 32 ssa_320 = fmul ssa_44.zzzz, ssa_294.xxxx vec4 32 ssa_321 = fne ssa_294.xxxx, ssa_294.xxxx vec4 32 ssa_322 = bcsel ssa_321, ssa_320, ssa_8 vec4 32 ssa_323 = bcsel ssa_247, ssa_320, ssa_322 vec4 32 ssa_324 = fne ssa_320, ssa_320 vec4 32 ssa_325 = bcsel ssa_324, ssa_323, ssa_320 vec4 32 ssa_326 = fmul ssa_44.zyzy, ssa_325.xxxx vec4 32 ssa_327 = fne ssa_325.xxxx, ssa_325.xxxx vec4 32 ssa_328 = bcsel ssa_327, ssa_326, ssa_8 vec4 32 ssa_329 = bcsel ssa_237, ssa_326, ssa_328 vec4 32 ssa_330 = fne ssa_326, ssa_326 vec4 32 ssa_331 = bcsel ssa_330, ssa_329, ssa_326 vec4 32 ssa_332 = ffloor ssa_331 vec4 32 ssa_333 = fadd ssa_331.xyyy, -ssa_332.zwww vec2 32 ssa_334 = fge ssa_333.xy, ssa_44.ww vec1 32 ssa_335 = imov ssa_334.y /* succs: block_23 block_24 */ if ssa_335 { block block_23: /* preds: block_22 */ vec4 32 ssa_336 = fadd ssa_317.xxxx, ssa_318.xxxx vec4 32 ssa_337 = vec4 ssa_336.x, ssa_317.y, ssa_317.z, ssa_317.w /* succs: block_25 */ } else { block block_24: /* preds: block_22 */ /* succs: block_25 */ } block block_25: /* preds: block_23 block_24 */ vec4 32 ssa_338 = phi block_23: ssa_337, block_24: ssa_317 vec1 32 ssa_339 = imov ssa_334.x /* succs: block_26 block_27 */ if ssa_339 { block block_26: /* preds: block_25 */ vec4 32 ssa_340 = fadd ssa_338.yyyy, -ssa_318.yyyy vec4 32 ssa_341 = vec4 ssa_338.x, ssa_340.y, ssa_338.z, ssa_338.w /* succs: block_28 */ } else { block block_27: /* preds: block_25 */ /* succs: block_28 */ } block block_28: /* preds: block_26 block_27 */ vec4 32 ssa_342 = phi block_26: ssa_341, block_27: ssa_338 vec4 32 ssa_343 = fmul ssa_44.zzzz, ssa_333.xxxx vec4 32 ssa_344 = fne ssa_333.xxxx, ssa_333.xxxx vec4 32 ssa_345 = bcsel ssa_344, ssa_343, ssa_8 vec4 32 ssa_346 = bcsel ssa_247, ssa_343, ssa_345 vec4 32 ssa_347 = fne ssa_343, ssa_343 vec4 32 ssa_348 = bcsel ssa_347, ssa_346, ssa_343 vec4 32 ssa_349 = fmul ssa_44.zyzy, ssa_348.xxxx vec4 32 ssa_350 = fne ssa_348.xxxx, ssa_348.xxxx vec4 32 ssa_351 = bcsel ssa_350, ssa_349, ssa_8 vec4 32 ssa_352 = bcsel ssa_237, ssa_349, ssa_351 vec4 32 ssa_353 = fne ssa_349, ssa_349 vec4 32 ssa_354 = bcsel ssa_353, ssa_352, ssa_349 vec4 32 ssa_355 = ffloor ssa_354 vec4 32 ssa_356 = fadd ssa_354.xyyy, -ssa_355.zwww vec2 32 ssa_357 = fge ssa_356.xy, ssa_44.ww vec1 32 ssa_358 = imov ssa_357.y /* succs: block_29 block_30 */ if ssa_358 { block block_29: /* preds: block_28 */ vec4 32 ssa_359 = fadd ssa_44.yyyy, -ssa_342.xxxx vec4 32 ssa_360 = vec4 ssa_359.x, ssa_342.y, ssa_342.z, ssa_342.w /* succs: block_31 */ } else { block block_30: /* preds: block_28 */ /* succs: block_31 */ } block block_31: /* preds: block_29 block_30 */ vec4 32 ssa_361 = phi block_29: ssa_360, block_30: ssa_342 vec1 32 ssa_362 = imov ssa_357.x /* succs: block_32 block_33 */ if ssa_362 { block block_32: /* preds: block_31 */ vec4 32 ssa_363 = fadd ssa_44.yyyy, -ssa_361.yyyy vec4 32 ssa_364 = vec4 ssa_361.x, ssa_363.y, ssa_361.z, ssa_361.w /* succs: block_34 */ } else { block block_33: /* preds: block_31 */ /* succs: block_34 */ } block block_34: /* preds: block_32 block_33 */ vec4 32 ssa_365 = phi block_32: ssa_364, block_33: ssa_361 vec4 32 
ssa_366 = vec4 ssa_268.x, ssa_268.y, ssa_355.z, ssa_355.w vec4 32 ssa_367 = ffloor ssa_366 vec4 32 ssa_368 = fadd ssa_268.xxxx, -ssa_367.yyyy vec4 32 ssa_369 = vec4 ssa_368.x, ssa_367.y, ssa_355.z, ssa_355.w vec2 32 ssa_370 = fge ssa_368.xx, ssa_44.ww vec1 32 ssa_371 = imov ssa_370.x /* succs: block_35 block_36 */ if ssa_371 { block block_35: /* preds: block_34 */ vec4 32 ssa_372 = fadd ssa_44.yyyy, -ssa_365.yxxx vec4 32 ssa_373 = vec4 ssa_372.x, ssa_372.y, ssa_365.z, ssa_365.w /* succs: block_37 */ } else { block block_36: /* preds: block_34 */ /* succs: block_37 */ } block block_37: /* preds: block_35 block_36 */ vec4 32 ssa_374 = phi block_35: ssa_373, block_36: ssa_365 vec4 32 ssa_375 = fadd ssa_44.yyyy, -ssa_374.yyyy vec4 32 ssa_376 = vec4 ssa_374.x, ssa_375.y, ssa_374.z, ssa_374.w /* succs: block_39 */ } else { block block_38: /* preds: block_15 */ /* succs: block_39 */ } block block_39: /* preds: block_37 block_38 */ vec4 32 ssa_377 = phi block_37: ssa_319, block_38: ssa_253 vec4 32 ssa_378 = phi block_37: ssa_369, block_38: ssa_258 vec4 32 ssa_379 = phi block_37: ssa_376, block_38: ssa_26 vec1 32 ssa_380 = flt ssa_250.y, ssa_44.y /* succs: block_40 block_44 */ if ssa_380 { block block_40: /* preds: block_39 */ vec4 32 ssa_381 = vec4 ssa_255.x, ssa_378.y, ssa_378.z, ssa_378.w vec4 32 ssa_382 = fadd ssa_41.wwww, -ssa_381 vec1 32 ssa_383 = iadd ssa_5, ssa_254.y vec1 32 ssa_384 = ishl ssa_383, ssa_37 vec1 32 ssa_385 = iadd ssa_39, ssa_384 vec4 32 ssa_386 = intrinsic load_ubo (ssa_2, ssa_385) () () vec4 32 ssa_387 = imov ssa_386.wzyx vec2 32 ssa_388 = fge ssa_382.xx, ssa_44.ww vec1 32 ssa_389 = imov ssa_388.x /* succs: block_41 block_42 */ if ssa_389 { block block_41: /* preds: block_40 */ vec4 32 ssa_390 = fmul ssa_379.xyyy, ssa_386.wzzz vec4 32 ssa_391 = fne ssa_386.wzzz, ssa_386.wzzz vec4 32 ssa_392 = bcsel ssa_391, ssa_390, ssa_8 vec4 32 ssa_393 = fne ssa_379.xyyy, ssa_379.xyyy vec4 32 ssa_394 = bcsel ssa_393, ssa_390, ssa_392 vec4 32 ssa_395 = fne ssa_390, ssa_390 vec4 32 ssa_396 = bcsel ssa_395, ssa_394, ssa_390 vec4 32 ssa_397 = fadd ssa_396, ssa_386.yxxx vec4 32 ssa_398 = fmul ssa_397.xyyy, ssa_377.zwww vec4 32 ssa_399 = fne ssa_377.zwww, ssa_377.zwww vec4 32 ssa_400 = bcsel ssa_399, ssa_398, ssa_8 vec4 32 ssa_401 = fne ssa_397.xyyy, ssa_397.xyyy vec4 32 ssa_402 = bcsel ssa_401, ssa_398, ssa_400 vec4 32 ssa_403 = fne ssa_398, ssa_398 vec4 32 ssa_404 = bcsel ssa_403, ssa_402, ssa_398 vec4 32 ssa_405 = fadd ssa_44.yyyy, -ssa_404.yyyy vec4 32 ssa_406 = vec4 ssa_404.x, ssa_405.y, ssa_41.y, ssa_232.w /* succs: block_43 */ } else { block block_42: /* preds: block_40 */ vec4 32 ssa_407 = vec4 ssa_386.w, ssa_250.y, ssa_41.y, ssa_232.w vec4 32 ssa_408 = vec4 ssa_386.y, ssa_250.y, ssa_41.y, ssa_232.w vec2 32 ssa_409 = fne ssa_44.xx, ssa_34.zw vec4 32 ssa_410 = bcsel ssa_409.xxxx, ssa_408, ssa_407 vec4 32 ssa_411 = vec4 ssa_410.x, ssa_386.z, ssa_410.z, ssa_410.w vec4 32 ssa_412 = vec4 ssa_410.x, ssa_386.x, ssa_410.z, ssa_410.w vec4 32 ssa_413 = bcsel ssa_409.yyyy, ssa_412, ssa_411 /* succs: block_43 */ } block block_43: /* preds: block_41 block_42 */ vec4 32 ssa_414 = phi block_41: ssa_406, block_42: ssa_413 vec4 32 ssa_415 = fadd ssa_44.yyyy, ssa_41.yyyy vec4 32 ssa_416 = vec4 ssa_414.x, ssa_414.y, ssa_415.z, ssa_414.w /* succs: block_54 */ } else { block block_44: /* preds: block_39 */ vec1 32 ssa_417 = flt ssa_250.x, ssa_44.y /* succs: block_45 block_52 */ if ssa_417 { block block_45: /* preds: block_44 */ vec4 32 ssa_418 = vec4 ssa_255.x, ssa_378.y, ssa_378.z, ssa_378.w vec4 32 ssa_419 
= fadd ssa_41.wwww, -ssa_418 vec4 32 ssa_420 = vec4 ssa_419.x, ssa_378.y, ssa_378.z, ssa_378.w vec2 32 ssa_421 = fge ssa_419.xx, ssa_44.ww vec1 32 ssa_422 = imov ssa_421.x /* succs: block_46 block_47 */ if ssa_422 { block block_46: /* preds: block_45 */ vec4 32 ssa_423 = vec4 ssa_379.x, ssa_379.y, ssa_44.x, ssa_44.y vec1 32 ssa_424 = iadd ssa_5, ssa_254.y vec1 32 ssa_425 = ishl ssa_424, ssa_37 vec1 32 ssa_426 = iadd ssa_39, ssa_425 vec4 32 ssa_427 = intrinsic load_ubo (ssa_2, ssa_426) () () vec4 32 ssa_428 = fmul ssa_427.wzyx, ssa_423 vec4 32 ssa_429 = fne ssa_423, ssa_423 vec4 32 ssa_430 = bcsel ssa_429, ssa_428, ssa_8 vec4 32 ssa_431 = fne ssa_427.wzyx, ssa_427.wzyx vec4 32 ssa_432 = bcsel ssa_431, ssa_428, ssa_430 vec4 32 ssa_433 = fne ssa_428, ssa_428 vec4 32 ssa_434 = bcsel ssa_433, ssa_432, ssa_428 vec4 32 ssa_435 = fdot_replicated4 ssa_434, ssa_9 vec1 32 ssa_436 = iadd ssa_10, ssa_254.y vec1 32 ssa_437 = ishl ssa_436, ssa_37 vec1 32 ssa_438 = iadd ssa_39, ssa_437 vec4 32 ssa_439 = intrinsic load_ubo (ssa_2, ssa_438) () () vec4 32 ssa_440 = fmul ssa_439.wzyx, ssa_423 vec4 32 ssa_441 = bcsel ssa_429, ssa_440, ssa_8 vec4 32 ssa_442 = fne ssa_439.wzyx, ssa_439.wzyx vec4 32 ssa_443 = bcsel ssa_442, ssa_440, ssa_441 vec4 32 ssa_444 = fne ssa_440, ssa_440 vec4 32 ssa_445 = bcsel ssa_444, ssa_443, ssa_440 vec4 32 ssa_446 = fdot_replicated4 ssa_445, ssa_9 vec4 32 ssa_447 = vec4 ssa_435.x, ssa_446.x, ssa_41.y, ssa_232.w vec4 32 ssa_448 = fmul ssa_447.xyyy, ssa_377.zwww vec4 32 ssa_449 = fne ssa_377.zwww, ssa_377.zwww vec4 32 ssa_450 = bcsel ssa_449, ssa_448, ssa_8 vec4 32 ssa_451 = fne ssa_447.xyyy, ssa_447.xyyy vec4 32 ssa_452 = bcsel ssa_451, ssa_448, ssa_450 vec4 32 ssa_453 = fne ssa_448, ssa_448 vec4 32 ssa_454 = bcsel ssa_453, ssa_452, ssa_448 vec4 32 ssa_455 = fadd ssa_44.yyyy, -ssa_454.yyyy vec4 32 ssa_456 = vec4 ssa_454.x, ssa_455.y, ssa_41.y, ssa_232.w /* succs: block_51 */ } else { block block_47: /* preds: block_45 */ vec1 32 ssa_457 = iadd ssa_5, ssa_254.y vec1 32 ssa_458 = ishl ssa_457, ssa_37 vec1 32 ssa_459 = iadd ssa_39, ssa_458 vec4 32 ssa_460 = intrinsic load_ubo (ssa_2, ssa_459) () () vec1 32 ssa_461 = iadd ssa_10, ssa_254.y vec1 32 ssa_462 = ishl ssa_461, ssa_37 vec1 32 ssa_463 = iadd ssa_39, ssa_462 vec4 32 ssa_464 = intrinsic load_ubo (ssa_2, ssa_463) () () vec4 32 ssa_465 = imov ssa_464.wzyx vec2 32 ssa_466 = fne ssa_44.xx, ssa_34.zw vec1 32 ssa_467 = feq ssa_44.x, ssa_34.w /* succs: block_48 block_49 */ if ssa_467 { block block_48: /* preds: block_47 */ vec4 32 ssa_468 = vec4 ssa_460.w, ssa_460.z, ssa_41.y, ssa_232.w vec4 32 ssa_469 = vec4 ssa_464.y, ssa_464.x, ssa_41.y, ssa_232.w vec4 32 ssa_470 = bcsel ssa_466.xxxx, ssa_469, ssa_468 /* succs: block_50 */ } else { block block_49: /* preds: block_47 */ vec4 32 ssa_471 = vec4 ssa_464.w, ssa_464.z, ssa_41.y, ssa_232.w vec4 32 ssa_472 = vec4 ssa_460.y, ssa_460.x, ssa_41.y, ssa_232.w vec4 32 ssa_473 = bcsel ssa_466.xxxx, ssa_472, ssa_471 /* succs: block_50 */ } block block_50: /* preds: block_48 block_49 */ vec4 32 ssa_474 = phi block_48: ssa_470, block_49: ssa_473 /* succs: block_51 */ } block block_51: /* preds: block_46 block_50 */ vec4 32 ssa_475 = phi block_46: ssa_420, block_50: ssa_465 vec4 32 ssa_476 = phi block_46: ssa_423, block_50: ssa_379 vec4 32 ssa_477 = phi block_46: ssa_456, block_50: ssa_474 /* succs: block_53 */ } else { block block_52: /* preds: block_44 */ vec1 32 ssa_478 = iadd ssa_5, ssa_254.y vec1 32 ssa_479 = ishl ssa_478, ssa_37 vec1 32 ssa_480 = iadd ssa_39, ssa_479 vec4 32 ssa_481 = intrinsic 
load_ubo (ssa_2, ssa_480) () () vec4 32 ssa_482 = fmul ssa_481.wzyx, ssa_67 vec4 32 ssa_483 = bcsel ssa_73, ssa_482, ssa_8 vec4 32 ssa_484 = fne ssa_481.wzyx, ssa_481.wzyx vec4 32 ssa_485 = bcsel ssa_484, ssa_482, ssa_483 vec4 32 ssa_486 = fne ssa_482, ssa_482 vec4 32 ssa_487 = bcsel ssa_486, ssa_485, ssa_482 vec4 32 ssa_488 = fdot_replicated4 ssa_487, ssa_9 vec1 32 ssa_489 = iadd ssa_10, ssa_254.y vec1 32 ssa_490 = ishl ssa_489, ssa_37 vec1 32 ssa_491 = iadd ssa_39, ssa_490 vec4 32 ssa_492 = intrinsic load_ubo (ssa_2, ssa_491) () () vec4 32 ssa_493 = fmul ssa_492.wzyx, ssa_67 vec4 32 ssa_494 = bcsel ssa_73, ssa_493, ssa_8 vec4 32 ssa_495 = fne ssa_492.wzyx, ssa_492.wzyx vec4 32 ssa_496 = bcsel ssa_495, ssa_493, ssa_494 vec4 32 ssa_497 = fne ssa_493, ssa_493 vec4 32 ssa_498 = bcsel ssa_497, ssa_496, ssa_493 vec4 32 ssa_499 = fdot_replicated4 ssa_498, ssa_9 vec4 32 ssa_500 = vec4 ssa_488.x, ssa_499.x, ssa_41.y, ssa_232.w /* succs: block_53 */ } block block_53: /* preds: block_51 block_52 */ vec4 32 ssa_501 = phi block_51: ssa_475, block_52: ssa_378 vec4 32 ssa_502 = phi block_51: ssa_476, block_52: ssa_379 vec4 32 ssa_503 = phi block_51: ssa_477, block_52: ssa_500 vec4 32 ssa_504 = fadd ssa_44.zzzz, ssa_503.zzzz vec4 32 ssa_505 = vec4 ssa_503.x, ssa_503.y, ssa_504.z, ssa_503.w /* succs: block_54 */ } block block_54: /* preds: block_43 block_53 */ vec4 32 ssa_506 = phi block_43: ssa_387, block_53: ssa_501 vec4 32 ssa_507 = phi block_43: ssa_379, block_53: ssa_502 vec4 32 ssa_508 = phi block_43: ssa_416, block_53: ssa_505 vec1 32 ssa_509 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_510 = intrinsic load_ubo (ssa_2, ssa_509) () () vec1 32 ssa_511 = ine ssa_510, ssa_2 /* succs: block_55 block_56 */ if ssa_511 { block block_55: /* preds: block_54 */ vec4 32 ssa_512 = fadd ssa_44.yyyy, -ssa_508.xxxx vec4 32 ssa_513 = vec4 ssa_508.y, ssa_512.y, ssa_508.z, ssa_508.w /* succs: block_57 */ } else { block block_56: /* preds: block_54 */ /* succs: block_57 */ } block block_57: /* preds: block_55 block_56 */ vec4 32 ssa_514 = phi block_55: ssa_513, block_56: ssa_508 vec1 32 ssa_515 = intrinsic load_ubo (ssa_2, ssa_5) () () vec1 32 ssa_516 = ine ssa_515, ssa_2 /* succs: block_58 block_59 */ if ssa_516 { block block_58: /* preds: block_57 */ vec4 32 ssa_517 = fadd ssa_44.yyyy, -ssa_514.yxxx vec4 32 ssa_518 = vec4 ssa_517.x, ssa_517.y, ssa_514.z, ssa_514.w /* succs: block_60 */ } else { block block_59: /* preds: block_57 */ /* succs: block_60 */ } block block_60: /* preds: block_58 block_59 */ vec4 32 ssa_519 = phi block_58: ssa_518, block_59: ssa_514 vec4 32 ssa_520 = imov ssa_519.xyyy vec4 32 ssa_521 = fmul ssa_44.zyzy, ssa_250.xxxx vec4 32 ssa_522 = fne ssa_250.xxxx, ssa_250.xxxx vec4 32 ssa_523 = bcsel ssa_522, ssa_521, ssa_8 vec4 32 ssa_524 = bcsel ssa_237, ssa_521, ssa_523 vec4 32 ssa_525 = fne ssa_521, ssa_521 vec4 32 ssa_526 = bcsel ssa_525, ssa_524, ssa_521 vec4 32 ssa_527 = ffloor ssa_526 vec4 32 ssa_528 = fadd ssa_526.zwww, -ssa_527.xyyy vec4 32 ssa_529 = vec4 ssa_528.x, ssa_528.y, ssa_526.z, ssa_526.w vec4 32 ssa_530 = fmul ssa_44.zzzz, ssa_529 vec4 32 ssa_531 = fne ssa_529, ssa_529 vec4 32 ssa_532 = bcsel ssa_531, ssa_530, ssa_8 vec4 32 ssa_533 = bcsel ssa_247, ssa_530, ssa_532 vec4 32 ssa_534 = fne ssa_530, ssa_530 vec4 32 ssa_535 = bcsel ssa_534, ssa_533, ssa_530 vec1 32 ssa_536 = load_const (0x000001b0 /* 0.000000 */) vec4 32 ssa_537 = intrinsic load_ubo (ssa_2, ssa_536) () () vec4 32 ssa_538 = imov ssa_537.wzyx vec4 32 ssa_539 = f2i32 ssa_519.zzzz vec4 32 ssa_540 = vec4 ssa_255.x, 
ssa_506.y, ssa_506.z, ssa_506.w vec4 32 ssa_541 = fadd ssa_41.wwww, -ssa_540 vec4 32 ssa_542 = vec4 ssa_541.x, ssa_506.y, ssa_506.z, ssa_506.w vec2 32 ssa_543 = fge ssa_541.xx, ssa_44.ww vec1 32 ssa_544 = imov ssa_543.x /* succs: block_61 block_83 */ if ssa_544 { block block_61: /* preds: block_60 */ vec4 32 ssa_545 = fadd ssa_44.xyyy, ssa_34.zwww vec4 32 ssa_546 = vec4 ssa_545.x, ssa_545.y, ssa_507.z, ssa_507.w vec4 32 ssa_547 = vec4 ssa_252.w, ssa_252.z, ssa_537.y, ssa_537.x vec4 32 ssa_548 = fmul ssa_44.zzzz, ssa_41.wwww vec4 32 ssa_549 = fne ssa_41.wwww, ssa_41.wwww vec4 32 ssa_550 = bcsel ssa_549, ssa_548, ssa_8 vec4 32 ssa_551 = bcsel ssa_247, ssa_548, ssa_550 vec4 32 ssa_552 = fne ssa_548, ssa_548 vec4 32 ssa_553 = bcsel ssa_552, ssa_551, ssa_548 vec4 32 ssa_554 = vec4 ssa_553.x, ssa_553.y, ssa_506.z, ssa_506.w vec4 32 ssa_555 = ffloor ssa_554 vec4 32 ssa_556 = fadd ssa_553.xxxx, -ssa_555.yyyy vec4 32 ssa_557 = vec4 ssa_252.z, ssa_252.w, ssa_537.y, ssa_537.x vec2 32 ssa_558 = fge ssa_556.xx, ssa_44.ww vec4 32 ssa_559 = bcsel ssa_558.xxxx, ssa_557, ssa_547 vec4 32 ssa_560 = fmul ssa_559, ssa_51.wzyx vec4 32 ssa_561 = fne ssa_51.wzyx, ssa_51.wzyx vec4 32 ssa_562 = bcsel ssa_561, ssa_560, ssa_8 vec4 32 ssa_563 = fne ssa_559, ssa_559 vec4 32 ssa_564 = bcsel ssa_563, ssa_560, ssa_562 vec4 32 ssa_565 = fne ssa_560, ssa_560 vec4 32 ssa_566 = bcsel ssa_565, ssa_564, ssa_560 vec4 32 ssa_567 = fmul ssa_44.zzzz, ssa_556.xxxx vec4 32 ssa_568 = fne ssa_556.xxxx, ssa_556.xxxx vec4 32 ssa_569 = bcsel ssa_568, ssa_567, ssa_8 vec4 32 ssa_570 = bcsel ssa_247, ssa_567, ssa_569 vec4 32 ssa_571 = fne ssa_567, ssa_567 vec4 32 ssa_572 = bcsel ssa_571, ssa_570, ssa_567 vec4 32 ssa_573 = fmul ssa_44.zyzy, ssa_572.xxxx vec4 32 ssa_574 = fne ssa_572.xxxx, ssa_572.xxxx vec4 32 ssa_575 = bcsel ssa_574, ssa_573, ssa_8 vec4 32 ssa_576 = bcsel ssa_237, ssa_573, ssa_575 vec4 32 ssa_577 = fne ssa_573, ssa_573 vec4 32 ssa_578 = bcsel ssa_577, ssa_576, ssa_573 vec4 32 ssa_579 = ffloor ssa_578 vec4 32 ssa_580 = fadd ssa_578.xyyy, -ssa_579.zwww vec2 32 ssa_581 = fge ssa_580.xy, ssa_44.ww vec1 32 ssa_582 = imov ssa_581.y /* succs: block_62 block_63 */ if ssa_582 { block block_62: /* preds: block_61 */ vec4 32 ssa_583 = fmul ssa_545.xxxx, ssa_566.xxxx vec4 32 ssa_584 = fne ssa_566.xxxx, ssa_566.xxxx vec4 32 ssa_585 = bcsel ssa_584, ssa_583, ssa_8 vec4 32 ssa_586 = fne ssa_545.xxxx, ssa_545.xxxx vec4 32 ssa_587 = bcsel ssa_586, ssa_583, ssa_585 vec4 32 ssa_588 = fne ssa_583, ssa_583 vec4 32 ssa_589 = bcsel ssa_588, ssa_587, ssa_583 vec4 32 ssa_590 = vec4 ssa_589.x, ssa_545.y, ssa_507.z, ssa_507.w /* succs: block_64 */ } else { block block_63: /* preds: block_61 */ /* succs: block_64 */ } block block_64: /* preds: block_62 block_63 */ vec4 32 ssa_591 = phi block_62: ssa_590, block_63: ssa_546 vec1 32 ssa_592 = imov ssa_581.x /* succs: block_65 block_66 */ if ssa_592 { block block_65: /* preds: block_64 */ vec4 32 ssa_593 = fmul ssa_591.yyyy, ssa_566.yyyy vec4 32 ssa_594 = fne ssa_566.yyyy, ssa_566.yyyy vec4 32 ssa_595 = bcsel ssa_594, ssa_593, ssa_8 vec4 32 ssa_596 = fne ssa_591.yyyy, ssa_591.yyyy vec4 32 ssa_597 = bcsel ssa_596, ssa_593, ssa_595 vec4 32 ssa_598 = fne ssa_593, ssa_593 vec4 32 ssa_599 = bcsel ssa_598, ssa_597, ssa_593 vec4 32 ssa_600 = fadd ssa_599, ssa_44.yyyy vec4 32 ssa_601 = fadd ssa_600.yyyy, -ssa_566.yyyy vec4 32 ssa_602 = vec4 ssa_591.x, ssa_601.y, ssa_591.z, ssa_591.w /* succs: block_67 */ } else { block block_66: /* preds: block_64 */ /* succs: block_67 */ } block block_67: /* preds: block_65 
block_66 */ vec4 32 ssa_603 = phi block_65: ssa_602, block_66: ssa_591 vec4 32 ssa_604 = fadd ssa_44.yyyy, -ssa_566.xyyy vec4 32 ssa_605 = vec4 ssa_604.x, ssa_604.y, ssa_559.z, ssa_559.w vec4 32 ssa_606 = fmul ssa_44.zzzz, ssa_580.xxxx vec4 32 ssa_607 = fne ssa_580.xxxx, ssa_580.xxxx vec4 32 ssa_608 = bcsel ssa_607, ssa_606, ssa_8 vec4 32 ssa_609 = bcsel ssa_247, ssa_606, ssa_608 vec4 32 ssa_610 = fne ssa_606, ssa_606 vec4 32 ssa_611 = bcsel ssa_610, ssa_609, ssa_606 vec4 32 ssa_612 = fmul ssa_44.zyzy, ssa_611.xxxx vec4 32 ssa_613 = fne ssa_611.xxxx, ssa_611.xxxx vec4 32 ssa_614 = bcsel ssa_613, ssa_612, ssa_8 vec4 32 ssa_615 = bcsel ssa_237, ssa_612, ssa_614 vec4 32 ssa_616 = fne ssa_612, ssa_612 vec4 32 ssa_617 = bcsel ssa_616, ssa_615, ssa_612 vec4 32 ssa_618 = ffloor ssa_617 vec4 32 ssa_619 = fadd ssa_617.xyyy, -ssa_618.zwww vec2 32 ssa_620 = fge ssa_619.xy, ssa_44.ww vec1 32 ssa_621 = imov ssa_620.y /* succs: block_68 block_69 */ if ssa_621 { block block_68: /* preds: block_67 */ vec4 32 ssa_622 = fadd ssa_603.xxxx, ssa_604.xxxx vec4 32 ssa_623 = vec4 ssa_622.x, ssa_603.y, ssa_603.z, ssa_603.w /* succs: block_70 */ } else { block block_69: /* preds: block_67 */ /* succs: block_70 */ } block block_70: /* preds: block_68 block_69 */ vec4 32 ssa_624 = phi block_68: ssa_623, block_69: ssa_603 vec1 32 ssa_625 = imov ssa_620.x /* succs: block_71 block_72 */ if ssa_625 { block block_71: /* preds: block_70 */ vec4 32 ssa_626 = fadd ssa_624.yyyy, -ssa_604.yyyy vec4 32 ssa_627 = vec4 ssa_624.x, ssa_626.y, ssa_624.z, ssa_624.w /* succs: block_73 */ } else { block block_72: /* preds: block_70 */ /* succs: block_73 */ } block block_73: /* preds: block_71 block_72 */ vec4 32 ssa_628 = phi block_71: ssa_627, block_72: ssa_624 vec4 32 ssa_629 = fmul ssa_44.zzzz, ssa_619.xxxx vec4 32 ssa_630 = fne ssa_619.xxxx, ssa_619.xxxx vec4 32 ssa_631 = bcsel ssa_630, ssa_629, ssa_8 vec4 32 ssa_632 = bcsel ssa_247, ssa_629, ssa_631 vec4 32 ssa_633 = fne ssa_629, ssa_629 vec4 32 ssa_634 = bcsel ssa_633, ssa_632, ssa_629 vec4 32 ssa_635 = fmul ssa_44.zyzy, ssa_634.xxxx vec4 32 ssa_636 = fne ssa_634.xxxx, ssa_634.xxxx vec4 32 ssa_637 = bcsel ssa_636, ssa_635, ssa_8 vec4 32 ssa_638 = bcsel ssa_237, ssa_635, ssa_637 vec4 32 ssa_639 = fne ssa_635, ssa_635 vec4 32 ssa_640 = bcsel ssa_639, ssa_638, ssa_635 vec4 32 ssa_641 = ffloor ssa_640 vec4 32 ssa_642 = fadd ssa_640.xyyy, -ssa_641.zwww vec2 32 ssa_643 = fge ssa_642.xy, ssa_44.ww vec1 32 ssa_644 = imov ssa_643.y /* succs: block_74 block_75 */ if ssa_644 { block block_74: /* preds: block_73 */ vec4 32 ssa_645 = fadd ssa_44.yyyy, -ssa_628.xxxx vec4 32 ssa_646 = vec4 ssa_645.x, ssa_628.y, ssa_628.z, ssa_628.w /* succs: block_76 */ } else { block block_75: /* preds: block_73 */ /* succs: block_76 */ } block block_76: /* preds: block_74 block_75 */ vec4 32 ssa_647 = phi block_74: ssa_646, block_75: ssa_628 vec1 32 ssa_648 = imov ssa_643.x /* succs: block_77 block_78 */ if ssa_648 { block block_77: /* preds: block_76 */ vec4 32 ssa_649 = fadd ssa_44.yyyy, -ssa_647.yyyy vec4 32 ssa_650 = vec4 ssa_647.x, ssa_649.y, ssa_647.z, ssa_647.w /* succs: block_79 */ } else { block block_78: /* preds: block_76 */ /* succs: block_79 */ } block block_79: /* preds: block_77 block_78 */ vec4 32 ssa_651 = phi block_77: ssa_650, block_78: ssa_647 vec4 32 ssa_652 = vec4 ssa_553.x, ssa_553.y, ssa_641.z, ssa_641.w vec4 32 ssa_653 = ffloor ssa_652 vec4 32 ssa_654 = fadd ssa_553.xxxx, -ssa_653.yyyy vec4 32 ssa_655 = vec4 ssa_654.x, ssa_653.y, ssa_641.z, ssa_641.w vec2 32 ssa_656 = fge ssa_654.xx, 
ssa_44.ww vec1 32 ssa_657 = imov ssa_656.x /* succs: block_80 block_81 */ if ssa_657 { block block_80: /* preds: block_79 */ vec4 32 ssa_658 = fadd ssa_44.yyyy, -ssa_651.yxxx vec4 32 ssa_659 = vec4 ssa_658.x, ssa_658.y, ssa_651.z, ssa_651.w /* succs: block_82 */ } else { block block_81: /* preds: block_79 */ /* succs: block_82 */ } block block_82: /* preds: block_80 block_81 */ vec4 32 ssa_660 = phi block_80: ssa_659, block_81: ssa_651 vec4 32 ssa_661 = fadd ssa_44.yyyy, -ssa_660.yyyy vec4 32 ssa_662 = vec4 ssa_660.x, ssa_661.y, ssa_660.z, ssa_660.w /* succs: block_84 */ } else { block block_83: /* preds: block_60 */ /* succs: block_84 */ } block block_84: /* preds: block_82 block_83 */ vec4 32 ssa_663 = phi block_82: ssa_605, block_83: ssa_538 vec4 32 ssa_664 = phi block_82: ssa_655, block_83: ssa_542 vec4 32 ssa_665 = phi block_82: ssa_662, block_83: ssa_507 vec1 32 ssa_666 = flt ssa_535.y, ssa_44.y /* succs: block_85 block_89 */ if ssa_666 { block block_85: /* preds: block_84 */ vec4 32 ssa_667 = vec4 ssa_255.x, ssa_664.y, ssa_664.z, ssa_664.w vec4 32 ssa_668 = fadd ssa_41.wwww, -ssa_667 vec1 32 ssa_669 = iadd ssa_5, ssa_539.y vec1 32 ssa_670 = ishl ssa_669, ssa_37 vec1 32 ssa_671 = iadd ssa_39, ssa_670 vec4 32 ssa_672 = intrinsic load_ubo (ssa_2, ssa_671) () () vec4 32 ssa_673 = imov ssa_672.wzyx vec2 32 ssa_674 = fge ssa_668.xx, ssa_44.ww vec1 32 ssa_675 = imov ssa_674.x /* succs: block_86 block_87 */ if ssa_675 { block block_86: /* preds: block_85 */ vec4 32 ssa_676 = fmul ssa_665.xyyy, ssa_672.wzzz vec4 32 ssa_677 = fne ssa_672.wzzz, ssa_672.wzzz vec4 32 ssa_678 = bcsel ssa_677, ssa_676, ssa_8 vec4 32 ssa_679 = fne ssa_665.xyyy, ssa_665.xyyy vec4 32 ssa_680 = bcsel ssa_679, ssa_676, ssa_678 vec4 32 ssa_681 = fne ssa_676, ssa_676 vec4 32 ssa_682 = bcsel ssa_681, ssa_680, ssa_676 vec4 32 ssa_683 = fadd ssa_682, ssa_672.yxxx vec4 32 ssa_684 = fmul ssa_683.xyyy, ssa_663.zwww vec4 32 ssa_685 = fne ssa_663.zwww, ssa_663.zwww vec4 32 ssa_686 = bcsel ssa_685, ssa_684, ssa_8 vec4 32 ssa_687 = fne ssa_683.xyyy, ssa_683.xyyy vec4 32 ssa_688 = bcsel ssa_687, ssa_684, ssa_686 vec4 32 ssa_689 = fne ssa_684, ssa_684 vec4 32 ssa_690 = bcsel ssa_689, ssa_688, ssa_684 vec4 32 ssa_691 = fadd ssa_44.yyyy, -ssa_690.yyyy vec4 32 ssa_692 = vec4 ssa_690.x, ssa_691.y, ssa_519.z, ssa_519.w /* succs: block_88 */ } else { block block_87: /* preds: block_85 */ vec4 32 ssa_693 = vec4 ssa_672.w, ssa_535.y, ssa_519.z, ssa_519.w vec4 32 ssa_694 = vec4 ssa_672.y, ssa_535.y, ssa_519.z, ssa_519.w vec2 32 ssa_695 = fne ssa_44.xx, ssa_34.zw vec4 32 ssa_696 = bcsel ssa_695.xxxx, ssa_694, ssa_693 vec4 32 ssa_697 = vec4 ssa_696.x, ssa_672.z, ssa_696.z, ssa_696.w vec4 32 ssa_698 = vec4 ssa_696.x, ssa_672.x, ssa_696.z, ssa_696.w vec4 32 ssa_699 = bcsel ssa_695.yyyy, ssa_698, ssa_697 /* succs: block_88 */ } block block_88: /* preds: block_86 block_87 */ vec4 32 ssa_700 = phi block_86: ssa_692, block_87: ssa_699 vec4 32 ssa_701 = fadd ssa_44.yyyy, ssa_700.zzzz vec4 32 ssa_702 = vec4 ssa_700.x, ssa_700.y, ssa_701.z, ssa_700.w /* succs: block_99 */ } else { block block_89: /* preds: block_84 */ vec1 32 ssa_703 = flt ssa_535.x, ssa_44.y /* succs: block_90 block_97 */ if ssa_703 { block block_90: /* preds: block_89 */ vec4 32 ssa_704 = vec4 ssa_255.x, ssa_664.y, ssa_664.z, ssa_664.w vec4 32 ssa_705 = fadd ssa_41.wwww, -ssa_704 vec4 32 ssa_706 = vec4 ssa_705.x, ssa_664.y, ssa_664.z, ssa_664.w vec2 32 ssa_707 = fge ssa_705.xx, ssa_44.ww vec1 32 ssa_708 = imov ssa_707.x /* succs: block_91 block_92 */ if ssa_708 { block block_91: /* 
preds: block_90 */ vec4 32 ssa_709 = vec4 ssa_665.x, ssa_665.y, ssa_44.x, ssa_44.y vec1 32 ssa_710 = iadd ssa_5, ssa_539.y vec1 32 ssa_711 = ishl ssa_710, ssa_37 vec1 32 ssa_712 = iadd ssa_39, ssa_711 vec4 32 ssa_713 = intrinsic load_ubo (ssa_2, ssa_712) () () vec4 32 ssa_714 = fmul ssa_713.wzyx, ssa_709 vec4 32 ssa_715 = fne ssa_709, ssa_709 vec4 32 ssa_716 = bcsel ssa_715, ssa_714, ssa_8 vec4 32 ssa_717 = fne ssa_713.wzyx, ssa_713.wzyx vec4 32 ssa_718 = bcsel ssa_717, ssa_714, ssa_716 vec4 32 ssa_719 = fne ssa_714, ssa_714 vec4 32 ssa_720 = bcsel ssa_719, ssa_718, ssa_714 vec4 32 ssa_721 = fdot_replicated4 ssa_720, ssa_9 vec1 32 ssa_722 = iadd ssa_10, ssa_539.y vec1 32 ssa_723 = ishl ssa_722, ssa_37 vec1 32 ssa_724 = iadd ssa_39, ssa_723 vec4 32 ssa_725 = intrinsic load_ubo (ssa_2, ssa_724) () () vec4 32 ssa_726 = fmul ssa_725.wzyx, ssa_709 vec4 32 ssa_727 = bcsel ssa_715, ssa_726, ssa_8 vec4 32 ssa_728 = fne ssa_725.wzyx, ssa_725.wzyx vec4 32 ssa_729 = bcsel ssa_728, ssa_726, ssa_727 vec4 32 ssa_730 = fne ssa_726, ssa_726 vec4 32 ssa_731 = bcsel ssa_730, ssa_729, ssa_726 vec4 32 ssa_732 = fdot_replicated4 ssa_731, ssa_9 vec4 32 ssa_733 = vec4 ssa_721.x, ssa_732.x, ssa_519.z, ssa_519.w vec4 32 ssa_734 = fmul ssa_733.xyyy, ssa_663.zwww vec4 32 ssa_735 = fne ssa_663.zwww, ssa_663.zwww vec4 32 ssa_736 = bcsel ssa_735, ssa_734, ssa_8 vec4 32 ssa_737 = fne ssa_733.xyyy, ssa_733.xyyy vec4 32 ssa_738 = bcsel ssa_737, ssa_734, ssa_736 vec4 32 ssa_739 = fne ssa_734, ssa_734 vec4 32 ssa_740 = bcsel ssa_739, ssa_738, ssa_734 vec4 32 ssa_741 = fadd ssa_44.yyyy, -ssa_740.yyyy vec4 32 ssa_742 = vec4 ssa_740.x, ssa_741.y, ssa_519.z, ssa_519.w /* succs: block_96 */ } else { block block_92: /* preds: block_90 */ vec1 32 ssa_743 = iadd ssa_5, ssa_539.y vec1 32 ssa_744 = ishl ssa_743, ssa_37 vec1 32 ssa_745 = iadd ssa_39, ssa_744 vec4 32 ssa_746 = intrinsic load_ubo (ssa_2, ssa_745) () () vec1 32 ssa_747 = iadd ssa_10, ssa_539.y vec1 32 ssa_748 = ishl ssa_747, ssa_37 vec1 32 ssa_749 = iadd ssa_39, ssa_748 vec4 32 ssa_750 = intrinsic load_ubo (ssa_2, ssa_749) () () vec4 32 ssa_751 = imov ssa_750.wzyx vec2 32 ssa_752 = fne ssa_44.xx, ssa_34.zw vec1 32 ssa_753 = feq ssa_44.x, ssa_34.w /* succs: block_93 block_94 */ if ssa_753 { block block_93: /* preds: block_92 */ vec4 32 ssa_754 = vec4 ssa_746.w, ssa_746.z, ssa_519.z, ssa_519.w vec4 32 ssa_755 = vec4 ssa_750.y, ssa_750.x, ssa_519.z, ssa_519.w vec4 32 ssa_756 = bcsel ssa_752.xxxx, ssa_755, ssa_754 /* succs: block_95 */ } else { block block_94: /* preds: block_92 */ vec4 32 ssa_757 = vec4 ssa_750.w, ssa_750.z, ssa_519.z, ssa_519.w vec4 32 ssa_758 = vec4 ssa_746.y, ssa_746.x, ssa_519.z, ssa_519.w vec4 32 ssa_759 = bcsel ssa_752.xxxx, ssa_758, ssa_757 /* succs: block_95 */ } block block_95: /* preds: block_93 block_94 */ vec4 32 ssa_760 = phi block_93: ssa_756, block_94: ssa_759 /* succs: block_96 */ } block block_96: /* preds: block_91 block_95 */ vec4 32 ssa_761 = phi block_91: ssa_706, block_95: ssa_751 vec4 32 ssa_762 = phi block_91: ssa_709, block_95: ssa_665 vec4 32 ssa_763 = phi block_91: ssa_742, block_95: ssa_760 /* succs: block_98 */ } else { block block_97: /* preds: block_89 */ vec1 32 ssa_764 = iadd ssa_5, ssa_539.y vec1 32 ssa_765 = ishl ssa_764, ssa_37 vec1 32 ssa_766 = iadd ssa_39, ssa_765 vec4 32 ssa_767 = intrinsic load_ubo (ssa_2, ssa_766) () () vec4 32 ssa_768 = fmul ssa_767.wzyx, ssa_67 vec4 32 ssa_769 = bcsel ssa_73, ssa_768, ssa_8 vec4 32 ssa_770 = fne ssa_767.wzyx, ssa_767.wzyx vec4 32 ssa_771 = bcsel ssa_770, ssa_768, ssa_769 vec4 32 
ssa_772 = fne ssa_768, ssa_768 vec4 32 ssa_773 = bcsel ssa_772, ssa_771, ssa_768 vec4 32 ssa_774 = fdot_replicated4 ssa_773, ssa_9 vec1 32 ssa_775 = iadd ssa_10, ssa_539.y vec1 32 ssa_776 = ishl ssa_775, ssa_37 vec1 32 ssa_777 = iadd ssa_39, ssa_776 vec4 32 ssa_778 = intrinsic load_ubo (ssa_2, ssa_777) () () vec4 32 ssa_779 = fmul ssa_778.wzyx, ssa_67 vec4 32 ssa_780 = bcsel ssa_73, ssa_779, ssa_8 vec4 32 ssa_781 = fne ssa_778.wzyx, ssa_778.wzyx vec4 32 ssa_782 = bcsel ssa_781, ssa_779, ssa_780 vec4 32 ssa_783 = fne ssa_779, ssa_779 vec4 32 ssa_784 = bcsel ssa_783, ssa_782, ssa_779 vec4 32 ssa_785 = fdot_replicated4 ssa_784, ssa_9 vec4 32 ssa_786 = vec4 ssa_774.x, ssa_785.x, ssa_519.z, ssa_519.w /* succs: block_98 */ } block block_98: /* preds: block_96 block_97 */ vec4 32 ssa_787 = phi block_96: ssa_761, block_97: ssa_664 vec4 32 ssa_788 = phi block_96: ssa_762, block_97: ssa_665 vec4 32 ssa_789 = phi block_96: ssa_763, block_97: ssa_786 vec4 32 ssa_790 = fadd ssa_44.zzzz, ssa_789.zzzz vec4 32 ssa_791 = vec4 ssa_789.x, ssa_789.y, ssa_790.z, ssa_789.w /* succs: block_99 */ } block block_99: /* preds: block_88 block_98 */ vec4 32 ssa_792 = phi block_88: ssa_673, block_98: ssa_787 vec4 32 ssa_793 = phi block_88: ssa_665, block_98: ssa_788 vec4 32 ssa_794 = phi block_88: ssa_702, block_98: ssa_791 vec1 32 ssa_795 = load_const (0x00000030 /* 0.000000 */) vec1 32 ssa_796 = intrinsic load_ubo (ssa_2, ssa_795) () () vec1 32 ssa_797 = ine ssa_796, ssa_2 /* succs: block_100 block_101 */ if ssa_797 { block block_100: /* preds: block_99 */ vec4 32 ssa_798 = fadd ssa_44.yyyy, -ssa_794.xxxx vec4 32 ssa_799 = vec4 ssa_794.y, ssa_798.y, ssa_794.z, ssa_794.w /* succs: block_102 */ } else { block block_101: /* preds: block_99 */ /* succs: block_102 */ } block block_102: /* preds: block_100 block_101 */ vec4 32 ssa_800 = phi block_100: ssa_799, block_101: ssa_794 vec1 32 ssa_801 = load_const (0x00000040 /* 0.000000 */) vec1 32 ssa_802 = intrinsic load_ubo (ssa_2, ssa_801) () () vec1 32 ssa_803 = ine ssa_802, ssa_2 /* succs: block_103 block_104 */ if ssa_803 { block block_103: /* preds: block_102 */ vec4 32 ssa_804 = fadd ssa_44.yyyy, -ssa_800.yxxx vec4 32 ssa_805 = vec4 ssa_804.x, ssa_804.y, ssa_800.z, ssa_800.w /* succs: block_105 */ } else { block block_104: /* preds: block_102 */ /* succs: block_105 */ } block block_105: /* preds: block_103 block_104 */ vec4 32 ssa_806 = phi block_103: ssa_805, block_104: ssa_800 vec4 32 ssa_807 = imov ssa_806.xyyy vec4 32 ssa_808 = fmul ssa_44.zyzy, ssa_535.xxxx vec4 32 ssa_809 = fne ssa_535.xxxx, ssa_535.xxxx vec4 32 ssa_810 = bcsel ssa_809, ssa_808, ssa_8 vec4 32 ssa_811 = bcsel ssa_237, ssa_808, ssa_810 vec4 32 ssa_812 = fne ssa_808, ssa_808 vec4 32 ssa_813 = bcsel ssa_812, ssa_811, ssa_808 vec4 32 ssa_814 = ffloor ssa_813 vec4 32 ssa_815 = fadd ssa_813.zwww, -ssa_814.xyyy vec4 32 ssa_816 = vec4 ssa_815.x, ssa_815.y, ssa_813.z, ssa_813.w vec4 32 ssa_817 = fmul ssa_44.zzzz, ssa_816 vec4 32 ssa_818 = fne ssa_816, ssa_816 vec4 32 ssa_819 = bcsel ssa_818, ssa_817, ssa_8 vec4 32 ssa_820 = bcsel ssa_247, ssa_817, ssa_819 vec4 32 ssa_821 = fne ssa_817, ssa_817 vec4 32 ssa_822 = bcsel ssa_821, ssa_820, ssa_817 vec1 32 ssa_823 = load_const (0x000001c0 /* 0.000000 */) vec4 32 ssa_824 = intrinsic load_ubo (ssa_2, ssa_823) () () vec4 32 ssa_825 = imov ssa_824.wzyx vec4 32 ssa_826 = f2i32 ssa_806.zzzz vec3 32 ssa_827 = vec3 ssa_233.x, ssa_826.y, ssa_233.z vec4 32 ssa_828 = vec4 ssa_255.x, ssa_792.y, ssa_792.z, ssa_792.w vec4 32 ssa_829 = fadd ssa_41.wwww, -ssa_828 vec4 32 ssa_830 
= vec4 ssa_829.x, ssa_792.y, ssa_792.z, ssa_792.w vec2 32 ssa_831 = fge ssa_829.xx, ssa_44.ww vec1 32 ssa_832 = imov ssa_831.x /* succs: block_106 block_128 */ if ssa_832 { block block_106: /* preds: block_105 */ vec4 32 ssa_833 = fadd ssa_44.xyyy, ssa_34.zwww vec4 32 ssa_834 = vec4 ssa_833.x, ssa_833.y, ssa_793.z, ssa_793.w vec4 32 ssa_835 = vec4 ssa_252.w, ssa_252.z, ssa_824.y, ssa_824.x vec4 32 ssa_836 = fmul ssa_44.zzzz, ssa_41.wwww vec4 32 ssa_837 = fne ssa_41.wwww, ssa_41.wwww vec4 32 ssa_838 = bcsel ssa_837, ssa_836, ssa_8 vec4 32 ssa_839 = bcsel ssa_247, ssa_836, ssa_838 vec4 32 ssa_840 = fne ssa_836, ssa_836 vec4 32 ssa_841 = bcsel ssa_840, ssa_839, ssa_836 vec4 32 ssa_842 = vec4 ssa_841.x, ssa_841.y, ssa_792.z, ssa_792.w vec4 32 ssa_843 = ffloor ssa_842 vec4 32 ssa_844 = fadd ssa_841.xxxx, -ssa_843.yyyy vec4 32 ssa_845 = vec4 ssa_252.z, ssa_252.w, ssa_824.y, ssa_824.x vec2 32 ssa_846 = fge ssa_844.xx, ssa_44.ww vec4 32 ssa_847 = bcsel ssa_846.xxxx, ssa_845, ssa_835 vec4 32 ssa_848 = fmul ssa_847, ssa_51.wzyx vec4 32 ssa_849 = fne ssa_51.wzyx, ssa_51.wzyx vec4 32 ssa_850 = bcsel ssa_849, ssa_848, ssa_8 vec4 32 ssa_851 = fne ssa_847, ssa_847 vec4 32 ssa_852 = bcsel ssa_851, ssa_848, ssa_850 vec4 32 ssa_853 = fne ssa_848, ssa_848 vec4 32 ssa_854 = bcsel ssa_853, ssa_852, ssa_848 vec4 32 ssa_855 = fmul ssa_44.zzzz, ssa_844.xxxx vec4 32 ssa_856 = fne ssa_844.xxxx, ssa_844.xxxx vec4 32 ssa_857 = bcsel ssa_856, ssa_855, ssa_8 vec4 32 ssa_858 = bcsel ssa_247, ssa_855, ssa_857 vec4 32 ssa_859 = fne ssa_855, ssa_855 vec4 32 ssa_860 = bcsel ssa_859, ssa_858, ssa_855 vec4 32 ssa_861 = fmul ssa_44.zyzy, ssa_860.xxxx vec4 32 ssa_862 = fne ssa_860.xxxx, ssa_860.xxxx vec4 32 ssa_863 = bcsel ssa_862, ssa_861, ssa_8 vec4 32 ssa_864 = bcsel ssa_237, ssa_861, ssa_863 vec4 32 ssa_865 = fne ssa_861, ssa_861 vec4 32 ssa_866 = bcsel ssa_865, ssa_864, ssa_861 vec4 32 ssa_867 = ffloor ssa_866 vec4 32 ssa_868 = fadd ssa_866.xyyy, -ssa_867.zwww vec2 32 ssa_869 = fge ssa_868.xy, ssa_44.ww vec1 32 ssa_870 = imov ssa_869.y /* succs: block_107 block_108 */ if ssa_870 { block block_107: /* preds: block_106 */ vec4 32 ssa_871 = fmul ssa_833.xxxx, ssa_854.xxxx vec4 32 ssa_872 = fne ssa_854.xxxx, ssa_854.xxxx vec4 32 ssa_873 = bcsel ssa_872, ssa_871, ssa_8 vec4 32 ssa_874 = fne ssa_833.xxxx, ssa_833.xxxx vec4 32 ssa_875 = bcsel ssa_874, ssa_871, ssa_873 vec4 32 ssa_876 = fne ssa_871, ssa_871 vec4 32 ssa_877 = bcsel ssa_876, ssa_875, ssa_871 vec4 32 ssa_878 = vec4 ssa_877.x, ssa_833.y, ssa_793.z, ssa_793.w /* succs: block_109 */ } else { block block_108: /* preds: block_106 */ /* succs: block_109 */ } block block_109: /* preds: block_107 block_108 */ vec4 32 ssa_879 = phi block_107: ssa_878, block_108: ssa_834 vec1 32 ssa_880 = imov ssa_869.x /* succs: block_110 block_111 */ if ssa_880 { block block_110: /* preds: block_109 */ vec4 32 ssa_881 = fmul ssa_879.yyyy, ssa_854.yyyy vec4 32 ssa_882 = fne ssa_854.yyyy, ssa_854.yyyy vec4 32 ssa_883 = bcsel ssa_882, ssa_881, ssa_8 vec4 32 ssa_884 = fne ssa_879.yyyy, ssa_879.yyyy vec4 32 ssa_885 = bcsel ssa_884, ssa_881, ssa_883 vec4 32 ssa_886 = fne ssa_881, ssa_881 vec4 32 ssa_887 = bcsel ssa_886, ssa_885, ssa_881 vec4 32 ssa_888 = fadd ssa_887, ssa_44.yyyy vec4 32 ssa_889 = fadd ssa_888.yyyy, -ssa_854.yyyy vec4 32 ssa_890 = vec4 ssa_879.x, ssa_889.y, ssa_879.z, ssa_879.w /* succs: block_112 */ } else { block block_111: /* preds: block_109 */ /* succs: block_112 */ } block block_112: /* preds: block_110 block_111 */ vec4 32 ssa_891 = phi block_110: ssa_890, block_111: 
ssa_879 vec4 32 ssa_892 = fadd ssa_44.yyyy, -ssa_854.xyyy vec4 32 ssa_893 = vec4 ssa_892.x, ssa_892.y, ssa_847.z, ssa_847.w vec4 32 ssa_894 = fmul ssa_44.zzzz, ssa_868.xxxx vec4 32 ssa_895 = fne ssa_868.xxxx, ssa_868.xxxx vec4 32 ssa_896 = bcsel ssa_895, ssa_894, ssa_8 vec4 32 ssa_897 = bcsel ssa_247, ssa_894, ssa_896 vec4 32 ssa_898 = fne ssa_894, ssa_894 vec4 32 ssa_899 = bcsel ssa_898, ssa_897, ssa_894 vec4 32 ssa_900 = fmul ssa_44.zyzy, ssa_899.xxxx vec4 32 ssa_901 = fne ssa_899.xxxx, ssa_899.xxxx vec4 32 ssa_902 = bcsel ssa_901, ssa_900, ssa_8 vec4 32 ssa_903 = bcsel ssa_237, ssa_900, ssa_902 vec4 32 ssa_904 = fne ssa_900, ssa_900 vec4 32 ssa_905 = bcsel ssa_904, ssa_903, ssa_900 vec4 32 ssa_906 = ffloor ssa_905 vec4 32 ssa_907 = fadd ssa_905.xyyy, -ssa_906.zwww vec2 32 ssa_908 = fge ssa_907.xy, ssa_44.ww vec1 32 ssa_909 = imov ssa_908.y /* succs: block_113 block_114 */ if ssa_909 { block block_113: /* preds: block_112 */ vec4 32 ssa_910 = fadd ssa_891.xxxx, ssa_892.xxxx vec4 32 ssa_911 = vec4 ssa_910.x, ssa_891.y, ssa_891.z, ssa_891.w /* succs: block_115 */ } else { block block_114: /* preds: block_112 */ /* succs: block_115 */ } block block_115: /* preds: block_113 block_114 */ vec4 32 ssa_912 = phi block_113: ssa_911, block_114: ssa_891 vec1 32 ssa_913 = imov ssa_908.x /* succs: block_116 block_117 */ if ssa_913 { block block_116: /* preds: block_115 */ vec4 32 ssa_914 = fadd ssa_912.yyyy, -ssa_892.yyyy vec4 32 ssa_915 = vec4 ssa_912.x, ssa_914.y, ssa_912.z, ssa_912.w /* succs: block_118 */ } else { block block_117: /* preds: block_115 */ /* succs: block_118 */ } block block_118: /* preds: block_116 block_117 */ vec4 32 ssa_916 = phi block_116: ssa_915, block_117: ssa_912 vec4 32 ssa_917 = fmul ssa_44.zzzz, ssa_907.xxxx vec4 32 ssa_918 = fne ssa_907.xxxx, ssa_907.xxxx vec4 32 ssa_919 = bcsel ssa_918, ssa_917, ssa_8 vec4 32 ssa_920 = bcsel ssa_247, ssa_917, ssa_919 vec4 32 ssa_921 = fne ssa_917, ssa_917 vec4 32 ssa_922 = bcsel ssa_921, ssa_920, ssa_917 vec4 32 ssa_923 = fmul ssa_44.zyzy, ssa_922.xxxx vec4 32 ssa_924 = fne ssa_922.xxxx, ssa_922.xxxx vec4 32 ssa_925 = bcsel ssa_924, ssa_923, ssa_8 vec4 32 ssa_926 = bcsel ssa_237, ssa_923, ssa_925 vec4 32 ssa_927 = fne ssa_923, ssa_923 vec4 32 ssa_928 = bcsel ssa_927, ssa_926, ssa_923 vec4 32 ssa_929 = ffloor ssa_928 vec4 32 ssa_930 = fadd ssa_928.xyyy, -ssa_929.zwww vec2 32 ssa_931 = fge ssa_930.xy, ssa_44.ww vec1 32 ssa_932 = imov ssa_931.y /* succs: block_119 block_120 */ if ssa_932 { block block_119: /* preds: block_118 */ vec4 32 ssa_933 = fadd ssa_44.yyyy, -ssa_916.xxxx vec4 32 ssa_934 = vec4 ssa_933.x, ssa_916.y, ssa_916.z, ssa_916.w /* succs: block_121 */ } else { block block_120: /* preds: block_118 */ /* succs: block_121 */ } block block_121: /* preds: block_119 block_120 */ vec4 32 ssa_935 = phi block_119: ssa_934, block_120: ssa_916 vec1 32 ssa_936 = imov ssa_931.x /* succs: block_122 block_123 */ if ssa_936 { block block_122: /* preds: block_121 */ vec4 32 ssa_937 = fadd ssa_44.yyyy, -ssa_935.yyyy vec4 32 ssa_938 = vec4 ssa_935.x, ssa_937.y, ssa_935.z, ssa_935.w /* succs: block_124 */ } else { block block_123: /* preds: block_121 */ /* succs: block_124 */ } block block_124: /* preds: block_122 block_123 */ vec4 32 ssa_939 = phi block_122: ssa_938, block_123: ssa_935 vec4 32 ssa_940 = vec4 ssa_841.x, ssa_841.y, ssa_929.z, ssa_929.w vec4 32 ssa_941 = ffloor ssa_940 vec4 32 ssa_942 = fadd ssa_841.xxxx, -ssa_941.yyyy vec4 32 ssa_943 = vec4 ssa_942.x, ssa_941.y, ssa_929.z, ssa_929.w vec2 32 ssa_944 = fge ssa_942.xx, ssa_44.ww 
vec1 32 ssa_945 = imov ssa_944.x /* succs: block_125 block_126 */ if ssa_945 { block block_125: /* preds: block_124 */ vec4 32 ssa_946 = fadd ssa_44.yyyy, -ssa_939.yxxx vec4 32 ssa_947 = vec4 ssa_946.x, ssa_946.y, ssa_939.z, ssa_939.w /* succs: block_127 */ } else { block block_126: /* preds: block_124 */ /* succs: block_127 */ } block block_127: /* preds: block_125 block_126 */ vec4 32 ssa_948 = phi block_125: ssa_947, block_126: ssa_939 vec4 32 ssa_949 = fadd ssa_44.yyyy, -ssa_948.yyyy vec4 32 ssa_950 = vec4 ssa_948.x, ssa_949.y, ssa_948.z, ssa_948.w /* succs: block_129 */ } else { block block_128: /* preds: block_105 */ /* succs: block_129 */ } block block_129: /* preds: block_127 block_128 */ vec4 32 ssa_951 = phi block_127: ssa_893, block_128: ssa_825 vec4 32 ssa_952 = phi block_127: ssa_943, block_128: ssa_830 vec4 32 ssa_953 = phi block_127: ssa_950, block_128: ssa_793 vec1 32 ssa_954 = flt ssa_822.y, ssa_44.y /* succs: block_130 block_134 */ if ssa_954 { block block_130: /* preds: block_129 */ vec4 32 ssa_955 = vec4 ssa_255.x, ssa_952.y, ssa_952.z, ssa_952.w vec4 32 ssa_956 = fadd ssa_41.wwww, -ssa_955 vec1 32 ssa_957 = iadd ssa_5, ssa_826.y vec1 32 ssa_958 = ishl ssa_957, ssa_37 vec1 32 ssa_959 = iadd ssa_39, ssa_958 vec4 32 ssa_960 = intrinsic load_ubo (ssa_2, ssa_959) () () vec4 32 ssa_961 = imov ssa_960.wzyx vec2 32 ssa_962 = fge ssa_956.xx, ssa_44.ww vec1 32 ssa_963 = imov ssa_962.x /* succs: block_131 block_132 */ if ssa_963 { block block_131: /* preds: block_130 */ vec4 32 ssa_964 = fmul ssa_953.xyyy, ssa_960.wzzz vec4 32 ssa_965 = fne ssa_960.wzzz, ssa_960.wzzz vec4 32 ssa_966 = bcsel ssa_965, ssa_964, ssa_8 vec4 32 ssa_967 = fne ssa_953.xyyy, ssa_953.xyyy vec4 32 ssa_968 = bcsel ssa_967, ssa_964, ssa_966 vec4 32 ssa_969 = fne ssa_964, ssa_964 vec4 32 ssa_970 = bcsel ssa_969, ssa_968, ssa_964 vec4 32 ssa_971 = fadd ssa_970, ssa_960.yxxx vec4 32 ssa_972 = fmul ssa_971.xyyy, ssa_951.zwww vec4 32 ssa_973 = fne ssa_951.zwww, ssa_951.zwww vec4 32 ssa_974 = bcsel ssa_973, ssa_972, ssa_8 vec4 32 ssa_975 = fne ssa_971.xyyy, ssa_971.xyyy vec4 32 ssa_976 = bcsel ssa_975, ssa_972, ssa_974 vec4 32 ssa_977 = fne ssa_972, ssa_972 vec4 32 ssa_978 = bcsel ssa_977, ssa_976, ssa_972 vec4 32 ssa_979 = fadd ssa_44.yyyy, -ssa_978.yyyy vec4 32 ssa_980 = vec4 ssa_978.x, ssa_979.y, ssa_806.z, ssa_806.w /* succs: block_133 */ } else { block block_132: /* preds: block_130 */ vec4 32 ssa_981 = vec4 ssa_960.w, ssa_822.y, ssa_806.z, ssa_806.w vec4 32 ssa_982 = vec4 ssa_960.y, ssa_822.y, ssa_806.z, ssa_806.w vec2 32 ssa_983 = fne ssa_44.xx, ssa_34.zw vec4 32 ssa_984 = bcsel ssa_983.xxxx, ssa_982, ssa_981 vec4 32 ssa_985 = vec4 ssa_984.x, ssa_960.z, ssa_984.z, ssa_984.w vec4 32 ssa_986 = vec4 ssa_984.x, ssa_960.x, ssa_984.z, ssa_984.w vec4 32 ssa_987 = bcsel ssa_983.yyyy, ssa_986, ssa_985 /* succs: block_133 */ } block block_133: /* preds: block_131 block_132 */ vec4 32 ssa_988 = phi block_131: ssa_980, block_132: ssa_987 vec4 32 ssa_989 = fadd ssa_44.yyyy, ssa_988.zzzz vec4 32 ssa_990 = vec4 ssa_988.x, ssa_988.y, ssa_989.z, ssa_988.w /* succs: block_144 */ } else { block block_134: /* preds: block_129 */ vec1 32 ssa_991 = flt ssa_822.x, ssa_44.y /* succs: block_135 block_142 */ if ssa_991 { block block_135: /* preds: block_134 */ vec4 32 ssa_992 = vec4 ssa_255.x, ssa_952.y, ssa_952.z, ssa_952.w vec4 32 ssa_993 = fadd ssa_41.wwww, -ssa_992 vec4 32 ssa_994 = vec4 ssa_993.x, ssa_952.y, ssa_952.z, ssa_952.w vec2 32 ssa_995 = fge ssa_993.xx, ssa_44.ww vec1 32 ssa_996 = imov ssa_995.x /* succs: block_136 
block_137 */ if ssa_996 { block block_136: /* preds: block_135 */ vec4 32 ssa_997 = vec4 ssa_953.x, ssa_953.y, ssa_44.x, ssa_44.y vec1 32 ssa_998 = iadd ssa_5, ssa_826.y vec1 32 ssa_999 = ishl ssa_998, ssa_37 vec1 32 ssa_1000 = iadd ssa_39, ssa_999 vec4 32 ssa_1001 = intrinsic load_ubo (ssa_2, ssa_1000) () () vec4 32 ssa_1002 = fmul ssa_1001.wzyx, ssa_997 vec4 32 ssa_1003 = fne ssa_997, ssa_997 vec4 32 ssa_1004 = bcsel ssa_1003, ssa_1002, ssa_8 vec4 32 ssa_1005 = fne ssa_1001.wzyx, ssa_1001.wzyx vec4 32 ssa_1006 = bcsel ssa_1005, ssa_1002, ssa_1004 vec4 32 ssa_1007 = fne ssa_1002, ssa_1002 vec4 32 ssa_1008 = bcsel ssa_1007, ssa_1006, ssa_1002 vec4 32 ssa_1009 = fdot_replicated4 ssa_1008, ssa_9 vec1 32 ssa_1010 = iadd ssa_10, ssa_826.y vec1 32 ssa_1011 = ishl ssa_1010, ssa_37 vec1 32 ssa_1012 = iadd ssa_39, ssa_1011 vec4 32 ssa_1013 = intrinsic load_ubo (ssa_2, ssa_1012) () () vec4 32 ssa_1014 = fmul ssa_1013.wzyx, ssa_997 vec4 32 ssa_1015 = bcsel ssa_1003, ssa_1014, ssa_8 vec4 32 ssa_1016 = fne ssa_1013.wzyx, ssa_1013.wzyx vec4 32 ssa_1017 = bcsel ssa_1016, ssa_1014, ssa_1015 vec4 32 ssa_1018 = fne ssa_1014, ssa_1014 vec4 32 ssa_1019 = bcsel ssa_1018, ssa_1017, ssa_1014 vec4 32 ssa_1020 = fdot_replicated4 ssa_1019, ssa_9 vec4 32 ssa_1021 = vec4 ssa_1009.x, ssa_1020.x, ssa_806.z, ssa_806.w vec4 32 ssa_1022 = fmul ssa_1021.xyyy, ssa_951.zwww vec4 32 ssa_1023 = fne ssa_951.zwww, ssa_951.zwww vec4 32 ssa_1024 = bcsel ssa_1023, ssa_1022, ssa_8 vec4 32 ssa_1025 = fne ssa_1021.xyyy, ssa_1021.xyyy vec4 32 ssa_1026 = bcsel ssa_1025, ssa_1022, ssa_1024 vec4 32 ssa_1027 = fne ssa_1022, ssa_1022 vec4 32 ssa_1028 = bcsel ssa_1027, ssa_1026, ssa_1022 vec4 32 ssa_1029 = fadd ssa_44.yyyy, -ssa_1028.yyyy vec4 32 ssa_1030 = vec4 ssa_1028.x, ssa_1029.y, ssa_806.z, ssa_806.w /* succs: block_141 */ } else { block block_137: /* preds: block_135 */ vec1 32 ssa_1031 = iadd ssa_5, ssa_826.y vec1 32 ssa_1032 = ishl ssa_1031, ssa_37 vec1 32 ssa_1033 = iadd ssa_39, ssa_1032 vec4 32 ssa_1034 = intrinsic load_ubo (ssa_2, ssa_1033) () () vec1 32 ssa_1035 = iadd ssa_10, ssa_826.y vec1 32 ssa_1036 = ishl ssa_1035, ssa_37 vec1 32 ssa_1037 = iadd ssa_39, ssa_1036 vec4 32 ssa_1038 = intrinsic load_ubo (ssa_2, ssa_1037) () () vec4 32 ssa_1039 = imov ssa_1038.wzyx vec2 32 ssa_1040 = fne ssa_44.xx, ssa_34.zw vec1 32 ssa_1041 = feq ssa_44.x, ssa_34.w /* succs: block_138 block_139 */ if ssa_1041 { block block_138: /* preds: block_137 */ vec4 32 ssa_1042 = vec4 ssa_1034.w, ssa_1034.z, ssa_806.z, ssa_806.w vec4 32 ssa_1043 = vec4 ssa_1038.y, ssa_1038.x, ssa_806.z, ssa_806.w vec4 32 ssa_1044 = bcsel ssa_1040.xxxx, ssa_1043, ssa_1042 /* succs: block_140 */ } else { block block_139: /* preds: block_137 */ vec4 32 ssa_1045 = vec4 ssa_1038.w, ssa_1038.z, ssa_806.z, ssa_806.w vec4 32 ssa_1046 = vec4 ssa_1034.y, ssa_1034.x, ssa_806.z, ssa_806.w vec4 32 ssa_1047 = bcsel ssa_1040.xxxx, ssa_1046, ssa_1045 /* succs: block_140 */ } block block_140: /* preds: block_138 block_139 */ vec4 32 ssa_1048 = phi block_138: ssa_1044, block_139: ssa_1047 /* succs: block_141 */ } block block_141: /* preds: block_136 block_140 */ vec4 32 ssa_1049 = phi block_136: ssa_994, block_140: ssa_1039 vec4 32 ssa_1050 = phi block_136: ssa_997, block_140: ssa_953 vec4 32 ssa_1051 = phi block_136: ssa_1030, block_140: ssa_1048 /* succs: block_143 */ } else { block block_142: /* preds: block_134 */ vec1 32 ssa_1052 = iadd ssa_5, ssa_826.y vec1 32 ssa_1053 = ishl ssa_1052, ssa_37 vec1 32 ssa_1054 = iadd ssa_39, ssa_1053 vec4 32 ssa_1055 = intrinsic load_ubo (ssa_2, 
ssa_1054) () () vec4 32 ssa_1056 = fmul ssa_1055.wzyx, ssa_67 vec4 32 ssa_1057 = bcsel ssa_73, ssa_1056, ssa_8 vec4 32 ssa_1058 = fne ssa_1055.wzyx, ssa_1055.wzyx vec4 32 ssa_1059 = bcsel ssa_1058, ssa_1056, ssa_1057 vec4 32 ssa_1060 = fne ssa_1056, ssa_1056 vec4 32 ssa_1061 = bcsel ssa_1060, ssa_1059, ssa_1056 vec4 32 ssa_1062 = fdot_replicated4 ssa_1061, ssa_9 vec1 32 ssa_1063 = iadd ssa_10, ssa_826.y vec1 32 ssa_1064 = ishl ssa_1063, ssa_37 vec1 32 ssa_1065 = iadd ssa_39, ssa_1064 vec4 32 ssa_1066 = intrinsic load_ubo (ssa_2, ssa_1065) () () vec4 32 ssa_1067 = fmul ssa_1066.wzyx, ssa_67 vec4 32 ssa_1068 = bcsel ssa_73, ssa_1067, ssa_8 vec4 32 ssa_1069 = fne ssa_1066.wzyx, ssa_1066.wzyx vec4 32 ssa_1070 = bcsel ssa_1069, ssa_1067, ssa_1068 vec4 32 ssa_1071 = fne ssa_1067, ssa_1067 vec4 32 ssa_1072 = bcsel ssa_1071, ssa_1070, ssa_1067 vec4 32 ssa_1073 = fdot_replicated4 ssa_1072, ssa_9 vec4 32 ssa_1074 = vec4 ssa_1062.x, ssa_1073.x, ssa_806.z, ssa_806.w /* succs: block_143 */ } block block_143: /* preds: block_141 block_142 */ vec4 32 ssa_1075 = phi block_141: ssa_1049, block_142: ssa_952 vec4 32 ssa_1076 = phi block_141: ssa_1050, block_142: ssa_953 vec4 32 ssa_1077 = phi block_141: ssa_1051, block_142: ssa_1074 vec4 32 ssa_1078 = fadd ssa_44.zzzz, ssa_1077.zzzz vec4 32 ssa_1079 = vec4 ssa_1077.x, ssa_1077.y, ssa_1078.z, ssa_1077.w /* succs: block_144 */ } block block_144: /* preds: block_133 block_143 */ vec4 32 ssa_1080 = phi block_133: ssa_961, block_143: ssa_1075 vec4 32 ssa_1081 = phi block_133: ssa_953, block_143: ssa_1076 vec4 32 ssa_1082 = phi block_133: ssa_990, block_143: ssa_1079 vec1 32 ssa_1083 = load_const (0x00000050 /* 0.000000 */) vec1 32 ssa_1084 = intrinsic load_ubo (ssa_2, ssa_1083) () () vec1 32 ssa_1085 = ine ssa_1084, ssa_2 /* succs: block_145 block_146 */ if ssa_1085 { block block_145: /* preds: block_144 */ vec4 32 ssa_1086 = fadd ssa_44.yyyy, -ssa_1082.xxxx vec4 32 ssa_1087 = vec4 ssa_1082.y, ssa_1086.y, ssa_1082.z, ssa_1082.w /* succs: block_147 */ } else { block block_146: /* preds: block_144 */ /* succs: block_147 */ } block block_147: /* preds: block_145 block_146 */ vec4 32 ssa_1088 = phi block_145: ssa_1087, block_146: ssa_1082 vec1 32 ssa_1089 = load_const (0x00000060 /* 0.000000 */) vec1 32 ssa_1090 = intrinsic load_ubo (ssa_2, ssa_1089) () () vec1 32 ssa_1091 = ine ssa_1090, ssa_2 /* succs: block_148 block_149 */ if ssa_1091 { block block_148: /* preds: block_147 */ vec4 32 ssa_1092 = fadd ssa_44.yyyy, -ssa_1088.yxxx vec4 32 ssa_1093 = vec4 ssa_1092.x, ssa_1092.y, ssa_1088.z, ssa_1088.w /* succs: block_150 */ } else { block block_149: /* preds: block_147 */ /* succs: block_150 */ } block block_150: /* preds: block_148 block_149 */ vec4 32 ssa_1094 = phi block_148: ssa_1093, block_149: ssa_1088 vec4 32 ssa_1095 = imov ssa_1094.xyyy break /* succs: block_162 */ } else { block block_151: /* preds: block_2 */ /* succs: block_152 */ } block block_152: /* preds: block_151 */ vec1 32 ssa_1096 = ieq ssa_6, ssa_32 vec1 32 ssa_1097 = ior ssa_33, ssa_1096 /* succs: block_153 block_160 */ if ssa_1097 { block block_153: /* preds: block_152 */ vec1 32 ssa_1098 = iadd ssa_16, ssa_31.x vec1 32 ssa_1099 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1100 = ishl ssa_1098, ssa_1099 vec1 32 ssa_1101 = load_const (0x00000140 /* 0.000000 */) vec1 32 ssa_1102 = iadd ssa_1101, ssa_1100 vec4 32 ssa_1103 = intrinsic load_ubo (ssa_2, ssa_1102) () () vec4 32 ssa_1104 = fmul ssa_1103.wzzz, ssa_29.xyyy vec4 32 ssa_1105 = fne ssa_29.xyyy, ssa_29.xyyy vec4 32 ssa_1106 = bcsel 
ssa_1105, ssa_1104, ssa_8 vec4 32 ssa_1107 = fne ssa_1103.wzzz, ssa_1103.wzzz vec4 32 ssa_1108 = bcsel ssa_1107, ssa_1104, ssa_1106 vec4 32 ssa_1109 = fne ssa_1104, ssa_1104 vec4 32 ssa_1110 = bcsel ssa_1109, ssa_1108, ssa_1104 vec1 32 ssa_1111 = iadd ssa_14, ssa_31.x vec1 32 ssa_1112 = ishl ssa_1111, ssa_1099 vec1 32 ssa_1113 = iadd ssa_1101, ssa_1112 vec4 32 ssa_1114 = intrinsic load_ubo (ssa_2, ssa_1113) () () vec4 32 ssa_1115 = fmul ssa_1110.xyyy, ssa_28.xyyy vec4 32 ssa_1116 = fne ssa_28.xyyy, ssa_28.xyyy vec4 32 ssa_1117 = bcsel ssa_1116, ssa_1115, ssa_8 vec4 32 ssa_1118 = fne ssa_1110.xyyy, ssa_1110.xyyy vec4 32 ssa_1119 = bcsel ssa_1118, ssa_1115, ssa_1117 vec4 32 ssa_1120 = fne ssa_1115, ssa_1115 vec4 32 ssa_1121 = bcsel ssa_1120, ssa_1119, ssa_1115 vec4 32 ssa_1122 = fadd ssa_1121, ssa_28.zwww vec4 32 ssa_1123 = intrinsic load_input (ssa_2) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_1124 = fmul ssa_1114.wwww, ssa_1123.wwww vec4 32 ssa_1125 = fne ssa_1123.wwww, ssa_1123.wwww vec4 32 ssa_1126 = bcsel ssa_1125, ssa_1124, ssa_8 vec4 32 ssa_1127 = fne ssa_1114.wwww, ssa_1114.wwww vec4 32 ssa_1128 = bcsel ssa_1127, ssa_1124, ssa_1126 vec4 32 ssa_1129 = fne ssa_1124, ssa_1124 vec4 32 ssa_1130 = bcsel ssa_1129, ssa_1128, ssa_1124 vec4 32 ssa_1131 = fadd ssa_1130, ssa_1114.wwww vec4 32 ssa_1132 = fadd ssa_1103.yxxx, ssa_1122.xyyy vec4 32 ssa_1133 = fmul ssa_28.yyyy, ssa_1103.zzzz vec4 32 ssa_1134 = fne ssa_1103.zzzz, ssa_1103.zzzz vec4 32 ssa_1135 = bcsel ssa_1134, ssa_1133, ssa_8 vec4 32 ssa_1136 = fne ssa_28.yyyy, ssa_28.yyyy vec4 32 ssa_1137 = bcsel ssa_1136, ssa_1133, ssa_1135 vec4 32 ssa_1138 = fne ssa_1133, ssa_1133 vec4 32 ssa_1139 = bcsel ssa_1138, ssa_1137, ssa_1133 vec4 32 ssa_1140 = fadd ssa_1139, -ssa_28.yyyy vec4 32 ssa_1141 = fadd ssa_1132.xxxx, ssa_1131.xxxx vec4 32 ssa_1142 = fadd ssa_1132.yyyy, ssa_1140.yyyy vec4 32 ssa_1143 = vec4 ssa_1141.x, ssa_1142.y, ssa_29.z, ssa_29.w vec1 32 ssa_1144 = iadd ssa_5, ssa_31.x vec1 32 ssa_1145 = ishl ssa_1144, ssa_1099 vec1 32 ssa_1146 = iadd ssa_1101, ssa_1145 vec4 32 ssa_1147 = intrinsic load_ubo (ssa_2, ssa_1146) () () vec4 32 ssa_1148 = fmul ssa_1147.wzyx, ssa_1143 vec4 32 ssa_1149 = fne ssa_1143, ssa_1143 vec4 32 ssa_1150 = bcsel ssa_1149, ssa_1148, ssa_8 vec4 32 ssa_1151 = fne ssa_1147.wzyx, ssa_1147.wzyx vec4 32 ssa_1152 = bcsel ssa_1151, ssa_1148, ssa_1150 vec4 32 ssa_1153 = fne ssa_1148, ssa_1148 vec4 32 ssa_1154 = bcsel ssa_1153, ssa_1152, ssa_1148 vec4 32 ssa_1155 = fdot_replicated4 ssa_1154, ssa_9 vec1 32 ssa_1156 = iadd ssa_10, ssa_31.x vec1 32 ssa_1157 = ishl ssa_1156, ssa_1099 vec1 32 ssa_1158 = iadd ssa_1101, ssa_1157 vec4 32 ssa_1159 = intrinsic load_ubo (ssa_2, ssa_1158) () () vec4 32 ssa_1160 = fmul ssa_1159.wzyx, ssa_1143 vec4 32 ssa_1161 = bcsel ssa_1149, ssa_1160, ssa_8 vec4 32 ssa_1162 = fne ssa_1159.wzyx, ssa_1159.wzyx vec4 32 ssa_1163 = bcsel ssa_1162, ssa_1160, ssa_1161 vec4 32 ssa_1164 = fne ssa_1160, ssa_1160 vec4 32 ssa_1165 = bcsel ssa_1164, ssa_1163, ssa_1160 vec4 32 ssa_1166 = fdot_replicated4 ssa_1165, ssa_9 vec1 32 ssa_1167 = iadd ssa_11, ssa_31.x vec1 32 ssa_1168 = ishl ssa_1167, ssa_1099 vec1 32 ssa_1169 = iadd ssa_1101, ssa_1168 vec4 32 ssa_1170 = intrinsic load_ubo (ssa_2, ssa_1169) () () vec4 32 ssa_1171 = fmul ssa_1170.wzyx, ssa_1143 vec4 32 ssa_1172 = bcsel ssa_1149, ssa_1171, ssa_8 vec4 32 ssa_1173 = fne ssa_1170.wzyx, ssa_1170.wzyx vec4 32 ssa_1174 = bcsel ssa_1173, ssa_1171, ssa_1172 vec4 32 ssa_1175 = fne ssa_1171, ssa_1171 vec4 32 ssa_1176 = bcsel ssa_1175, ssa_1174, ssa_1171 vec4 
32 ssa_1177 = fdot_replicated4 ssa_1176, ssa_9 vec4 32 ssa_1178 = vec4 ssa_1155.x, ssa_1166.x, ssa_1177.x, ssa_29.w vec1 32 ssa_1179 = load_const (0x00000180 /* 0.000000 */) vec4 32 ssa_1180 = intrinsic load_ubo (ssa_2, ssa_1179) () () vec4 32 ssa_1181 = fadd -ssa_1170.xxxx, ssa_1180.yyyy vec1 32 ssa_1182 = load_const (0x00000190 /* 0.000000 */) vec4 32 ssa_1183 = intrinsic load_ubo (ssa_2, ssa_1182) () () vec1 32 ssa_1184 = flt ssa_1183.x, ssa_1181.z vec1 32 ssa_1185 = fne ssa_1183.x, ssa_1180.w vec2 32 ssa_1186 = vec2 ssa_1185, ssa_1184 vec2 32 ssa_1187 = inot ssa_1186 vec1 32 ssa_1188 = ball_iequal2 ssa_1187, ssa_12 /* succs: block_154 block_155 */ if ssa_1188 { block block_154: /* preds: block_153 */ vec1 32 ssa_1189 = frcp ssa_1181.z vec4 32 ssa_1190 = fadd ssa_1155.xxxx, ssa_1180.wwww vec4 32 ssa_1191 = fmul ssa_1180.zzzz, ssa_1189.xxxx vec4 32 ssa_1192 = fmov -ssa_1191 vec4 32 ssa_1193 = fne ssa_1189.xxxx, ssa_1189.xxxx vec4 32 ssa_1194 = bcsel ssa_1193, ssa_1192, ssa_8 vec4 32 ssa_1195 = fne -ssa_1180.zzzz, -ssa_1180.zzzz vec4 32 ssa_1196 = bcsel ssa_1195, ssa_1192, ssa_1194 vec4 32 ssa_1197 = fne -ssa_1191, -ssa_1191 vec4 32 ssa_1198 = bcsel ssa_1197, ssa_1196, ssa_1192 vec4 32 ssa_1199 = fadd ssa_1198, ssa_1190.xxxx vec4 32 ssa_1200 = vec4 ssa_1199.x, ssa_1166.x, ssa_1177.x, ssa_29.w /* succs: block_156 */ } else { block block_155: /* preds: block_153 */ /* succs: block_156 */ } block block_156: /* preds: block_154 block_155 */ vec4 32 ssa_1201 = phi block_154: ssa_1200, block_155: ssa_1178 vec4 32 ssa_1202 = intrinsic load_ubo (ssa_2, ssa_1101) () () vec4 32 ssa_1203 = fmul ssa_1202.wzyx, ssa_1201 vec4 32 ssa_1204 = fne ssa_1201, ssa_1201 vec4 32 ssa_1205 = bcsel ssa_1204, ssa_1203, ssa_8 vec4 32 ssa_1206 = fne ssa_1202.wzyx, ssa_1202.wzyx vec4 32 ssa_1207 = bcsel ssa_1206, ssa_1203, ssa_1205 vec4 32 ssa_1208 = fne ssa_1203, ssa_1203 vec4 32 ssa_1209 = bcsel ssa_1208, ssa_1207, ssa_1203 vec4 32 ssa_1210 = fdot_replicated4 ssa_1209, ssa_9 vec1 32 ssa_1211 = load_const (0x00000150 /* 0.000000 */) vec4 32 ssa_1212 = intrinsic load_ubo (ssa_2, ssa_1211) () () vec4 32 ssa_1213 = fmul ssa_1212.wzyx, ssa_1201 vec4 32 ssa_1214 = bcsel ssa_1204, ssa_1213, ssa_8 vec4 32 ssa_1215 = fne ssa_1212.wzyx, ssa_1212.wzyx vec4 32 ssa_1216 = bcsel ssa_1215, ssa_1213, ssa_1214 vec4 32 ssa_1217 = fne ssa_1213, ssa_1213 vec4 32 ssa_1218 = bcsel ssa_1217, ssa_1216, ssa_1213 vec4 32 ssa_1219 = fdot_replicated4 ssa_1218, ssa_9 vec1 32 ssa_1220 = load_const (0x00000160 /* 0.000000 */) vec4 32 ssa_1221 = intrinsic load_ubo (ssa_2, ssa_1220) () () vec4 32 ssa_1222 = fmul ssa_1221.wzyx, ssa_1201 vec4 32 ssa_1223 = bcsel ssa_1204, ssa_1222, ssa_8 vec4 32 ssa_1224 = fne ssa_1221.wzyx, ssa_1221.wzyx vec4 32 ssa_1225 = bcsel ssa_1224, ssa_1222, ssa_1223 vec4 32 ssa_1226 = fne ssa_1222, ssa_1222 vec4 32 ssa_1227 = bcsel ssa_1226, ssa_1225, ssa_1222 vec4 32 ssa_1228 = fdot_replicated4 ssa_1227, ssa_9 vec1 32 ssa_1229 = load_const (0x00000170 /* 0.000000 */) vec4 32 ssa_1230 = intrinsic load_ubo (ssa_2, ssa_1229) () () vec4 32 ssa_1231 = fmul ssa_1230.wzyx, ssa_1201 vec4 32 ssa_1232 = bcsel ssa_1204, ssa_1231, ssa_8 vec4 32 ssa_1233 = fne ssa_1230.wzyx, ssa_1230.wzyx vec4 32 ssa_1234 = bcsel ssa_1233, ssa_1231, ssa_1232 vec4 32 ssa_1235 = fne ssa_1231, ssa_1231 vec4 32 ssa_1236 = bcsel ssa_1235, ssa_1234, ssa_1231 vec4 32 ssa_1237 = fdot_replicated4 ssa_1236, ssa_9 vec4 32 ssa_1238 = vec4 ssa_1210.x, ssa_1219.x, ssa_1228.x, ssa_1237.x vec1 32 ssa_1239 = intrinsic load_ubo (ssa_2, ssa_5) () () vec1 32 ssa_1240 = ine 
ssa_1239, ssa_2 /* succs: block_157 block_158 */ if ssa_1240 { block block_157: /* preds: block_156 */ vec4 32 ssa_1241 = fmax -ssa_1123.zwzw, ssa_1123.zwzw vec4 32 ssa_1242 = f2i32 ssa_30.zxxx vec3 32 ssa_1243 = vec3 ssa_1242.x, ssa_1242.y, ssa_31.z vec1 32 ssa_1244 = iadd ssa_5, ssa_1242.x vec1 32 ssa_1245 = ishl ssa_1244, ssa_1099 vec1 32 ssa_1246 = iadd ssa_1101, ssa_1245 vec4 32 ssa_1247 = intrinsic load_ubo (ssa_2, ssa_1246) () () vec4 32 ssa_1248 = fmul ssa_1247.yxxx, ssa_1241 vec4 32 ssa_1249 = fne ssa_1241, ssa_1241 vec4 32 ssa_1250 = bcsel ssa_1249, ssa_1248, ssa_8 vec4 32 ssa_1251 = fne ssa_1247.yxxx, ssa_1247.yxxx vec4 32 ssa_1252 = bcsel ssa_1251, ssa_1248, ssa_1250 vec4 32 ssa_1253 = fne ssa_1248, ssa_1248 vec4 32 ssa_1254 = bcsel ssa_1253, ssa_1252, ssa_1248 vec4 32 ssa_1255 = flt ssa_1241, ssa_1183.yyyy vec4 32 ssa_1256 = b2f ssa_1255 vec4 32 ssa_1257 = vec4 ssa_1241.x, ssa_1241.y, ssa_1256.z, ssa_1256.w vec1 32 ssa_1258 = iadd ssa_17, ssa_1242.y vec1 32 ssa_1259 = ishl ssa_1258, ssa_1099 vec1 32 ssa_1260 = iadd ssa_1101, ssa_1259 vec4 32 ssa_1261 = intrinsic load_ubo (ssa_2, ssa_1260) () () vec4 32 ssa_1262 = imov ssa_1261.wzyx vec4 32 ssa_1263 = fmul ssa_1256.zwww, ssa_1247.wzzz vec4 32 ssa_1264 = fne ssa_1247.wzzz, ssa_1247.wzzz vec4 32 ssa_1265 = bcsel ssa_1264, ssa_1263, ssa_8 vec4 32 ssa_1266 = fne ssa_1256.zwww, ssa_1256.zwww vec4 32 ssa_1267 = bcsel ssa_1266, ssa_1263, ssa_1265 vec4 32 ssa_1268 = fne ssa_1263, ssa_1263 vec4 32 ssa_1269 = bcsel ssa_1268, ssa_1267, ssa_1263 vec4 32 ssa_1270 = fadd ssa_1269, ssa_1254.xyyy vec1 32 ssa_1271 = iadd ssa_18, ssa_1242.y vec1 32 ssa_1272 = ishl ssa_1271, ssa_1099 vec1 32 ssa_1273 = iadd ssa_1101, ssa_1272 vec4 32 ssa_1274 = intrinsic load_ubo (ssa_2, ssa_1273) () () vec4 32 ssa_1275 = imov ssa_1274.wzyx vec4 32 ssa_1276 = fadd ssa_1183.yyyy, -ssa_1270.yyyy vec4 32 ssa_1277 = vec4 ssa_1270.x, ssa_1276.y, ssa_27.z, ssa_27.w /* succs: block_159 */ } else { block block_158: /* preds: block_156 */ vec4 32 ssa_1278 = fmax -ssa_1123.zwzw, ssa_1123.zwzw vec4 32 ssa_1279 = f2i32 ssa_30.zwww vec3 32 ssa_1280 = vec3 ssa_1279.x, ssa_1279.y, ssa_31.z vec1 32 ssa_1281 = iadd ssa_5, ssa_1279.x vec1 32 ssa_1282 = ishl ssa_1281, ssa_1099 vec1 32 ssa_1283 = iadd ssa_1101, ssa_1282 vec4 32 ssa_1284 = intrinsic load_ubo (ssa_2, ssa_1283) () () vec4 32 ssa_1285 = fmul ssa_1284.yxxx, ssa_1278 vec4 32 ssa_1286 = fne ssa_1278, ssa_1278 vec4 32 ssa_1287 = bcsel ssa_1286, ssa_1285, ssa_8 vec4 32 ssa_1288 = fne ssa_1284.yxxx, ssa_1284.yxxx vec4 32 ssa_1289 = bcsel ssa_1288, ssa_1285, ssa_1287 vec4 32 ssa_1290 = fne ssa_1285, ssa_1285 vec4 32 ssa_1291 = bcsel ssa_1290, ssa_1289, ssa_1285 vec4 32 ssa_1292 = flt ssa_1278, ssa_1183.yyyy vec4 32 ssa_1293 = b2f ssa_1292 vec4 32 ssa_1294 = vec4 ssa_1278.x, ssa_1278.y, ssa_1293.z, ssa_1293.w vec1 32 ssa_1295 = iadd ssa_5, ssa_1279.y vec1 32 ssa_1296 = ishl ssa_1295, ssa_1099 vec1 32 ssa_1297 = iadd ssa_1101, ssa_1296 vec4 32 ssa_1298 = intrinsic load_ubo (ssa_2, ssa_1297) () () vec4 32 ssa_1299 = imov ssa_1298.wzyx vec4 32 ssa_1300 = fmul ssa_1293.zwww, ssa_1284.wzzz vec4 32 ssa_1301 = fne ssa_1284.wzzz, ssa_1284.wzzz vec4 32 ssa_1302 = bcsel ssa_1301, ssa_1300, ssa_8 vec4 32 ssa_1303 = fne ssa_1293.zwww, ssa_1293.zwww vec4 32 ssa_1304 = bcsel ssa_1303, ssa_1300, ssa_1302 vec4 32 ssa_1305 = fne ssa_1300, ssa_1300 vec4 32 ssa_1306 = bcsel ssa_1305, ssa_1304, ssa_1300 vec4 32 ssa_1307 = fadd ssa_1306, ssa_1291.xyyy vec1 32 ssa_1308 = iadd ssa_10, ssa_1279.y vec1 32 ssa_1309 = ishl ssa_1308, ssa_1099 vec1 32 
ssa_1310 = iadd ssa_1101, ssa_1309 vec4 32 ssa_1311 = intrinsic load_ubo (ssa_2, ssa_1310) () () vec4 32 ssa_1312 = imov ssa_1311.wzyx vec4 32 ssa_1313 = fadd ssa_1183.yyyy, -ssa_1307.yyyy vec4 32 ssa_1314 = vec4 ssa_1307.x, ssa_1313.y, ssa_27.z, ssa_27.w /* succs: block_159 */ } block block_159: /* preds: block_157 block_158 */ vec4 32 ssa_1315 = phi block_157: ssa_1257, block_158: ssa_1294 vec4 32 ssa_1316 = phi block_157: ssa_1277, block_158: ssa_1314 vec4 32 ssa_1317 = phi block_157: ssa_1275, block_158: ssa_1312 vec4 32 ssa_1318 = phi block_157: ssa_1262, block_158: ssa_1299 vec3 32 ssa_1319 = phi block_157: ssa_1243, block_158: ssa_1280 vec4 32 ssa_1320 = fadd ssa_1317, -ssa_1318 vec4 32 ssa_1321 = fmul ssa_1320, ssa_1315.yyyy vec4 32 ssa_1322 = fne ssa_1315.yyyy, ssa_1315.yyyy vec4 32 ssa_1323 = bcsel ssa_1322, ssa_1321, ssa_8 vec4 32 ssa_1324 = fne ssa_1320, ssa_1320 vec4 32 ssa_1325 = bcsel ssa_1324, ssa_1321, ssa_1323 vec4 32 ssa_1326 = fne ssa_1321, ssa_1321 vec4 32 ssa_1327 = bcsel ssa_1326, ssa_1325, ssa_1321 vec4 32 ssa_1328 = fadd ssa_1327, ssa_1318 break /* succs: block_162 */ } else { block block_160: /* preds: block_152 */ /* succs: block_161 */ } block block_161: /* preds: block_160 */ break /* succs: block_162 */ } block block_162: /* preds: block_4 block_150 block_159 block_161 */ vec1 32 ssa_1329 = phi block_4: ssa_19, block_150: ssa_15, block_159: ssa_15, block_161: ssa_15 vec4 32 ssa_1330 = phi block_4: ssa_20, block_150: ssa_1095, block_159: ssa_1316, block_161: ssa_20 vec4 32 ssa_1331 = phi block_4: ssa_21, block_150: ssa_807, block_159: ssa_1316, block_161: ssa_21 vec4 32 ssa_1332 = phi block_4: ssa_22, block_150: ssa_520, block_159: ssa_1316, block_161: ssa_22 vec4 32 ssa_1333 = phi block_4: ssa_23, block_150: ssa_231, block_159: ssa_1328, block_161: ssa_23 vec4 32 ssa_1334 = phi block_4: ssa_24, block_150: ssa_163, block_159: ssa_1238, block_161: ssa_24 vec4 32 ssa_1335 = phi block_4: ssa_25, block_150: ssa_1080, block_159: ssa_25, block_161: ssa_25 vec4 32 ssa_1336 = phi block_4: ssa_26, block_150: ssa_1081, block_159: ssa_26, block_161: ssa_26 vec4 32 ssa_1337 = phi block_4: ssa_27, block_150: ssa_822, block_159: ssa_1316, block_161: ssa_27 vec4 32 ssa_1338 = phi block_4: ssa_52, block_150: ssa_52, block_159: ssa_28, block_161: ssa_28 vec4 32 ssa_1339 = phi block_4: ssa_45, block_150: ssa_67, block_159: ssa_1143, block_161: ssa_29 vec4 32 ssa_1340 = phi block_4: ssa_42, block_150: ssa_42, block_159: ssa_30, block_161: ssa_30 vec3 32 ssa_1341 = phi block_4: ssa_47, block_150: ssa_827, block_159: ssa_1319, block_161: ssa_31 vec1 32 ssa_1342 = phi block_4: ssa_6, block_150: ssa_32, block_159: ssa_32, block_161: ssa_32 /* succs: block_163 block_164 */ if ssa_1329 { block block_163: /* preds: block_162 */ break /* succs: block_166 */ } else { block block_164: /* preds: block_162 */ /* succs: block_165 */ } block block_165: /* preds: block_164 */ /* succs: block_1 */ } block block_166: /* preds: block_163 */ intrinsic store_output (ssa_1334, ssa_2) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr0 */ intrinsic store_output (ssa_1333, ssa_2) () (32, 15, 0) /* base=32 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr1 */ intrinsic store_output (ssa_1332, ssa_2) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr2 */ intrinsic store_output (ssa_1331, ssa_2) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr3 */ intrinsic store_output (ssa_1330, ssa_2) () (35, 15, 0) /* base=35 
*/ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr4 */ /* succs: block_0 */ block block_0: } NIR (final form) for vertex shader: shader: MESA_SHADER_VERTEX name: GLSL18 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE pica_uniforms uniforms (0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 vs_in_reg0 (VERT_ATTRIB_GENERIC0, 16, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr0 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr1 (VARYING_SLOT_VAR1, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr2 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr3 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_out_attr4 (VARYING_SLOT_VAR4, 35, 0) decl_function main returning void impl main { decl_reg vec1 32 r0 decl_reg vec4 32 r1 decl_reg vec4 32 r2 decl_reg vec4 32 r3 decl_reg vec4 32 r4 decl_reg vec4 32 r5 decl_reg vec4 32 r6 decl_reg vec4 32 r7 decl_reg vec4 32 r8 decl_reg vec4 32 r9 decl_reg vec4 32 r10 decl_reg vec4 32 r11 decl_reg vec3 32 r12 decl_reg vec1 32 r13 decl_reg vec4 32 r14 decl_reg vec3 32 r15 decl_reg vec4 32 r16 decl_reg vec4 32 r17 decl_reg vec4 32 r18 decl_reg vec4 32 r19 decl_reg vec4 32 r20 decl_reg vec4 32 r21 decl_reg vec4 32 r22 decl_reg vec4 32 r23 decl_reg vec4 32 r24 decl_reg vec4 32 r25 decl_reg vec4 32 r26 decl_reg vec4 32 r27 decl_reg vec4 32 r28 decl_reg vec4 32 r29 decl_reg vec4 32 r30 decl_reg vec4 32 r31 decl_reg vec4 32 r32 decl_reg vec4 32 r33 decl_reg vec4 32 r34 decl_reg vec4 32 r35 decl_reg vec4 32 r36 decl_reg vec4 32 r37 decl_reg vec4 32 r38 decl_reg vec4 32 r39 decl_reg vec2 32 r40 decl_reg vec4 32 r41 decl_reg vec4 32 r42 decl_reg vec4 32 r43 decl_reg vec4 32 r44 decl_reg vec4 32 r45 decl_reg vec4 32 r46 decl_reg vec4 32 r47 decl_reg vec4 32 r48 decl_reg vec4 32 r49 decl_reg vec4 32 r50 decl_reg vec4 32 r51 decl_reg vec4 32 r52 decl_reg vec4 32 r53 decl_reg vec4 32 r54 decl_reg vec4 32 r55 decl_reg vec4 32 r56 decl_reg vec4 32 r57 decl_reg vec4 32 r58 decl_reg vec4 32 r59 decl_reg vec4 32 r60 decl_reg vec4 32 r61 decl_reg vec4 32 r62 decl_reg vec4 32 r63 decl_reg vec4 32 r64 decl_reg vec4 32 r65 decl_reg vec4 32 r66 decl_reg vec4 32 r67 decl_reg vec4 32 r68 decl_reg vec4 32 r69 decl_reg vec4 32 r70 decl_reg vec4 32 r71 decl_reg vec4 32 r72 decl_reg vec4 32 r73 decl_reg vec4 32 r74 decl_reg vec4 32 r75 decl_reg vec4 32 r76 decl_reg vec4 32 r77 decl_reg vec4 32 r78 decl_reg vec4 32 r79 decl_reg vec4 32 r80 decl_reg vec4 32 r81 decl_reg vec4 32 r82 decl_reg vec4 32 r83 decl_reg vec4 32 r84 decl_reg vec4 32 r85 decl_reg vec4 32 r86 decl_reg vec4 32 r87 decl_reg vec4 32 r88 decl_reg vec4 32 r89 decl_reg vec4 32 r90 decl_reg vec2 32 r91 block block_0: /* preds: */ vec3 32 ssa_0 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_1 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x3f800000 /* 1.000000 */) vec1 32 ssa_2 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x0000006d /* 0.000000 */) vec1 32 ssa_4 = load_const (0x00000009 /* 0.000000 */) vec1 32 ssa_5 = load_const (0x00000020 /* 0.000000 */) vec1 32 ssa_6 = load_const (0x000000bf /* 0.000000 */) vec1 32 ssa_7 = load_const (0x0000001f /* 0.000000 */) vec4 32 ssa_8 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_9 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 
1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec1 32 ssa_10 = load_const (0x00000021 /* 0.000000 */) vec1 32 ssa_11 = load_const (0x00000022 /* 0.000000 */) vec2 32 ssa_12 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_13 = load_const (0xffffffff /* -nan */, 0xffffffff /* -nan */) vec1 32 ssa_14 = load_const (0x00000023 /* 0.000000 */) vec1 32 ssa_15 = load_const (0xffffffff /* -nan */) vec1 32 ssa_16 = load_const (0x00000024 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000025 /* 0.000000 */) vec1 32 ssa_18 = load_const (0x00000026 /* 0.000000 */) r13 = imov ssa_3 r12 = imov ssa_0 r11 = imov ssa_1 r10 = imov r11 r9 = imov r10 r8 = imov r9 r7 = imov r8 r6 = imov r7 r5 = imov r6 r4 = imov r5 r3 = imov r4 r2 = imov r3 r1 = imov r2 r0 = imov ssa_2 /* succs: block_1 */ loop { block block_1: /* preds: block_0 block_165 */ /* succs: block_2 */ loop { block block_2: /* preds: block_1 */ vec1 32 ssa_33 = ieq ssa_3, r13 /* succs: block_3 block_151 */ if ssa_33 { block block_3: /* preds: block_2 */ vec4 32 ssa_34 = intrinsic load_input (ssa_2) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_35 = f2i32 ssa_34.xxxx vec1 32 ssa_36 = iadd ssa_4, ssa_35.x vec1 32 ssa_37 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_38 = ishl ssa_36, ssa_37 vec1 32 ssa_39 = load_const (0x00000140 /* 0.000000 */) vec1 32 ssa_40 = iadd ssa_39, ssa_38 vec4 32 ssa_41 = intrinsic load_ubo (ssa_2, ssa_40) () () r11 = imov ssa_41.wzyx vec1 32 ssa_43 = load_const (0x00000190 /* 0.000000 */) vec4 32 ssa_44 = intrinsic load_ubo (ssa_2, ssa_43) () () r14.xy = imov ssa_34.zw r14.zw = imov ssa_44.xy vec4 32 ssa_46 = f2i32 ssa_41.wzzz r15.xy = imov ssa_46.xy r15.z = imov r12.z vec1 32 ssa_48 = iadd ssa_5, ssa_46.y vec1 32 ssa_49 = ishl ssa_48, ssa_37 vec1 32 ssa_50 = iadd ssa_39, ssa_49 vec4 32 ssa_51 = intrinsic load_ubo (ssa_2, ssa_50) () () r9 = imov ssa_51.wzyx vec1 32 ssa_53 = intrinsic load_ubo (ssa_2, ssa_2) () () vec1 32 ssa_54 = ine ssa_53, ssa_2 /* succs: block_4 block_5 */ if ssa_54 { block block_4: /* preds: block_3 */ r13 = imov ssa_6 break /* succs: block_162 */ } else { block block_5: /* preds: block_3 */ /* succs: block_6 */ } block block_6: /* preds: block_5 */ vec1 32 ssa_55 = iadd ssa_7, ssa_46.x vec1 32 ssa_56 = ishl ssa_55, ssa_37 vec1 32 ssa_57 = iadd ssa_39, ssa_56 vec4 32 ssa_58 = intrinsic load_ubo (ssa_2, ssa_57) () () vec4 32 ssa_59 = fmul ssa_34.zwww, ssa_51.wzzz vec4 32 ssa_60 = fne ssa_51.wzzz, ssa_51.wzzz vec4 32 ssa_61 = bcsel ssa_60, ssa_59, ssa_8 vec4 32 ssa_62 = fne ssa_34.zwww, ssa_34.zwww vec4 32 ssa_63 = bcsel ssa_62, ssa_59, ssa_61 vec4 32 ssa_64 = fne ssa_59, ssa_59 vec4 32 ssa_65 = bcsel ssa_64, ssa_63, ssa_59 vec4 32 ssa_66 = fadd ssa_65, ssa_51.yxxx r14.xy = imov ssa_66.xy r14.zw = imov ssa_44.xy vec1 32 ssa_68 = iadd ssa_5, ssa_46.x vec1 32 ssa_69 = ishl ssa_68, ssa_37 vec1 32 ssa_70 = iadd ssa_39, ssa_69 vec4 32 ssa_71 = intrinsic load_ubo (ssa_2, ssa_70) () () vec4 32 ssa_72 = fmul ssa_71.wzyx, r14 vec4 32 ssa_73 = fne r14, r14 vec4 32 ssa_74 = bcsel ssa_73, ssa_72, ssa_8 vec4 32 ssa_75 = fne ssa_71.wzyx, ssa_71.wzyx vec4 32 ssa_76 = bcsel ssa_75, ssa_72, ssa_74 vec4 32 ssa_77 = fne ssa_72, ssa_72 vec4 32 ssa_78 = bcsel ssa_77, ssa_76, ssa_72 vec4 32 ssa_79 = fdot_replicated4 ssa_78, ssa_9 vec1 32 ssa_80 = iadd ssa_10, ssa_46.x vec1 32 ssa_81 = ishl ssa_80, ssa_37 vec1 32 ssa_82 = iadd ssa_39, ssa_81 vec4 32 ssa_83 = intrinsic load_ubo (ssa_2, ssa_82) () () vec4 32 ssa_84 = fmul ssa_83.wzyx, r14 vec4 32 ssa_85 = bcsel 
ssa_73, ssa_84, ssa_8 vec4 32 ssa_86 = fne ssa_83.wzyx, ssa_83.wzyx vec4 32 ssa_87 = bcsel ssa_86, ssa_84, ssa_85 vec4 32 ssa_88 = fne ssa_84, ssa_84 vec4 32 ssa_89 = bcsel ssa_88, ssa_87, ssa_84 vec4 32 ssa_90 = fdot_replicated4 ssa_89, ssa_9 vec1 32 ssa_91 = iadd ssa_11, ssa_46.x vec1 32 ssa_92 = ishl ssa_91, ssa_37 vec1 32 ssa_93 = iadd ssa_39, ssa_92 vec4 32 ssa_94 = intrinsic load_ubo (ssa_2, ssa_93) () () vec4 32 ssa_95 = fmul ssa_94.wzyx, r14 vec4 32 ssa_96 = bcsel ssa_73, ssa_95, ssa_8 vec4 32 ssa_97 = fne ssa_94.wzyx, ssa_94.wzyx vec4 32 ssa_98 = bcsel ssa_97, ssa_95, ssa_96 vec4 32 ssa_99 = fne ssa_95, ssa_95 vec4 32 ssa_100 = bcsel ssa_99, ssa_98, ssa_95 vec4 32 ssa_101 = fdot_replicated4 ssa_100, ssa_9 r16.x = imov ssa_79.x r16.y = imov ssa_90.x r16.z = imov ssa_101.x r16.w = imov ssa_44.y vec1 32 ssa_103 = load_const (0x00000180 /* 0.000000 */) vec4 32 ssa_104 = intrinsic load_ubo (ssa_2, ssa_103) () () vec4 32 ssa_105 = fadd -ssa_94.xxxx, ssa_104.yyyy r17.xyw = imov ssa_104.wzx r17.z = imov ssa_105.z vec1 32 ssa_107 = flt ssa_44.x, ssa_105.z vec1 32 ssa_108 = fne ssa_44.x, ssa_104.w r40.x = imov ssa_108 r40.y = imov ssa_107.x vec2 32 ssa_110 = inot r40 vec1 32 ssa_111 = ball_iequal2 ssa_110, ssa_12 /* succs: block_7 block_8 */ if ssa_111 { block block_7: /* preds: block_6 */ vec1 32 ssa_112 = frcp ssa_105.z r17.xyw = imov ssa_104.wzx r17.z = imov ssa_112.x vec4 32 ssa_114 = fadd ssa_79.xxxx, ssa_104.wwww vec4 32 ssa_115 = fmul ssa_104.zzzz, ssa_112.xxxx vec4 32 ssa_116 = fmov -ssa_115 vec4 32 ssa_117 = fne ssa_112.xxxx, ssa_112.xxxx vec4 32 ssa_118 = bcsel ssa_117, ssa_116, ssa_8 vec4 32 ssa_119 = fne -ssa_104.zzzz, -ssa_104.zzzz vec4 32 ssa_120 = bcsel ssa_119, ssa_116, ssa_118 vec4 32 ssa_121 = fne -ssa_115, -ssa_115 vec4 32 ssa_122 = bcsel ssa_121, ssa_120, ssa_116 vec4 32 ssa_123 = fadd ssa_122, ssa_114.xxxx r16.x = imov ssa_123.x r16.y = imov ssa_90.x r16.z = imov ssa_101.x r16.w = imov ssa_44.y /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ vec4 32 ssa_127 = intrinsic load_ubo (ssa_2, ssa_39) () () vec4 32 ssa_128 = fmul ssa_127.wzyx, r16 vec4 32 ssa_129 = fne r16, r16 vec4 32 ssa_130 = bcsel ssa_129, ssa_128, ssa_8 vec4 32 ssa_131 = fne ssa_127.wzyx, ssa_127.wzyx vec4 32 ssa_132 = bcsel ssa_131, ssa_128, ssa_130 vec4 32 ssa_133 = fne ssa_128, ssa_128 vec4 32 ssa_134 = bcsel ssa_133, ssa_132, ssa_128 vec4 32 ssa_135 = fdot_replicated4 ssa_134, ssa_9 vec1 32 ssa_136 = load_const (0x00000150 /* 0.000000 */) vec4 32 ssa_137 = intrinsic load_ubo (ssa_2, ssa_136) () () vec4 32 ssa_138 = fmul ssa_137.wzyx, r16 vec4 32 ssa_139 = bcsel ssa_129, ssa_138, ssa_8 vec4 32 ssa_140 = fne ssa_137.wzyx, ssa_137.wzyx vec4 32 ssa_141 = bcsel ssa_140, ssa_138, ssa_139 vec4 32 ssa_142 = fne ssa_138, ssa_138 vec4 32 ssa_143 = bcsel ssa_142, ssa_141, ssa_138 vec4 32 ssa_144 = fdot_replicated4 ssa_143, ssa_9 vec1 32 ssa_145 = load_const (0x00000160 /* 0.000000 */) vec4 32 ssa_146 = intrinsic load_ubo (ssa_2, ssa_145) () () vec4 32 ssa_147 = fmul ssa_146.wzyx, r16 vec4 32 ssa_148 = bcsel ssa_129, ssa_147, ssa_8 vec4 32 ssa_149 = fne ssa_146.wzyx, ssa_146.wzyx vec4 32 ssa_150 = bcsel ssa_149, ssa_147, ssa_148 vec4 32 ssa_151 = fne ssa_147, ssa_147 vec4 32 ssa_152 = bcsel ssa_151, ssa_150, ssa_147 vec4 32 ssa_153 = fdot_replicated4 ssa_152, ssa_9 vec1 32 ssa_154 = load_const (0x00000170 /* 0.000000 */) vec4 32 ssa_155 = intrinsic load_ubo (ssa_2, ssa_154) () () vec4 32 ssa_156 = fmul ssa_155.wzyx, r16 vec4 32 
ssa_157 = bcsel ssa_129, ssa_156, ssa_8 vec4 32 ssa_158 = fne ssa_155.wzyx, ssa_155.wzyx vec4 32 ssa_159 = bcsel ssa_158, ssa_156, ssa_157 vec4 32 ssa_160 = fne ssa_156, ssa_156 vec4 32 ssa_161 = bcsel ssa_160, ssa_159, ssa_156 vec4 32 ssa_162 = fdot_replicated4 ssa_161, ssa_9 r5.x = imov ssa_135.x r5.y = imov ssa_144.x r5.z = imov ssa_153.x r5.w = imov ssa_162.x vec2 32 ssa_164 = fge ssa_44.yy, ssa_41.xx vec1 32 ssa_165 = ball_iequal2 ssa_164, ssa_13 /* succs: block_10 block_11 */ if ssa_165 { block block_10: /* preds: block_9 */ r4.xyz = imov ssa_44.yyy r4.w = imov ssa_41.x /* succs: block_15 */ } else { block block_11: /* preds: block_9 */ vec4 32 ssa_167 = f2i32 ssa_41.xxxx r15.x = imov ssa_46.x r15.y = imov ssa_167.y r15.z = imov r12.z vec1 32 ssa_169 = iadd ssa_5, ssa_167.y vec1 32 ssa_170 = ishl ssa_169, ssa_37 vec1 32 ssa_171 = iadd ssa_39, ssa_170 vec4 32 ssa_172 = intrinsic load_ubo (ssa_2, ssa_171) () () vec1 32 ssa_173 = iadd ssa_10, ssa_167.y vec1 32 ssa_174 = ishl ssa_173, ssa_37 vec1 32 ssa_175 = iadd ssa_39, ssa_174 vec4 32 ssa_176 = intrinsic load_ubo (ssa_2, ssa_175) () () vec1 32 ssa_177 = iadd ssa_11, ssa_167.y vec1 32 ssa_178 = ishl ssa_177, ssa_37 vec1 32 ssa_179 = iadd ssa_39, ssa_178 vec4 32 ssa_180 = intrinsic load_ubo (ssa_2, ssa_179) () () vec1 32 ssa_181 = iadd ssa_14, ssa_167.y vec1 32 ssa_182 = ishl ssa_181, ssa_37 vec1 32 ssa_183 = iadd ssa_39, ssa_182 vec4 32 ssa_184 = intrinsic load_ubo (ssa_2, ssa_183) () () r18.xy = imov ssa_34.zw r18.zw = imov r17.zw vec4 32 ssa_186 = ffloor ssa_41.zzzz vec4 32 ssa_187 = fadd ssa_41.zzzz, -ssa_186.xxxx vec2 32 ssa_188 = fge ssa_187.xx, ssa_44.ww vec1 32 ssa_189 = imov ssa_188.x /* succs: block_12 block_13 */ if ssa_189 { block block_12: /* preds: block_11 */ vec1 32 ssa_190 = frcp ssa_58.w vec1 32 ssa_191 = frcp ssa_58.z vec4 32 ssa_192 = fadd ssa_66.xyyy, -ssa_58.yxxx r41.xy = imov ssa_192.xy r41.z = imov ssa_190.x r41.w = imov ssa_191.x vec4 32 ssa_194 = fmul r41.xyyy, r41.zwww vec4 32 ssa_195 = fne r41.zwww, r41.zwww vec4 32 ssa_196 = bcsel ssa_195, ssa_194, ssa_8 vec4 32 ssa_197 = fne r41.xyyy, r41.xyyy vec4 32 ssa_198 = bcsel ssa_197, ssa_194, ssa_196 vec4 32 ssa_199 = fne ssa_194, ssa_194 vec4 32 ssa_200 = bcsel ssa_199, ssa_198, ssa_194 r18.xy = imov ssa_200.xy r18.zw = imov r41.zw /* succs: block_14 */ } else { block block_13: /* preds: block_11 */ /* succs: block_14 */ } block block_14: /* preds: block_12 block_13 */ vec4 32 ssa_203 = fmax -r18.xyyy, r18.xyyy r17.xy = imov ssa_203.xy r17.zw = imov r18.zw vec4 32 ssa_205 = fadd ssa_176.wzyx, -ssa_172.wzyx vec4 32 ssa_206 = fmul ssa_205, ssa_203.xxxx vec4 32 ssa_207 = fne ssa_203.xxxx, ssa_203.xxxx vec4 32 ssa_208 = bcsel ssa_207, ssa_206, ssa_8 vec4 32 ssa_209 = fne ssa_205, ssa_205 vec4 32 ssa_210 = bcsel ssa_209, ssa_206, ssa_208 vec4 32 ssa_211 = fne ssa_206, ssa_206 vec4 32 ssa_212 = bcsel ssa_211, ssa_210, ssa_206 vec4 32 ssa_213 = fadd ssa_212, ssa_172.wzyx vec4 32 ssa_214 = fadd ssa_184.wzyx, -ssa_180.wzyx vec4 32 ssa_215 = fmul ssa_214, ssa_203.xxxx vec4 32 ssa_216 = bcsel ssa_207, ssa_215, ssa_8 vec4 32 ssa_217 = fne ssa_214, ssa_214 vec4 32 ssa_218 = bcsel ssa_217, ssa_215, ssa_216 vec4 32 ssa_219 = fne ssa_215, ssa_215 vec4 32 ssa_220 = bcsel ssa_219, ssa_218, ssa_215 vec4 32 ssa_221 = fadd ssa_220, ssa_180.wzyx vec4 32 ssa_222 = fadd ssa_221, -ssa_213 vec4 32 ssa_223 = fmul ssa_222, ssa_203.yyyy vec4 32 ssa_224 = fne ssa_203.yyyy, ssa_203.yyyy vec4 32 ssa_225 = bcsel ssa_224, ssa_223, ssa_8 vec4 32 ssa_226 = fne ssa_222, ssa_222 vec4 32 ssa_227 = 
bcsel ssa_226, ssa_223, ssa_225 vec4 32 ssa_228 = fne ssa_223, ssa_223 vec4 32 ssa_229 = bcsel ssa_228, ssa_227, ssa_223 r4 = fadd ssa_229, ssa_213 /* succs: block_15 */ } block block_15: /* preds: block_10 block_14 */ vec4 32 ssa_234 = fmul ssa_44.zyzy, ssa_41.yyyy vec4 32 ssa_235 = fne ssa_41.yyyy, ssa_41.yyyy vec4 32 ssa_236 = bcsel ssa_235, ssa_234, ssa_8 vec4 32 ssa_237 = fne ssa_44.zyzy, ssa_44.zyzy vec4 32 ssa_238 = bcsel ssa_237, ssa_234, ssa_236 vec4 32 ssa_239 = fne ssa_234, ssa_234 vec4 32 ssa_240 = bcsel ssa_239, ssa_238, ssa_234 vec4 32 ssa_241 = ffloor ssa_240 vec4 32 ssa_242 = fadd ssa_240.zwww, -ssa_241.xyyy r42.xy = imov ssa_242.xy r42.zw = imov ssa_240.zw vec4 32 ssa_244 = fmul ssa_44.zzzz, r42 vec4 32 ssa_245 = fne r42, r42 vec4 32 ssa_246 = bcsel ssa_245, ssa_244, ssa_8 vec4 32 ssa_247 = fne ssa_44.zzzz, ssa_44.zzzz vec4 32 ssa_248 = bcsel ssa_247, ssa_244, ssa_246 vec4 32 ssa_249 = fne ssa_244, ssa_244 vec4 32 ssa_250 = bcsel ssa_249, ssa_248, ssa_244 vec1 32 ssa_251 = load_const (0x000001a0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_2, ssa_251) () () r19 = imov ssa_252.wzyx vec4 32 ssa_254 = f2i32 ssa_41.yyyy vec4 32 ssa_255 = ffloor ssa_41.wwww r43.x = imov ssa_255.x r43.yzw = imov r6.yzw vec4 32 ssa_257 = fadd ssa_41.wwww, -r43 r20.x = imov ssa_257.x r20.yzw = imov r6.yzw vec2 32 ssa_259 = fge ssa_257.xx, ssa_44.ww vec1 32 ssa_260 = imov ssa_259.x /* succs: block_16 block_38 */ if ssa_260 { block block_16: /* preds: block_15 */ vec4 32 ssa_261 = fadd ssa_44.xyyy, ssa_34.zwww r21.xy = imov ssa_261.xy r21.zw = imov r7.zw vec4 32 ssa_263 = fmul ssa_44.zzzz, ssa_41.wwww vec4 32 ssa_264 = fne ssa_41.wwww, ssa_41.wwww vec4 32 ssa_265 = bcsel ssa_264, ssa_263, ssa_8 vec4 32 ssa_266 = bcsel ssa_247, ssa_263, ssa_265 vec4 32 ssa_267 = fne ssa_263, ssa_263 vec4 32 ssa_268 = bcsel ssa_267, ssa_266, ssa_263 r44.xy = imov ssa_268.xy r44.zw = imov r6.zw vec4 32 ssa_270 = ffloor r44 vec4 32 ssa_271 = fadd r44.xxxx, -ssa_270.yyyy vec2 32 ssa_272 = fge ssa_271.xx, ssa_44.ww vec4 32 ssa_273 = bcsel ssa_272.xxxx, ssa_252.zwyx, ssa_252.wzyx vec4 32 ssa_274 = fmul ssa_273, ssa_51.wzyx vec4 32 ssa_275 = fne ssa_51.wzyx, ssa_51.wzyx vec4 32 ssa_276 = bcsel ssa_275, ssa_274, ssa_8 vec4 32 ssa_277 = fne ssa_273, ssa_273 vec4 32 ssa_278 = bcsel ssa_277, ssa_274, ssa_276 vec4 32 ssa_279 = fne ssa_274, ssa_274 vec4 32 ssa_280 = bcsel ssa_279, ssa_278, ssa_274 vec4 32 ssa_281 = fmul ssa_44.zzzz, ssa_271.xxxx vec4 32 ssa_282 = fne ssa_271.xxxx, ssa_271.xxxx vec4 32 ssa_283 = bcsel ssa_282, ssa_281, ssa_8 vec4 32 ssa_284 = bcsel ssa_247, ssa_281, ssa_283 vec4 32 ssa_285 = fne ssa_281, ssa_281 vec4 32 ssa_286 = bcsel ssa_285, ssa_284, ssa_281 vec4 32 ssa_287 = fmul ssa_44.zyzy, ssa_286.xxxx vec4 32 ssa_288 = fne ssa_286.xxxx, ssa_286.xxxx vec4 32 ssa_289 = bcsel ssa_288, ssa_287, ssa_8 vec4 32 ssa_290 = bcsel ssa_237, ssa_287, ssa_289 vec4 32 ssa_291 = fne ssa_287, ssa_287 vec4 32 ssa_292 = bcsel ssa_291, ssa_290, ssa_287 vec4 32 ssa_293 = ffloor ssa_292 vec4 32 ssa_294 = fadd ssa_292.xyyy, -ssa_293.zwww vec2 32 ssa_295 = fge ssa_294.xy, ssa_44.ww vec1 32 ssa_296 = imov ssa_295.y /* succs: block_17 block_18 */ if ssa_296 { block block_17: /* preds: block_16 */ vec4 32 ssa_297 = fmul ssa_261.xxxx, ssa_280.xxxx vec4 32 ssa_298 = fne ssa_280.xxxx, ssa_280.xxxx vec4 32 ssa_299 = bcsel ssa_298, ssa_297, ssa_8 vec4 32 ssa_300 = fne ssa_261.xxxx, ssa_261.xxxx vec4 32 ssa_301 = bcsel ssa_300, ssa_297, ssa_299 vec4 32 ssa_302 = fne ssa_297, ssa_297 vec4 32 ssa_303 = bcsel ssa_302, ssa_301, 
ssa_297 r21.x = imov ssa_303.x r21.y = imov ssa_261.y r21.zw = imov r7.zw /* succs: block_19 */ } else { block block_18: /* preds: block_16 */ /* succs: block_19 */ } block block_19: /* preds: block_17 block_18 */ vec1 32 ssa_306 = imov ssa_295.x /* succs: block_20 block_21 */ if ssa_306 { block block_20: /* preds: block_19 */ vec4 32 ssa_307 = fmul r21.yyyy, ssa_280.yyyy vec4 32 ssa_308 = fne ssa_280.yyyy, ssa_280.yyyy vec4 32 ssa_309 = bcsel ssa_308, ssa_307, ssa_8 vec4 32 ssa_310 = fne r21.yyyy, r21.yyyy vec4 32 ssa_311 = bcsel ssa_310, ssa_307, ssa_309 vec4 32 ssa_312 = fne ssa_307, ssa_307 vec4 32 ssa_313 = bcsel ssa_312, ssa_311, ssa_307 vec4 32 ssa_314 = fadd ssa_313, ssa_44.yyyy vec4 32 ssa_315 = fadd ssa_314.yyyy, -ssa_280.yyyy r21.y = imov ssa_315.y /* succs: block_22 */ } else { block block_21: /* preds: block_19 */ /* succs: block_22 */ } block block_22: /* preds: block_20 block_21 */ vec4 32 ssa_318 = fadd ssa_44.yyyy, -ssa_280.xyyy r19.xy = imov ssa_318.xy r19.zw = imov ssa_273.zw vec4 32 ssa_320 = fmul ssa_44.zzzz, ssa_294.xxxx vec4 32 ssa_321 = fne ssa_294.xxxx, ssa_294.xxxx vec4 32 ssa_322 = bcsel ssa_321, ssa_320, ssa_8 vec4 32 ssa_323 = bcsel ssa_247, ssa_320, ssa_322 vec4 32 ssa_324 = fne ssa_320, ssa_320 vec4 32 ssa_325 = bcsel ssa_324, ssa_323, ssa_320 vec4 32 ssa_326 = fmul ssa_44.zyzy, ssa_325.xxxx vec4 32 ssa_327 = fne ssa_325.xxxx, ssa_325.xxxx vec4 32 ssa_328 = bcsel ssa_327, ssa_326, ssa_8 vec4 32 ssa_329 = bcsel ssa_237, ssa_326, ssa_328 vec4 32 ssa_330 = fne ssa_326, ssa_326 vec4 32 ssa_331 = bcsel ssa_330, ssa_329, ssa_326 vec4 32 ssa_332 = ffloor ssa_331 vec4 32 ssa_333 = fadd ssa_331.xyyy, -ssa_332.zwww vec2 32 ssa_334 = fge ssa_333.xy, ssa_44.ww vec1 32 ssa_335 = imov ssa_334.y /* succs: block_23 block_24 */ if ssa_335 { block block_23: /* preds: block_22 */ vec4 32 ssa_336 = fadd r21.xxxx, ssa_318.xxxx r21.x = imov ssa_336.x /* succs: block_25 */ } else { block block_24: /* preds: block_22 */ /* succs: block_25 */ } block block_25: /* preds: block_23 block_24 */ vec1 32 ssa_339 = imov ssa_334.x /* succs: block_26 block_27 */ if ssa_339 { block block_26: /* preds: block_25 */ vec4 32 ssa_340 = fadd r21.yyyy, -ssa_318.yyyy r21.y = imov ssa_340.y /* succs: block_28 */ } else { block block_27: /* preds: block_25 */ /* succs: block_28 */ } block block_28: /* preds: block_26 block_27 */ vec4 32 ssa_343 = fmul ssa_44.zzzz, ssa_333.xxxx vec4 32 ssa_344 = fne ssa_333.xxxx, ssa_333.xxxx vec4 32 ssa_345 = bcsel ssa_344, ssa_343, ssa_8 vec4 32 ssa_346 = bcsel ssa_247, ssa_343, ssa_345 vec4 32 ssa_347 = fne ssa_343, ssa_343 vec4 32 ssa_348 = bcsel ssa_347, ssa_346, ssa_343 vec4 32 ssa_349 = fmul ssa_44.zyzy, ssa_348.xxxx vec4 32 ssa_350 = fne ssa_348.xxxx, ssa_348.xxxx vec4 32 ssa_351 = bcsel ssa_350, ssa_349, ssa_8 vec4 32 ssa_352 = bcsel ssa_237, ssa_349, ssa_351 vec4 32 ssa_353 = fne ssa_349, ssa_349 vec4 32 ssa_354 = bcsel ssa_353, ssa_352, ssa_349 vec4 32 ssa_355 = ffloor ssa_354 vec4 32 ssa_356 = fadd ssa_354.xyyy, -ssa_355.zwww vec2 32 ssa_357 = fge ssa_356.xy, ssa_44.ww vec1 32 ssa_358 = imov ssa_357.y /* succs: block_29 block_30 */ if ssa_358 { block block_29: /* preds: block_28 */ vec4 32 ssa_359 = fadd ssa_44.yyyy, -r21.xxxx r21.x = imov ssa_359.x /* succs: block_31 */ } else { block block_30: /* preds: block_28 */ /* succs: block_31 */ } block block_31: /* preds: block_29 block_30 */ vec1 32 ssa_362 = imov ssa_357.x /* succs: block_32 block_33 */ if ssa_362 { block block_32: /* preds: block_31 */ vec4 32 ssa_363 = fadd ssa_44.yyyy, -r21.yyyy r21.y = imov 
ssa_363.y /* succs: block_34 */ } else { block block_33: /* preds: block_31 */ /* succs: block_34 */ } block block_34: /* preds: block_32 block_33 */ r45.xy = imov r44.xy r45.zw = imov ssa_355.zw vec4 32 ssa_367 = ffloor r45 vec4 32 ssa_368 = fadd r45.xxxx, -ssa_367.yyyy r20.x = imov ssa_368.x r20.y = imov ssa_367.y r20.zw = imov r45.zw vec2 32 ssa_370 = fge ssa_368.xx, ssa_44.ww vec1 32 ssa_371 = imov ssa_370.x /* succs: block_35 block_36 */ if ssa_371 { block block_35: /* preds: block_34 */ vec4 32 ssa_372 = fadd ssa_44.yyyy, -r21.yxxx r21.xy = imov ssa_372.xy /* succs: block_37 */ } else { block block_36: /* preds: block_34 */ /* succs: block_37 */ } block block_37: /* preds: block_35 block_36 */ vec4 32 ssa_375 = fadd ssa_44.yyyy, -r21.yyyy r7.xzw = imov r21.xzw r7.y = imov ssa_375.y /* succs: block_39 */ } else { block block_38: /* preds: block_15 */ /* succs: block_39 */ } block block_39: /* preds: block_37 block_38 */ vec1 32 ssa_380 = flt ssa_250.y, ssa_44.y /* succs: block_40 block_44 */ if ssa_380 { block block_40: /* preds: block_39 */ r46.x = imov r43.x r46.yzw = imov r20.yzw vec4 32 ssa_382 = fadd ssa_41.wwww, -r46 vec1 32 ssa_383 = iadd ssa_5, ssa_254.y vec1 32 ssa_384 = ishl ssa_383, ssa_37 vec1 32 ssa_385 = iadd ssa_39, ssa_384 vec4 32 ssa_386 = intrinsic load_ubo (ssa_2, ssa_385) () () r20 = imov ssa_386.wzyx vec2 32 ssa_388 = fge ssa_382.xx, ssa_44.ww vec1 32 ssa_389 = imov ssa_388.x /* succs: block_41 block_42 */ if ssa_389 { block block_41: /* preds: block_40 */ vec4 32 ssa_390 = fmul r7.xyyy, ssa_386.wzzz vec4 32 ssa_391 = fne ssa_386.wzzz, ssa_386.wzzz vec4 32 ssa_392 = bcsel ssa_391, ssa_390, ssa_8 vec4 32 ssa_393 = fne r7.xyyy, r7.xyyy vec4 32 ssa_394 = bcsel ssa_393, ssa_390, ssa_392 vec4 32 ssa_395 = fne ssa_390, ssa_390 vec4 32 ssa_396 = bcsel ssa_395, ssa_394, ssa_390 vec4 32 ssa_397 = fadd ssa_396, ssa_386.yxxx vec4 32 ssa_398 = fmul ssa_397.xyyy, r19.zwww vec4 32 ssa_399 = fne r19.zwww, r19.zwww vec4 32 ssa_400 = bcsel ssa_399, ssa_398, ssa_8 vec4 32 ssa_401 = fne ssa_397.xyyy, ssa_397.xyyy vec4 32 ssa_402 = bcsel ssa_401, ssa_398, ssa_400 vec4 32 ssa_403 = fne ssa_398, ssa_398 vec4 32 ssa_404 = bcsel ssa_403, ssa_402, ssa_398 vec4 32 ssa_405 = fadd ssa_44.yyyy, -ssa_404.yyyy r22.x = imov ssa_404.x r22.y = imov ssa_405.y r22.z = imov ssa_41.y r22.w = imov r17.w /* succs: block_43 */ } else { block block_42: /* preds: block_40 */ r47.x = imov ssa_386.w r47.y = imov ssa_250.y r47.z = imov ssa_41.y r47.w = imov r17.w r48.x = imov ssa_386.y r48.yz = imov r47.yz r48.w = imov r17.w vec2 32 ssa_409 = fne ssa_44.xx, ssa_34.zw vec4 32 ssa_410 = bcsel ssa_409.xxxx, r48, r47 r49.xzw = imov ssa_410.xzw r49.y = imov ssa_386.z r50.xzw = imov r49.xzw r50.y = imov ssa_386.x r22 = bcsel ssa_409.yyyy, r50, r49 /* succs: block_43 */ } block block_43: /* preds: block_41 block_42 */ vec4 32 ssa_415 = fadd ssa_44.yyyy, ssa_41.yyyy r23.xyw = imov r22.xyw r23.z = imov ssa_415.z /* succs: block_54 */ } else { block block_44: /* preds: block_39 */ vec1 32 ssa_417 = flt ssa_250.x, ssa_44.y /* succs: block_45 block_52 */ if ssa_417 { block block_45: /* preds: block_44 */ r51.x = imov r43.x r51.yzw = imov r20.yzw vec4 32 ssa_419 = fadd ssa_41.wwww, -r51 r20.x = imov ssa_419.x vec2 32 ssa_421 = fge ssa_419.xx, ssa_44.ww vec1 32 ssa_422 = imov ssa_421.x /* succs: block_46 block_47 */ if ssa_422 { block block_46: /* preds: block_45 */ r7.zw = imov ssa_44.xy vec1 32 ssa_424 = iadd ssa_5, ssa_254.y vec1 32 ssa_425 = ishl ssa_424, ssa_37 vec1 32 ssa_426 = iadd ssa_39, ssa_425 vec4 32 ssa_427 = 
intrinsic load_ubo (ssa_2, ssa_426) () () vec4 32 ssa_428 = fmul ssa_427.wzyx, r7 vec4 32 ssa_429 = fne r7, r7 vec4 32 ssa_430 = bcsel ssa_429, ssa_428, ssa_8 vec4 32 ssa_431 = fne ssa_427.wzyx, ssa_427.wzyx vec4 32 ssa_432 = bcsel ssa_431, ssa_428, ssa_430 vec4 32 ssa_433 = fne ssa_428, ssa_428 vec4 32 ssa_434 = bcsel ssa_433, ssa_432, ssa_428 vec4 32 ssa_435 = fdot_replicated4 ssa_434, ssa_9 vec1 32 ssa_436 = iadd ssa_10, ssa_254.y vec1 32 ssa_437 = ishl ssa_436, ssa_37 vec1 32 ssa_438 = iadd ssa_39, ssa_437 vec4 32 ssa_439 = intrinsic load_ubo (ssa_2, ssa_438) () () vec4 32 ssa_440 = fmul ssa_439.wzyx, r7 vec4 32 ssa_441 = bcsel ssa_429, ssa_440, ssa_8 vec4 32 ssa_442 = fne ssa_439.wzyx, ssa_439.wzyx vec4 32 ssa_443 = bcsel ssa_442, ssa_440, ssa_441 vec4 32 ssa_444 = fne ssa_440, ssa_440 vec4 32 ssa_445 = bcsel ssa_444, ssa_443, ssa_440 vec4 32 ssa_446 = fdot_replicated4 ssa_445, ssa_9 r52.x = imov ssa_435.x r52.y = imov ssa_446.x r52.z = imov ssa_41.y r52.w = imov r17.w vec4 32 ssa_448 = fmul r52.xyyy, r19.zwww vec4 32 ssa_449 = fne r19.zwww, r19.zwww vec4 32 ssa_450 = bcsel ssa_449, ssa_448, ssa_8 vec4 32 ssa_451 = fne r52.xyyy, r52.xyyy vec4 32 ssa_452 = bcsel ssa_451, ssa_448, ssa_450 vec4 32 ssa_453 = fne ssa_448, ssa_448 vec4 32 ssa_454 = bcsel ssa_453, ssa_452, ssa_448 vec4 32 ssa_455 = fadd ssa_44.yyyy, -ssa_454.yyyy r24.x = imov ssa_454.x r24.y = imov ssa_455.y r24.z = imov r52.z r24.w = imov r17.w /* succs: block_51 */ } else { block block_47: /* preds: block_45 */ vec1 32 ssa_457 = iadd ssa_5, ssa_254.y vec1 32 ssa_458 = ishl ssa_457, ssa_37 vec1 32 ssa_459 = iadd ssa_39, ssa_458 vec4 32 ssa_460 = intrinsic load_ubo (ssa_2, ssa_459) () () vec1 32 ssa_461 = iadd ssa_10, ssa_254.y vec1 32 ssa_462 = ishl ssa_461, ssa_37 vec1 32 ssa_463 = iadd ssa_39, ssa_462 vec4 32 ssa_464 = intrinsic load_ubo (ssa_2, ssa_463) () () r20 = imov ssa_464.wzyx vec2 32 ssa_466 = fne ssa_44.xx, ssa_34.zw vec1 32 ssa_467 = feq ssa_44.x, ssa_34.w /* succs: block_48 block_49 */ if ssa_467 { block block_48: /* preds: block_47 */ r53.xy = imov ssa_460.wz r53.z = imov ssa_41.y r53.w = imov r17.w r54.xy = imov ssa_464.yx r54.z = imov r53.z r54.w = imov r17.w r24 = bcsel ssa_466.xxxx, r54, r53 /* succs: block_50 */ } else { block block_49: /* preds: block_47 */ r55.xy = imov ssa_464.wz r55.z = imov ssa_41.y r55.w = imov r17.w r56.xy = imov ssa_460.yx r56.z = imov r55.z r56.w = imov r17.w r24 = bcsel ssa_466.xxxx, r56, r55 /* succs: block_50 */ } block block_50: /* preds: block_48 block_49 */ /* succs: block_51 */ } block block_51: /* preds: block_46 block_50 */ /* succs: block_53 */ } else { block block_52: /* preds: block_44 */ vec1 32 ssa_478 = iadd ssa_5, ssa_254.y vec1 32 ssa_479 = ishl ssa_478, ssa_37 vec1 32 ssa_480 = iadd ssa_39, ssa_479 vec4 32 ssa_481 = intrinsic load_ubo (ssa_2, ssa_480) () () vec4 32 ssa_482 = fmul ssa_481.wzyx, r14 vec4 32 ssa_483 = bcsel ssa_73, ssa_482, ssa_8 vec4 32 ssa_484 = fne ssa_481.wzyx, ssa_481.wzyx vec4 32 ssa_485 = bcsel ssa_484, ssa_482, ssa_483 vec4 32 ssa_486 = fne ssa_482, ssa_482 vec4 32 ssa_487 = bcsel ssa_486, ssa_485, ssa_482 vec4 32 ssa_488 = fdot_replicated4 ssa_487, ssa_9 vec1 32 ssa_489 = iadd ssa_10, ssa_254.y vec1 32 ssa_490 = ishl ssa_489, ssa_37 vec1 32 ssa_491 = iadd ssa_39, ssa_490 vec4 32 ssa_492 = intrinsic load_ubo (ssa_2, ssa_491) () () vec4 32 ssa_493 = fmul ssa_492.wzyx, r14 vec4 32 ssa_494 = bcsel ssa_73, ssa_493, ssa_8 vec4 32 ssa_495 = fne ssa_492.wzyx, ssa_492.wzyx vec4 32 ssa_496 = bcsel ssa_495, ssa_493, ssa_494 vec4 32 ssa_497 = fne 
ssa_493, ssa_493 vec4 32 ssa_498 = bcsel ssa_497, ssa_496, ssa_493 vec4 32 ssa_499 = fdot_replicated4 ssa_498, ssa_9 r24.x = imov ssa_488.x r24.y = imov ssa_499.x r24.z = imov ssa_41.y r24.w = imov r17.w /* succs: block_53 */ } block block_53: /* preds: block_51 block_52 */ vec4 32 ssa_504 = fadd ssa_44.zzzz, r24.zzzz r23.xyw = imov r24.xyw r23.z = imov ssa_504.z /* succs: block_54 */ } block block_54: /* preds: block_43 block_53 */ vec1 32 ssa_509 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_510 = intrinsic load_ubo (ssa_2, ssa_509) () () vec1 32 ssa_511 = ine ssa_510, ssa_2 /* succs: block_55 block_56 */ if ssa_511 { block block_55: /* preds: block_54 */ vec4 32 ssa_512 = fadd ssa_44.yyyy, -r23.xxxx r23.x = imov r23.y r23.y = imov ssa_512.y /* succs: block_57 */ } else { block block_56: /* preds: block_54 */ /* succs: block_57 */ } block block_57: /* preds: block_55 block_56 */ vec1 32 ssa_515 = intrinsic load_ubo (ssa_2, ssa_5) () () vec1 32 ssa_516 = ine ssa_515, ssa_2 /* succs: block_58 block_59 */ if ssa_516 { block block_58: /* preds: block_57 */ vec4 32 ssa_517 = fadd ssa_44.yyyy, -r23.yxxx r23.xy = imov ssa_517.xy /* succs: block_60 */ } else { block block_59: /* preds: block_57 */ /* succs: block_60 */ } block block_60: /* preds: block_58 block_59 */ r3 = imov r23.xyyy vec4 32 ssa_521 = fmul ssa_44.zyzy, ssa_250.xxxx vec4 32 ssa_522 = fne ssa_250.xxxx, ssa_250.xxxx vec4 32 ssa_523 = bcsel ssa_522, ssa_521, ssa_8 vec4 32 ssa_524 = bcsel ssa_237, ssa_521, ssa_523 vec4 32 ssa_525 = fne ssa_521, ssa_521 vec4 32 ssa_526 = bcsel ssa_525, ssa_524, ssa_521 vec4 32 ssa_527 = ffloor ssa_526 vec4 32 ssa_528 = fadd ssa_526.zwww, -ssa_527.xyyy r57.xy = imov ssa_528.xy r57.zw = imov ssa_526.zw vec4 32 ssa_530 = fmul ssa_44.zzzz, r57 vec4 32 ssa_531 = fne r57, r57 vec4 32 ssa_532 = bcsel ssa_531, ssa_530, ssa_8 vec4 32 ssa_533 = bcsel ssa_247, ssa_530, ssa_532 vec4 32 ssa_534 = fne ssa_530, ssa_530 vec4 32 ssa_535 = bcsel ssa_534, ssa_533, ssa_530 vec1 32 ssa_536 = load_const (0x000001b0 /* 0.000000 */) vec4 32 ssa_537 = intrinsic load_ubo (ssa_2, ssa_536) () () r25 = imov ssa_537.wzyx vec4 32 ssa_539 = f2i32 r23.zzzz r58.x = imov r43.x r58.yzw = imov r20.yzw vec4 32 ssa_541 = fadd ssa_41.wwww, -r58 r26.x = imov ssa_541.x r26.yzw = imov r20.yzw vec2 32 ssa_543 = fge ssa_541.xx, ssa_44.ww vec1 32 ssa_544 = imov ssa_543.x /* succs: block_61 block_83 */ if ssa_544 { block block_61: /* preds: block_60 */ vec4 32 ssa_545 = fadd ssa_44.xyyy, ssa_34.zwww r27.xy = imov ssa_545.xy r27.zw = imov r7.zw r59.xy = imov ssa_252.wz r59.zw = imov ssa_537.yx vec4 32 ssa_548 = fmul ssa_44.zzzz, ssa_41.wwww vec4 32 ssa_549 = fne ssa_41.wwww, ssa_41.wwww vec4 32 ssa_550 = bcsel ssa_549, ssa_548, ssa_8 vec4 32 ssa_551 = bcsel ssa_247, ssa_548, ssa_550 vec4 32 ssa_552 = fne ssa_548, ssa_548 vec4 32 ssa_553 = bcsel ssa_552, ssa_551, ssa_548 r60.xy = imov ssa_553.xy r60.zw = imov r20.zw vec4 32 ssa_555 = ffloor r60 vec4 32 ssa_556 = fadd r60.xxxx, -ssa_555.yyyy r61 = imov r59.yxzw vec2 32 ssa_558 = fge ssa_556.xx, ssa_44.ww vec4 32 ssa_559 = bcsel ssa_558.xxxx, r61, r61.yxzw vec4 32 ssa_560 = fmul ssa_559, ssa_51.wzyx vec4 32 ssa_561 = fne ssa_51.wzyx, ssa_51.wzyx vec4 32 ssa_562 = bcsel ssa_561, ssa_560, ssa_8 vec4 32 ssa_563 = fne ssa_559, ssa_559 vec4 32 ssa_564 = bcsel ssa_563, ssa_560, ssa_562 vec4 32 ssa_565 = fne ssa_560, ssa_560 vec4 32 ssa_566 = bcsel ssa_565, ssa_564, ssa_560 vec4 32 ssa_567 = fmul ssa_44.zzzz, ssa_556.xxxx vec4 32 ssa_568 = fne ssa_556.xxxx, ssa_556.xxxx vec4 32 ssa_569 = bcsel 
ssa_568, ssa_567, ssa_8 vec4 32 ssa_570 = bcsel ssa_247, ssa_567, ssa_569 vec4 32 ssa_571 = fne ssa_567, ssa_567 vec4 32 ssa_572 = bcsel ssa_571, ssa_570, ssa_567 vec4 32 ssa_573 = fmul ssa_44.zyzy, ssa_572.xxxx vec4 32 ssa_574 = fne ssa_572.xxxx, ssa_572.xxxx vec4 32 ssa_575 = bcsel ssa_574, ssa_573, ssa_8 vec4 32 ssa_576 = bcsel ssa_237, ssa_573, ssa_575 vec4 32 ssa_577 = fne ssa_573, ssa_573 vec4 32 ssa_578 = bcsel ssa_577, ssa_576, ssa_573 vec4 32 ssa_579 = ffloor ssa_578 vec4 32 ssa_580 = fadd ssa_578.xyyy, -ssa_579.zwww vec2 32 ssa_581 = fge ssa_580.xy, ssa_44.ww vec1 32 ssa_582 = imov ssa_581.y /* succs: block_62 block_63 */ if ssa_582 { block block_62: /* preds: block_61 */ vec4 32 ssa_583 = fmul ssa_545.xxxx, ssa_566.xxxx vec4 32 ssa_584 = fne ssa_566.xxxx, ssa_566.xxxx vec4 32 ssa_585 = bcsel ssa_584, ssa_583, ssa_8 vec4 32 ssa_586 = fne ssa_545.xxxx, ssa_545.xxxx vec4 32 ssa_587 = bcsel ssa_586, ssa_583, ssa_585 vec4 32 ssa_588 = fne ssa_583, ssa_583 vec4 32 ssa_589 = bcsel ssa_588, ssa_587, ssa_583 r27.x = imov ssa_589.x r27.y = imov ssa_545.y r27.zw = imov r7.zw /* succs: block_64 */ } else { block block_63: /* preds: block_61 */ /* succs: block_64 */ } block block_64: /* preds: block_62 block_63 */ vec1 32 ssa_592 = imov ssa_581.x /* succs: block_65 block_66 */ if ssa_592 { block block_65: /* preds: block_64 */ vec4 32 ssa_593 = fmul r27.yyyy, ssa_566.yyyy vec4 32 ssa_594 = fne ssa_566.yyyy, ssa_566.yyyy vec4 32 ssa_595 = bcsel ssa_594, ssa_593, ssa_8 vec4 32 ssa_596 = fne r27.yyyy, r27.yyyy vec4 32 ssa_597 = bcsel ssa_596, ssa_593, ssa_595 vec4 32 ssa_598 = fne ssa_593, ssa_593 vec4 32 ssa_599 = bcsel ssa_598, ssa_597, ssa_593 vec4 32 ssa_600 = fadd ssa_599, ssa_44.yyyy vec4 32 ssa_601 = fadd ssa_600.yyyy, -ssa_566.yyyy r27.y = imov ssa_601.y /* succs: block_67 */ } else { block block_66: /* preds: block_64 */ /* succs: block_67 */ } block block_67: /* preds: block_65 block_66 */ vec4 32 ssa_604 = fadd ssa_44.yyyy, -ssa_566.xyyy r25.xy = imov ssa_604.xy r25.zw = imov ssa_559.zw vec4 32 ssa_606 = fmul ssa_44.zzzz, ssa_580.xxxx vec4 32 ssa_607 = fne ssa_580.xxxx, ssa_580.xxxx vec4 32 ssa_608 = bcsel ssa_607, ssa_606, ssa_8 vec4 32 ssa_609 = bcsel ssa_247, ssa_606, ssa_608 vec4 32 ssa_610 = fne ssa_606, ssa_606 vec4 32 ssa_611 = bcsel ssa_610, ssa_609, ssa_606 vec4 32 ssa_612 = fmul ssa_44.zyzy, ssa_611.xxxx vec4 32 ssa_613 = fne ssa_611.xxxx, ssa_611.xxxx vec4 32 ssa_614 = bcsel ssa_613, ssa_612, ssa_8 vec4 32 ssa_615 = bcsel ssa_237, ssa_612, ssa_614 vec4 32 ssa_616 = fne ssa_612, ssa_612 vec4 32 ssa_617 = bcsel ssa_616, ssa_615, ssa_612 vec4 32 ssa_618 = ffloor ssa_617 vec4 32 ssa_619 = fadd ssa_617.xyyy, -ssa_618.zwww vec2 32 ssa_620 = fge ssa_619.xy, ssa_44.ww vec1 32 ssa_621 = imov ssa_620.y /* succs: block_68 block_69 */ if ssa_621 { block block_68: /* preds: block_67 */ vec4 32 ssa_622 = fadd r27.xxxx, ssa_604.xxxx r27.x = imov ssa_622.x /* succs: block_70 */ } else { block block_69: /* preds: block_67 */ /* succs: block_70 */ } block block_70: /* preds: block_68 block_69 */ vec1 32 ssa_625 = imov ssa_620.x /* succs: block_71 block_72 */ if ssa_625 { block block_71: /* preds: block_70 */ vec4 32 ssa_626 = fadd r27.yyyy, -ssa_604.yyyy r27.y = imov ssa_626.y /* succs: block_73 */ } else { block block_72: /* preds: block_70 */ /* succs: block_73 */ } block block_73: /* preds: block_71 block_72 */ vec4 32 ssa_629 = fmul ssa_44.zzzz, ssa_619.xxxx vec4 32 ssa_630 = fne ssa_619.xxxx, ssa_619.xxxx vec4 32 ssa_631 = bcsel ssa_630, ssa_629, ssa_8 vec4 32 ssa_632 = bcsel ssa_247, 
ssa_629, ssa_631 vec4 32 ssa_633 = fne ssa_629, ssa_629 vec4 32 ssa_634 = bcsel ssa_633, ssa_632, ssa_629 vec4 32 ssa_635 = fmul ssa_44.zyzy, ssa_634.xxxx vec4 32 ssa_636 = fne ssa_634.xxxx, ssa_634.xxxx vec4 32 ssa_637 = bcsel ssa_636, ssa_635, ssa_8 vec4 32 ssa_638 = bcsel ssa_237, ssa_635, ssa_637 vec4 32 ssa_639 = fne ssa_635, ssa_635 vec4 32 ssa_640 = bcsel ssa_639, ssa_638, ssa_635 vec4 32 ssa_641 = ffloor ssa_640 vec4 32 ssa_642 = fadd ssa_640.xyyy, -ssa_641.zwww vec2 32 ssa_643 = fge ssa_642.xy, ssa_44.ww vec1 32 ssa_644 = imov ssa_643.y /* succs: block_74 block_75 */ if ssa_644 { block block_74: /* preds: block_73 */ vec4 32 ssa_645 = fadd ssa_44.yyyy, -r27.xxxx r27.x = imov ssa_645.x /* succs: block_76 */ } else { block block_75: /* preds: block_73 */ /* succs: block_76 */ } block block_76: /* preds: block_74 block_75 */ vec1 32 ssa_648 = imov ssa_643.x /* succs: block_77 block_78 */ if ssa_648 { block block_77: /* preds: block_76 */ vec4 32 ssa_649 = fadd ssa_44.yyyy, -r27.yyyy r27.y = imov ssa_649.y /* succs: block_79 */ } else { block block_78: /* preds: block_76 */ /* succs: block_79 */ } block block_79: /* preds: block_77 block_78 */ r62.xy = imov r60.xy r62.zw = imov ssa_641.zw vec4 32 ssa_653 = ffloor r62 vec4 32 ssa_654 = fadd r62.xxxx, -ssa_653.yyyy r26.x = imov ssa_654.x r26.y = imov ssa_653.y r26.zw = imov r62.zw vec2 32 ssa_656 = fge ssa_654.xx, ssa_44.ww vec1 32 ssa_657 = imov ssa_656.x /* succs: block_80 block_81 */ if ssa_657 { block block_80: /* preds: block_79 */ vec4 32 ssa_658 = fadd ssa_44.yyyy, -r27.yxxx r27.xy = imov ssa_658.xy /* succs: block_82 */ } else { block block_81: /* preds: block_79 */ /* succs: block_82 */ } block block_82: /* preds: block_80 block_81 */ vec4 32 ssa_661 = fadd ssa_44.yyyy, -r27.yyyy r7.xzw = imov r27.xzw r7.y = imov ssa_661.y /* succs: block_84 */ } else { block block_83: /* preds: block_60 */ /* succs: block_84 */ } block block_84: /* preds: block_82 block_83 */ vec1 32 ssa_666 = flt ssa_535.y, ssa_44.y /* succs: block_85 block_89 */ if ssa_666 { block block_85: /* preds: block_84 */ r63.x = imov r58.x r63.yzw = imov r26.yzw vec4 32 ssa_668 = fadd ssa_41.wwww, -r63 vec1 32 ssa_669 = iadd ssa_5, ssa_539.y vec1 32 ssa_670 = ishl ssa_669, ssa_37 vec1 32 ssa_671 = iadd ssa_39, ssa_670 vec4 32 ssa_672 = intrinsic load_ubo (ssa_2, ssa_671) () () r26 = imov ssa_672.wzyx vec2 32 ssa_674 = fge ssa_668.xx, ssa_44.ww vec1 32 ssa_675 = imov ssa_674.x /* succs: block_86 block_87 */ if ssa_675 { block block_86: /* preds: block_85 */ vec4 32 ssa_676 = fmul r7.xyyy, ssa_672.wzzz vec4 32 ssa_677 = fne ssa_672.wzzz, ssa_672.wzzz vec4 32 ssa_678 = bcsel ssa_677, ssa_676, ssa_8 vec4 32 ssa_679 = fne r7.xyyy, r7.xyyy vec4 32 ssa_680 = bcsel ssa_679, ssa_676, ssa_678 vec4 32 ssa_681 = fne ssa_676, ssa_676 vec4 32 ssa_682 = bcsel ssa_681, ssa_680, ssa_676 vec4 32 ssa_683 = fadd ssa_682, ssa_672.yxxx vec4 32 ssa_684 = fmul ssa_683.xyyy, r25.zwww vec4 32 ssa_685 = fne r25.zwww, r25.zwww vec4 32 ssa_686 = bcsel ssa_685, ssa_684, ssa_8 vec4 32 ssa_687 = fne ssa_683.xyyy, ssa_683.xyyy vec4 32 ssa_688 = bcsel ssa_687, ssa_684, ssa_686 vec4 32 ssa_689 = fne ssa_684, ssa_684 vec4 32 ssa_690 = bcsel ssa_689, ssa_688, ssa_684 vec4 32 ssa_691 = fadd ssa_44.yyyy, -ssa_690.yyyy r28.x = imov ssa_690.x r28.y = imov ssa_691.y r28.zw = imov r23.zw /* succs: block_88 */ } else { block block_87: /* preds: block_85 */ r64.x = imov ssa_672.w r64.y = imov ssa_535.y r64.zw = imov r23.zw r65.x = imov ssa_672.y r65.y = imov r64.y r65.zw = imov r23.zw vec2 32 ssa_695 = fne 
ssa_44.xx, ssa_34.zw vec4 32 ssa_696 = bcsel ssa_695.xxxx, r65, r64 r66.xzw = imov ssa_696.xzw r66.y = imov ssa_672.z r67.xzw = imov r66.xzw r67.y = imov ssa_672.x r28 = bcsel ssa_695.yyyy, r67, r66 /* succs: block_88 */ } block block_88: /* preds: block_86 block_87 */ vec4 32 ssa_701 = fadd ssa_44.yyyy, r28.zzzz r29.xyw = imov r28.xyw r29.z = imov ssa_701.z /* succs: block_99 */ } else { block block_89: /* preds: block_84 */ vec1 32 ssa_703 = flt ssa_535.x, ssa_44.y /* succs: block_90 block_97 */ if ssa_703 { block block_90: /* preds: block_89 */ r68.x = imov r58.x r68.yzw = imov r26.yzw vec4 32 ssa_705 = fadd ssa_41.wwww, -r68 r26.x = imov ssa_705.x vec2 32 ssa_707 = fge ssa_705.xx, ssa_44.ww vec1 32 ssa_708 = imov ssa_707.x /* succs: block_91 block_92 */ if ssa_708 { block block_91: /* preds: block_90 */ r7.zw = imov ssa_44.xy vec1 32 ssa_710 = iadd ssa_5, ssa_539.y vec1 32 ssa_711 = ishl ssa_710, ssa_37 vec1 32 ssa_712 = iadd ssa_39, ssa_711 vec4 32 ssa_713 = intrinsic load_ubo (ssa_2, ssa_712) () () vec4 32 ssa_714 = fmul ssa_713.wzyx, r7 vec4 32 ssa_715 = fne r7, r7 vec4 32 ssa_716 = bcsel ssa_715, ssa_714, ssa_8 vec4 32 ssa_717 = fne ssa_713.wzyx, ssa_713.wzyx vec4 32 ssa_718 = bcsel ssa_717, ssa_714, ssa_716 vec4 32 ssa_719 = fne ssa_714, ssa_714 vec4 32 ssa_720 = bcsel ssa_719, ssa_718, ssa_714 vec4 32 ssa_721 = fdot_replicated4 ssa_720, ssa_9 vec1 32 ssa_722 = iadd ssa_10, ssa_539.y vec1 32 ssa_723 = ishl ssa_722, ssa_37 vec1 32 ssa_724 = iadd ssa_39, ssa_723 vec4 32 ssa_725 = intrinsic load_ubo (ssa_2, ssa_724) () () vec4 32 ssa_726 = fmul ssa_725.wzyx, r7 vec4 32 ssa_727 = bcsel ssa_715, ssa_726, ssa_8 vec4 32 ssa_728 = fne ssa_725.wzyx, ssa_725.wzyx vec4 32 ssa_729 = bcsel ssa_728, ssa_726, ssa_727 vec4 32 ssa_730 = fne ssa_726, ssa_726 vec4 32 ssa_731 = bcsel ssa_730, ssa_729, ssa_726 vec4 32 ssa_732 = fdot_replicated4 ssa_731, ssa_9 r69.x = imov ssa_721.x r69.y = imov ssa_732.x r69.zw = imov r23.zw vec4 32 ssa_734 = fmul r69.xyyy, r25.zwww vec4 32 ssa_735 = fne r25.zwww, r25.zwww vec4 32 ssa_736 = bcsel ssa_735, ssa_734, ssa_8 vec4 32 ssa_737 = fne r69.xyyy, r69.xyyy vec4 32 ssa_738 = bcsel ssa_737, ssa_734, ssa_736 vec4 32 ssa_739 = fne ssa_734, ssa_734 vec4 32 ssa_740 = bcsel ssa_739, ssa_738, ssa_734 vec4 32 ssa_741 = fadd ssa_44.yyyy, -ssa_740.yyyy r30.x = imov ssa_740.x r30.y = imov ssa_741.y r30.zw = imov r23.zw /* succs: block_96 */ } else { block block_92: /* preds: block_90 */ vec1 32 ssa_743 = iadd ssa_5, ssa_539.y vec1 32 ssa_744 = ishl ssa_743, ssa_37 vec1 32 ssa_745 = iadd ssa_39, ssa_744 vec4 32 ssa_746 = intrinsic load_ubo (ssa_2, ssa_745) () () vec1 32 ssa_747 = iadd ssa_10, ssa_539.y vec1 32 ssa_748 = ishl ssa_747, ssa_37 vec1 32 ssa_749 = iadd ssa_39, ssa_748 vec4 32 ssa_750 = intrinsic load_ubo (ssa_2, ssa_749) () () r26 = imov ssa_750.wzyx vec2 32 ssa_752 = fne ssa_44.xx, ssa_34.zw vec1 32 ssa_753 = feq ssa_44.x, ssa_34.w /* succs: block_93 block_94 */ if ssa_753 { block block_93: /* preds: block_92 */ r70.xy = imov ssa_746.wz r70.zw = imov r23.zw r71.xy = imov ssa_750.yx r71.zw = imov r23.zw r30 = bcsel ssa_752.xxxx, r71, r70 /* succs: block_95 */ } else { block block_94: /* preds: block_92 */ r72.xy = imov ssa_750.wz r72.zw = imov r23.zw r73.xy = imov ssa_746.yx r73.zw = imov r23.zw r30 = bcsel ssa_752.xxxx, r73, r72 /* succs: block_95 */ } block block_95: /* preds: block_93 block_94 */ /* succs: block_96 */ } block block_96: /* preds: block_91 block_95 */ /* succs: block_98 */ } else { block block_97: /* preds: block_89 */ vec1 32 ssa_764 = iadd ssa_5, 
ssa_539.y vec1 32 ssa_765 = ishl ssa_764, ssa_37 vec1 32 ssa_766 = iadd ssa_39, ssa_765 vec4 32 ssa_767 = intrinsic load_ubo (ssa_2, ssa_766) () () vec4 32 ssa_768 = fmul ssa_767.wzyx, r14 vec4 32 ssa_769 = bcsel ssa_73, ssa_768, ssa_8 vec4 32 ssa_770 = fne ssa_767.wzyx, ssa_767.wzyx vec4 32 ssa_771 = bcsel ssa_770, ssa_768, ssa_769 vec4 32 ssa_772 = fne ssa_768, ssa_768 vec4 32 ssa_773 = bcsel ssa_772, ssa_771, ssa_768 vec4 32 ssa_774 = fdot_replicated4 ssa_773, ssa_9 vec1 32 ssa_775 = iadd ssa_10, ssa_539.y vec1 32 ssa_776 = ishl ssa_775, ssa_37 vec1 32 ssa_777 = iadd ssa_39, ssa_776 vec4 32 ssa_778 = intrinsic load_ubo (ssa_2, ssa_777) () () vec4 32 ssa_779 = fmul ssa_778.wzyx, r14 vec4 32 ssa_780 = bcsel ssa_73, ssa_779, ssa_8 vec4 32 ssa_781 = fne ssa_778.wzyx, ssa_778.wzyx vec4 32 ssa_782 = bcsel ssa_781, ssa_779, ssa_780 vec4 32 ssa_783 = fne ssa_779, ssa_779 vec4 32 ssa_784 = bcsel ssa_783, ssa_782, ssa_779 vec4 32 ssa_785 = fdot_replicated4 ssa_784, ssa_9 r30.x = imov ssa_774.x r30.y = imov ssa_785.x r30.zw = imov r23.zw /* succs: block_98 */ } block block_98: /* preds: block_96 block_97 */ vec4 32 ssa_790 = fadd ssa_44.zzzz, r30.zzzz r29.xyw = imov r30.xyw r29.z = imov ssa_790.z /* succs: block_99 */ } block block_99: /* preds: block_88 block_98 */ vec1 32 ssa_795 = load_const (0x00000030 /* 0.000000 */) vec1 32 ssa_796 = intrinsic load_ubo (ssa_2, ssa_795) () () vec1 32 ssa_797 = ine ssa_796, ssa_2 /* succs: block_100 block_101 */ if ssa_797 { block block_100: /* preds: block_99 */ vec4 32 ssa_798 = fadd ssa_44.yyyy, -r29.xxxx r29.x = imov r29.y r29.y = imov ssa_798.y /* succs: block_102 */ } else { block block_101: /* preds: block_99 */ /* succs: block_102 */ } block block_102: /* preds: block_100 block_101 */ vec1 32 ssa_801 = load_const (0x00000040 /* 0.000000 */) vec1 32 ssa_802 = intrinsic load_ubo (ssa_2, ssa_801) () () vec1 32 ssa_803 = ine ssa_802, ssa_2 /* succs: block_103 block_104 */ if ssa_803 { block block_103: /* preds: block_102 */ vec4 32 ssa_804 = fadd ssa_44.yyyy, -r29.yxxx r29.xy = imov ssa_804.xy /* succs: block_105 */ } else { block block_104: /* preds: block_102 */ /* succs: block_105 */ } block block_105: /* preds: block_103 block_104 */ r2 = imov r29.xyyy vec4 32 ssa_808 = fmul ssa_44.zyzy, ssa_535.xxxx vec4 32 ssa_809 = fne ssa_535.xxxx, ssa_535.xxxx vec4 32 ssa_810 = bcsel ssa_809, ssa_808, ssa_8 vec4 32 ssa_811 = bcsel ssa_237, ssa_808, ssa_810 vec4 32 ssa_812 = fne ssa_808, ssa_808 vec4 32 ssa_813 = bcsel ssa_812, ssa_811, ssa_808 vec4 32 ssa_814 = ffloor ssa_813 vec4 32 ssa_815 = fadd ssa_813.zwww, -ssa_814.xyyy r74.xy = imov ssa_815.xy r74.zw = imov ssa_813.zw vec4 32 ssa_817 = fmul ssa_44.zzzz, r74 vec4 32 ssa_818 = fne r74, r74 vec4 32 ssa_819 = bcsel ssa_818, ssa_817, ssa_8 vec4 32 ssa_820 = bcsel ssa_247, ssa_817, ssa_819 vec4 32 ssa_821 = fne ssa_817, ssa_817 r8 = bcsel ssa_821, ssa_820, ssa_817 vec1 32 ssa_823 = load_const (0x000001c0 /* 0.000000 */) vec4 32 ssa_824 = intrinsic load_ubo (ssa_2, ssa_823) () () r31 = imov ssa_824.wzyx vec4 32 ssa_826 = f2i32 r29.zzzz r15.y = imov ssa_826.y r75.x = imov r58.x r75.yzw = imov r26.yzw vec4 32 ssa_829 = fadd ssa_41.wwww, -r75 r6.x = imov ssa_829.x r6.yzw = imov r26.yzw vec2 32 ssa_831 = fge ssa_829.xx, ssa_44.ww vec1 32 ssa_832 = imov ssa_831.x /* succs: block_106 block_128 */ if ssa_832 { block block_106: /* preds: block_105 */ vec4 32 ssa_833 = fadd ssa_44.xyyy, ssa_34.zwww r32.xy = imov ssa_833.xy r32.zw = imov r7.zw r76.xy = imov ssa_252.wz r76.zw = imov ssa_824.yx vec4 32 ssa_836 = fmul 
ssa_44.zzzz, ssa_41.wwww vec4 32 ssa_837 = fne ssa_41.wwww, ssa_41.wwww vec4 32 ssa_838 = bcsel ssa_837, ssa_836, ssa_8 vec4 32 ssa_839 = bcsel ssa_247, ssa_836, ssa_838 vec4 32 ssa_840 = fne ssa_836, ssa_836 vec4 32 ssa_841 = bcsel ssa_840, ssa_839, ssa_836 r77.xy = imov ssa_841.xy r77.zw = imov r26.zw vec4 32 ssa_843 = ffloor r77 vec4 32 ssa_844 = fadd r77.xxxx, -ssa_843.yyyy r78 = imov r76.yxzw vec2 32 ssa_846 = fge ssa_844.xx, ssa_44.ww vec4 32 ssa_847 = bcsel ssa_846.xxxx, r78, r78.yxzw vec4 32 ssa_848 = fmul ssa_847, ssa_51.wzyx vec4 32 ssa_849 = fne ssa_51.wzyx, ssa_51.wzyx vec4 32 ssa_850 = bcsel ssa_849, ssa_848, ssa_8 vec4 32 ssa_851 = fne ssa_847, ssa_847 vec4 32 ssa_852 = bcsel ssa_851, ssa_848, ssa_850 vec4 32 ssa_853 = fne ssa_848, ssa_848 vec4 32 ssa_854 = bcsel ssa_853, ssa_852, ssa_848 vec4 32 ssa_855 = fmul ssa_44.zzzz, ssa_844.xxxx vec4 32 ssa_856 = fne ssa_844.xxxx, ssa_844.xxxx vec4 32 ssa_857 = bcsel ssa_856, ssa_855, ssa_8 vec4 32 ssa_858 = bcsel ssa_247, ssa_855, ssa_857 vec4 32 ssa_859 = fne ssa_855, ssa_855 vec4 32 ssa_860 = bcsel ssa_859, ssa_858, ssa_855 vec4 32 ssa_861 = fmul ssa_44.zyzy, ssa_860.xxxx vec4 32 ssa_862 = fne ssa_860.xxxx, ssa_860.xxxx vec4 32 ssa_863 = bcsel ssa_862, ssa_861, ssa_8 vec4 32 ssa_864 = bcsel ssa_237, ssa_861, ssa_863 vec4 32 ssa_865 = fne ssa_861, ssa_861 vec4 32 ssa_866 = bcsel ssa_865, ssa_864, ssa_861 vec4 32 ssa_867 = ffloor ssa_866 vec4 32 ssa_868 = fadd ssa_866.xyyy, -ssa_867.zwww vec2 32 ssa_869 = fge ssa_868.xy, ssa_44.ww vec1 32 ssa_870 = imov ssa_869.y /* succs: block_107 block_108 */ if ssa_870 { block block_107: /* preds: block_106 */ vec4 32 ssa_871 = fmul ssa_833.xxxx, ssa_854.xxxx vec4 32 ssa_872 = fne ssa_854.xxxx, ssa_854.xxxx vec4 32 ssa_873 = bcsel ssa_872, ssa_871, ssa_8 vec4 32 ssa_874 = fne ssa_833.xxxx, ssa_833.xxxx vec4 32 ssa_875 = bcsel ssa_874, ssa_871, ssa_873 vec4 32 ssa_876 = fne ssa_871, ssa_871 vec4 32 ssa_877 = bcsel ssa_876, ssa_875, ssa_871 r32.x = imov ssa_877.x r32.y = imov ssa_833.y r32.zw = imov r7.zw /* succs: block_109 */ } else { block block_108: /* preds: block_106 */ /* succs: block_109 */ } block block_109: /* preds: block_107 block_108 */ vec1 32 ssa_880 = imov ssa_869.x /* succs: block_110 block_111 */ if ssa_880 { block block_110: /* preds: block_109 */ vec4 32 ssa_881 = fmul r32.yyyy, ssa_854.yyyy vec4 32 ssa_882 = fne ssa_854.yyyy, ssa_854.yyyy vec4 32 ssa_883 = bcsel ssa_882, ssa_881, ssa_8 vec4 32 ssa_884 = fne r32.yyyy, r32.yyyy vec4 32 ssa_885 = bcsel ssa_884, ssa_881, ssa_883 vec4 32 ssa_886 = fne ssa_881, ssa_881 vec4 32 ssa_887 = bcsel ssa_886, ssa_885, ssa_881 vec4 32 ssa_888 = fadd ssa_887, ssa_44.yyyy vec4 32 ssa_889 = fadd ssa_888.yyyy, -ssa_854.yyyy r32.y = imov ssa_889.y /* succs: block_112 */ } else { block block_111: /* preds: block_109 */ /* succs: block_112 */ } block block_112: /* preds: block_110 block_111 */ vec4 32 ssa_892 = fadd ssa_44.yyyy, -ssa_854.xyyy r31.xy = imov ssa_892.xy r31.zw = imov ssa_847.zw vec4 32 ssa_894 = fmul ssa_44.zzzz, ssa_868.xxxx vec4 32 ssa_895 = fne ssa_868.xxxx, ssa_868.xxxx vec4 32 ssa_896 = bcsel ssa_895, ssa_894, ssa_8 vec4 32 ssa_897 = bcsel ssa_247, ssa_894, ssa_896 vec4 32 ssa_898 = fne ssa_894, ssa_894 vec4 32 ssa_899 = bcsel ssa_898, ssa_897, ssa_894 vec4 32 ssa_900 = fmul ssa_44.zyzy, ssa_899.xxxx vec4 32 ssa_901 = fne ssa_899.xxxx, ssa_899.xxxx vec4 32 ssa_902 = bcsel ssa_901, ssa_900, ssa_8 vec4 32 ssa_903 = bcsel ssa_237, ssa_900, ssa_902 vec4 32 ssa_904 = fne ssa_900, ssa_900 vec4 32 ssa_905 = bcsel ssa_904, ssa_903, 
ssa_900 vec4 32 ssa_906 = ffloor ssa_905 vec4 32 ssa_907 = fadd ssa_905.xyyy, -ssa_906.zwww vec2 32 ssa_908 = fge ssa_907.xy, ssa_44.ww vec1 32 ssa_909 = imov ssa_908.y /* succs: block_113 block_114 */ if ssa_909 { block block_113: /* preds: block_112 */ vec4 32 ssa_910 = fadd r32.xxxx, ssa_892.xxxx r32.x = imov ssa_910.x /* succs: block_115 */ } else { block block_114: /* preds: block_112 */ /* succs: block_115 */ } block block_115: /* preds: block_113 block_114 */ vec1 32 ssa_913 = imov ssa_908.x /* succs: block_116 block_117 */ if ssa_913 { block block_116: /* preds: block_115 */ vec4 32 ssa_914 = fadd r32.yyyy, -ssa_892.yyyy r32.y = imov ssa_914.y /* succs: block_118 */ } else { block block_117: /* preds: block_115 */ /* succs: block_118 */ } block block_118: /* preds: block_116 block_117 */ vec4 32 ssa_917 = fmul ssa_44.zzzz, ssa_907.xxxx vec4 32 ssa_918 = fne ssa_907.xxxx, ssa_907.xxxx vec4 32 ssa_919 = bcsel ssa_918, ssa_917, ssa_8 vec4 32 ssa_920 = bcsel ssa_247, ssa_917, ssa_919 vec4 32 ssa_921 = fne ssa_917, ssa_917 vec4 32 ssa_922 = bcsel ssa_921, ssa_920, ssa_917 vec4 32 ssa_923 = fmul ssa_44.zyzy, ssa_922.xxxx vec4 32 ssa_924 = fne ssa_922.xxxx, ssa_922.xxxx vec4 32 ssa_925 = bcsel ssa_924, ssa_923, ssa_8 vec4 32 ssa_926 = bcsel ssa_237, ssa_923, ssa_925 vec4 32 ssa_927 = fne ssa_923, ssa_923 vec4 32 ssa_928 = bcsel ssa_927, ssa_926, ssa_923 vec4 32 ssa_929 = ffloor ssa_928 vec4 32 ssa_930 = fadd ssa_928.xyyy, -ssa_929.zwww vec2 32 ssa_931 = fge ssa_930.xy, ssa_44.ww vec1 32 ssa_932 = imov ssa_931.y /* succs: block_119 block_120 */ if ssa_932 { block block_119: /* preds: block_118 */ vec4 32 ssa_933 = fadd ssa_44.yyyy, -r32.xxxx r32.x = imov ssa_933.x /* succs: block_121 */ } else { block block_120: /* preds: block_118 */ /* succs: block_121 */ } block block_121: /* preds: block_119 block_120 */ vec1 32 ssa_936 = imov ssa_931.x /* succs: block_122 block_123 */ if ssa_936 { block block_122: /* preds: block_121 */ vec4 32 ssa_937 = fadd ssa_44.yyyy, -r32.yyyy r32.y = imov ssa_937.y /* succs: block_124 */ } else { block block_123: /* preds: block_121 */ /* succs: block_124 */ } block block_124: /* preds: block_122 block_123 */ r79.xy = imov r77.xy r79.zw = imov ssa_929.zw vec4 32 ssa_941 = ffloor r79 vec4 32 ssa_942 = fadd r79.xxxx, -ssa_941.yyyy r6.x = imov ssa_942.x r6.y = imov ssa_941.y r6.zw = imov r79.zw vec2 32 ssa_944 = fge ssa_942.xx, ssa_44.ww vec1 32 ssa_945 = imov ssa_944.x /* succs: block_125 block_126 */ if ssa_945 { block block_125: /* preds: block_124 */ vec4 32 ssa_946 = fadd ssa_44.yyyy, -r32.yxxx r32.xy = imov ssa_946.xy /* succs: block_127 */ } else { block block_126: /* preds: block_124 */ /* succs: block_127 */ } block block_127: /* preds: block_125 block_126 */ vec4 32 ssa_949 = fadd ssa_44.yyyy, -r32.yyyy r7.xzw = imov r32.xzw r7.y = imov ssa_949.y /* succs: block_129 */ } else { block block_128: /* preds: block_105 */ /* succs: block_129 */ } block block_129: /* preds: block_127 block_128 */ vec1 32 ssa_954 = flt r8.y, ssa_44.y /* succs: block_130 block_134 */ if ssa_954 { block block_130: /* preds: block_129 */ r80.x = imov r75.x r80.yzw = imov r6.yzw vec4 32 ssa_956 = fadd ssa_41.wwww, -r80 vec1 32 ssa_957 = iadd ssa_5, ssa_826.y vec1 32 ssa_958 = ishl ssa_957, ssa_37 vec1 32 ssa_959 = iadd ssa_39, ssa_958 vec4 32 ssa_960 = intrinsic load_ubo (ssa_2, ssa_959) () () r6 = imov ssa_960.wzyx vec2 32 ssa_962 = fge ssa_956.xx, ssa_44.ww vec1 32 ssa_963 = imov ssa_962.x /* succs: block_131 block_132 */ if ssa_963 { block block_131: /* preds: block_130 */ vec4 
32 ssa_964 = fmul r7.xyyy, ssa_960.wzzz vec4 32 ssa_965 = fne ssa_960.wzzz, ssa_960.wzzz vec4 32 ssa_966 = bcsel ssa_965, ssa_964, ssa_8 vec4 32 ssa_967 = fne r7.xyyy, r7.xyyy vec4 32 ssa_968 = bcsel ssa_967, ssa_964, ssa_966 vec4 32 ssa_969 = fne ssa_964, ssa_964 vec4 32 ssa_970 = bcsel ssa_969, ssa_968, ssa_964 vec4 32 ssa_971 = fadd ssa_970, ssa_960.yxxx vec4 32 ssa_972 = fmul ssa_971.xyyy, r31.zwww vec4 32 ssa_973 = fne r31.zwww, r31.zwww vec4 32 ssa_974 = bcsel ssa_973, ssa_972, ssa_8 vec4 32 ssa_975 = fne ssa_971.xyyy, ssa_971.xyyy vec4 32 ssa_976 = bcsel ssa_975, ssa_972, ssa_974 vec4 32 ssa_977 = fne ssa_972, ssa_972 vec4 32 ssa_978 = bcsel ssa_977, ssa_976, ssa_972 vec4 32 ssa_979 = fadd ssa_44.yyyy, -ssa_978.yyyy r33.x = imov ssa_978.x r33.y = imov ssa_979.y r33.zw = imov r29.zw /* succs: block_133 */ } else { block block_132: /* preds: block_130 */ r81.x = imov ssa_960.w r81.y = imov r8.y r81.zw = imov r29.zw r82.x = imov ssa_960.y r82.y = imov r8.y r82.zw = imov r29.zw vec2 32 ssa_983 = fne ssa_44.xx, ssa_34.zw vec4 32 ssa_984 = bcsel ssa_983.xxxx, r82, r81 r83.xzw = imov ssa_984.xzw r83.y = imov ssa_960.z r84.xzw = imov r83.xzw r84.y = imov ssa_960.x r33 = bcsel ssa_983.yyyy, r84, r83 /* succs: block_133 */ } block block_133: /* preds: block_131 block_132 */ vec4 32 ssa_989 = fadd ssa_44.yyyy, r33.zzzz r34.xyw = imov r33.xyw r34.z = imov ssa_989.z /* succs: block_144 */ } else { block block_134: /* preds: block_129 */ vec1 32 ssa_991 = flt r8.x, ssa_44.y /* succs: block_135 block_142 */ if ssa_991 { block block_135: /* preds: block_134 */ r85.x = imov r75.x r85.yzw = imov r6.yzw vec4 32 ssa_993 = fadd ssa_41.wwww, -r85 r6.x = imov ssa_993.x vec2 32 ssa_995 = fge ssa_993.xx, ssa_44.ww vec1 32 ssa_996 = imov ssa_995.x /* succs: block_136 block_137 */ if ssa_996 { block block_136: /* preds: block_135 */ r7.zw = imov ssa_44.xy vec1 32 ssa_998 = iadd ssa_5, ssa_826.y vec1 32 ssa_999 = ishl ssa_998, ssa_37 vec1 32 ssa_1000 = iadd ssa_39, ssa_999 vec4 32 ssa_1001 = intrinsic load_ubo (ssa_2, ssa_1000) () () vec4 32 ssa_1002 = fmul ssa_1001.wzyx, r7 vec4 32 ssa_1003 = fne r7, r7 vec4 32 ssa_1004 = bcsel ssa_1003, ssa_1002, ssa_8 vec4 32 ssa_1005 = fne ssa_1001.wzyx, ssa_1001.wzyx vec4 32 ssa_1006 = bcsel ssa_1005, ssa_1002, ssa_1004 vec4 32 ssa_1007 = fne ssa_1002, ssa_1002 vec4 32 ssa_1008 = bcsel ssa_1007, ssa_1006, ssa_1002 vec4 32 ssa_1009 = fdot_replicated4 ssa_1008, ssa_9 vec1 32 ssa_1010 = iadd ssa_10, ssa_826.y vec1 32 ssa_1011 = ishl ssa_1010, ssa_37 vec1 32 ssa_1012 = iadd ssa_39, ssa_1011 vec4 32 ssa_1013 = intrinsic load_ubo (ssa_2, ssa_1012) () () vec4 32 ssa_1014 = fmul ssa_1013.wzyx, r7 vec4 32 ssa_1015 = bcsel ssa_1003, ssa_1014, ssa_8 vec4 32 ssa_1016 = fne ssa_1013.wzyx, ssa_1013.wzyx vec4 32 ssa_1017 = bcsel ssa_1016, ssa_1014, ssa_1015 vec4 32 ssa_1018 = fne ssa_1014, ssa_1014 vec4 32 ssa_1019 = bcsel ssa_1018, ssa_1017, ssa_1014 vec4 32 ssa_1020 = fdot_replicated4 ssa_1019, ssa_9 r86.x = imov ssa_1009.x r86.y = imov ssa_1020.x r86.zw = imov r29.zw vec4 32 ssa_1022 = fmul r86.xyyy, r31.zwww vec4 32 ssa_1023 = fne r31.zwww, r31.zwww vec4 32 ssa_1024 = bcsel ssa_1023, ssa_1022, ssa_8 vec4 32 ssa_1025 = fne r86.xyyy, r86.xyyy vec4 32 ssa_1026 = bcsel ssa_1025, ssa_1022, ssa_1024 vec4 32 ssa_1027 = fne ssa_1022, ssa_1022 vec4 32 ssa_1028 = bcsel ssa_1027, ssa_1026, ssa_1022 vec4 32 ssa_1029 = fadd ssa_44.yyyy, -ssa_1028.yyyy r35.x = imov ssa_1028.x r35.y = imov ssa_1029.y r35.zw = imov r29.zw /* succs: block_141 */ } else { block block_137: /* preds: block_135 */ vec1 
32 ssa_1031 = iadd ssa_5, ssa_826.y vec1 32 ssa_1032 = ishl ssa_1031, ssa_37 vec1 32 ssa_1033 = iadd ssa_39, ssa_1032 vec4 32 ssa_1034 = intrinsic load_ubo (ssa_2, ssa_1033) () () vec1 32 ssa_1035 = iadd ssa_10, ssa_826.y vec1 32 ssa_1036 = ishl ssa_1035, ssa_37 vec1 32 ssa_1037 = iadd ssa_39, ssa_1036 vec4 32 ssa_1038 = intrinsic load_ubo (ssa_2, ssa_1037) () () r6 = imov ssa_1038.wzyx vec2 32 ssa_1040 = fne ssa_44.xx, ssa_34.zw vec1 32 ssa_1041 = feq ssa_44.x, ssa_34.w /* succs: block_138 block_139 */ if ssa_1041 { block block_138: /* preds: block_137 */ r87.xy = imov ssa_1034.wz r87.zw = imov r29.zw r88.xy = imov ssa_1038.yx r88.zw = imov r29.zw r35 = bcsel ssa_1040.xxxx, r88, r87 /* succs: block_140 */ } else { block block_139: /* preds: block_137 */ r89.xy = imov ssa_1038.wz r89.zw = imov r29.zw r90.xy = imov ssa_1034.yx r90.zw = imov r29.zw r35 = bcsel ssa_1040.xxxx, r90, r89 /* succs: block_140 */ } block block_140: /* preds: block_138 block_139 */ /* succs: block_141 */ } block block_141: /* preds: block_136 block_140 */ /* succs: block_143 */ } else { block block_142: /* preds: block_134 */ vec1 32 ssa_1052 = iadd ssa_5, ssa_826.y vec1 32 ssa_1053 = ishl ssa_1052, ssa_37 vec1 32 ssa_1054 = iadd ssa_39, ssa_1053 vec4 32 ssa_1055 = intrinsic load_ubo (ssa_2, ssa_1054) () () vec4 32 ssa_1056 = fmul ssa_1055.wzyx, r14 vec4 32 ssa_1057 = bcsel ssa_73, ssa_1056, ssa_8 vec4 32 ssa_1058 = fne ssa_1055.wzyx, ssa_1055.wzyx vec4 32 ssa_1059 = bcsel ssa_1058, ssa_1056, ssa_1057 vec4 32 ssa_1060 = fne ssa_1056, ssa_1056 vec4 32 ssa_1061 = bcsel ssa_1060, ssa_1059, ssa_1056 vec4 32 ssa_1062 = fdot_replicated4 ssa_1061, ssa_9 vec1 32 ssa_1063 = iadd ssa_10, ssa_826.y vec1 32 ssa_1064 = ishl ssa_1063, ssa_37 vec1 32 ssa_1065 = iadd ssa_39, ssa_1064 vec4 32 ssa_1066 = intrinsic load_ubo (ssa_2, ssa_1065) () () vec4 32 ssa_1067 = fmul ssa_1066.wzyx, r14 vec4 32 ssa_1068 = bcsel ssa_73, ssa_1067, ssa_8 vec4 32 ssa_1069 = fne ssa_1066.wzyx, ssa_1066.wzyx vec4 32 ssa_1070 = bcsel ssa_1069, ssa_1067, ssa_1068 vec4 32 ssa_1071 = fne ssa_1067, ssa_1067 vec4 32 ssa_1072 = bcsel ssa_1071, ssa_1070, ssa_1067 vec4 32 ssa_1073 = fdot_replicated4 ssa_1072, ssa_9 r35.x = imov ssa_1062.x r35.y = imov ssa_1073.x r35.zw = imov r29.zw /* succs: block_143 */ } block block_143: /* preds: block_141 block_142 */ vec4 32 ssa_1078 = fadd ssa_44.zzzz, r35.zzzz r34.xyw = imov r35.xyw r34.z = imov ssa_1078.z /* succs: block_144 */ } block block_144: /* preds: block_133 block_143 */ vec1 32 ssa_1083 = load_const (0x00000050 /* 0.000000 */) vec1 32 ssa_1084 = intrinsic load_ubo (ssa_2, ssa_1083) () () vec1 32 ssa_1085 = ine ssa_1084, ssa_2 /* succs: block_145 block_146 */ if ssa_1085 { block block_145: /* preds: block_144 */ vec4 32 ssa_1086 = fadd ssa_44.yyyy, -r34.xxxx r34.x = imov r34.y r34.y = imov ssa_1086.y /* succs: block_147 */ } else { block block_146: /* preds: block_144 */ /* succs: block_147 */ } block block_147: /* preds: block_145 block_146 */ vec1 32 ssa_1089 = load_const (0x00000060 /* 0.000000 */) vec1 32 ssa_1090 = intrinsic load_ubo (ssa_2, ssa_1089) () () vec1 32 ssa_1091 = ine ssa_1090, ssa_2 /* succs: block_148 block_149 */ if ssa_1091 { block block_148: /* preds: block_147 */ vec4 32 ssa_1092 = fadd ssa_44.yyyy, -r34.yxxx r34.xy = imov ssa_1092.xy /* succs: block_150 */ } else { block block_149: /* preds: block_147 */ /* succs: block_150 */ } block block_150: /* preds: block_148 block_149 */ r1 = imov r34.xyyy r0 = imov ssa_15 break /* succs: block_162 */ } else { block block_151: /* preds: block_2 */ 
/* succs: block_152 */ } block block_152: /* preds: block_151 */ vec1 32 ssa_1096 = ieq ssa_6, r13 vec1 32 ssa_1097 = ior ssa_33, ssa_1096 /* succs: block_153 block_160 */ if ssa_1097 { block block_153: /* preds: block_152 */ vec1 32 ssa_1098 = iadd ssa_16, r12.x vec1 32 ssa_1099 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1100 = ishl ssa_1098, ssa_1099 vec1 32 ssa_1101 = load_const (0x00000140 /* 0.000000 */) vec1 32 ssa_1102 = iadd ssa_1101, ssa_1100 vec4 32 ssa_1103 = intrinsic load_ubo (ssa_2, ssa_1102) () () vec4 32 ssa_1104 = fmul ssa_1103.wzzz, r10.xyyy vec4 32 ssa_1105 = fne r10.xyyy, r10.xyyy vec4 32 ssa_1106 = bcsel ssa_1105, ssa_1104, ssa_8 vec4 32 ssa_1107 = fne ssa_1103.wzzz, ssa_1103.wzzz vec4 32 ssa_1108 = bcsel ssa_1107, ssa_1104, ssa_1106 vec4 32 ssa_1109 = fne ssa_1104, ssa_1104 vec4 32 ssa_1110 = bcsel ssa_1109, ssa_1108, ssa_1104 vec1 32 ssa_1111 = iadd ssa_14, r12.x vec1 32 ssa_1112 = ishl ssa_1111, ssa_1099 vec1 32 ssa_1113 = iadd ssa_1101, ssa_1112 vec4 32 ssa_1114 = intrinsic load_ubo (ssa_2, ssa_1113) () () vec4 32 ssa_1115 = fmul ssa_1110.xyyy, r9.xyyy vec4 32 ssa_1116 = fne r9.xyyy, r9.xyyy vec4 32 ssa_1117 = bcsel ssa_1116, ssa_1115, ssa_8 vec4 32 ssa_1118 = fne ssa_1110.xyyy, ssa_1110.xyyy vec4 32 ssa_1119 = bcsel ssa_1118, ssa_1115, ssa_1117 vec4 32 ssa_1120 = fne ssa_1115, ssa_1115 vec4 32 ssa_1121 = bcsel ssa_1120, ssa_1119, ssa_1115 vec4 32 ssa_1122 = fadd ssa_1121, r9.zwww vec4 32 ssa_1123 = intrinsic load_input (ssa_2) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_1124 = fmul ssa_1114.wwww, ssa_1123.wwww vec4 32 ssa_1125 = fne ssa_1123.wwww, ssa_1123.wwww vec4 32 ssa_1126 = bcsel ssa_1125, ssa_1124, ssa_8 vec4 32 ssa_1127 = fne ssa_1114.wwww, ssa_1114.wwww vec4 32 ssa_1128 = bcsel ssa_1127, ssa_1124, ssa_1126 vec4 32 ssa_1129 = fne ssa_1124, ssa_1124 vec4 32 ssa_1130 = bcsel ssa_1129, ssa_1128, ssa_1124 vec4 32 ssa_1131 = fadd ssa_1130, ssa_1114.wwww vec4 32 ssa_1132 = fadd ssa_1103.yxxx, ssa_1122.xyyy vec4 32 ssa_1133 = fmul r9.yyyy, ssa_1103.zzzz vec4 32 ssa_1134 = fne ssa_1103.zzzz, ssa_1103.zzzz vec4 32 ssa_1135 = bcsel ssa_1134, ssa_1133, ssa_8 vec4 32 ssa_1136 = fne r9.yyyy, r9.yyyy vec4 32 ssa_1137 = bcsel ssa_1136, ssa_1133, ssa_1135 vec4 32 ssa_1138 = fne ssa_1133, ssa_1133 vec4 32 ssa_1139 = bcsel ssa_1138, ssa_1137, ssa_1133 vec4 32 ssa_1140 = fadd ssa_1139, -r9.yyyy vec4 32 ssa_1141 = fadd ssa_1132.xxxx, ssa_1131.xxxx vec4 32 ssa_1142 = fadd ssa_1132.yyyy, ssa_1140.yyyy r14.x = imov ssa_1141.x r14.y = imov ssa_1142.y r14.zw = imov r10.zw vec1 32 ssa_1144 = iadd ssa_5, r12.x vec1 32 ssa_1145 = ishl ssa_1144, ssa_1099 vec1 32 ssa_1146 = iadd ssa_1101, ssa_1145 vec4 32 ssa_1147 = intrinsic load_ubo (ssa_2, ssa_1146) () () vec4 32 ssa_1148 = fmul ssa_1147.wzyx, r14 vec4 32 ssa_1149 = fne r14, r14 vec4 32 ssa_1150 = bcsel ssa_1149, ssa_1148, ssa_8 vec4 32 ssa_1151 = fne ssa_1147.wzyx, ssa_1147.wzyx vec4 32 ssa_1152 = bcsel ssa_1151, ssa_1148, ssa_1150 vec4 32 ssa_1153 = fne ssa_1148, ssa_1148 vec4 32 ssa_1154 = bcsel ssa_1153, ssa_1152, ssa_1148 vec4 32 ssa_1155 = fdot_replicated4 ssa_1154, ssa_9 vec1 32 ssa_1156 = iadd ssa_10, r12.x vec1 32 ssa_1157 = ishl ssa_1156, ssa_1099 vec1 32 ssa_1158 = iadd ssa_1101, ssa_1157 vec4 32 ssa_1159 = intrinsic load_ubo (ssa_2, ssa_1158) () () vec4 32 ssa_1160 = fmul ssa_1159.wzyx, r14 vec4 32 ssa_1161 = bcsel ssa_1149, ssa_1160, ssa_8 vec4 32 ssa_1162 = fne ssa_1159.wzyx, ssa_1159.wzyx vec4 32 ssa_1163 = bcsel ssa_1162, ssa_1160, ssa_1161 vec4 32 ssa_1164 = fne ssa_1160, ssa_1160 vec4 32 ssa_1165 
= bcsel ssa_1164, ssa_1163, ssa_1160 vec4 32 ssa_1166 = fdot_replicated4 ssa_1165, ssa_9 vec1 32 ssa_1167 = iadd ssa_11, r12.x vec1 32 ssa_1168 = ishl ssa_1167, ssa_1099 vec1 32 ssa_1169 = iadd ssa_1101, ssa_1168 vec4 32 ssa_1170 = intrinsic load_ubo (ssa_2, ssa_1169) () () vec4 32 ssa_1171 = fmul ssa_1170.wzyx, r14 vec4 32 ssa_1172 = bcsel ssa_1149, ssa_1171, ssa_8 vec4 32 ssa_1173 = fne ssa_1170.wzyx, ssa_1170.wzyx vec4 32 ssa_1174 = bcsel ssa_1173, ssa_1171, ssa_1172 vec4 32 ssa_1175 = fne ssa_1171, ssa_1171 vec4 32 ssa_1176 = bcsel ssa_1175, ssa_1174, ssa_1171 vec4 32 ssa_1177 = fdot_replicated4 ssa_1176, ssa_9 r36.x = imov ssa_1155.x r36.y = imov ssa_1166.x r36.z = imov ssa_1177.x r36.w = imov r10.w vec1 32 ssa_1179 = load_const (0x00000180 /* 0.000000 */) vec4 32 ssa_1180 = intrinsic load_ubo (ssa_2, ssa_1179) () () vec4 32 ssa_1181 = fadd -ssa_1170.xxxx, ssa_1180.yyyy vec1 32 ssa_1182 = load_const (0x00000190 /* 0.000000 */) vec4 32 ssa_1183 = intrinsic load_ubo (ssa_2, ssa_1182) () () vec1 32 ssa_1184 = flt ssa_1183.x, ssa_1181.z vec1 32 ssa_1185 = fne ssa_1183.x, ssa_1180.w r91.x = imov ssa_1185 r91.y = imov ssa_1184.x vec2 32 ssa_1187 = inot r91 vec1 32 ssa_1188 = ball_iequal2 ssa_1187, ssa_12 /* succs: block_154 block_155 */ if ssa_1188 { block block_154: /* preds: block_153 */ vec1 32 ssa_1189 = frcp ssa_1181.z vec4 32 ssa_1190 = fadd ssa_1155.xxxx, ssa_1180.wwww vec4 32 ssa_1191 = fmul ssa_1180.zzzz, ssa_1189.xxxx vec4 32 ssa_1192 = fmov -ssa_1191 vec4 32 ssa_1193 = fne ssa_1189.xxxx, ssa_1189.xxxx vec4 32 ssa_1194 = bcsel ssa_1193, ssa_1192, ssa_8 vec4 32 ssa_1195 = fne -ssa_1180.zzzz, -ssa_1180.zzzz vec4 32 ssa_1196 = bcsel ssa_1195, ssa_1192, ssa_1194 vec4 32 ssa_1197 = fne -ssa_1191, -ssa_1191 vec4 32 ssa_1198 = bcsel ssa_1197, ssa_1196, ssa_1192 vec4 32 ssa_1199 = fadd ssa_1198, ssa_1190.xxxx r36.x = imov ssa_1199.x r36.y = imov ssa_1166.x r36.z = imov ssa_1177.x r36.w = imov r10.w /* succs: block_156 */ } else { block block_155: /* preds: block_153 */ /* succs: block_156 */ } block block_156: /* preds: block_154 block_155 */ vec4 32 ssa_1202 = intrinsic load_ubo (ssa_2, ssa_1101) () () vec4 32 ssa_1203 = fmul ssa_1202.wzyx, r36 vec4 32 ssa_1204 = fne r36, r36 vec4 32 ssa_1205 = bcsel ssa_1204, ssa_1203, ssa_8 vec4 32 ssa_1206 = fne ssa_1202.wzyx, ssa_1202.wzyx vec4 32 ssa_1207 = bcsel ssa_1206, ssa_1203, ssa_1205 vec4 32 ssa_1208 = fne ssa_1203, ssa_1203 vec4 32 ssa_1209 = bcsel ssa_1208, ssa_1207, ssa_1203 vec4 32 ssa_1210 = fdot_replicated4 ssa_1209, ssa_9 vec1 32 ssa_1211 = load_const (0x00000150 /* 0.000000 */) vec4 32 ssa_1212 = intrinsic load_ubo (ssa_2, ssa_1211) () () vec4 32 ssa_1213 = fmul ssa_1212.wzyx, r36 vec4 32 ssa_1214 = bcsel ssa_1204, ssa_1213, ssa_8 vec4 32 ssa_1215 = fne ssa_1212.wzyx, ssa_1212.wzyx vec4 32 ssa_1216 = bcsel ssa_1215, ssa_1213, ssa_1214 vec4 32 ssa_1217 = fne ssa_1213, ssa_1213 vec4 32 ssa_1218 = bcsel ssa_1217, ssa_1216, ssa_1213 vec4 32 ssa_1219 = fdot_replicated4 ssa_1218, ssa_9 vec1 32 ssa_1220 = load_const (0x00000160 /* 0.000000 */) vec4 32 ssa_1221 = intrinsic load_ubo (ssa_2, ssa_1220) () () vec4 32 ssa_1222 = fmul ssa_1221.wzyx, r36 vec4 32 ssa_1223 = bcsel ssa_1204, ssa_1222, ssa_8 vec4 32 ssa_1224 = fne ssa_1221.wzyx, ssa_1221.wzyx vec4 32 ssa_1225 = bcsel ssa_1224, ssa_1222, ssa_1223 vec4 32 ssa_1226 = fne ssa_1222, ssa_1222 vec4 32 ssa_1227 = bcsel ssa_1226, ssa_1225, ssa_1222 vec4 32 ssa_1228 = fdot_replicated4 ssa_1227, ssa_9 vec1 32 ssa_1229 = load_const (0x00000170 /* 0.000000 */) vec4 32 ssa_1230 = intrinsic load_ubo 
(ssa_2, ssa_1229) () () vec4 32 ssa_1231 = fmul ssa_1230.wzyx, r36 vec4 32 ssa_1232 = bcsel ssa_1204, ssa_1231, ssa_8 vec4 32 ssa_1233 = fne ssa_1230.wzyx, ssa_1230.wzyx vec4 32 ssa_1234 = bcsel ssa_1233, ssa_1231, ssa_1232 vec4 32 ssa_1235 = fne ssa_1231, ssa_1231 vec4 32 ssa_1236 = bcsel ssa_1235, ssa_1234, ssa_1231 vec4 32 ssa_1237 = fdot_replicated4 ssa_1236, ssa_9 r5.x = imov ssa_1210.x r5.y = imov ssa_1219.x r5.z = imov ssa_1228.x r5.w = imov ssa_1237.x vec1 32 ssa_1239 = intrinsic load_ubo (ssa_2, ssa_5) () () vec1 32 ssa_1240 = ine ssa_1239, ssa_2 /* succs: block_157 block_158 */ if ssa_1240 { block block_157: /* preds: block_156 */ vec4 32 ssa_1241 = fmax -ssa_1123.zwzw, ssa_1123.zwzw vec4 32 ssa_1242 = f2i32 r11.zxxx r15.xy = imov ssa_1242.xy r15.z = imov r12.z vec1 32 ssa_1244 = iadd ssa_5, ssa_1242.x vec1 32 ssa_1245 = ishl ssa_1244, ssa_1099 vec1 32 ssa_1246 = iadd ssa_1101, ssa_1245 vec4 32 ssa_1247 = intrinsic load_ubo (ssa_2, ssa_1246) () () vec4 32 ssa_1248 = fmul ssa_1247.yxxx, ssa_1241 vec4 32 ssa_1249 = fne ssa_1241, ssa_1241 vec4 32 ssa_1250 = bcsel ssa_1249, ssa_1248, ssa_8 vec4 32 ssa_1251 = fne ssa_1247.yxxx, ssa_1247.yxxx vec4 32 ssa_1252 = bcsel ssa_1251, ssa_1248, ssa_1250 vec4 32 ssa_1253 = fne ssa_1248, ssa_1248 vec4 32 ssa_1254 = bcsel ssa_1253, ssa_1252, ssa_1248 vec4 32 ssa_1255 = flt ssa_1241, ssa_1183.yyyy vec4 32 ssa_1256 = b2f ssa_1255 r37.xy = imov ssa_1241.xy r37.zw = imov ssa_1256.zw vec1 32 ssa_1258 = iadd ssa_17, ssa_1242.y vec1 32 ssa_1259 = ishl ssa_1258, ssa_1099 vec1 32 ssa_1260 = iadd ssa_1101, ssa_1259 vec4 32 ssa_1261 = intrinsic load_ubo (ssa_2, ssa_1260) () () r38 = imov ssa_1261.wzyx vec4 32 ssa_1263 = fmul ssa_1256.zwww, ssa_1247.wzzz vec4 32 ssa_1264 = fne ssa_1247.wzzz, ssa_1247.wzzz vec4 32 ssa_1265 = bcsel ssa_1264, ssa_1263, ssa_8 vec4 32 ssa_1266 = fne ssa_1256.zwww, ssa_1256.zwww vec4 32 ssa_1267 = bcsel ssa_1266, ssa_1263, ssa_1265 vec4 32 ssa_1268 = fne ssa_1263, ssa_1263 vec4 32 ssa_1269 = bcsel ssa_1268, ssa_1267, ssa_1263 vec4 32 ssa_1270 = fadd ssa_1269, ssa_1254.xyyy vec1 32 ssa_1271 = iadd ssa_18, ssa_1242.y vec1 32 ssa_1272 = ishl ssa_1271, ssa_1099 vec1 32 ssa_1273 = iadd ssa_1101, ssa_1272 vec4 32 ssa_1274 = intrinsic load_ubo (ssa_2, ssa_1273) () () r39 = imov ssa_1274.wzyx vec4 32 ssa_1276 = fadd ssa_1183.yyyy, -ssa_1270.yyyy r1.x = imov ssa_1270.x r1.y = imov ssa_1276.y r1.zw = imov r8.zw /* succs: block_159 */ } else { block block_158: /* preds: block_156 */ vec4 32 ssa_1278 = fmax -ssa_1123.zwzw, ssa_1123.zwzw vec4 32 ssa_1279 = f2i32 r11.zwww r15.xy = imov ssa_1279.xy r15.z = imov r12.z vec1 32 ssa_1281 = iadd ssa_5, ssa_1279.x vec1 32 ssa_1282 = ishl ssa_1281, ssa_1099 vec1 32 ssa_1283 = iadd ssa_1101, ssa_1282 vec4 32 ssa_1284 = intrinsic load_ubo (ssa_2, ssa_1283) () () vec4 32 ssa_1285 = fmul ssa_1284.yxxx, ssa_1278 vec4 32 ssa_1286 = fne ssa_1278, ssa_1278 vec4 32 ssa_1287 = bcsel ssa_1286, ssa_1285, ssa_8 vec4 32 ssa_1288 = fne ssa_1284.yxxx, ssa_1284.yxxx vec4 32 ssa_1289 = bcsel ssa_1288, ssa_1285, ssa_1287 vec4 32 ssa_1290 = fne ssa_1285, ssa_1285 vec4 32 ssa_1291 = bcsel ssa_1290, ssa_1289, ssa_1285 vec4 32 ssa_1292 = flt ssa_1278, ssa_1183.yyyy vec4 32 ssa_1293 = b2f ssa_1292 r37.xy = imov ssa_1278.xy r37.zw = imov ssa_1293.zw vec1 32 ssa_1295 = iadd ssa_5, ssa_1279.y vec1 32 ssa_1296 = ishl ssa_1295, ssa_1099 vec1 32 ssa_1297 = iadd ssa_1101, ssa_1296 vec4 32 ssa_1298 = intrinsic load_ubo (ssa_2, ssa_1297) () () r38 = imov ssa_1298.wzyx vec4 32 ssa_1300 = fmul ssa_1293.zwww, ssa_1284.wzzz vec4 32 
ssa_1301 = fne ssa_1284.wzzz, ssa_1284.wzzz vec4 32 ssa_1302 = bcsel ssa_1301, ssa_1300, ssa_8 vec4 32 ssa_1303 = fne ssa_1293.zwww, ssa_1293.zwww vec4 32 ssa_1304 = bcsel ssa_1303, ssa_1300, ssa_1302 vec4 32 ssa_1305 = fne ssa_1300, ssa_1300 vec4 32 ssa_1306 = bcsel ssa_1305, ssa_1304, ssa_1300 vec4 32 ssa_1307 = fadd ssa_1306, ssa_1291.xyyy vec1 32 ssa_1308 = iadd ssa_10, ssa_1279.y vec1 32 ssa_1309 = ishl ssa_1308, ssa_1099 vec1 32 ssa_1310 = iadd ssa_1101, ssa_1309 vec4 32 ssa_1311 = intrinsic load_ubo (ssa_2, ssa_1310) () () r39 = imov ssa_1311.wzyx vec4 32 ssa_1313 = fadd ssa_1183.yyyy, -ssa_1307.yyyy r1.x = imov ssa_1307.x r1.y = imov ssa_1313.y r1.zw = imov r8.zw /* succs: block_159 */ } block block_159: /* preds: block_157 block_158 */ vec4 32 ssa_1320 = fadd r39, -r38 vec4 32 ssa_1321 = fmul ssa_1320, r37.yyyy vec4 32 ssa_1322 = fne r37.yyyy, r37.yyyy vec4 32 ssa_1323 = bcsel ssa_1322, ssa_1321, ssa_8 vec4 32 ssa_1324 = fne ssa_1320, ssa_1320 vec4 32 ssa_1325 = bcsel ssa_1324, ssa_1321, ssa_1323 vec4 32 ssa_1326 = fne ssa_1321, ssa_1321 vec4 32 ssa_1327 = bcsel ssa_1326, ssa_1325, ssa_1321 r4 = fadd ssa_1327, r38 r8 = imov r1 r3 = imov r8 r2 = imov r3 r0 = imov ssa_15 break /* succs: block_162 */ } else { block block_160: /* preds: block_152 */ /* succs: block_161 */ } block block_161: /* preds: block_160 */ r15 = imov r12 r14 = imov r10 r0 = imov ssa_15 break /* succs: block_162 */ } block block_162: /* preds: block_4 block_150 block_159 block_161 */ /* succs: block_163 block_164 */ if r0 { block block_163: /* preds: block_162 */ break /* succs: block_166 */ } else { block block_164: /* preds: block_162 */ /* succs: block_165 */ } block block_165: /* preds: block_164 */ r12 = imov r15 r10 = imov r14 /* succs: block_1 */ } block block_166: /* preds: block_163 */ intrinsic store_output (r5, ssa_2) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr0 */ intrinsic store_output (r4, ssa_2) () (32, 15, 0) /* base=32 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr1 */ intrinsic store_output (r3, ssa_2) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr2 */ intrinsic store_output (r2, ssa_2) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr3 */ intrinsic store_output (r1, ssa_2) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */ /* vs_out_attr4 */ /* succs: block_0 */ block block_0: } VS Output VUE map (9 slots, SSO) [0] VARYING_SLOT_PSIZ [1] VARYING_SLOT_POS [2] VARYING_SLOT_CLIP_DIST0 [3] VARYING_SLOT_CLIP_DIST1 [4] VARYING_SLOT_VAR0 [5] VARYING_SLOT_VAR1 [6] VARYING_SLOT_VAR2 [7] VARYING_SLOT_VAR3 [8] VARYING_SLOT_VAR4 Native code for unnamed vertex shader GLSL18: VS vec4 shader: 1473 instructions. 2 loops. 1394952 cycles. 0:0 spills:fills. 
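Aside on the recurring pattern in the NIR above: nearly every multiply is followed by a chain of fne/bcsel instructions (for example ssa_128 through ssa_134) that keeps the raw product when it is not NaN, keeps it when one of the inputs was already NaN, and otherwise substitutes ssa_8 (a constant defined earlier in the dump, presumably 0.0). The guarded product is then fed to fdot_replicated4 against ssa_9, which the native code below suggests is an all-ones vector (g103 is loaded with 1065353216D, i.e. 1.0f, and is the second operand of the dp4 instructions), so each dot product reduces to a horizontal sum of guarded per-component products. A minimal GLSL sketch of what the source shader appears to be doing, assuming ssa_8 is 0.0 and ssa_9 is vec4(1.0); the names sanitized_mul and guarded_dot are illustrative, not taken from the trace:

    // Requires #version 130 or later for isnan().
    // Replace a NaN product of non-NaN inputs (e.g. 0 * inf) with 0.0,
    // mirroring the fmul -> fne -> bcsel chains in the NIR dump above.
    vec4 sanitized_mul(vec4 a, vec4 b) {
        vec4 p = a * b;
        // Per-component fallback: keep the product if an input was already NaN,
        // otherwise use 0.0 (the assumed value of the ssa_8 constant).
        vec4 fallback = vec4(
            (isnan(a.x) || isnan(b.x)) ? p.x : 0.0,
            (isnan(a.y) || isnan(b.y)) ? p.y : 0.0,
            (isnan(a.z) || isnan(b.z)) ? p.z : 0.0,
            (isnan(a.w) || isnan(b.w)) ? p.w : 0.0);
        // Use the fallback only where the product itself came out NaN.
        return vec4(
            isnan(p.x) ? fallback.x : p.x,
            isnan(p.y) ? fallback.y : p.y,
            isnan(p.z) ? fallback.z : p.z,
            isnan(p.w) ? fallback.w : p.w);
    }

    // fdot_replicated4(sanitized product, ssa_9) with ssa_9 == vec4(1.0)
    // is simply a horizontal sum of the guarded products.
    float guarded_dot(vec4 a, vec4 b) {
        return dot(sanitized_mul(a, b), vec4(1.0));
    }

This explains much of the instruction count reported for the vec4 shader: every source-level multiply expands into a multiply, three NaN compares, and three selects before the dp4.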
Compacted 23568 to 22144 bytes (6%) START B0 (64 cycles) mov(8) g95<1>F [0F, 0F, 0F, 1F]VF { align16 1Q }; mov(8) g96<1>.xUD 0x00000000UD { align16 1Q compacted }; mov(8) g97<1>.xUD 0x0000006dUD { align16 1Q compacted }; mov(8) g98<1>.xUD 0x00000009UD { align16 1Q compacted }; mov(8) g99<1>.xUD 0x00000020UD { align16 1Q compacted }; mov(8) g100<1>.xUD 0x000000bfUD { align16 1Q compacted }; mov(8) g101<1>.xUD 0x0000001fUD { align16 1Q compacted }; mov(8) g102<1>UD 0x00000000UD { align16 1Q compacted }; mov(8) g103<1>D 1065353216D { align16 1Q }; mov(8) g104<1>.xUD 0x00000021UD { align16 1Q compacted }; mov(8) g105<1>.xUD 0x00000022UD { align16 1Q compacted }; mov(8) g106<1>.xyD 0D { align16 1Q }; mov(8) g107<1>.xyD -1D { align16 1Q }; mov(8) g108<1>.xUD 0x00000023UD { align16 1Q compacted }; mov(8) g109<1>.xUD 0xffffffffUD { align16 1Q compacted }; mov(8) g110<1>.xUD 0x00000024UD { align16 1Q compacted }; mov(8) g111<1>.xUD 0x00000025UD { align16 1Q compacted }; mov(8) g19<1>.xUD 0x00000026UD { align16 1Q compacted }; mov(8) g15<1>.xUD 0x0000006dUD { align16 1Q compacted }; mov(8) g14<1>.xyzD 0D { align16 1Q }; mov(8) g2<1>.xUD 0x00000000UD { align16 1Q compacted }; mov(8) g13<1>.xzwD g95<4>.xxzwD { align16 1Q }; mov(8) g12<1>D g95<4>D { align16 1Q }; mov(8) g11<1>D g95<4>D { align16 1Q }; mov(8) g10<1>.zwD g95<4>.zzzwD { align16 1Q }; mov(8) g9<1>D g95<4>D { align16 1Q }; mov(8) g8<1>.yzwD g95<4>.yyzwD { align16 1Q }; mov(8) g7<1>D g95<4>D { align16 1Q }; mov(8) g6<1>D g95<4>D { align16 1Q }; mov(8) g5<1>D g95<4>D { align16 1Q }; mov(8) g4<1>D g95<4>D { align16 1Q }; mov(8) g3<1>D g95<4>D { align16 1Q }; END B0 ->B1 START B3 <-B2 <-B127 (18 cycles) cmp.z.f0(8) g38<1>.xD g97<4>.xD g15<4>.xD { align16 1Q compacted }; END B2 ->B3 ->B128 (+f0.x) if(8) JIP: 2218 UIP: 2218 { align16 1Q }; END B3 ->B4 ->B119 START B4 <-B3 (174 cycles) mov(8) g39<1>.zwUD g1<4>.zzzwUD { align16 1Q }; mov(8) g40<1>.xD g1<4>.xF { align16 1Q compacted }; mov(8) g47<1>.xUD 0x00000004UD { align16 1Q compacted }; mov(8) g49<1>.xUD 0x00000140UD { align16 1Q compacted }; mov(8) g65<1>.xUD 0x00000190UD { align16 1Q compacted }; mov(8) g16<1>.xyD g1<4>.zwwwD { align16 1Q }; mov(8) g76<1>.xUD 0x00000000UD { align16 1Q compacted }; add(8) g41<1>.xD g98<4>.xD g40<4>.xD { align16 1Q compacted }; send(8) g64<1>F g65<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g75<1>F g76<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; shl(8) g48<1>.xD g41<4>.xD 0x00000004UD { align16 1Q }; mov(8) g63<1>UD g64<4>UD { align16 1Q }; mov(8) g16<1>.zwD g64<4>.xxxyD { align16 1Q }; cmp.nz.f0(8) null<1>.xD g75<4>.xD g96<4>.xD { align16 1Q switch }; add(8) g56<1>.xD g48<4>.xD 320D { align16 1Q compacted }; send(8) g55<1>F g56<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g53<1>UD g55<4>UD { align16 1Q }; mov(8) g13<1>.xzwD g55<4>.wwyxD { align16 1Q }; mov(8) g66<1>.xyD g55<4>.wzzzF { align16 1Q }; mov(8) g17<1>.xyD g66<4>.xyyyD { align16 NoDDClr 1Q }; add(8) g67<1>.xD g99<4>.xD g66<4>.yD { align16 1Q }; mov(8) g17<1>.zD g14<4>.zD { align16 NoDDChk 1Q }; shl(8) g68<1>.xD g67<4>.xD 0x00000004UD { align16 1Q }; add(8) g73<1>.xD g68<4>.xD 320D { align16 1Q compacted }; send(8) g72<1>F g73<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g70<1>UD g72<4>UD { align16 1Q }; mov(8) g11<1>D g72<4>.wzyxD { align16 1Q }; (+f0.x) if(8) JIP: 5 UIP: 5 { align16 1Q }; END B4 ->B5 ->B6 START B5 <-B4 (4 cycles) mov(8) 
g15<1>.xD g100<4>.xD { align16 1Q compacted }; break(8) JIP: 2 UIP: 2639 { align16 1Q }; END B5 ->B2 START B6 <-B4 (544 cycles) endif(8) JIP: 2165 { align16 1Q }; add(8) g78<1>.xD g101<4>.xD g66<4>.xD { align16 1Q compacted }; mul(8) g85<1>F g39<4>.zwwwF g70<4>.wzzzF { align16 1Q }; cmp.nz.f0(8) null<1>F g70<4>.wzzzF g70<4>.wzzzF { align16 1Q switch }; add(8) g93<1>.xD g99<4>.xD g66<4>.xD { align16 1Q compacted }; add(8) g48<1>.xD g104<4>.xD g66<4>.xD { align16 1Q compacted }; add(8) g64<1>.xD g105<4>.xD g66<4>.xD { align16 1Q compacted }; mov(8) g88<1>.xUD 0x00000000UD { align16 1Q compacted }; shl(8) g79<1>.xD g78<4>.xD g47<4>.xUD { align16 1Q }; (+f0) sel(8) g87<1>.xyUD g85<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; shl(8) g94<1>.xD g93<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g50<1>.xD g48<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g65<1>.xD g64<4>.xD g47<4>.xUD { align16 1Q }; add(8) g84<1>.xD g49<4>.xD g79<4>.xD { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g39<4>.zwwwF g39<4>.zwwwF { align16 1Q switch }; add(8) g20<1>.xD g49<4>.xD g94<4>.xD { align16 1Q compacted }; add(8) g55<1>.xD g49<4>.xD g50<4>.xD { align16 1Q compacted }; add(8) g71<1>.xD g49<4>.xD g65<4>.xD { align16 1Q compacted }; send(8) g83<1>F g84<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; (+f0) sel(8) g89<1>.xyUD g85<4>.xyyyUD g87<4>.xyyyUD { align16 1Q }; send(8) g18<1>F g20<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g54<1>F g55<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g69<1>F g71<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g81<1>UD g83<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g85<4>F g85<4>F { align16 1Q switch }; mov(8) g83<1>.xUD 0x00000180UD { align16 1Q compacted }; (+f0) sel(8) g91<1>.xyUD g89<4>.xyyyUD g85<4>.xyyyUD { align16 1Q }; send(8) g82<1>F g83<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; add(8) g92<1>.xyF g91<4>.xyyyF g70<4>.yxxxF { align16 1Q }; add(8) g84<1>.zF -g69<4>.xF g82<4>.yF { align16 1Q }; mov(8) g16<1>.xyD g92<4>.xyyyD { align16 NoDDClr 1Q }; mov(8) g16<1>.zwD g63<4>.xxxyD { align16 NoDDChk 1Q }; mul(8) g21<1>F g18<4>.wzyxF g16<4>F { align16 1Q }; cmp.nz.f0(8) g40<1>F g16<4>F g16<4>F { align16 1Q compacted }; mul(8) g56<1>F g54<4>.wzyxF g16<4>F { align16 1Q }; mul(8) g72<1>F g69<4>.wzyxF g16<4>F { align16 1Q }; (+f0) sel(8) g41<1>UD g21<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g18<4>.wzyxF g18<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g43<1>UD g21<4>UD g41<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g21<4>F g21<4>F { align16 1Q switch }; (+f0) sel(8) g45<1>UD g43<4>UD g21<4>UD { align16 1Q }; dp4(8) g46<1>.xF g45<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g40<4>D 0D { align16 1Q switch }; mov(8) g18<1>.xD g46<4>.xD { align16 1Q compacted }; (+f0) sel(8) g57<1>UD g56<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g54<4>.wzyxF g54<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g59<1>UD g56<4>UD g57<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g56<4>F g56<4>F { align16 1Q switch }; (+f0) sel(8) g61<1>UD g59<4>UD g56<4>UD { align16 1Q }; dp4(8) g62<1>.xF g61<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g40<4>D 0D { align16 1Q switch }; mov(8) g18<1>.yD g62<4>.xD { align16 1Q }; (+f0) sel(8) g73<1>UD g72<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g69<4>.wzyxF g69<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g75<1>UD g72<4>UD g73<4>UD { 
align16 1Q }; cmp.nz.f0(8) null<1>F g72<4>F g72<4>F { align16 1Q switch }; (+f0) sel(8) g77<1>UD g75<4>UD g72<4>UD { align16 1Q }; dp4(8) g78<1>.xF g77<4>F g103<4>F { align16 1Q compacted }; cmp.l.f0(8) g42<1>.yF g63<4>.xF g84<4>.zF { align16 NoDDClr 1Q }; mov(8) g18<1>.zD g78<4>.xD { align16 NoDDClr 1Q }; cmp.nz.f0(8) g42<1>.xF g63<4>.xF g82<4>.wF { align16 NoDDChk 1Q }; mov(8) g18<1>.wD g63<4>.yD { align16 NoDDChk 1Q }; not(8) g87<1>.xyD g42<4>.xyyyD { align16 1Q }; cmp.z.f0(8) null<1>D g87<4>.xyyyD g106<4>.xyyyD { align16 1Q switch }; (+f0.all4h) mov(8) g88<1>.xD -1D { align16 1Q }; mov.nz.f0(8) null<1>.xD g88<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 27 UIP: 27 { align16 1Q }; END B6 ->B7 ->B8 START B7 <-B6 (164 cycles) math inv(8) g89<1>.xF g84<4>.zF null<4>F { align16 1Q }; add(8) g90<1>.xF g46<4>.xF g82<4>.wF { align16 1Q }; mul(8) g91<1>F g82<4>.zF g89<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g89<4>.xF g89<4>.xF { align16 1Q switch }; (+f0) sel(8) g94<1>.xF -g91<4>.xF g102<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F -g82<4>.zF -g82<4>.zF { align16 1Q switch }; (+f0) sel(8) g20<1>.xF -g91<4>.xF g94<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F -g91<4>F -g91<4>F { align16 1Q switch }; (+f0) sel(8) g22<1>.xF g20<4>.xF -g91<4>.xF { align16 1Q }; add(8) g18<1>.xF g22<4>.xF g90<4>.xF { align16 NoDDClr 1Q compacted }; mov(8) g18<1>.yD g62<4>.xD { align16 NoDDClr,NoDDChk 1Q }; mov(8) g18<1>.zD g78<4>.xD { align16 NoDDClr,NoDDChk 1Q }; mov(8) g18<1>.wD g63<4>.yD { align16 NoDDChk 1Q }; END B7 ->B8 START B8 <-B6 <-B7 (492 cycles) endif(8) JIP: 2021 { align16 1Q }; mov(8) g26<1>.xUD 0x00000140UD { align16 1Q compacted }; cmp.nz.f0(8) g28<1>F g18<4>F g18<4>F { align16 1Q compacted }; mov(8) g41<1>.xUD 0x00000150UD { align16 1Q compacted }; mov(8) g54<1>.xUD 0x00000160UD { align16 1Q compacted }; mov(8) g65<1>.xUD 0x00000170UD { align16 1Q compacted }; mov(8) g76<1>.xUD 0x00000000UD { align16 1Q compacted }; send(8) g25<1>F g26<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g37<1>F g41<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g52<1>F g54<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g64<1>F g65<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g27<1>F g25<4>.wzyxF g18<4>F { align16 1Q }; mul(8) g42<1>F g37<4>.wzyxF g18<4>F { align16 1Q }; mul(8) g55<1>F g52<4>.wzyxF g18<4>F { align16 1Q }; mul(8) g67<1>F g64<4>.wzyxF g18<4>F { align16 1Q }; (+f0) sel(8) g29<1>UD g27<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g25<4>.wzyxF g25<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g31<1>UD g27<4>UD g29<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g27<4>F g27<4>F { align16 1Q switch }; (+f0) sel(8) g33<1>UD g31<4>UD g27<4>UD { align16 1Q }; dp4(8) g7<1>.xF g33<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g28<4>D 0D { align16 1Q switch }; (+f0) sel(8) g43<1>UD g42<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g37<4>.wzyxF g37<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g45<1>UD g42<4>UD g43<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g42<4>F g42<4>F { align16 1Q switch }; (+f0) sel(8) g48<1>UD g45<4>UD g42<4>UD { align16 1Q }; dp4(8) g7<1>.yF g48<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g28<4>D 0D { align16 1Q switch }; (+f0) sel(8) g56<1>UD g55<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g52<4>.wzyxF g52<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g58<1>UD g55<4>UD g56<4>UD { align16 
1Q }; cmp.nz.f0(8) null<1>F g55<4>F g55<4>F { align16 1Q switch }; (+f0) sel(8) g60<1>UD g58<4>UD g55<4>UD { align16 1Q }; dp4(8) g7<1>.zF g60<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g28<4>D 0D { align16 1Q switch }; (+f0) sel(8) g68<1>UD g67<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g64<4>.wzyxF g64<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g71<1>UD g67<4>UD g68<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g67<4>F g67<4>F { align16 1Q switch }; (+f0) sel(8) g73<1>UD g71<4>UD g67<4>UD { align16 1Q }; dp4(8) g7<1>.wF g73<4>F g103<4>F { align16 1Q compacted }; cmp.ge.f0(8) g75<1>.xyF g63<4>.yF g53<4>.xF { align16 1Q }; cmp.z.f0(8) null<1>D g75<4>.xyyyD g107<4>.xyyyD { align16 1Q switch }; (+f0.all4h) mov(8) g76<1>.xD -1D { align16 1Q }; mov.nz.f0(8) null<1>.xD g76<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 7 UIP: 136 { align16 1Q }; END B8 ->B9 ->B10 START B9 <-B8 (20 cycles) mov(8) g6<1>.xyzD g63<4>.yD { align16 NoDDClr 1Q }; mov(8) g6<1>.wD g53<4>.xD { align16 NoDDChk 1Q compacted }; else(8) JIP: 131 { align16 1Q }; END B9 ->B13 START B10 <-B8 (104 cycles) mov(8) g77<1>.yD g53<4>.xF { align16 1Q }; mov(8) g17<1>.xD g66<4>.xD { align16 NoDDClr 1Q compacted }; rndd(8) g29<1>.xF g53<4>.zF { align16 1Q }; add(8) g78<1>.xD g99<4>.xD g77<4>.yD { align16 1Q }; add(8) g86<1>.xD g104<4>.xD g77<4>.yD { align16 1Q }; add(8) g94<1>.xD g105<4>.xD g77<4>.yD { align16 1Q }; add(8) g22<1>.xD g108<4>.xD g77<4>.yD { align16 1Q }; mov(8) g17<1>.zD g14<4>.zD { align16 NoDDChk 1Q }; add(8) g30<1>.xF g53<4>.zF -g29<4>.xF { align16 1Q }; shl(8) g79<1>.xD g78<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g87<1>.xD g86<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g95<1>.xD g94<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g23<1>.xD g22<4>.xD g47<4>.xUD { align16 1Q }; cmp.ge.f0(8) null<1>.xF g30<4>.xF g63<4>.wF { align16 1Q switch }; add(8) g85<1>.xD g49<4>.xD g79<4>.xD { align16 1Q compacted }; add(8) g93<1>.xD g49<4>.xD g87<4>.xD { align16 1Q compacted }; add(8) g21<1>.xD g49<4>.xD g95<4>.xD { align16 1Q compacted }; add(8) g28<1>.xD g49<4>.xD g23<4>.xD { align16 1Q compacted }; send(8) g84<1>F g85<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g91<1>F g93<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g20<1>F g21<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g27<1>F g28<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g82<1>UD g84<4>UD { align16 1Q }; mov(8) g89<1>UD g91<4>UD { align16 1Q }; mov(8) g18<1>UD g20<4>UD { align16 1Q }; mov(8) g25<1>UD g27<4>UD { align16 1Q }; mov(8) g20<1>.xyD g39<4>.zwwwD { align16 1Q }; (+f0.x) if(8) JIP: 24 UIP: 24 { align16 1Q }; END B10 ->B11 ->B12 START B11 <-B10 (132 cycles) math inv(8) g43<1>.zF g81<4>.wF null<4>F { align16 1Q }; add(8) g35<1>.xyF g92<4>.xyyyF -g81<4>.yxxxF { align16 1Q }; math inv(8) g43<1>.wF g81<4>.zF null<4>F { align16 1Q }; mul(8) g36<1>F g35<4>.xyyyF g43<4>.zwwwF { align16 1Q }; cmp.nz.f0(8) null<1>F g43<4>.zwwwF g43<4>.zwwwF { align16 1Q switch }; (+f0) sel(8) g41<1>.xyUD g36<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g35<4>.xyyyF g35<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g43<1>.xyUD g36<4>.xyyyUD g41<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g36<4>F g36<4>F { align16 1Q switch }; (+f0) sel(8) g45<1>.xyUD g43<4>.xyyyUD g36<4>.xyyyUD { align16 1Q }; mov(8) g20<1>.xyD g45<4>.xyyyD { align16 1Q }; END B11 ->B12 START B12 <-B10 <-B11 (344 cycles) 
endif(8) JIP: 56 { align16 1Q }; sel.ge(8) g46<1>.xyF -g20<4>.xyyyF g20<4>.xyyyF { align16 1Q }; add(8) g48<1>F g89<4>.wzyxF -g82<4>.wzyxF { align16 1Q }; add(8) g58<1>F g25<4>.wzyxF -g18<4>.wzyxF { align16 1Q }; cmp.nz.f0(8) g51<1>F g46<4>.xF g46<4>.xF { align16 1Q compacted }; mul(8) g50<1>F g48<4>F g46<4>.xF { align16 1Q }; mul(8) g59<1>F g58<4>F g46<4>.xF { align16 1Q }; (+f0) sel(8) g52<1>UD g50<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g48<4>F g48<4>F { align16 1Q switch }; (+f0) sel(8) g54<1>UD g50<4>UD g52<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g50<4>F g50<4>F { align16 1Q switch }; (+f0) sel(8) g56<1>UD g54<4>UD g50<4>UD { align16 1Q }; add(8) g57<1>F g56<4>F g82<4>.wzyxF { align16 1Q }; cmp.nz.f0(8) null<1>D g51<4>D 0D { align16 1Q switch }; (+f0) sel(8) g60<1>UD g59<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g58<4>F g58<4>F { align16 1Q switch }; (+f0) sel(8) g62<1>UD g59<4>UD g60<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g59<4>F g59<4>F { align16 1Q switch }; (+f0) sel(8) g64<1>UD g62<4>UD g59<4>UD { align16 1Q }; add(8) g65<1>F g64<4>F g18<4>.wzyxF { align16 1Q }; cmp.nz.f0(8) null<1>F g46<4>.yF g46<4>.yF { align16 1Q switch }; add(8) g66<1>F g65<4>F -g57<4>F { align16 1Q }; mul(8) g67<1>F g66<4>F g46<4>.yF { align16 1Q }; (+f0) sel(8) g69<1>UD g67<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g66<4>F g66<4>F { align16 1Q switch }; (+f0) sel(8) g71<1>UD g67<4>UD g69<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g67<4>F g67<4>F { align16 1Q switch }; (+f0) sel(8) g73<1>UD g71<4>UD g67<4>UD { align16 1Q }; add(8) g6<1>F g73<4>F g57<4>F { align16 1Q compacted }; END B12 ->B13 START B13 <-B12 <-B9 (264 cycles) endif(8) JIP: 1803 { align16 1Q }; mul(8) g74<1>F g63<4>.zyzyF g53<4>.yF { align16 1Q }; cmp.nz.f0(8) null<1>F g53<4>.yF g53<4>.yF { align16 1Q switch }; mov(8) g18<1>.xUD 0x000001a0UD { align16 1Q compacted }; mov(8) g20<1>.yD g53<4>.yF { align16 1Q }; rndd(8) g45<1>.xF g53<4>.wF { align16 1Q }; mov(8) g22<1>.yzwD g8<4>.yyzwD { align16 1Q }; (+f0) sel(8) g76<1>UD g74<4>UD g102<4>UD { align16 1Q }; send(8) g95<1>F g18<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; add(8) g23<1>.xF g53<4>.wF -g45<4>.xF { align16 1Q }; cmp.nz.f0(8) g80<1>F g63<4>.zyzyF g63<4>.zyzyF { align16 1Q }; mov(8) g94<1>.zwUD g95<4>.zzzwUD { align16 1Q }; mov(8) g21<1>.zwD g95<4>.yyyxD { align16 1Q }; (+f0) sel(8) g81<1>UD g74<4>UD g76<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g74<4>F g74<4>F { align16 1Q switch }; (+f0) sel(8) g83<1>UD g81<4>UD g74<4>UD { align16 1Q }; rndd(8) g84<1>.xyF g83<4>.xyyyF { align16 1Q }; add(8) g44<1>.xyF g83<4>.zwwwF -g84<4>.xyyyF { align16 NoDDClr 1Q }; mov(8) g44<1>.zwD g83<4>.zzzwD { align16 NoDDChk 1Q }; mul(8) g86<1>F g63<4>.zF g44<4>F { align16 1Q }; cmp.nz.f0(8) null<1>F g44<4>F g44<4>F { align16 1Q switch }; (+f0) sel(8) g88<1>.xyUD g86<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) g89<1>F g63<4>.zF g63<4>.zF { align16 1Q }; (+f0) sel(8) g90<1>.xyUD g86<4>.xyyyUD g88<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g86<4>F g86<4>F { align16 1Q switch }; (+f0) sel(8) g92<1>.xyUD g90<4>.xyyyUD g86<4>.xyyyUD { align16 1Q }; cmp.ge.f0(8) null<1>.xF g23<4>.xF g63<4>.wF { align16 1Q switch }; (+f0.x) if(8) JIP: 253 UIP: 253 { align16 1Q }; END B13 ->B14 ->B29 START B14 <-B13 (540 cycles) add(8) g26<1>.xyF g63<4>.xyyyF g39<4>.zwwwF { align16 1Q }; mul(8) g27<1>F g63<4>.zF g53<4>.wF { align16 1Q }; cmp.nz.f0(8) null<1>F g53<4>.wF g53<4>.wF { align16 1Q switch }; mov(8) g23<1>.xyD g26<4>.xyyyD { align16 
1Q }; (+f0) sel(8) g29<1>.xyUD g27<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; mov(8) g23<1>.zwD g9<4>.zzzwD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g30<1>.xyUD g27<4>.xyyyUD g29<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g27<4>F g27<4>F { align16 1Q switch }; (+f0) sel(8) g32<1>.xyUD g30<4>.xyyyUD g27<4>.xyyyUD { align16 1Q }; mov(8) g46<1>.xyD g32<4>.xyyyD { align16 1Q }; rndd(8) g33<1>.yF g32<4>.yF { align16 1Q }; add(8) g34<1>.xF g32<4>.xF -g33<4>.yF { align16 1Q }; cmp.ge.f0(8) g35<1>.xF g34<4>.xF g63<4>.wF { align16 1Q }; mul(8) g50<1>F g63<4>.zF g34<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>D g35<4>.xD 0D { align16 1Q switch }; (+f0) sel(8) g36<1>UD g95<4>.zwyxUD g95<4>.wzyxUD { align16 1Q }; mul(8) g37<1>F g36<4>F g70<4>.wzyxF { align16 1Q }; cmp.nz.f0(8) null<1>F g70<4>.wzyxF g70<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g41<1>.xyUD g37<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g36<4>F g36<4>F { align16 1Q switch }; (+f0) sel(8) g43<1>.xyUD g37<4>.xyyyUD g41<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g37<4>F g37<4>F { align16 1Q switch }; (+f0) sel(8) g48<1>.xyUD g43<4>.xyyyUD g37<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g34<4>.xF g34<4>.xF { align16 1Q switch }; (+f0) sel(8) g52<1>.xUD g50<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g54<1>.xUD g50<4>.xUD g52<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g50<4>F g50<4>F { align16 1Q switch }; (+f0) sel(8) g56<1>.xUD g54<4>.xUD g50<4>.xUD { align16 1Q }; mul(8) g57<1>F g63<4>.zyzyF g56<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g56<4>.xF g56<4>.xF { align16 1Q switch }; (+f0) sel(8) g59<1>UD g57<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; (+f0) sel(8) g60<1>UD g57<4>UD g59<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g57<4>F g57<4>F { align16 1Q switch }; (+f0) sel(8) g62<1>UD g60<4>UD g57<4>UD { align16 1Q }; rndd(8) g64<1>.zwF g62<4>.zzzwF { align16 1Q }; add(8) g65<1>.xyF g62<4>.xyyyF -g64<4>.zwwwF { align16 1Q }; cmp.ge.f0(8) g66<1>.xyF g65<4>.xyyyF g63<4>.wF { align16 1Q }; mov.nz.f0(8) null<1>.xD g66<4>.yD { align16 1Q }; (+f0.x) if(8) JIP: 20 UIP: 20 { align16 1Q }; END B14 ->B15 ->B16 START B15 <-B14 (132 cycles) mul(8) g68<1>F g26<4>.xF g48<4>.xF { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g48<4>.xF g48<4>.xF { align16 1Q switch }; (+f0) sel(8) g71<1>.xUD g68<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g26<4>.xF g26<4>.xF { align16 1Q switch }; (+f0) sel(8) g73<1>.xUD g68<4>.xUD g71<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g68<4>F g68<4>F { align16 1Q switch }; (+f0) sel(8) g75<1>.xUD g73<4>.xUD g68<4>.xUD { align16 1Q }; mov(8) g23<1>.xD g75<4>.xD { align16 NoDDClr 1Q compacted }; mov(8) g23<1>.yD g26<4>.yD { align16 NoDDClr,NoDDChk 1Q }; mov(8) g23<1>.zwD g9<4>.zzzwD { align16 NoDDChk 1Q }; END B15 ->B16 START B16 <-B14 <-B15 (20 cycles) endif(8) JIP: 149 { align16 1Q }; mov.nz.f0(8) null<1>.xD g66<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 20 UIP: 20 { align16 1Q }; END B16 ->B17 ->B18 START B17 <-B16 (116 cycles) mul(8) g77<1>F g23<4>.yF g48<4>.yF { align16 1Q }; cmp.nz.f0(8) null<1>F g48<4>.yF g48<4>.yF { align16 1Q switch }; (+f0) sel(8) g79<1>.yUD g77<4>.yUD g102<4>.yUD { align16 1Q }; cmp.nz.f0(8) null<1>F g23<4>.yF g23<4>.yF { align16 1Q switch }; (+f0) sel(8) g81<1>.yUD g77<4>.yUD g79<4>.yUD { align16 1Q }; cmp.nz.f0(8) null<1>F g77<4>F g77<4>F { align16 1Q switch }; (+f0) sel(8) g83<1>.yUD g81<4>.yUD g77<4>.yUD { 
align16 1Q }; add(8) g84<1>.yF g83<4>.yF g63<4>.yF { align16 1Q }; add(8) g23<1>.yF g84<4>.yF -g48<4>.yF { align16 1Q }; END B17 ->B18 START B18 <-B16 <-B17 (268 cycles) endif(8) JIP: 125 { align16 1Q }; add(8) g86<1>.xyF g63<4>.yF -g48<4>.xyyyF { align16 1Q }; mov(8) g21<1>.zwD g36<4>.zzzwD { align16 1Q }; mul(8) g87<1>F g63<4>.zF g65<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g65<4>.xF g65<4>.xF { align16 1Q switch }; (+f0) sel(8) g90<1>.xUD g87<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g91<1>.xUD g87<4>.xUD g90<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g87<4>F g87<4>F { align16 1Q switch }; (+f0) sel(8) g93<1>.xUD g91<4>.xUD g87<4>.xUD { align16 1Q }; mul(8) g95<1>F g63<4>.zyzyF g93<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g93<4>.xF g93<4>.xF { align16 1Q switch }; (+f0) sel(8) g18<1>UD g95<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; (+f0) sel(8) g24<1>UD g95<4>UD g18<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g95<4>F g95<4>F { align16 1Q switch }; (+f0) sel(8) g26<1>UD g24<4>UD g95<4>UD { align16 1Q }; rndd(8) g27<1>.zwF g26<4>.zzzwF { align16 1Q }; add(8) g28<1>.xyF g26<4>.xyyyF -g27<4>.zwwwF { align16 1Q }; cmp.ge.f0(8) g29<1>.xyF g28<4>.xyyyF g63<4>.wF { align16 1Q }; mov.nz.f0(8) null<1>.xD g29<4>.yD { align16 1Q }; (+f0.x) if(8) JIP: 3 UIP: 3 { align16 1Q }; END B18 ->B19 ->B20 START B19 <-B18 (2 cycles) add(8) g23<1>.xF g23<4>.xF g86<4>.xF { align16 1Q compacted }; END B19 ->B20 START B20 <-B18 <-B19 (20 cycles) endif(8) JIP: 80 { align16 1Q }; mov.nz.f0(8) null<1>.xD g29<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B20 ->B21 ->B22 START B21 <-B20 (2 cycles) add(8) g23<1>.yF g23<4>.yF -g86<4>.yF { align16 1Q }; END B21 ->B22 START B22 <-B20 <-B21 (264 cycles) endif(8) JIP: 72 { align16 1Q }; mul(8) g34<1>F g63<4>.zF g28<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g28<4>.xF g28<4>.xF { align16 1Q switch }; (+f0) sel(8) g36<1>.xUD g34<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g37<1>.xUD g34<4>.xUD g36<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g34<4>F g34<4>F { align16 1Q switch }; (+f0) sel(8) g41<1>.xUD g37<4>.xUD g34<4>.xUD { align16 1Q }; mul(8) g42<1>F g63<4>.zyzyF g41<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g41<4>.xF g41<4>.xF { align16 1Q switch }; (+f0) sel(8) g44<1>UD g42<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; (+f0) sel(8) g48<1>UD g42<4>UD g44<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g42<4>F g42<4>F { align16 1Q switch }; (+f0) sel(8) g50<1>UD g48<4>UD g42<4>UD { align16 1Q }; rndd(8) g51<1>.zwF g50<4>.zzzwF { align16 1Q }; add(8) g52<1>.xyF g50<4>.xyyyF -g51<4>.zwwwF { align16 1Q }; cmp.ge.f0(8) g54<1>.xyF g52<4>.xyyyF g63<4>.wF { align16 1Q }; mov.nz.f0(8) null<1>.xD g54<4>.yD { align16 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B22 ->B23 ->B24 START B23 <-B22 (2 cycles) add(8) g23<1>.xF g63<4>.yF -g23<4>.xF { align16 1Q }; END B23 ->B24 START B24 <-B22 <-B23 (20 cycles) endif(8) JIP: 30 { align16 1Q }; mov.nz.f0(8) null<1>.xD g54<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B24 ->B25 ->B26 START B25 <-B24 (2 cycles) add(8) g23<1>.yF g63<4>.yF -g23<4>.yF { align16 1Q }; END B25 ->B26 START B26 <-B24 <-B25 (52 cycles) endif(8) JIP: 22 { align16 1Q }; rndd(8) g59<1>.yF g46<4>.yF { align16 1Q }; add(8) g60<1>.xF g46<4>.xF -g59<4>.yF { align16 1Q }; mov(8) g22<1>.yD g59<4>.yD { align16 
NoDDClr 1Q }; cmp.ge.f0(8) null<1>.xF g60<4>.xF g63<4>.wF { align16 1Q switch }; mov(8) g22<1>.zwD g51<4>.zzzwD { align16 NoDDChk 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B26 ->B27 ->B28 START B27 <-B26 (2 cycles) add(8) g23<1>.xyF g63<4>.yF -g23<4>.yxxxF { align16 1Q }; END B27 ->B28 START B28 <-B26 <-B27 (20 cycles) endif(8) JIP: 6 { align16 1Q }; add(8) g9<1>.yF g63<4>.yF -g23<4>.yF { align16 NoDDClr 1Q }; mov(8) g9<1>.xzwD g23<4>.xxzwD { align16 NoDDChk 1Q }; END B28 ->B29 START B29 <-B13 <-B28 (20 cycles) endif(8) JIP: 1497 { align16 1Q }; cmp.l.f0(8) null<1>.xF g92<4>.yF g63<4>.yF { align16 1Q switch }; (+f0.x) if(8) JIP: 78 UIP: 264 { align16 1Q }; END B29 ->B30 ->B34 START B30 <-B29 (72 cycles) add(8) g66<1>.xF g53<4>.wF -g45<4>.xF { align16 1Q }; add(8) g67<1>.xD g99<4>.xD g20<4>.yD { align16 1Q }; cmp.ge.f0(8) null<1>.xF g66<4>.xF g63<4>.wF { align16 1Q switch }; shl(8) g68<1>.xD g67<4>.xD g47<4>.xUD { align16 1Q }; add(8) g74<1>.xD g49<4>.xD g68<4>.xD { align16 1Q compacted }; send(8) g73<1>F g74<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g71<1>UD g73<4>UD { align16 1Q }; mov(8) g22<1>.yzwD g73<4>.zzyxD { align16 1Q }; (+f0.x) if(8) JIP: 37 UIP: 53 { align16 1Q }; END B30 ->B31 ->B32 START B31 <-B30 (230 cycles) mul(8) g77<1>F g9<4>.xyyyF g73<4>.wzzzF { align16 1Q }; cmp.nz.f0(8) null<1>F g73<4>.wzzzF g73<4>.wzzzF { align16 1Q switch }; (+f0) sel(8) g79<1>.xyUD g77<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g9<4>.xyyyF g9<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g81<1>.xyUD g77<4>.xyyyUD g79<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g77<4>F g77<4>F { align16 1Q switch }; (+f0) sel(8) g83<1>.xyUD g81<4>.xyyyUD g77<4>.xyyyUD { align16 1Q }; add(8) g84<1>.xyF g83<4>.xyyyF g73<4>.yxxxF { align16 1Q }; cmp.nz.f0(8) null<1>F g21<4>.zwwwF g21<4>.zwwwF { align16 1Q switch }; mul(8) g85<1>F g84<4>.xyyyF g21<4>.zwwwF { align16 1Q }; (+f0) sel(8) g87<1>.xyUD g85<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g84<4>.xyyyF g84<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g90<1>.xyUD g85<4>.xyyyUD g87<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g85<4>F g85<4>F { align16 1Q switch }; (+f0) sel(8) g93<1>.xyUD g90<4>.xyyyUD g85<4>.xyyyUD { align16 1Q }; add(8) g24<1>.yF g63<4>.yF -g93<4>.yF { align16 NoDDClr 1Q }; mov(8) g24<1>.xD g93<4>.xD { align16 NoDDChk 1Q compacted }; else(8) JIP: 18 { align16 1Q }; END B31 ->B33 START B32 <-B30 (84 cycles) cmp.nz.f0(8) g95<1>.xyF g63<4>.xF g39<4>.zwwwF { align16 1Q }; cmp.nz.f0(8) null<1>D g95<4>.xD 0D { align16 1Q switch }; (+f0) sel(8) g18<1>.xUD g71<4>.yUD g71<4>.wUD { align16 1Q }; mov(8) g51<1>.xD g18<4>.xD { align16 NoDDClr 1Q compacted }; mov(8) g52<1>.xD g18<4>.xD { align16 NoDDClr 1Q compacted }; cmp.nz.f0(8) null<1>D g95<4>.yD 0D { align16 1Q switch }; mov(8) g51<1>.yD g71<4>.zD { align16 NoDDChk 1Q }; mov(8) g52<1>.yD g71<4>.xD { align16 NoDDChk 1Q }; (+f0) sel(8) g24<1>.xyUD g52<4>.xyyyUD g51<4>.xyyyUD { align16 1Q }; END B32 ->B33 START B33 <-B32 <-B31 (22 cycles) endif(8) JIP: 6 { align16 1Q }; add(8) g25<1>.zF g63<4>.yF g53<4>.yF { align16 NoDDClr 1Q }; mov(8) g25<1>.xyD g24<4>.xyyyD { align16 NoDDChk 1Q }; else(8) JIP: 188 { align16 1Q }; END B33 ->B44 START B34 <-B29 (18 cycles) cmp.l.f0(8) null<1>.xF g92<4>.xF g63<4>.yF { align16 1Q switch }; (+f0.x) if(8) JIP: 132 UIP: 178 { align16 1Q }; END B34 ->B35 ->B42 START B35 <-B34 (34 cycles) add(8) g23<1>.xF g53<4>.wF -g45<4>.xF { align16 1Q }; cmp.ge.f0(8) null<1>.xF g23<4>.xF g63<4>.wF 
{ align16 1Q switch }; (+f0.x) if(8) JIP: 68 UIP: 122 { align16 1Q }; END B35 ->B36 ->B37 START B36 <-B35 (408 cycles) mov(8) g9<1>.zwD g63<4>.xxxyD { align16 1Q }; add(8) g26<1>.xD g99<4>.xD g20<4>.yD { align16 1Q }; add(8) g43<1>.xD g104<4>.xD g20<4>.yD { align16 1Q }; cmp.nz.f0(8) g34<1>F g9<4>F g9<4>F { align16 1Q compacted }; shl(8) g27<1>.xD g26<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g44<1>.xD g43<4>.xD g47<4>.xUD { align16 1Q }; add(8) g32<1>.xD g49<4>.xD g27<4>.xD { align16 1Q compacted }; add(8) g50<1>.xD g49<4>.xD g44<4>.xD { align16 1Q compacted }; send(8) g31<1>F g32<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g48<1>F g50<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g33<1>F g31<4>.wzyxF g9<4>F { align16 1Q }; mul(8) g51<1>F g48<4>.wzyxF g9<4>F { align16 1Q }; (+f0) sel(8) g35<1>UD g33<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g31<4>.wzyxF g31<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g37<1>UD g33<4>UD g35<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g33<4>F g33<4>F { align16 1Q switch }; (+f0) sel(8) g41<1>UD g37<4>UD g33<4>UD { align16 1Q }; dp4(8) g54<1>.xF g41<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g34<4>D 0D { align16 1Q switch }; (+f0) sel(8) g52<1>UD g51<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g48<4>.wzyxF g48<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g55<1>UD g51<4>UD g52<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g51<4>F g51<4>F { align16 1Q switch }; (+f0) sel(8) g57<1>UD g55<4>UD g51<4>UD { align16 1Q }; dp4(8) g54<1>.yF g57<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g21<4>.zwwwF g21<4>.zwwwF { align16 1Q switch }; mul(8) g59<1>F g54<4>.xyyyF g21<4>.zwwwF { align16 1Q }; (+f0) sel(8) g61<1>.xyUD g59<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g54<4>.xyyyF g54<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g64<1>.xyUD g59<4>.xyyyUD g61<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g59<4>F g59<4>F { align16 1Q switch }; (+f0) sel(8) g66<1>.xyUD g64<4>.xyyyUD g59<4>.xyyyUD { align16 1Q }; add(8) g26<1>.yF g63<4>.yF -g66<4>.yF { align16 NoDDClr 1Q }; mov(8) g26<1>.xD g66<4>.xD { align16 NoDDClr,NoDDChk 1Q compacted }; mov(8) g26<1>.zD g53<4>.yD { align16 NoDDChk 1Q }; else(8) JIP: 56 { align16 1Q }; END B36 ->B41 START B37 <-B35 (72 cycles) add(8) g68<1>.xD g99<4>.xD g20<4>.yD { align16 1Q }; add(8) g75<1>.xD g104<4>.xD g20<4>.yD { align16 1Q }; cmp.nz.f0(8) g83<1>.xF g63<4>.xF g39<4>.zF { align16 1Q }; cmp.z.f0(8) null<1>.xF g63<4>.xF g39<4>.wF { align16 1Q switch }; shl(8) g69<1>.xD g68<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g76<1>.xD g75<4>.xD g47<4>.xUD { align16 1Q }; add(8) g74<1>.xD g49<4>.xD g69<4>.xD { align16 1Q compacted }; add(8) g82<1>.xD g49<4>.xD g76<4>.xD { align16 1Q compacted }; send(8) g73<1>F g74<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g81<1>F g82<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g71<1>.xyUD g73<4>.xyyyUD { align16 1Q }; mov(8) g78<1>.zwUD g81<4>.zzzwUD { align16 1Q }; mov(8) g22<1>.yzwD g81<4>.zzyxD { align16 1Q }; (+f0.x) if(8) JIP: 16 UIP: 28 { align16 1Q }; END B37 ->B38 ->B39 START B38 <-B37 (38 cycles) mov(8) g55<1>.xyD g73<4>.wzzzD { align16 NoDDClr 1Q }; mov(8) g56<1>.xyD g81<4>.yxxxD { align16 NoDDClr 1Q }; cmp.nz.f0(8) null<1>D g83<4>.xD 0D { align16 1Q switch }; mov(8) g55<1>.zD g53<4>.yD { align16 NoDDChk 1Q }; mov(8) g56<1>.zD g53<4>.yD { align16 NoDDChk 1Q }; (+f0) 
sel(8) g26<1>.xyzUD g56<4>.xyzzUD g55<4>.xyzzUD { align16 1Q }; else(8) JIP: 14 { align16 1Q }; END B38 ->B40 START B39 <-B37 (36 cycles) mov(8) g57<1>.xyD g78<4>.wzzzD { align16 NoDDClr 1Q }; mov(8) g58<1>.xyD g71<4>.yxxxD { align16 NoDDClr 1Q }; cmp.nz.f0(8) null<1>D g83<4>.xD 0D { align16 1Q switch }; mov(8) g57<1>.zD g53<4>.yD { align16 NoDDChk 1Q }; mov(8) g58<1>.zD g53<4>.yD { align16 NoDDChk 1Q }; (+f0) sel(8) g26<1>.xyzUD g58<4>.xyzzUD g57<4>.xyzzUD { align16 1Q }; END B39 ->B40 START B40 <-B39 <-B38 (2 cycles) endif(8) JIP: 2 { align16 1Q }; END B40 ->B41 START B41 <-B40 <-B36 (4 cycles) endif(8) JIP: 2 { align16 1Q }; else(8) JIP: 48 { align16 1Q }; END B41 ->B43 START B42 <-B34 (276 cycles) add(8) g85<1>.xD g99<4>.xD g20<4>.yD { align16 1Q }; cmp.nz.f0(8) null<1>D g40<4>D 0D { align16 1Q switch }; add(8) g23<1>.xD g104<4>.xD g20<4>.yD { align16 1Q }; shl(8) g86<1>.xD g85<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g24<1>.xD g23<4>.xD g47<4>.xUD { align16 1Q }; add(8) g91<1>.xD g49<4>.xD g86<4>.xD { align16 1Q compacted }; add(8) g29<1>.xD g49<4>.xD g24<4>.xD { align16 1Q compacted }; send(8) g90<1>F g91<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g28<1>F g29<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g93<1>F g90<4>.wzyxF g16<4>F { align16 1Q }; mul(8) g30<1>F g28<4>.wzyxF g16<4>F { align16 1Q }; (+f0) sel(8) g95<1>UD g93<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g90<4>.wzyxF g90<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g18<1>UD g93<4>UD g95<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g93<4>F g93<4>F { align16 1Q switch }; (+f0) sel(8) g21<1>UD g18<4>UD g93<4>UD { align16 1Q }; dp4(8) g26<1>.xF g21<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g40<4>D 0D { align16 1Q switch }; (+f0) sel(8) g31<1>UD g30<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g28<4>.wzyxF g28<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g33<1>UD g30<4>UD g31<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g30<4>F g30<4>F { align16 1Q switch }; (+f0) sel(8) g35<1>UD g33<4>UD g30<4>UD { align16 1Q }; dp4(8) g26<1>.yF g35<4>F g103<4>F { align16 NoDDClr 1Q compacted }; mov(8) g26<1>.zD g53<4>.yD { align16 NoDDChk 1Q }; END B42 ->B43 START B43 <-B42 <-B41 (20 cycles) endif(8) JIP: 6 { align16 1Q }; add(8) g25<1>.zF g63<4>.zF g26<4>.zF { align16 NoDDClr 1Q }; mov(8) g25<1>.xyD g26<4>.xyyyD { align16 NoDDChk 1Q }; END B43 ->B44 START B44 <-B43 <-B33 (52 cycles) endif(8) JIP: 1229 { align16 1Q }; mov(8) g42<1>.xUD 0x00000010UD { align16 1Q compacted }; send(8) g41<1>F g42<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; cmp.nz.f0(8) null<1>.xD g41<4>.xD g96<4>.xD { align16 1Q switch }; (+f0.x) if(8) JIP: 8 UIP: 8 { align16 1Q }; END B44 ->B45 ->B46 START B45 <-B44 (34 cycles) add(8) g44<1>.yF g63<4>.yF -g25<4>.xF { align16 1Q }; mov(8) g25<1>.xD g25<4>.yD { align16 NoDDClr 1Q }; mov(8) g25<1>.yD g44<4>.yD { align16 NoDDChk 1Q }; END B45 ->B46 START B46 <-B44 <-B45 (52 cycles) endif(8) JIP: 1214 { align16 1Q }; mov(8) g48<1>.xUD 0x00000020UD { align16 1Q compacted }; send(8) g46<1>F g48<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; cmp.nz.f0(8) null<1>.xD g46<4>.xD g96<4>.xD { align16 1Q switch }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B46 ->B47 ->B48 START B47 <-B46 (2 cycles) add(8) g25<1>.xyF g63<4>.yF -g25<4>.yxxxF { align16 1Q }; END B47 ->B48 START B48 <-B46 <-B47 (268 cycles) endif(8) JIP: 1203 { align16 1Q }; mov(8) g5<1>D 
g25<4>.xyyyD { align16 1Q }; mul(8) g51<1>F g63<4>.zyzyF g92<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g92<4>.xF g92<4>.xF { align16 1Q switch }; mov(8) g81<1>.xUD 0x000001b0UD { align16 1Q compacted }; mov(8) g82<1>.yD g25<4>.zF { align16 1Q }; add(8) g83<1>.xF g53<4>.wF -g45<4>.xF { align16 1Q }; mov(8) g28<1>.yzwD g22<4>.yyzwD { align16 1Q }; (+f0) sel(8) g54<1>UD g51<4>UD g102<4>UD { align16 1Q }; send(8) g79<1>F g81<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; mov(8) g27<1>.zwD g79<4>.yyyxD { align16 1Q }; (+f0) sel(8) g55<1>UD g51<4>UD g54<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g51<4>F g51<4>F { align16 1Q switch }; (+f0) sel(8) g57<1>UD g55<4>UD g51<4>UD { align16 1Q }; rndd(8) g58<1>.xyF g57<4>.xyyyF { align16 1Q }; add(8) g59<1>.xyF g57<4>.zwwwF -g58<4>.xyyyF { align16 NoDDClr 1Q }; mov(8) g59<1>.zwD g57<4>.zzzwD { align16 NoDDChk 1Q }; mul(8) g60<1>F g63<4>.zF g59<4>F { align16 1Q }; cmp.nz.f0(8) null<1>F g59<4>F g59<4>F { align16 1Q switch }; (+f0) sel(8) g62<1>.xyUD g60<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g64<1>.xyUD g60<4>.xyyyUD g62<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g60<4>F g60<4>F { align16 1Q switch }; (+f0) sel(8) g76<1>.xyUD g64<4>.xyyyUD g60<4>.xyyyUD { align16 1Q }; mov(8) g60<1>.xD g45<4>.xD { align16 1Q compacted }; cmp.ge.f0(8) null<1>.xF g83<4>.xF g63<4>.wF { align16 1Q switch }; (+f0.x) if(8) JIP: 257 UIP: 257 { align16 1Q }; END B48 ->B49 ->B64 START B49 <-B48 (542 cycles) add(8) g86<1>.xyF g63<4>.xyyyF g39<4>.zwwwF { align16 1Q }; mov(8) g61<1>.xyD g94<4>.wzzzD { align16 NoDDClr 1Q }; mul(8) g87<1>F g63<4>.zF g53<4>.wF { align16 1Q }; cmp.nz.f0(8) null<1>F g53<4>.wF g53<4>.wF { align16 1Q switch }; mov(8) g29<1>.xyD g86<4>.xyyyD { align16 1Q }; mov(8) g61<1>.zwD g79<4>.yyyxD { align16 NoDDChk 1Q }; (+f0) sel(8) g90<1>.xyUD g87<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; mov(8) g29<1>.zwD g9<4>.zzzwD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g91<1>.xyUD g87<4>.xyyyUD g90<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g87<4>F g87<4>F { align16 1Q switch }; (+f0) sel(8) g93<1>.xyUD g91<4>.xyyyUD g87<4>.xyyyUD { align16 1Q }; mov(8) g62<1>.xyD g93<4>.xyyyD { align16 1Q }; rndd(8) g95<1>.yF g93<4>.yF { align16 1Q }; add(8) g18<1>.xF g93<4>.xF -g95<4>.yF { align16 1Q }; cmp.ge.f0(8) g20<1>.xF g18<4>.xF g63<4>.wF { align16 1Q }; mul(8) g31<1>F g63<4>.zF g18<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>D g20<4>.xD 0D { align16 1Q switch }; (+f0) sel(8) g21<1>UD g61<4>.yxzwUD g61<4>UD { align16 1Q }; mul(8) g22<1>F g21<4>F g70<4>.wzyxF { align16 1Q }; cmp.nz.f0(8) null<1>F g70<4>.wzyxF g70<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g24<1>.xyUD g22<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g21<4>F g21<4>F { align16 1Q switch }; (+f0) sel(8) g26<1>.xyUD g22<4>.xyyyUD g24<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g22<4>F g22<4>F { align16 1Q switch }; (+f0) sel(8) g30<1>.xyUD g26<4>.xyyyUD g22<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g18<4>.xF g18<4>.xF { align16 1Q switch }; (+f0) sel(8) g33<1>.xUD g31<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g34<1>.xUD g31<4>.xUD g33<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g31<4>F g31<4>F { align16 1Q switch }; (+f0) sel(8) g36<1>.xUD g34<4>.xUD g31<4>.xUD { align16 1Q }; mul(8) g37<1>F g63<4>.zyzyF g36<4>.xF { align16 
1Q }; cmp.nz.f0(8) null<1>F g36<4>.xF g36<4>.xF { align16 1Q switch }; (+f0) sel(8) g41<1>UD g37<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; (+f0) sel(8) g42<1>UD g37<4>UD g41<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g37<4>F g37<4>F { align16 1Q switch }; (+f0) sel(8) g44<1>UD g42<4>UD g37<4>UD { align16 1Q }; rndd(8) g45<1>.zwF g44<4>.zzzwF { align16 1Q }; add(8) g46<1>.xyF g44<4>.xyyyF -g45<4>.zwwwF { align16 1Q }; cmp.ge.f0(8) g48<1>.xyF g46<4>.xyyyF g63<4>.wF { align16 1Q }; mov.nz.f0(8) null<1>.xD g48<4>.yD { align16 1Q }; (+f0.x) if(8) JIP: 20 UIP: 20 { align16 1Q }; END B49 ->B50 ->B51 START B50 <-B49 (132 cycles) mul(8) g50<1>F g86<4>.xF g30<4>.xF { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g30<4>.xF g30<4>.xF { align16 1Q switch }; (+f0) sel(8) g52<1>.xUD g50<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g86<4>.xF g86<4>.xF { align16 1Q switch }; (+f0) sel(8) g54<1>.xUD g50<4>.xUD g52<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g50<4>F g50<4>F { align16 1Q switch }; (+f0) sel(8) g56<1>.xUD g54<4>.xUD g50<4>.xUD { align16 1Q }; mov(8) g29<1>.xD g56<4>.xD { align16 NoDDClr 1Q compacted }; mov(8) g29<1>.yD g86<4>.yD { align16 NoDDClr,NoDDChk 1Q }; mov(8) g29<1>.zwD g9<4>.zzzwD { align16 NoDDChk 1Q }; END B50 ->B51 START B51 <-B49 <-B50 (20 cycles) endif(8) JIP: 149 { align16 1Q }; mov.nz.f0(8) null<1>.xD g48<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 20 UIP: 20 { align16 1Q }; END B51 ->B52 ->B53 START B52 <-B51 (116 cycles) mul(8) g58<1>F g29<4>.yF g30<4>.yF { align16 1Q }; cmp.nz.f0(8) null<1>F g30<4>.yF g30<4>.yF { align16 1Q switch }; (+f0) sel(8) g61<1>.yUD g58<4>.yUD g102<4>.yUD { align16 1Q }; cmp.nz.f0(8) null<1>F g29<4>.yF g29<4>.yF { align16 1Q switch }; (+f0) sel(8) g64<1>.yUD g58<4>.yUD g61<4>.yUD { align16 1Q }; cmp.nz.f0(8) null<1>F g58<4>F g58<4>F { align16 1Q switch }; (+f0) sel(8) g66<1>.yUD g64<4>.yUD g58<4>.yUD { align16 1Q }; add(8) g67<1>.yF g66<4>.yF g63<4>.yF { align16 1Q }; add(8) g29<1>.yF g67<4>.yF -g30<4>.yF { align16 1Q }; END B52 ->B53 START B53 <-B51 <-B52 (268 cycles) endif(8) JIP: 125 { align16 1Q }; add(8) g69<1>.xyF g63<4>.yF -g30<4>.xyyyF { align16 1Q }; mov(8) g27<1>.zwD g21<4>.zzzwD { align16 1Q }; mul(8) g71<1>F g63<4>.zF g46<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g46<4>.xF g46<4>.xF { align16 1Q switch }; (+f0) sel(8) g73<1>.xUD g71<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g74<1>.xUD g71<4>.xUD g73<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g71<4>F g71<4>F { align16 1Q switch }; (+f0) sel(8) g77<1>.xUD g74<4>.xUD g71<4>.xUD { align16 1Q }; mul(8) g78<1>F g63<4>.zyzyF g77<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g77<4>.xF g77<4>.xF { align16 1Q switch }; (+f0) sel(8) g81<1>UD g78<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; (+f0) sel(8) g83<1>UD g78<4>UD g81<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g78<4>F g78<4>F { align16 1Q switch }; (+f0) sel(8) g85<1>UD g83<4>UD g78<4>UD { align16 1Q }; rndd(8) g86<1>.zwF g85<4>.zzzwF { align16 1Q }; add(8) g87<1>.xyF g85<4>.xyyyF -g86<4>.zwwwF { align16 1Q }; cmp.ge.f0(8) g88<1>.xyF g87<4>.xyyyF g63<4>.wF { align16 1Q }; mov.nz.f0(8) null<1>.xD g88<4>.yD { align16 1Q }; (+f0.x) if(8) JIP: 3 UIP: 3 { align16 1Q }; END B53 ->B54 ->B55 START B54 <-B53 (2 cycles) add(8) g29<1>.xF g29<4>.xF g69<4>.xF { align16 1Q compacted }; END B54 ->B55 START B55 <-B53 <-B54 (20 cycles) endif(8) JIP: 80 { align16 1Q }; mov.nz.f0(8) null<1>.xD g88<4>.xD 
{ align16 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B55 ->B56 ->B57 START B56 <-B55 (2 cycles) add(8) g29<1>.yF g29<4>.yF -g69<4>.yF { align16 1Q }; END B56 ->B57 START B57 <-B55 <-B56 (264 cycles) endif(8) JIP: 72 { align16 1Q }; mul(8) g93<1>F g63<4>.zF g87<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g87<4>.xF g87<4>.xF { align16 1Q switch }; (+f0) sel(8) g95<1>.xUD g93<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g18<1>.xUD g93<4>.xUD g95<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g93<4>F g93<4>F { align16 1Q switch }; (+f0) sel(8) g20<1>.xUD g18<4>.xUD g93<4>.xUD { align16 1Q }; mul(8) g21<1>F g63<4>.zyzyF g20<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g20<4>.xF g20<4>.xF { align16 1Q switch }; (+f0) sel(8) g23<1>UD g21<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; (+f0) sel(8) g24<1>UD g21<4>UD g23<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g21<4>F g21<4>F { align16 1Q switch }; (+f0) sel(8) g26<1>UD g24<4>UD g21<4>UD { align16 1Q }; rndd(8) g30<1>.zwF g26<4>.zzzwF { align16 1Q }; add(8) g31<1>.xyF g26<4>.xyyyF -g30<4>.zwwwF { align16 1Q }; cmp.ge.f0(8) g32<1>.xyF g31<4>.xyyyF g63<4>.wF { align16 1Q }; mov.nz.f0(8) null<1>.xD g32<4>.yD { align16 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B57 ->B58 ->B59 START B58 <-B57 (2 cycles) add(8) g29<1>.xF g63<4>.yF -g29<4>.xF { align16 1Q }; END B58 ->B59 START B59 <-B57 <-B58 (20 cycles) endif(8) JIP: 30 { align16 1Q }; mov.nz.f0(8) null<1>.xD g32<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B59 ->B60 ->B61 START B60 <-B59 (2 cycles) add(8) g29<1>.yF g63<4>.yF -g29<4>.yF { align16 1Q }; END B60 ->B61 START B61 <-B59 <-B60 (52 cycles) endif(8) JIP: 22 { align16 1Q }; rndd(8) g37<1>.yF g62<4>.yF { align16 1Q }; add(8) g41<1>.xF g62<4>.xF -g37<4>.yF { align16 1Q }; mov(8) g28<1>.yD g37<4>.yD { align16 NoDDClr 1Q }; cmp.ge.f0(8) null<1>.xF g41<4>.xF g63<4>.wF { align16 1Q switch }; mov(8) g28<1>.zwD g30<4>.zzzwD { align16 NoDDChk 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B61 ->B62 ->B63 START B62 <-B61 (2 cycles) add(8) g29<1>.xyF g63<4>.yF -g29<4>.yxxxF { align16 1Q }; END B62 ->B63 START B63 <-B61 <-B62 (20 cycles) endif(8) JIP: 6 { align16 1Q }; add(8) g9<1>.yF g63<4>.yF -g29<4>.yF { align16 NoDDClr 1Q }; mov(8) g9<1>.xzwD g29<4>.xxzwD { align16 NoDDChk 1Q }; END B63 ->B64 START B64 <-B48 <-B63 (20 cycles) endif(8) JIP: 894 { align16 1Q }; cmp.l.f0(8) null<1>.xF g76<4>.yF g63<4>.yF { align16 1Q switch }; (+f0.x) if(8) JIP: 90 UIP: 276 { align16 1Q }; END B64 ->B65 ->B69 START B65 <-B64 (72 cycles) add(8) g48<1>.xF g53<4>.wF -g60<4>.xF { align16 1Q }; add(8) g50<1>.xD g99<4>.xD g82<4>.yD { align16 1Q }; cmp.ge.f0(8) null<1>.xF g48<4>.xF g63<4>.wF { align16 1Q switch }; shl(8) g51<1>.xD g50<4>.xD g47<4>.xUD { align16 1Q }; add(8) g57<1>.xD g49<4>.xD g51<4>.xD { align16 1Q compacted }; send(8) g56<1>F g57<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g54<1>UD g56<4>UD { align16 1Q }; mov(8) g28<1>.yzwD g56<4>.zzyxD { align16 1Q }; (+f0.x) if(8) JIP: 39 UIP: 65 { align16 1Q }; END B65 ->B66 ->B67 START B66 <-B65 (246 cycles) mul(8) g61<1>F g9<4>.xyyyF g56<4>.wzzzF { align16 1Q }; cmp.nz.f0(8) null<1>F g56<4>.wzzzF g56<4>.wzzzF { align16 1Q switch }; (+f0) sel(8) g64<1>.xyUD g61<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g9<4>.xyyyF g9<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g66<1>.xyUD g61<4>.xyyyUD g64<4>.xyyyUD { align16 
1Q }; cmp.nz.f0(8) null<1>F g61<4>F g61<4>F { align16 1Q switch }; (+f0) sel(8) g68<1>.xyUD g66<4>.xyyyUD g61<4>.xyyyUD { align16 1Q }; add(8) g69<1>.xyF g68<4>.xyyyF g56<4>.yxxxF { align16 1Q }; cmp.nz.f0(8) null<1>F g27<4>.zwwwF g27<4>.zwwwF { align16 1Q switch }; mul(8) g71<1>F g69<4>.xyyyF g27<4>.zwwwF { align16 1Q }; (+f0) sel(8) g73<1>.xyUD g71<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g69<4>.xyyyF g69<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g75<1>.xyUD g71<4>.xyyyUD g73<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g71<4>F g71<4>F { align16 1Q switch }; (+f0) sel(8) g77<1>.xyUD g75<4>.xyyyUD g71<4>.xyyyUD { align16 1Q }; add(8) g30<1>.yF g63<4>.yF -g77<4>.yF { align16 NoDDClr 1Q }; mov(8) g30<1>.xD g77<4>.xD { align16 NoDDClr,NoDDChk 1Q compacted }; mov(8) g30<1>.zD g25<4>.zD { align16 NoDDChk 1Q }; else(8) JIP: 28 { align16 1Q }; END B66 ->B68 START B67 <-B65 (88 cycles) mov(8) g66<1>.xD g54<4>.wD { align16 NoDDClr 1Q }; mov(8) g67<1>.xD g54<4>.yD { align16 NoDDClr 1Q }; cmp.nz.f0(8) g79<1>.xyF g63<4>.xF g39<4>.zwwwF { align16 1Q }; mov(8) g66<1>.zD g25<4>.zD { align16 NoDDChk 1Q }; mov(8) g67<1>.zD g25<4>.zD { align16 NoDDChk 1Q }; cmp.nz.f0(8) null<1>D g79<4>.xD 0D { align16 1Q switch }; (+f0) sel(8) g81<1>.xzUD g67<4>.xxzzUD g66<4>.xxzzUD { align16 1Q }; mov(8) g68<1>.xzD g81<4>.xxzzD { align16 NoDDClr 1Q }; mov(8) g69<1>.xzD g81<4>.xxzzD { align16 NoDDClr 1Q }; cmp.nz.f0(8) null<1>D g79<4>.yD 0D { align16 1Q switch }; mov(8) g68<1>.yD g54<4>.zD { align16 NoDDChk 1Q }; mov(8) g69<1>.yD g54<4>.xD { align16 NoDDChk 1Q }; (+f0) sel(8) g30<1>.xyzUD g69<4>.xyzzUD g68<4>.xyzzUD { align16 1Q }; END B67 ->B68 START B68 <-B67 <-B66 (22 cycles) endif(8) JIP: 6 { align16 1Q }; add(8) g31<1>.zF g63<4>.yF g30<4>.zF { align16 NoDDClr 1Q }; mov(8) g31<1>.xyD g30<4>.xyyyD { align16 NoDDChk 1Q }; else(8) JIP: 188 { align16 1Q }; END B68 ->B79 START B69 <-B64 (18 cycles) cmp.l.f0(8) null<1>.xF g76<4>.xF g63<4>.yF { align16 1Q switch }; (+f0.x) if(8) JIP: 132 UIP: 178 { align16 1Q }; END B69 ->B70 ->B77 START B70 <-B69 (34 cycles) add(8) g84<1>.xF g53<4>.wF -g60<4>.xF { align16 1Q }; cmp.ge.f0(8) null<1>.xF g84<4>.xF g63<4>.wF { align16 1Q switch }; (+f0.x) if(8) JIP: 68 UIP: 122 { align16 1Q }; END B70 ->B71 ->B72 START B71 <-B70 (408 cycles) mov(8) g9<1>.zwD g63<4>.xxxyD { align16 1Q }; add(8) g87<1>.xD g99<4>.xD g82<4>.yD { align16 1Q }; add(8) g26<1>.xD g104<4>.xD g82<4>.yD { align16 1Q }; cmp.nz.f0(8) g18<1>F g9<4>F g9<4>F { align16 1Q compacted }; shl(8) g88<1>.xD g87<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g29<1>.xD g26<4>.xD g47<4>.xUD { align16 1Q }; add(8) g93<1>.xD g49<4>.xD g88<4>.xD { align16 1Q compacted }; add(8) g34<1>.xD g49<4>.xD g29<4>.xD { align16 1Q compacted }; send(8) g92<1>F g93<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g33<1>F g34<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g95<1>F g92<4>.wzyxF g9<4>F { align16 1Q }; mul(8) g35<1>F g33<4>.wzyxF g9<4>F { align16 1Q }; (+f0) sel(8) g20<1>UD g95<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g92<4>.wzyxF g92<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g22<1>UD g95<4>UD g20<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g95<4>F g95<4>F { align16 1Q switch }; (+f0) sel(8) g24<1>UD g22<4>UD g95<4>UD { align16 1Q }; dp4(8) g71<1>.xF g24<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g18<4>D 0D { align16 1Q switch }; (+f0) sel(8) g36<1>UD g35<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F 
g33<4>.wzyxF g33<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g41<1>UD g35<4>UD g36<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g35<4>F g35<4>F { align16 1Q switch }; (+f0) sel(8) g43<1>UD g41<4>UD g35<4>UD { align16 1Q }; dp4(8) g71<1>.yF g43<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g27<4>.zwwwF g27<4>.zwwwF { align16 1Q switch }; mul(8) g45<1>F g71<4>.xyyyF g27<4>.zwwwF { align16 1Q }; (+f0) sel(8) g48<1>.xyUD g45<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g71<4>.xyyyF g71<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g50<1>.xyUD g45<4>.xyyyUD g48<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g45<4>F g45<4>F { align16 1Q switch }; (+f0) sel(8) g52<1>.xyUD g50<4>.xyyyUD g45<4>.xyyyUD { align16 1Q }; add(8) g32<1>.yF g63<4>.yF -g52<4>.yF { align16 NoDDClr 1Q }; mov(8) g32<1>.xD g52<4>.xD { align16 NoDDClr,NoDDChk 1Q compacted }; mov(8) g32<1>.zD g25<4>.zD { align16 NoDDChk 1Q }; else(8) JIP: 56 { align16 1Q }; END B71 ->B76 START B72 <-B70 (72 cycles) add(8) g54<1>.xD g99<4>.xD g82<4>.yD { align16 1Q }; add(8) g62<1>.xD g104<4>.xD g82<4>.yD { align16 1Q }; cmp.nz.f0(8) g71<1>.xF g63<4>.xF g39<4>.zF { align16 1Q }; cmp.z.f0(8) null<1>.xF g63<4>.xF g39<4>.wF { align16 1Q switch }; shl(8) g55<1>.xD g54<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g64<1>.xD g62<4>.xD g47<4>.xUD { align16 1Q }; add(8) g61<1>.xD g49<4>.xD g55<4>.xD { align16 1Q compacted }; add(8) g69<1>.xD g49<4>.xD g64<4>.xD { align16 1Q compacted }; send(8) g59<1>F g61<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g68<1>F g69<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g57<1>.xyUD g59<4>.xyyyUD { align16 1Q }; mov(8) g66<1>.zwUD g68<4>.zzzwUD { align16 1Q }; mov(8) g28<1>.yzwD g68<4>.zzyxD { align16 1Q }; (+f0.x) if(8) JIP: 16 UIP: 28 { align16 1Q }; END B72 ->B73 ->B74 START B73 <-B72 (38 cycles) mov(8) g72<1>.xyD g59<4>.wzzzD { align16 NoDDClr 1Q }; mov(8) g73<1>.xyD g68<4>.yxxxD { align16 NoDDClr 1Q }; cmp.nz.f0(8) null<1>D g71<4>.xD 0D { align16 1Q switch }; mov(8) g72<1>.zD g25<4>.zD { align16 NoDDChk 1Q }; mov(8) g73<1>.zD g25<4>.zD { align16 NoDDChk 1Q }; (+f0) sel(8) g32<1>.xyzUD g73<4>.xyzzUD g72<4>.xyzzUD { align16 1Q }; else(8) JIP: 14 { align16 1Q }; END B73 ->B75 START B74 <-B72 (36 cycles) mov(8) g74<1>.xyD g66<4>.wzzzD { align16 NoDDClr 1Q }; mov(8) g75<1>.xyD g57<4>.yxxxD { align16 NoDDClr 1Q }; cmp.nz.f0(8) null<1>D g71<4>.xD 0D { align16 1Q switch }; mov(8) g74<1>.zD g25<4>.zD { align16 NoDDChk 1Q }; mov(8) g75<1>.zD g25<4>.zD { align16 NoDDChk 1Q }; (+f0) sel(8) g32<1>.xyzUD g75<4>.xyzzUD g74<4>.xyzzUD { align16 1Q }; END B74 ->B75 START B75 <-B74 <-B73 (2 cycles) endif(8) JIP: 2 { align16 1Q }; END B75 ->B76 START B76 <-B75 <-B71 (4 cycles) endif(8) JIP: 2 { align16 1Q }; else(8) JIP: 48 { align16 1Q }; END B76 ->B78 START B77 <-B69 (276 cycles) add(8) g73<1>.xD g99<4>.xD g82<4>.yD { align16 1Q }; cmp.nz.f0(8) null<1>D g40<4>D 0D { align16 1Q switch }; add(8) g90<1>.xD g104<4>.xD g82<4>.yD { align16 1Q }; shl(8) g74<1>.xD g73<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g91<1>.xD g90<4>.xD g47<4>.xUD { align16 1Q }; add(8) g79<1>.xD g49<4>.xD g74<4>.xD { align16 1Q compacted }; add(8) g18<1>.xD g49<4>.xD g91<4>.xD { align16 1Q compacted }; send(8) g78<1>F g79<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g95<1>F g18<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g81<1>F g78<4>.wzyxF g16<4>F { align16 1Q }; mul(8) 
g20<1>F g95<4>.wzyxF g16<4>F { align16 1Q }; (+f0) sel(8) g83<1>UD g81<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g78<4>.wzyxF g78<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g85<1>UD g81<4>UD g83<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g81<4>F g81<4>F { align16 1Q switch }; (+f0) sel(8) g87<1>UD g85<4>UD g81<4>UD { align16 1Q }; dp4(8) g32<1>.xF g87<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g40<4>D 0D { align16 1Q switch }; (+f0) sel(8) g21<1>UD g20<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g95<4>.wzyxF g95<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g23<1>UD g20<4>UD g21<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g20<4>F g20<4>F { align16 1Q switch }; (+f0) sel(8) g26<1>UD g23<4>UD g20<4>UD { align16 1Q }; dp4(8) g32<1>.yF g26<4>F g103<4>F { align16 NoDDClr 1Q compacted }; mov(8) g32<1>.zD g25<4>.zD { align16 NoDDChk 1Q }; END B77 ->B78 START B78 <-B77 <-B76 (20 cycles) endif(8) JIP: 6 { align16 1Q }; add(8) g31<1>.zF g63<4>.zF g32<4>.zF { align16 NoDDClr 1Q }; mov(8) g31<1>.xyD g32<4>.xyyyD { align16 NoDDChk 1Q }; END B78 ->B79 START B79 <-B78 <-B68 (52 cycles) endif(8) JIP: 614 { align16 1Q }; mov(8) g33<1>.xUD 0x00000030UD { align16 1Q compacted }; send(8) g32<1>F g33<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; cmp.nz.f0(8) null<1>.xD g32<4>.xD g96<4>.xD { align16 1Q switch }; (+f0.x) if(8) JIP: 8 UIP: 8 { align16 1Q }; END B79 ->B80 ->B81 START B80 <-B79 (34 cycles) add(8) g35<1>.yF g63<4>.yF -g31<4>.xF { align16 1Q }; mov(8) g31<1>.xD g31<4>.yD { align16 NoDDClr 1Q }; mov(8) g31<1>.yD g35<4>.yD { align16 NoDDChk 1Q }; END B80 ->B81 START B81 <-B79 <-B80 (52 cycles) endif(8) JIP: 599 { align16 1Q }; mov(8) g42<1>.xUD 0x00000040UD { align16 1Q compacted }; send(8) g41<1>F g42<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; cmp.nz.f0(8) null<1>.xD g41<4>.xD g96<4>.xD { align16 1Q switch }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B81 ->B82 ->B83 START B82 <-B81 (2 cycles) add(8) g31<1>.xyF g63<4>.yF -g31<4>.yxxxF { align16 1Q }; END B82 ->B83 START B83 <-B81 <-B82 (266 cycles) endif(8) JIP: 588 { align16 1Q }; mov(8) g4<1>D g31<4>.xyyyD { align16 1Q }; mul(8) g45<1>F g63<4>.zyzyF g76<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g76<4>.xF g76<4>.xF { align16 1Q switch }; mov(8) g65<1>.xUD 0x000001c0UD { align16 1Q compacted }; mov(8) g66<1>.yD g31<4>.zF { align16 1Q }; mov(8) g77<1>.xD g60<4>.xD { align16 1Q compacted }; add(8) g67<1>.xF g53<4>.wF -g60<4>.xF { align16 1Q }; mov(8) g8<1>.yzwD g28<4>.yyzwD { align16 1Q }; (+f0) sel(8) g48<1>UD g45<4>UD g102<4>UD { align16 1Q }; send(8) g64<1>F g65<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g17<1>.yD g66<4>.yD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; mov(8) g33<1>.zwD g64<4>.yyyxD { align16 1Q }; (+f0) sel(8) g50<1>UD g45<4>UD g48<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g45<4>F g45<4>F { align16 1Q switch }; (+f0) sel(8) g52<1>UD g50<4>UD g45<4>UD { align16 1Q }; rndd(8) g54<1>.xyF g52<4>.xyyyF { align16 1Q }; add(8) g76<1>.xyF g52<4>.zwwwF -g54<4>.xyyyF { align16 NoDDClr 1Q }; mov(8) g76<1>.zwD g52<4>.zzzwD { align16 NoDDChk 1Q }; mul(8) g56<1>F g63<4>.zF g76<4>F { align16 1Q }; cmp.nz.f0(8) null<1>F g76<4>F g76<4>F { align16 1Q switch }; (+f0) sel(8) g58<1>UD g56<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g59<1>UD g56<4>UD g58<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g56<4>F g56<4>F { 
align16 1Q switch }; (+f0) sel(8) g10<1>UD g59<4>UD g56<4>UD { align16 1Q }; cmp.ge.f0(8) null<1>.xF g67<4>.xF g63<4>.wF { align16 1Q switch }; (+f0.x) if(8) JIP: 257 UIP: 257 { align16 1Q }; END B83 ->B84 ->B99 START B84 <-B83 (542 cycles) add(8) g71<1>.xyF g63<4>.xyyyF g39<4>.zwwwF { align16 1Q }; mov(8) g78<1>.xyD g94<4>.wzzzD { align16 NoDDClr 1Q }; mul(8) g72<1>F g63<4>.zF g53<4>.wF { align16 1Q }; cmp.nz.f0(8) null<1>F g53<4>.wF g53<4>.wF { align16 1Q switch }; mov(8) g34<1>.xyD g71<4>.xyyyD { align16 1Q }; mov(8) g78<1>.zwD g64<4>.yyyxD { align16 NoDDChk 1Q }; (+f0) sel(8) g74<1>.xyUD g72<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; mov(8) g34<1>.zwD g9<4>.zzzwD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g75<1>.xyUD g72<4>.xyyyUD g74<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g72<4>F g72<4>F { align16 1Q switch }; (+f0) sel(8) g81<1>.xyUD g75<4>.xyyyUD g72<4>.xyyyUD { align16 1Q }; mov(8) g79<1>.xyD g81<4>.xyyyD { align16 1Q }; rndd(8) g82<1>.yF g81<4>.yF { align16 1Q }; add(8) g83<1>.xF g81<4>.xF -g82<4>.yF { align16 1Q }; cmp.ge.f0(8) g84<1>.xF g83<4>.xF g63<4>.wF { align16 1Q }; mul(8) g93<1>F g63<4>.zF g83<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>D g84<4>.xD 0D { align16 1Q switch }; (+f0) sel(8) g85<1>UD g78<4>.yxzwUD g78<4>UD { align16 1Q }; mul(8) g86<1>F g85<4>F g70<4>.wzyxF { align16 1Q }; cmp.nz.f0(8) null<1>F g70<4>.wzyxF g70<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g88<1>.xyUD g86<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g85<4>F g85<4>F { align16 1Q switch }; (+f0) sel(8) g90<1>.xyUD g86<4>.xyyyUD g88<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g86<4>F g86<4>F { align16 1Q switch }; (+f0) sel(8) g92<1>.xyUD g90<4>.xyyyUD g86<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g83<4>.xF g83<4>.xF { align16 1Q switch }; (+f0) sel(8) g95<1>.xUD g93<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g18<1>.xUD g93<4>.xUD g95<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g93<4>F g93<4>F { align16 1Q switch }; (+f0) sel(8) g20<1>.xUD g18<4>.xUD g93<4>.xUD { align16 1Q }; mul(8) g21<1>F g63<4>.zyzyF g20<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g20<4>.xF g20<4>.xF { align16 1Q switch }; (+f0) sel(8) g23<1>UD g21<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; (+f0) sel(8) g24<1>UD g21<4>UD g23<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g21<4>F g21<4>F { align16 1Q switch }; (+f0) sel(8) g26<1>UD g24<4>UD g21<4>UD { align16 1Q }; rndd(8) g27<1>.zwF g26<4>.zzzwF { align16 1Q }; add(8) g28<1>.xyF g26<4>.xyyyF -g27<4>.zwwwF { align16 1Q }; cmp.ge.f0(8) g29<1>.xyF g28<4>.xyyyF g63<4>.wF { align16 1Q }; mov.nz.f0(8) null<1>.xD g29<4>.yD { align16 1Q }; (+f0.x) if(8) JIP: 20 UIP: 20 { align16 1Q }; END B84 ->B85 ->B86 START B85 <-B84 (132 cycles) mul(8) g31<1>F g71<4>.xF g92<4>.xF { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g92<4>.xF g92<4>.xF { align16 1Q switch }; (+f0) sel(8) g35<1>.xUD g31<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g71<4>.xF g71<4>.xF { align16 1Q switch }; (+f0) sel(8) g37<1>.xUD g31<4>.xUD g35<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g31<4>F g31<4>F { align16 1Q switch }; (+f0) sel(8) g41<1>.xUD g37<4>.xUD g31<4>.xUD { align16 1Q }; mov(8) g34<1>.xD g41<4>.xD { align16 NoDDClr 1Q compacted }; mov(8) g34<1>.yD g71<4>.yD { align16 NoDDClr,NoDDChk 1Q }; mov(8) g34<1>.zwD g9<4>.zzzwD { align16 NoDDChk 1Q }; END B85 ->B86 START B86 <-B84 <-B85 (20 cycles) endif(8) JIP: 149 { align16 1Q 
}; mov.nz.f0(8) null<1>.xD g29<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 20 UIP: 20 { align16 1Q }; END B86 ->B87 ->B88 START B87 <-B86 (116 cycles) mul(8) g43<1>F g34<4>.yF g92<4>.yF { align16 1Q }; cmp.nz.f0(8) null<1>F g92<4>.yF g92<4>.yF { align16 1Q switch }; (+f0) sel(8) g45<1>.yUD g43<4>.yUD g102<4>.yUD { align16 1Q }; cmp.nz.f0(8) null<1>F g34<4>.yF g34<4>.yF { align16 1Q switch }; (+f0) sel(8) g48<1>.yUD g43<4>.yUD g45<4>.yUD { align16 1Q }; cmp.nz.f0(8) null<1>F g43<4>F g43<4>F { align16 1Q switch }; (+f0) sel(8) g50<1>.yUD g48<4>.yUD g43<4>.yUD { align16 1Q }; add(8) g51<1>.yF g50<4>.yF g63<4>.yF { align16 1Q }; add(8) g34<1>.yF g51<4>.yF -g92<4>.yF { align16 1Q }; END B87 ->B88 START B88 <-B86 <-B87 (268 cycles) endif(8) JIP: 125 { align16 1Q }; add(8) g54<1>.xyF g63<4>.yF -g92<4>.xyyyF { align16 1Q }; mov(8) g33<1>.zwD g85<4>.zzzwD { align16 1Q }; mul(8) g55<1>F g63<4>.zF g28<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g28<4>.xF g28<4>.xF { align16 1Q switch }; (+f0) sel(8) g57<1>.xUD g55<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g58<1>.xUD g55<4>.xUD g57<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g55<4>F g55<4>F { align16 1Q switch }; (+f0) sel(8) g60<1>.xUD g58<4>.xUD g55<4>.xUD { align16 1Q }; mul(8) g61<1>F g63<4>.zyzyF g60<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g60<4>.xF g60<4>.xF { align16 1Q switch }; (+f0) sel(8) g64<1>UD g61<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; (+f0) sel(8) g65<1>UD g61<4>UD g64<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g61<4>F g61<4>F { align16 1Q switch }; (+f0) sel(8) g67<1>UD g65<4>UD g61<4>UD { align16 1Q }; rndd(8) g68<1>.zwF g67<4>.zzzwF { align16 1Q }; add(8) g69<1>.xyF g67<4>.xyyyF -g68<4>.zwwwF { align16 1Q }; cmp.ge.f0(8) g70<1>.xyF g69<4>.xyyyF g63<4>.wF { align16 1Q }; mov.nz.f0(8) null<1>.xD g70<4>.yD { align16 1Q }; (+f0.x) if(8) JIP: 3 UIP: 3 { align16 1Q }; END B88 ->B89 ->B90 START B89 <-B88 (2 cycles) add(8) g34<1>.xF g34<4>.xF g54<4>.xF { align16 1Q compacted }; END B89 ->B90 START B90 <-B88 <-B89 (20 cycles) endif(8) JIP: 80 { align16 1Q }; mov.nz.f0(8) null<1>.xD g70<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B90 ->B91 ->B92 START B91 <-B90 (2 cycles) add(8) g34<1>.yF g34<4>.yF -g54<4>.yF { align16 1Q }; END B91 ->B92 START B92 <-B90 <-B91 (264 cycles) endif(8) JIP: 72 { align16 1Q }; mul(8) g75<1>F g63<4>.zF g69<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g69<4>.xF g69<4>.xF { align16 1Q switch }; (+f0) sel(8) g78<1>.xUD g75<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>D g89<4>D 0D { align16 1Q switch }; (+f0) sel(8) g81<1>.xUD g75<4>.xUD g78<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g75<4>F g75<4>F { align16 1Q switch }; (+f0) sel(8) g83<1>.xUD g81<4>.xUD g75<4>.xUD { align16 1Q }; mul(8) g84<1>F g63<4>.zyzyF g83<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g83<4>.xF g83<4>.xF { align16 1Q switch }; (+f0) sel(8) g86<1>UD g84<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>D g80<4>D 0D { align16 1Q switch }; (+f0) sel(8) g87<1>UD g84<4>UD g86<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g84<4>F g84<4>F { align16 1Q switch }; (+f0) sel(8) g89<1>UD g87<4>UD g84<4>UD { align16 1Q }; rndd(8) g90<1>.zwF g89<4>.zzzwF { align16 1Q }; add(8) g91<1>.xyF g89<4>.xyyyF -g90<4>.zwwwF { align16 1Q }; cmp.ge.f0(8) g92<1>.xyF g91<4>.xyyyF g63<4>.wF { align16 1Q }; mov.nz.f0(8) null<1>.xD g92<4>.yD { align16 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B92 ->B93 ->B94 START B93 
<-B92 (2 cycles) add(8) g34<1>.xF g63<4>.yF -g34<4>.xF { align16 1Q }; END B93 ->B94 START B94 <-B92 <-B93 (20 cycles) endif(8) JIP: 30 { align16 1Q }; mov.nz.f0(8) null<1>.xD g92<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B94 ->B95 ->B96 START B95 <-B94 (2 cycles) add(8) g34<1>.yF g63<4>.yF -g34<4>.yF { align16 1Q }; END B95 ->B96 START B96 <-B94 <-B95 (52 cycles) endif(8) JIP: 22 { align16 1Q }; rndd(8) g18<1>.yF g79<4>.yF { align16 1Q }; add(8) g20<1>.xF g79<4>.xF -g18<4>.yF { align16 1Q }; mov(8) g8<1>.yD g18<4>.yD { align16 NoDDClr 1Q }; cmp.ge.f0(8) null<1>.xF g20<4>.xF g63<4>.wF { align16 1Q switch }; mov(8) g8<1>.zwD g90<4>.zzzwD { align16 NoDDChk 1Q }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B96 ->B97 ->B98 START B97 <-B96 (2 cycles) add(8) g34<1>.xyF g63<4>.yF -g34<4>.yxxxF { align16 1Q }; END B97 ->B98 START B98 <-B96 <-B97 (20 cycles) endif(8) JIP: 6 { align16 1Q }; add(8) g9<1>.yF g63<4>.yF -g34<4>.yF { align16 NoDDClr 1Q }; mov(8) g9<1>.xzwD g34<4>.xxzwD { align16 NoDDChk 1Q }; END B98 ->B99 START B99 <-B83 <-B98 (20 cycles) endif(8) JIP: 277 { align16 1Q }; cmp.l.f0(8) null<1>.xF g10<4>.yF g63<4>.yF { align16 1Q switch }; (+f0.x) if(8) JIP: 76 UIP: 240 { align16 1Q }; END B99 ->B100 ->B104 START B100 <-B99 (72 cycles) add(8) g26<1>.xF g53<4>.wF -g77<4>.xF { align16 1Q }; add(8) g27<1>.xD g99<4>.xD g66<4>.yD { align16 1Q }; cmp.ge.f0(8) null<1>.xF g26<4>.xF g63<4>.wF { align16 1Q switch }; shl(8) g28<1>.xD g27<4>.xD g47<4>.xUD { align16 1Q }; add(8) g34<1>.xD g49<4>.xD g28<4>.xD { align16 1Q compacted }; send(8) g32<1>F g34<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g30<1>UD g32<4>UD { align16 1Q }; mov(8) g8<1>.yzwD g32<4>.zzyxD { align16 1Q }; (+f0.x) if(8) JIP: 37 UIP: 53 { align16 1Q }; END B100 ->B101 ->B102 START B101 <-B100 (230 cycles) mul(8) g37<1>F g9<4>.xyyyF g32<4>.wzzzF { align16 1Q }; cmp.nz.f0(8) null<1>F g32<4>.wzzzF g32<4>.wzzzF { align16 1Q switch }; (+f0) sel(8) g41<1>.xyUD g37<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g9<4>.xyyyF g9<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g43<1>.xyUD g37<4>.xyyyUD g41<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g37<4>F g37<4>F { align16 1Q switch }; (+f0) sel(8) g45<1>.xyUD g43<4>.xyyyUD g37<4>.xyyyUD { align16 1Q }; add(8) g46<1>.xyF g45<4>.xyyyF g32<4>.yxxxF { align16 1Q }; cmp.nz.f0(8) null<1>F g33<4>.zwwwF g33<4>.zwwwF { align16 1Q switch }; mul(8) g48<1>F g46<4>.xyyyF g33<4>.zwwwF { align16 1Q }; (+f0) sel(8) g50<1>.xyUD g48<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g46<4>.xyyyF g46<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g52<1>.xyUD g48<4>.xyyyUD g50<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g48<4>F g48<4>F { align16 1Q switch }; (+f0) sel(8) g54<1>.xyUD g52<4>.xyyyUD g48<4>.xyyyUD { align16 1Q }; add(8) g35<1>.yF g63<4>.yF -g54<4>.yF { align16 NoDDClr 1Q }; mov(8) g35<1>.xD g54<4>.xD { align16 NoDDChk 1Q compacted }; else(8) JIP: 18 { align16 1Q }; END B101 ->B103 START B102 <-B100 (84 cycles) cmp.nz.f0(8) g56<1>.xyF g63<4>.xF g39<4>.zwwwF { align16 1Q }; cmp.nz.f0(8) null<1>D g56<4>.xD 0D { align16 1Q switch }; (+f0) sel(8) g57<1>.xUD g30<4>.yUD g30<4>.wUD { align16 1Q }; mov(8) g85<1>.xD g57<4>.xD { align16 NoDDClr 1Q compacted }; mov(8) g86<1>.xD g57<4>.xD { align16 NoDDClr 1Q compacted }; cmp.nz.f0(8) null<1>D g56<4>.yD 0D { align16 1Q switch }; mov(8) g85<1>.yD g30<4>.zD { align16 NoDDChk 1Q }; mov(8) g86<1>.yD g30<4>.xD { align16 NoDDChk 1Q }; (+f0) sel(8) g35<1>.xyUD 
g86<4>.xyyyUD g85<4>.xyyyUD { align16 1Q }; END B102 ->B103 START B103 <-B102 <-B101 (6 cycles) endif(8) JIP: 4 { align16 1Q }; mov(8) g36<1>.xyD g35<4>.xyyyD { align16 1Q }; else(8) JIP: 166 { align16 1Q }; END B103 ->B114 START B104 <-B99 (18 cycles) cmp.l.f0(8) null<1>.xF g10<4>.xF g63<4>.yF { align16 1Q switch }; (+f0.x) if(8) JIP: 114 UIP: 158 { align16 1Q }; END B104 ->B105 ->B112 START B105 <-B104 (34 cycles) add(8) g60<1>.xF g53<4>.wF -g77<4>.xF { align16 1Q }; cmp.ge.f0(8) null<1>.xF g60<4>.xF g63<4>.wF { align16 1Q switch }; (+f0.x) if(8) JIP: 66 UIP: 104 { align16 1Q }; END B105 ->B106 ->B107 START B106 <-B105 (392 cycles) mov(8) g9<1>.zwD g63<4>.xxxyD { align16 1Q }; add(8) g64<1>.xD g99<4>.xD g66<4>.yD { align16 1Q }; add(8) g79<1>.xD g104<4>.xD g66<4>.yD { align16 1Q }; cmp.nz.f0(8) g72<1>F g9<4>F g9<4>F { align16 1Q compacted }; shl(8) g65<1>.xD g64<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g80<1>.xD g79<4>.xD g47<4>.xUD { align16 1Q }; add(8) g70<1>.xD g49<4>.xD g65<4>.xD { align16 1Q compacted }; add(8) g85<1>.xD g49<4>.xD g80<4>.xD { align16 1Q compacted }; send(8) g69<1>F g70<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g84<1>F g85<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g71<1>F g69<4>.wzyxF g9<4>F { align16 1Q }; mul(8) g86<1>F g84<4>.wzyxF g9<4>F { align16 1Q }; (+f0) sel(8) g73<1>UD g71<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g69<4>.wzyxF g69<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g75<1>UD g71<4>UD g73<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g71<4>F g71<4>F { align16 1Q switch }; (+f0) sel(8) g77<1>UD g75<4>UD g71<4>UD { align16 1Q }; dp4(8) g88<1>.xF g77<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g72<4>D 0D { align16 1Q switch }; (+f0) sel(8) g87<1>UD g86<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g84<4>.wzyxF g84<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g89<1>UD g86<4>UD g87<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g86<4>F g86<4>F { align16 1Q switch }; (+f0) sel(8) g91<1>UD g89<4>UD g86<4>UD { align16 1Q }; dp4(8) g88<1>.yF g91<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g33<4>.zwwwF g33<4>.zwwwF { align16 1Q switch }; mul(8) g93<1>F g88<4>.xyyyF g33<4>.zwwwF { align16 1Q }; (+f0) sel(8) g95<1>.xyUD g93<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g88<4>.xyyyF g88<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g18<1>.xyUD g93<4>.xyyyUD g95<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g93<4>F g93<4>F { align16 1Q switch }; (+f0) sel(8) g20<1>.xyUD g18<4>.xyyyUD g93<4>.xyyyUD { align16 1Q }; add(8) g37<1>.yF g63<4>.yF -g20<4>.yF { align16 NoDDClr 1Q }; mov(8) g37<1>.xD g20<4>.xD { align16 NoDDChk 1Q compacted }; else(8) JIP: 40 { align16 1Q }; END B106 ->B111 START B107 <-B105 (72 cycles) add(8) g22<1>.xD g99<4>.xD g66<4>.yD { align16 1Q }; add(8) g29<1>.xD g104<4>.xD g66<4>.yD { align16 1Q }; cmp.nz.f0(8) g41<1>.xF g63<4>.xF g39<4>.zF { align16 1Q }; cmp.z.f0(8) null<1>.xF g63<4>.xF g39<4>.wF { align16 1Q switch }; shl(8) g23<1>.xD g22<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g30<1>.xD g29<4>.xD g47<4>.xUD { align16 1Q }; add(8) g28<1>.xD g49<4>.xD g23<4>.xD { align16 1Q compacted }; add(8) g35<1>.xD g49<4>.xD g30<4>.xD { align16 1Q compacted }; send(8) g27<1>F g28<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g34<1>F g35<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mov(8) g25<1>.xyUD g27<4>.xyyyUD { align16 1Q }; 
mov(8) g32<1>.zwUD g34<4>.zzzwUD { align16 1Q }; mov(8) g8<1>.yzwD g34<4>.zzyxD { align16 1Q }; (+f0.x) if(8) JIP: 8 UIP: 12 { align16 1Q }; END B107 ->B108 ->B109 START B108 <-B107 (20 cycles) cmp.nz.f0(8) null<1>D g41<4>.xD 0D { align16 1Q switch }; (+f0) sel(8) g37<1>.xyUD g34<4>.yxxxUD g27<4>.wzzzUD { align16 1Q }; else(8) JIP: 6 { align16 1Q }; END B108 ->B110 START B109 <-B107 (18 cycles) cmp.nz.f0(8) null<1>D g41<4>.xD 0D { align16 1Q switch }; (+f0) sel(8) g37<1>.xyUD g25<4>.yxxxUD g32<4>.wzzzUD { align16 1Q }; END B109 ->B110 START B110 <-B109 <-B108 (2 cycles) endif(8) JIP: 2 { align16 1Q }; END B110 ->B111 START B111 <-B110 <-B106 (4 cycles) endif(8) JIP: 2 { align16 1Q }; else(8) JIP: 46 { align16 1Q }; END B111 ->B113 START B112 <-B104 (260 cycles) add(8) g43<1>.xD g99<4>.xD g66<4>.yD { align16 1Q }; cmp.nz.f0(8) null<1>D g40<4>D 0D { align16 1Q switch }; add(8) g58<1>.xD g104<4>.xD g66<4>.yD { align16 1Q }; shl(8) g44<1>.xD g43<4>.xD g47<4>.xUD { align16 1Q }; shl(8) g59<1>.xD g58<4>.xD g47<4>.xUD { align16 1Q }; add(8) g50<1>.xD g49<4>.xD g44<4>.xD { align16 1Q compacted }; add(8) g65<1>.xD g49<4>.xD g59<4>.xD { align16 1Q compacted }; send(8) g48<1>F g50<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g64<1>F g65<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g51<1>F g48<4>.wzyxF g16<4>F { align16 1Q }; mul(8) g66<1>F g64<4>.wzyxF g16<4>F { align16 1Q }; (+f0) sel(8) g52<1>UD g51<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g48<4>.wzyxF g48<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g54<1>UD g51<4>UD g52<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g51<4>F g51<4>F { align16 1Q switch }; (+f0) sel(8) g56<1>UD g54<4>UD g51<4>UD { align16 1Q }; dp4(8) g37<1>.xF g56<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g40<4>D 0D { align16 1Q switch }; (+f0) sel(8) g67<1>UD g66<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g64<4>.wzyxF g64<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g69<1>UD g66<4>UD g67<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g66<4>F g66<4>F { align16 1Q switch }; (+f0) sel(8) g71<1>UD g69<4>UD g66<4>UD { align16 1Q }; dp4(8) g37<1>.yF g71<4>F g103<4>F { align16 1Q compacted }; END B112 ->B113 START B113 <-B112 <-B111 (4 cycles) endif(8) JIP: 4 { align16 1Q }; mov(8) g36<1>.xyD g37<4>.xyyyD { align16 1Q }; END B113 ->B114 START B114 <-B113 <-B103 (52 cycles) endif(8) JIP: 33 { align16 1Q }; mov(8) g77<1>.xUD 0x00000050UD { align16 1Q compacted }; send(8) g76<1>F g77<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; cmp.nz.f0(8) null<1>.xD g76<4>.xD g96<4>.xD { align16 1Q switch }; (+f0.x) if(8) JIP: 8 UIP: 8 { align16 1Q }; END B114 ->B115 ->B116 START B115 <-B114 (34 cycles) add(8) g79<1>.yF g63<4>.yF -g36<4>.xF { align16 1Q }; mov(8) g36<1>.xD g36<4>.yD { align16 NoDDClr 1Q }; mov(8) g36<1>.yD g79<4>.yD { align16 NoDDChk 1Q }; END B115 ->B116 START B116 <-B114 <-B115 (52 cycles) endif(8) JIP: 18 { align16 1Q }; mov(8) g83<1>.xUD 0x00000060UD { align16 1Q compacted }; send(8) g82<1>F g83<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; cmp.nz.f0(8) null<1>.xD g82<4>.xD g96<4>.xD { align16 1Q switch }; (+f0.x) if(8) JIP: 4 UIP: 4 { align16 1Q }; END B116 ->B117 ->B118 START B117 <-B116 (2 cycles) add(8) g36<1>.xyF g63<4>.yF -g36<4>.yxxxF { align16 1Q }; END B117 ->B118 START B118 <-B116 <-B117 (8 cycles) endif(8) JIP: 7 { align16 1Q }; mov(8) g3<1>D g36<4>.xyyyD { align16 1Q }; mov(8) g2<1>.xD 
g109<4>.xD { align16 1Q compacted }; break(8) JIP: 2 UIP: 474 { align16 1Q }; END B118 ->B2 START B119 <-B3 (36 cycles) endif(8) JIP: 472 { align16 1Q }; cmp.z.f0(8) g86<1>.xD g100<4>.xD g15<4>.xD { align16 1Q compacted }; or.nz.f0(8) null<1>.xUD g38<4>.xUD g86<4>.xUD { align16 1Q compacted }; (+f0.x) if(8) JIP: 459 UIP: 459 { align16 1Q }; END B119 ->B120 ->B126 START B120 <-B119 (908 cycles) add(8) g88<1>.xD g110<4>.xD g14<4>.xD { align16 1Q compacted }; mov(8) g89<1>.xUD 0x00000004UD { align16 1Q compacted }; mov(8) g91<1>.xUD 0x00000140UD { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g12<4>.xyyyF g12<4>.xyyyF { align16 1Q switch }; add(8) g27<1>.xD g108<4>.xD g14<4>.xD { align16 1Q compacted }; mov(8) g42<1>.zwUD g1<4>.zzzwUD { align16 1Q }; add(8) g62<1>.xD g99<4>.xD g14<4>.xD { align16 1Q compacted }; add(8) g77<1>.xD g104<4>.xD g14<4>.xD { align16 1Q compacted }; add(8) g93<1>.xD g105<4>.xD g14<4>.xD { align16 1Q compacted }; mov(8) g31<1>.xUD 0x00000180UD { align16 1Q compacted }; shl(8) g90<1>.xD g88<4>.xD 0x00000004UD { align16 1Q }; shl(8) g28<1>.xD g27<4>.xD 0x00000004UD { align16 1Q }; shl(8) g63<1>.xD g62<4>.xD 0x00000004UD { align16 1Q }; shl(8) g78<1>.xD g77<4>.xD 0x00000004UD { align16 1Q }; shl(8) g94<1>.xD g93<4>.xD 0x00000004UD { align16 1Q }; send(8) g30<1>F g31<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; add(8) g18<1>.xD g90<4>.xD 320D { align16 1Q compacted }; add(8) g33<1>.xD g28<4>.xD 320D { align16 1Q compacted }; add(8) g68<1>.xD g63<4>.xD 320D { align16 1Q compacted }; add(8) g83<1>.xD g78<4>.xD 320D { align16 1Q compacted }; send(8) g95<1>F g18<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g32<1>F g33<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g67<1>F g68<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g82<1>F g83<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g20<1>F g95<4>.wzzzF g12<4>.xyyyF { align16 1Q }; mul(8) g52<1>F g11<4>.yF g95<4>.zF { align16 1Q }; mul(8) g43<1>F g32<4>.wF g1<4>.wF { align16 1Q }; (+f0) sel(8) g22<1>.xyUD g20<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g95<4>.wzzzF g95<4>.wzzzF { align16 1Q switch }; (+f0) sel(8) g24<1>.xyUD g20<4>.xyyyUD g22<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g20<4>F g20<4>F { align16 1Q switch }; (+f0) sel(8) g26<1>.xyUD g24<4>.xyyyUD g20<4>.xyyyUD { align16 1Q }; mul(8) g34<1>F g26<4>.xyyyF g11<4>.xyyyF { align16 1Q compacted }; add(8) g20<1>.xD g94<4>.xD 320D { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g11<4>.xyyyF g11<4>.xyyyF { align16 1Q switch }; send(8) g18<1>F g20<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; (+f0) sel(8) g36<1>.xyUD g34<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g26<4>.xyyyF g26<4>.xyyyF { align16 1Q switch }; (+f0) sel(8) g38<1>.xyUD g34<4>.xyyyUD g36<4>.xyyyUD { align16 1Q }; mov(8) g36<1>.xUD 0x00000190UD { align16 1Q compacted }; cmp.nz.f0(8) null<1>F g34<4>F g34<4>F { align16 1Q switch }; send(8) g35<1>F g36<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; (+f0) sel(8) g40<1>.xyUD g38<4>.xyyyUD g34<4>.xyyyUD { align16 1Q }; add(8) g41<1>.xyF g40<4>.xyyyF g11<4>.zwwwF { align16 1Q }; mov(8) g34<1>.yUD g35<4>.yUD { align16 1Q }; cmp.nz.f0(8) null<1>F g1<4>.wF g1<4>.wF { align16 1Q switch }; add(8) g51<1>.xyF g95<4>.yxxxF g41<4>.xyyyF { align16 1Q }; mov(8) g40<1>.xUD 
0x00000000UD { align16 1Q compacted }; (+f0) sel(8) g45<1>.xUD g43<4>.xUD g102<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g32<4>.wF g32<4>.wF { align16 1Q switch }; (+f0) sel(8) g47<1>.xUD g43<4>.xUD g45<4>.xUD { align16 1Q }; cmp.nz.f0(8) null<1>F g43<4>F g43<4>F { align16 1Q switch }; (+f0) sel(8) g49<1>.xUD g47<4>.xUD g43<4>.xUD { align16 1Q }; add(8) g50<1>.xF g49<4>.xF g32<4>.wF { align16 1Q }; cmp.nz.f0(8) null<1>F g95<4>.zF g95<4>.zF { align16 1Q switch }; add(8) g16<1>.xF g51<4>.xF g50<4>.xF { align16 1Q compacted }; add(8) g32<1>.zF -g18<4>.xF g30<4>.yF { align16 1Q }; (+f0) sel(8) g54<1>.yUD g52<4>.yUD g102<4>.yUD { align16 1Q }; cmp.nz.f0(8) null<1>F g11<4>.yF g11<4>.yF { align16 1Q switch }; (+f0) sel(8) g56<1>.yUD g52<4>.yUD g54<4>.yUD { align16 1Q }; cmp.nz.f0(8) null<1>F g52<4>F g52<4>F { align16 1Q switch }; (+f0) sel(8) g58<1>.yUD g56<4>.yUD g52<4>.yUD { align16 1Q }; add(8) g59<1>.yF g58<4>.yF -g11<4>.yF { align16 1Q }; add(8) g16<1>.yF g51<4>.yF g59<4>.yF { align16 NoDDClr 1Q }; mov(8) g16<1>.zwD g12<4>.zzzwD { align16 NoDDChk 1Q }; mul(8) g69<1>F g67<4>.wzyxF g16<4>F { align16 1Q }; cmp.nz.f0(8) g70<1>F g16<4>F g16<4>F { align16 1Q compacted }; mul(8) g84<1>F g82<4>.wzyxF g16<4>F { align16 1Q }; mul(8) g21<1>F g18<4>.wzyxF g16<4>F { align16 1Q }; (+f0) sel(8) g71<1>UD g69<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g67<4>.wzyxF g67<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g73<1>UD g69<4>UD g71<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g69<4>F g69<4>F { align16 1Q switch }; (+f0) sel(8) g75<1>UD g73<4>UD g69<4>UD { align16 1Q }; dp4(8) g76<1>.xF g75<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g70<4>D 0D { align16 1Q switch }; mov(8) g38<1>.xD g76<4>.xD { align16 1Q compacted }; (+f0) sel(8) g85<1>UD g84<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g82<4>.wzyxF g82<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g87<1>UD g84<4>UD g85<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g84<4>F g84<4>F { align16 1Q switch }; (+f0) sel(8) g90<1>UD g87<4>UD g84<4>UD { align16 1Q }; dp4(8) g92<1>.xF g90<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g70<4>D 0D { align16 1Q switch }; mov(8) g38<1>.yD g92<4>.xD { align16 1Q }; (+f0) sel(8) g22<1>UD g21<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g18<4>.wzyxF g18<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g24<1>UD g21<4>UD g22<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g21<4>F g21<4>F { align16 1Q switch }; (+f0) sel(8) g26<1>UD g24<4>UD g21<4>UD { align16 1Q }; dp4(8) g27<1>.xF g26<4>F g103<4>F { align16 1Q compacted }; cmp.l.f0(8) g93<1>.yF g35<4>.xF g32<4>.zF { align16 NoDDClr 1Q }; mov(8) g38<1>.zD g27<4>.xD { align16 NoDDClr 1Q }; cmp.nz.f0(8) g93<1>.xF g35<4>.xF g30<4>.wF { align16 NoDDChk 1Q }; mov(8) g38<1>.wD g12<4>.wD { align16 NoDDChk 1Q }; not(8) g39<1>.xyD g93<4>.xyyyD { align16 1Q }; cmp.z.f0(8) null<1>D g39<4>.xyyyD g106<4>.xyyyD { align16 1Q switch }; (+f0.all4h) mov(8) g40<1>.xD -1D { align16 1Q }; mov.nz.f0(8) null<1>.xD g40<4>.xD { align16 1Q }; (+f0.x) if(8) JIP: 27 UIP: 27 { align16 1Q }; END B120 ->B121 ->B122 START B121 <-B120 (164 cycles) math inv(8) g41<1>.xF g32<4>.zF null<4>F { align16 1Q }; add(8) g43<1>.xF g76<4>.xF g30<4>.wF { align16 1Q }; mul(8) g44<1>F g30<4>.zF g41<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F g41<4>.xF g41<4>.xF { align16 1Q switch }; (+f0) sel(8) g47<1>.xF -g44<4>.xF g102<4>.xF { align16 1Q }; cmp.nz.f0(8) null<1>F -g30<4>.zF -g30<4>.zF { align16 1Q switch }; (+f0) sel(8) g49<1>.xF -g44<4>.xF g47<4>.xF { align16 1Q }; 
cmp.nz.f0(8) null<1>F -g44<4>F -g44<4>F { align16 1Q switch }; (+f0) sel(8) g51<1>.xF g49<4>.xF -g44<4>.xF { align16 1Q }; add(8) g38<1>.xF g51<4>.xF g43<4>.xF { align16 NoDDClr 1Q compacted }; mov(8) g38<1>.yD g92<4>.xD { align16 NoDDClr,NoDDChk 1Q }; mov(8) g38<1>.zD g27<4>.xD { align16 NoDDClr,NoDDChk 1Q }; mov(8) g38<1>.wD g12<4>.wD { align16 NoDDChk 1Q }; END B121 ->B122 START B122 <-B120 <-B121 (444 cycles) endif(8) JIP: 254 { align16 1Q }; mov(8) g55<1>.xUD 0x00000140UD { align16 1Q compacted }; cmp.nz.f0(8) g57<1>F g38<4>F g38<4>F { align16 1Q compacted }; mov(8) g67<1>.xUD 0x00000150UD { align16 1Q compacted }; mov(8) g78<1>.xUD 0x00000160UD { align16 1Q compacted }; mov(8) g90<1>.xUD 0x00000170UD { align16 1Q compacted }; mov(8) g22<1>.xUD 0x00000020UD { align16 1Q compacted }; send(8) g54<1>F g55<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g66<1>F g67<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g77<1>F g78<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g88<1>F g90<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g21<1>F g22<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g56<1>F g54<4>.wzyxF g38<4>F { align16 1Q }; mul(8) g68<1>F g66<4>.wzyxF g38<4>F { align16 1Q }; mul(8) g79<1>F g77<4>.wzyxF g38<4>F { align16 1Q }; mul(8) g92<1>F g88<4>.wzyxF g38<4>F { align16 1Q }; (+f0) sel(8) g58<1>UD g56<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g54<4>.wzyxF g54<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g60<1>UD g56<4>UD g58<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g56<4>F g56<4>F { align16 1Q switch }; (+f0) sel(8) g62<1>UD g60<4>UD g56<4>UD { align16 1Q }; dp4(8) g7<1>.xF g62<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g57<4>D 0D { align16 1Q switch }; (+f0) sel(8) g69<1>UD g68<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g66<4>.wzyxF g66<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g71<1>UD g68<4>UD g69<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g68<4>F g68<4>F { align16 1Q switch }; (+f0) sel(8) g73<1>UD g71<4>UD g68<4>UD { align16 1Q }; dp4(8) g7<1>.yF g73<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g57<4>D 0D { align16 1Q switch }; (+f0) sel(8) g80<1>UD g79<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g77<4>.wzyxF g77<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g82<1>UD g79<4>UD g80<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g79<4>F g79<4>F { align16 1Q switch }; (+f0) sel(8) g84<1>UD g82<4>UD g79<4>UD { align16 1Q }; dp4(8) g7<1>.zF g84<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>D g57<4>D 0D { align16 1Q switch }; (+f0) sel(8) g93<1>UD g92<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g88<4>.wzyxF g88<4>.wzyxF { align16 1Q switch }; (+f0) sel(8) g95<1>UD g92<4>UD g93<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g92<4>F g92<4>F { align16 1Q switch }; (+f0) sel(8) g18<1>UD g95<4>UD g92<4>UD { align16 1Q }; dp4(8) g7<1>.wF g18<4>F g103<4>F { align16 1Q compacted }; cmp.nz.f0(8) null<1>.xD g21<4>.xD g96<4>.xD { align16 1Q switch }; (+f0.x) if(8) JIP: 76 UIP: 148 { align16 1Q }; END B122 ->B123 ->B124 START B123 <-B122 (362 cycles) sel.ge(8) g24<1>F -g42<4>.zwzwF g42<4>.zwzwF { align16 1Q }; mov(8) g25<1>.xyD g13<4>.zxxxF { align16 1Q }; cmp.nz.f0(8) null<1>F g24<4>F g24<4>F { align16 1Q switch }; mov(8) g39<1>.yD g24<4>.yD { align16 1Q }; mov(8) g17<1>.xyD g25<4>.xyyyD { align16 NoDDClr 1Q }; 
add(8) g26<1>.xD g99<4>.xD g25<4>.xD { align16 1Q compacted }; add(8) g45<1>.xD g111<4>.xD g25<4>.yD { align16 1Q }; add(8) g60<1>.xD g19<4>.xD g25<4>.yD { align16 1Q }; mov(8) g17<1>.zD g14<4>.zD { align16 NoDDChk 1Q }; shl(8) g27<1>.xD g26<4>.xD g89<4>.xUD { align16 1Q }; shl(8) g46<1>.xD g45<4>.xD g89<4>.xUD { align16 1Q }; shl(8) g61<1>.xD g60<4>.xD g89<4>.xUD { align16 1Q }; add(8) g32<1>.xD g91<4>.xD g27<4>.xD { align16 1Q compacted }; add(8) g51<1>.xD g91<4>.xD g46<4>.xD { align16 1Q compacted }; add(8) g66<1>.xD g91<4>.xD g61<4>.xD { align16 1Q compacted }; send(8) g31<1>F g32<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g50<1>F g51<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g65<1>F g66<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g33<1>F g31<4>.yxxxF g24<4>F { align16 1Q }; mov(8) g40<1>D g50<4>.wzyxD { align16 1Q }; (+f0) sel(8) g35<1>.xyUD g33<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g31<4>.yxxxF g31<4>.yxxxF { align16 1Q switch }; (+f0) sel(8) g37<1>.xyUD g33<4>.xyyyUD g35<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g33<4>F g33<4>F { align16 1Q switch }; (+f0) sel(8) g41<1>.xyUD g37<4>.xyyyUD g33<4>.xyyyUD { align16 1Q }; cmp.l.f0(8) g43<1>.zwF g24<4>.zzzwF g34<4>.yF { align16 1Q }; cmp.nz.f0(8) null<1>F g31<4>.wzzzF g31<4>.wzzzF { align16 1Q switch }; mov(8) g44<1>.zwF -g43<4>.zzzwD { align16 1Q }; mul(8) g52<1>F g44<4>.zwwwF g31<4>.wzzzF { align16 1Q }; (+f0) sel(8) g54<1>.xyUD g52<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g44<4>.zwwwF g44<4>.zwwwF { align16 1Q switch }; (+f0) sel(8) g56<1>.xyUD g52<4>.xyyyUD g54<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g52<4>F g52<4>F { align16 1Q switch }; (+f0) sel(8) g58<1>.xyUD g56<4>.xyyyUD g52<4>.xyyyUD { align16 1Q }; add(8) g59<1>.xyF g58<4>.xyyyF g41<4>.xyyyF { align16 1Q compacted }; add(8) g3<1>.yF g34<4>.yF -g59<4>.yF { align16 NoDDClr 1Q }; mov(8) g41<1>D g65<4>.wzyxD { align16 1Q }; mov(8) g3<1>.xD g59<4>.xD { align16 NoDDClr,NoDDChk 1Q compacted }; mov(8) g3<1>.zwD g10<4>.zzzwD { align16 NoDDChk 1Q }; else(8) JIP: 74 { align16 1Q }; END B123 ->B125 START B124 <-B122 (360 cycles) sel.ge(8) g68<1>F -g42<4>.zwzwF g42<4>.zwzwF { align16 1Q }; mov(8) g69<1>.xyD g13<4>.zwwwF { align16 1Q }; cmp.nz.f0(8) null<1>F g68<4>F g68<4>F { align16 1Q switch }; mov(8) g39<1>.yD g68<4>.yD { align16 1Q }; mov(8) g17<1>.xyD g69<4>.xyyyD { align16 NoDDClr 1Q }; add(8) g70<1>.xD g99<4>.xD g69<4>.xD { align16 1Q compacted }; add(8) g86<1>.xD g99<4>.xD g69<4>.yD { align16 1Q }; add(8) g24<1>.xD g104<4>.xD g69<4>.yD { align16 1Q }; mov(8) g17<1>.zD g14<4>.zD { align16 NoDDChk 1Q }; shl(8) g71<1>.xD g70<4>.xD g89<4>.xUD { align16 1Q }; shl(8) g87<1>.xD g86<4>.xD g89<4>.xUD { align16 1Q }; shl(8) g25<1>.xD g24<4>.xD g89<4>.xUD { align16 1Q }; add(8) g76<1>.xD g91<4>.xD g71<4>.xD { align16 1Q compacted }; add(8) g93<1>.xD g91<4>.xD g87<4>.xD { align16 1Q compacted }; add(8) g30<1>.xD g91<4>.xD g25<4>.xD { align16 1Q compacted }; send(8) g75<1>F g76<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g92<1>F g93<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; send(8) g29<1>F g30<4>.xUD sampler ld SIMD4x2 Surface = 0 Sampler = 0 mlen 1 rlen 1 { align16 1Q }; mul(8) g77<1>F g75<4>.yxxxF g68<4>F { align16 1Q }; mov(8) g40<1>D g92<4>.wzyxD { align16 1Q }; mov(8) g41<1>D g29<4>.wzyxD { align16 1Q }; (+f0) sel(8) 
g79<1>.xyUD g77<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g75<4>.yxxxF g75<4>.yxxxF { align16 1Q switch }; (+f0) sel(8) g81<1>.xyUD g77<4>.xyyyUD g79<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g77<4>F g77<4>F { align16 1Q switch }; (+f0) sel(8) g83<1>.xyUD g81<4>.xyyyUD g77<4>.xyyyUD { align16 1Q }; cmp.l.f0(8) g84<1>.zwF g68<4>.zzzwF g34<4>.yF { align16 1Q }; cmp.nz.f0(8) null<1>F g75<4>.wzzzF g75<4>.wzzzF { align16 1Q switch }; mov(8) g85<1>.zwF -g84<4>.zzzwD { align16 1Q }; mul(8) g94<1>F g85<4>.zwwwF g75<4>.wzzzF { align16 1Q }; (+f0) sel(8) g18<1>.xyUD g94<4>.xyyyUD g102<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g85<4>.zwwwF g85<4>.zwwwF { align16 1Q switch }; (+f0) sel(8) g20<1>.xyUD g94<4>.xyyyUD g18<4>.xyyyUD { align16 1Q }; cmp.nz.f0(8) null<1>F g94<4>F g94<4>F { align16 1Q switch }; (+f0) sel(8) g22<1>.xyUD g20<4>.xyyyUD g94<4>.xyyyUD { align16 1Q }; add(8) g23<1>.xyF g22<4>.xyyyF g83<4>.xyyyF { align16 1Q compacted }; add(8) g3<1>.yF g34<4>.yF -g23<4>.yF { align16 NoDDClr 1Q }; mov(8) g3<1>.xD g23<4>.xD { align16 NoDDClr,NoDDChk 1Q compacted }; mov(8) g3<1>.zwD g10<4>.zzzwD { align16 NoDDChk 1Q }; END B124 ->B125 START B125 <-B124 <-B123 (118 cycles) endif(8) JIP: 28 { align16 1Q }; add(8) g32<1>F g41<4>F -g40<4>F { align16 1Q }; cmp.nz.f0(8) null<1>F g39<4>.yF g39<4>.yF { align16 1Q switch }; mov(8) g10<1>.zwD g3<4>.zzzwD { align16 1Q }; mov(8) g5<1>D g3<4>D { align16 1Q }; mov(8) g4<1>D g3<4>D { align16 1Q }; mov(8) g2<1>.xD g109<4>.xD { align16 1Q compacted }; mul(8) g33<1>F g32<4>F g39<4>.yF { align16 1Q }; (+f0) sel(8) g35<1>UD g33<4>UD g102<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g32<4>F g32<4>F { align16 1Q switch }; (+f0) sel(8) g37<1>UD g33<4>UD g35<4>UD { align16 1Q }; cmp.nz.f0(8) null<1>F g33<4>F g33<4>F { align16 1Q switch }; (+f0) sel(8) g39<1>UD g37<4>UD g33<4>UD { align16 1Q }; add(8) g6<1>F g39<4>F g40<4>F { align16 1Q compacted }; break(8) JIP: 2 UIP: 11 { align16 1Q }; END B125 ->B2 START B126 <-B119 (10 cycles) endif(8) JIP: 9 { align16 1Q }; mov(8) g17<1>.xyzD g14<4>.xyzzD { align16 1Q }; mov(8) g16<1>D g12<4>D { align16 1Q }; mov(8) g2<1>.xD g109<4>.xD { align16 1Q compacted }; break(8) JIP: 2 UIP: 2 { align16 1Q }; END B126 ->B2 START B127 (2 cycles) while(8) JIP: -2691 { align16 1Q }; END B127 ->B3 START B128 <-B2 (18 cycles) mov.nz.f0(8) null<1>.xD g2<4>.xD { align16 1Q }; (+f0.x) break(8) JIP: 6 UIP: 6 { align16 1Q }; END B128 ->B1 ->B129 START B129 <-B128 (6 cycles) mov(8) g14<1>.xyzD g17<4>.xyzzD { align16 1Q }; mov(8) g12<1>D g16<4>D { align16 1Q }; while(8) JIP: -2701 { align16 1Q }; END B129 ->B2 START B130 <-B1 (28 cycles) mov(8) g114<1>UD 0x00000000UD { align16 1Q compacted }; mov(8) g118<1>F g7<4>F { align16 1Q }; mov(8) g119<1>F g6<4>F { align16 1Q }; mov(8) g120<1>F g5<4>F { align16 1Q }; mov(8) g121<1>F g4<4>F { align16 1Q }; mov(8) g122<1>F g3<4>F { align16 1Q }; mov(8) g113<1>UD g0<4>UD { align16 WE_all 1Q }; or(1) g113.5<1>UD g0.5<0,1,0>UD 0x0000ff00UD { align1 WE_all 1N }; send(8) null<1>F g113<4>F urb 0 write HWord interleave complete mlen 11 rlen 0 { align16 1Q EOT }; END B130 7307: message: shader compiler issue 28: VS vec4 shader: 1473 inst, 2 loops, 1394952 cycles, 0:0 spills:fills, compacted 23568 to 22144 bytes. 7307: message: api performance issue 29: Copying to larger program cache: 16 kB -> 32 kB 7326: message: shader compiler issue 30: GS vec4 shader: 60 inst, 0 loops, 224 cycles, 0:0 spills:fills, compacted 960 to 816 bytes. 
7368-11332: message: api performance issue 23: using glBufferSubData(buffer 4, offset 0, size 1120) to update a GL_STATIC_DRAW buffer  [the same message repeats for call after call, differing only in the call number, interleaved with: api performance issue 22: Failed to fast clear 240x400 depth because of scissors. Possible 5% performance win if avoided.]
11409: message: shader compiler issue 31: FS SIMD8 shader: 64 inst, 0 loops, 473 cycles, 0:0 spills:fills, Promoted 2 constants, compacted 1024 to 736 bytes.
11409: message: shader compiler issue 32: FS SIMD16 shader: 65 inst, 0 loops, 548 cycles, 0:0 spills:fills, Promoted 2 constants, compacted 1040 to 752 bytes.
11435-12640: message: api performance issue 23: using glBufferSubData(buffer 4, offset 0, size 1120) to update a GL_STATIC_DRAW buffer  [repeats continue, still interleaved with issue 22]
12690: message: api performance issue 23: too many identical messages; ignoring
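The repeated api performance issue 23 means buffer 4 was allocated with the GL_STATIC_DRAW usage hint but is rewritten with glBufferSubData over and over during the replay. As a minimal sketch, not taken from Citra's code (the buffer name, data layout, and the libepoxy loader used for prototypes are assumptions), the usual ways to avoid this warning are to allocate the buffer with a usage hint that matches per-frame updates and to orphan the old storage before each partial upload:

#include <epoxy/gl.h>   /* any GL loader that exposes the GL 1.5+ entry points works here */

/* Hypothetical per-frame vertex data; 1120 bytes matches the size in the log. */
static unsigned char frame_data[1120];

/* Allocate once with a usage hint that matches reality: a buffer rewritten
 * every frame belongs in GL_DYNAMIC_DRAW (or GL_STREAM_DRAW), not
 * GL_STATIC_DRAW. */
static void create_stream_vbo(GLuint vbo)
{
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(frame_data), NULL, GL_DYNAMIC_DRAW);
}

/* Per-frame update: re-specifying the storage with NULL first ("orphaning")
 * lets the driver hand back fresh memory instead of synchronizing with draws
 * that still read the old contents. */
static void update_stream_vbo(GLuint vbo)
{
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(frame_data), NULL, GL_DYNAMIC_DRAW);
    glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(frame_data), frame_data);
}

Either variant uploads the same bytes; only the usage declaration and the update pattern change, which is what the driver's heuristic is complaining about.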
12871-47626: message: api performance issue 22: Failed to fast clear 240x400 depth because of scissors. Possible 5% performance win if avoided.  [the same message repeats for the remainder of the replay, differing only in the call number]
48045: message: api performance issue 22: too many identical messages; ignoring
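The api performance issue 22 stream is the driver reporting that it could not take its fast depth-clear path for the 240x400 target because a scissor rectangle was in effect when glClear was issued. Assuming the clear is actually meant to cover the whole render target, a small sketch of the common workaround follows (the function name is illustrative, not from the traced application; prototypes again assumed via libepoxy):

#include <epoxy/gl.h>

/* Clear depth for the whole 240x400 target.  glClear honours the scissor
 * test, and a scissored clear forces the driver off its fast-clear path,
 * which is exactly what issue 22 reports. */
static void clear_depth_fullscreen(void)
{
    GLboolean had_scissor = glIsEnabled(GL_SCISSOR_TEST);

    if (had_scissor)
        glDisable(GL_SCISSOR_TEST);

    glClearDepth(1.0);
    glClear(GL_DEPTH_BUFFER_BIT);

    if (had_scissor)
        glEnable(GL_SCISSOR_TEST);
}

If the clear really must be limited to a sub-rectangle, the warning is expected and the "possible 5% win" simply does not apply.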
Mesa: User error: GL_INVALID_OPERATION in glGetNamedBufferPointerv(non-existent buffer object 2)
80064: message: major api error 33: GL_INVALID_OPERATION in glGetNamedBufferPointerv(non-existent buffer object 2)
80064 @1 glDeleteBuffers(n = 1, buffer = &2)
80064: warning: glGetError(glDeleteBuffers) = GL_INVALID_OPERATION
Rendered 249 frames in 1.50747 secs, average of 165.177 fps
vadym@vadym-HP-ProBook-640-G1:~/mesa/mesa-shader-db$
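The one hard error in the replay is the GL_INVALID_OPERATION from glGetNamedBufferPointerv on buffer object 2, raised while the traced glDeleteBuffers(n = 1, buffer = &2) call is replayed at call 80064. glGetNamedBufferPointerv is a direct-state-access query and only accepts names that currently refer to an existing buffer object, which buffer 2 evidently does not in the replayed context. A small sketch of the valid versus invalid case, assuming a GL 4.5 context (names and sizes are illustrative, and libepoxy is again assumed only for prototypes):

#include <epoxy/gl.h>
#include <stddef.h>

static void named_buffer_query_demo(void)
{
    GLuint buf;
    void *mapped = NULL;

    /* glCreateBuffers (GL 4.5 / ARB_direct_state_access) creates the buffer
     * object immediately, so DSA calls on the name are valid. */
    glCreateBuffers(1, &buf);
    glNamedBufferData(buf, 1024, NULL, GL_DYNAMIC_DRAW);

    /* Valid: the object exists; the query writes NULL because the buffer is
     * not currently mapped. */
    glGetNamedBufferPointerv(buf, GL_BUFFER_MAP_POINTER, &mapped);

    glDeleteBuffers(1, &buf);

    /* Invalid: the name no longer refers to a buffer object, so this raises
     * GL_INVALID_OPERATION -- the same error the replay reports for buffer
     * object 2. */
    glGetNamedBufferPointerv(buf, GL_BUFFER_MAP_POINTER, &mapped);
}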