Playing: /home/den/Downloads/clipcanvas_14348_ProResHQ_720p50.mov
 (+) Video --vid=1 (*) (prores 1280x720 50.000fps)
 (+) Audio --aid=1 --alang=eng (*) (pcm_s16be 2ch 48000Hz)
AO: [pulse] 48000Hz stereo 2ch s16
VO: [gpu] 1280x720 yuv422p10
GLSL IR for native compute shader 19:
(
(declare (location=7 uniform ) vec2 tex_scale0)
(declare (location=6 uniform ) vec2 out_scale)
(declare (location=5 uniform ) vec2 pixel_size0)
(declare (location=4 uniform ) vec2 texture_off0)
(declare (location=3 uniform ) mat2 texture_rot0)
(declare (location=2 uniform ) vec2 texture_size0)
(declare (location=1 uniform ) vec3 dst_luma)
(declare (location=0 uniform ) vec3 src_luma)
(declare (location=32 sys ) uvec3 gl_GlobalInvocationID)
(declare (location=33 sys ) uvec3 gl_WorkGroupID)
(declare (location=30 sys ) uvec3 gl_LocalInvocationID)
(declare (location=8 uniform ) sampler1D lut)
(declare (location=9 format=881a writeonly uniform ) image2D out_image)
(declare (location=10 uniform ) sampler2D texture0)
(declare (shader_shared ) (array float 420) in0)
(declare (shader_shared ) (array float 420) in1)
(declare (shader_shared ) (array float 420) in2)
( function main (signature void (parameters )
(
(declare () int y) (declare () float wsum) (declare () int idx) (declare () vec4 color) (declare (temporary ) vec2 assignment_tmp) (declare (temporary ) mat2 ubo_load_temp) (declare (temporary ) uint ubo_load_temp_offset) (assign (x) (var_ref ubo_load_temp_offset) (constant uint (0)) ) (assign (xy) (array_ref (var_ref ubo_load_temp) (constant uint (0)) ) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset) (constant uint (48)) ) ) ) (assign (xy) (array_ref (var_ref ubo_load_temp) (constant uint (1)) ) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset) (constant uint (64)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@2) (declare (temporary ) uint ubo_load_temp_offset@3) (assign (x) (var_ref ubo_load_temp_offset@3) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@2) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@3) (constant uint (104)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@4) (declare (temporary ) uint ubo_load_temp_offset@5) (assign (x) (var_ref ubo_load_temp_offset@5) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@4) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@5) (constant uint (96)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@6) (declare (temporary ) uint ubo_load_temp_offset@7) (assign (x) (var_ref ubo_load_temp_offset@7) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@6) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@7) (constant uint (88)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@8) (declare (temporary ) uint ubo_load_temp_offset@9) (assign (x) (var_ref ubo_load_temp_offset@9) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@8) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@9) (constant uint (80)) ) ) ) (declare (temporary ) vec2 flattening_tmp) (declare (temporary ) vec2 mat_op_to_vec) (assign (xy) (var_ref mat_op_to_vec) (expression vec2 * (var_ref ubo_load_temp@2) (expression vec2 * (var_ref ubo_load_temp@4) (expression vec2 + (swiz xy (expression vec3 u2f (var_ref gl_GlobalInvocationID) ) )(constant vec2 (0.500000 0.500000)) ) ) ) ) (assign (xy) (var_ref flattening_tmp)
(expression vec2 + (expression vec2 * (array_ref (var_ref ubo_load_temp) (constant int (0)) ) (swiz x (var_ref mat_op_to_vec) )) (expression vec2 * (array_ref (var_ref ubo_load_temp) (constant int (1)) ) (swiz y (var_ref mat_op_to_vec) )) ) ) (assign (xy) (var_ref assignment_tmp) (expression vec2 + (var_ref flattening_tmp) (expression vec2 * (var_ref ubo_load_temp@6) (var_ref ubo_load_temp@8) ) ) ) (assign (xyzw) (var_ref color) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) (declare (temporary ) vec2 assignment_tmp@10) (declare (temporary ) mat2 ubo_load_temp@11) (declare (temporary ) uint ubo_load_temp_offset@12) (assign (x) (var_ref ubo_load_temp_offset@12) (constant uint (0)) ) (assign (xy) (array_ref (var_ref ubo_load_temp@11) (constant uint (0)) ) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@12) (constant uint (48)) ) ) ) (assign (xy) (array_ref (var_ref ubo_load_temp@11) (constant uint (1)) ) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@12) (constant uint (64)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@13) (declare (temporary ) uint ubo_load_temp_offset@14) (assign (x) (var_ref ubo_load_temp_offset@14) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@13) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@14) (constant uint (104)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@15) (declare (temporary ) uint ubo_load_temp_offset@16) (assign (x) (var_ref ubo_load_temp_offset@16) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@15) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@16) (constant uint (96)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@17) (declare (temporary ) uint ubo_load_temp_offset@18) (assign (x) (var_ref ubo_load_temp_offset@18) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@17) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@18) (constant uint (88)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@19) (declare (temporary ) uint ubo_load_temp_offset@20) (assign (x) (var_ref ubo_load_temp_offset@20) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@19) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@20) (constant uint (80)) ) ) ) (declare (temporary ) vec2 flattening_tmp@21) (declare (temporary ) vec2 mat_op_to_vec@22) (assign (xy) (var_ref mat_op_to_vec@22) (expression vec2 * (var_ref ubo_load_temp@13) (expression vec2 * (var_ref ubo_load_temp@15) (expression vec2 + (swiz xy (expression vec3 u2f (expression uvec3 * (var_ref gl_WorkGroupID) (constant uvec3 (32 8 1)) ) ) )(constant vec2 (0.500000 0.500000)) ) ) ) ) (assign (xy) (var_ref flattening_tmp@21) (expression vec2 + (expression vec2 * (array_ref (var_ref ubo_load_temp@11) (constant int (0)) ) (swiz x (var_ref mat_op_to_vec@22) )) (expression vec2 * (array_ref (var_ref ubo_load_temp@11) (constant int (1)) ) (swiz y (var_ref mat_op_to_vec@22) )) ) ) (assign (xy) (var_ref assignment_tmp@10) (expression vec2 + (var_ref flattening_tmp@21) (expression vec2 * (var_ref ubo_load_temp@17) (var_ref ubo_load_temp@19) ) ) ) (declare (temporary ) vec2 assignment_tmp@23) (declare (temporary ) vec2 ubo_load_temp@24) (declare (temporary ) uint ubo_load_temp_offset@25) (assign (x) (var_ref ubo_load_temp_offset@25) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@24) (expression vec2 ubo_load 
(constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@25) (constant uint (88)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@26) (declare (temporary ) uint ubo_load_temp_offset@27) (assign (x) (var_ref ubo_load_temp_offset@27) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@26) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@27) (constant uint (32)) ) ) ) (assign (xy) (var_ref assignment_tmp@23) (expression vec2 + (var_ref assignment_tmp@10) (expression vec2 neg (expression vec2 * (var_ref ubo_load_temp@24) (expression vec2 fract (expression vec2 + (expression vec2 * (var_ref assignment_tmp@10) (var_ref ubo_load_temp@26) ) (constant vec2 (-0.500000 -0.500000)) ) ) ) ) ) ) (declare (temporary ) vec2 fract_retval) (declare (temporary ) vec2 ubo_load_temp@28) (declare (temporary ) uint ubo_load_temp_offset@29) (assign (x) (var_ref ubo_load_temp_offset@29) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@28) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@29) (constant uint (32)) ) ) ) (assign (xy) (var_ref fract_retval) (expression vec2 fract (expression vec2 + (expression vec2 * (var_ref assignment_tmp) (var_ref ubo_load_temp@28) ) (constant vec2 (-0.500000 -0.500000)) ) ) ) (declare (temporary ) ivec2 vec_ctor) (declare (temporary ) vec2 ubo_load_temp@30) (declare (temporary ) uint ubo_load_temp_offset@31) (assign (x) (var_ref ubo_load_temp_offset@31) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@30) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@31) (constant uint (32)) ) ) ) (declare (temporary ) vec2 ubo_load_temp@32) (declare (temporary ) uint ubo_load_temp_offset@33) (assign (x) (var_ref ubo_load_temp_offset@33) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@32) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@33) (constant uint (88)) ) ) ) (assign (xy) (var_ref vec_ctor) (expression ivec2 f2i (expression vec2 round_even (expression vec2 * (expression vec2 + (expression vec2 + (var_ref assignment_tmp) (expression vec2 neg (expression vec2 * (var_ref ubo_load_temp@32) (var_ref fract_retval) ) ) ) (expression vec2 neg (var_ref assignment_tmp@23) ) ) (var_ref ubo_load_temp@30) ) ) ) ) (assign (x) (var_ref wsum) (constant float (0.000000)) ) (assign (x) (var_ref y) (expression int u2i (swiz y (var_ref gl_LocalInvocationID) )) ) (loop ( (declare () int x) (if (expression bool >= (var_ref y) (constant int (14)) ) ( break ) ()) (assign (x) (var_ref x) (expression int u2i (swiz x (var_ref gl_LocalInvocationID) )) ) (loop ( (if (expression bool >= (var_ref x) (constant int (30)) ) ( break ) ()) (declare (temporary ) vec2 vec_ctor@34) (assign (x) (var_ref vec_ctor@34) (expression float i2f (expression int + (var_ref x) (constant int (-3)) ) ) ) (assign (y) (var_ref vec_ctor@34) (expression float i2f (expression int + (var_ref y) (constant int (-3)) ) ) ) (declare (temporary ) vec4 texture_retval) (declare (temporary ) vec2 ubo_load_temp@35) (declare (temporary ) uint ubo_load_temp_offset@36) (assign (x) (var_ref ubo_load_temp_offset@36) (constant uint (0)) ) (assign (xy) (var_ref ubo_load_temp@35) (expression vec2 ubo_load (constant uint (0)) (expression uint + (var_ref ubo_load_temp_offset@36) (constant uint (88)) ) ) ) (assign (xyzw) (var_ref texture_retval) (tex vec4 (var_ref texture0) (expression vec2 + (var_ref assignment_tmp@23) (expression vec2 * 
(var_ref ubo_load_temp@35) (var_ref vec_ctor@34) ) ) 0 1 () )) (declare (temporary ) float shared_store_temp) (declare (temporary ) uint shared_store_temp_offset) (assign (x) (var_ref shared_store_temp_offset) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (expression int * (constant int (30)) (var_ref y) ) (var_ref x) ) ) (constant uint (4)) ) ) ) (assign (x) (var_ref shared_store_temp) (swiz x (var_ref texture_retval) )) (call __intrinsic_store_shared ((expression uint + (var_ref shared_store_temp_offset) (constant uint (0)) ) (var_ref shared_store_temp) (constant uint (1)) )) (declare (temporary ) float shared_store_temp@37) (declare (temporary ) uint shared_store_temp_offset@38) (assign (x) (var_ref shared_store_temp_offset@38) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (expression int * (constant int (30)) (var_ref y) ) (var_ref x) ) ) (constant uint (4)) ) ) ) (assign (x) (var_ref shared_store_temp@37) (swiz y (var_ref texture_retval) )) (call __intrinsic_store_shared ((expression uint + (var_ref shared_store_temp_offset@38) (constant uint (1680)) ) (var_ref shared_store_temp@37) (constant uint (1)) )) (declare (temporary ) float shared_store_temp@39) (declare (temporary ) uint shared_store_temp_offset@40) (assign (x) (var_ref shared_store_temp_offset@40) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (expression int * (constant int (30)) (var_ref y) ) (var_ref x) ) ) (constant uint (4)) ) ) ) (assign (x) (var_ref shared_store_temp@39) (swiz z (var_ref texture_retval) )) (call __intrinsic_store_shared ((expression uint + (var_ref shared_store_temp_offset@40) (constant uint (3360)) ) (var_ref shared_store_temp@39) (constant uint (1)) )) (assign (x) (var_ref x) (expression int + (var_ref x) (constant int (32)) ) ) )) (assign (x) (var_ref y) (expression int + (var_ref y) (constant int (8)) ) ) )) (call __intrinsic_group_memory_barrier ()) (barrier) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (3)) ) ) (declare (temporary ) float length_retval) (declare (temporary ) vec2 x@41) (assign (xy) (var_ref x@41) (expression vec2 + (constant vec2 (0.000000 -3.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval) (expression float sqrt (expression float dot (var_ref x@41) (var_ref x@41) ) ) ) (if (expression bool < (var_ref length_retval) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@42) (assign (xyzw) (var_ref texture_retval@42) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (swiz x (var_ref texture_retval@42) )) (declare (temporary ) float shared_load_temp) (declare (temporary ) uint shared_load_temp_offset) (assign (x) (var_ref shared_load_temp_offset) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result) (call __intrinsic_load_shared (var_ref shared_load_result) ((expression uint + (var_ref shared_load_temp_offset) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp) (var_ref shared_load_result) ) (assign (x) (var_ref color) (expression 
float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@42) )(var_ref shared_load_temp) ) ) ) (declare (temporary ) float shared_load_temp@43) (declare (temporary ) uint shared_load_temp_offset@44) (assign (x) (var_ref shared_load_temp_offset@44) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@45) (call __intrinsic_load_shared (var_ref shared_load_result@45) ((expression uint + (var_ref shared_load_temp_offset@44) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@43) (var_ref shared_load_result@45) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@42) )(var_ref shared_load_temp@43) ) ) ) (declare (temporary ) float shared_load_temp@46) (declare (temporary ) uint shared_load_temp_offset@47) (assign (x) (var_ref shared_load_temp_offset@47) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@48) (call __intrinsic_load_shared (var_ref shared_load_result@48) ((expression uint + (var_ref shared_load_temp_offset@47) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@46) (var_ref shared_load_result@48) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@42) )(var_ref shared_load_temp@46) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (4)) ) ) (declare (temporary ) float length_retval@49) (declare (temporary ) vec2 x@50) (assign (xy) (var_ref x@50) (expression vec2 + (constant vec2 (1.000000 -3.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@49) (expression float sqrt (expression float dot (var_ref x@50) (var_ref x@50) ) ) ) (if (expression bool < (var_ref length_retval@49) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@51) (assign (xyzw) (var_ref texture_retval@51) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@49) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@51) )) ) (declare (temporary ) float shared_load_temp@52) (declare (temporary ) uint shared_load_temp_offset@53) (assign (x) (var_ref shared_load_temp_offset@53) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@54) (call __intrinsic_load_shared (var_ref shared_load_result@54) ((expression uint + (var_ref shared_load_temp_offset@53) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@52) (var_ref shared_load_result@54) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@51) )(var_ref shared_load_temp@52) ) ) ) (declare (temporary ) float shared_load_temp@55) (declare (temporary ) uint shared_load_temp_offset@56) (assign (x) (var_ref shared_load_temp_offset@56) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) 
(declare (temporary ) float shared_load_result@57) (call __intrinsic_load_shared (var_ref shared_load_result@57) ((expression uint + (var_ref shared_load_temp_offset@56) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@55) (var_ref shared_load_result@57) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@51) )(var_ref shared_load_temp@55) ) ) ) (declare (temporary ) float shared_load_temp@58) (declare (temporary ) uint shared_load_temp_offset@59) (assign (x) (var_ref shared_load_temp_offset@59) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@60) (call __intrinsic_load_shared (var_ref shared_load_result@60) ((expression uint + (var_ref shared_load_temp_offset@59) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@58) (var_ref shared_load_result@60) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@51) )(var_ref shared_load_temp@58) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (31)) ) ) (declare (temporary ) float length_retval@61) (declare (temporary ) vec2 x@62) (assign (xy) (var_ref x@62) (expression vec2 + (constant vec2 (-2.000000 -2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@61) (expression float sqrt (expression float dot (var_ref x@62) (var_ref x@62) ) ) ) (if (expression bool < (var_ref length_retval@61) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@63) (assign (xyzw) (var_ref texture_retval@63) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@61) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@63) )) ) (declare (temporary ) float shared_load_temp@64) (declare (temporary ) uint shared_load_temp_offset@65) (assign (x) (var_ref shared_load_temp_offset@65) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@66) (call __intrinsic_load_shared (var_ref shared_load_result@66) ((expression uint + (var_ref shared_load_temp_offset@65) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@64) (var_ref shared_load_result@66) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@63) )(var_ref shared_load_temp@64) ) ) ) (declare (temporary ) float shared_load_temp@67) (declare (temporary ) uint shared_load_temp_offset@68) (assign (x) (var_ref shared_load_temp_offset@68) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@69) (call __intrinsic_load_shared (var_ref shared_load_result@69) ((expression uint + (var_ref shared_load_temp_offset@68) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@67) (var_ref shared_load_result@69) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@63) 
)(var_ref shared_load_temp@67) ) ) ) (declare (temporary ) float shared_load_temp@70) (declare (temporary ) uint shared_load_temp_offset@71) (assign (x) (var_ref shared_load_temp_offset@71) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@72) (call __intrinsic_load_shared (var_ref shared_load_result@72) ((expression uint + (var_ref shared_load_temp_offset@71) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@70) (var_ref shared_load_result@72) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@63) )(var_ref shared_load_temp@70) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (32)) ) ) (declare (temporary ) float length_retval@73) (declare (temporary ) vec2 x@74) (assign (xy) (var_ref x@74) (expression vec2 + (constant vec2 (-1.000000 -2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@73) (expression float sqrt (expression float dot (var_ref x@74) (var_ref x@74) ) ) ) (if (expression bool < (var_ref length_retval@73) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@75) (assign (xyzw) (var_ref texture_retval@75) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@73) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@75) )) ) (declare (temporary ) float shared_load_temp@76) (declare (temporary ) uint shared_load_temp_offset@77) (assign (x) (var_ref shared_load_temp_offset@77) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@78) (call __intrinsic_load_shared (var_ref shared_load_result@78) ((expression uint + (var_ref shared_load_temp_offset@77) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@76) (var_ref shared_load_result@78) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@75) )(var_ref shared_load_temp@76) ) ) ) (declare (temporary ) float shared_load_temp@79) (declare (temporary ) uint shared_load_temp_offset@80) (assign (x) (var_ref shared_load_temp_offset@80) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@81) (call __intrinsic_load_shared (var_ref shared_load_result@81) ((expression uint + (var_ref shared_load_temp_offset@80) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@79) (var_ref shared_load_result@81) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@75) )(var_ref shared_load_temp@79) ) ) ) (declare (temporary ) float shared_load_temp@82) (declare (temporary ) uint shared_load_temp_offset@83) (assign (x) (var_ref shared_load_temp_offset@83) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@84) (call __intrinsic_load_shared (var_ref 
shared_load_result@84) ((expression uint + (var_ref shared_load_temp_offset@83) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@82) (var_ref shared_load_result@84) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@75) )(var_ref shared_load_temp@82) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (33)) ) ) (declare (temporary ) float length_retval@85) (declare (temporary ) vec2 x@86) (assign (xy) (var_ref x@86) (expression vec2 + (constant vec2 (0.000000 -2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@85) (expression float sqrt (expression float dot (var_ref x@86) (var_ref x@86) ) ) ) (if (expression bool < (var_ref length_retval@85) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@87) (assign (xyzw) (var_ref texture_retval@87) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@85) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@87) )) ) (declare (temporary ) float shared_load_temp@88) (declare (temporary ) uint shared_load_temp_offset@89) (assign (x) (var_ref shared_load_temp_offset@89) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@90) (call __intrinsic_load_shared (var_ref shared_load_result@90) ((expression uint + (var_ref shared_load_temp_offset@89) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@88) (var_ref shared_load_result@90) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@87) )(var_ref shared_load_temp@88) ) ) ) (declare (temporary ) float shared_load_temp@91) (declare (temporary ) uint shared_load_temp_offset@92) (assign (x) (var_ref shared_load_temp_offset@92) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@93) (call __intrinsic_load_shared (var_ref shared_load_result@93) ((expression uint + (var_ref shared_load_temp_offset@92) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@91) (var_ref shared_load_result@93) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@87) )(var_ref shared_load_temp@91) ) ) ) (declare (temporary ) float shared_load_temp@94) (declare (temporary ) uint shared_load_temp_offset@95) (assign (x) (var_ref shared_load_temp_offset@95) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@96) (call __intrinsic_load_shared (var_ref shared_load_result@96) ((expression uint + (var_ref shared_load_temp_offset@95) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@94) (var_ref shared_load_result@96) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@87) )(var_ref shared_load_temp@94) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + 
(expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (34)) ) ) (declare (temporary ) float length_retval@97) (declare (temporary ) vec2 x@98) (assign (xy) (var_ref x@98) (expression vec2 + (constant vec2 (1.000000 -2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@97) (expression float sqrt (expression float dot (var_ref x@98) (var_ref x@98) ) ) ) (if (expression bool < (var_ref length_retval@97) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@99) (assign (xyzw) (var_ref texture_retval@99) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@97) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@99) )) ) (declare (temporary ) float shared_load_temp@100) (declare (temporary ) uint shared_load_temp_offset@101) (assign (x) (var_ref shared_load_temp_offset@101) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@102) (call __intrinsic_load_shared (var_ref shared_load_result@102) ((expression uint + (var_ref shared_load_temp_offset@101) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@100) (var_ref shared_load_result@102) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@99) )(var_ref shared_load_temp@100) ) ) ) (declare (temporary ) float shared_load_temp@103) (declare (temporary ) uint shared_load_temp_offset@104) (assign (x) (var_ref shared_load_temp_offset@104) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@105) (call __intrinsic_load_shared (var_ref shared_load_result@105) ((expression uint + (var_ref shared_load_temp_offset@104) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@103) (var_ref shared_load_result@105) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@99) )(var_ref shared_load_temp@103) ) ) ) (declare (temporary ) float shared_load_temp@106) (declare (temporary ) uint shared_load_temp_offset@107) (assign (x) (var_ref shared_load_temp_offset@107) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@108) (call __intrinsic_load_shared (var_ref shared_load_result@108) ((expression uint + (var_ref shared_load_temp_offset@107) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@106) (var_ref shared_load_result@108) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@99) )(var_ref shared_load_temp@106) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (35)) ) ) (declare (temporary ) float length_retval@109) (declare (temporary ) vec2 x@110) (assign (xy) (var_ref x@110) (expression vec2 + (constant vec2 (2.000000 -2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref 
length_retval@109) (expression float sqrt (expression float dot (var_ref x@110) (var_ref x@110) ) ) ) (if (expression bool < (var_ref length_retval@109) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@111) (assign (xyzw) (var_ref texture_retval@111) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@109) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@111) )) ) (declare (temporary ) float shared_load_temp@112) (declare (temporary ) uint shared_load_temp_offset@113) (assign (x) (var_ref shared_load_temp_offset@113) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@114) (call __intrinsic_load_shared (var_ref shared_load_result@114) ((expression uint + (var_ref shared_load_temp_offset@113) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@112) (var_ref shared_load_result@114) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@111) )(var_ref shared_load_temp@112) ) ) ) (declare (temporary ) float shared_load_temp@115) (declare (temporary ) uint shared_load_temp_offset@116) (assign (x) (var_ref shared_load_temp_offset@116) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@117) (call __intrinsic_load_shared (var_ref shared_load_result@117) ((expression uint + (var_ref shared_load_temp_offset@116) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@115) (var_ref shared_load_result@117) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@111) )(var_ref shared_load_temp@115) ) ) ) (declare (temporary ) float shared_load_temp@118) (declare (temporary ) uint shared_load_temp_offset@119) (assign (x) (var_ref shared_load_temp_offset@119) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@120) (call __intrinsic_load_shared (var_ref shared_load_result@120) ((expression uint + (var_ref shared_load_temp_offset@119) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@118) (var_ref shared_load_result@120) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@111) )(var_ref shared_load_temp@118) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (36)) ) ) (declare (temporary ) float length_retval@121) (declare (temporary ) vec2 x@122) (assign (xy) (var_ref x@122) (expression vec2 + (constant vec2 (3.000000 -2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@121) (expression float sqrt (expression float dot (var_ref x@122) (var_ref x@122) ) ) ) (if (expression bool < (var_ref length_retval@121) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@123) (assign (xyzw) (var_ref texture_retval@123) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant 
float (0.992188)) (expression float * (var_ref length_retval@121) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@123) )) ) (declare (temporary ) float shared_load_temp@124) (declare (temporary ) uint shared_load_temp_offset@125) (assign (x) (var_ref shared_load_temp_offset@125) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@126) (call __intrinsic_load_shared (var_ref shared_load_result@126) ((expression uint + (var_ref shared_load_temp_offset@125) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@124) (var_ref shared_load_result@126) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@123) )(var_ref shared_load_temp@124) ) ) ) (declare (temporary ) float shared_load_temp@127) (declare (temporary ) uint shared_load_temp_offset@128) (assign (x) (var_ref shared_load_temp_offset@128) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@129) (call __intrinsic_load_shared (var_ref shared_load_result@129) ((expression uint + (var_ref shared_load_temp_offset@128) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@127) (var_ref shared_load_result@129) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@123) )(var_ref shared_load_temp@127) ) ) ) (declare (temporary ) float shared_load_temp@130) (declare (temporary ) uint shared_load_temp_offset@131) (assign (x) (var_ref shared_load_temp_offset@131) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@132) (call __intrinsic_load_shared (var_ref shared_load_result@132) ((expression uint + (var_ref shared_load_temp_offset@131) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@130) (var_ref shared_load_result@132) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@123) )(var_ref shared_load_temp@130) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (61)) ) ) (declare (temporary ) float length_retval@133) (declare (temporary ) vec2 x@134) (assign (xy) (var_ref x@134) (expression vec2 + (constant vec2 (-2.000000 -1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@133) (expression float sqrt (expression float dot (var_ref x@134) (var_ref x@134) ) ) ) (if (expression bool < (var_ref length_retval@133) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@135) (assign (xyzw) (var_ref texture_retval@135) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@133) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@135) )) ) (declare (temporary ) float shared_load_temp@136) (declare (temporary ) uint shared_load_temp_offset@137) (assign (x) 
(var_ref shared_load_temp_offset@137) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@138) (call __intrinsic_load_shared (var_ref shared_load_result@138) ((expression uint + (var_ref shared_load_temp_offset@137) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@136) (var_ref shared_load_result@138) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@135) )(var_ref shared_load_temp@136) ) ) ) (declare (temporary ) float shared_load_temp@139) (declare (temporary ) uint shared_load_temp_offset@140) (assign (x) (var_ref shared_load_temp_offset@140) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@141) (call __intrinsic_load_shared (var_ref shared_load_result@141) ((expression uint + (var_ref shared_load_temp_offset@140) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@139) (var_ref shared_load_result@141) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@135) )(var_ref shared_load_temp@139) ) ) ) (declare (temporary ) float shared_load_temp@142) (declare (temporary ) uint shared_load_temp_offset@143) (assign (x) (var_ref shared_load_temp_offset@143) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@144) (call __intrinsic_load_shared (var_ref shared_load_result@144) ((expression uint + (var_ref shared_load_temp_offset@143) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@142) (var_ref shared_load_result@144) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@135) )(var_ref shared_load_temp@142) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (62)) ) ) (declare (temporary ) vec2 x@145) (assign (xy) (var_ref x@145) (expression vec2 + (constant vec2 (-1.000000 -1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@146) (assign (xyzw) (var_ref texture_retval@146) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@145) (var_ref x@145) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@146) )) ) (declare (temporary ) float shared_load_temp@147) (declare (temporary ) uint shared_load_temp_offset@148) (assign (x) (var_ref shared_load_temp_offset@148) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@149) (call __intrinsic_load_shared (var_ref shared_load_result@149) ((expression uint + (var_ref shared_load_temp_offset@148) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@147) (var_ref shared_load_result@149) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref 
texture_retval@146) )(var_ref shared_load_temp@147) ) ) ) (declare (temporary ) float shared_load_temp@150) (declare (temporary ) uint shared_load_temp_offset@151) (assign (x) (var_ref shared_load_temp_offset@151) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@152) (call __intrinsic_load_shared (var_ref shared_load_result@152) ((expression uint + (var_ref shared_load_temp_offset@151) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@150) (var_ref shared_load_result@152) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@146) )(var_ref shared_load_temp@150) ) ) ) (declare (temporary ) float shared_load_temp@153) (declare (temporary ) uint shared_load_temp_offset@154) (assign (x) (var_ref shared_load_temp_offset@154) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@155) (call __intrinsic_load_shared (var_ref shared_load_result@155) ((expression uint + (var_ref shared_load_temp_offset@154) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@153) (var_ref shared_load_result@155) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@146) )(var_ref shared_load_temp@153) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (63)) ) ) (declare (temporary ) vec2 x@156) (assign (xy) (var_ref x@156) (expression vec2 + (constant vec2 (0.000000 -1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@157) (assign (xyzw) (var_ref texture_retval@157) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@156) (var_ref x@156) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@157) )) ) (declare (temporary ) float shared_load_temp@158) (declare (temporary ) uint shared_load_temp_offset@159) (assign (x) (var_ref shared_load_temp_offset@159) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@160) (call __intrinsic_load_shared (var_ref shared_load_result@160) ((expression uint + (var_ref shared_load_temp_offset@159) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@158) (var_ref shared_load_result@160) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@157) )(var_ref shared_load_temp@158) ) ) ) (declare (temporary ) float shared_load_temp@161) (declare (temporary ) uint shared_load_temp_offset@162) (assign (x) (var_ref shared_load_temp_offset@162) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@163) (call __intrinsic_load_shared (var_ref shared_load_result@163) ((expression uint + (var_ref shared_load_temp_offset@162) (constant uint (1680)) ) )) (assign (x) (var_ref 
shared_load_temp@161) (var_ref shared_load_result@163) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@157) )(var_ref shared_load_temp@161) ) ) ) (declare (temporary ) float shared_load_temp@164) (declare (temporary ) uint shared_load_temp_offset@165) (assign (x) (var_ref shared_load_temp_offset@165) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@166) (call __intrinsic_load_shared (var_ref shared_load_result@166) ((expression uint + (var_ref shared_load_temp_offset@165) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@164) (var_ref shared_load_result@166) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@157) )(var_ref shared_load_temp@164) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (64)) ) ) (declare (temporary ) vec2 x@167) (assign (xy) (var_ref x@167) (expression vec2 + (constant vec2 (1.000000 -1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@168) (assign (xyzw) (var_ref texture_retval@168) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@167) (var_ref x@167) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@168) )) ) (declare (temporary ) float shared_load_temp@169) (declare (temporary ) uint shared_load_temp_offset@170) (assign (x) (var_ref shared_load_temp_offset@170) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@171) (call __intrinsic_load_shared (var_ref shared_load_result@171) ((expression uint + (var_ref shared_load_temp_offset@170) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@169) (var_ref shared_load_result@171) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@168) )(var_ref shared_load_temp@169) ) ) ) (declare (temporary ) float shared_load_temp@172) (declare (temporary ) uint shared_load_temp_offset@173) (assign (x) (var_ref shared_load_temp_offset@173) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@174) (call __intrinsic_load_shared (var_ref shared_load_result@174) ((expression uint + (var_ref shared_load_temp_offset@173) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@172) (var_ref shared_load_result@174) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@168) )(var_ref shared_load_temp@172) ) ) ) (declare (temporary ) float shared_load_temp@175) (declare (temporary ) uint shared_load_temp_offset@176) (assign (x) (var_ref shared_load_temp_offset@176) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@177) (call 
__intrinsic_load_shared (var_ref shared_load_result@177) ((expression uint + (var_ref shared_load_temp_offset@176) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@175) (var_ref shared_load_result@177) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@168) )(var_ref shared_load_temp@175) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (65)) ) ) (declare (temporary ) vec2 x@178) (assign (xy) (var_ref x@178) (expression vec2 + (constant vec2 (2.000000 -1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@179) (assign (xyzw) (var_ref texture_retval@179) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@178) (var_ref x@178) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@179) )) ) (declare (temporary ) float shared_load_temp@180) (declare (temporary ) uint shared_load_temp_offset@181) (assign (x) (var_ref shared_load_temp_offset@181) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@182) (call __intrinsic_load_shared (var_ref shared_load_result@182) ((expression uint + (var_ref shared_load_temp_offset@181) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@180) (var_ref shared_load_result@182) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@179) )(var_ref shared_load_temp@180) ) ) ) (declare (temporary ) float shared_load_temp@183) (declare (temporary ) uint shared_load_temp_offset@184) (assign (x) (var_ref shared_load_temp_offset@184) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@185) (call __intrinsic_load_shared (var_ref shared_load_result@185) ((expression uint + (var_ref shared_load_temp_offset@184) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@183) (var_ref shared_load_result@185) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@179) )(var_ref shared_load_temp@183) ) ) ) (declare (temporary ) float shared_load_temp@186) (declare (temporary ) uint shared_load_temp_offset@187) (assign (x) (var_ref shared_load_temp_offset@187) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@188) (call __intrinsic_load_shared (var_ref shared_load_result@188) ((expression uint + (var_ref shared_load_temp_offset@187) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@186) (var_ref shared_load_result@188) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@179) )(var_ref shared_load_temp@186) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (66)) 
) ) (declare (temporary ) float length_retval@189) (declare (temporary ) vec2 x@190) (assign (xy) (var_ref x@190) (expression vec2 + (constant vec2 (3.000000 -1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@189) (expression float sqrt (expression float dot (var_ref x@190) (var_ref x@190) ) ) ) (if (expression bool < (var_ref length_retval@189) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@191) (assign (xyzw) (var_ref texture_retval@191) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@189) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@191) )) ) (declare (temporary ) float shared_load_temp@192) (declare (temporary ) uint shared_load_temp_offset@193) (assign (x) (var_ref shared_load_temp_offset@193) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@194) (call __intrinsic_load_shared (var_ref shared_load_result@194) ((expression uint + (var_ref shared_load_temp_offset@193) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@192) (var_ref shared_load_result@194) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@191) )(var_ref shared_load_temp@192) ) ) ) (declare (temporary ) float shared_load_temp@195) (declare (temporary ) uint shared_load_temp_offset@196) (assign (x) (var_ref shared_load_temp_offset@196) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@197) (call __intrinsic_load_shared (var_ref shared_load_result@197) ((expression uint + (var_ref shared_load_temp_offset@196) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@195) (var_ref shared_load_result@197) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@191) )(var_ref shared_load_temp@195) ) ) ) (declare (temporary ) float shared_load_temp@198) (declare (temporary ) uint shared_load_temp_offset@199) (assign (x) (var_ref shared_load_temp_offset@199) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@200) (call __intrinsic_load_shared (var_ref shared_load_result@200) ((expression uint + (var_ref shared_load_temp_offset@199) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@198) (var_ref shared_load_result@200) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@191) )(var_ref shared_load_temp@198) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (90)) ) ) (declare (temporary ) float length_retval@201) (declare (temporary ) vec2 x@202) (assign (xy) (var_ref x@202) (expression vec2 + (constant vec2 (-3.000000 0.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@201) (expression float sqrt (expression float dot (var_ref x@202) (var_ref x@202) ) ) ) (if (expression 
bool < (var_ref length_retval@201) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@203) (assign (xyzw) (var_ref texture_retval@203) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@201) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@203) )) ) (declare (temporary ) float shared_load_temp@204) (declare (temporary ) uint shared_load_temp_offset@205) (assign (x) (var_ref shared_load_temp_offset@205) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@206) (call __intrinsic_load_shared (var_ref shared_load_result@206) ((expression uint + (var_ref shared_load_temp_offset@205) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@204) (var_ref shared_load_result@206) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@203) )(var_ref shared_load_temp@204) ) ) ) (declare (temporary ) float shared_load_temp@207) (declare (temporary ) uint shared_load_temp_offset@208) (assign (x) (var_ref shared_load_temp_offset@208) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@209) (call __intrinsic_load_shared (var_ref shared_load_result@209) ((expression uint + (var_ref shared_load_temp_offset@208) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@207) (var_ref shared_load_result@209) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@203) )(var_ref shared_load_temp@207) ) ) ) (declare (temporary ) float shared_load_temp@210) (declare (temporary ) uint shared_load_temp_offset@211) (assign (x) (var_ref shared_load_temp_offset@211) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@212) (call __intrinsic_load_shared (var_ref shared_load_result@212) ((expression uint + (var_ref shared_load_temp_offset@211) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@210) (var_ref shared_load_result@212) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@203) )(var_ref shared_load_temp@210) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (91)) ) ) (declare (temporary ) float length_retval@213) (declare (temporary ) vec2 x@214) (assign (xy) (var_ref x@214) (expression vec2 + (constant vec2 (-2.000000 0.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@213) (expression float sqrt (expression float dot (var_ref x@214) (var_ref x@214) ) ) ) (if (expression bool < (var_ref length_retval@213) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@215) (assign (xyzw) (var_ref texture_retval@215) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@213) (expression float rcp (constant float (3.238315)) ) ) 
) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@215) )) ) (declare (temporary ) float shared_load_temp@216) (declare (temporary ) uint shared_load_temp_offset@217) (assign (x) (var_ref shared_load_temp_offset@217) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@218) (call __intrinsic_load_shared (var_ref shared_load_result@218) ((expression uint + (var_ref shared_load_temp_offset@217) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@216) (var_ref shared_load_result@218) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@215) )(var_ref shared_load_temp@216) ) ) ) (declare (temporary ) float shared_load_temp@219) (declare (temporary ) uint shared_load_temp_offset@220) (assign (x) (var_ref shared_load_temp_offset@220) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@221) (call __intrinsic_load_shared (var_ref shared_load_result@221) ((expression uint + (var_ref shared_load_temp_offset@220) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@219) (var_ref shared_load_result@221) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@215) )(var_ref shared_load_temp@219) ) ) ) (declare (temporary ) float shared_load_temp@222) (declare (temporary ) uint shared_load_temp_offset@223) (assign (x) (var_ref shared_load_temp_offset@223) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@224) (call __intrinsic_load_shared (var_ref shared_load_result@224) ((expression uint + (var_ref shared_load_temp_offset@223) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@222) (var_ref shared_load_result@224) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@215) )(var_ref shared_load_temp@222) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (92)) ) ) (declare (temporary ) vec2 x@225) (assign (xy) (var_ref x@225) (expression vec2 + (constant vec2 (-1.000000 0.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@226) (assign (xyzw) (var_ref texture_retval@226) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@225) (var_ref x@225) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@226) )) ) (declare (temporary ) float shared_load_temp@227) (declare (temporary ) uint shared_load_temp_offset@228) (assign (x) (var_ref shared_load_temp_offset@228) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@229) (call __intrinsic_load_shared (var_ref shared_load_result@229) ((expression uint + (var_ref 
shared_load_temp_offset@228) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@227) (var_ref shared_load_result@229) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@226) )(var_ref shared_load_temp@227) ) ) ) (declare (temporary ) float shared_load_temp@230) (declare (temporary ) uint shared_load_temp_offset@231) (assign (x) (var_ref shared_load_temp_offset@231) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@232) (call __intrinsic_load_shared (var_ref shared_load_result@232) ((expression uint + (var_ref shared_load_temp_offset@231) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@230) (var_ref shared_load_result@232) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@226) )(var_ref shared_load_temp@230) ) ) ) (declare (temporary ) float shared_load_temp@233) (declare (temporary ) uint shared_load_temp_offset@234) (assign (x) (var_ref shared_load_temp_offset@234) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@235) (call __intrinsic_load_shared (var_ref shared_load_result@235) ((expression uint + (var_ref shared_load_temp_offset@234) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@233) (var_ref shared_load_result@235) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@226) )(var_ref shared_load_temp@233) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (93)) ) ) (declare (temporary ) vec2 x@236) (assign (xy) (var_ref x@236) (expression vec2 neg (var_ref fract_retval) ) ) (declare (temporary ) vec4 texture_retval@237) (assign (xyzw) (var_ref texture_retval@237) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@236) (var_ref x@236) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@237) )) ) (declare (temporary ) float shared_load_temp@238) (declare (temporary ) uint shared_load_temp_offset@239) (assign (x) (var_ref shared_load_temp_offset@239) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@240) (call __intrinsic_load_shared (var_ref shared_load_result@240) ((expression uint + (var_ref shared_load_temp_offset@239) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@238) (var_ref shared_load_result@240) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@237) )(var_ref shared_load_temp@238) ) ) ) (declare (temporary ) float shared_load_temp@241) (declare (temporary ) uint shared_load_temp_offset@242) (assign (x) (var_ref shared_load_temp_offset@242) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float 
shared_load_result@243) (call __intrinsic_load_shared (var_ref shared_load_result@243) ((expression uint + (var_ref shared_load_temp_offset@242) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@241) (var_ref shared_load_result@243) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@237) )(var_ref shared_load_temp@241) ) ) ) (declare (temporary ) float shared_load_temp@244) (declare (temporary ) uint shared_load_temp_offset@245) (assign (x) (var_ref shared_load_temp_offset@245) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@246) (call __intrinsic_load_shared (var_ref shared_load_result@246) ((expression uint + (var_ref shared_load_temp_offset@245) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@244) (var_ref shared_load_result@246) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@237) )(var_ref shared_load_temp@244) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (94)) ) ) (declare (temporary ) vec2 x@247) (assign (xy) (var_ref x@247) (expression vec2 + (constant vec2 (1.000000 0.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@248) (assign (xyzw) (var_ref texture_retval@248) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@247) (var_ref x@247) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@248) )) ) (declare (temporary ) float shared_load_temp@249) (declare (temporary ) uint shared_load_temp_offset@250) (assign (x) (var_ref shared_load_temp_offset@250) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@251) (call __intrinsic_load_shared (var_ref shared_load_result@251) ((expression uint + (var_ref shared_load_temp_offset@250) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@249) (var_ref shared_load_result@251) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@248) )(var_ref shared_load_temp@249) ) ) ) (declare (temporary ) float shared_load_temp@252) (declare (temporary ) uint shared_load_temp_offset@253) (assign (x) (var_ref shared_load_temp_offset@253) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@254) (call __intrinsic_load_shared (var_ref shared_load_result@254) ((expression uint + (var_ref shared_load_temp_offset@253) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@252) (var_ref shared_load_result@254) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@248) )(var_ref shared_load_temp@252) ) ) ) (declare (temporary ) float shared_load_temp@255) (declare (temporary ) uint shared_load_temp_offset@256) (assign (x) (var_ref 
shared_load_temp_offset@256) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@257) (call __intrinsic_load_shared (var_ref shared_load_result@257) ((expression uint + (var_ref shared_load_temp_offset@256) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@255) (var_ref shared_load_result@257) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@248) )(var_ref shared_load_temp@255) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (95)) ) ) (declare (temporary ) vec2 x@258) (assign (xy) (var_ref x@258) (expression vec2 + (constant vec2 (2.000000 0.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@259) (assign (xyzw) (var_ref texture_retval@259) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@258) (var_ref x@258) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@259) )) ) (declare (temporary ) float shared_load_temp@260) (declare (temporary ) uint shared_load_temp_offset@261) (assign (x) (var_ref shared_load_temp_offset@261) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@262) (call __intrinsic_load_shared (var_ref shared_load_result@262) ((expression uint + (var_ref shared_load_temp_offset@261) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@260) (var_ref shared_load_result@262) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@259) )(var_ref shared_load_temp@260) ) ) ) (declare (temporary ) float shared_load_temp@263) (declare (temporary ) uint shared_load_temp_offset@264) (assign (x) (var_ref shared_load_temp_offset@264) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@265) (call __intrinsic_load_shared (var_ref shared_load_result@265) ((expression uint + (var_ref shared_load_temp_offset@264) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@263) (var_ref shared_load_result@265) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@259) )(var_ref shared_load_temp@263) ) ) ) (declare (temporary ) float shared_load_temp@266) (declare (temporary ) uint shared_load_temp_offset@267) (assign (x) (var_ref shared_load_temp_offset@267) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@268) (call __intrinsic_load_shared (var_ref shared_load_result@268) ((expression uint + (var_ref shared_load_temp_offset@267) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@266) (var_ref shared_load_result@268) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@259) )(var_ref 
shared_load_temp@266) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (96)) ) ) (declare (temporary ) float length_retval@269) (declare (temporary ) vec2 x@270) (assign (xy) (var_ref x@270) (expression vec2 + (constant vec2 (3.000000 0.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@269) (expression float sqrt (expression float dot (var_ref x@270) (var_ref x@270) ) ) ) (if (expression bool < (var_ref length_retval@269) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@271) (assign (xyzw) (var_ref texture_retval@271) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@269) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@271) )) ) (declare (temporary ) float shared_load_temp@272) (declare (temporary ) uint shared_load_temp_offset@273) (assign (x) (var_ref shared_load_temp_offset@273) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@274) (call __intrinsic_load_shared (var_ref shared_load_result@274) ((expression uint + (var_ref shared_load_temp_offset@273) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@272) (var_ref shared_load_result@274) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@271) )(var_ref shared_load_temp@272) ) ) ) (declare (temporary ) float shared_load_temp@275) (declare (temporary ) uint shared_load_temp_offset@276) (assign (x) (var_ref shared_load_temp_offset@276) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@277) (call __intrinsic_load_shared (var_ref shared_load_result@277) ((expression uint + (var_ref shared_load_temp_offset@276) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@275) (var_ref shared_load_result@277) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@271) )(var_ref shared_load_temp@275) ) ) ) (declare (temporary ) float shared_load_temp@278) (declare (temporary ) uint shared_load_temp_offset@279) (assign (x) (var_ref shared_load_temp_offset@279) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@280) (call __intrinsic_load_shared (var_ref shared_load_result@280) ((expression uint + (var_ref shared_load_temp_offset@279) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@278) (var_ref shared_load_result@280) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@271) )(var_ref shared_load_temp@278) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (97)) ) ) (declare (temporary ) float length_retval@281) (declare (temporary ) vec2 x@282) (assign (xy) (var_ref x@282) (expression vec2 + (constant vec2 
(4.000000 0.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@281) (expression float sqrt (expression float dot (var_ref x@282) (var_ref x@282) ) ) ) (if (expression bool < (var_ref length_retval@281) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@283) (assign (xyzw) (var_ref texture_retval@283) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@281) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@283) )) ) (declare (temporary ) float shared_load_temp@284) (declare (temporary ) uint shared_load_temp_offset@285) (assign (x) (var_ref shared_load_temp_offset@285) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@286) (call __intrinsic_load_shared (var_ref shared_load_result@286) ((expression uint + (var_ref shared_load_temp_offset@285) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@284) (var_ref shared_load_result@286) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@283) )(var_ref shared_load_temp@284) ) ) ) (declare (temporary ) float shared_load_temp@287) (declare (temporary ) uint shared_load_temp_offset@288) (assign (x) (var_ref shared_load_temp_offset@288) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@289) (call __intrinsic_load_shared (var_ref shared_load_result@289) ((expression uint + (var_ref shared_load_temp_offset@288) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@287) (var_ref shared_load_result@289) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@283) )(var_ref shared_load_temp@287) ) ) ) (declare (temporary ) float shared_load_temp@290) (declare (temporary ) uint shared_load_temp_offset@291) (assign (x) (var_ref shared_load_temp_offset@291) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@292) (call __intrinsic_load_shared (var_ref shared_load_result@292) ((expression uint + (var_ref shared_load_temp_offset@291) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@290) (var_ref shared_load_result@292) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@283) )(var_ref shared_load_temp@290) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (120)) ) ) (declare (temporary ) float length_retval@293) (declare (temporary ) vec2 x@294) (assign (xy) (var_ref x@294) (expression vec2 + (constant vec2 (-3.000000 1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@293) (expression float sqrt (expression float dot (var_ref x@294) (var_ref x@294) ) ) ) (if (expression bool < (var_ref length_retval@293) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@295) (assign (xyzw) (var_ref 
texture_retval@295) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@293) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@295) )) ) (declare (temporary ) float shared_load_temp@296) (declare (temporary ) uint shared_load_temp_offset@297) (assign (x) (var_ref shared_load_temp_offset@297) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@298) (call __intrinsic_load_shared (var_ref shared_load_result@298) ((expression uint + (var_ref shared_load_temp_offset@297) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@296) (var_ref shared_load_result@298) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@295) )(var_ref shared_load_temp@296) ) ) ) (declare (temporary ) float shared_load_temp@299) (declare (temporary ) uint shared_load_temp_offset@300) (assign (x) (var_ref shared_load_temp_offset@300) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@301) (call __intrinsic_load_shared (var_ref shared_load_result@301) ((expression uint + (var_ref shared_load_temp_offset@300) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@299) (var_ref shared_load_result@301) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@295) )(var_ref shared_load_temp@299) ) ) ) (declare (temporary ) float shared_load_temp@302) (declare (temporary ) uint shared_load_temp_offset@303) (assign (x) (var_ref shared_load_temp_offset@303) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@304) (call __intrinsic_load_shared (var_ref shared_load_result@304) ((expression uint + (var_ref shared_load_temp_offset@303) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@302) (var_ref shared_load_result@304) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@295) )(var_ref shared_load_temp@302) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (121)) ) ) (declare (temporary ) float length_retval@305) (declare (temporary ) vec2 x@306) (assign (xy) (var_ref x@306) (expression vec2 + (constant vec2 (-2.000000 1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@305) (expression float sqrt (expression float dot (var_ref x@306) (var_ref x@306) ) ) ) (if (expression bool < (var_ref length_retval@305) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@307) (assign (xyzw) (var_ref texture_retval@307) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@305) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@307) )) ) (declare (temporary ) 
float shared_load_temp@308) (declare (temporary ) uint shared_load_temp_offset@309) (assign (x) (var_ref shared_load_temp_offset@309) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@310) (call __intrinsic_load_shared (var_ref shared_load_result@310) ((expression uint + (var_ref shared_load_temp_offset@309) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@308) (var_ref shared_load_result@310) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@307) )(var_ref shared_load_temp@308) ) ) ) (declare (temporary ) float shared_load_temp@311) (declare (temporary ) uint shared_load_temp_offset@312) (assign (x) (var_ref shared_load_temp_offset@312) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@313) (call __intrinsic_load_shared (var_ref shared_load_result@313) ((expression uint + (var_ref shared_load_temp_offset@312) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@311) (var_ref shared_load_result@313) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@307) )(var_ref shared_load_temp@311) ) ) ) (declare (temporary ) float shared_load_temp@314) (declare (temporary ) uint shared_load_temp_offset@315) (assign (x) (var_ref shared_load_temp_offset@315) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@316) (call __intrinsic_load_shared (var_ref shared_load_result@316) ((expression uint + (var_ref shared_load_temp_offset@315) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@314) (var_ref shared_load_result@316) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@307) )(var_ref shared_load_temp@314) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (122)) ) ) (declare (temporary ) vec2 x@317) (assign (xy) (var_ref x@317) (expression vec2 + (constant vec2 (-1.000000 1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@318) (assign (xyzw) (var_ref texture_retval@318) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@317) (var_ref x@317) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@318) )) ) (declare (temporary ) float shared_load_temp@319) (declare (temporary ) uint shared_load_temp_offset@320) (assign (x) (var_ref shared_load_temp_offset@320) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@321) (call __intrinsic_load_shared (var_ref shared_load_result@321) ((expression uint + (var_ref shared_load_temp_offset@320) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@319) (var_ref shared_load_result@321) ) (assign (x) (var_ref color) 
(expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@318) )(var_ref shared_load_temp@319) ) ) ) (declare (temporary ) float shared_load_temp@322) (declare (temporary ) uint shared_load_temp_offset@323) (assign (x) (var_ref shared_load_temp_offset@323) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@324) (call __intrinsic_load_shared (var_ref shared_load_result@324) ((expression uint + (var_ref shared_load_temp_offset@323) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@322) (var_ref shared_load_result@324) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@318) )(var_ref shared_load_temp@322) ) ) ) (declare (temporary ) float shared_load_temp@325) (declare (temporary ) uint shared_load_temp_offset@326) (assign (x) (var_ref shared_load_temp_offset@326) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@327) (call __intrinsic_load_shared (var_ref shared_load_result@327) ((expression uint + (var_ref shared_load_temp_offset@326) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@325) (var_ref shared_load_result@327) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@318) )(var_ref shared_load_temp@325) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (123)) ) ) (declare (temporary ) vec2 x@328) (assign (xy) (var_ref x@328) (expression vec2 + (constant vec2 (0.000000 1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@329) (assign (xyzw) (var_ref texture_retval@329) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@328) (var_ref x@328) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@329) )) ) (declare (temporary ) float shared_load_temp@330) (declare (temporary ) uint shared_load_temp_offset@331) (assign (x) (var_ref shared_load_temp_offset@331) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@332) (call __intrinsic_load_shared (var_ref shared_load_result@332) ((expression uint + (var_ref shared_load_temp_offset@331) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@330) (var_ref shared_load_result@332) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@329) )(var_ref shared_load_temp@330) ) ) ) (declare (temporary ) float shared_load_temp@333) (declare (temporary ) uint shared_load_temp_offset@334) (assign (x) (var_ref shared_load_temp_offset@334) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@335) (call __intrinsic_load_shared (var_ref shared_load_result@335) ((expression uint + (var_ref 
shared_load_temp_offset@334) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@333) (var_ref shared_load_result@335) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@329) )(var_ref shared_load_temp@333) ) ) ) (declare (temporary ) float shared_load_temp@336) (declare (temporary ) uint shared_load_temp_offset@337) (assign (x) (var_ref shared_load_temp_offset@337) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@338) (call __intrinsic_load_shared (var_ref shared_load_result@338) ((expression uint + (var_ref shared_load_temp_offset@337) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@336) (var_ref shared_load_result@338) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@329) )(var_ref shared_load_temp@336) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (124)) ) ) (declare (temporary ) vec2 x@339) (assign (xy) (var_ref x@339) (expression vec2 + (constant vec2 (1.000000 1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@340) (assign (xyzw) (var_ref texture_retval@340) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@339) (var_ref x@339) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@340) )) ) (declare (temporary ) float shared_load_temp@341) (declare (temporary ) uint shared_load_temp_offset@342) (assign (x) (var_ref shared_load_temp_offset@342) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@343) (call __intrinsic_load_shared (var_ref shared_load_result@343) ((expression uint + (var_ref shared_load_temp_offset@342) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@341) (var_ref shared_load_result@343) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@340) )(var_ref shared_load_temp@341) ) ) ) (declare (temporary ) float shared_load_temp@344) (declare (temporary ) uint shared_load_temp_offset@345) (assign (x) (var_ref shared_load_temp_offset@345) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@346) (call __intrinsic_load_shared (var_ref shared_load_result@346) ((expression uint + (var_ref shared_load_temp_offset@345) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@344) (var_ref shared_load_result@346) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@340) )(var_ref shared_load_temp@344) ) ) ) (declare (temporary ) float shared_load_temp@347) (declare (temporary ) uint shared_load_temp_offset@348) (assign (x) (var_ref shared_load_temp_offset@348) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint 
(4)) ) ) ) (declare (temporary ) float shared_load_result@349) (call __intrinsic_load_shared (var_ref shared_load_result@349) ((expression uint + (var_ref shared_load_temp_offset@348) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@347) (var_ref shared_load_result@349) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@340) )(var_ref shared_load_temp@347) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (125)) ) ) (declare (temporary ) vec2 x@350) (assign (xy) (var_ref x@350) (expression vec2 + (constant vec2 (2.000000 1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@351) (assign (xyzw) (var_ref texture_retval@351) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@350) (var_ref x@350) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@351) )) ) (declare (temporary ) float shared_load_temp@352) (declare (temporary ) uint shared_load_temp_offset@353) (assign (x) (var_ref shared_load_temp_offset@353) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@354) (call __intrinsic_load_shared (var_ref shared_load_result@354) ((expression uint + (var_ref shared_load_temp_offset@353) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@352) (var_ref shared_load_result@354) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@351) )(var_ref shared_load_temp@352) ) ) ) (declare (temporary ) float shared_load_temp@355) (declare (temporary ) uint shared_load_temp_offset@356) (assign (x) (var_ref shared_load_temp_offset@356) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@357) (call __intrinsic_load_shared (var_ref shared_load_result@357) ((expression uint + (var_ref shared_load_temp_offset@356) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@355) (var_ref shared_load_result@357) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@351) )(var_ref shared_load_temp@355) ) ) ) (declare (temporary ) float shared_load_temp@358) (declare (temporary ) uint shared_load_temp_offset@359) (assign (x) (var_ref shared_load_temp_offset@359) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@360) (call __intrinsic_load_shared (var_ref shared_load_result@360) ((expression uint + (var_ref shared_load_temp_offset@359) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@358) (var_ref shared_load_result@360) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@351) )(var_ref shared_load_temp@358) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y 
(var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (126)) ) ) (declare (temporary ) float length_retval@361) (declare (temporary ) vec2 x@362) (assign (xy) (var_ref x@362) (expression vec2 + (constant vec2 (3.000000 1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@361) (expression float sqrt (expression float dot (var_ref x@362) (var_ref x@362) ) ) ) (if (expression bool < (var_ref length_retval@361) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@363) (assign (xyzw) (var_ref texture_retval@363) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@361) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@363) )) ) (declare (temporary ) float shared_load_temp@364) (declare (temporary ) uint shared_load_temp_offset@365) (assign (x) (var_ref shared_load_temp_offset@365) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@366) (call __intrinsic_load_shared (var_ref shared_load_result@366) ((expression uint + (var_ref shared_load_temp_offset@365) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@364) (var_ref shared_load_result@366) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@363) )(var_ref shared_load_temp@364) ) ) ) (declare (temporary ) float shared_load_temp@367) (declare (temporary ) uint shared_load_temp_offset@368) (assign (x) (var_ref shared_load_temp_offset@368) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@369) (call __intrinsic_load_shared (var_ref shared_load_result@369) ((expression uint + (var_ref shared_load_temp_offset@368) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@367) (var_ref shared_load_result@369) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@363) )(var_ref shared_load_temp@367) ) ) ) (declare (temporary ) float shared_load_temp@370) (declare (temporary ) uint shared_load_temp_offset@371) (assign (x) (var_ref shared_load_temp_offset@371) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@372) (call __intrinsic_load_shared (var_ref shared_load_result@372) ((expression uint + (var_ref shared_load_temp_offset@371) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@370) (var_ref shared_load_result@372) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@363) )(var_ref shared_load_temp@370) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (127)) ) ) (declare (temporary ) float length_retval@373) (declare (temporary ) vec2 x@374) (assign (xy) (var_ref x@374) (expression vec2 + (constant vec2 (4.000000 1.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@373) (expression float sqrt 
(expression float dot (var_ref x@374) (var_ref x@374) ) ) ) (if (expression bool < (var_ref length_retval@373) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@375) (assign (xyzw) (var_ref texture_retval@375) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@373) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@375) )) ) (declare (temporary ) float shared_load_temp@376) (declare (temporary ) uint shared_load_temp_offset@377) (assign (x) (var_ref shared_load_temp_offset@377) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@378) (call __intrinsic_load_shared (var_ref shared_load_result@378) ((expression uint + (var_ref shared_load_temp_offset@377) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@376) (var_ref shared_load_result@378) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@375) )(var_ref shared_load_temp@376) ) ) ) (declare (temporary ) float shared_load_temp@379) (declare (temporary ) uint shared_load_temp_offset@380) (assign (x) (var_ref shared_load_temp_offset@380) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@381) (call __intrinsic_load_shared (var_ref shared_load_result@381) ((expression uint + (var_ref shared_load_temp_offset@380) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@379) (var_ref shared_load_result@381) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@375) )(var_ref shared_load_temp@379) ) ) ) (declare (temporary ) float shared_load_temp@382) (declare (temporary ) uint shared_load_temp_offset@383) (assign (x) (var_ref shared_load_temp_offset@383) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@384) (call __intrinsic_load_shared (var_ref shared_load_result@384) ((expression uint + (var_ref shared_load_temp_offset@383) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@382) (var_ref shared_load_result@384) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@375) )(var_ref shared_load_temp@382) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (151)) ) ) (declare (temporary ) float length_retval@385) (declare (temporary ) vec2 x@386) (assign (xy) (var_ref x@386) (expression vec2 + (constant vec2 (-2.000000 2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@385) (expression float sqrt (expression float dot (var_ref x@386) (var_ref x@386) ) ) ) (if (expression bool < (var_ref length_retval@385) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@387) (assign (xyzw) (var_ref texture_retval@387) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * 
(var_ref length_retval@385) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@387) )) ) (declare (temporary ) float shared_load_temp@388) (declare (temporary ) uint shared_load_temp_offset@389) (assign (x) (var_ref shared_load_temp_offset@389) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@390) (call __intrinsic_load_shared (var_ref shared_load_result@390) ((expression uint + (var_ref shared_load_temp_offset@389) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@388) (var_ref shared_load_result@390) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@387) )(var_ref shared_load_temp@388) ) ) ) (declare (temporary ) float shared_load_temp@391) (declare (temporary ) uint shared_load_temp_offset@392) (assign (x) (var_ref shared_load_temp_offset@392) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@393) (call __intrinsic_load_shared (var_ref shared_load_result@393) ((expression uint + (var_ref shared_load_temp_offset@392) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@391) (var_ref shared_load_result@393) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@387) )(var_ref shared_load_temp@391) ) ) ) (declare (temporary ) float shared_load_temp@394) (declare (temporary ) uint shared_load_temp_offset@395) (assign (x) (var_ref shared_load_temp_offset@395) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@396) (call __intrinsic_load_shared (var_ref shared_load_result@396) ((expression uint + (var_ref shared_load_temp_offset@395) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@394) (var_ref shared_load_result@396) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@387) )(var_ref shared_load_temp@394) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (152)) ) ) (declare (temporary ) vec2 x@397) (assign (xy) (var_ref x@397) (expression vec2 + (constant vec2 (-1.000000 2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@398) (assign (xyzw) (var_ref texture_retval@398) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@397) (var_ref x@397) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@398) )) ) (declare (temporary ) float shared_load_temp@399) (declare (temporary ) uint shared_load_temp_offset@400) (assign (x) (var_ref shared_load_temp_offset@400) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@401) (call __intrinsic_load_shared 
(var_ref shared_load_result@401) ((expression uint + (var_ref shared_load_temp_offset@400) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@399) (var_ref shared_load_result@401) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@398) )(var_ref shared_load_temp@399) ) ) ) (declare (temporary ) float shared_load_temp@402) (declare (temporary ) uint shared_load_temp_offset@403) (assign (x) (var_ref shared_load_temp_offset@403) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@404) (call __intrinsic_load_shared (var_ref shared_load_result@404) ((expression uint + (var_ref shared_load_temp_offset@403) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@402) (var_ref shared_load_result@404) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@398) )(var_ref shared_load_temp@402) ) ) ) (declare (temporary ) float shared_load_temp@405) (declare (temporary ) uint shared_load_temp_offset@406) (assign (x) (var_ref shared_load_temp_offset@406) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@407) (call __intrinsic_load_shared (var_ref shared_load_result@407) ((expression uint + (var_ref shared_load_temp_offset@406) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@405) (var_ref shared_load_result@407) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@398) )(var_ref shared_load_temp@405) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (153)) ) ) (declare (temporary ) vec2 x@408) (assign (xy) (var_ref x@408) (expression vec2 + (constant vec2 (0.000000 2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@409) (assign (xyzw) (var_ref texture_retval@409) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@408) (var_ref x@408) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@409) )) ) (declare (temporary ) float shared_load_temp@410) (declare (temporary ) uint shared_load_temp_offset@411) (assign (x) (var_ref shared_load_temp_offset@411) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@412) (call __intrinsic_load_shared (var_ref shared_load_result@412) ((expression uint + (var_ref shared_load_temp_offset@411) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@410) (var_ref shared_load_result@412) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@409) )(var_ref shared_load_temp@410) ) ) ) (declare (temporary ) float shared_load_temp@413) (declare (temporary ) uint shared_load_temp_offset@414) (assign (x) (var_ref shared_load_temp_offset@414) (expression uint + (constant uint (0)) (expression 
uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@415) (call __intrinsic_load_shared (var_ref shared_load_result@415) ((expression uint + (var_ref shared_load_temp_offset@414) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@413) (var_ref shared_load_result@415) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@409) )(var_ref shared_load_temp@413) ) ) ) (declare (temporary ) float shared_load_temp@416) (declare (temporary ) uint shared_load_temp_offset@417) (assign (x) (var_ref shared_load_temp_offset@417) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@418) (call __intrinsic_load_shared (var_ref shared_load_result@418) ((expression uint + (var_ref shared_load_temp_offset@417) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@416) (var_ref shared_load_result@418) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@409) )(var_ref shared_load_temp@416) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (154)) ) ) (declare (temporary ) vec2 x@419) (assign (xy) (var_ref x@419) (expression vec2 + (constant vec2 (1.000000 2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@420) (assign (xyzw) (var_ref texture_retval@420) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@419) (var_ref x@419) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@420) )) ) (declare (temporary ) float shared_load_temp@421) (declare (temporary ) uint shared_load_temp_offset@422) (assign (x) (var_ref shared_load_temp_offset@422) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@423) (call __intrinsic_load_shared (var_ref shared_load_result@423) ((expression uint + (var_ref shared_load_temp_offset@422) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@421) (var_ref shared_load_result@423) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@420) )(var_ref shared_load_temp@421) ) ) ) (declare (temporary ) float shared_load_temp@424) (declare (temporary ) uint shared_load_temp_offset@425) (assign (x) (var_ref shared_load_temp_offset@425) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@426) (call __intrinsic_load_shared (var_ref shared_load_result@426) ((expression uint + (var_ref shared_load_temp_offset@425) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@424) (var_ref shared_load_result@426) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@420) )(var_ref shared_load_temp@424) ) ) ) (declare (temporary ) float shared_load_temp@427) 
(declare (temporary ) uint shared_load_temp_offset@428) (assign (x) (var_ref shared_load_temp_offset@428) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@429) (call __intrinsic_load_shared (var_ref shared_load_result@429) ((expression uint + (var_ref shared_load_temp_offset@428) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@427) (var_ref shared_load_result@429) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@420) )(var_ref shared_load_temp@427) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (155)) ) ) (declare (temporary ) vec2 x@430) (assign (xy) (var_ref x@430) (expression vec2 + (constant vec2 (2.000000 2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (declare (temporary ) vec4 texture_retval@431) (assign (xyzw) (var_ref texture_retval@431) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (expression float sqrt (expression float dot (var_ref x@430) (var_ref x@430) ) ) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@431) )) ) (declare (temporary ) float shared_load_temp@432) (declare (temporary ) uint shared_load_temp_offset@433) (assign (x) (var_ref shared_load_temp_offset@433) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@434) (call __intrinsic_load_shared (var_ref shared_load_result@434) ((expression uint + (var_ref shared_load_temp_offset@433) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@432) (var_ref shared_load_result@434) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@431) )(var_ref shared_load_temp@432) ) ) ) (declare (temporary ) float shared_load_temp@435) (declare (temporary ) uint shared_load_temp_offset@436) (assign (x) (var_ref shared_load_temp_offset@436) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@437) (call __intrinsic_load_shared (var_ref shared_load_result@437) ((expression uint + (var_ref shared_load_temp_offset@436) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@435) (var_ref shared_load_result@437) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@431) )(var_ref shared_load_temp@435) ) ) ) (declare (temporary ) float shared_load_temp@438) (declare (temporary ) uint shared_load_temp_offset@439) (assign (x) (var_ref shared_load_temp_offset@439) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@440) (call __intrinsic_load_shared (var_ref shared_load_result@440) ((expression uint + (var_ref shared_load_temp_offset@439) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@438) (var_ref shared_load_result@440) ) (assign (z) (var_ref color) (expression float + (swiz z 
(var_ref color) )(expression float * (swiz x (var_ref texture_retval@431) )(var_ref shared_load_temp@438) ) ) ) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (156)) ) ) (declare (temporary ) float length_retval@441) (declare (temporary ) vec2 x@442) (assign (xy) (var_ref x@442) (expression vec2 + (constant vec2 (3.000000 2.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@441) (expression float sqrt (expression float dot (var_ref x@442) (var_ref x@442) ) ) ) (if (expression bool < (var_ref length_retval@441) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@443) (assign (xyzw) (var_ref texture_retval@443) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@441) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@443) )) ) (declare (temporary ) float shared_load_temp@444) (declare (temporary ) uint shared_load_temp_offset@445) (assign (x) (var_ref shared_load_temp_offset@445) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@446) (call __intrinsic_load_shared (var_ref shared_load_result@446) ((expression uint + (var_ref shared_load_temp_offset@445) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@444) (var_ref shared_load_result@446) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@443) )(var_ref shared_load_temp@444) ) ) ) (declare (temporary ) float shared_load_temp@447) (declare (temporary ) uint shared_load_temp_offset@448) (assign (x) (var_ref shared_load_temp_offset@448) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@449) (call __intrinsic_load_shared (var_ref shared_load_result@449) ((expression uint + (var_ref shared_load_temp_offset@448) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@447) (var_ref shared_load_result@449) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@443) )(var_ref shared_load_temp@447) ) ) ) (declare (temporary ) float shared_load_temp@450) (declare (temporary ) uint shared_load_temp_offset@451) (assign (x) (var_ref shared_load_temp_offset@451) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@452) (call __intrinsic_load_shared (var_ref shared_load_result@452) ((expression uint + (var_ref shared_load_temp_offset@451) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@450) (var_ref shared_load_result@452) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@443) )(var_ref shared_load_temp@450) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (181)) ) ) (declare (temporary ) float length_retval@453) (declare 
(temporary ) vec2 x@454) (assign (xy) (var_ref x@454) (expression vec2 + (constant vec2 (-2.000000 3.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@453) (expression float sqrt (expression float dot (var_ref x@454) (var_ref x@454) ) ) ) (if (expression bool < (var_ref length_retval@453) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@455) (assign (xyzw) (var_ref texture_retval@455) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@453) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@455) )) ) (declare (temporary ) float shared_load_temp@456) (declare (temporary ) uint shared_load_temp_offset@457) (assign (x) (var_ref shared_load_temp_offset@457) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@458) (call __intrinsic_load_shared (var_ref shared_load_result@458) ((expression uint + (var_ref shared_load_temp_offset@457) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@456) (var_ref shared_load_result@458) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@455) )(var_ref shared_load_temp@456) ) ) ) (declare (temporary ) float shared_load_temp@459) (declare (temporary ) uint shared_load_temp_offset@460) (assign (x) (var_ref shared_load_temp_offset@460) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@461) (call __intrinsic_load_shared (var_ref shared_load_result@461) ((expression uint + (var_ref shared_load_temp_offset@460) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@459) (var_ref shared_load_result@461) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@455) )(var_ref shared_load_temp@459) ) ) ) (declare (temporary ) float shared_load_temp@462) (declare (temporary ) uint shared_load_temp_offset@463) (assign (x) (var_ref shared_load_temp_offset@463) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@464) (call __intrinsic_load_shared (var_ref shared_load_result@464) ((expression uint + (var_ref shared_load_temp_offset@463) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@462) (var_ref shared_load_result@464) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@455) )(var_ref shared_load_temp@462) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (182)) ) ) (declare (temporary ) float length_retval@465) (declare (temporary ) vec2 x@466) (assign (xy) (var_ref x@466) (expression vec2 + (constant vec2 (-1.000000 3.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@465) (expression float sqrt (expression float dot (var_ref x@466) (var_ref x@466) ) ) ) (if (expression bool < (var_ref length_retval@465) (constant float (3.032708)) 
) ( (declare (temporary ) vec4 texture_retval@467) (assign (xyzw) (var_ref texture_retval@467) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@465) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@467) )) ) (declare (temporary ) float shared_load_temp@468) (declare (temporary ) uint shared_load_temp_offset@469) (assign (x) (var_ref shared_load_temp_offset@469) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@470) (call __intrinsic_load_shared (var_ref shared_load_result@470) ((expression uint + (var_ref shared_load_temp_offset@469) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@468) (var_ref shared_load_result@470) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@467) )(var_ref shared_load_temp@468) ) ) ) (declare (temporary ) float shared_load_temp@471) (declare (temporary ) uint shared_load_temp_offset@472) (assign (x) (var_ref shared_load_temp_offset@472) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@473) (call __intrinsic_load_shared (var_ref shared_load_result@473) ((expression uint + (var_ref shared_load_temp_offset@472) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@471) (var_ref shared_load_result@473) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@467) )(var_ref shared_load_temp@471) ) ) ) (declare (temporary ) float shared_load_temp@474) (declare (temporary ) uint shared_load_temp_offset@475) (assign (x) (var_ref shared_load_temp_offset@475) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@476) (call __intrinsic_load_shared (var_ref shared_load_result@476) ((expression uint + (var_ref shared_load_temp_offset@475) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@474) (var_ref shared_load_result@476) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@467) )(var_ref shared_load_temp@474) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (183)) ) ) (declare (temporary ) float length_retval@477) (declare (temporary ) vec2 x@478) (assign (xy) (var_ref x@478) (expression vec2 + (constant vec2 (0.000000 3.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@477) (expression float sqrt (expression float dot (var_ref x@478) (var_ref x@478) ) ) ) (if (expression bool < (var_ref length_retval@477) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@479) (assign (xyzw) (var_ref texture_retval@479) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@477) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + 
(var_ref wsum) (swiz x (var_ref texture_retval@479) )) ) (declare (temporary ) float shared_load_temp@480) (declare (temporary ) uint shared_load_temp_offset@481) (assign (x) (var_ref shared_load_temp_offset@481) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@482) (call __intrinsic_load_shared (var_ref shared_load_result@482) ((expression uint + (var_ref shared_load_temp_offset@481) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@480) (var_ref shared_load_result@482) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@479) )(var_ref shared_load_temp@480) ) ) ) (declare (temporary ) float shared_load_temp@483) (declare (temporary ) uint shared_load_temp_offset@484) (assign (x) (var_ref shared_load_temp_offset@484) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@485) (call __intrinsic_load_shared (var_ref shared_load_result@485) ((expression uint + (var_ref shared_load_temp_offset@484) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@483) (var_ref shared_load_result@485) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@479) )(var_ref shared_load_temp@483) ) ) ) (declare (temporary ) float shared_load_temp@486) (declare (temporary ) uint shared_load_temp_offset@487) (assign (x) (var_ref shared_load_temp_offset@487) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@488) (call __intrinsic_load_shared (var_ref shared_load_result@488) ((expression uint + (var_ref shared_load_temp_offset@487) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@486) (var_ref shared_load_result@488) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@479) )(var_ref shared_load_temp@486) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (184)) ) ) (declare (temporary ) float length_retval@489) (declare (temporary ) vec2 x@490) (assign (xy) (var_ref x@490) (expression vec2 + (constant vec2 (1.000000 3.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@489) (expression float sqrt (expression float dot (var_ref x@490) (var_ref x@490) ) ) ) (if (expression bool < (var_ref length_retval@489) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@491) (assign (xyzw) (var_ref texture_retval@491) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@489) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@491) )) ) (declare (temporary ) float shared_load_temp@492) (declare (temporary ) uint shared_load_temp_offset@493) (assign (x) (var_ref shared_load_temp_offset@493) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float 
shared_load_result@494) (call __intrinsic_load_shared (var_ref shared_load_result@494) ((expression uint + (var_ref shared_load_temp_offset@493) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@492) (var_ref shared_load_result@494) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@491) )(var_ref shared_load_temp@492) ) ) ) (declare (temporary ) float shared_load_temp@495) (declare (temporary ) uint shared_load_temp_offset@496) (assign (x) (var_ref shared_load_temp_offset@496) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@497) (call __intrinsic_load_shared (var_ref shared_load_result@497) ((expression uint + (var_ref shared_load_temp_offset@496) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@495) (var_ref shared_load_result@497) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@491) )(var_ref shared_load_temp@495) ) ) ) (declare (temporary ) float shared_load_temp@498) (declare (temporary ) uint shared_load_temp_offset@499) (assign (x) (var_ref shared_load_temp_offset@499) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@500) (call __intrinsic_load_shared (var_ref shared_load_result@500) ((expression uint + (var_ref shared_load_temp_offset@499) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@498) (var_ref shared_load_result@500) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@491) )(var_ref shared_load_temp@498) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (185)) ) ) (declare (temporary ) float length_retval@501) (declare (temporary ) vec2 x@502) (assign (xy) (var_ref x@502) (expression vec2 + (constant vec2 (2.000000 3.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@501) (expression float sqrt (expression float dot (var_ref x@502) (var_ref x@502) ) ) ) (if (expression bool < (var_ref length_retval@501) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@503) (assign (xyzw) (var_ref texture_retval@503) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@501) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@503) )) ) (declare (temporary ) float shared_load_temp@504) (declare (temporary ) uint shared_load_temp_offset@505) (assign (x) (var_ref shared_load_temp_offset@505) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@506) (call __intrinsic_load_shared (var_ref shared_load_result@506) ((expression uint + (var_ref shared_load_temp_offset@505) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@504) (var_ref shared_load_result@506) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref 
texture_retval@503) )(var_ref shared_load_temp@504) ) ) ) (declare (temporary ) float shared_load_temp@507) (declare (temporary ) uint shared_load_temp_offset@508) (assign (x) (var_ref shared_load_temp_offset@508) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@509) (call __intrinsic_load_shared (var_ref shared_load_result@509) ((expression uint + (var_ref shared_load_temp_offset@508) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@507) (var_ref shared_load_result@509) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@503) )(var_ref shared_load_temp@507) ) ) ) (declare (temporary ) float shared_load_temp@510) (declare (temporary ) uint shared_load_temp_offset@511) (assign (x) (var_ref shared_load_temp_offset@511) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@512) (call __intrinsic_load_shared (var_ref shared_load_result@512) ((expression uint + (var_ref shared_load_temp_offset@511) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@510) (var_ref shared_load_result@512) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@503) )(var_ref shared_load_temp@510) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (186)) ) ) (declare (temporary ) float length_retval@513) (declare (temporary ) vec2 x@514) (assign (xy) (var_ref x@514) (expression vec2 + (constant vec2 (3.000000 3.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@513) (expression float sqrt (expression float dot (var_ref x@514) (var_ref x@514) ) ) ) (if (expression bool < (var_ref length_retval@513) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@515) (assign (xyzw) (var_ref texture_retval@515) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@513) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@515) )) ) (declare (temporary ) float shared_load_temp@516) (declare (temporary ) uint shared_load_temp_offset@517) (assign (x) (var_ref shared_load_temp_offset@517) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@518) (call __intrinsic_load_shared (var_ref shared_load_result@518) ((expression uint + (var_ref shared_load_temp_offset@517) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@516) (var_ref shared_load_result@518) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@515) )(var_ref shared_load_temp@516) ) ) ) (declare (temporary ) float shared_load_temp@519) (declare (temporary ) uint shared_load_temp_offset@520) (assign (x) (var_ref shared_load_temp_offset@520) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float 
shared_load_result@521) (call __intrinsic_load_shared (var_ref shared_load_result@521) ((expression uint + (var_ref shared_load_temp_offset@520) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@519) (var_ref shared_load_result@521) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@515) )(var_ref shared_load_temp@519) ) ) ) (declare (temporary ) float shared_load_temp@522) (declare (temporary ) uint shared_load_temp_offset@523) (assign (x) (var_ref shared_load_temp_offset@523) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@524) (call __intrinsic_load_shared (var_ref shared_load_result@524) ((expression uint + (var_ref shared_load_temp_offset@523) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@522) (var_ref shared_load_result@524) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@515) )(var_ref shared_load_temp@522) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (213)) ) ) (declare (temporary ) float length_retval@525) (declare (temporary ) vec2 x@526) (assign (xy) (var_ref x@526) (expression vec2 + (constant vec2 (0.000000 4.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@525) (expression float sqrt (expression float dot (var_ref x@526) (var_ref x@526) ) ) ) (if (expression bool < (var_ref length_retval@525) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@527) (assign (xyzw) (var_ref texture_retval@527) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@525) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@527) )) ) (declare (temporary ) float shared_load_temp@528) (declare (temporary ) uint shared_load_temp_offset@529) (assign (x) (var_ref shared_load_temp_offset@529) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@530) (call __intrinsic_load_shared (var_ref shared_load_result@530) ((expression uint + (var_ref shared_load_temp_offset@529) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@528) (var_ref shared_load_result@530) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@527) )(var_ref shared_load_temp@528) ) ) ) (declare (temporary ) float shared_load_temp@531) (declare (temporary ) uint shared_load_temp_offset@532) (assign (x) (var_ref shared_load_temp_offset@532) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@533) (call __intrinsic_load_shared (var_ref shared_load_result@533) ((expression uint + (var_ref shared_load_temp_offset@532) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@531) (var_ref shared_load_result@533) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref 
texture_retval@527) )(var_ref shared_load_temp@531) ) ) ) (declare (temporary ) float shared_load_temp@534) (declare (temporary ) uint shared_load_temp_offset@535) (assign (x) (var_ref shared_load_temp_offset@535) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@536) (call __intrinsic_load_shared (var_ref shared_load_result@536) ((expression uint + (var_ref shared_load_temp_offset@535) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@534) (var_ref shared_load_result@536) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@527) )(var_ref shared_load_temp@534) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (214)) ) ) (declare (temporary ) float length_retval@537) (declare (temporary ) vec2 x@538) (assign (xy) (var_ref x@538) (expression vec2 + (constant vec2 (1.000000 4.000000)) (expression vec2 neg (var_ref fract_retval) ) ) ) (assign (x) (var_ref length_retval@537) (expression float sqrt (expression float dot (var_ref x@538) (var_ref x@538) ) ) ) (if (expression bool < (var_ref length_retval@537) (constant float (3.032708)) ) ( (declare (temporary ) vec4 texture_retval@539) (assign (xyzw) (var_ref texture_retval@539) (tex vec4 (var_ref lut) (expression float lrp (constant float (0.007812)) (constant float (0.992188)) (expression float * (var_ref length_retval@537) (expression float rcp (constant float (3.238315)) ) ) ) 0 1 () )) (assign (x) (var_ref wsum) (expression float + (var_ref wsum) (swiz x (var_ref texture_retval@539) )) ) (declare (temporary ) float shared_load_temp@540) (declare (temporary ) uint shared_load_temp_offset@541) (assign (x) (var_ref shared_load_temp_offset@541) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@542) (call __intrinsic_load_shared (var_ref shared_load_result@542) ((expression uint + (var_ref shared_load_temp_offset@541) (constant uint (0)) ) )) (assign (x) (var_ref shared_load_temp@540) (var_ref shared_load_result@542) ) (assign (x) (var_ref color) (expression float + (swiz x (var_ref color) )(expression float * (swiz x (var_ref texture_retval@539) )(var_ref shared_load_temp@540) ) ) ) (declare (temporary ) float shared_load_temp@543) (declare (temporary ) uint shared_load_temp_offset@544) (assign (x) (var_ref shared_load_temp_offset@544) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float shared_load_result@545) (call __intrinsic_load_shared (var_ref shared_load_result@545) ((expression uint + (var_ref shared_load_temp_offset@544) (constant uint (1680)) ) )) (assign (x) (var_ref shared_load_temp@543) (var_ref shared_load_result@545) ) (assign (y) (var_ref color) (expression float + (swiz y (var_ref color) )(expression float * (swiz x (var_ref texture_retval@539) )(var_ref shared_load_temp@543) ) ) ) (declare (temporary ) float shared_load_temp@546) (declare (temporary ) uint shared_load_temp_offset@547) (assign (x) (var_ref shared_load_temp_offset@547) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref idx) ) (constant uint (4)) ) ) ) (declare (temporary ) float 
shared_load_result@548) (call __intrinsic_load_shared (var_ref shared_load_result@548) ((expression uint + (var_ref shared_load_temp_offset@547) (constant uint (3360)) ) )) (assign (x) (var_ref shared_load_temp@546) (var_ref shared_load_result@548) ) (assign (z) (var_ref color) (expression float + (swiz z (var_ref color) )(expression float * (swiz x (var_ref texture_retval@539) )(var_ref shared_load_temp@546) ) ) ) ) ()) (assign (x) (var_ref idx) (expression int + (expression int + (expression int * (constant int (30)) (swiz y (var_ref vec_ctor) )) (swiz x (var_ref vec_ctor) )) (constant int (217)) ) ) (assign (xyzw) (var_ref color) (expression vec4 * (var_ref color) (expression vec4 rcp (swiz xxxx (var_ref wsum) )) ) ) (assign (xyz) (var_ref color) (swiz xyz (var_ref color) )) (assign (w) (var_ref color) (constant float (1.000000)) ) (assign (xyz) (var_ref color) (expression vec3 sat (swiz xyz (var_ref color) )) ) (assign (xyz) (var_ref color) (expression vec3 * (expression vec3 + (expression vec3 rcp (expression vec3 + (constant float (1.000000)) (expression vec3 exp2 (expression vec3 * (expression vec3 * (constant float (6.500000)) (expression vec3 + (constant float (0.750000)) (expression vec3 neg (swiz xyz (var_ref color) )) ) ) (constant float (1.442695)) ) ) ) ) (constant float (-0.007577)) ) (expression float rcp (constant float (0.827906)) ) ) ) (assign (xyz) (var_ref color) (swiz xyz (var_ref color) )) (assign (xyz) (var_ref color) (swiz xyz (var_ref color) )) (assign (xyz) (var_ref color) (expression vec3 sat (swiz xyz (var_ref color) )) ) (assign (xyz) (var_ref color) (swiz xyz (var_ref color) )) (assign (xyz) (var_ref color) (expression vec3 pow (swiz xyz (var_ref color) )(constant vec3 (0.416667 0.416667 0.416667)) ) ) (call __intrinsic_image_store ((var_ref out_image) (swiz xy (expression ivec3 u2i (var_ref gl_GlobalInvocationID) ) )(var_ref color) )) )) ) )
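For readability, here is a hedged GLSL sketch of the tap pattern that the unrolled IR above repeats for every kernel offset. Everything named in it (tap, plane0/plane1/plane2, base, fcoord, off, CUTOFF, RADIUS, the driver main) is an illustrative reconstruction, not the shader's actual source; only the constants, the 30-wide tile stride (420 = 30 x 14 floats per plane, matching the 30/14 loop bounds in the NIR below, at shared byte offsets 0, 1680 and 3360), and the control flow are read off the dump.

#version 430
// Hedged sketch, not the shader's actual source: reconstructs the per-tap
// pattern that the unrolled IR above repeats for every kernel offset.
layout(local_size_x = 32, local_size_y = 8) in;   // matches local-size: 32, 8, 1

uniform sampler1D lut;

// 3 planes x 420 floats x 4 bytes = 5040 bytes, the reported shared-size;
// the byte offsets 0 / 1680 / 3360 in the dump address these three arrays.
shared float plane0[420];
shared float plane1[420];
shared float plane2[420];

const float CUTOFF = 3.032708;   // taps beyond this distance are skipped
const float RADIUS = 3.238315;   // the weight LUT spans distances [0, RADIUS]

void tap(ivec2 base, vec2 fcoord, ivec2 off, inout vec4 color, inout float wsum)
{
    // The compiler folds the +3 window padding into each tap's literal,
    // e.g. offset (1,2) becomes 30*(2+3) + (1+3) = 154 as seen above.
    int idx = 30 * (base.y + off.y + 3) + (base.x + off.x + 3);
    float d = length(vec2(off) - fcoord);
    if (d < CUTOFF) {
        // 0.007812 = 1/128 and 0.992188 = 1 - 1/128: a half-texel clamp,
        // consistent with a 64-entry 1D weight LUT.
        float w = texture(lut, mix(0.007812, 0.992188, d / RADIUS)).x;
        wsum += w;
        color.rgb += w * vec3(plane0[idx], plane1[idx], plane2[idx]);
    }
}

void main() {
    // Illustrative driver only; base and fcoord are per-invocation values
    // in the real shader.
    vec4 color = vec4(0.0);
    float wsum = 0.0;
    tap(ivec2(0), vec2(0.5), ivec2(1, 2), color, wsum);
    tap(ivec2(0), vec2(0.5), ivec2(2, 2), color, wsum);
    color /= max(wsum, 1e-6);    // the dump divides by wsum unguarded
}

After the final tap (the constant 217 above), the IR normalizes by wsum, forces alpha to 1.0, clamps, and remaps each channel through a logistic curve: since 1.442695 is approximately log2(e), the exp2(... * 1.442695) computes exp(6.5 * (0.75 - c)), so the assignment evaluates (1 / (1 + exp(6.5 * (0.75 - c))) - 0.007577) / 0.827906, i.e. a sigmoid with center 0.75 and slope 6.5 (consistent with mpv's sigmoid-center/sigmoid-slope defaults), before re-encoding with pow(c, 0.416667), roughly c^(1/2.4), and issuing the image store.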
NIR (SSA form) for compute shader:
shader: MESA_SHADER_COMPUTE
name: GLSL19
local-size: 32, 8, 1
shared-size: 5040
inputs: 0
outputs: 0
uniforms: 80
shared: 0
decl_var ubo INTERP_MODE_NONE vec2 tex_scale0 (7, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 out_scale (6, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 pixel_size0 (5, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 texture_off0 (4, 0, 0)
decl_var ubo INTERP_MODE_NONE mat2 texture_rot0 (3, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 texture_size0 (2, 0, 0)
decl_var ubo INTERP_MODE_NONE vec3 dst_luma (1, 0, 0)
decl_var ubo INTERP_MODE_NONE vec3 src_luma (0, 0, 0)
decl_var uniform INTERP_MODE_NONE sampler1D lut (8, 0, 0)
decl_var uniform INTERP_MODE_NONE writeonly GL_RGBA16F image2D out_image (9, 0, 0)
decl_var uniform INTERP_MODE_NONE sampler2D texture0 (10, 0, 1)
decl_function main (0 params)
impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x00000040 /* 0.000000 */) vec1 32 ssa_2 = load_const (0x00000060 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_4 = load_const (0x00000020 /* 0.000000 */) vec1 32 ssa_5 = load_const (0xbf000000 /* -0.500000 */) vec1 32 ssa_6 = load_const (0x0000000e /* 0.000000 */) vec1 32 ssa_7 = load_const (0x0000001e /* 0.000000 */) vec1 32 ssa_8 = load_const (0xfffffffd /* -nan */) vec1 32 ssa_9 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_10 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_11 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_12 = load_const (0xc0400000 /* -3.000000 */) vec1 32 ssa_13 = load_const (0x404217e3 /* 3.032708 */) vec1 32 ssa_14 = load_const (0x3c000000 /* 0.007812 */) vec1 32 ssa_15 = load_const (0x3f7e0000 /* 0.992188 */) vec1 32 ssa_16 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_17 = load_const (0x0000001f /* 0.000000 */) vec1 32 ssa_18 = load_const (0xc0000000 /* -2.000000 */) vec1 32 ssa_19 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_20 = load_const (0x00000021 /* 0.000000 */) vec1 32 ssa_21 = load_const (0x00000022 /* 0.000000 */) vec1 32 ssa_22 = load_const (0x00000023 /* 0.000000 */) vec1 32 ssa_23 = load_const (0x40000000 /* 2.000000 */) vec1 32 ssa_24 = load_const (0x00000024 /* 0.000000 */) vec1 32 ssa_25 = load_const (0x40400000 /* 3.000000 */) vec1 32 ssa_26 = load_const (0x0000003d /* 0.000000 */) vec1 32 ssa_27 = load_const (0x0000003e /* 0.000000 */) vec1 32 ssa_28 = load_const (0x0000003f /* 0.000000 */) vec1 32 ssa_29 = load_const (0x00000041 /* 0.000000 */) vec1 32 ssa_30 = load_const (0x00000042 /* 0.000000 */) vec1 32 ssa_31 = load_const (0x0000005a /* 0.000000 */) vec1 32 ssa_32 = load_const (0x0000005b /* 0.000000 */) vec1 32 ssa_33 = load_const (0x0000005c /* 0.000000 */) vec1 32 ssa_34 = load_const (0x0000005d /* 0.000000 */) vec1 32 ssa_35 = load_const (0x0000005e /* 0.000000 */) vec1 32 ssa_36 = load_const (0x0000005f /* 0.000000 */) vec1 32 ssa_37 = load_const (0x00000061 /* 0.000000 */) vec1 32 ssa_38 = load_const (0x40800000 /* 4.000000 */) vec1 32 ssa_39 = load_const (0x00000078 /* 0.000000 */) vec1 32 ssa_40 = load_const (0x00000079 /* 0.000000 */) vec1 32 ssa_41 = load_const (0x0000007a /* 0.000000 */) vec1 32 ssa_42 = load_const (0x0000007b /* 0.000000 */) vec1 32 ssa_43 = load_const (0x0000007c /* 0.000000 */) vec1 32 ssa_44 = load_const (0x0000007d /* 0.000000 */) vec1 32 ssa_45 = load_const (0x0000007e /* 0.000000 */) vec1 32 ssa_46 = load_const (0x0000007f /* 0.000000 */) vec1 32 ssa_47 = load_const (0x00000097 /* 0.000000 */) vec1 32 ssa_48 = load_const (0x00000098 /* 0.000000 */) vec1 32 ssa_49 = load_const (0x00000099 /* 0.000000 */) vec1 32 ssa_50 = load_const (0x0000009a /* 0.000000 */) vec1 32 ssa_51 = load_const (0x0000009b /* 0.000000 */) vec1 32 ssa_52 = load_const (0x0000009c /* 0.000000 */) vec1 32 ssa_53 = load_const (0x000000b5 /* 0.000000 */) vec1 32 ssa_54 = load_const (0x000000b6 /* 0.000000 */) vec1 32 ssa_55 = load_const (0x000000b7 /* 0.000000 */) vec1 32 ssa_56 = load_const (0x000000b8 /* 0.000000 */) vec1 32 ssa_57 = load_const (0x000000b9 /* 0.000000 */) vec1 32 ssa_58 = load_const (0x000000ba /* 0.000000 */) vec1 32 ssa_59 = load_const (0x000000d5 /* 0.000000 */) vec1 32 ssa_60 = load_const (0x000000d6 /* 0.000000 */) vec1 32 ssa_61 = load_const (0x3f400000 /* 0.750000 */) vec1 32 ssa_62 = load_const (0xbbf8487c /* -0.007577 */) vec1 32 ssa_63 = load_const (0x3ed55555 /* 0.416667 */) vec1 32 ssa_64 = load_const (0x00000030 /* 0.000000 */) vec2 32 ssa_65 = intrinsic load_ubo (ssa_0, ssa_64) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec2 32 ssa_66 = intrinsic load_ubo (ssa_0, ssa_1) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_67 = load_const (0x00000068 /* 0.000000 */) vec2 32 ssa_68 = intrinsic load_ubo (ssa_0, ssa_67) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec2 32 ssa_69 = intrinsic load_ubo (ssa_0, ssa_2) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_70 = load_const (0x00000058 /* 0.000000 */) vec2 32 ssa_71 = intrinsic load_ubo (ssa_0, ssa_70) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_72 = load_const (0x00000050 /* 0.000000 */) vec2 32 ssa_73 = intrinsic
load_ubo (ssa_0, ssa_72) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec3 32 ssa_74 = intrinsic load_work_group_id () () vec1 32 ssa_75 = intrinsic load_subgroup_id () () vec1 32 ssa_76 = ishl ssa_75, ssa_11 vec1 32 ssa_77 = intrinsic load_subgroup_invocation () () vec1 32 ssa_78 = iadd ssa_77, ssa_76 vec1 32 ssa_79 = iand ssa_78, ssa_17 vec1 32 ssa_80 = load_const (0x00000005 /* 0.000000 */) vec1 32 ssa_81 = ushr ssa_78, ssa_80 vec1 32 ssa_82 = load_const (0x00000007 /* 0.000000 */) vec1 32 ssa_83 = iand ssa_81, ssa_82 vec1 32 ssa_84 = ishl ssa_74.x, ssa_80 vec1 32 ssa_85 = ishl ssa_74.y, ssa_11 vec1 32 ssa_86 = iadd ssa_84, ssa_79 vec1 32 ssa_87 = iadd ssa_85, ssa_83 vec1 32 ssa_88 = u2f32 ssa_86 vec1 32 ssa_89 = u2f32 ssa_87 vec1 32 ssa_90 = fadd ssa_88, ssa_3 vec1 32 ssa_91 = fadd ssa_89, ssa_3 vec1 32 ssa_92 = fmul ssa_69.x, ssa_90 vec1 32 ssa_93 = fmul ssa_69.y, ssa_91 vec1 32 ssa_94 = fmul ssa_68.x, ssa_92 vec1 32 ssa_95 = fmul ssa_68.y, ssa_93 vec1 32 ssa_96 = fmul ssa_66.x, ssa_95 vec1 32 ssa_97 = fmul ssa_66.y, ssa_95 vec1 32 ssa_98 = ffma ssa_65.x, ssa_94, ssa_96 vec1 32 ssa_99 = ffma ssa_65.y, ssa_94, ssa_97 vec1 32 ssa_100 = ffma ssa_71.x, ssa_73.x, ssa_98 vec1 32 ssa_101 = ffma ssa_71.y, ssa_73.y, ssa_99 vec1 32 ssa_102 = u2f32 ssa_84 vec1 32 ssa_103 = u2f32 ssa_85 vec1 32 ssa_104 = fadd ssa_102, ssa_3 vec1 32 ssa_105 = fadd ssa_103, ssa_3 vec1 32 ssa_106 = fmul ssa_69.x, ssa_104 vec1 32 ssa_107 = fmul ssa_69.y, ssa_105 vec1 32 ssa_108 = fmul ssa_68.x, ssa_106 vec1 32 ssa_109 = fmul ssa_68.y, ssa_107 vec1 32 ssa_110 = fmul ssa_66.x, ssa_109 vec1 32 ssa_111 = fmul ssa_66.y, ssa_109 vec1 32 ssa_112 = ffma ssa_65.x, ssa_108, ssa_110 vec1 32 ssa_113 = ffma ssa_65.y, ssa_108, ssa_111 vec1 32 ssa_114 = ffma ssa_71.x, ssa_73.x, ssa_112 vec1 32 ssa_115 = ffma ssa_71.y, ssa_73.y, ssa_113 vec2 32 ssa_116 = intrinsic load_ubo (ssa_0, ssa_4) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_117 = ffma ssa_114, ssa_116.x, ssa_5 vec1 32 ssa_118 = ffma ssa_115, ssa_116.y, ssa_5 vec1 32 ssa_119 = ffract ssa_117 vec1 32 ssa_120 = ffract ssa_118 vec1 32 ssa_121 = ffma -ssa_71.x, ssa_119, ssa_114 vec1 32 ssa_122 = ffma -ssa_71.y, ssa_120, ssa_115 vec1 32 ssa_123 = ffma ssa_100, ssa_116.x, ssa_5 vec1 32 ssa_124 = ffma ssa_101, ssa_116.y, ssa_5 vec1 32 ssa_125 = ffract ssa_123 vec1 32 ssa_126 = ffract ssa_124 vec1 32 ssa_127 = ffma -ssa_71.x, ssa_125, ssa_100 vec1 32 ssa_128 = ffma -ssa_71.y, ssa_126, ssa_101 vec1 32 ssa_129 = fadd ssa_127, -ssa_121 vec1 32 ssa_130 = fadd ssa_128, -ssa_122 vec1 32 ssa_131 = fmul ssa_129, ssa_116.x vec1 32 ssa_132 = fmul ssa_130, ssa_116.y vec1 32 ssa_133 = fround_even ssa_131 vec1 32 ssa_134 = fround_even ssa_132 vec1 32 ssa_135 = f2i32 ssa_133 vec1 32 ssa_136 = f2i32 ssa_134 /* succs: block_1 */ loop { block block_1: /* preds: block_0 block_9 */ vec1 32 ssa_137 = phi block_0: ssa_83, block_9: ssa_161 vec1 32 ssa_138 = ige32 ssa_137, ssa_6 /* succs: block_2 block_3 */ if ssa_138 { block block_2: /* preds: block_1 */ break /* succs: block_10 */ } else { block block_3: /* preds: block_1 */ /* succs: block_4 */ } block block_4: /* preds: block_3 */ /* succs: block_5 */ loop { block block_5: /* preds: block_4 block_8 */ vec1 32 ssa_139 = phi block_4: ssa_79, block_8: ssa_160 vec1 32 ssa_140 = ige32 ssa_139, ssa_7 /* succs: block_6 block_7 */ if ssa_140 { block block_6: /* preds: block_5 */ break /* succs: block_9 */ } else { block block_7: /* preds: block_5 */ /* succs: block_8 */ } block block_8: /* preds: block_7 */ vec1 32 ssa_141 = iadd ssa_139, ssa_8 
vec1 32 ssa_142 = i2f32 ssa_141 vec1 32 ssa_143 = iadd ssa_137, ssa_8 vec1 32 ssa_144 = i2f32 ssa_143 vec1 32 ssa_145 = ffma ssa_71.x, ssa_142, ssa_121 vec1 32 ssa_146 = ffma ssa_71.y, ssa_144, ssa_122 vec2 32 ssa_147 = vec2 ssa_145, ssa_146 vec4 32 ssa_148 = tex ssa_147 (coord), ssa_0 (lod), 1 (texture), 1 (sampler), vec1 32 ssa_149 = imul ssa_7, ssa_137 vec1 32 ssa_150 = iadd ssa_149, ssa_139 vec1 32 ssa_151 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_152 = ishl ssa_150, ssa_151 vec1 32 ssa_153 = imov ssa_148.x intrinsic store_shared (ssa_153, ssa_152) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_154 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_155 = iadd ssa_154, ssa_152 vec1 32 ssa_156 = imov ssa_148.y intrinsic store_shared (ssa_156, ssa_155) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_157 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_158 = iadd ssa_157, ssa_152 vec1 32 ssa_159 = imov ssa_148.z intrinsic store_shared (ssa_159, ssa_158) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_160 = iadd ssa_139, ssa_4 /* succs: block_5 */ } block block_9: /* preds: block_6 */ vec1 32 ssa_161 = iadd ssa_137, ssa_10 /* succs: block_1 */ } block block_10: /* preds: block_2 */ intrinsic group_memory_barrier () () intrinsic barrier () () vec1 32 ssa_162 = imul ssa_7, ssa_136 vec1 32 ssa_163 = iadd ssa_162, ssa_135 vec1 32 ssa_164 = iadd ssa_163, ssa_11 vec1 32 ssa_165 = fadd ssa_12, -ssa_126 vec1 32 ssa_166 = fmul ssa_165, ssa_165 vec1 32 ssa_167 = ffma ssa_125, ssa_125, ssa_166 vec1 32 ssa_168 = fsqrt ssa_167 vec1 32 ssa_169 = flt32 ssa_168, ssa_13 /* succs: block_11 block_12 */ if ssa_169 { block block_11: /* preds: block_10 */ vec1 32 ssa_170 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_171 = fmul ssa_168, ssa_170 vec1 32 ssa_172 = flrp ssa_14, ssa_15, ssa_171 vec4 32 ssa_173 = tex ssa_172 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_174 = imov ssa_173.x vec1 32 ssa_175 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_176 = ishl ssa_164, ssa_175 vec1 32 ssa_177 = intrinsic load_shared (ssa_176) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_178 = fmul ssa_173.x, ssa_177 vec1 32 ssa_179 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_180 = iadd ssa_179, ssa_176 vec1 32 ssa_181 = intrinsic load_shared (ssa_180) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_182 = fmul ssa_173.x, ssa_181 vec1 32 ssa_183 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_184 = iadd ssa_183, ssa_176 vec1 32 ssa_185 = intrinsic load_shared (ssa_184) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_186 = fmul ssa_173.x, ssa_185 /* succs: block_13 */ } else { block block_12: /* preds: block_10 */ /* succs: block_13 */ } block block_13: /* preds: block_11 block_12 */ vec1 32 ssa_187 = phi block_11: ssa_178, block_12: ssa_0 vec1 32 ssa_188 = phi block_11: ssa_182, block_12: ssa_0 vec1 32 ssa_189 = phi block_11: ssa_186, block_12: ssa_0 vec1 32 ssa_190 = phi block_11: ssa_174, block_12: ssa_0 vec1 32 ssa_191 = iadd ssa_163, ssa_9 vec1 32 ssa_192 = fadd ssa_16, -ssa_125 vec1 32 ssa_193 = ffma ssa_192, ssa_192, ssa_166 vec1 32 ssa_194 = fsqrt ssa_193 vec1 32 ssa_195 = flt32 ssa_194, ssa_13 /* succs: block_14 block_15 */ if ssa_195 { block block_14: /* preds: block_13 */ vec1 32 ssa_196 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_197 = fmul 
ssa_194, ssa_196 vec1 32 ssa_198 = flrp ssa_14, ssa_15, ssa_197 vec4 32 ssa_199 = tex ssa_198 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_200 = fadd ssa_190, ssa_199.x vec1 32 ssa_201 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_202 = ishl ssa_191, ssa_201 vec1 32 ssa_203 = intrinsic load_shared (ssa_202) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_204 = ffma ssa_199.x, ssa_203, ssa_187 vec1 32 ssa_205 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_206 = iadd ssa_205, ssa_202 vec1 32 ssa_207 = intrinsic load_shared (ssa_206) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_208 = ffma ssa_199.x, ssa_207, ssa_188 vec1 32 ssa_209 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_210 = iadd ssa_209, ssa_202 vec1 32 ssa_211 = intrinsic load_shared (ssa_210) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_212 = ffma ssa_199.x, ssa_211, ssa_189 /* succs: block_16 */ } else { block block_15: /* preds: block_13 */ /* succs: block_16 */ } block block_16: /* preds: block_14 block_15 */ vec1 32 ssa_213 = phi block_14: ssa_204, block_15: ssa_187 vec1 32 ssa_214 = phi block_14: ssa_208, block_15: ssa_188 vec1 32 ssa_215 = phi block_14: ssa_212, block_15: ssa_189 vec1 32 ssa_216 = phi block_14: ssa_200, block_15: ssa_190 vec1 32 ssa_217 = iadd ssa_163, ssa_17 vec1 32 ssa_218 = fadd ssa_18, -ssa_125 vec1 32 ssa_219 = fadd ssa_18, -ssa_126 vec1 32 ssa_220 = fmul ssa_219, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = fsqrt ssa_221 vec1 32 ssa_223 = flt32 ssa_222, ssa_13 /* succs: block_17 block_18 */ if ssa_223 { block block_17: /* preds: block_16 */ vec1 32 ssa_224 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_225 = fmul ssa_222, ssa_224 vec1 32 ssa_226 = flrp ssa_14, ssa_15, ssa_225 vec4 32 ssa_227 = tex ssa_226 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_228 = fadd ssa_216, ssa_227.x vec1 32 ssa_229 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_230 = ishl ssa_217, ssa_229 vec1 32 ssa_231 = intrinsic load_shared (ssa_230) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_232 = ffma ssa_227.x, ssa_231, ssa_213 vec1 32 ssa_233 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_234 = iadd ssa_233, ssa_230 vec1 32 ssa_235 = intrinsic load_shared (ssa_234) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_236 = ffma ssa_227.x, ssa_235, ssa_214 vec1 32 ssa_237 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_238 = iadd ssa_237, ssa_230 vec1 32 ssa_239 = intrinsic load_shared (ssa_238) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_240 = ffma ssa_227.x, ssa_239, ssa_215 /* succs: block_19 */ } else { block block_18: /* preds: block_16 */ /* succs: block_19 */ } block block_19: /* preds: block_17 block_18 */ vec1 32 ssa_241 = phi block_17: ssa_232, block_18: ssa_213 vec1 32 ssa_242 = phi block_17: ssa_236, block_18: ssa_214 vec1 32 ssa_243 = phi block_17: ssa_240, block_18: ssa_215 vec1 32 ssa_244 = phi block_17: ssa_228, block_18: ssa_216 vec1 32 ssa_245 = iadd ssa_163, ssa_4 vec1 32 ssa_246 = fadd ssa_19, -ssa_125 vec1 32 ssa_247 = ffma ssa_246, ssa_246, ssa_220 vec1 32 ssa_248 = fsqrt ssa_247 vec1 32 ssa_249 = flt32 ssa_248, ssa_13 /* succs: block_20 block_21 */ if ssa_249 { block block_20: /* preds: block_19 */ vec1 32 ssa_250 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_251 = fmul ssa_248, ssa_250 vec1 32 ssa_252 = flrp ssa_14, ssa_15, ssa_251 vec4 32 
ssa_253 = tex ssa_252 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_254 = fadd ssa_244, ssa_253.x vec1 32 ssa_255 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_256 = ishl ssa_245, ssa_255 vec1 32 ssa_257 = intrinsic load_shared (ssa_256) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_258 = ffma ssa_253.x, ssa_257, ssa_241 vec1 32 ssa_259 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_260 = iadd ssa_259, ssa_256 vec1 32 ssa_261 = intrinsic load_shared (ssa_260) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_262 = ffma ssa_253.x, ssa_261, ssa_242 vec1 32 ssa_263 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_264 = iadd ssa_263, ssa_256 vec1 32 ssa_265 = intrinsic load_shared (ssa_264) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_266 = ffma ssa_253.x, ssa_265, ssa_243 /* succs: block_22 */ } else { block block_21: /* preds: block_19 */ /* succs: block_22 */ } block block_22: /* preds: block_20 block_21 */ vec1 32 ssa_267 = phi block_20: ssa_258, block_21: ssa_241 vec1 32 ssa_268 = phi block_20: ssa_262, block_21: ssa_242 vec1 32 ssa_269 = phi block_20: ssa_266, block_21: ssa_243 vec1 32 ssa_270 = phi block_20: ssa_254, block_21: ssa_244 vec1 32 ssa_271 = iadd ssa_163, ssa_20 vec1 32 ssa_272 = ffma ssa_125, ssa_125, ssa_220 vec1 32 ssa_273 = fsqrt ssa_272 vec1 32 ssa_274 = flt32 ssa_273, ssa_13 /* succs: block_23 block_24 */ if ssa_274 { block block_23: /* preds: block_22 */ vec1 32 ssa_275 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_276 = fmul ssa_273, ssa_275 vec1 32 ssa_277 = flrp ssa_14, ssa_15, ssa_276 vec4 32 ssa_278 = tex ssa_277 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_279 = fadd ssa_270, ssa_278.x vec1 32 ssa_280 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_281 = ishl ssa_271, ssa_280 vec1 32 ssa_282 = intrinsic load_shared (ssa_281) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_283 = ffma ssa_278.x, ssa_282, ssa_267 vec1 32 ssa_284 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_285 = iadd ssa_284, ssa_281 vec1 32 ssa_286 = intrinsic load_shared (ssa_285) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_287 = ffma ssa_278.x, ssa_286, ssa_268 vec1 32 ssa_288 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_289 = iadd ssa_288, ssa_281 vec1 32 ssa_290 = intrinsic load_shared (ssa_289) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_291 = ffma ssa_278.x, ssa_290, ssa_269 /* succs: block_25 */ } else { block block_24: /* preds: block_22 */ /* succs: block_25 */ } block block_25: /* preds: block_23 block_24 */ vec1 32 ssa_292 = phi block_23: ssa_283, block_24: ssa_267 vec1 32 ssa_293 = phi block_23: ssa_287, block_24: ssa_268 vec1 32 ssa_294 = phi block_23: ssa_291, block_24: ssa_269 vec1 32 ssa_295 = phi block_23: ssa_279, block_24: ssa_270 vec1 32 ssa_296 = iadd ssa_163, ssa_21 vec1 32 ssa_297 = ffma ssa_192, ssa_192, ssa_220 vec1 32 ssa_298 = fsqrt ssa_297 vec1 32 ssa_299 = flt32 ssa_298, ssa_13 /* succs: block_26 block_27 */ if ssa_299 { block block_26: /* preds: block_25 */ vec1 32 ssa_300 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_301 = fmul ssa_298, ssa_300 vec1 32 ssa_302 = flrp ssa_14, ssa_15, ssa_301 vec4 32 ssa_303 = tex ssa_302 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_304 = fadd ssa_295, ssa_303.x vec1 32 ssa_305 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_306 = ishl ssa_296, ssa_305 vec1 32 ssa_307 = 
intrinsic load_shared (ssa_306) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_308 = ffma ssa_303.x, ssa_307, ssa_292 vec1 32 ssa_309 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_310 = iadd ssa_309, ssa_306 vec1 32 ssa_311 = intrinsic load_shared (ssa_310) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_312 = ffma ssa_303.x, ssa_311, ssa_293 vec1 32 ssa_313 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_314 = iadd ssa_313, ssa_306 vec1 32 ssa_315 = intrinsic load_shared (ssa_314) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_316 = ffma ssa_303.x, ssa_315, ssa_294 /* succs: block_28 */ } else { block block_27: /* preds: block_25 */ /* succs: block_28 */ } block block_28: /* preds: block_26 block_27 */ vec1 32 ssa_317 = phi block_26: ssa_308, block_27: ssa_292 vec1 32 ssa_318 = phi block_26: ssa_312, block_27: ssa_293 vec1 32 ssa_319 = phi block_26: ssa_316, block_27: ssa_294 vec1 32 ssa_320 = phi block_26: ssa_304, block_27: ssa_295 vec1 32 ssa_321 = iadd ssa_163, ssa_22 vec1 32 ssa_322 = fadd ssa_23, -ssa_125 vec1 32 ssa_323 = ffma ssa_322, ssa_322, ssa_220 vec1 32 ssa_324 = fsqrt ssa_323 vec1 32 ssa_325 = flt32 ssa_324, ssa_13 /* succs: block_29 block_30 */ if ssa_325 { block block_29: /* preds: block_28 */ vec1 32 ssa_326 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_327 = fmul ssa_324, ssa_326 vec1 32 ssa_328 = flrp ssa_14, ssa_15, ssa_327 vec4 32 ssa_329 = tex ssa_328 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_330 = fadd ssa_320, ssa_329.x vec1 32 ssa_331 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_332 = ishl ssa_321, ssa_331 vec1 32 ssa_333 = intrinsic load_shared (ssa_332) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_334 = ffma ssa_329.x, ssa_333, ssa_317 vec1 32 ssa_335 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_336 = iadd ssa_335, ssa_332 vec1 32 ssa_337 = intrinsic load_shared (ssa_336) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_338 = ffma ssa_329.x, ssa_337, ssa_318 vec1 32 ssa_339 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_340 = iadd ssa_339, ssa_332 vec1 32 ssa_341 = intrinsic load_shared (ssa_340) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_342 = ffma ssa_329.x, ssa_341, ssa_319 /* succs: block_31 */ } else { block block_30: /* preds: block_28 */ /* succs: block_31 */ } block block_31: /* preds: block_29 block_30 */ vec1 32 ssa_343 = phi block_29: ssa_334, block_30: ssa_317 vec1 32 ssa_344 = phi block_29: ssa_338, block_30: ssa_318 vec1 32 ssa_345 = phi block_29: ssa_342, block_30: ssa_319 vec1 32 ssa_346 = phi block_29: ssa_330, block_30: ssa_320 vec1 32 ssa_347 = iadd ssa_163, ssa_24 vec1 32 ssa_348 = fadd ssa_25, -ssa_125 vec1 32 ssa_349 = ffma ssa_348, ssa_348, ssa_220 vec1 32 ssa_350 = fsqrt ssa_349 vec1 32 ssa_351 = flt32 ssa_350, ssa_13 /* succs: block_32 block_33 */ if ssa_351 { block block_32: /* preds: block_31 */ vec1 32 ssa_352 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_353 = fmul ssa_350, ssa_352 vec1 32 ssa_354 = flrp ssa_14, ssa_15, ssa_353 vec4 32 ssa_355 = tex ssa_354 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_356 = fadd ssa_346, ssa_355.x vec1 32 ssa_357 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_358 = ishl ssa_347, ssa_357 vec1 32 ssa_359 = intrinsic load_shared (ssa_358) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_360 = ffma ssa_355.x, ssa_359, ssa_343 vec1 
32 ssa_361 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_362 = iadd ssa_361, ssa_358 vec1 32 ssa_363 = intrinsic load_shared (ssa_362) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_364 = ffma ssa_355.x, ssa_363, ssa_344 vec1 32 ssa_365 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_366 = iadd ssa_365, ssa_358 vec1 32 ssa_367 = intrinsic load_shared (ssa_366) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_368 = ffma ssa_355.x, ssa_367, ssa_345 /* succs: block_34 */ } else { block block_33: /* preds: block_31 */ /* succs: block_34 */ } block block_34: /* preds: block_32 block_33 */ vec1 32 ssa_369 = phi block_32: ssa_360, block_33: ssa_343 vec1 32 ssa_370 = phi block_32: ssa_364, block_33: ssa_344 vec1 32 ssa_371 = phi block_32: ssa_368, block_33: ssa_345 vec1 32 ssa_372 = phi block_32: ssa_356, block_33: ssa_346 vec1 32 ssa_373 = iadd ssa_163, ssa_26 vec1 32 ssa_374 = fadd ssa_19, -ssa_126 vec1 32 ssa_375 = fmul ssa_374, ssa_374 vec1 32 ssa_376 = ffma ssa_218, ssa_218, ssa_375 vec1 32 ssa_377 = fsqrt ssa_376 vec1 32 ssa_378 = flt32 ssa_377, ssa_13 /* succs: block_35 block_36 */ if ssa_378 { block block_35: /* preds: block_34 */ vec1 32 ssa_379 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_380 = fmul ssa_377, ssa_379 vec1 32 ssa_381 = flrp ssa_14, ssa_15, ssa_380 vec4 32 ssa_382 = tex ssa_381 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_383 = fadd ssa_372, ssa_382.x vec1 32 ssa_384 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_385 = ishl ssa_373, ssa_384 vec1 32 ssa_386 = intrinsic load_shared (ssa_385) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_387 = ffma ssa_382.x, ssa_386, ssa_369 vec1 32 ssa_388 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_389 = iadd ssa_388, ssa_385 vec1 32 ssa_390 = intrinsic load_shared (ssa_389) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_391 = ffma ssa_382.x, ssa_390, ssa_370 vec1 32 ssa_392 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_393 = iadd ssa_392, ssa_385 vec1 32 ssa_394 = intrinsic load_shared (ssa_393) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_395 = ffma ssa_382.x, ssa_394, ssa_371 /* succs: block_37 */ } else { block block_36: /* preds: block_34 */ /* succs: block_37 */ } block block_37: /* preds: block_35 block_36 */ vec1 32 ssa_396 = phi block_35: ssa_387, block_36: ssa_369 vec1 32 ssa_397 = phi block_35: ssa_391, block_36: ssa_370 vec1 32 ssa_398 = phi block_35: ssa_395, block_36: ssa_371 vec1 32 ssa_399 = phi block_35: ssa_383, block_36: ssa_372 vec1 32 ssa_400 = iadd ssa_163, ssa_27 vec1 32 ssa_401 = ffma ssa_246, ssa_246, ssa_375 vec1 32 ssa_402 = fsqrt ssa_401 vec1 32 ssa_403 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_404 = fmul ssa_402, ssa_403 vec1 32 ssa_405 = flrp ssa_14, ssa_15, ssa_404 vec4 32 ssa_406 = tex ssa_405 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_407 = fadd ssa_399, ssa_406.x vec1 32 ssa_408 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_409 = ishl ssa_400, ssa_408 vec1 32 ssa_410 = intrinsic load_shared (ssa_409) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_411 = ffma ssa_406.x, ssa_410, ssa_396 vec1 32 ssa_412 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_413 = iadd ssa_412, ssa_409 vec1 32 ssa_414 = intrinsic load_shared (ssa_413) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_415 = ffma ssa_406.x, ssa_414, ssa_397 vec1 32 ssa_416 = 
load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_417 = iadd ssa_416, ssa_409 vec1 32 ssa_418 = intrinsic load_shared (ssa_417) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_419 = ffma ssa_406.x, ssa_418, ssa_398 vec1 32 ssa_420 = iadd ssa_163, ssa_28 vec1 32 ssa_421 = ffma ssa_125, ssa_125, ssa_375 vec1 32 ssa_422 = fsqrt ssa_421 vec1 32 ssa_423 = fmul ssa_422, ssa_403 vec1 32 ssa_424 = flrp ssa_14, ssa_15, ssa_423 vec4 32 ssa_425 = tex ssa_424 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_426 = fadd ssa_407, ssa_425.x vec1 32 ssa_427 = ishl ssa_420, ssa_408 vec1 32 ssa_428 = intrinsic load_shared (ssa_427) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_429 = ffma ssa_425.x, ssa_428, ssa_411 vec1 32 ssa_430 = iadd ssa_412, ssa_427 vec1 32 ssa_431 = intrinsic load_shared (ssa_430) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_432 = ffma ssa_425.x, ssa_431, ssa_415 vec1 32 ssa_433 = iadd ssa_416, ssa_427 vec1 32 ssa_434 = intrinsic load_shared (ssa_433) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_435 = ffma ssa_425.x, ssa_434, ssa_419 vec1 32 ssa_436 = iadd ssa_163, ssa_1 vec1 32 ssa_437 = ffma ssa_192, ssa_192, ssa_375 vec1 32 ssa_438 = fsqrt ssa_437 vec1 32 ssa_439 = fmul ssa_438, ssa_403 vec1 32 ssa_440 = flrp ssa_14, ssa_15, ssa_439 vec4 32 ssa_441 = tex ssa_440 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_442 = fadd ssa_426, ssa_441.x vec1 32 ssa_443 = ishl ssa_436, ssa_408 vec1 32 ssa_444 = intrinsic load_shared (ssa_443) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_445 = ffma ssa_441.x, ssa_444, ssa_429 vec1 32 ssa_446 = iadd ssa_412, ssa_443 vec1 32 ssa_447 = intrinsic load_shared (ssa_446) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_448 = ffma ssa_441.x, ssa_447, ssa_432 vec1 32 ssa_449 = iadd ssa_416, ssa_443 vec1 32 ssa_450 = intrinsic load_shared (ssa_449) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_451 = ffma ssa_441.x, ssa_450, ssa_435 vec1 32 ssa_452 = iadd ssa_163, ssa_29 vec1 32 ssa_453 = ffma ssa_322, ssa_322, ssa_375 vec1 32 ssa_454 = fsqrt ssa_453 vec1 32 ssa_455 = fmul ssa_454, ssa_403 vec1 32 ssa_456 = flrp ssa_14, ssa_15, ssa_455 vec4 32 ssa_457 = tex ssa_456 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_458 = fadd ssa_442, ssa_457.x vec1 32 ssa_459 = ishl ssa_452, ssa_408 vec1 32 ssa_460 = intrinsic load_shared (ssa_459) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_461 = ffma ssa_457.x, ssa_460, ssa_445 vec1 32 ssa_462 = iadd ssa_412, ssa_459 vec1 32 ssa_463 = intrinsic load_shared (ssa_462) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_464 = ffma ssa_457.x, ssa_463, ssa_448 vec1 32 ssa_465 = iadd ssa_416, ssa_459 vec1 32 ssa_466 = intrinsic load_shared (ssa_465) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_467 = ffma ssa_457.x, ssa_466, ssa_451 vec1 32 ssa_468 = iadd ssa_163, ssa_30 vec1 32 ssa_469 = ffma ssa_348, ssa_348, ssa_375 vec1 32 ssa_470 = fsqrt ssa_469 vec1 32 ssa_471 = flt32 ssa_470, ssa_13 /* succs: block_38 block_39 */ if ssa_471 { block block_38: /* preds: block_37 */ vec1 32 ssa_472 = fmul ssa_470, ssa_403 vec1 32 ssa_473 = flrp ssa_14, ssa_15, ssa_472 vec4 32 ssa_474 = tex ssa_473 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_475 = fadd ssa_458, ssa_474.x vec1 32 ssa_476 = ishl ssa_468, ssa_408 vec1 32 ssa_477 
= intrinsic load_shared (ssa_476) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_478 = ffma ssa_474.x, ssa_477, ssa_461 vec1 32 ssa_479 = iadd ssa_412, ssa_476 vec1 32 ssa_480 = intrinsic load_shared (ssa_479) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_481 = ffma ssa_474.x, ssa_480, ssa_464 vec1 32 ssa_482 = iadd ssa_416, ssa_476 vec1 32 ssa_483 = intrinsic load_shared (ssa_482) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_484 = ffma ssa_474.x, ssa_483, ssa_467 /* succs: block_40 */ } else { block block_39: /* preds: block_37 */ /* succs: block_40 */ } block block_40: /* preds: block_38 block_39 */ vec1 32 ssa_485 = phi block_38: ssa_478, block_39: ssa_461 vec1 32 ssa_486 = phi block_38: ssa_481, block_39: ssa_464 vec1 32 ssa_487 = phi block_38: ssa_484, block_39: ssa_467 vec1 32 ssa_488 = phi block_38: ssa_475, block_39: ssa_458 vec1 32 ssa_489 = iadd ssa_163, ssa_31 vec1 32 ssa_490 = fadd ssa_12, -ssa_125 vec1 32 ssa_491 = fmul ssa_126, ssa_126 vec1 32 ssa_492 = ffma ssa_490, ssa_490, ssa_491 vec1 32 ssa_493 = fsqrt ssa_492 vec1 32 ssa_494 = flt32 ssa_493, ssa_13 /* succs: block_41 block_42 */ if ssa_494 { block block_41: /* preds: block_40 */ vec1 32 ssa_495 = fmul ssa_493, ssa_403 vec1 32 ssa_496 = flrp ssa_14, ssa_15, ssa_495 vec4 32 ssa_497 = tex ssa_496 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_498 = fadd ssa_488, ssa_497.x vec1 32 ssa_499 = ishl ssa_489, ssa_408 vec1 32 ssa_500 = intrinsic load_shared (ssa_499) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_501 = ffma ssa_497.x, ssa_500, ssa_485 vec1 32 ssa_502 = iadd ssa_412, ssa_499 vec1 32 ssa_503 = intrinsic load_shared (ssa_502) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_504 = ffma ssa_497.x, ssa_503, ssa_486 vec1 32 ssa_505 = iadd ssa_416, ssa_499 vec1 32 ssa_506 = intrinsic load_shared (ssa_505) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_507 = ffma ssa_497.x, ssa_506, ssa_487 /* succs: block_43 */ } else { block block_42: /* preds: block_40 */ /* succs: block_43 */ } block block_43: /* preds: block_41 block_42 */ vec1 32 ssa_508 = phi block_41: ssa_501, block_42: ssa_485 vec1 32 ssa_509 = phi block_41: ssa_504, block_42: ssa_486 vec1 32 ssa_510 = phi block_41: ssa_507, block_42: ssa_487 vec1 32 ssa_511 = phi block_41: ssa_498, block_42: ssa_488 vec1 32 ssa_512 = iadd ssa_163, ssa_32 vec1 32 ssa_513 = ffma ssa_218, ssa_218, ssa_491 vec1 32 ssa_514 = fsqrt ssa_513 vec1 32 ssa_515 = flt32 ssa_514, ssa_13 /* succs: block_44 block_45 */ if ssa_515 { block block_44: /* preds: block_43 */ vec1 32 ssa_516 = fmul ssa_514, ssa_403 vec1 32 ssa_517 = flrp ssa_14, ssa_15, ssa_516 vec4 32 ssa_518 = tex ssa_517 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_519 = fadd ssa_511, ssa_518.x vec1 32 ssa_520 = ishl ssa_512, ssa_408 vec1 32 ssa_521 = intrinsic load_shared (ssa_520) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_522 = ffma ssa_518.x, ssa_521, ssa_508 vec1 32 ssa_523 = iadd ssa_412, ssa_520 vec1 32 ssa_524 = intrinsic load_shared (ssa_523) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_525 = ffma ssa_518.x, ssa_524, ssa_509 vec1 32 ssa_526 = iadd ssa_416, ssa_520 vec1 32 ssa_527 = intrinsic load_shared (ssa_526) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_528 = ffma ssa_518.x, ssa_527, ssa_510 /* succs: block_46 */ } else { block block_45: /* 
preds: block_43 */ /* succs: block_46 */ } block block_46: /* preds: block_44 block_45 */ vec1 32 ssa_529 = phi block_44: ssa_522, block_45: ssa_508 vec1 32 ssa_530 = phi block_44: ssa_525, block_45: ssa_509 vec1 32 ssa_531 = phi block_44: ssa_528, block_45: ssa_510 vec1 32 ssa_532 = phi block_44: ssa_519, block_45: ssa_511 vec1 32 ssa_533 = iadd ssa_163, ssa_33 vec1 32 ssa_534 = ffma ssa_246, ssa_246, ssa_491 vec1 32 ssa_535 = fsqrt ssa_534 vec1 32 ssa_536 = fmul ssa_535, ssa_403 vec1 32 ssa_537 = flrp ssa_14, ssa_15, ssa_536 vec4 32 ssa_538 = tex ssa_537 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_539 = fadd ssa_532, ssa_538.x vec1 32 ssa_540 = ishl ssa_533, ssa_408 vec1 32 ssa_541 = intrinsic load_shared (ssa_540) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_542 = ffma ssa_538.x, ssa_541, ssa_529 vec1 32 ssa_543 = iadd ssa_412, ssa_540 vec1 32 ssa_544 = intrinsic load_shared (ssa_543) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_545 = ffma ssa_538.x, ssa_544, ssa_530 vec1 32 ssa_546 = iadd ssa_416, ssa_540 vec1 32 ssa_547 = intrinsic load_shared (ssa_546) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_548 = ffma ssa_538.x, ssa_547, ssa_531 vec1 32 ssa_549 = iadd ssa_163, ssa_34 vec1 32 ssa_550 = ffma ssa_125, ssa_125, ssa_491 vec1 32 ssa_551 = fsqrt ssa_550 vec1 32 ssa_552 = fmul ssa_551, ssa_403 vec1 32 ssa_553 = flrp ssa_14, ssa_15, ssa_552 vec4 32 ssa_554 = tex ssa_553 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_555 = fadd ssa_539, ssa_554.x vec1 32 ssa_556 = ishl ssa_549, ssa_408 vec1 32 ssa_557 = intrinsic load_shared (ssa_556) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_558 = ffma ssa_554.x, ssa_557, ssa_542 vec1 32 ssa_559 = iadd ssa_412, ssa_556 vec1 32 ssa_560 = intrinsic load_shared (ssa_559) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_561 = ffma ssa_554.x, ssa_560, ssa_545 vec1 32 ssa_562 = iadd ssa_416, ssa_556 vec1 32 ssa_563 = intrinsic load_shared (ssa_562) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_564 = ffma ssa_554.x, ssa_563, ssa_548 vec1 32 ssa_565 = iadd ssa_163, ssa_35 vec1 32 ssa_566 = ffma ssa_192, ssa_192, ssa_491 vec1 32 ssa_567 = fsqrt ssa_566 vec1 32 ssa_568 = fmul ssa_567, ssa_403 vec1 32 ssa_569 = flrp ssa_14, ssa_15, ssa_568 vec4 32 ssa_570 = tex ssa_569 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_571 = fadd ssa_555, ssa_570.x vec1 32 ssa_572 = ishl ssa_565, ssa_408 vec1 32 ssa_573 = intrinsic load_shared (ssa_572) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_574 = ffma ssa_570.x, ssa_573, ssa_558 vec1 32 ssa_575 = iadd ssa_412, ssa_572 vec1 32 ssa_576 = intrinsic load_shared (ssa_575) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_577 = ffma ssa_570.x, ssa_576, ssa_561 vec1 32 ssa_578 = iadd ssa_416, ssa_572 vec1 32 ssa_579 = intrinsic load_shared (ssa_578) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_580 = ffma ssa_570.x, ssa_579, ssa_564 vec1 32 ssa_581 = iadd ssa_163, ssa_36 vec1 32 ssa_582 = ffma ssa_322, ssa_322, ssa_491 vec1 32 ssa_583 = fsqrt ssa_582 vec1 32 ssa_584 = fmul ssa_583, ssa_403 vec1 32 ssa_585 = flrp ssa_14, ssa_15, ssa_584 vec4 32 ssa_586 = tex ssa_585 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_587 = fadd ssa_571, ssa_586.x vec1 32 ssa_588 = ishl ssa_581, ssa_408 vec1 32 ssa_589 = intrinsic load_shared (ssa_588) (0, 
4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_590 = ffma ssa_586.x, ssa_589, ssa_574 vec1 32 ssa_591 = iadd ssa_412, ssa_588 vec1 32 ssa_592 = intrinsic load_shared (ssa_591) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_593 = ffma ssa_586.x, ssa_592, ssa_577 vec1 32 ssa_594 = iadd ssa_416, ssa_588 vec1 32 ssa_595 = intrinsic load_shared (ssa_594) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_596 = ffma ssa_586.x, ssa_595, ssa_580 vec1 32 ssa_597 = iadd ssa_163, ssa_2 vec1 32 ssa_598 = ffma ssa_348, ssa_348, ssa_491 vec1 32 ssa_599 = fsqrt ssa_598 vec1 32 ssa_600 = flt32 ssa_599, ssa_13 /* succs: block_47 block_48 */ if ssa_600 { block block_47: /* preds: block_46 */ vec1 32 ssa_601 = fmul ssa_599, ssa_403 vec1 32 ssa_602 = flrp ssa_14, ssa_15, ssa_601 vec4 32 ssa_603 = tex ssa_602 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_604 = fadd ssa_587, ssa_603.x vec1 32 ssa_605 = ishl ssa_597, ssa_408 vec1 32 ssa_606 = intrinsic load_shared (ssa_605) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_607 = ffma ssa_603.x, ssa_606, ssa_590 vec1 32 ssa_608 = iadd ssa_412, ssa_605 vec1 32 ssa_609 = intrinsic load_shared (ssa_608) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_610 = ffma ssa_603.x, ssa_609, ssa_593 vec1 32 ssa_611 = iadd ssa_416, ssa_605 vec1 32 ssa_612 = intrinsic load_shared (ssa_611) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_613 = ffma ssa_603.x, ssa_612, ssa_596 /* succs: block_49 */ } else { block block_48: /* preds: block_46 */ /* succs: block_49 */ } block block_49: /* preds: block_47 block_48 */ vec1 32 ssa_614 = phi block_47: ssa_607, block_48: ssa_590 vec1 32 ssa_615 = phi block_47: ssa_610, block_48: ssa_593 vec1 32 ssa_616 = phi block_47: ssa_613, block_48: ssa_596 vec1 32 ssa_617 = phi block_47: ssa_604, block_48: ssa_587 vec1 32 ssa_618 = iadd ssa_163, ssa_37 vec1 32 ssa_619 = fadd ssa_38, -ssa_125 vec1 32 ssa_620 = ffma ssa_619, ssa_619, ssa_491 vec1 32 ssa_621 = fsqrt ssa_620 vec1 32 ssa_622 = flt32 ssa_621, ssa_13 /* succs: block_50 block_51 */ if ssa_622 { block block_50: /* preds: block_49 */ vec1 32 ssa_623 = fmul ssa_621, ssa_403 vec1 32 ssa_624 = flrp ssa_14, ssa_15, ssa_623 vec4 32 ssa_625 = tex ssa_624 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_626 = fadd ssa_617, ssa_625.x vec1 32 ssa_627 = ishl ssa_618, ssa_408 vec1 32 ssa_628 = intrinsic load_shared (ssa_627) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_629 = ffma ssa_625.x, ssa_628, ssa_614 vec1 32 ssa_630 = iadd ssa_412, ssa_627 vec1 32 ssa_631 = intrinsic load_shared (ssa_630) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_632 = ffma ssa_625.x, ssa_631, ssa_615 vec1 32 ssa_633 = iadd ssa_416, ssa_627 vec1 32 ssa_634 = intrinsic load_shared (ssa_633) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_635 = ffma ssa_625.x, ssa_634, ssa_616 /* succs: block_52 */ } else { block block_51: /* preds: block_49 */ /* succs: block_52 */ } block block_52: /* preds: block_50 block_51 */ vec1 32 ssa_636 = phi block_50: ssa_629, block_51: ssa_614 vec1 32 ssa_637 = phi block_50: ssa_632, block_51: ssa_615 vec1 32 ssa_638 = phi block_50: ssa_635, block_51: ssa_616 vec1 32 ssa_639 = phi block_50: ssa_626, block_51: ssa_617 vec1 32 ssa_640 = iadd ssa_163, ssa_39 vec1 32 ssa_641 = fadd ssa_16, -ssa_126 vec1 32 ssa_642 = fmul ssa_641, ssa_641 vec1 32 
ssa_643 = ffma ssa_490, ssa_490, ssa_642 vec1 32 ssa_644 = fsqrt ssa_643 vec1 32 ssa_645 = flt32 ssa_644, ssa_13 /* succs: block_53 block_54 */ if ssa_645 { block block_53: /* preds: block_52 */ vec1 32 ssa_646 = fmul ssa_644, ssa_403 vec1 32 ssa_647 = flrp ssa_14, ssa_15, ssa_646 vec4 32 ssa_648 = tex ssa_647 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_649 = fadd ssa_639, ssa_648.x vec1 32 ssa_650 = ishl ssa_640, ssa_408 vec1 32 ssa_651 = intrinsic load_shared (ssa_650) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_652 = ffma ssa_648.x, ssa_651, ssa_636 vec1 32 ssa_653 = iadd ssa_412, ssa_650 vec1 32 ssa_654 = intrinsic load_shared (ssa_653) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_655 = ffma ssa_648.x, ssa_654, ssa_637 vec1 32 ssa_656 = iadd ssa_416, ssa_650 vec1 32 ssa_657 = intrinsic load_shared (ssa_656) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_658 = ffma ssa_648.x, ssa_657, ssa_638 /* succs: block_55 */ } else { block block_54: /* preds: block_52 */ /* succs: block_55 */ } block block_55: /* preds: block_53 block_54 */ vec1 32 ssa_659 = phi block_53: ssa_652, block_54: ssa_636 vec1 32 ssa_660 = phi block_53: ssa_655, block_54: ssa_637 vec1 32 ssa_661 = phi block_53: ssa_658, block_54: ssa_638 vec1 32 ssa_662 = phi block_53: ssa_649, block_54: ssa_639 vec1 32 ssa_663 = iadd ssa_163, ssa_40 vec1 32 ssa_664 = ffma ssa_218, ssa_218, ssa_642 vec1 32 ssa_665 = fsqrt ssa_664 vec1 32 ssa_666 = flt32 ssa_665, ssa_13 /* succs: block_56 block_57 */ if ssa_666 { block block_56: /* preds: block_55 */ vec1 32 ssa_667 = fmul ssa_665, ssa_403 vec1 32 ssa_668 = flrp ssa_14, ssa_15, ssa_667 vec4 32 ssa_669 = tex ssa_668 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_670 = fadd ssa_662, ssa_669.x vec1 32 ssa_671 = ishl ssa_663, ssa_408 vec1 32 ssa_672 = intrinsic load_shared (ssa_671) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_673 = ffma ssa_669.x, ssa_672, ssa_659 vec1 32 ssa_674 = iadd ssa_412, ssa_671 vec1 32 ssa_675 = intrinsic load_shared (ssa_674) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_676 = ffma ssa_669.x, ssa_675, ssa_660 vec1 32 ssa_677 = iadd ssa_416, ssa_671 vec1 32 ssa_678 = intrinsic load_shared (ssa_677) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_679 = ffma ssa_669.x, ssa_678, ssa_661 /* succs: block_58 */ } else { block block_57: /* preds: block_55 */ /* succs: block_58 */ } block block_58: /* preds: block_56 block_57 */ vec1 32 ssa_680 = phi block_56: ssa_673, block_57: ssa_659 vec1 32 ssa_681 = phi block_56: ssa_676, block_57: ssa_660 vec1 32 ssa_682 = phi block_56: ssa_679, block_57: ssa_661 vec1 32 ssa_683 = phi block_56: ssa_670, block_57: ssa_662 vec1 32 ssa_684 = iadd ssa_163, ssa_41 vec1 32 ssa_685 = ffma ssa_246, ssa_246, ssa_642 vec1 32 ssa_686 = fsqrt ssa_685 vec1 32 ssa_687 = fmul ssa_686, ssa_403 vec1 32 ssa_688 = flrp ssa_14, ssa_15, ssa_687 vec4 32 ssa_689 = tex ssa_688 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_690 = fadd ssa_683, ssa_689.x vec1 32 ssa_691 = ishl ssa_684, ssa_408 vec1 32 ssa_692 = intrinsic load_shared (ssa_691) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_693 = ffma ssa_689.x, ssa_692, ssa_680 vec1 32 ssa_694 = iadd ssa_412, ssa_691 vec1 32 ssa_695 = intrinsic load_shared (ssa_694) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_696 = ffma ssa_689.x, ssa_695, ssa_681 
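The group that repeats through these blocks is one tap of the unrolled polar filter: an iadd derives the tap's index into the shared tile, ffma/fsqrt compute the tap's distance d from the fractional sample position, d is scaled by 0.308803 (about 1/3.2383, consistent with a 3-lobe EWA jinc radius) and remapped by flrp between 0.007812 and 0.992188 into the texel centers of what is evidently a 64-entry weight LUT, the weight comes back from a 1-D tex, and three ffma instructions accumulate weight times sample for the three planes cached in shared memory at byte offsets 0x000, 0x690 and 0xd20 (three arrays of 420 floats). Taps that can fall outside the cutoff 3.032708 are wrapped in an if; taps that are always inside are emitted straight-line. A minimal GLSL sketch of one such tap, reconstructed from the IR above (lut, in0/in1/in2 and the accumulators are assumed to exist as in the generated shader; all names are illustrative, not the generator's):

    // One tap of a polar (EWA) resampler, as in the blocks above.
    const float R   = 3.2383;    // 1.0 / 0.308803
    const float CUT = 3.032708;  // load_const 0x404217e3
    void tap(float d2, int idx, inout vec4 color, inout float wsum) {
        float d = sqrt(d2);                      // fsqrt
        if (d < CUT) {                           // flt32 + if block
            float w = texture(lut, mix(0.5/64.0, 63.5/64.0, d / R)).x;
            wsum    += w;                        // fadd
            color.r += w * in0[idx];             // ffma, shared plane +0x000
            color.g += w * in1[idx];             // ffma, shared plane +0x690
            color.b += w * in2[idx];             // ffma, shared plane +0xd20
        }
    }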
vec1 32 ssa_697 = iadd ssa_416, ssa_691 vec1 32 ssa_698 = intrinsic load_shared (ssa_697) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_699 = ffma ssa_689.x, ssa_698, ssa_682 vec1 32 ssa_700 = iadd ssa_163, ssa_42 vec1 32 ssa_701 = ffma ssa_125, ssa_125, ssa_642 vec1 32 ssa_702 = fsqrt ssa_701 vec1 32 ssa_703 = fmul ssa_702, ssa_403 vec1 32 ssa_704 = flrp ssa_14, ssa_15, ssa_703 vec4 32 ssa_705 = tex ssa_704 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_706 = fadd ssa_690, ssa_705.x vec1 32 ssa_707 = ishl ssa_700, ssa_408 vec1 32 ssa_708 = intrinsic load_shared (ssa_707) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_709 = ffma ssa_705.x, ssa_708, ssa_693 vec1 32 ssa_710 = iadd ssa_412, ssa_707 vec1 32 ssa_711 = intrinsic load_shared (ssa_710) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_712 = ffma ssa_705.x, ssa_711, ssa_696 vec1 32 ssa_713 = iadd ssa_416, ssa_707 vec1 32 ssa_714 = intrinsic load_shared (ssa_713) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_715 = ffma ssa_705.x, ssa_714, ssa_699 vec1 32 ssa_716 = iadd ssa_163, ssa_43 vec1 32 ssa_717 = ffma ssa_192, ssa_192, ssa_642 vec1 32 ssa_718 = fsqrt ssa_717 vec1 32 ssa_719 = fmul ssa_718, ssa_403 vec1 32 ssa_720 = flrp ssa_14, ssa_15, ssa_719 vec4 32 ssa_721 = tex ssa_720 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_722 = fadd ssa_706, ssa_721.x vec1 32 ssa_723 = ishl ssa_716, ssa_408 vec1 32 ssa_724 = intrinsic load_shared (ssa_723) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_725 = ffma ssa_721.x, ssa_724, ssa_709 vec1 32 ssa_726 = iadd ssa_412, ssa_723 vec1 32 ssa_727 = intrinsic load_shared (ssa_726) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_728 = ffma ssa_721.x, ssa_727, ssa_712 vec1 32 ssa_729 = iadd ssa_416, ssa_723 vec1 32 ssa_730 = intrinsic load_shared (ssa_729) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_731 = ffma ssa_721.x, ssa_730, ssa_715 vec1 32 ssa_732 = iadd ssa_163, ssa_44 vec1 32 ssa_733 = ffma ssa_322, ssa_322, ssa_642 vec1 32 ssa_734 = fsqrt ssa_733 vec1 32 ssa_735 = fmul ssa_734, ssa_403 vec1 32 ssa_736 = flrp ssa_14, ssa_15, ssa_735 vec4 32 ssa_737 = tex ssa_736 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_738 = fadd ssa_722, ssa_737.x vec1 32 ssa_739 = ishl ssa_732, ssa_408 vec1 32 ssa_740 = intrinsic load_shared (ssa_739) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_741 = ffma ssa_737.x, ssa_740, ssa_725 vec1 32 ssa_742 = iadd ssa_412, ssa_739 vec1 32 ssa_743 = intrinsic load_shared (ssa_742) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_744 = ffma ssa_737.x, ssa_743, ssa_728 vec1 32 ssa_745 = iadd ssa_416, ssa_739 vec1 32 ssa_746 = intrinsic load_shared (ssa_745) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_747 = ffma ssa_737.x, ssa_746, ssa_731 vec1 32 ssa_748 = iadd ssa_163, ssa_45 vec1 32 ssa_749 = ffma ssa_348, ssa_348, ssa_642 vec1 32 ssa_750 = fsqrt ssa_749 vec1 32 ssa_751 = flt32 ssa_750, ssa_13 /* succs: block_59 block_60 */ if ssa_751 { block block_59: /* preds: block_58 */ vec1 32 ssa_752 = fmul ssa_750, ssa_403 vec1 32 ssa_753 = flrp ssa_14, ssa_15, ssa_752 vec4 32 ssa_754 = tex ssa_753 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_755 = fadd ssa_738, ssa_754.x vec1 32 ssa_756 = ishl ssa_748, ssa_408 vec1 32 ssa_757 = intrinsic load_shared (ssa_756) (0, 
4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_758 = ffma ssa_754.x, ssa_757, ssa_741 vec1 32 ssa_759 = iadd ssa_412, ssa_756 vec1 32 ssa_760 = intrinsic load_shared (ssa_759) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_761 = ffma ssa_754.x, ssa_760, ssa_744 vec1 32 ssa_762 = iadd ssa_416, ssa_756 vec1 32 ssa_763 = intrinsic load_shared (ssa_762) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_764 = ffma ssa_754.x, ssa_763, ssa_747 /* succs: block_61 */ } else { block block_60: /* preds: block_58 */ /* succs: block_61 */ } block block_61: /* preds: block_59 block_60 */ vec1 32 ssa_765 = phi block_59: ssa_758, block_60: ssa_741 vec1 32 ssa_766 = phi block_59: ssa_761, block_60: ssa_744 vec1 32 ssa_767 = phi block_59: ssa_764, block_60: ssa_747 vec1 32 ssa_768 = phi block_59: ssa_755, block_60: ssa_738 vec1 32 ssa_769 = iadd ssa_163, ssa_46 vec1 32 ssa_770 = ffma ssa_619, ssa_619, ssa_642 vec1 32 ssa_771 = fsqrt ssa_770 vec1 32 ssa_772 = flt32 ssa_771, ssa_13 /* succs: block_62 block_63 */ if ssa_772 { block block_62: /* preds: block_61 */ vec1 32 ssa_773 = fmul ssa_771, ssa_403 vec1 32 ssa_774 = flrp ssa_14, ssa_15, ssa_773 vec4 32 ssa_775 = tex ssa_774 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_776 = fadd ssa_768, ssa_775.x vec1 32 ssa_777 = ishl ssa_769, ssa_408 vec1 32 ssa_778 = intrinsic load_shared (ssa_777) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_779 = ffma ssa_775.x, ssa_778, ssa_765 vec1 32 ssa_780 = iadd ssa_412, ssa_777 vec1 32 ssa_781 = intrinsic load_shared (ssa_780) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_782 = ffma ssa_775.x, ssa_781, ssa_766 vec1 32 ssa_783 = iadd ssa_416, ssa_777 vec1 32 ssa_784 = intrinsic load_shared (ssa_783) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_785 = ffma ssa_775.x, ssa_784, ssa_767 /* succs: block_64 */ } else { block block_63: /* preds: block_61 */ /* succs: block_64 */ } block block_64: /* preds: block_62 block_63 */ vec1 32 ssa_786 = phi block_62: ssa_779, block_63: ssa_765 vec1 32 ssa_787 = phi block_62: ssa_782, block_63: ssa_766 vec1 32 ssa_788 = phi block_62: ssa_785, block_63: ssa_767 vec1 32 ssa_789 = phi block_62: ssa_776, block_63: ssa_768 vec1 32 ssa_790 = iadd ssa_163, ssa_47 vec1 32 ssa_791 = fadd ssa_23, -ssa_126 vec1 32 ssa_792 = fmul ssa_791, ssa_791 vec1 32 ssa_793 = ffma ssa_218, ssa_218, ssa_792 vec1 32 ssa_794 = fsqrt ssa_793 vec1 32 ssa_795 = flt32 ssa_794, ssa_13 /* succs: block_65 block_66 */ if ssa_795 { block block_65: /* preds: block_64 */ vec1 32 ssa_796 = fmul ssa_794, ssa_403 vec1 32 ssa_797 = flrp ssa_14, ssa_15, ssa_796 vec4 32 ssa_798 = tex ssa_797 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_799 = fadd ssa_789, ssa_798.x vec1 32 ssa_800 = ishl ssa_790, ssa_408 vec1 32 ssa_801 = intrinsic load_shared (ssa_800) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_802 = ffma ssa_798.x, ssa_801, ssa_786 vec1 32 ssa_803 = iadd ssa_412, ssa_800 vec1 32 ssa_804 = intrinsic load_shared (ssa_803) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_805 = ffma ssa_798.x, ssa_804, ssa_787 vec1 32 ssa_806 = iadd ssa_416, ssa_800 vec1 32 ssa_807 = intrinsic load_shared (ssa_806) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_808 = ffma ssa_798.x, ssa_807, ssa_788 /* succs: block_67 */ } else { block block_66: /* preds: block_64 */ /* succs: block_67 */ 
} block block_67: /* preds: block_65 block_66 */ vec1 32 ssa_809 = phi block_65: ssa_802, block_66: ssa_786 vec1 32 ssa_810 = phi block_65: ssa_805, block_66: ssa_787 vec1 32 ssa_811 = phi block_65: ssa_808, block_66: ssa_788 vec1 32 ssa_812 = phi block_65: ssa_799, block_66: ssa_789 vec1 32 ssa_813 = iadd ssa_163, ssa_48 vec1 32 ssa_814 = ffma ssa_246, ssa_246, ssa_792 vec1 32 ssa_815 = fsqrt ssa_814 vec1 32 ssa_816 = fmul ssa_815, ssa_403 vec1 32 ssa_817 = flrp ssa_14, ssa_15, ssa_816 vec4 32 ssa_818 = tex ssa_817 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_819 = fadd ssa_812, ssa_818.x vec1 32 ssa_820 = ishl ssa_813, ssa_408 vec1 32 ssa_821 = intrinsic load_shared (ssa_820) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_822 = ffma ssa_818.x, ssa_821, ssa_809 vec1 32 ssa_823 = iadd ssa_412, ssa_820 vec1 32 ssa_824 = intrinsic load_shared (ssa_823) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_825 = ffma ssa_818.x, ssa_824, ssa_810 vec1 32 ssa_826 = iadd ssa_416, ssa_820 vec1 32 ssa_827 = intrinsic load_shared (ssa_826) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_828 = ffma ssa_818.x, ssa_827, ssa_811 vec1 32 ssa_829 = iadd ssa_163, ssa_49 vec1 32 ssa_830 = ffma ssa_125, ssa_125, ssa_792 vec1 32 ssa_831 = fsqrt ssa_830 vec1 32 ssa_832 = fmul ssa_831, ssa_403 vec1 32 ssa_833 = flrp ssa_14, ssa_15, ssa_832 vec4 32 ssa_834 = tex ssa_833 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_835 = fadd ssa_819, ssa_834.x vec1 32 ssa_836 = ishl ssa_829, ssa_408 vec1 32 ssa_837 = intrinsic load_shared (ssa_836) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_838 = ffma ssa_834.x, ssa_837, ssa_822 vec1 32 ssa_839 = iadd ssa_412, ssa_836 vec1 32 ssa_840 = intrinsic load_shared (ssa_839) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_841 = ffma ssa_834.x, ssa_840, ssa_825 vec1 32 ssa_842 = iadd ssa_416, ssa_836 vec1 32 ssa_843 = intrinsic load_shared (ssa_842) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_844 = ffma ssa_834.x, ssa_843, ssa_828 vec1 32 ssa_845 = iadd ssa_163, ssa_50 vec1 32 ssa_846 = ffma ssa_192, ssa_192, ssa_792 vec1 32 ssa_847 = fsqrt ssa_846 vec1 32 ssa_848 = fmul ssa_847, ssa_403 vec1 32 ssa_849 = flrp ssa_14, ssa_15, ssa_848 vec4 32 ssa_850 = tex ssa_849 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_851 = fadd ssa_835, ssa_850.x vec1 32 ssa_852 = ishl ssa_845, ssa_408 vec1 32 ssa_853 = intrinsic load_shared (ssa_852) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_854 = ffma ssa_850.x, ssa_853, ssa_838 vec1 32 ssa_855 = iadd ssa_412, ssa_852 vec1 32 ssa_856 = intrinsic load_shared (ssa_855) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_857 = ffma ssa_850.x, ssa_856, ssa_841 vec1 32 ssa_858 = iadd ssa_416, ssa_852 vec1 32 ssa_859 = intrinsic load_shared (ssa_858) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_860 = ffma ssa_850.x, ssa_859, ssa_844 vec1 32 ssa_861 = iadd ssa_163, ssa_51 vec1 32 ssa_862 = ffma ssa_322, ssa_322, ssa_792 vec1 32 ssa_863 = fsqrt ssa_862 vec1 32 ssa_864 = fmul ssa_863, ssa_403 vec1 32 ssa_865 = flrp ssa_14, ssa_15, ssa_864 vec4 32 ssa_866 = tex ssa_865 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_867 = fadd ssa_851, ssa_866.x vec1 32 ssa_868 = ishl ssa_861, ssa_408 vec1 32 ssa_869 = intrinsic load_shared (ssa_868) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* 
align_offset=0 */ vec1 32 ssa_870 = ffma ssa_866.x, ssa_869, ssa_854 vec1 32 ssa_871 = iadd ssa_412, ssa_868 vec1 32 ssa_872 = intrinsic load_shared (ssa_871) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_873 = ffma ssa_866.x, ssa_872, ssa_857 vec1 32 ssa_874 = iadd ssa_416, ssa_868 vec1 32 ssa_875 = intrinsic load_shared (ssa_874) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_876 = ffma ssa_866.x, ssa_875, ssa_860 vec1 32 ssa_877 = iadd ssa_163, ssa_52 vec1 32 ssa_878 = ffma ssa_348, ssa_348, ssa_792 vec1 32 ssa_879 = fsqrt ssa_878 vec1 32 ssa_880 = flt32 ssa_879, ssa_13 /* succs: block_68 block_69 */ if ssa_880 { block block_68: /* preds: block_67 */ vec1 32 ssa_881 = fmul ssa_879, ssa_403 vec1 32 ssa_882 = flrp ssa_14, ssa_15, ssa_881 vec4 32 ssa_883 = tex ssa_882 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_884 = fadd ssa_867, ssa_883.x vec1 32 ssa_885 = ishl ssa_877, ssa_408 vec1 32 ssa_886 = intrinsic load_shared (ssa_885) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_887 = ffma ssa_883.x, ssa_886, ssa_870 vec1 32 ssa_888 = iadd ssa_412, ssa_885 vec1 32 ssa_889 = intrinsic load_shared (ssa_888) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_890 = ffma ssa_883.x, ssa_889, ssa_873 vec1 32 ssa_891 = iadd ssa_416, ssa_885 vec1 32 ssa_892 = intrinsic load_shared (ssa_891) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_893 = ffma ssa_883.x, ssa_892, ssa_876 /* succs: block_70 */ } else { block block_69: /* preds: block_67 */ /* succs: block_70 */ } block block_70: /* preds: block_68 block_69 */ vec1 32 ssa_894 = phi block_68: ssa_887, block_69: ssa_870 vec1 32 ssa_895 = phi block_68: ssa_890, block_69: ssa_873 vec1 32 ssa_896 = phi block_68: ssa_893, block_69: ssa_876 vec1 32 ssa_897 = phi block_68: ssa_884, block_69: ssa_867 vec1 32 ssa_898 = iadd ssa_163, ssa_53 vec1 32 ssa_899 = fadd ssa_25, -ssa_126 vec1 32 ssa_900 = fmul ssa_899, ssa_899 vec1 32 ssa_901 = ffma ssa_218, ssa_218, ssa_900 vec1 32 ssa_902 = fsqrt ssa_901 vec1 32 ssa_903 = flt32 ssa_902, ssa_13 /* succs: block_71 block_72 */ if ssa_903 { block block_71: /* preds: block_70 */ vec1 32 ssa_904 = fmul ssa_902, ssa_403 vec1 32 ssa_905 = flrp ssa_14, ssa_15, ssa_904 vec4 32 ssa_906 = tex ssa_905 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_907 = fadd ssa_897, ssa_906.x vec1 32 ssa_908 = ishl ssa_898, ssa_408 vec1 32 ssa_909 = intrinsic load_shared (ssa_908) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_910 = ffma ssa_906.x, ssa_909, ssa_894 vec1 32 ssa_911 = iadd ssa_412, ssa_908 vec1 32 ssa_912 = intrinsic load_shared (ssa_911) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_913 = ffma ssa_906.x, ssa_912, ssa_895 vec1 32 ssa_914 = iadd ssa_416, ssa_908 vec1 32 ssa_915 = intrinsic load_shared (ssa_914) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_916 = ffma ssa_906.x, ssa_915, ssa_896 /* succs: block_73 */ } else { block block_72: /* preds: block_70 */ /* succs: block_73 */ } block block_73: /* preds: block_71 block_72 */ vec1 32 ssa_917 = phi block_71: ssa_910, block_72: ssa_894 vec1 32 ssa_918 = phi block_71: ssa_913, block_72: ssa_895 vec1 32 ssa_919 = phi block_71: ssa_916, block_72: ssa_896 vec1 32 ssa_920 = phi block_71: ssa_907, block_72: ssa_897 vec1 32 ssa_921 = iadd ssa_163, ssa_54 vec1 32 ssa_922 = ffma ssa_246, ssa_246, ssa_900 vec1 32 ssa_923 = fsqrt ssa_922 vec1 32 
ssa_924 = flt32 ssa_923, ssa_13 /* succs: block_74 block_75 */ if ssa_924 { block block_74: /* preds: block_73 */ vec1 32 ssa_925 = fmul ssa_923, ssa_403 vec1 32 ssa_926 = flrp ssa_14, ssa_15, ssa_925 vec4 32 ssa_927 = tex ssa_926 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_928 = fadd ssa_920, ssa_927.x vec1 32 ssa_929 = ishl ssa_921, ssa_408 vec1 32 ssa_930 = intrinsic load_shared (ssa_929) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_931 = ffma ssa_927.x, ssa_930, ssa_917 vec1 32 ssa_932 = iadd ssa_412, ssa_929 vec1 32 ssa_933 = intrinsic load_shared (ssa_932) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_934 = ffma ssa_927.x, ssa_933, ssa_918 vec1 32 ssa_935 = iadd ssa_416, ssa_929 vec1 32 ssa_936 = intrinsic load_shared (ssa_935) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_937 = ffma ssa_927.x, ssa_936, ssa_919 /* succs: block_76 */ } else { block block_75: /* preds: block_73 */ /* succs: block_76 */ } block block_76: /* preds: block_74 block_75 */ vec1 32 ssa_938 = phi block_74: ssa_931, block_75: ssa_917 vec1 32 ssa_939 = phi block_74: ssa_934, block_75: ssa_918 vec1 32 ssa_940 = phi block_74: ssa_937, block_75: ssa_919 vec1 32 ssa_941 = phi block_74: ssa_928, block_75: ssa_920 vec1 32 ssa_942 = iadd ssa_163, ssa_55 vec1 32 ssa_943 = ffma ssa_125, ssa_125, ssa_900 vec1 32 ssa_944 = fsqrt ssa_943 vec1 32 ssa_945 = flt32 ssa_944, ssa_13 /* succs: block_77 block_78 */ if ssa_945 { block block_77: /* preds: block_76 */ vec1 32 ssa_946 = fmul ssa_944, ssa_403 vec1 32 ssa_947 = flrp ssa_14, ssa_15, ssa_946 vec4 32 ssa_948 = tex ssa_947 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_949 = fadd ssa_941, ssa_948.x vec1 32 ssa_950 = ishl ssa_942, ssa_408 vec1 32 ssa_951 = intrinsic load_shared (ssa_950) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_952 = ffma ssa_948.x, ssa_951, ssa_938 vec1 32 ssa_953 = iadd ssa_412, ssa_950 vec1 32 ssa_954 = intrinsic load_shared (ssa_953) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_955 = ffma ssa_948.x, ssa_954, ssa_939 vec1 32 ssa_956 = iadd ssa_416, ssa_950 vec1 32 ssa_957 = intrinsic load_shared (ssa_956) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_958 = ffma ssa_948.x, ssa_957, ssa_940 /* succs: block_79 */ } else { block block_78: /* preds: block_76 */ /* succs: block_79 */ } block block_79: /* preds: block_77 block_78 */ vec1 32 ssa_959 = phi block_77: ssa_952, block_78: ssa_938 vec1 32 ssa_960 = phi block_77: ssa_955, block_78: ssa_939 vec1 32 ssa_961 = phi block_77: ssa_958, block_78: ssa_940 vec1 32 ssa_962 = phi block_77: ssa_949, block_78: ssa_941 vec1 32 ssa_963 = iadd ssa_163, ssa_56 vec1 32 ssa_964 = ffma ssa_192, ssa_192, ssa_900 vec1 32 ssa_965 = fsqrt ssa_964 vec1 32 ssa_966 = flt32 ssa_965, ssa_13 /* succs: block_80 block_81 */ if ssa_966 { block block_80: /* preds: block_79 */ vec1 32 ssa_967 = fmul ssa_965, ssa_403 vec1 32 ssa_968 = flrp ssa_14, ssa_15, ssa_967 vec4 32 ssa_969 = tex ssa_968 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_970 = fadd ssa_962, ssa_969.x vec1 32 ssa_971 = ishl ssa_963, ssa_408 vec1 32 ssa_972 = intrinsic load_shared (ssa_971) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_973 = ffma ssa_969.x, ssa_972, ssa_959 vec1 32 ssa_974 = iadd ssa_412, ssa_971 vec1 32 ssa_975 = intrinsic load_shared (ssa_974) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 
ssa_976 = ffma ssa_969.x, ssa_975, ssa_960 vec1 32 ssa_977 = iadd ssa_416, ssa_971 vec1 32 ssa_978 = intrinsic load_shared (ssa_977) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_979 = ffma ssa_969.x, ssa_978, ssa_961 /* succs: block_82 */ } else { block block_81: /* preds: block_79 */ /* succs: block_82 */ } block block_82: /* preds: block_80 block_81 */ vec1 32 ssa_980 = phi block_80: ssa_973, block_81: ssa_959 vec1 32 ssa_981 = phi block_80: ssa_976, block_81: ssa_960 vec1 32 ssa_982 = phi block_80: ssa_979, block_81: ssa_961 vec1 32 ssa_983 = phi block_80: ssa_970, block_81: ssa_962 vec1 32 ssa_984 = iadd ssa_163, ssa_57 vec1 32 ssa_985 = ffma ssa_322, ssa_322, ssa_900 vec1 32 ssa_986 = fsqrt ssa_985 vec1 32 ssa_987 = flt32 ssa_986, ssa_13 /* succs: block_83 block_84 */ if ssa_987 { block block_83: /* preds: block_82 */ vec1 32 ssa_988 = fmul ssa_986, ssa_403 vec1 32 ssa_989 = flrp ssa_14, ssa_15, ssa_988 vec4 32 ssa_990 = tex ssa_989 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_991 = fadd ssa_983, ssa_990.x vec1 32 ssa_992 = ishl ssa_984, ssa_408 vec1 32 ssa_993 = intrinsic load_shared (ssa_992) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_994 = ffma ssa_990.x, ssa_993, ssa_980 vec1 32 ssa_995 = iadd ssa_412, ssa_992 vec1 32 ssa_996 = intrinsic load_shared (ssa_995) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_997 = ffma ssa_990.x, ssa_996, ssa_981 vec1 32 ssa_998 = iadd ssa_416, ssa_992 vec1 32 ssa_999 = intrinsic load_shared (ssa_998) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1000 = ffma ssa_990.x, ssa_999, ssa_982 /* succs: block_85 */ } else { block block_84: /* preds: block_82 */ /* succs: block_85 */ } block block_85: /* preds: block_83 block_84 */ vec1 32 ssa_1001 = phi block_83: ssa_994, block_84: ssa_980 vec1 32 ssa_1002 = phi block_83: ssa_997, block_84: ssa_981 vec1 32 ssa_1003 = phi block_83: ssa_1000, block_84: ssa_982 vec1 32 ssa_1004 = phi block_83: ssa_991, block_84: ssa_983 vec1 32 ssa_1005 = iadd ssa_163, ssa_58 vec1 32 ssa_1006 = ffma ssa_348, ssa_348, ssa_900 vec1 32 ssa_1007 = fsqrt ssa_1006 vec1 32 ssa_1008 = flt32 ssa_1007, ssa_13 /* succs: block_86 block_87 */ if ssa_1008 { block block_86: /* preds: block_85 */ vec1 32 ssa_1009 = fmul ssa_1007, ssa_403 vec1 32 ssa_1010 = flrp ssa_14, ssa_15, ssa_1009 vec4 32 ssa_1011 = tex ssa_1010 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_1012 = fadd ssa_1004, ssa_1011.x vec1 32 ssa_1013 = ishl ssa_1005, ssa_408 vec1 32 ssa_1014 = intrinsic load_shared (ssa_1013) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1015 = ffma ssa_1011.x, ssa_1014, ssa_1001 vec1 32 ssa_1016 = iadd ssa_412, ssa_1013 vec1 32 ssa_1017 = intrinsic load_shared (ssa_1016) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1018 = ffma ssa_1011.x, ssa_1017, ssa_1002 vec1 32 ssa_1019 = iadd ssa_416, ssa_1013 vec1 32 ssa_1020 = intrinsic load_shared (ssa_1019) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1021 = ffma ssa_1011.x, ssa_1020, ssa_1003 /* succs: block_88 */ } else { block block_87: /* preds: block_85 */ /* succs: block_88 */ } block block_88: /* preds: block_86 block_87 */ vec1 32 ssa_1022 = phi block_86: ssa_1015, block_87: ssa_1001 vec1 32 ssa_1023 = phi block_86: ssa_1018, block_87: ssa_1002 vec1 32 ssa_1024 = phi block_86: ssa_1021, block_87: ssa_1003 vec1 32 ssa_1025 = phi block_86: ssa_1012, block_87: ssa_1004 vec1 
32 ssa_1026 = iadd ssa_163, ssa_59 vec1 32 ssa_1027 = fadd ssa_38, -ssa_126 vec1 32 ssa_1028 = fmul ssa_1027, ssa_1027 vec1 32 ssa_1029 = ffma ssa_125, ssa_125, ssa_1028 vec1 32 ssa_1030 = fsqrt ssa_1029 vec1 32 ssa_1031 = flt32 ssa_1030, ssa_13 /* succs: block_89 block_90 */ if ssa_1031 { block block_89: /* preds: block_88 */ vec1 32 ssa_1032 = fmul ssa_1030, ssa_403 vec1 32 ssa_1033 = flrp ssa_14, ssa_15, ssa_1032 vec4 32 ssa_1034 = tex ssa_1033 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_1035 = fadd ssa_1025, ssa_1034.x vec1 32 ssa_1036 = ishl ssa_1026, ssa_408 vec1 32 ssa_1037 = intrinsic load_shared (ssa_1036) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1038 = ffma ssa_1034.x, ssa_1037, ssa_1022 vec1 32 ssa_1039 = iadd ssa_412, ssa_1036 vec1 32 ssa_1040 = intrinsic load_shared (ssa_1039) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1041 = ffma ssa_1034.x, ssa_1040, ssa_1023 vec1 32 ssa_1042 = iadd ssa_416, ssa_1036 vec1 32 ssa_1043 = intrinsic load_shared (ssa_1042) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1044 = ffma ssa_1034.x, ssa_1043, ssa_1024 /* succs: block_91 */ } else { block block_90: /* preds: block_88 */ /* succs: block_91 */ } block block_91: /* preds: block_89 block_90 */ vec1 32 ssa_1045 = phi block_89: ssa_1038, block_90: ssa_1022 vec1 32 ssa_1046 = phi block_89: ssa_1041, block_90: ssa_1023 vec1 32 ssa_1047 = phi block_89: ssa_1044, block_90: ssa_1024 vec1 32 ssa_1048 = phi block_89: ssa_1035, block_90: ssa_1025 vec1 32 ssa_1049 = iadd ssa_163, ssa_60 vec1 32 ssa_1050 = ffma ssa_192, ssa_192, ssa_1028 vec1 32 ssa_1051 = fsqrt ssa_1050 vec1 32 ssa_1052 = flt32 ssa_1051, ssa_13 /* succs: block_92 block_93 */ if ssa_1052 { block block_92: /* preds: block_91 */ vec1 32 ssa_1053 = fmul ssa_1051, ssa_403 vec1 32 ssa_1054 = flrp ssa_14, ssa_15, ssa_1053 vec4 32 ssa_1055 = tex ssa_1054 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_1056 = fadd ssa_1048, ssa_1055.x vec1 32 ssa_1057 = ishl ssa_1049, ssa_408 vec1 32 ssa_1058 = intrinsic load_shared (ssa_1057) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1059 = ffma ssa_1055.x, ssa_1058, ssa_1045 vec1 32 ssa_1060 = iadd ssa_412, ssa_1057 vec1 32 ssa_1061 = intrinsic load_shared (ssa_1060) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1062 = ffma ssa_1055.x, ssa_1061, ssa_1046 vec1 32 ssa_1063 = iadd ssa_416, ssa_1057 vec1 32 ssa_1064 = intrinsic load_shared (ssa_1063) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1065 = ffma ssa_1055.x, ssa_1064, ssa_1047 /* succs: block_94 */ } else { block block_93: /* preds: block_91 */ /* succs: block_94 */ } block block_94: /* preds: block_92 block_93 */ vec1 32 ssa_1066 = phi block_92: ssa_1059, block_93: ssa_1045 vec1 32 ssa_1067 = phi block_92: ssa_1062, block_93: ssa_1046 vec1 32 ssa_1068 = phi block_92: ssa_1065, block_93: ssa_1047 vec1 32 ssa_1069 = phi block_92: ssa_1056, block_93: ssa_1048 vec1 32 ssa_1070 = frcp ssa_1069 vec1 32 ssa_1071 = fmul.sat ssa_1066, ssa_1070 vec1 32 ssa_1072 = fmul.sat ssa_1067, ssa_1070 vec1 32 ssa_1073 = fmul.sat ssa_1068, ssa_1070 vec1 32 ssa_1074 = fadd ssa_61, -ssa_1071 vec1 32 ssa_1075 = fadd ssa_61, -ssa_1072 vec1 32 ssa_1076 = fadd ssa_61, -ssa_1073 vec1 32 ssa_1077 = load_const (0x41160a50 /* 9.377518 */) vec1 32 ssa_1078 = fmul ssa_1077, ssa_1074 vec1 32 ssa_1079 = fmul ssa_1077, ssa_1075 vec1 32 ssa_1080 = fmul ssa_1077, ssa_1076 vec1 32 
ssa_1081 = fexp2 ssa_1078 vec1 32 ssa_1082 = fexp2 ssa_1079 vec1 32 ssa_1083 = fexp2 ssa_1080 vec1 32 ssa_1084 = fadd ssa_16, ssa_1081 vec1 32 ssa_1085 = fadd ssa_16, ssa_1082 vec1 32 ssa_1086 = fadd ssa_16, ssa_1083 vec1 32 ssa_1087 = frcp ssa_1084 vec1 32 ssa_1088 = frcp ssa_1085 vec1 32 ssa_1089 = frcp ssa_1086 vec1 32 ssa_1090 = fadd ssa_1087, ssa_62 vec1 32 ssa_1091 = fadd ssa_1088, ssa_62 vec1 32 ssa_1092 = fadd ssa_1089, ssa_62 vec1 32 ssa_1093 = load_const (0x3f9a9b5f /* 1.207867 */) vec1 32 ssa_1094 = fmul.sat ssa_1090, ssa_1093 vec1 32 ssa_1095 = fmul.sat ssa_1091, ssa_1093 vec1 32 ssa_1096 = fmul.sat ssa_1092, ssa_1093 vec1 32 ssa_1097 = fpow ssa_1094, ssa_63 vec1 32 ssa_1098 = fpow ssa_1095, ssa_63 vec1 32 ssa_1099 = fpow ssa_1096, ssa_63 vec1 32 ssa_1100 = undefined vec4 32 ssa_1101 = vec4 ssa_86, ssa_87, ssa_1100, ssa_1100 vec4 32 ssa_1102 = vec4 ssa_1097, ssa_1098, ssa_1099, ssa_16 intrinsic image_store (ssa_0, ssa_1101, ssa_1100, ssa_1102) (1, 0, 34842, 8) /* image_dim=2D */ /* image_dim=true */ /* format=34842 */ /* access=8 */ /* succs: block_95 */
block block_95:
}

NIR (final form) for compute shader:
shader: MESA_SHADER_COMPUTE
name: GLSL19
local-size: 32, 8, 1
shared-size: 5040
inputs: 0
outputs: 0
uniforms: 80
shared: 0
decl_var ubo INTERP_MODE_NONE vec2 tex_scale0 (7, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 out_scale (6, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 pixel_size0 (5, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 texture_off0 (4, 0, 0)
decl_var ubo INTERP_MODE_NONE mat2 texture_rot0 (3, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 texture_size0 (2, 0, 0)
decl_var ubo INTERP_MODE_NONE vec3 dst_luma (1, 0, 0)
decl_var ubo INTERP_MODE_NONE vec3 src_luma (0, 0, 0)
decl_var uniform INTERP_MODE_NONE sampler1D lut (8, 0, 0)
decl_var uniform INTERP_MODE_NONE writeonly GL_RGBA16F image2D out_image (9, 0, 0)
decl_var uniform INTERP_MODE_NONE sampler2D texture0 (10, 0, 1)
decl_function main (0 params)
impl main {
decl_reg vec1 32 r0 decl_reg vec1 32 r1 decl_reg vec1 32 r2 decl_reg vec1 32 r3 decl_reg vec1 32 r4 decl_reg vec1 32 r5 decl_reg vec1 32 r6 decl_reg vec1 32 r7 decl_reg vec1 32 r8 decl_reg vec1 32 r9 decl_reg vec1 32 r10 decl_reg vec1 32 r11 decl_reg vec1 32 r12 decl_reg vec1 32 r13 decl_reg vec1 32 r14 decl_reg vec1 32 r15 decl_reg vec1 32 r16 decl_reg vec1 32 r17 decl_reg vec1 32 r18 decl_reg vec1 32 r19 decl_reg vec1 32 r20 decl_reg vec1 32 r21 decl_reg vec1 32 r22
block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x00000040 /* 0.000000 */) vec1 32 ssa_2 = load_const (0x00000060 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_4 = load_const (0x00000020 /* 0.000000 */) vec1 32 ssa_5 = load_const (0xbf000000 /* -0.500000 */) vec1 32 ssa_6 = load_const (0x0000000e /* 0.000000 */) vec1 32 ssa_7 = load_const (0x0000001e /* 0.000000 */) vec1 32 ssa_8 = load_const (0xfffffffd /* -nan */) vec1 32 ssa_9 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_10 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_11 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_12 = load_const (0xc0400000 /* -3.000000 */) vec1 32 ssa_13 = load_const (0x404217e3 /* 3.032708 */) vec1 32 ssa_14 = load_const (0x3c000000 /* 0.007812 */) vec1 32 ssa_15 = load_const (0x3f7e0000 /* 0.992188 */) vec1 32 ssa_16 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_17 = load_const (0x0000001f /* 0.000000 */) vec1 32 ssa_18 = load_const (0xc0000000 /* -2.000000 */) vec1 32 ssa_19 = load_const (0xbf800000
/* -1.000000 */) vec1 32 ssa_20 = load_const (0x00000021 /* 0.000000 */) vec1 32 ssa_21 = load_const (0x00000022 /* 0.000000 */) vec1 32 ssa_22 = load_const (0x00000023 /* 0.000000 */) vec1 32 ssa_23 = load_const (0x40000000 /* 2.000000 */) vec1 32 ssa_24 = load_const (0x00000024 /* 0.000000 */) vec1 32 ssa_25 = load_const (0x40400000 /* 3.000000 */) vec1 32 ssa_26 = load_const (0x0000003d /* 0.000000 */) vec1 32 ssa_27 = load_const (0x0000003e /* 0.000000 */) vec1 32 ssa_28 = load_const (0x0000003f /* 0.000000 */) vec1 32 ssa_29 = load_const (0x00000041 /* 0.000000 */) vec1 32 ssa_30 = load_const (0x00000042 /* 0.000000 */) vec1 32 ssa_31 = load_const (0x0000005a /* 0.000000 */) vec1 32 ssa_32 = load_const (0x0000005b /* 0.000000 */) vec1 32 ssa_33 = load_const (0x0000005c /* 0.000000 */) vec1 32 ssa_34 = load_const (0x0000005d /* 0.000000 */) vec1 32 ssa_35 = load_const (0x0000005e /* 0.000000 */) vec1 32 ssa_36 = load_const (0x0000005f /* 0.000000 */) vec1 32 ssa_37 = load_const (0x00000061 /* 0.000000 */) vec1 32 ssa_38 = load_const (0x40800000 /* 4.000000 */) vec1 32 ssa_39 = load_const (0x00000078 /* 0.000000 */) vec1 32 ssa_40 = load_const (0x00000079 /* 0.000000 */) vec1 32 ssa_41 = load_const (0x0000007a /* 0.000000 */) vec1 32 ssa_42 = load_const (0x0000007b /* 0.000000 */) vec1 32 ssa_43 = load_const (0x0000007c /* 0.000000 */) vec1 32 ssa_44 = load_const (0x0000007d /* 0.000000 */) vec1 32 ssa_45 = load_const (0x0000007e /* 0.000000 */) vec1 32 ssa_46 = load_const (0x0000007f /* 0.000000 */) vec1 32 ssa_47 = load_const (0x00000097 /* 0.000000 */) vec1 32 ssa_48 = load_const (0x00000098 /* 0.000000 */) vec1 32 ssa_49 = load_const (0x00000099 /* 0.000000 */) vec1 32 ssa_50 = load_const (0x0000009a /* 0.000000 */) vec1 32 ssa_51 = load_const (0x0000009b /* 0.000000 */) vec1 32 ssa_52 = load_const (0x0000009c /* 0.000000 */) vec1 32 ssa_53 = load_const (0x000000b5 /* 0.000000 */) vec1 32 ssa_54 = load_const (0x000000b6 /* 0.000000 */) vec1 32 ssa_55 = load_const (0x000000b7 /* 0.000000 */) vec1 32 ssa_56 = load_const (0x000000b8 /* 0.000000 */) vec1 32 ssa_57 = load_const (0x000000b9 /* 0.000000 */) vec1 32 ssa_58 = load_const (0x000000ba /* 0.000000 */) vec1 32 ssa_59 = load_const (0x000000d5 /* 0.000000 */) vec1 32 ssa_60 = load_const (0x000000d6 /* 0.000000 */) vec1 32 ssa_61 = load_const (0x3f400000 /* 0.750000 */) vec1 32 ssa_62 = load_const (0xbbf8487c /* -0.007577 */) vec1 32 ssa_63 = load_const (0x3ed55555 /* 0.416667 */) vec1 32 ssa_64 = load_const (0x00000030 /* 0.000000 */) vec2 32 ssa_65 = intrinsic load_ubo (ssa_0, ssa_64) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec2 32 ssa_66 = intrinsic load_ubo (ssa_0, ssa_1) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_67 = load_const (0x00000068 /* 0.000000 */) vec2 32 ssa_68 = intrinsic load_ubo (ssa_0, ssa_67) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec2 32 ssa_69 = intrinsic load_ubo (ssa_0, ssa_2) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_70 = load_const (0x00000058 /* 0.000000 */) vec2 32 ssa_71 = intrinsic load_ubo (ssa_0, ssa_70) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_72 = load_const (0x00000050 /* 0.000000 */) vec2 32 ssa_73 = intrinsic load_ubo (ssa_0, ssa_72) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec3 32 ssa_74 = intrinsic load_work_group_id () () vec1 32 ssa_75 = intrinsic load_subgroup_id () () vec1 32 ssa_76 = ishl ssa_75, ssa_11 vec1 32 ssa_77 = intrinsic load_subgroup_invocation () () vec1 32 ssa_78 = iadd ssa_77, ssa_76 r0 = iand ssa_78, ssa_17 
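In this final form the SSA phis of the first listing have been replaced by the registers r0-r22, and the instructions around this point rebuild the workgroup-local invocation ID from subgroup intrinsics: load_subgroup_id shifted left by 3 plus load_subgroup_invocation gives a flat index, which iand with 0x1f and ushr 5/iand 7 split into a 32x8 local coordinate, matching the declared local-size. The shift by 3 suggests an 8-wide subgroup on this backend, which is an inference from the dump rather than something it states. A GLSL sketch of the index math:

    // Rebuilding the 32x8 local invocation ID from subgroup intrinsics,
    // assuming an 8-invocation subgroup (illustrative, not in the dump).
    uint flat_id = (gl_SubgroupID << 3u) + gl_SubgroupInvocationID; // ishl + iadd
    uint local_x = flat_id & 31u;                 // iand ssa_78, ssa_17 -> r0
    uint local_y = (flat_id >> 5u) & 7u;          // ushr 5 + iand 7     -> r1
    uint gx = (gl_WorkGroupID.x << 5u) + local_x; // wg.x * 32 + x (ssa_86)
    uint gy = (gl_WorkGroupID.y << 3u) + local_y; // wg.y * 8  + y (ssa_87)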
vec1 32 ssa_80 = load_const (0x00000005 /* 0.000000 */) vec1 32 ssa_81 = ushr ssa_78, ssa_80 vec1 32 ssa_82 = load_const (0x00000007 /* 0.000000 */) r1 = iand ssa_81, ssa_82 vec1 32 ssa_84 = ishl ssa_74.x, ssa_80 vec1 32 ssa_85 = ishl ssa_74.y, ssa_11 vec1 32 ssa_86 = iadd ssa_84, r0 vec1 32 ssa_87 = iadd ssa_85, r1 vec1 32 ssa_88 = u2f32 ssa_86 vec1 32 ssa_89 = u2f32 ssa_87 vec1 32 ssa_90 = fadd ssa_88, ssa_3 vec1 32 ssa_91 = fadd ssa_89, ssa_3 vec1 32 ssa_92 = fmul ssa_69.x, ssa_90 vec1 32 ssa_93 = fmul ssa_69.y, ssa_91 vec1 32 ssa_94 = fmul ssa_68.x, ssa_92 vec1 32 ssa_95 = fmul ssa_68.y, ssa_93 vec1 32 ssa_96 = fmul ssa_66.x, ssa_95 vec1 32 ssa_97 = fmul ssa_66.y, ssa_95 vec1 32 ssa_98 = ffma ssa_65.x, ssa_94, ssa_96 vec1 32 ssa_99 = ffma ssa_65.y, ssa_94, ssa_97 vec1 32 ssa_100 = ffma ssa_71.x, ssa_73.x, ssa_98 vec1 32 ssa_101 = ffma ssa_71.y, ssa_73.y, ssa_99 vec1 32 ssa_102 = u2f32 ssa_84 vec1 32 ssa_103 = u2f32 ssa_85 vec1 32 ssa_104 = fadd ssa_102, ssa_3 vec1 32 ssa_105 = fadd ssa_103, ssa_3 vec1 32 ssa_106 = fmul ssa_69.x, ssa_104 vec1 32 ssa_107 = fmul ssa_69.y, ssa_105 vec1 32 ssa_108 = fmul ssa_68.x, ssa_106 vec1 32 ssa_109 = fmul ssa_68.y, ssa_107 vec1 32 ssa_110 = fmul ssa_66.x, ssa_109 vec1 32 ssa_111 = fmul ssa_66.y, ssa_109 vec1 32 ssa_112 = ffma ssa_65.x, ssa_108, ssa_110 vec1 32 ssa_113 = ffma ssa_65.y, ssa_108, ssa_111 vec1 32 ssa_114 = ffma ssa_71.x, ssa_73.x, ssa_112 vec1 32 ssa_115 = ffma ssa_71.y, ssa_73.y, ssa_113 vec2 32 ssa_116 = intrinsic load_ubo (ssa_0, ssa_4) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_117 = ffma ssa_114, ssa_116.x, ssa_5 vec1 32 ssa_118 = ffma ssa_115, ssa_116.y, ssa_5 vec1 32 ssa_119 = ffract ssa_117 vec1 32 ssa_120 = ffract ssa_118 vec1 32 ssa_121 = ffma -ssa_71.x, ssa_119, ssa_114 vec1 32 ssa_122 = ffma -ssa_71.y, ssa_120, ssa_115 vec1 32 ssa_123 = ffma ssa_100, ssa_116.x, ssa_5 vec1 32 ssa_124 = ffma ssa_101, ssa_116.y, ssa_5 vec1 32 ssa_125 = ffract ssa_123 vec1 32 ssa_126 = ffract ssa_124 vec1 32 ssa_127 = ffma -ssa_71.x, ssa_125, ssa_100 vec1 32 ssa_128 = ffma -ssa_71.y, ssa_126, ssa_101 vec1 32 ssa_129 = fadd ssa_127, -ssa_121 vec1 32 ssa_130 = fadd ssa_128, -ssa_122 vec1 32 ssa_131 = fmul ssa_129, ssa_116.x vec1 32 ssa_132 = fmul ssa_130, ssa_116.y vec1 32 ssa_133 = fround_even ssa_131 vec1 32 ssa_134 = fround_even ssa_132 vec1 32 ssa_135 = f2i32 ssa_133 vec1 32 ssa_136 = f2i32 ssa_134 /* succs: block_1 */ loop { block block_1: /* preds: block_0 block_9 */ vec1 32 ssa_138 = ige32 r1, ssa_6 /* succs: block_2 block_3 */ if ssa_138 { block block_2: /* preds: block_1 */ break /* succs: block_10 */ } else { block block_3: /* preds: block_1 */ /* succs: block_4 */ } block block_4: /* preds: block_3 */ r2 = imov r0 /* succs: block_5 */ loop { block block_5: /* preds: block_4 block_8 */ vec1 32 ssa_140 = ige32 r2, ssa_7 /* succs: block_6 block_7 */ if ssa_140 { block block_6: /* preds: block_5 */ break /* succs: block_9 */ } else { block block_7: /* preds: block_5 */ /* succs: block_8 */ } block block_8: /* preds: block_7 */ vec1 32 ssa_141 = iadd r2, ssa_8 vec1 32 ssa_142 = i2f32 ssa_141 vec1 32 ssa_143 = iadd r1, ssa_8 vec1 32 ssa_144 = i2f32 ssa_143 vec1 32 ssa_145 = ffma ssa_71.x, ssa_142, ssa_121 vec1 32 ssa_146 = ffma ssa_71.y, ssa_144, ssa_122 vec2 32 ssa_147 = vec2 ssa_145, ssa_146 vec4 32 ssa_148 = tex ssa_147 (coord), ssa_0 (lod), 1 (texture), 1 (sampler), vec1 32 ssa_149 = imul ssa_7, r1 vec1 32 ssa_150 = iadd ssa_149, r2 vec1 32 ssa_151 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_152 = ishl ssa_150, 
ssa_151 vec1 32 ssa_153 = imov ssa_148.x intrinsic store_shared (ssa_153, ssa_152) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_154 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_155 = iadd ssa_154, ssa_152 vec1 32 ssa_156 = imov ssa_148.y intrinsic store_shared (ssa_156, ssa_155) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_157 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_158 = iadd ssa_157, ssa_152 vec1 32 ssa_159 = imov ssa_148.z intrinsic store_shared (ssa_159, ssa_158) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ r2 = iadd r2, ssa_4 /* succs: block_5 */ } block block_9: /* preds: block_6 */ r1 = iadd r1, ssa_10 /* succs: block_1 */ } block block_10: /* preds: block_2 */ intrinsic group_memory_barrier () () intrinsic barrier () () vec1 32 ssa_162 = imul ssa_7, ssa_136 vec1 32 ssa_163 = iadd ssa_162, ssa_135 vec1 32 ssa_164 = iadd ssa_163, ssa_11 vec1 32 ssa_165 = fadd ssa_12, -ssa_126 vec1 32 ssa_166 = fmul ssa_165, ssa_165 vec1 32 ssa_167 = ffma ssa_125, ssa_125, ssa_166 vec1 32 ssa_168 = fsqrt ssa_167 vec1 32 ssa_169 = flt32 ssa_168, ssa_13 /* succs: block_11 block_12 */ if ssa_169 { block block_11: /* preds: block_10 */ vec1 32 ssa_170 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_171 = fmul ssa_168, ssa_170 vec1 32 ssa_172 = flrp ssa_14, ssa_15, ssa_171 vec4 32 ssa_173 = tex ssa_172 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = imov ssa_173.x vec1 32 ssa_175 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_176 = ishl ssa_164, ssa_175 vec1 32 ssa_177 = intrinsic load_shared (ssa_176) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = fmul ssa_173.x, ssa_177 vec1 32 ssa_179 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_180 = iadd ssa_179, ssa_176 vec1 32 ssa_181 = intrinsic load_shared (ssa_180) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = fmul ssa_173.x, ssa_181 vec1 32 ssa_183 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_184 = iadd ssa_183, ssa_176 vec1 32 ssa_185 = intrinsic load_shared (ssa_184) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = fmul ssa_173.x, ssa_185 /* succs: block_13 */ } else { block block_12: /* preds: block_10 */ r3 = imov ssa_0 r6 = imov r3 r5 = imov r6 r4 = imov r5 /* succs: block_13 */ } block block_13: /* preds: block_11 block_12 */ vec1 32 ssa_191 = iadd ssa_163, ssa_9 vec1 32 ssa_192 = fadd ssa_16, -ssa_125 vec1 32 ssa_193 = ffma ssa_192, ssa_192, ssa_166 vec1 32 ssa_194 = fsqrt ssa_193 vec1 32 ssa_195 = flt32 ssa_194, ssa_13 /* succs: block_14 block_15 */ if ssa_195 { block block_14: /* preds: block_13 */ vec1 32 ssa_196 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_197 = fmul ssa_194, ssa_196 vec1 32 ssa_198 = flrp ssa_14, ssa_15, ssa_197 vec4 32 ssa_199 = tex ssa_198 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_199.x vec1 32 ssa_201 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_202 = ishl ssa_191, ssa_201 vec1 32 ssa_203 = intrinsic load_shared (ssa_202) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_199.x, ssa_203, r4 vec1 32 ssa_205 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_206 = iadd ssa_205, ssa_202 vec1 32 ssa_207 = intrinsic load_shared (ssa_206) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_199.x, ssa_207, r5 vec1 32 ssa_209 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_210 = iadd ssa_209, ssa_202 vec1 32 
ssa_211 = intrinsic load_shared (ssa_210) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_199.x, ssa_211, r6 /* succs: block_16 */ } else { block block_15: /* preds: block_13 */ /* succs: block_16 */ } block block_16: /* preds: block_14 block_15 */ vec1 32 ssa_217 = iadd ssa_163, ssa_17 vec1 32 ssa_218 = fadd ssa_18, -ssa_125 vec1 32 ssa_219 = fadd ssa_18, -ssa_126 vec1 32 ssa_220 = fmul ssa_219, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = fsqrt ssa_221 vec1 32 ssa_223 = flt32 ssa_222, ssa_13 /* succs: block_17 block_18 */ if ssa_223 { block block_17: /* preds: block_16 */ vec1 32 ssa_224 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_225 = fmul ssa_222, ssa_224 vec1 32 ssa_226 = flrp ssa_14, ssa_15, ssa_225 vec4 32 ssa_227 = tex ssa_226 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_227.x vec1 32 ssa_229 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_230 = ishl ssa_217, ssa_229 vec1 32 ssa_231 = intrinsic load_shared (ssa_230) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_227.x, ssa_231, r4 vec1 32 ssa_233 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_234 = iadd ssa_233, ssa_230 vec1 32 ssa_235 = intrinsic load_shared (ssa_234) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_227.x, ssa_235, r5 vec1 32 ssa_237 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_238 = iadd ssa_237, ssa_230 vec1 32 ssa_239 = intrinsic load_shared (ssa_238) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_227.x, ssa_239, r6 /* succs: block_19 */ } else { block block_18: /* preds: block_16 */ /* succs: block_19 */ } block block_19: /* preds: block_17 block_18 */ vec1 32 ssa_245 = iadd ssa_163, ssa_4 vec1 32 ssa_246 = fadd ssa_19, -ssa_125 vec1 32 ssa_247 = ffma ssa_246, ssa_246, ssa_220 vec1 32 ssa_248 = fsqrt ssa_247 vec1 32 ssa_249 = flt32 ssa_248, ssa_13 /* succs: block_20 block_21 */ if ssa_249 { block block_20: /* preds: block_19 */ vec1 32 ssa_250 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_251 = fmul ssa_248, ssa_250 vec1 32 ssa_252 = flrp ssa_14, ssa_15, ssa_251 vec4 32 ssa_253 = tex ssa_252 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_253.x vec1 32 ssa_255 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_256 = ishl ssa_245, ssa_255 vec1 32 ssa_257 = intrinsic load_shared (ssa_256) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_253.x, ssa_257, r4 vec1 32 ssa_259 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_260 = iadd ssa_259, ssa_256 vec1 32 ssa_261 = intrinsic load_shared (ssa_260) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_253.x, ssa_261, r5 vec1 32 ssa_263 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_264 = iadd ssa_263, ssa_256 vec1 32 ssa_265 = intrinsic load_shared (ssa_264) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_253.x, ssa_265, r6 /* succs: block_22 */ } else { block block_21: /* preds: block_19 */ /* succs: block_22 */ } block block_22: /* preds: block_20 block_21 */ vec1 32 ssa_271 = iadd ssa_163, ssa_20 vec1 32 ssa_272 = ffma ssa_125, ssa_125, ssa_220 vec1 32 ssa_273 = fsqrt ssa_272 vec1 32 ssa_274 = flt32 ssa_273, ssa_13 /* succs: block_23 block_24 */ if ssa_274 { block block_23: /* preds: block_22 */ vec1 32 ssa_275 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_276 = fmul ssa_273, ssa_275 vec1 32 ssa_277 = flrp ssa_14, ssa_15, ssa_276 vec4 32 ssa_278 = tex 
ssa_277 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_278.x vec1 32 ssa_280 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_281 = ishl ssa_271, ssa_280 vec1 32 ssa_282 = intrinsic load_shared (ssa_281) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_278.x, ssa_282, r4 vec1 32 ssa_284 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_285 = iadd ssa_284, ssa_281 vec1 32 ssa_286 = intrinsic load_shared (ssa_285) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_278.x, ssa_286, r5 vec1 32 ssa_288 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_289 = iadd ssa_288, ssa_281 vec1 32 ssa_290 = intrinsic load_shared (ssa_289) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_278.x, ssa_290, r6 /* succs: block_25 */ } else { block block_24: /* preds: block_22 */ /* succs: block_25 */ } block block_25: /* preds: block_23 block_24 */ vec1 32 ssa_296 = iadd ssa_163, ssa_21 vec1 32 ssa_297 = ffma ssa_192, ssa_192, ssa_220 vec1 32 ssa_298 = fsqrt ssa_297 vec1 32 ssa_299 = flt32 ssa_298, ssa_13 /* succs: block_26 block_27 */ if ssa_299 { block block_26: /* preds: block_25 */ vec1 32 ssa_300 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_301 = fmul ssa_298, ssa_300 vec1 32 ssa_302 = flrp ssa_14, ssa_15, ssa_301 vec4 32 ssa_303 = tex ssa_302 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_303.x vec1 32 ssa_305 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_306 = ishl ssa_296, ssa_305 vec1 32 ssa_307 = intrinsic load_shared (ssa_306) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_303.x, ssa_307, r4 vec1 32 ssa_309 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_310 = iadd ssa_309, ssa_306 vec1 32 ssa_311 = intrinsic load_shared (ssa_310) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_303.x, ssa_311, r5 vec1 32 ssa_313 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_314 = iadd ssa_313, ssa_306 vec1 32 ssa_315 = intrinsic load_shared (ssa_314) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_303.x, ssa_315, r6 /* succs: block_28 */ } else { block block_27: /* preds: block_25 */ /* succs: block_28 */ } block block_28: /* preds: block_26 block_27 */ vec1 32 ssa_321 = iadd ssa_163, ssa_22 vec1 32 ssa_322 = fadd ssa_23, -ssa_125 vec1 32 ssa_323 = ffma ssa_322, ssa_322, ssa_220 vec1 32 ssa_324 = fsqrt ssa_323 vec1 32 ssa_325 = flt32 ssa_324, ssa_13 /* succs: block_29 block_30 */ if ssa_325 { block block_29: /* preds: block_28 */ vec1 32 ssa_326 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_327 = fmul ssa_324, ssa_326 vec1 32 ssa_328 = flrp ssa_14, ssa_15, ssa_327 vec4 32 ssa_329 = tex ssa_328 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_329.x vec1 32 ssa_331 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_332 = ishl ssa_321, ssa_331 vec1 32 ssa_333 = intrinsic load_shared (ssa_332) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_329.x, ssa_333, r4 vec1 32 ssa_335 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_336 = iadd ssa_335, ssa_332 vec1 32 ssa_337 = intrinsic load_shared (ssa_336) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_329.x, ssa_337, r5 vec1 32 ssa_339 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_340 = iadd ssa_339, ssa_332 vec1 32 ssa_341 = intrinsic load_shared (ssa_340) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_329.x, ssa_341, r6 
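Before these guarded taps, blocks 1 through 9 above are the cooperative fill of the shared tile: each of the 32x8 threads strides x by 32 over a 30-texel-wide window and y by 8 over 14 rows (30 x 14 = 420 texels per plane, 5040 bytes of shared memory in total), samples texture0 at an offset of -3 texels in each axis (the iadd with ssa_8), and scatters the three components to the three shared planes at byte offset (30 * row + col) * 4 plus 0x000, 0x690 or 0xd20, after which the group_memory_barrier/barrier pair in block_10 makes the tile visible to every thread. A hedged GLSL reconstruction, with base/pt (sample-space origin and texel step) as illustrative names:

    // Cooperative fill of the 30x14 shared tile, per blocks 1-9 above.
    shared float in0[420], in1[420], in2[420];
    void fill_tile(uint local_x, uint local_y, vec2 base, vec2 pt) {
        for (uint y = local_y; y < 14u; y += 8u) {      // ige32 r1, 14; r1 += 8
            for (uint x = local_x; x < 30u; x += 32u) { // ige32 r2, 30; r2 += 32
                vec2 p = base + pt * vec2(int(x) - 3, int(y) - 3); // iadd -3
                vec4 c = texture(texture0, p);
                in0[30u * y + x] = c.x;                 // store_shared +0x000
                in1[30u * y + x] = c.y;                 // store_shared +0x690
                in2[30u * y + x] = c.z;                 // store_shared +0xd20
            }
        }
        groupMemoryBarrier();                           // block_10
        barrier();
    }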
/* succs: block_31 */ } else { block block_30: /* preds: block_28 */ /* succs: block_31 */ } block block_31: /* preds: block_29 block_30 */ vec1 32 ssa_347 = iadd ssa_163, ssa_24 vec1 32 ssa_348 = fadd ssa_25, -ssa_125 vec1 32 ssa_349 = ffma ssa_348, ssa_348, ssa_220 vec1 32 ssa_350 = fsqrt ssa_349 vec1 32 ssa_351 = flt32 ssa_350, ssa_13 /* succs: block_32 block_33 */ if ssa_351 { block block_32: /* preds: block_31 */ vec1 32 ssa_352 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_353 = fmul ssa_350, ssa_352 vec1 32 ssa_354 = flrp ssa_14, ssa_15, ssa_353 vec4 32 ssa_355 = tex ssa_354 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_355.x vec1 32 ssa_357 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_358 = ishl ssa_347, ssa_357 vec1 32 ssa_359 = intrinsic load_shared (ssa_358) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_355.x, ssa_359, r4 vec1 32 ssa_361 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_362 = iadd ssa_361, ssa_358 vec1 32 ssa_363 = intrinsic load_shared (ssa_362) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_355.x, ssa_363, r5 vec1 32 ssa_365 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_366 = iadd ssa_365, ssa_358 vec1 32 ssa_367 = intrinsic load_shared (ssa_366) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_355.x, ssa_367, r6 /* succs: block_34 */ } else { block block_33: /* preds: block_31 */ /* succs: block_34 */ } block block_34: /* preds: block_32 block_33 */ vec1 32 ssa_373 = iadd ssa_163, ssa_26 vec1 32 ssa_374 = fadd ssa_19, -ssa_126 vec1 32 ssa_375 = fmul ssa_374, ssa_374 vec1 32 ssa_376 = ffma ssa_218, ssa_218, ssa_375 vec1 32 ssa_377 = fsqrt ssa_376 vec1 32 ssa_378 = flt32 ssa_377, ssa_13 /* succs: block_35 block_36 */ if ssa_378 { block block_35: /* preds: block_34 */ vec1 32 ssa_379 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_380 = fmul ssa_377, ssa_379 vec1 32 ssa_381 = flrp ssa_14, ssa_15, ssa_380 vec4 32 ssa_382 = tex ssa_381 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_382.x vec1 32 ssa_384 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_385 = ishl ssa_373, ssa_384 vec1 32 ssa_386 = intrinsic load_shared (ssa_385) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_382.x, ssa_386, r4 vec1 32 ssa_388 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_389 = iadd ssa_388, ssa_385 vec1 32 ssa_390 = intrinsic load_shared (ssa_389) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_382.x, ssa_390, r5 vec1 32 ssa_392 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_393 = iadd ssa_392, ssa_385 vec1 32 ssa_394 = intrinsic load_shared (ssa_393) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_382.x, ssa_394, r6 /* succs: block_37 */ } else { block block_36: /* preds: block_34 */ /* succs: block_37 */ } block block_37: /* preds: block_35 block_36 */ vec1 32 ssa_400 = iadd ssa_163, ssa_27 vec1 32 ssa_401 = ffma ssa_246, ssa_246, ssa_375 vec1 32 ssa_402 = fsqrt ssa_401 vec1 32 ssa_403 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_404 = fmul ssa_402, ssa_403 vec1 32 ssa_405 = flrp ssa_14, ssa_15, ssa_404 vec4 32 ssa_406 = tex ssa_405 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_407 = fadd r3, ssa_406.x vec1 32 ssa_408 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_409 = ishl ssa_400, ssa_408 vec1 32 ssa_410 = intrinsic load_shared (ssa_409) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 
*/ vec1 32 ssa_411 = ffma ssa_406.x, ssa_410, r4 vec1 32 ssa_412 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_413 = iadd ssa_412, ssa_409 vec1 32 ssa_414 = intrinsic load_shared (ssa_413) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_415 = ffma ssa_406.x, ssa_414, r5 vec1 32 ssa_416 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_417 = iadd ssa_416, ssa_409 vec1 32 ssa_418 = intrinsic load_shared (ssa_417) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_419 = ffma ssa_406.x, ssa_418, r6 vec1 32 ssa_420 = iadd ssa_163, ssa_28 vec1 32 ssa_421 = ffma ssa_125, ssa_125, ssa_375 vec1 32 ssa_422 = fsqrt ssa_421 vec1 32 ssa_423 = fmul ssa_422, ssa_403 vec1 32 ssa_424 = flrp ssa_14, ssa_15, ssa_423 vec4 32 ssa_425 = tex ssa_424 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_426 = fadd ssa_407, ssa_425.x vec1 32 ssa_427 = ishl ssa_420, ssa_408 vec1 32 ssa_428 = intrinsic load_shared (ssa_427) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_429 = ffma ssa_425.x, ssa_428, ssa_411 vec1 32 ssa_430 = iadd ssa_412, ssa_427 vec1 32 ssa_431 = intrinsic load_shared (ssa_430) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_432 = ffma ssa_425.x, ssa_431, ssa_415 vec1 32 ssa_433 = iadd ssa_416, ssa_427 vec1 32 ssa_434 = intrinsic load_shared (ssa_433) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_435 = ffma ssa_425.x, ssa_434, ssa_419 vec1 32 ssa_436 = iadd ssa_163, ssa_1 vec1 32 ssa_437 = ffma ssa_192, ssa_192, ssa_375 vec1 32 ssa_438 = fsqrt ssa_437 vec1 32 ssa_439 = fmul ssa_438, ssa_403 vec1 32 ssa_440 = flrp ssa_14, ssa_15, ssa_439 vec4 32 ssa_441 = tex ssa_440 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_442 = fadd ssa_426, ssa_441.x vec1 32 ssa_443 = ishl ssa_436, ssa_408 vec1 32 ssa_444 = intrinsic load_shared (ssa_443) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_445 = ffma ssa_441.x, ssa_444, ssa_429 vec1 32 ssa_446 = iadd ssa_412, ssa_443 vec1 32 ssa_447 = intrinsic load_shared (ssa_446) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_448 = ffma ssa_441.x, ssa_447, ssa_432 vec1 32 ssa_449 = iadd ssa_416, ssa_443 vec1 32 ssa_450 = intrinsic load_shared (ssa_449) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_451 = ffma ssa_441.x, ssa_450, ssa_435 vec1 32 ssa_452 = iadd ssa_163, ssa_29 vec1 32 ssa_453 = ffma ssa_322, ssa_322, ssa_375 vec1 32 ssa_454 = fsqrt ssa_453 vec1 32 ssa_455 = fmul ssa_454, ssa_403 vec1 32 ssa_456 = flrp ssa_14, ssa_15, ssa_455 vec4 32 ssa_457 = tex ssa_456 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r7 = fadd ssa_442, ssa_457.x vec1 32 ssa_459 = ishl ssa_452, ssa_408 vec1 32 ssa_460 = intrinsic load_shared (ssa_459) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r8 = ffma ssa_457.x, ssa_460, ssa_445 vec1 32 ssa_462 = iadd ssa_412, ssa_459 vec1 32 ssa_463 = intrinsic load_shared (ssa_462) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r9 = ffma ssa_457.x, ssa_463, ssa_448 vec1 32 ssa_465 = iadd ssa_416, ssa_459 vec1 32 ssa_466 = intrinsic load_shared (ssa_465) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r10 = ffma ssa_457.x, ssa_466, ssa_451 vec1 32 ssa_468 = iadd ssa_163, ssa_30 vec1 32 ssa_469 = ffma ssa_348, ssa_348, ssa_375 vec1 32 ssa_470 = fsqrt ssa_469 vec1 32 ssa_471 = flt32 ssa_470, ssa_13 /* succs: block_38 block_39 */ if ssa_471 { block block_38: /* preds: block_37 
*/ vec1 32 ssa_472 = fmul ssa_470, ssa_403 vec1 32 ssa_473 = flrp ssa_14, ssa_15, ssa_472 vec4 32 ssa_474 = tex ssa_473 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r7 = fadd r7, ssa_474.x vec1 32 ssa_476 = ishl ssa_468, ssa_408 vec1 32 ssa_477 = intrinsic load_shared (ssa_476) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r8 = ffma ssa_474.x, ssa_477, r8 vec1 32 ssa_479 = iadd ssa_412, ssa_476 vec1 32 ssa_480 = intrinsic load_shared (ssa_479) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r9 = ffma ssa_474.x, ssa_480, r9 vec1 32 ssa_482 = iadd ssa_416, ssa_476 vec1 32 ssa_483 = intrinsic load_shared (ssa_482) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r10 = ffma ssa_474.x, ssa_483, r10 /* succs: block_40 */ } else { block block_39: /* preds: block_37 */ /* succs: block_40 */ } block block_40: /* preds: block_38 block_39 */ vec1 32 ssa_489 = iadd ssa_163, ssa_31 vec1 32 ssa_490 = fadd ssa_12, -ssa_125 vec1 32 ssa_491 = fmul ssa_126, ssa_126 vec1 32 ssa_492 = ffma ssa_490, ssa_490, ssa_491 vec1 32 ssa_493 = fsqrt ssa_492 vec1 32 ssa_494 = flt32 ssa_493, ssa_13 /* succs: block_41 block_42 */ if ssa_494 { block block_41: /* preds: block_40 */ vec1 32 ssa_495 = fmul ssa_493, ssa_403 vec1 32 ssa_496 = flrp ssa_14, ssa_15, ssa_495 vec4 32 ssa_497 = tex ssa_496 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r7 = fadd r7, ssa_497.x vec1 32 ssa_499 = ishl ssa_489, ssa_408 vec1 32 ssa_500 = intrinsic load_shared (ssa_499) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r8 = ffma ssa_497.x, ssa_500, r8 vec1 32 ssa_502 = iadd ssa_412, ssa_499 vec1 32 ssa_503 = intrinsic load_shared (ssa_502) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r9 = ffma ssa_497.x, ssa_503, r9 vec1 32 ssa_505 = iadd ssa_416, ssa_499 vec1 32 ssa_506 = intrinsic load_shared (ssa_505) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r10 = ffma ssa_497.x, ssa_506, r10 /* succs: block_43 */ } else { block block_42: /* preds: block_40 */ /* succs: block_43 */ } block block_43: /* preds: block_41 block_42 */ vec1 32 ssa_512 = iadd ssa_163, ssa_32 vec1 32 ssa_513 = ffma ssa_218, ssa_218, ssa_491 vec1 32 ssa_514 = fsqrt ssa_513 vec1 32 ssa_515 = flt32 ssa_514, ssa_13 /* succs: block_44 block_45 */ if ssa_515 { block block_44: /* preds: block_43 */ vec1 32 ssa_516 = fmul ssa_514, ssa_403 vec1 32 ssa_517 = flrp ssa_14, ssa_15, ssa_516 vec4 32 ssa_518 = tex ssa_517 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r7 = fadd r7, ssa_518.x vec1 32 ssa_520 = ishl ssa_512, ssa_408 vec1 32 ssa_521 = intrinsic load_shared (ssa_520) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r8 = ffma ssa_518.x, ssa_521, r8 vec1 32 ssa_523 = iadd ssa_412, ssa_520 vec1 32 ssa_524 = intrinsic load_shared (ssa_523) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r9 = ffma ssa_518.x, ssa_524, r9 vec1 32 ssa_526 = iadd ssa_416, ssa_520 vec1 32 ssa_527 = intrinsic load_shared (ssa_526) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r10 = ffma ssa_518.x, ssa_527, r10 /* succs: block_46 */ } else { block block_45: /* preds: block_43 */ /* succs: block_46 */ } block block_46: /* preds: block_44 block_45 */ vec1 32 ssa_533 = iadd ssa_163, ssa_33 vec1 32 ssa_534 = ffma ssa_246, ssa_246, ssa_491 vec1 32 ssa_535 = fsqrt ssa_534 vec1 32 ssa_536 = fmul ssa_535, ssa_403 vec1 32 ssa_537 = flrp ssa_14, ssa_15, ssa_536 vec4 32 ssa_538 = tex ssa_537 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_539 = fadd r7, ssa_538.x vec1 32 
ssa_540 = ishl ssa_533, ssa_408 vec1 32 ssa_541 = intrinsic load_shared (ssa_540) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_542 = ffma ssa_538.x, ssa_541, r8 vec1 32 ssa_543 = iadd ssa_412, ssa_540 vec1 32 ssa_544 = intrinsic load_shared (ssa_543) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_545 = ffma ssa_538.x, ssa_544, r9 vec1 32 ssa_546 = iadd ssa_416, ssa_540 vec1 32 ssa_547 = intrinsic load_shared (ssa_546) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_548 = ffma ssa_538.x, ssa_547, r10 vec1 32 ssa_549 = iadd ssa_163, ssa_34 vec1 32 ssa_550 = ffma ssa_125, ssa_125, ssa_491 vec1 32 ssa_551 = fsqrt ssa_550 vec1 32 ssa_552 = fmul ssa_551, ssa_403 vec1 32 ssa_553 = flrp ssa_14, ssa_15, ssa_552 vec4 32 ssa_554 = tex ssa_553 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_555 = fadd ssa_539, ssa_554.x vec1 32 ssa_556 = ishl ssa_549, ssa_408 vec1 32 ssa_557 = intrinsic load_shared (ssa_556) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_558 = ffma ssa_554.x, ssa_557, ssa_542 vec1 32 ssa_559 = iadd ssa_412, ssa_556 vec1 32 ssa_560 = intrinsic load_shared (ssa_559) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_561 = ffma ssa_554.x, ssa_560, ssa_545 vec1 32 ssa_562 = iadd ssa_416, ssa_556 vec1 32 ssa_563 = intrinsic load_shared (ssa_562) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_564 = ffma ssa_554.x, ssa_563, ssa_548 vec1 32 ssa_565 = iadd ssa_163, ssa_35 vec1 32 ssa_566 = ffma ssa_192, ssa_192, ssa_491 vec1 32 ssa_567 = fsqrt ssa_566 vec1 32 ssa_568 = fmul ssa_567, ssa_403 vec1 32 ssa_569 = flrp ssa_14, ssa_15, ssa_568 vec4 32 ssa_570 = tex ssa_569 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_571 = fadd ssa_555, ssa_570.x vec1 32 ssa_572 = ishl ssa_565, ssa_408 vec1 32 ssa_573 = intrinsic load_shared (ssa_572) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_574 = ffma ssa_570.x, ssa_573, ssa_558 vec1 32 ssa_575 = iadd ssa_412, ssa_572 vec1 32 ssa_576 = intrinsic load_shared (ssa_575) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_577 = ffma ssa_570.x, ssa_576, ssa_561 vec1 32 ssa_578 = iadd ssa_416, ssa_572 vec1 32 ssa_579 = intrinsic load_shared (ssa_578) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_580 = ffma ssa_570.x, ssa_579, ssa_564 vec1 32 ssa_581 = iadd ssa_163, ssa_36 vec1 32 ssa_582 = ffma ssa_322, ssa_322, ssa_491 vec1 32 ssa_583 = fsqrt ssa_582 vec1 32 ssa_584 = fmul ssa_583, ssa_403 vec1 32 ssa_585 = flrp ssa_14, ssa_15, ssa_584 vec4 32 ssa_586 = tex ssa_585 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd ssa_571, ssa_586.x vec1 32 ssa_588 = ishl ssa_581, ssa_408 vec1 32 ssa_589 = intrinsic load_shared (ssa_588) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma ssa_586.x, ssa_589, ssa_574 vec1 32 ssa_591 = iadd ssa_412, ssa_588 vec1 32 ssa_592 = intrinsic load_shared (ssa_591) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r13 = ffma ssa_586.x, ssa_592, ssa_577 vec1 32 ssa_594 = iadd ssa_416, ssa_588 vec1 32 ssa_595 = intrinsic load_shared (ssa_594) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_586.x, ssa_595, ssa_580 vec1 32 ssa_597 = iadd ssa_163, ssa_2 vec1 32 ssa_598 = ffma ssa_348, ssa_348, ssa_491 vec1 32 ssa_599 = fsqrt ssa_598 vec1 32 ssa_600 = flt32 ssa_599, ssa_13 /* succs: block_47 block_48 */ if ssa_600 { 
block block_47: /* preds: block_46 */ vec1 32 ssa_601 = fmul ssa_599, ssa_403 vec1 32 ssa_602 = flrp ssa_14, ssa_15, ssa_601 vec4 32 ssa_603 = tex ssa_602 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd r11, ssa_603.x vec1 32 ssa_605 = ishl ssa_597, ssa_408 vec1 32 ssa_606 = intrinsic load_shared (ssa_605) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma ssa_603.x, ssa_606, r12 vec1 32 ssa_608 = iadd ssa_412, ssa_605 vec1 32 ssa_609 = intrinsic load_shared (ssa_608) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r13 = ffma ssa_603.x, ssa_609, r13 vec1 32 ssa_611 = iadd ssa_416, ssa_605 vec1 32 ssa_612 = intrinsic load_shared (ssa_611) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_603.x, ssa_612, r14 /* succs: block_49 */ } else { block block_48: /* preds: block_46 */ /* succs: block_49 */ } block block_49: /* preds: block_47 block_48 */ vec1 32 ssa_618 = iadd ssa_163, ssa_37 vec1 32 ssa_619 = fadd ssa_38, -ssa_125 vec1 32 ssa_620 = ffma ssa_619, ssa_619, ssa_491 vec1 32 ssa_621 = fsqrt ssa_620 vec1 32 ssa_622 = flt32 ssa_621, ssa_13 /* succs: block_50 block_51 */ if ssa_622 { block block_50: /* preds: block_49 */ vec1 32 ssa_623 = fmul ssa_621, ssa_403 vec1 32 ssa_624 = flrp ssa_14, ssa_15, ssa_623 vec4 32 ssa_625 = tex ssa_624 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd r11, ssa_625.x vec1 32 ssa_627 = ishl ssa_618, ssa_408 vec1 32 ssa_628 = intrinsic load_shared (ssa_627) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma ssa_625.x, ssa_628, r12 vec1 32 ssa_630 = iadd ssa_412, ssa_627 vec1 32 ssa_631 = intrinsic load_shared (ssa_630) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r13 = ffma ssa_625.x, ssa_631, r13 vec1 32 ssa_633 = iadd ssa_416, ssa_627 vec1 32 ssa_634 = intrinsic load_shared (ssa_633) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_625.x, ssa_634, r14 /* succs: block_52 */ } else { block block_51: /* preds: block_49 */ /* succs: block_52 */ } block block_52: /* preds: block_50 block_51 */ vec1 32 ssa_640 = iadd ssa_163, ssa_39 vec1 32 ssa_641 = fadd ssa_16, -ssa_126 vec1 32 ssa_642 = fmul ssa_641, ssa_641 vec1 32 ssa_643 = ffma ssa_490, ssa_490, ssa_642 vec1 32 ssa_644 = fsqrt ssa_643 vec1 32 ssa_645 = flt32 ssa_644, ssa_13 /* succs: block_53 block_54 */ if ssa_645 { block block_53: /* preds: block_52 */ vec1 32 ssa_646 = fmul ssa_644, ssa_403 vec1 32 ssa_647 = flrp ssa_14, ssa_15, ssa_646 vec4 32 ssa_648 = tex ssa_647 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd r11, ssa_648.x vec1 32 ssa_650 = ishl ssa_640, ssa_408 vec1 32 ssa_651 = intrinsic load_shared (ssa_650) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma ssa_648.x, ssa_651, r12 vec1 32 ssa_653 = iadd ssa_412, ssa_650 vec1 32 ssa_654 = intrinsic load_shared (ssa_653) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r13 = ffma ssa_648.x, ssa_654, r13 vec1 32 ssa_656 = iadd ssa_416, ssa_650 vec1 32 ssa_657 = intrinsic load_shared (ssa_656) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_648.x, ssa_657, r14 /* succs: block_55 */ } else { block block_54: /* preds: block_52 */ /* succs: block_55 */ } block block_55: /* preds: block_53 block_54 */ vec1 32 ssa_663 = iadd ssa_163, ssa_40 vec1 32 ssa_664 = ffma ssa_218, ssa_218, ssa_642 vec1 32 ssa_665 = fsqrt ssa_664 vec1 32 ssa_666 = flt32 ssa_665, ssa_13 /* succs: block_56 block_57 */ if ssa_666 { block block_56: /* preds: 
block_55 */ vec1 32 ssa_667 = fmul ssa_665, ssa_403 vec1 32 ssa_668 = flrp ssa_14, ssa_15, ssa_667 vec4 32 ssa_669 = tex ssa_668 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd r11, ssa_669.x vec1 32 ssa_671 = ishl ssa_663, ssa_408 vec1 32 ssa_672 = intrinsic load_shared (ssa_671) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma ssa_669.x, ssa_672, r12 vec1 32 ssa_674 = iadd ssa_412, ssa_671 vec1 32 ssa_675 = intrinsic load_shared (ssa_674) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r13 = ffma ssa_669.x, ssa_675, r13 vec1 32 ssa_677 = iadd ssa_416, ssa_671 vec1 32 ssa_678 = intrinsic load_shared (ssa_677) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_669.x, ssa_678, r14 /* succs: block_58 */ } else { block block_57: /* preds: block_55 */ /* succs: block_58 */ } block block_58: /* preds: block_56 block_57 */ vec1 32 ssa_684 = iadd ssa_163, ssa_41 vec1 32 ssa_685 = ffma ssa_246, ssa_246, ssa_642 vec1 32 ssa_686 = fsqrt ssa_685 vec1 32 ssa_687 = fmul ssa_686, ssa_403 vec1 32 ssa_688 = flrp ssa_14, ssa_15, ssa_687 vec4 32 ssa_689 = tex ssa_688 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_690 = fadd r11, ssa_689.x vec1 32 ssa_691 = ishl ssa_684, ssa_408 vec1 32 ssa_692 = intrinsic load_shared (ssa_691) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_693 = ffma ssa_689.x, ssa_692, r12 vec1 32 ssa_694 = iadd ssa_412, ssa_691 vec1 32 ssa_695 = intrinsic load_shared (ssa_694) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_696 = ffma ssa_689.x, ssa_695, r13 vec1 32 ssa_697 = iadd ssa_416, ssa_691 vec1 32 ssa_698 = intrinsic load_shared (ssa_697) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_699 = ffma ssa_689.x, ssa_698, r14 vec1 32 ssa_700 = iadd ssa_163, ssa_42 vec1 32 ssa_701 = ffma ssa_125, ssa_125, ssa_642 vec1 32 ssa_702 = fsqrt ssa_701 vec1 32 ssa_703 = fmul ssa_702, ssa_403 vec1 32 ssa_704 = flrp ssa_14, ssa_15, ssa_703 vec4 32 ssa_705 = tex ssa_704 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_706 = fadd ssa_690, ssa_705.x vec1 32 ssa_707 = ishl ssa_700, ssa_408 vec1 32 ssa_708 = intrinsic load_shared (ssa_707) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_709 = ffma ssa_705.x, ssa_708, ssa_693 vec1 32 ssa_710 = iadd ssa_412, ssa_707 vec1 32 ssa_711 = intrinsic load_shared (ssa_710) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_712 = ffma ssa_705.x, ssa_711, ssa_696 vec1 32 ssa_713 = iadd ssa_416, ssa_707 vec1 32 ssa_714 = intrinsic load_shared (ssa_713) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_715 = ffma ssa_705.x, ssa_714, ssa_699 vec1 32 ssa_716 = iadd ssa_163, ssa_43 vec1 32 ssa_717 = ffma ssa_192, ssa_192, ssa_642 vec1 32 ssa_718 = fsqrt ssa_717 vec1 32 ssa_719 = fmul ssa_718, ssa_403 vec1 32 ssa_720 = flrp ssa_14, ssa_15, ssa_719 vec4 32 ssa_721 = tex ssa_720 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_722 = fadd ssa_706, ssa_721.x vec1 32 ssa_723 = ishl ssa_716, ssa_408 vec1 32 ssa_724 = intrinsic load_shared (ssa_723) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_725 = ffma ssa_721.x, ssa_724, ssa_709 vec1 32 ssa_726 = iadd ssa_412, ssa_723 vec1 32 ssa_727 = intrinsic load_shared (ssa_726) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_728 = ffma ssa_721.x, ssa_727, ssa_712 vec1 32 ssa_729 = iadd ssa_416, ssa_723 vec1 32 ssa_730 = 
intrinsic load_shared (ssa_729) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_731 = ffma ssa_721.x, ssa_730, ssa_715 vec1 32 ssa_732 = iadd ssa_163, ssa_44 vec1 32 ssa_733 = ffma ssa_322, ssa_322, ssa_642 vec1 32 ssa_734 = fsqrt ssa_733 vec1 32 ssa_735 = fmul ssa_734, ssa_403 vec1 32 ssa_736 = flrp ssa_14, ssa_15, ssa_735 vec4 32 ssa_737 = tex ssa_736 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r15 = fadd ssa_722, ssa_737.x vec1 32 ssa_739 = ishl ssa_732, ssa_408 vec1 32 ssa_740 = intrinsic load_shared (ssa_739) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r16 = ffma ssa_737.x, ssa_740, ssa_725 vec1 32 ssa_742 = iadd ssa_412, ssa_739 vec1 32 ssa_743 = intrinsic load_shared (ssa_742) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r17 = ffma ssa_737.x, ssa_743, ssa_728 vec1 32 ssa_745 = iadd ssa_416, ssa_739 vec1 32 ssa_746 = intrinsic load_shared (ssa_745) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r18 = ffma ssa_737.x, ssa_746, ssa_731 vec1 32 ssa_748 = iadd ssa_163, ssa_45 vec1 32 ssa_749 = ffma ssa_348, ssa_348, ssa_642 vec1 32 ssa_750 = fsqrt ssa_749 vec1 32 ssa_751 = flt32 ssa_750, ssa_13 /* succs: block_59 block_60 */ if ssa_751 { block block_59: /* preds: block_58 */ vec1 32 ssa_752 = fmul ssa_750, ssa_403 vec1 32 ssa_753 = flrp ssa_14, ssa_15, ssa_752 vec4 32 ssa_754 = tex ssa_753 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r15 = fadd r15, ssa_754.x vec1 32 ssa_756 = ishl ssa_748, ssa_408 vec1 32 ssa_757 = intrinsic load_shared (ssa_756) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r16 = ffma ssa_754.x, ssa_757, r16 vec1 32 ssa_759 = iadd ssa_412, ssa_756 vec1 32 ssa_760 = intrinsic load_shared (ssa_759) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r17 = ffma ssa_754.x, ssa_760, r17 vec1 32 ssa_762 = iadd ssa_416, ssa_756 vec1 32 ssa_763 = intrinsic load_shared (ssa_762) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r18 = ffma ssa_754.x, ssa_763, r18 /* succs: block_61 */ } else { block block_60: /* preds: block_58 */ /* succs: block_61 */ } block block_61: /* preds: block_59 block_60 */ vec1 32 ssa_769 = iadd ssa_163, ssa_46 vec1 32 ssa_770 = ffma ssa_619, ssa_619, ssa_642 vec1 32 ssa_771 = fsqrt ssa_770 vec1 32 ssa_772 = flt32 ssa_771, ssa_13 /* succs: block_62 block_63 */ if ssa_772 { block block_62: /* preds: block_61 */ vec1 32 ssa_773 = fmul ssa_771, ssa_403 vec1 32 ssa_774 = flrp ssa_14, ssa_15, ssa_773 vec4 32 ssa_775 = tex ssa_774 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r15 = fadd r15, ssa_775.x vec1 32 ssa_777 = ishl ssa_769, ssa_408 vec1 32 ssa_778 = intrinsic load_shared (ssa_777) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r16 = ffma ssa_775.x, ssa_778, r16 vec1 32 ssa_780 = iadd ssa_412, ssa_777 vec1 32 ssa_781 = intrinsic load_shared (ssa_780) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r17 = ffma ssa_775.x, ssa_781, r17 vec1 32 ssa_783 = iadd ssa_416, ssa_777 vec1 32 ssa_784 = intrinsic load_shared (ssa_783) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r18 = ffma ssa_775.x, ssa_784, r18 /* succs: block_64 */ } else { block block_63: /* preds: block_61 */ /* succs: block_64 */ } block block_64: /* preds: block_62 block_63 */ vec1 32 ssa_790 = iadd ssa_163, ssa_47 vec1 32 ssa_791 = fadd ssa_23, -ssa_126 vec1 32 ssa_792 = fmul ssa_791, ssa_791 vec1 32 ssa_793 = ffma ssa_218, ssa_218, ssa_792 vec1 32 ssa_794 = fsqrt ssa_793 vec1 32 ssa_795 = flt32 ssa_794, ssa_13 /* succs: 
block_65 block_66 */ if ssa_795 { block block_65: /* preds: block_64 */ vec1 32 ssa_796 = fmul ssa_794, ssa_403 vec1 32 ssa_797 = flrp ssa_14, ssa_15, ssa_796 vec4 32 ssa_798 = tex ssa_797 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r15 = fadd r15, ssa_798.x vec1 32 ssa_800 = ishl ssa_790, ssa_408 vec1 32 ssa_801 = intrinsic load_shared (ssa_800) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r16 = ffma ssa_798.x, ssa_801, r16 vec1 32 ssa_803 = iadd ssa_412, ssa_800 vec1 32 ssa_804 = intrinsic load_shared (ssa_803) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r17 = ffma ssa_798.x, ssa_804, r17 vec1 32 ssa_806 = iadd ssa_416, ssa_800 vec1 32 ssa_807 = intrinsic load_shared (ssa_806) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r18 = ffma ssa_798.x, ssa_807, r18 /* succs: block_67 */ } else { block block_66: /* preds: block_64 */ /* succs: block_67 */ } block block_67: /* preds: block_65 block_66 */ vec1 32 ssa_813 = iadd ssa_163, ssa_48 vec1 32 ssa_814 = ffma ssa_246, ssa_246, ssa_792 vec1 32 ssa_815 = fsqrt ssa_814 vec1 32 ssa_816 = fmul ssa_815, ssa_403 vec1 32 ssa_817 = flrp ssa_14, ssa_15, ssa_816 vec4 32 ssa_818 = tex ssa_817 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_819 = fadd r15, ssa_818.x vec1 32 ssa_820 = ishl ssa_813, ssa_408 vec1 32 ssa_821 = intrinsic load_shared (ssa_820) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_822 = ffma ssa_818.x, ssa_821, r16 vec1 32 ssa_823 = iadd ssa_412, ssa_820 vec1 32 ssa_824 = intrinsic load_shared (ssa_823) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_825 = ffma ssa_818.x, ssa_824, r17 vec1 32 ssa_826 = iadd ssa_416, ssa_820 vec1 32 ssa_827 = intrinsic load_shared (ssa_826) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_828 = ffma ssa_818.x, ssa_827, r18 vec1 32 ssa_829 = iadd ssa_163, ssa_49 vec1 32 ssa_830 = ffma ssa_125, ssa_125, ssa_792 vec1 32 ssa_831 = fsqrt ssa_830 vec1 32 ssa_832 = fmul ssa_831, ssa_403 vec1 32 ssa_833 = flrp ssa_14, ssa_15, ssa_832 vec4 32 ssa_834 = tex ssa_833 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_835 = fadd ssa_819, ssa_834.x vec1 32 ssa_836 = ishl ssa_829, ssa_408 vec1 32 ssa_837 = intrinsic load_shared (ssa_836) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_838 = ffma ssa_834.x, ssa_837, ssa_822 vec1 32 ssa_839 = iadd ssa_412, ssa_836 vec1 32 ssa_840 = intrinsic load_shared (ssa_839) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_841 = ffma ssa_834.x, ssa_840, ssa_825 vec1 32 ssa_842 = iadd ssa_416, ssa_836 vec1 32 ssa_843 = intrinsic load_shared (ssa_842) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_844 = ffma ssa_834.x, ssa_843, ssa_828 vec1 32 ssa_845 = iadd ssa_163, ssa_50 vec1 32 ssa_846 = ffma ssa_192, ssa_192, ssa_792 vec1 32 ssa_847 = fsqrt ssa_846 vec1 32 ssa_848 = fmul ssa_847, ssa_403 vec1 32 ssa_849 = flrp ssa_14, ssa_15, ssa_848 vec4 32 ssa_850 = tex ssa_849 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_851 = fadd ssa_835, ssa_850.x vec1 32 ssa_852 = ishl ssa_845, ssa_408 vec1 32 ssa_853 = intrinsic load_shared (ssa_852) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_854 = ffma ssa_850.x, ssa_853, ssa_838 vec1 32 ssa_855 = iadd ssa_412, ssa_852 vec1 32 ssa_856 = intrinsic load_shared (ssa_855) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_857 = ffma ssa_850.x, ssa_856, ssa_841 
vec1 32 ssa_858 = iadd ssa_416, ssa_852 vec1 32 ssa_859 = intrinsic load_shared (ssa_858) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_860 = ffma ssa_850.x, ssa_859, ssa_844 vec1 32 ssa_861 = iadd ssa_163, ssa_51 vec1 32 ssa_862 = ffma ssa_322, ssa_322, ssa_792 vec1 32 ssa_863 = fsqrt ssa_862 vec1 32 ssa_864 = fmul ssa_863, ssa_403 vec1 32 ssa_865 = flrp ssa_14, ssa_15, ssa_864 vec4 32 ssa_866 = tex ssa_865 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd ssa_851, ssa_866.x vec1 32 ssa_868 = ishl ssa_861, ssa_408 vec1 32 ssa_869 = intrinsic load_shared (ssa_868) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_866.x, ssa_869, ssa_854 vec1 32 ssa_871 = iadd ssa_412, ssa_868 vec1 32 ssa_872 = intrinsic load_shared (ssa_871) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_866.x, ssa_872, ssa_857 vec1 32 ssa_874 = iadd ssa_416, ssa_868 vec1 32 ssa_875 = intrinsic load_shared (ssa_874) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_866.x, ssa_875, ssa_860 vec1 32 ssa_877 = iadd ssa_163, ssa_52 vec1 32 ssa_878 = ffma ssa_348, ssa_348, ssa_792 vec1 32 ssa_879 = fsqrt ssa_878 vec1 32 ssa_880 = flt32 ssa_879, ssa_13 /* succs: block_68 block_69 */ if ssa_880 { block block_68: /* preds: block_67 */ vec1 32 ssa_881 = fmul ssa_879, ssa_403 vec1 32 ssa_882 = flrp ssa_14, ssa_15, ssa_881 vec4 32 ssa_883 = tex ssa_882 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_883.x vec1 32 ssa_885 = ishl ssa_877, ssa_408 vec1 32 ssa_886 = intrinsic load_shared (ssa_885) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_883.x, ssa_886, r20 vec1 32 ssa_888 = iadd ssa_412, ssa_885 vec1 32 ssa_889 = intrinsic load_shared (ssa_888) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_883.x, ssa_889, r21 vec1 32 ssa_891 = iadd ssa_416, ssa_885 vec1 32 ssa_892 = intrinsic load_shared (ssa_891) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_883.x, ssa_892, r22 /* succs: block_70 */ } else { block block_69: /* preds: block_67 */ /* succs: block_70 */ } block block_70: /* preds: block_68 block_69 */ vec1 32 ssa_898 = iadd ssa_163, ssa_53 vec1 32 ssa_899 = fadd ssa_25, -ssa_126 vec1 32 ssa_900 = fmul ssa_899, ssa_899 vec1 32 ssa_901 = ffma ssa_218, ssa_218, ssa_900 vec1 32 ssa_902 = fsqrt ssa_901 vec1 32 ssa_903 = flt32 ssa_902, ssa_13 /* succs: block_71 block_72 */ if ssa_903 { block block_71: /* preds: block_70 */ vec1 32 ssa_904 = fmul ssa_902, ssa_403 vec1 32 ssa_905 = flrp ssa_14, ssa_15, ssa_904 vec4 32 ssa_906 = tex ssa_905 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_906.x vec1 32 ssa_908 = ishl ssa_898, ssa_408 vec1 32 ssa_909 = intrinsic load_shared (ssa_908) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_906.x, ssa_909, r20 vec1 32 ssa_911 = iadd ssa_412, ssa_908 vec1 32 ssa_912 = intrinsic load_shared (ssa_911) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_906.x, ssa_912, r21 vec1 32 ssa_914 = iadd ssa_416, ssa_908 vec1 32 ssa_915 = intrinsic load_shared (ssa_914) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_906.x, ssa_915, r22 /* succs: block_73 */ } else { block block_72: /* preds: block_70 */ /* succs: block_73 */ } block block_73: /* preds: block_71 block_72 */ vec1 32 ssa_921 = iadd ssa_163, ssa_54 vec1 32 ssa_922 = ffma ssa_246, ssa_246, ssa_900 vec1 32 ssa_923 = fsqrt 
ssa_922 vec1 32 ssa_924 = flt32 ssa_923, ssa_13 /* succs: block_74 block_75 */ if ssa_924 { block block_74: /* preds: block_73 */ vec1 32 ssa_925 = fmul ssa_923, ssa_403 vec1 32 ssa_926 = flrp ssa_14, ssa_15, ssa_925 vec4 32 ssa_927 = tex ssa_926 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_927.x vec1 32 ssa_929 = ishl ssa_921, ssa_408 vec1 32 ssa_930 = intrinsic load_shared (ssa_929) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_927.x, ssa_930, r20 vec1 32 ssa_932 = iadd ssa_412, ssa_929 vec1 32 ssa_933 = intrinsic load_shared (ssa_932) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_927.x, ssa_933, r21 vec1 32 ssa_935 = iadd ssa_416, ssa_929 vec1 32 ssa_936 = intrinsic load_shared (ssa_935) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_927.x, ssa_936, r22 /* succs: block_76 */ } else { block block_75: /* preds: block_73 */ /* succs: block_76 */ } block block_76: /* preds: block_74 block_75 */ vec1 32 ssa_942 = iadd ssa_163, ssa_55 vec1 32 ssa_943 = ffma ssa_125, ssa_125, ssa_900 vec1 32 ssa_944 = fsqrt ssa_943 vec1 32 ssa_945 = flt32 ssa_944, ssa_13 /* succs: block_77 block_78 */ if ssa_945 { block block_77: /* preds: block_76 */ vec1 32 ssa_946 = fmul ssa_944, ssa_403 vec1 32 ssa_947 = flrp ssa_14, ssa_15, ssa_946 vec4 32 ssa_948 = tex ssa_947 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_948.x vec1 32 ssa_950 = ishl ssa_942, ssa_408 vec1 32 ssa_951 = intrinsic load_shared (ssa_950) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_948.x, ssa_951, r20 vec1 32 ssa_953 = iadd ssa_412, ssa_950 vec1 32 ssa_954 = intrinsic load_shared (ssa_953) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_948.x, ssa_954, r21 vec1 32 ssa_956 = iadd ssa_416, ssa_950 vec1 32 ssa_957 = intrinsic load_shared (ssa_956) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_948.x, ssa_957, r22 /* succs: block_79 */ } else { block block_78: /* preds: block_76 */ /* succs: block_79 */ } block block_79: /* preds: block_77 block_78 */ vec1 32 ssa_963 = iadd ssa_163, ssa_56 vec1 32 ssa_964 = ffma ssa_192, ssa_192, ssa_900 vec1 32 ssa_965 = fsqrt ssa_964 vec1 32 ssa_966 = flt32 ssa_965, ssa_13 /* succs: block_80 block_81 */ if ssa_966 { block block_80: /* preds: block_79 */ vec1 32 ssa_967 = fmul ssa_965, ssa_403 vec1 32 ssa_968 = flrp ssa_14, ssa_15, ssa_967 vec4 32 ssa_969 = tex ssa_968 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_969.x vec1 32 ssa_971 = ishl ssa_963, ssa_408 vec1 32 ssa_972 = intrinsic load_shared (ssa_971) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_969.x, ssa_972, r20 vec1 32 ssa_974 = iadd ssa_412, ssa_971 vec1 32 ssa_975 = intrinsic load_shared (ssa_974) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_969.x, ssa_975, r21 vec1 32 ssa_977 = iadd ssa_416, ssa_971 vec1 32 ssa_978 = intrinsic load_shared (ssa_977) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_969.x, ssa_978, r22 /* succs: block_82 */ } else { block block_81: /* preds: block_79 */ /* succs: block_82 */ } block block_82: /* preds: block_80 block_81 */ vec1 32 ssa_984 = iadd ssa_163, ssa_57 vec1 32 ssa_985 = ffma ssa_322, ssa_322, ssa_900 vec1 32 ssa_986 = fsqrt ssa_985 vec1 32 ssa_987 = flt32 ssa_986, ssa_13 /* succs: block_83 block_84 */ if ssa_987 { block block_83: /* preds: block_82 */ vec1 32 ssa_988 = fmul 
ssa_986, ssa_403 vec1 32 ssa_989 = flrp ssa_14, ssa_15, ssa_988 vec4 32 ssa_990 = tex ssa_989 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_990.x vec1 32 ssa_992 = ishl ssa_984, ssa_408 vec1 32 ssa_993 = intrinsic load_shared (ssa_992) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_990.x, ssa_993, r20 vec1 32 ssa_995 = iadd ssa_412, ssa_992 vec1 32 ssa_996 = intrinsic load_shared (ssa_995) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_990.x, ssa_996, r21 vec1 32 ssa_998 = iadd ssa_416, ssa_992 vec1 32 ssa_999 = intrinsic load_shared (ssa_998) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_990.x, ssa_999, r22 /* succs: block_85 */ } else { block block_84: /* preds: block_82 */ /* succs: block_85 */ } block block_85: /* preds: block_83 block_84 */ vec1 32 ssa_1005 = iadd ssa_163, ssa_58 vec1 32 ssa_1006 = ffma ssa_348, ssa_348, ssa_900 vec1 32 ssa_1007 = fsqrt ssa_1006 vec1 32 ssa_1008 = flt32 ssa_1007, ssa_13 /* succs: block_86 block_87 */ if ssa_1008 { block block_86: /* preds: block_85 */ vec1 32 ssa_1009 = fmul ssa_1007, ssa_403 vec1 32 ssa_1010 = flrp ssa_14, ssa_15, ssa_1009 vec4 32 ssa_1011 = tex ssa_1010 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_1011.x vec1 32 ssa_1013 = ishl ssa_1005, ssa_408 vec1 32 ssa_1014 = intrinsic load_shared (ssa_1013) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_1011.x, ssa_1014, r20 vec1 32 ssa_1016 = iadd ssa_412, ssa_1013 vec1 32 ssa_1017 = intrinsic load_shared (ssa_1016) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_1011.x, ssa_1017, r21 vec1 32 ssa_1019 = iadd ssa_416, ssa_1013 vec1 32 ssa_1020 = intrinsic load_shared (ssa_1019) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_1011.x, ssa_1020, r22 /* succs: block_88 */ } else { block block_87: /* preds: block_85 */ /* succs: block_88 */ } block block_88: /* preds: block_86 block_87 */ vec1 32 ssa_1026 = iadd ssa_163, ssa_59 vec1 32 ssa_1027 = fadd ssa_38, -ssa_126 vec1 32 ssa_1028 = fmul ssa_1027, ssa_1027 vec1 32 ssa_1029 = ffma ssa_125, ssa_125, ssa_1028 vec1 32 ssa_1030 = fsqrt ssa_1029 vec1 32 ssa_1031 = flt32 ssa_1030, ssa_13 /* succs: block_89 block_90 */ if ssa_1031 { block block_89: /* preds: block_88 */ vec1 32 ssa_1032 = fmul ssa_1030, ssa_403 vec1 32 ssa_1033 = flrp ssa_14, ssa_15, ssa_1032 vec4 32 ssa_1034 = tex ssa_1033 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_1034.x vec1 32 ssa_1036 = ishl ssa_1026, ssa_408 vec1 32 ssa_1037 = intrinsic load_shared (ssa_1036) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_1034.x, ssa_1037, r20 vec1 32 ssa_1039 = iadd ssa_412, ssa_1036 vec1 32 ssa_1040 = intrinsic load_shared (ssa_1039) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_1034.x, ssa_1040, r21 vec1 32 ssa_1042 = iadd ssa_416, ssa_1036 vec1 32 ssa_1043 = intrinsic load_shared (ssa_1042) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_1034.x, ssa_1043, r22 /* succs: block_91 */ } else { block block_90: /* preds: block_88 */ /* succs: block_91 */ } block block_91: /* preds: block_89 block_90 */ vec1 32 ssa_1049 = iadd ssa_163, ssa_60 vec1 32 ssa_1050 = ffma ssa_192, ssa_192, ssa_1028 vec1 32 ssa_1051 = fsqrt ssa_1050 vec1 32 ssa_1052 = flt32 ssa_1051, ssa_13 /* succs: block_92 block_93 */ if ssa_1052 { block block_92: /* preds: block_91 */ vec1 32 ssa_1053 = 
fmul ssa_1051, ssa_403 vec1 32 ssa_1054 = flrp ssa_14, ssa_15, ssa_1053 vec4 32 ssa_1055 = tex ssa_1054 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_1055.x vec1 32 ssa_1057 = ishl ssa_1049, ssa_408 vec1 32 ssa_1058 = intrinsic load_shared (ssa_1057) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_1055.x, ssa_1058, r20 vec1 32 ssa_1060 = iadd ssa_412, ssa_1057 vec1 32 ssa_1061 = intrinsic load_shared (ssa_1060) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_1055.x, ssa_1061, r21 vec1 32 ssa_1063 = iadd ssa_416, ssa_1057 vec1 32 ssa_1064 = intrinsic load_shared (ssa_1063) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_1055.x, ssa_1064, r22 /* succs: block_94 */ } else { block block_93: /* preds: block_91 */ /* succs: block_94 */ } block block_94: /* preds: block_92 block_93 */ vec1 32 ssa_1070 = frcp r19 vec1 32 ssa_1071 = fmul.sat r20, ssa_1070 vec1 32 ssa_1072 = fmul.sat r21, ssa_1070 vec1 32 ssa_1073 = fmul.sat r22, ssa_1070 vec1 32 ssa_1074 = fadd ssa_61, -ssa_1071 vec1 32 ssa_1075 = fadd ssa_61, -ssa_1072 vec1 32 ssa_1076 = fadd ssa_61, -ssa_1073 vec1 32 ssa_1077 = load_const (0x41160a50 /* 9.377518 */) vec1 32 ssa_1078 = fmul ssa_1077, ssa_1074 vec1 32 ssa_1079 = fmul ssa_1077, ssa_1075 vec1 32 ssa_1080 = fmul ssa_1077, ssa_1076 vec1 32 ssa_1081 = fexp2 ssa_1078 vec1 32 ssa_1082 = fexp2 ssa_1079 vec1 32 ssa_1083 = fexp2 ssa_1080 vec1 32 ssa_1084 = fadd ssa_16, ssa_1081 vec1 32 ssa_1085 = fadd ssa_16, ssa_1082 vec1 32 ssa_1086 = fadd ssa_16, ssa_1083 vec1 32 ssa_1087 = frcp ssa_1084 vec1 32 ssa_1088 = frcp ssa_1085 vec1 32 ssa_1089 = frcp ssa_1086 vec1 32 ssa_1090 = fadd ssa_1087, ssa_62 vec1 32 ssa_1091 = fadd ssa_1088, ssa_62 vec1 32 ssa_1092 = fadd ssa_1089, ssa_62 vec1 32 ssa_1093 = load_const (0x3f9a9b5f /* 1.207867 */) vec1 32 ssa_1094 = fmul.sat ssa_1090, ssa_1093 vec1 32 ssa_1095 = fmul.sat ssa_1091, ssa_1093 vec1 32 ssa_1096 = fmul.sat ssa_1092, ssa_1093 vec1 32 ssa_1097 = fpow ssa_1094, ssa_63 vec1 32 ssa_1098 = fpow ssa_1095, ssa_63 vec1 32 ssa_1099 = fpow ssa_1096, ssa_63 vec1 32 ssa_1100 = undefined vec4 32 ssa_1101 = vec4 ssa_86, ssa_87, ssa_1100, ssa_1100 vec4 32 ssa_1102 = vec4 ssa_1097, ssa_1098, ssa_1099, ssa_16 intrinsic image_store (ssa_0, ssa_1101, ssa_1100, ssa_1102) (1, 0, 34842, 8) /* image_dim=2D */ /* image_dim=true */ /* format=34842 */ /* access=8 */ /* succs: block_95 */ block block_95: }

NIR (SSA form) for compute shader:
shader: MESA_SHADER_COMPUTE
name: GLSL19
local-size: 32, 8, 1
shared-size: 5040
inputs: 0
outputs: 0
uniforms: 80
shared: 0
decl_var ubo INTERP_MODE_NONE vec2 tex_scale0 (7, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 out_scale (6, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 pixel_size0 (5, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 texture_off0 (4, 0, 0)
decl_var ubo INTERP_MODE_NONE mat2 texture_rot0 (3, 0, 0)
decl_var ubo INTERP_MODE_NONE vec2 texture_size0 (2, 0, 0)
decl_var ubo INTERP_MODE_NONE vec3 dst_luma (1, 0, 0)
decl_var ubo INTERP_MODE_NONE vec3 src_luma (0, 0, 0)
decl_var uniform INTERP_MODE_NONE sampler1D lut (8, 0, 0)
decl_var uniform INTERP_MODE_NONE writeonly GL_RGBA16F image2D out_image (9, 0, 0)
decl_var uniform INTERP_MODE_NONE sampler2D texture0 (10, 0, 1)
decl_function main (0 params)

impl main {
block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x00000040 /* 0.000000 */) vec1 32 ssa_2 = load_const (0x00000060 /* 0.000000 */) vec1 32 ssa_3 = load_const
(0x3f000000 /* 0.500000 */) vec1 32 ssa_4 = load_const (0x00000020 /* 0.000000 */) vec1 32 ssa_5 = load_const (0xbf000000 /* -0.500000 */) vec1 32 ssa_6 = load_const (0x0000000e /* 0.000000 */) vec1 32 ssa_7 = load_const (0x0000001e /* 0.000000 */) vec1 32 ssa_8 = load_const (0xfffffffd /* -nan */) vec1 32 ssa_9 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_10 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_11 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_12 = load_const (0xc0400000 /* -3.000000 */) vec1 32 ssa_13 = load_const (0x404217e3 /* 3.032708 */) vec1 32 ssa_14 = load_const (0x3c000000 /* 0.007812 */) vec1 32 ssa_15 = load_const (0x3f7e0000 /* 0.992188 */) vec1 32 ssa_16 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_17 = load_const (0x0000001f /* 0.000000 */) vec1 32 ssa_18 = load_const (0xc0000000 /* -2.000000 */) vec1 32 ssa_19 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_20 = load_const (0x00000021 /* 0.000000 */) vec1 32 ssa_21 = load_const (0x00000022 /* 0.000000 */) vec1 32 ssa_22 = load_const (0x00000023 /* 0.000000 */) vec1 32 ssa_23 = load_const (0x40000000 /* 2.000000 */) vec1 32 ssa_24 = load_const (0x00000024 /* 0.000000 */) vec1 32 ssa_25 = load_const (0x40400000 /* 3.000000 */) vec1 32 ssa_26 = load_const (0x0000003d /* 0.000000 */) vec1 32 ssa_27 = load_const (0x0000003e /* 0.000000 */) vec1 32 ssa_28 = load_const (0x0000003f /* 0.000000 */) vec1 32 ssa_29 = load_const (0x00000041 /* 0.000000 */) vec1 32 ssa_30 = load_const (0x00000042 /* 0.000000 */) vec1 32 ssa_31 = load_const (0x0000005a /* 0.000000 */) vec1 32 ssa_32 = load_const (0x0000005b /* 0.000000 */) vec1 32 ssa_33 = load_const (0x0000005c /* 0.000000 */) vec1 32 ssa_34 = load_const (0x0000005d /* 0.000000 */) vec1 32 ssa_35 = load_const (0x0000005e /* 0.000000 */) vec1 32 ssa_36 = load_const (0x0000005f /* 0.000000 */) vec1 32 ssa_37 = load_const (0x00000061 /* 0.000000 */) vec1 32 ssa_38 = load_const (0x40800000 /* 4.000000 */) vec1 32 ssa_39 = load_const (0x00000078 /* 0.000000 */) vec1 32 ssa_40 = load_const (0x00000079 /* 0.000000 */) vec1 32 ssa_41 = load_const (0x0000007a /* 0.000000 */) vec1 32 ssa_42 = load_const (0x0000007b /* 0.000000 */) vec1 32 ssa_43 = load_const (0x0000007c /* 0.000000 */) vec1 32 ssa_44 = load_const (0x0000007d /* 0.000000 */) vec1 32 ssa_45 = load_const (0x0000007e /* 0.000000 */) vec1 32 ssa_46 = load_const (0x0000007f /* 0.000000 */) vec1 32 ssa_47 = load_const (0x00000097 /* 0.000000 */) vec1 32 ssa_48 = load_const (0x00000098 /* 0.000000 */) vec1 32 ssa_49 = load_const (0x00000099 /* 0.000000 */) vec1 32 ssa_50 = load_const (0x0000009a /* 0.000000 */) vec1 32 ssa_51 = load_const (0x0000009b /* 0.000000 */) vec1 32 ssa_52 = load_const (0x0000009c /* 0.000000 */) vec1 32 ssa_53 = load_const (0x000000b5 /* 0.000000 */) vec1 32 ssa_54 = load_const (0x000000b6 /* 0.000000 */) vec1 32 ssa_55 = load_const (0x000000b7 /* 0.000000 */) vec1 32 ssa_56 = load_const (0x000000b8 /* 0.000000 */) vec1 32 ssa_57 = load_const (0x000000b9 /* 0.000000 */) vec1 32 ssa_58 = load_const (0x000000ba /* 0.000000 */) vec1 32 ssa_59 = load_const (0x000000d5 /* 0.000000 */) vec1 32 ssa_60 = load_const (0x000000d6 /* 0.000000 */) vec1 32 ssa_61 = load_const (0x3f400000 /* 0.750000 */) vec1 32 ssa_62 = load_const (0xbbf8487c /* -0.007577 */) vec1 32 ssa_63 = load_const (0x3ed55555 /* 0.416667 */) vec1 32 ssa_64 = load_const (0x00000030 /* 0.000000 */) vec2 32 ssa_65 = intrinsic load_ubo (ssa_0, ssa_64) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec2 32 ssa_66 = 
intrinsic load_ubo (ssa_0, ssa_1) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_67 = load_const (0x00000068 /* 0.000000 */) vec2 32 ssa_68 = intrinsic load_ubo (ssa_0, ssa_67) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec2 32 ssa_69 = intrinsic load_ubo (ssa_0, ssa_2) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_70 = load_const (0x00000058 /* 0.000000 */) vec2 32 ssa_71 = intrinsic load_ubo (ssa_0, ssa_70) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_72 = load_const (0x00000050 /* 0.000000 */) vec2 32 ssa_73 = intrinsic load_ubo (ssa_0, ssa_72) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec3 32 ssa_74 = intrinsic load_work_group_id () () vec1 32 ssa_75 = intrinsic load_subgroup_id () () vec1 32 ssa_76 = ishl ssa_75, ssa_9 vec1 32 ssa_77 = intrinsic load_subgroup_invocation () () vec1 32 ssa_78 = iadd ssa_77, ssa_76 vec1 32 ssa_79 = iand ssa_78, ssa_17 vec1 32 ssa_80 = load_const (0x00000005 /* 0.000000 */) vec1 32 ssa_81 = ushr ssa_78, ssa_80 vec1 32 ssa_82 = load_const (0x00000007 /* 0.000000 */) vec1 32 ssa_83 = iand ssa_81, ssa_82 vec1 32 ssa_84 = ishl ssa_74.x, ssa_80 vec1 32 ssa_85 = ishl ssa_74.y, ssa_11 vec1 32 ssa_86 = iadd ssa_84, ssa_79 vec1 32 ssa_87 = iadd ssa_85, ssa_83 vec1 32 ssa_88 = u2f32 ssa_86 vec1 32 ssa_89 = u2f32 ssa_87 vec1 32 ssa_90 = fadd ssa_88, ssa_3 vec1 32 ssa_91 = fadd ssa_89, ssa_3 vec1 32 ssa_92 = fmul ssa_69.x, ssa_90 vec1 32 ssa_93 = fmul ssa_69.y, ssa_91 vec1 32 ssa_94 = fmul ssa_68.x, ssa_92 vec1 32 ssa_95 = fmul ssa_68.y, ssa_93 vec1 32 ssa_96 = fmul ssa_66.x, ssa_95 vec1 32 ssa_97 = fmul ssa_66.y, ssa_95 vec1 32 ssa_98 = ffma ssa_65.x, ssa_94, ssa_96 vec1 32 ssa_99 = ffma ssa_65.y, ssa_94, ssa_97 vec1 32 ssa_100 = ffma ssa_71.x, ssa_73.x, ssa_98 vec1 32 ssa_101 = ffma ssa_71.y, ssa_73.y, ssa_99 vec1 32 ssa_102 = u2f32 ssa_84 vec1 32 ssa_103 = u2f32 ssa_85 vec1 32 ssa_104 = fadd ssa_102, ssa_3 vec1 32 ssa_105 = fadd ssa_103, ssa_3 vec1 32 ssa_106 = fmul ssa_69.x, ssa_104 vec1 32 ssa_107 = fmul ssa_69.y, ssa_105 vec1 32 ssa_108 = fmul ssa_68.x, ssa_106 vec1 32 ssa_109 = fmul ssa_68.y, ssa_107 vec1 32 ssa_110 = fmul ssa_66.x, ssa_109 vec1 32 ssa_111 = fmul ssa_66.y, ssa_109 vec1 32 ssa_112 = ffma ssa_65.x, ssa_108, ssa_110 vec1 32 ssa_113 = ffma ssa_65.y, ssa_108, ssa_111 vec1 32 ssa_114 = ffma ssa_71.x, ssa_73.x, ssa_112 vec1 32 ssa_115 = ffma ssa_71.y, ssa_73.y, ssa_113 vec2 32 ssa_116 = intrinsic load_ubo (ssa_0, ssa_4) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_117 = ffma ssa_114, ssa_116.x, ssa_5 vec1 32 ssa_118 = ffma ssa_115, ssa_116.y, ssa_5 vec1 32 ssa_119 = ffract ssa_117 vec1 32 ssa_120 = ffract ssa_118 vec1 32 ssa_121 = ffma -ssa_71.x, ssa_119, ssa_114 vec1 32 ssa_122 = ffma -ssa_71.y, ssa_120, ssa_115 vec1 32 ssa_123 = ffma ssa_100, ssa_116.x, ssa_5 vec1 32 ssa_124 = ffma ssa_101, ssa_116.y, ssa_5 vec1 32 ssa_125 = ffract ssa_123 vec1 32 ssa_126 = ffract ssa_124 vec1 32 ssa_127 = ffma -ssa_71.x, ssa_125, ssa_100 vec1 32 ssa_128 = ffma -ssa_71.y, ssa_126, ssa_101 vec1 32 ssa_129 = fadd ssa_127, -ssa_121 vec1 32 ssa_130 = fadd ssa_128, -ssa_122 vec1 32 ssa_131 = fmul ssa_129, ssa_116.x vec1 32 ssa_132 = fmul ssa_130, ssa_116.y vec1 32 ssa_133 = fround_even ssa_131 vec1 32 ssa_134 = fround_even ssa_132 vec1 32 ssa_135 = f2i32 ssa_133 vec1 32 ssa_136 = f2i32 ssa_134 /* succs: block_1 */ loop { block block_1: /* preds: block_0 block_9 */ vec1 32 ssa_137 = phi block_0: ssa_83, block_9: ssa_161 vec1 32 ssa_138 = ige32 ssa_137, ssa_6 /* succs: block_2 block_3 */ if ssa_138 { block block_2: 
/* preds: block_1 */ break /* succs: block_10 */ } else { block block_3: /* preds: block_1 */ /* succs: block_4 */ } block block_4: /* preds: block_3 */ /* succs: block_5 */ loop { block block_5: /* preds: block_4 block_8 */ vec1 32 ssa_139 = phi block_4: ssa_79, block_8: ssa_160 vec1 32 ssa_140 = ige32 ssa_139, ssa_7 /* succs: block_6 block_7 */ if ssa_140 { block block_6: /* preds: block_5 */ break /* succs: block_9 */ } else { block block_7: /* preds: block_5 */ /* succs: block_8 */ } block block_8: /* preds: block_7 */ vec1 32 ssa_141 = iadd ssa_139, ssa_8 vec1 32 ssa_142 = i2f32 ssa_141 vec1 32 ssa_143 = iadd ssa_137, ssa_8 vec1 32 ssa_144 = i2f32 ssa_143 vec1 32 ssa_145 = ffma ssa_71.x, ssa_142, ssa_121 vec1 32 ssa_146 = ffma ssa_71.y, ssa_144, ssa_122 vec2 32 ssa_147 = vec2 ssa_145, ssa_146 vec4 32 ssa_148 = tex ssa_147 (coord), ssa_0 (lod), 1 (texture), 1 (sampler), vec1 32 ssa_149 = imul ssa_7, ssa_137 vec1 32 ssa_150 = iadd ssa_149, ssa_139 vec1 32 ssa_151 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_152 = ishl ssa_150, ssa_151 vec1 32 ssa_153 = imov ssa_148.x intrinsic store_shared (ssa_153, ssa_152) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_154 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_155 = iadd ssa_154, ssa_152 vec1 32 ssa_156 = imov ssa_148.y intrinsic store_shared (ssa_156, ssa_155) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_157 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_158 = iadd ssa_157, ssa_152 vec1 32 ssa_159 = imov ssa_148.z intrinsic store_shared (ssa_159, ssa_158) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_160 = iadd ssa_139, ssa_4 /* succs: block_5 */ } block block_9: /* preds: block_6 */ vec1 32 ssa_161 = iadd ssa_137, ssa_10 /* succs: block_1 */ } block block_10: /* preds: block_2 */ intrinsic group_memory_barrier () () intrinsic barrier () () vec1 32 ssa_162 = imul ssa_7, ssa_136 vec1 32 ssa_163 = iadd ssa_162, ssa_135 vec1 32 ssa_164 = iadd ssa_163, ssa_11 vec1 32 ssa_165 = fadd ssa_12, -ssa_126 vec1 32 ssa_166 = fmul ssa_165, ssa_165 vec1 32 ssa_167 = ffma ssa_125, ssa_125, ssa_166 vec1 32 ssa_168 = fsqrt ssa_167 vec1 32 ssa_169 = flt32 ssa_168, ssa_13 /* succs: block_11 block_12 */ if ssa_169 { block block_11: /* preds: block_10 */ vec1 32 ssa_170 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_171 = fmul ssa_168, ssa_170 vec1 32 ssa_172 = flrp ssa_14, ssa_15, ssa_171 vec4 32 ssa_173 = tex ssa_172 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_174 = imov ssa_173.x vec1 32 ssa_175 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_176 = ishl ssa_164, ssa_175 vec1 32 ssa_177 = intrinsic load_shared (ssa_176) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_178 = fmul ssa_173.x, ssa_177 vec1 32 ssa_179 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_180 = iadd ssa_179, ssa_176 vec1 32 ssa_181 = intrinsic load_shared (ssa_180) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_182 = fmul ssa_173.x, ssa_181 vec1 32 ssa_183 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_184 = iadd ssa_183, ssa_176 vec1 32 ssa_185 = intrinsic load_shared (ssa_184) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_186 = fmul ssa_173.x, ssa_185 /* succs: block_13 */ } else { block block_12: /* preds: block_10 */ /* succs: block_13 */ } block block_13: /* preds: block_11 block_12 */ vec1 32 ssa_187 = phi 
block_11: ssa_178, block_12: ssa_0 vec1 32 ssa_188 = phi block_11: ssa_182, block_12: ssa_0 vec1 32 ssa_189 = phi block_11: ssa_186, block_12: ssa_0 vec1 32 ssa_190 = phi block_11: ssa_174, block_12: ssa_0 vec1 32 ssa_191 = iadd ssa_163, ssa_9 vec1 32 ssa_192 = fadd ssa_16, -ssa_125 vec1 32 ssa_193 = ffma ssa_192, ssa_192, ssa_166 vec1 32 ssa_194 = fsqrt ssa_193 vec1 32 ssa_195 = flt32 ssa_194, ssa_13 /* succs: block_14 block_15 */ if ssa_195 { block block_14: /* preds: block_13 */ vec1 32 ssa_196 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_197 = fmul ssa_194, ssa_196 vec1 32 ssa_198 = flrp ssa_14, ssa_15, ssa_197 vec4 32 ssa_199 = tex ssa_198 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_200 = fadd ssa_190, ssa_199.x vec1 32 ssa_201 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_202 = ishl ssa_191, ssa_201 vec1 32 ssa_203 = intrinsic load_shared (ssa_202) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_204 = ffma ssa_199.x, ssa_203, ssa_187 vec1 32 ssa_205 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_206 = iadd ssa_205, ssa_202 vec1 32 ssa_207 = intrinsic load_shared (ssa_206) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_208 = ffma ssa_199.x, ssa_207, ssa_188 vec1 32 ssa_209 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_210 = iadd ssa_209, ssa_202 vec1 32 ssa_211 = intrinsic load_shared (ssa_210) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_212 = ffma ssa_199.x, ssa_211, ssa_189 /* succs: block_16 */ } else { block block_15: /* preds: block_13 */ /* succs: block_16 */ } block block_16: /* preds: block_14 block_15 */ vec1 32 ssa_213 = phi block_14: ssa_204, block_15: ssa_187 vec1 32 ssa_214 = phi block_14: ssa_208, block_15: ssa_188 vec1 32 ssa_215 = phi block_14: ssa_212, block_15: ssa_189 vec1 32 ssa_216 = phi block_14: ssa_200, block_15: ssa_190 vec1 32 ssa_217 = iadd ssa_163, ssa_17 vec1 32 ssa_218 = fadd ssa_18, -ssa_125 vec1 32 ssa_219 = fadd ssa_18, -ssa_126 vec1 32 ssa_220 = fmul ssa_219, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = fsqrt ssa_221 vec1 32 ssa_223 = flt32 ssa_222, ssa_13 /* succs: block_17 block_18 */ if ssa_223 { block block_17: /* preds: block_16 */ vec1 32 ssa_224 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_225 = fmul ssa_222, ssa_224 vec1 32 ssa_226 = flrp ssa_14, ssa_15, ssa_225 vec4 32 ssa_227 = tex ssa_226 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_228 = fadd ssa_216, ssa_227.x vec1 32 ssa_229 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_230 = ishl ssa_217, ssa_229 vec1 32 ssa_231 = intrinsic load_shared (ssa_230) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_232 = ffma ssa_227.x, ssa_231, ssa_213 vec1 32 ssa_233 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_234 = iadd ssa_233, ssa_230 vec1 32 ssa_235 = intrinsic load_shared (ssa_234) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_236 = ffma ssa_227.x, ssa_235, ssa_214 vec1 32 ssa_237 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_238 = iadd ssa_237, ssa_230 vec1 32 ssa_239 = intrinsic load_shared (ssa_238) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_240 = ffma ssa_227.x, ssa_239, ssa_215 /* succs: block_19 */ } else { block block_18: /* preds: block_16 */ /* succs: block_19 */ } block block_19: /* preds: block_17 block_18 */ vec1 32 ssa_241 = phi block_17: ssa_232, block_18: ssa_213 vec1 32 ssa_242 = phi block_17: ssa_236, 
block_18: ssa_214 vec1 32 ssa_243 = phi block_17: ssa_240, block_18: ssa_215 vec1 32 ssa_244 = phi block_17: ssa_228, block_18: ssa_216 vec1 32 ssa_245 = iadd ssa_163, ssa_4 vec1 32 ssa_246 = fadd ssa_19, -ssa_125 vec1 32 ssa_247 = ffma ssa_246, ssa_246, ssa_220 vec1 32 ssa_248 = fsqrt ssa_247 vec1 32 ssa_249 = flt32 ssa_248, ssa_13 /* succs: block_20 block_21 */ if ssa_249 { block block_20: /* preds: block_19 */ vec1 32 ssa_250 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_251 = fmul ssa_248, ssa_250 vec1 32 ssa_252 = flrp ssa_14, ssa_15, ssa_251 vec4 32 ssa_253 = tex ssa_252 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_254 = fadd ssa_244, ssa_253.x vec1 32 ssa_255 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_256 = ishl ssa_245, ssa_255 vec1 32 ssa_257 = intrinsic load_shared (ssa_256) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_258 = ffma ssa_253.x, ssa_257, ssa_241 vec1 32 ssa_259 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_260 = iadd ssa_259, ssa_256 vec1 32 ssa_261 = intrinsic load_shared (ssa_260) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_262 = ffma ssa_253.x, ssa_261, ssa_242 vec1 32 ssa_263 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_264 = iadd ssa_263, ssa_256 vec1 32 ssa_265 = intrinsic load_shared (ssa_264) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_266 = ffma ssa_253.x, ssa_265, ssa_243 /* succs: block_22 */ } else { block block_21: /* preds: block_19 */ /* succs: block_22 */ } block block_22: /* preds: block_20 block_21 */ vec1 32 ssa_267 = phi block_20: ssa_258, block_21: ssa_241 vec1 32 ssa_268 = phi block_20: ssa_262, block_21: ssa_242 vec1 32 ssa_269 = phi block_20: ssa_266, block_21: ssa_243 vec1 32 ssa_270 = phi block_20: ssa_254, block_21: ssa_244 vec1 32 ssa_271 = iadd ssa_163, ssa_20 vec1 32 ssa_272 = ffma ssa_125, ssa_125, ssa_220 vec1 32 ssa_273 = fsqrt ssa_272 vec1 32 ssa_274 = flt32 ssa_273, ssa_13 /* succs: block_23 block_24 */ if ssa_274 { block block_23: /* preds: block_22 */ vec1 32 ssa_275 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_276 = fmul ssa_273, ssa_275 vec1 32 ssa_277 = flrp ssa_14, ssa_15, ssa_276 vec4 32 ssa_278 = tex ssa_277 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_279 = fadd ssa_270, ssa_278.x vec1 32 ssa_280 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_281 = ishl ssa_271, ssa_280 vec1 32 ssa_282 = intrinsic load_shared (ssa_281) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_283 = ffma ssa_278.x, ssa_282, ssa_267 vec1 32 ssa_284 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_285 = iadd ssa_284, ssa_281 vec1 32 ssa_286 = intrinsic load_shared (ssa_285) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_287 = ffma ssa_278.x, ssa_286, ssa_268 vec1 32 ssa_288 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_289 = iadd ssa_288, ssa_281 vec1 32 ssa_290 = intrinsic load_shared (ssa_289) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_291 = ffma ssa_278.x, ssa_290, ssa_269 /* succs: block_25 */ } else { block block_24: /* preds: block_22 */ /* succs: block_25 */ } block block_25: /* preds: block_23 block_24 */ vec1 32 ssa_292 = phi block_23: ssa_283, block_24: ssa_267 vec1 32 ssa_293 = phi block_23: ssa_287, block_24: ssa_268 vec1 32 ssa_294 = phi block_23: ssa_291, block_24: ssa_269 vec1 32 ssa_295 = phi block_23: ssa_279, block_24: ssa_270 vec1 32 ssa_296 = iadd ssa_163, ssa_21 vec1 32 ssa_297 
= ffma ssa_192, ssa_192, ssa_220 vec1 32 ssa_298 = fsqrt ssa_297 vec1 32 ssa_299 = flt32 ssa_298, ssa_13 /* succs: block_26 block_27 */ if ssa_299 { block block_26: /* preds: block_25 */ vec1 32 ssa_300 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_301 = fmul ssa_298, ssa_300 vec1 32 ssa_302 = flrp ssa_14, ssa_15, ssa_301 vec4 32 ssa_303 = tex ssa_302 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_304 = fadd ssa_295, ssa_303.x vec1 32 ssa_305 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_306 = ishl ssa_296, ssa_305 vec1 32 ssa_307 = intrinsic load_shared (ssa_306) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_308 = ffma ssa_303.x, ssa_307, ssa_292 vec1 32 ssa_309 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_310 = iadd ssa_309, ssa_306 vec1 32 ssa_311 = intrinsic load_shared (ssa_310) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_312 = ffma ssa_303.x, ssa_311, ssa_293 vec1 32 ssa_313 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_314 = iadd ssa_313, ssa_306 vec1 32 ssa_315 = intrinsic load_shared (ssa_314) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_316 = ffma ssa_303.x, ssa_315, ssa_294 /* succs: block_28 */ } else { block block_27: /* preds: block_25 */ /* succs: block_28 */ } block block_28: /* preds: block_26 block_27 */ vec1 32 ssa_317 = phi block_26: ssa_308, block_27: ssa_292 vec1 32 ssa_318 = phi block_26: ssa_312, block_27: ssa_293 vec1 32 ssa_319 = phi block_26: ssa_316, block_27: ssa_294 vec1 32 ssa_320 = phi block_26: ssa_304, block_27: ssa_295 vec1 32 ssa_321 = iadd ssa_163, ssa_22 vec1 32 ssa_322 = fadd ssa_23, -ssa_125 vec1 32 ssa_323 = ffma ssa_322, ssa_322, ssa_220 vec1 32 ssa_324 = fsqrt ssa_323 vec1 32 ssa_325 = flt32 ssa_324, ssa_13 /* succs: block_29 block_30 */ if ssa_325 { block block_29: /* preds: block_28 */ vec1 32 ssa_326 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_327 = fmul ssa_324, ssa_326 vec1 32 ssa_328 = flrp ssa_14, ssa_15, ssa_327 vec4 32 ssa_329 = tex ssa_328 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_330 = fadd ssa_320, ssa_329.x vec1 32 ssa_331 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_332 = ishl ssa_321, ssa_331 vec1 32 ssa_333 = intrinsic load_shared (ssa_332) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_334 = ffma ssa_329.x, ssa_333, ssa_317 vec1 32 ssa_335 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_336 = iadd ssa_335, ssa_332 vec1 32 ssa_337 = intrinsic load_shared (ssa_336) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_338 = ffma ssa_329.x, ssa_337, ssa_318 vec1 32 ssa_339 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_340 = iadd ssa_339, ssa_332 vec1 32 ssa_341 = intrinsic load_shared (ssa_340) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_342 = ffma ssa_329.x, ssa_341, ssa_319 /* succs: block_31 */ } else { block block_30: /* preds: block_28 */ /* succs: block_31 */ } block block_31: /* preds: block_29 block_30 */ vec1 32 ssa_343 = phi block_29: ssa_334, block_30: ssa_317 vec1 32 ssa_344 = phi block_29: ssa_338, block_30: ssa_318 vec1 32 ssa_345 = phi block_29: ssa_342, block_30: ssa_319 vec1 32 ssa_346 = phi block_29: ssa_330, block_30: ssa_320 vec1 32 ssa_347 = iadd ssa_163, ssa_24 vec1 32 ssa_348 = fadd ssa_25, -ssa_125 vec1 32 ssa_349 = ffma ssa_348, ssa_348, ssa_220 vec1 32 ssa_350 = fsqrt ssa_349 vec1 32 ssa_351 = flt32 ssa_350, ssa_13 /* succs: block_32 block_33 */ if ssa_351 { 
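/* annotation (not compiler output): every unrolled tap above and below follows one pattern. The tap distance d = sqrt(dx*dx + dy*dy) is formed with ffma + fsqrt; the tap is skipped when d >= ssa_13 (3.032708, evidently the filter's cutoff radius); a surviving tap fetches its weight from the 1D lut at flrp(0.007812, 0.992188, d * 0.308803), i.e. mix(1/128, 127/128, t), which is consistent with sampling a 64-texel LUT at texel centers. The weight then multiply-accumulates (ffma) the three planes cached in shared memory at byte offsets 0, 0x690 and 0xd20 (three 420-float arrays; 3 * 420 * 4 = 5040, matching the shared-size reported below) into the per-plane sums, while the raw weight is added to the running weight sum. */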
block block_32: /* preds: block_31 */ vec1 32 ssa_352 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_353 = fmul ssa_350, ssa_352 vec1 32 ssa_354 = flrp ssa_14, ssa_15, ssa_353 vec4 32 ssa_355 = tex ssa_354 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_356 = fadd ssa_346, ssa_355.x vec1 32 ssa_357 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_358 = ishl ssa_347, ssa_357 vec1 32 ssa_359 = intrinsic load_shared (ssa_358) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_360 = ffma ssa_355.x, ssa_359, ssa_343 vec1 32 ssa_361 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_362 = iadd ssa_361, ssa_358 vec1 32 ssa_363 = intrinsic load_shared (ssa_362) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_364 = ffma ssa_355.x, ssa_363, ssa_344 vec1 32 ssa_365 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_366 = iadd ssa_365, ssa_358 vec1 32 ssa_367 = intrinsic load_shared (ssa_366) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_368 = ffma ssa_355.x, ssa_367, ssa_345 /* succs: block_34 */ } else { block block_33: /* preds: block_31 */ /* succs: block_34 */ } block block_34: /* preds: block_32 block_33 */ vec1 32 ssa_369 = phi block_32: ssa_360, block_33: ssa_343 vec1 32 ssa_370 = phi block_32: ssa_364, block_33: ssa_344 vec1 32 ssa_371 = phi block_32: ssa_368, block_33: ssa_345 vec1 32 ssa_372 = phi block_32: ssa_356, block_33: ssa_346 vec1 32 ssa_373 = iadd ssa_163, ssa_26 vec1 32 ssa_374 = fadd ssa_19, -ssa_126 vec1 32 ssa_375 = fmul ssa_374, ssa_374 vec1 32 ssa_376 = ffma ssa_218, ssa_218, ssa_375 vec1 32 ssa_377 = fsqrt ssa_376 vec1 32 ssa_378 = flt32 ssa_377, ssa_13 /* succs: block_35 block_36 */ if ssa_378 { block block_35: /* preds: block_34 */ vec1 32 ssa_379 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_380 = fmul ssa_377, ssa_379 vec1 32 ssa_381 = flrp ssa_14, ssa_15, ssa_380 vec4 32 ssa_382 = tex ssa_381 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_383 = fadd ssa_372, ssa_382.x vec1 32 ssa_384 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_385 = ishl ssa_373, ssa_384 vec1 32 ssa_386 = intrinsic load_shared (ssa_385) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_387 = ffma ssa_382.x, ssa_386, ssa_369 vec1 32 ssa_388 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_389 = iadd ssa_388, ssa_385 vec1 32 ssa_390 = intrinsic load_shared (ssa_389) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_391 = ffma ssa_382.x, ssa_390, ssa_370 vec1 32 ssa_392 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_393 = iadd ssa_392, ssa_385 vec1 32 ssa_394 = intrinsic load_shared (ssa_393) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_395 = ffma ssa_382.x, ssa_394, ssa_371 /* succs: block_37 */ } else { block block_36: /* preds: block_34 */ /* succs: block_37 */ } block block_37: /* preds: block_35 block_36 */ vec1 32 ssa_396 = phi block_35: ssa_387, block_36: ssa_369 vec1 32 ssa_397 = phi block_35: ssa_391, block_36: ssa_370 vec1 32 ssa_398 = phi block_35: ssa_395, block_36: ssa_371 vec1 32 ssa_399 = phi block_35: ssa_383, block_36: ssa_372 vec1 32 ssa_400 = iadd ssa_163, ssa_27 vec1 32 ssa_401 = ffma ssa_246, ssa_246, ssa_375 vec1 32 ssa_402 = fsqrt ssa_401 vec1 32 ssa_403 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_404 = fmul ssa_402, ssa_403 vec1 32 ssa_405 = flrp ssa_14, ssa_15, ssa_404 vec4 32 ssa_406 = tex ssa_405 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 
ssa_407 = fadd ssa_399, ssa_406.x vec1 32 ssa_408 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_409 = ishl ssa_400, ssa_408 vec1 32 ssa_410 = intrinsic load_shared (ssa_409) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_411 = ffma ssa_406.x, ssa_410, ssa_396 vec1 32 ssa_412 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_413 = iadd ssa_412, ssa_409 vec1 32 ssa_414 = intrinsic load_shared (ssa_413) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_415 = ffma ssa_406.x, ssa_414, ssa_397 vec1 32 ssa_416 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_417 = iadd ssa_416, ssa_409 vec1 32 ssa_418 = intrinsic load_shared (ssa_417) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_419 = ffma ssa_406.x, ssa_418, ssa_398 vec1 32 ssa_420 = iadd ssa_163, ssa_28 vec1 32 ssa_421 = ffma ssa_125, ssa_125, ssa_375 vec1 32 ssa_422 = fsqrt ssa_421 vec1 32 ssa_423 = fmul ssa_422, ssa_403 vec1 32 ssa_424 = flrp ssa_14, ssa_15, ssa_423 vec4 32 ssa_425 = tex ssa_424 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_426 = fadd ssa_407, ssa_425.x vec1 32 ssa_427 = ishl ssa_420, ssa_408 vec1 32 ssa_428 = intrinsic load_shared (ssa_427) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_429 = ffma ssa_425.x, ssa_428, ssa_411 vec1 32 ssa_430 = iadd ssa_412, ssa_427 vec1 32 ssa_431 = intrinsic load_shared (ssa_430) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_432 = ffma ssa_425.x, ssa_431, ssa_415 vec1 32 ssa_433 = iadd ssa_416, ssa_427 vec1 32 ssa_434 = intrinsic load_shared (ssa_433) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_435 = ffma ssa_425.x, ssa_434, ssa_419 vec1 32 ssa_436 = iadd ssa_163, ssa_1 vec1 32 ssa_437 = ffma ssa_192, ssa_192, ssa_375 vec1 32 ssa_438 = fsqrt ssa_437 vec1 32 ssa_439 = fmul ssa_438, ssa_403 vec1 32 ssa_440 = flrp ssa_14, ssa_15, ssa_439 vec4 32 ssa_441 = tex ssa_440 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_442 = fadd ssa_426, ssa_441.x vec1 32 ssa_443 = ishl ssa_436, ssa_408 vec1 32 ssa_444 = intrinsic load_shared (ssa_443) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_445 = ffma ssa_441.x, ssa_444, ssa_429 vec1 32 ssa_446 = iadd ssa_412, ssa_443 vec1 32 ssa_447 = intrinsic load_shared (ssa_446) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_448 = ffma ssa_441.x, ssa_447, ssa_432 vec1 32 ssa_449 = iadd ssa_416, ssa_443 vec1 32 ssa_450 = intrinsic load_shared (ssa_449) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_451 = ffma ssa_441.x, ssa_450, ssa_435 vec1 32 ssa_452 = iadd ssa_163, ssa_29 vec1 32 ssa_453 = ffma ssa_322, ssa_322, ssa_375 vec1 32 ssa_454 = fsqrt ssa_453 vec1 32 ssa_455 = fmul ssa_454, ssa_403 vec1 32 ssa_456 = flrp ssa_14, ssa_15, ssa_455 vec4 32 ssa_457 = tex ssa_456 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_458 = fadd ssa_442, ssa_457.x vec1 32 ssa_459 = ishl ssa_452, ssa_408 vec1 32 ssa_460 = intrinsic load_shared (ssa_459) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_461 = ffma ssa_457.x, ssa_460, ssa_445 vec1 32 ssa_462 = iadd ssa_412, ssa_459 vec1 32 ssa_463 = intrinsic load_shared (ssa_462) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_464 = ffma ssa_457.x, ssa_463, ssa_448 vec1 32 ssa_465 = iadd ssa_416, ssa_459 vec1 32 ssa_466 = intrinsic load_shared (ssa_465) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* 
align_offset=0 */ vec1 32 ssa_467 = ffma ssa_457.x, ssa_466, ssa_451 vec1 32 ssa_468 = iadd ssa_163, ssa_30 vec1 32 ssa_469 = ffma ssa_348, ssa_348, ssa_375 vec1 32 ssa_470 = fsqrt ssa_469 vec1 32 ssa_471 = flt32 ssa_470, ssa_13 /* succs: block_38 block_39 */ if ssa_471 { block block_38: /* preds: block_37 */ vec1 32 ssa_472 = fmul ssa_470, ssa_403 vec1 32 ssa_473 = flrp ssa_14, ssa_15, ssa_472 vec4 32 ssa_474 = tex ssa_473 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_475 = fadd ssa_458, ssa_474.x vec1 32 ssa_476 = ishl ssa_468, ssa_408 vec1 32 ssa_477 = intrinsic load_shared (ssa_476) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_478 = ffma ssa_474.x, ssa_477, ssa_461 vec1 32 ssa_479 = iadd ssa_412, ssa_476 vec1 32 ssa_480 = intrinsic load_shared (ssa_479) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_481 = ffma ssa_474.x, ssa_480, ssa_464 vec1 32 ssa_482 = iadd ssa_416, ssa_476 vec1 32 ssa_483 = intrinsic load_shared (ssa_482) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_484 = ffma ssa_474.x, ssa_483, ssa_467 /* succs: block_40 */ } else { block block_39: /* preds: block_37 */ /* succs: block_40 */ } block block_40: /* preds: block_38 block_39 */ vec1 32 ssa_485 = phi block_38: ssa_478, block_39: ssa_461 vec1 32 ssa_486 = phi block_38: ssa_481, block_39: ssa_464 vec1 32 ssa_487 = phi block_38: ssa_484, block_39: ssa_467 vec1 32 ssa_488 = phi block_38: ssa_475, block_39: ssa_458 vec1 32 ssa_489 = iadd ssa_163, ssa_31 vec1 32 ssa_490 = fadd ssa_12, -ssa_125 vec1 32 ssa_491 = fmul ssa_126, ssa_126 vec1 32 ssa_492 = ffma ssa_490, ssa_490, ssa_491 vec1 32 ssa_493 = fsqrt ssa_492 vec1 32 ssa_494 = flt32 ssa_493, ssa_13 /* succs: block_41 block_42 */ if ssa_494 { block block_41: /* preds: block_40 */ vec1 32 ssa_495 = fmul ssa_493, ssa_403 vec1 32 ssa_496 = flrp ssa_14, ssa_15, ssa_495 vec4 32 ssa_497 = tex ssa_496 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_498 = fadd ssa_488, ssa_497.x vec1 32 ssa_499 = ishl ssa_489, ssa_408 vec1 32 ssa_500 = intrinsic load_shared (ssa_499) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_501 = ffma ssa_497.x, ssa_500, ssa_485 vec1 32 ssa_502 = iadd ssa_412, ssa_499 vec1 32 ssa_503 = intrinsic load_shared (ssa_502) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_504 = ffma ssa_497.x, ssa_503, ssa_486 vec1 32 ssa_505 = iadd ssa_416, ssa_499 vec1 32 ssa_506 = intrinsic load_shared (ssa_505) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_507 = ffma ssa_497.x, ssa_506, ssa_487 /* succs: block_43 */ } else { block block_42: /* preds: block_40 */ /* succs: block_43 */ } block block_43: /* preds: block_41 block_42 */ vec1 32 ssa_508 = phi block_41: ssa_501, block_42: ssa_485 vec1 32 ssa_509 = phi block_41: ssa_504, block_42: ssa_486 vec1 32 ssa_510 = phi block_41: ssa_507, block_42: ssa_487 vec1 32 ssa_511 = phi block_41: ssa_498, block_42: ssa_488 vec1 32 ssa_512 = iadd ssa_163, ssa_32 vec1 32 ssa_513 = ffma ssa_218, ssa_218, ssa_491 vec1 32 ssa_514 = fsqrt ssa_513 vec1 32 ssa_515 = flt32 ssa_514, ssa_13 /* succs: block_44 block_45 */ if ssa_515 { block block_44: /* preds: block_43 */ vec1 32 ssa_516 = fmul ssa_514, ssa_403 vec1 32 ssa_517 = flrp ssa_14, ssa_15, ssa_516 vec4 32 ssa_518 = tex ssa_517 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_519 = fadd ssa_511, ssa_518.x vec1 32 ssa_520 = ishl ssa_512, ssa_408 vec1 32 ssa_521 = intrinsic load_shared 
(ssa_520) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_522 = ffma ssa_518.x, ssa_521, ssa_508 vec1 32 ssa_523 = iadd ssa_412, ssa_520 vec1 32 ssa_524 = intrinsic load_shared (ssa_523) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_525 = ffma ssa_518.x, ssa_524, ssa_509 vec1 32 ssa_526 = iadd ssa_416, ssa_520 vec1 32 ssa_527 = intrinsic load_shared (ssa_526) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_528 = ffma ssa_518.x, ssa_527, ssa_510 /* succs: block_46 */ } else { block block_45: /* preds: block_43 */ /* succs: block_46 */ } block block_46: /* preds: block_44 block_45 */ vec1 32 ssa_529 = phi block_44: ssa_522, block_45: ssa_508 vec1 32 ssa_530 = phi block_44: ssa_525, block_45: ssa_509 vec1 32 ssa_531 = phi block_44: ssa_528, block_45: ssa_510 vec1 32 ssa_532 = phi block_44: ssa_519, block_45: ssa_511 vec1 32 ssa_533 = iadd ssa_163, ssa_33 vec1 32 ssa_534 = ffma ssa_246, ssa_246, ssa_491 vec1 32 ssa_535 = fsqrt ssa_534 vec1 32 ssa_536 = fmul ssa_535, ssa_403 vec1 32 ssa_537 = flrp ssa_14, ssa_15, ssa_536 vec4 32 ssa_538 = tex ssa_537 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_539 = fadd ssa_532, ssa_538.x vec1 32 ssa_540 = ishl ssa_533, ssa_408 vec1 32 ssa_541 = intrinsic load_shared (ssa_540) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_542 = ffma ssa_538.x, ssa_541, ssa_529 vec1 32 ssa_543 = iadd ssa_412, ssa_540 vec1 32 ssa_544 = intrinsic load_shared (ssa_543) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_545 = ffma ssa_538.x, ssa_544, ssa_530 vec1 32 ssa_546 = iadd ssa_416, ssa_540 vec1 32 ssa_547 = intrinsic load_shared (ssa_546) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_548 = ffma ssa_538.x, ssa_547, ssa_531 vec1 32 ssa_549 = iadd ssa_163, ssa_34 vec1 32 ssa_550 = ffma ssa_125, ssa_125, ssa_491 vec1 32 ssa_551 = fsqrt ssa_550 vec1 32 ssa_552 = fmul ssa_551, ssa_403 vec1 32 ssa_553 = flrp ssa_14, ssa_15, ssa_552 vec4 32 ssa_554 = tex ssa_553 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_555 = fadd ssa_539, ssa_554.x vec1 32 ssa_556 = ishl ssa_549, ssa_408 vec1 32 ssa_557 = intrinsic load_shared (ssa_556) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_558 = ffma ssa_554.x, ssa_557, ssa_542 vec1 32 ssa_559 = iadd ssa_412, ssa_556 vec1 32 ssa_560 = intrinsic load_shared (ssa_559) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_561 = ffma ssa_554.x, ssa_560, ssa_545 vec1 32 ssa_562 = iadd ssa_416, ssa_556 vec1 32 ssa_563 = intrinsic load_shared (ssa_562) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_564 = ffma ssa_554.x, ssa_563, ssa_548 vec1 32 ssa_565 = iadd ssa_163, ssa_35 vec1 32 ssa_566 = ffma ssa_192, ssa_192, ssa_491 vec1 32 ssa_567 = fsqrt ssa_566 vec1 32 ssa_568 = fmul ssa_567, ssa_403 vec1 32 ssa_569 = flrp ssa_14, ssa_15, ssa_568 vec4 32 ssa_570 = tex ssa_569 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_571 = fadd ssa_555, ssa_570.x vec1 32 ssa_572 = ishl ssa_565, ssa_408 vec1 32 ssa_573 = intrinsic load_shared (ssa_572) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_574 = ffma ssa_570.x, ssa_573, ssa_558 vec1 32 ssa_575 = iadd ssa_412, ssa_572 vec1 32 ssa_576 = intrinsic load_shared (ssa_575) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_577 = ffma ssa_570.x, ssa_576, ssa_561 vec1 32 ssa_578 = iadd ssa_416, ssa_572 vec1 
32 ssa_579 = intrinsic load_shared (ssa_578) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_580 = ffma ssa_570.x, ssa_579, ssa_564 vec1 32 ssa_581 = iadd ssa_163, ssa_36 vec1 32 ssa_582 = ffma ssa_322, ssa_322, ssa_491 vec1 32 ssa_583 = fsqrt ssa_582 vec1 32 ssa_584 = fmul ssa_583, ssa_403 vec1 32 ssa_585 = flrp ssa_14, ssa_15, ssa_584 vec4 32 ssa_586 = tex ssa_585 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_587 = fadd ssa_571, ssa_586.x vec1 32 ssa_588 = ishl ssa_581, ssa_408 vec1 32 ssa_589 = intrinsic load_shared (ssa_588) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_590 = ffma ssa_586.x, ssa_589, ssa_574 vec1 32 ssa_591 = iadd ssa_412, ssa_588 vec1 32 ssa_592 = intrinsic load_shared (ssa_591) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_593 = ffma ssa_586.x, ssa_592, ssa_577 vec1 32 ssa_594 = iadd ssa_416, ssa_588 vec1 32 ssa_595 = intrinsic load_shared (ssa_594) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_596 = ffma ssa_586.x, ssa_595, ssa_580 vec1 32 ssa_597 = iadd ssa_163, ssa_2 vec1 32 ssa_598 = ffma ssa_348, ssa_348, ssa_491 vec1 32 ssa_599 = fsqrt ssa_598 vec1 32 ssa_600 = flt32 ssa_599, ssa_13 /* succs: block_47 block_48 */ if ssa_600 { block block_47: /* preds: block_46 */ vec1 32 ssa_601 = fmul ssa_599, ssa_403 vec1 32 ssa_602 = flrp ssa_14, ssa_15, ssa_601 vec4 32 ssa_603 = tex ssa_602 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_604 = fadd ssa_587, ssa_603.x vec1 32 ssa_605 = ishl ssa_597, ssa_408 vec1 32 ssa_606 = intrinsic load_shared (ssa_605) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_607 = ffma ssa_603.x, ssa_606, ssa_590 vec1 32 ssa_608 = iadd ssa_412, ssa_605 vec1 32 ssa_609 = intrinsic load_shared (ssa_608) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_610 = ffma ssa_603.x, ssa_609, ssa_593 vec1 32 ssa_611 = iadd ssa_416, ssa_605 vec1 32 ssa_612 = intrinsic load_shared (ssa_611) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_613 = ffma ssa_603.x, ssa_612, ssa_596 /* succs: block_49 */ } else { block block_48: /* preds: block_46 */ /* succs: block_49 */ } block block_49: /* preds: block_47 block_48 */ vec1 32 ssa_614 = phi block_47: ssa_607, block_48: ssa_590 vec1 32 ssa_615 = phi block_47: ssa_610, block_48: ssa_593 vec1 32 ssa_616 = phi block_47: ssa_613, block_48: ssa_596 vec1 32 ssa_617 = phi block_47: ssa_604, block_48: ssa_587 vec1 32 ssa_618 = iadd ssa_163, ssa_37 vec1 32 ssa_619 = fadd ssa_38, -ssa_125 vec1 32 ssa_620 = ffma ssa_619, ssa_619, ssa_491 vec1 32 ssa_621 = fsqrt ssa_620 vec1 32 ssa_622 = flt32 ssa_621, ssa_13 /* succs: block_50 block_51 */ if ssa_622 { block block_50: /* preds: block_49 */ vec1 32 ssa_623 = fmul ssa_621, ssa_403 vec1 32 ssa_624 = flrp ssa_14, ssa_15, ssa_623 vec4 32 ssa_625 = tex ssa_624 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_626 = fadd ssa_617, ssa_625.x vec1 32 ssa_627 = ishl ssa_618, ssa_408 vec1 32 ssa_628 = intrinsic load_shared (ssa_627) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_629 = ffma ssa_625.x, ssa_628, ssa_614 vec1 32 ssa_630 = iadd ssa_412, ssa_627 vec1 32 ssa_631 = intrinsic load_shared (ssa_630) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_632 = ffma ssa_625.x, ssa_631, ssa_615 vec1 32 ssa_633 = iadd ssa_416, ssa_627 vec1 32 ssa_634 = intrinsic load_shared (ssa_633) (0, 4, 0) /* base=0 */ /* align_mul=4 */ 
/* align_offset=0 */ vec1 32 ssa_635 = ffma ssa_625.x, ssa_634, ssa_616 /* succs: block_52 */ } else { block block_51: /* preds: block_49 */ /* succs: block_52 */ } block block_52: /* preds: block_50 block_51 */ vec1 32 ssa_636 = phi block_50: ssa_629, block_51: ssa_614 vec1 32 ssa_637 = phi block_50: ssa_632, block_51: ssa_615 vec1 32 ssa_638 = phi block_50: ssa_635, block_51: ssa_616 vec1 32 ssa_639 = phi block_50: ssa_626, block_51: ssa_617 vec1 32 ssa_640 = iadd ssa_163, ssa_39 vec1 32 ssa_641 = fadd ssa_16, -ssa_126 vec1 32 ssa_642 = fmul ssa_641, ssa_641 vec1 32 ssa_643 = ffma ssa_490, ssa_490, ssa_642 vec1 32 ssa_644 = fsqrt ssa_643 vec1 32 ssa_645 = flt32 ssa_644, ssa_13 /* succs: block_53 block_54 */ if ssa_645 { block block_53: /* preds: block_52 */ vec1 32 ssa_646 = fmul ssa_644, ssa_403 vec1 32 ssa_647 = flrp ssa_14, ssa_15, ssa_646 vec4 32 ssa_648 = tex ssa_647 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_649 = fadd ssa_639, ssa_648.x vec1 32 ssa_650 = ishl ssa_640, ssa_408 vec1 32 ssa_651 = intrinsic load_shared (ssa_650) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_652 = ffma ssa_648.x, ssa_651, ssa_636 vec1 32 ssa_653 = iadd ssa_412, ssa_650 vec1 32 ssa_654 = intrinsic load_shared (ssa_653) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_655 = ffma ssa_648.x, ssa_654, ssa_637 vec1 32 ssa_656 = iadd ssa_416, ssa_650 vec1 32 ssa_657 = intrinsic load_shared (ssa_656) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_658 = ffma ssa_648.x, ssa_657, ssa_638 /* succs: block_55 */ } else { block block_54: /* preds: block_52 */ /* succs: block_55 */ } block block_55: /* preds: block_53 block_54 */ vec1 32 ssa_659 = phi block_53: ssa_652, block_54: ssa_636 vec1 32 ssa_660 = phi block_53: ssa_655, block_54: ssa_637 vec1 32 ssa_661 = phi block_53: ssa_658, block_54: ssa_638 vec1 32 ssa_662 = phi block_53: ssa_649, block_54: ssa_639 vec1 32 ssa_663 = iadd ssa_163, ssa_40 vec1 32 ssa_664 = ffma ssa_218, ssa_218, ssa_642 vec1 32 ssa_665 = fsqrt ssa_664 vec1 32 ssa_666 = flt32 ssa_665, ssa_13 /* succs: block_56 block_57 */ if ssa_666 { block block_56: /* preds: block_55 */ vec1 32 ssa_667 = fmul ssa_665, ssa_403 vec1 32 ssa_668 = flrp ssa_14, ssa_15, ssa_667 vec4 32 ssa_669 = tex ssa_668 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_670 = fadd ssa_662, ssa_669.x vec1 32 ssa_671 = ishl ssa_663, ssa_408 vec1 32 ssa_672 = intrinsic load_shared (ssa_671) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_673 = ffma ssa_669.x, ssa_672, ssa_659 vec1 32 ssa_674 = iadd ssa_412, ssa_671 vec1 32 ssa_675 = intrinsic load_shared (ssa_674) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_676 = ffma ssa_669.x, ssa_675, ssa_660 vec1 32 ssa_677 = iadd ssa_416, ssa_671 vec1 32 ssa_678 = intrinsic load_shared (ssa_677) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_679 = ffma ssa_669.x, ssa_678, ssa_661 /* succs: block_58 */ } else { block block_57: /* preds: block_55 */ /* succs: block_58 */ } block block_58: /* preds: block_56 block_57 */ vec1 32 ssa_680 = phi block_56: ssa_673, block_57: ssa_659 vec1 32 ssa_681 = phi block_56: ssa_676, block_57: ssa_660 vec1 32 ssa_682 = phi block_56: ssa_679, block_57: ssa_661 vec1 32 ssa_683 = phi block_56: ssa_670, block_57: ssa_662 vec1 32 ssa_684 = iadd ssa_163, ssa_41 vec1 32 ssa_685 = ffma ssa_246, ssa_246, ssa_642 vec1 32 ssa_686 = fsqrt ssa_685 vec1 32 ssa_687 = fmul ssa_686, 
ssa_403 vec1 32 ssa_688 = flrp ssa_14, ssa_15, ssa_687 vec4 32 ssa_689 = tex ssa_688 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_690 = fadd ssa_683, ssa_689.x vec1 32 ssa_691 = ishl ssa_684, ssa_408 vec1 32 ssa_692 = intrinsic load_shared (ssa_691) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_693 = ffma ssa_689.x, ssa_692, ssa_680 vec1 32 ssa_694 = iadd ssa_412, ssa_691 vec1 32 ssa_695 = intrinsic load_shared (ssa_694) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_696 = ffma ssa_689.x, ssa_695, ssa_681 vec1 32 ssa_697 = iadd ssa_416, ssa_691 vec1 32 ssa_698 = intrinsic load_shared (ssa_697) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_699 = ffma ssa_689.x, ssa_698, ssa_682 vec1 32 ssa_700 = iadd ssa_163, ssa_42 vec1 32 ssa_701 = ffma ssa_125, ssa_125, ssa_642 vec1 32 ssa_702 = fsqrt ssa_701 vec1 32 ssa_703 = fmul ssa_702, ssa_403 vec1 32 ssa_704 = flrp ssa_14, ssa_15, ssa_703 vec4 32 ssa_705 = tex ssa_704 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_706 = fadd ssa_690, ssa_705.x vec1 32 ssa_707 = ishl ssa_700, ssa_408 vec1 32 ssa_708 = intrinsic load_shared (ssa_707) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_709 = ffma ssa_705.x, ssa_708, ssa_693 vec1 32 ssa_710 = iadd ssa_412, ssa_707 vec1 32 ssa_711 = intrinsic load_shared (ssa_710) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_712 = ffma ssa_705.x, ssa_711, ssa_696 vec1 32 ssa_713 = iadd ssa_416, ssa_707 vec1 32 ssa_714 = intrinsic load_shared (ssa_713) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_715 = ffma ssa_705.x, ssa_714, ssa_699 vec1 32 ssa_716 = iadd ssa_163, ssa_43 vec1 32 ssa_717 = ffma ssa_192, ssa_192, ssa_642 vec1 32 ssa_718 = fsqrt ssa_717 vec1 32 ssa_719 = fmul ssa_718, ssa_403 vec1 32 ssa_720 = flrp ssa_14, ssa_15, ssa_719 vec4 32 ssa_721 = tex ssa_720 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_722 = fadd ssa_706, ssa_721.x vec1 32 ssa_723 = ishl ssa_716, ssa_408 vec1 32 ssa_724 = intrinsic load_shared (ssa_723) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_725 = ffma ssa_721.x, ssa_724, ssa_709 vec1 32 ssa_726 = iadd ssa_412, ssa_723 vec1 32 ssa_727 = intrinsic load_shared (ssa_726) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_728 = ffma ssa_721.x, ssa_727, ssa_712 vec1 32 ssa_729 = iadd ssa_416, ssa_723 vec1 32 ssa_730 = intrinsic load_shared (ssa_729) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_731 = ffma ssa_721.x, ssa_730, ssa_715 vec1 32 ssa_732 = iadd ssa_163, ssa_44 vec1 32 ssa_733 = ffma ssa_322, ssa_322, ssa_642 vec1 32 ssa_734 = fsqrt ssa_733 vec1 32 ssa_735 = fmul ssa_734, ssa_403 vec1 32 ssa_736 = flrp ssa_14, ssa_15, ssa_735 vec4 32 ssa_737 = tex ssa_736 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_738 = fadd ssa_722, ssa_737.x vec1 32 ssa_739 = ishl ssa_732, ssa_408 vec1 32 ssa_740 = intrinsic load_shared (ssa_739) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_741 = ffma ssa_737.x, ssa_740, ssa_725 vec1 32 ssa_742 = iadd ssa_412, ssa_739 vec1 32 ssa_743 = intrinsic load_shared (ssa_742) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_744 = ffma ssa_737.x, ssa_743, ssa_728 vec1 32 ssa_745 = iadd ssa_416, ssa_739 vec1 32 ssa_746 = intrinsic load_shared (ssa_745) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 
ssa_747 = ffma ssa_737.x, ssa_746, ssa_731 vec1 32 ssa_748 = iadd ssa_163, ssa_45 vec1 32 ssa_749 = ffma ssa_348, ssa_348, ssa_642 vec1 32 ssa_750 = fsqrt ssa_749 vec1 32 ssa_751 = flt32 ssa_750, ssa_13 /* succs: block_59 block_60 */ if ssa_751 { block block_59: /* preds: block_58 */ vec1 32 ssa_752 = fmul ssa_750, ssa_403 vec1 32 ssa_753 = flrp ssa_14, ssa_15, ssa_752 vec4 32 ssa_754 = tex ssa_753 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_755 = fadd ssa_738, ssa_754.x vec1 32 ssa_756 = ishl ssa_748, ssa_408 vec1 32 ssa_757 = intrinsic load_shared (ssa_756) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_758 = ffma ssa_754.x, ssa_757, ssa_741 vec1 32 ssa_759 = iadd ssa_412, ssa_756 vec1 32 ssa_760 = intrinsic load_shared (ssa_759) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_761 = ffma ssa_754.x, ssa_760, ssa_744 vec1 32 ssa_762 = iadd ssa_416, ssa_756 vec1 32 ssa_763 = intrinsic load_shared (ssa_762) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_764 = ffma ssa_754.x, ssa_763, ssa_747 /* succs: block_61 */ } else { block block_60: /* preds: block_58 */ /* succs: block_61 */ } block block_61: /* preds: block_59 block_60 */ vec1 32 ssa_765 = phi block_59: ssa_758, block_60: ssa_741 vec1 32 ssa_766 = phi block_59: ssa_761, block_60: ssa_744 vec1 32 ssa_767 = phi block_59: ssa_764, block_60: ssa_747 vec1 32 ssa_768 = phi block_59: ssa_755, block_60: ssa_738 vec1 32 ssa_769 = iadd ssa_163, ssa_46 vec1 32 ssa_770 = ffma ssa_619, ssa_619, ssa_642 vec1 32 ssa_771 = fsqrt ssa_770 vec1 32 ssa_772 = flt32 ssa_771, ssa_13 /* succs: block_62 block_63 */ if ssa_772 { block block_62: /* preds: block_61 */ vec1 32 ssa_773 = fmul ssa_771, ssa_403 vec1 32 ssa_774 = flrp ssa_14, ssa_15, ssa_773 vec4 32 ssa_775 = tex ssa_774 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_776 = fadd ssa_768, ssa_775.x vec1 32 ssa_777 = ishl ssa_769, ssa_408 vec1 32 ssa_778 = intrinsic load_shared (ssa_777) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_779 = ffma ssa_775.x, ssa_778, ssa_765 vec1 32 ssa_780 = iadd ssa_412, ssa_777 vec1 32 ssa_781 = intrinsic load_shared (ssa_780) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_782 = ffma ssa_775.x, ssa_781, ssa_766 vec1 32 ssa_783 = iadd ssa_416, ssa_777 vec1 32 ssa_784 = intrinsic load_shared (ssa_783) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_785 = ffma ssa_775.x, ssa_784, ssa_767 /* succs: block_64 */ } else { block block_63: /* preds: block_61 */ /* succs: block_64 */ } block block_64: /* preds: block_62 block_63 */ vec1 32 ssa_786 = phi block_62: ssa_779, block_63: ssa_765 vec1 32 ssa_787 = phi block_62: ssa_782, block_63: ssa_766 vec1 32 ssa_788 = phi block_62: ssa_785, block_63: ssa_767 vec1 32 ssa_789 = phi block_62: ssa_776, block_63: ssa_768 vec1 32 ssa_790 = iadd ssa_163, ssa_47 vec1 32 ssa_791 = fadd ssa_23, -ssa_126 vec1 32 ssa_792 = fmul ssa_791, ssa_791 vec1 32 ssa_793 = ffma ssa_218, ssa_218, ssa_792 vec1 32 ssa_794 = fsqrt ssa_793 vec1 32 ssa_795 = flt32 ssa_794, ssa_13 /* succs: block_65 block_66 */ if ssa_795 { block block_65: /* preds: block_64 */ vec1 32 ssa_796 = fmul ssa_794, ssa_403 vec1 32 ssa_797 = flrp ssa_14, ssa_15, ssa_796 vec4 32 ssa_798 = tex ssa_797 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_799 = fadd ssa_789, ssa_798.x vec1 32 ssa_800 = ishl ssa_790, ssa_408 vec1 32 ssa_801 = intrinsic load_shared (ssa_800) (0, 4, 0) /* base=0 */ 
/* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_802 = ffma ssa_798.x, ssa_801, ssa_786 vec1 32 ssa_803 = iadd ssa_412, ssa_800 vec1 32 ssa_804 = intrinsic load_shared (ssa_803) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_805 = ffma ssa_798.x, ssa_804, ssa_787 vec1 32 ssa_806 = iadd ssa_416, ssa_800 vec1 32 ssa_807 = intrinsic load_shared (ssa_806) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_808 = ffma ssa_798.x, ssa_807, ssa_788 /* succs: block_67 */ } else { block block_66: /* preds: block_64 */ /* succs: block_67 */ } block block_67: /* preds: block_65 block_66 */ vec1 32 ssa_809 = phi block_65: ssa_802, block_66: ssa_786 vec1 32 ssa_810 = phi block_65: ssa_805, block_66: ssa_787 vec1 32 ssa_811 = phi block_65: ssa_808, block_66: ssa_788 vec1 32 ssa_812 = phi block_65: ssa_799, block_66: ssa_789 vec1 32 ssa_813 = iadd ssa_163, ssa_48 vec1 32 ssa_814 = ffma ssa_246, ssa_246, ssa_792 vec1 32 ssa_815 = fsqrt ssa_814 vec1 32 ssa_816 = fmul ssa_815, ssa_403 vec1 32 ssa_817 = flrp ssa_14, ssa_15, ssa_816 vec4 32 ssa_818 = tex ssa_817 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_819 = fadd ssa_812, ssa_818.x vec1 32 ssa_820 = ishl ssa_813, ssa_408 vec1 32 ssa_821 = intrinsic load_shared (ssa_820) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_822 = ffma ssa_818.x, ssa_821, ssa_809 vec1 32 ssa_823 = iadd ssa_412, ssa_820 vec1 32 ssa_824 = intrinsic load_shared (ssa_823) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_825 = ffma ssa_818.x, ssa_824, ssa_810 vec1 32 ssa_826 = iadd ssa_416, ssa_820 vec1 32 ssa_827 = intrinsic load_shared (ssa_826) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_828 = ffma ssa_818.x, ssa_827, ssa_811 vec1 32 ssa_829 = iadd ssa_163, ssa_49 vec1 32 ssa_830 = ffma ssa_125, ssa_125, ssa_792 vec1 32 ssa_831 = fsqrt ssa_830 vec1 32 ssa_832 = fmul ssa_831, ssa_403 vec1 32 ssa_833 = flrp ssa_14, ssa_15, ssa_832 vec4 32 ssa_834 = tex ssa_833 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_835 = fadd ssa_819, ssa_834.x vec1 32 ssa_836 = ishl ssa_829, ssa_408 vec1 32 ssa_837 = intrinsic load_shared (ssa_836) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_838 = ffma ssa_834.x, ssa_837, ssa_822 vec1 32 ssa_839 = iadd ssa_412, ssa_836 vec1 32 ssa_840 = intrinsic load_shared (ssa_839) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_841 = ffma ssa_834.x, ssa_840, ssa_825 vec1 32 ssa_842 = iadd ssa_416, ssa_836 vec1 32 ssa_843 = intrinsic load_shared (ssa_842) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_844 = ffma ssa_834.x, ssa_843, ssa_828 vec1 32 ssa_845 = iadd ssa_163, ssa_50 vec1 32 ssa_846 = ffma ssa_192, ssa_192, ssa_792 vec1 32 ssa_847 = fsqrt ssa_846 vec1 32 ssa_848 = fmul ssa_847, ssa_403 vec1 32 ssa_849 = flrp ssa_14, ssa_15, ssa_848 vec4 32 ssa_850 = tex ssa_849 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_851 = fadd ssa_835, ssa_850.x vec1 32 ssa_852 = ishl ssa_845, ssa_408 vec1 32 ssa_853 = intrinsic load_shared (ssa_852) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_854 = ffma ssa_850.x, ssa_853, ssa_838 vec1 32 ssa_855 = iadd ssa_412, ssa_852 vec1 32 ssa_856 = intrinsic load_shared (ssa_855) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_857 = ffma ssa_850.x, ssa_856, ssa_841 vec1 32 ssa_858 = iadd ssa_416, ssa_852 vec1 32 ssa_859 = intrinsic 
load_shared (ssa_858) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_860 = ffma ssa_850.x, ssa_859, ssa_844 vec1 32 ssa_861 = iadd ssa_163, ssa_51 vec1 32 ssa_862 = ffma ssa_322, ssa_322, ssa_792 vec1 32 ssa_863 = fsqrt ssa_862 vec1 32 ssa_864 = fmul ssa_863, ssa_403 vec1 32 ssa_865 = flrp ssa_14, ssa_15, ssa_864 vec4 32 ssa_866 = tex ssa_865 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_867 = fadd ssa_851, ssa_866.x vec1 32 ssa_868 = ishl ssa_861, ssa_408 vec1 32 ssa_869 = intrinsic load_shared (ssa_868) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_870 = ffma ssa_866.x, ssa_869, ssa_854 vec1 32 ssa_871 = iadd ssa_412, ssa_868 vec1 32 ssa_872 = intrinsic load_shared (ssa_871) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_873 = ffma ssa_866.x, ssa_872, ssa_857 vec1 32 ssa_874 = iadd ssa_416, ssa_868 vec1 32 ssa_875 = intrinsic load_shared (ssa_874) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_876 = ffma ssa_866.x, ssa_875, ssa_860 vec1 32 ssa_877 = iadd ssa_163, ssa_52 vec1 32 ssa_878 = ffma ssa_348, ssa_348, ssa_792 vec1 32 ssa_879 = fsqrt ssa_878 vec1 32 ssa_880 = flt32 ssa_879, ssa_13 /* succs: block_68 block_69 */ if ssa_880 { block block_68: /* preds: block_67 */ vec1 32 ssa_881 = fmul ssa_879, ssa_403 vec1 32 ssa_882 = flrp ssa_14, ssa_15, ssa_881 vec4 32 ssa_883 = tex ssa_882 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_884 = fadd ssa_867, ssa_883.x vec1 32 ssa_885 = ishl ssa_877, ssa_408 vec1 32 ssa_886 = intrinsic load_shared (ssa_885) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_887 = ffma ssa_883.x, ssa_886, ssa_870 vec1 32 ssa_888 = iadd ssa_412, ssa_885 vec1 32 ssa_889 = intrinsic load_shared (ssa_888) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_890 = ffma ssa_883.x, ssa_889, ssa_873 vec1 32 ssa_891 = iadd ssa_416, ssa_885 vec1 32 ssa_892 = intrinsic load_shared (ssa_891) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_893 = ffma ssa_883.x, ssa_892, ssa_876 /* succs: block_70 */ } else { block block_69: /* preds: block_67 */ /* succs: block_70 */ } block block_70: /* preds: block_68 block_69 */ vec1 32 ssa_894 = phi block_68: ssa_887, block_69: ssa_870 vec1 32 ssa_895 = phi block_68: ssa_890, block_69: ssa_873 vec1 32 ssa_896 = phi block_68: ssa_893, block_69: ssa_876 vec1 32 ssa_897 = phi block_68: ssa_884, block_69: ssa_867 vec1 32 ssa_898 = iadd ssa_163, ssa_53 vec1 32 ssa_899 = fadd ssa_25, -ssa_126 vec1 32 ssa_900 = fmul ssa_899, ssa_899 vec1 32 ssa_901 = ffma ssa_218, ssa_218, ssa_900 vec1 32 ssa_902 = fsqrt ssa_901 vec1 32 ssa_903 = flt32 ssa_902, ssa_13 /* succs: block_71 block_72 */ if ssa_903 { block block_71: /* preds: block_70 */ vec1 32 ssa_904 = fmul ssa_902, ssa_403 vec1 32 ssa_905 = flrp ssa_14, ssa_15, ssa_904 vec4 32 ssa_906 = tex ssa_905 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_907 = fadd ssa_897, ssa_906.x vec1 32 ssa_908 = ishl ssa_898, ssa_408 vec1 32 ssa_909 = intrinsic load_shared (ssa_908) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_910 = ffma ssa_906.x, ssa_909, ssa_894 vec1 32 ssa_911 = iadd ssa_412, ssa_908 vec1 32 ssa_912 = intrinsic load_shared (ssa_911) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_913 = ffma ssa_906.x, ssa_912, ssa_895 vec1 32 ssa_914 = iadd ssa_416, ssa_908 vec1 32 ssa_915 = intrinsic load_shared (ssa_914) (0, 4, 0) /* base=0 */ 
/* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_916 = ffma ssa_906.x, ssa_915, ssa_896 /* succs: block_73 */ } else { block block_72: /* preds: block_70 */ /* succs: block_73 */ } block block_73: /* preds: block_71 block_72 */ vec1 32 ssa_917 = phi block_71: ssa_910, block_72: ssa_894 vec1 32 ssa_918 = phi block_71: ssa_913, block_72: ssa_895 vec1 32 ssa_919 = phi block_71: ssa_916, block_72: ssa_896 vec1 32 ssa_920 = phi block_71: ssa_907, block_72: ssa_897 vec1 32 ssa_921 = iadd ssa_163, ssa_54 vec1 32 ssa_922 = ffma ssa_246, ssa_246, ssa_900 vec1 32 ssa_923 = fsqrt ssa_922 vec1 32 ssa_924 = flt32 ssa_923, ssa_13 /* succs: block_74 block_75 */ if ssa_924 { block block_74: /* preds: block_73 */ vec1 32 ssa_925 = fmul ssa_923, ssa_403 vec1 32 ssa_926 = flrp ssa_14, ssa_15, ssa_925 vec4 32 ssa_927 = tex ssa_926 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_928 = fadd ssa_920, ssa_927.x vec1 32 ssa_929 = ishl ssa_921, ssa_408 vec1 32 ssa_930 = intrinsic load_shared (ssa_929) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_931 = ffma ssa_927.x, ssa_930, ssa_917 vec1 32 ssa_932 = iadd ssa_412, ssa_929 vec1 32 ssa_933 = intrinsic load_shared (ssa_932) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_934 = ffma ssa_927.x, ssa_933, ssa_918 vec1 32 ssa_935 = iadd ssa_416, ssa_929 vec1 32 ssa_936 = intrinsic load_shared (ssa_935) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_937 = ffma ssa_927.x, ssa_936, ssa_919 /* succs: block_76 */ } else { block block_75: /* preds: block_73 */ /* succs: block_76 */ } block block_76: /* preds: block_74 block_75 */ vec1 32 ssa_938 = phi block_74: ssa_931, block_75: ssa_917 vec1 32 ssa_939 = phi block_74: ssa_934, block_75: ssa_918 vec1 32 ssa_940 = phi block_74: ssa_937, block_75: ssa_919 vec1 32 ssa_941 = phi block_74: ssa_928, block_75: ssa_920 vec1 32 ssa_942 = iadd ssa_163, ssa_55 vec1 32 ssa_943 = ffma ssa_125, ssa_125, ssa_900 vec1 32 ssa_944 = fsqrt ssa_943 vec1 32 ssa_945 = flt32 ssa_944, ssa_13 /* succs: block_77 block_78 */ if ssa_945 { block block_77: /* preds: block_76 */ vec1 32 ssa_946 = fmul ssa_944, ssa_403 vec1 32 ssa_947 = flrp ssa_14, ssa_15, ssa_946 vec4 32 ssa_948 = tex ssa_947 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_949 = fadd ssa_941, ssa_948.x vec1 32 ssa_950 = ishl ssa_942, ssa_408 vec1 32 ssa_951 = intrinsic load_shared (ssa_950) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_952 = ffma ssa_948.x, ssa_951, ssa_938 vec1 32 ssa_953 = iadd ssa_412, ssa_950 vec1 32 ssa_954 = intrinsic load_shared (ssa_953) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_955 = ffma ssa_948.x, ssa_954, ssa_939 vec1 32 ssa_956 = iadd ssa_416, ssa_950 vec1 32 ssa_957 = intrinsic load_shared (ssa_956) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_958 = ffma ssa_948.x, ssa_957, ssa_940 /* succs: block_79 */ } else { block block_78: /* preds: block_76 */ /* succs: block_79 */ } block block_79: /* preds: block_77 block_78 */ vec1 32 ssa_959 = phi block_77: ssa_952, block_78: ssa_938 vec1 32 ssa_960 = phi block_77: ssa_955, block_78: ssa_939 vec1 32 ssa_961 = phi block_77: ssa_958, block_78: ssa_940 vec1 32 ssa_962 = phi block_77: ssa_949, block_78: ssa_941 vec1 32 ssa_963 = iadd ssa_163, ssa_56 vec1 32 ssa_964 = ffma ssa_192, ssa_192, ssa_900 vec1 32 ssa_965 = fsqrt ssa_964 vec1 32 ssa_966 = flt32 ssa_965, ssa_13 /* succs: block_80 block_81 */ if ssa_966 { block block_80: 
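/* annotation (not compiler output): each join block carries four phis, the three plane accumulators plus the weight sum, with the else side simply forwarding the previous values when a tap's distance test fails. Taps closer to the kernel center (for example the ssa_400..ssa_467 run earlier) carry no flt32 guard at all, presumably because their distance can never reach the 3.032708 cutoff and the branch folded away at compile time. */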
/* preds: block_79 */ vec1 32 ssa_967 = fmul ssa_965, ssa_403 vec1 32 ssa_968 = flrp ssa_14, ssa_15, ssa_967 vec4 32 ssa_969 = tex ssa_968 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_970 = fadd ssa_962, ssa_969.x vec1 32 ssa_971 = ishl ssa_963, ssa_408 vec1 32 ssa_972 = intrinsic load_shared (ssa_971) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_973 = ffma ssa_969.x, ssa_972, ssa_959 vec1 32 ssa_974 = iadd ssa_412, ssa_971 vec1 32 ssa_975 = intrinsic load_shared (ssa_974) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_976 = ffma ssa_969.x, ssa_975, ssa_960 vec1 32 ssa_977 = iadd ssa_416, ssa_971 vec1 32 ssa_978 = intrinsic load_shared (ssa_977) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_979 = ffma ssa_969.x, ssa_978, ssa_961 /* succs: block_82 */ } else { block block_81: /* preds: block_79 */ /* succs: block_82 */ } block block_82: /* preds: block_80 block_81 */ vec1 32 ssa_980 = phi block_80: ssa_973, block_81: ssa_959 vec1 32 ssa_981 = phi block_80: ssa_976, block_81: ssa_960 vec1 32 ssa_982 = phi block_80: ssa_979, block_81: ssa_961 vec1 32 ssa_983 = phi block_80: ssa_970, block_81: ssa_962 vec1 32 ssa_984 = iadd ssa_163, ssa_57 vec1 32 ssa_985 = ffma ssa_322, ssa_322, ssa_900 vec1 32 ssa_986 = fsqrt ssa_985 vec1 32 ssa_987 = flt32 ssa_986, ssa_13 /* succs: block_83 block_84 */ if ssa_987 { block block_83: /* preds: block_82 */ vec1 32 ssa_988 = fmul ssa_986, ssa_403 vec1 32 ssa_989 = flrp ssa_14, ssa_15, ssa_988 vec4 32 ssa_990 = tex ssa_989 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_991 = fadd ssa_983, ssa_990.x vec1 32 ssa_992 = ishl ssa_984, ssa_408 vec1 32 ssa_993 = intrinsic load_shared (ssa_992) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_994 = ffma ssa_990.x, ssa_993, ssa_980 vec1 32 ssa_995 = iadd ssa_412, ssa_992 vec1 32 ssa_996 = intrinsic load_shared (ssa_995) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_997 = ffma ssa_990.x, ssa_996, ssa_981 vec1 32 ssa_998 = iadd ssa_416, ssa_992 vec1 32 ssa_999 = intrinsic load_shared (ssa_998) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1000 = ffma ssa_990.x, ssa_999, ssa_982 /* succs: block_85 */ } else { block block_84: /* preds: block_82 */ /* succs: block_85 */ } block block_85: /* preds: block_83 block_84 */ vec1 32 ssa_1001 = phi block_83: ssa_994, block_84: ssa_980 vec1 32 ssa_1002 = phi block_83: ssa_997, block_84: ssa_981 vec1 32 ssa_1003 = phi block_83: ssa_1000, block_84: ssa_982 vec1 32 ssa_1004 = phi block_83: ssa_991, block_84: ssa_983 vec1 32 ssa_1005 = iadd ssa_163, ssa_58 vec1 32 ssa_1006 = ffma ssa_348, ssa_348, ssa_900 vec1 32 ssa_1007 = fsqrt ssa_1006 vec1 32 ssa_1008 = flt32 ssa_1007, ssa_13 /* succs: block_86 block_87 */ if ssa_1008 { block block_86: /* preds: block_85 */ vec1 32 ssa_1009 = fmul ssa_1007, ssa_403 vec1 32 ssa_1010 = flrp ssa_14, ssa_15, ssa_1009 vec4 32 ssa_1011 = tex ssa_1010 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_1012 = fadd ssa_1004, ssa_1011.x vec1 32 ssa_1013 = ishl ssa_1005, ssa_408 vec1 32 ssa_1014 = intrinsic load_shared (ssa_1013) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1015 = ffma ssa_1011.x, ssa_1014, ssa_1001 vec1 32 ssa_1016 = iadd ssa_412, ssa_1013 vec1 32 ssa_1017 = intrinsic load_shared (ssa_1016) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1018 = ffma ssa_1011.x, ssa_1017, ssa_1002 vec1 32 
ssa_1019 = iadd ssa_416, ssa_1013 vec1 32 ssa_1020 = intrinsic load_shared (ssa_1019) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1021 = ffma ssa_1011.x, ssa_1020, ssa_1003 /* succs: block_88 */ } else { block block_87: /* preds: block_85 */ /* succs: block_88 */ } block block_88: /* preds: block_86 block_87 */ vec1 32 ssa_1022 = phi block_86: ssa_1015, block_87: ssa_1001 vec1 32 ssa_1023 = phi block_86: ssa_1018, block_87: ssa_1002 vec1 32 ssa_1024 = phi block_86: ssa_1021, block_87: ssa_1003 vec1 32 ssa_1025 = phi block_86: ssa_1012, block_87: ssa_1004 vec1 32 ssa_1026 = iadd ssa_163, ssa_59 vec1 32 ssa_1027 = fadd ssa_38, -ssa_126 vec1 32 ssa_1028 = fmul ssa_1027, ssa_1027 vec1 32 ssa_1029 = ffma ssa_125, ssa_125, ssa_1028 vec1 32 ssa_1030 = fsqrt ssa_1029 vec1 32 ssa_1031 = flt32 ssa_1030, ssa_13 /* succs: block_89 block_90 */ if ssa_1031 { block block_89: /* preds: block_88 */ vec1 32 ssa_1032 = fmul ssa_1030, ssa_403 vec1 32 ssa_1033 = flrp ssa_14, ssa_15, ssa_1032 vec4 32 ssa_1034 = tex ssa_1033 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_1035 = fadd ssa_1025, ssa_1034.x vec1 32 ssa_1036 = ishl ssa_1026, ssa_408 vec1 32 ssa_1037 = intrinsic load_shared (ssa_1036) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1038 = ffma ssa_1034.x, ssa_1037, ssa_1022 vec1 32 ssa_1039 = iadd ssa_412, ssa_1036 vec1 32 ssa_1040 = intrinsic load_shared (ssa_1039) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1041 = ffma ssa_1034.x, ssa_1040, ssa_1023 vec1 32 ssa_1042 = iadd ssa_416, ssa_1036 vec1 32 ssa_1043 = intrinsic load_shared (ssa_1042) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1044 = ffma ssa_1034.x, ssa_1043, ssa_1024 /* succs: block_91 */ } else { block block_90: /* preds: block_88 */ /* succs: block_91 */ } block block_91: /* preds: block_89 block_90 */ vec1 32 ssa_1045 = phi block_89: ssa_1038, block_90: ssa_1022 vec1 32 ssa_1046 = phi block_89: ssa_1041, block_90: ssa_1023 vec1 32 ssa_1047 = phi block_89: ssa_1044, block_90: ssa_1024 vec1 32 ssa_1048 = phi block_89: ssa_1035, block_90: ssa_1025 vec1 32 ssa_1049 = iadd ssa_163, ssa_60 vec1 32 ssa_1050 = ffma ssa_192, ssa_192, ssa_1028 vec1 32 ssa_1051 = fsqrt ssa_1050 vec1 32 ssa_1052 = flt32 ssa_1051, ssa_13 /* succs: block_92 block_93 */ if ssa_1052 { block block_92: /* preds: block_91 */ vec1 32 ssa_1053 = fmul ssa_1051, ssa_403 vec1 32 ssa_1054 = flrp ssa_14, ssa_15, ssa_1053 vec4 32 ssa_1055 = tex ssa_1054 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_1056 = fadd ssa_1048, ssa_1055.x vec1 32 ssa_1057 = ishl ssa_1049, ssa_408 vec1 32 ssa_1058 = intrinsic load_shared (ssa_1057) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1059 = ffma ssa_1055.x, ssa_1058, ssa_1045 vec1 32 ssa_1060 = iadd ssa_412, ssa_1057 vec1 32 ssa_1061 = intrinsic load_shared (ssa_1060) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1062 = ffma ssa_1055.x, ssa_1061, ssa_1046 vec1 32 ssa_1063 = iadd ssa_416, ssa_1057 vec1 32 ssa_1064 = intrinsic load_shared (ssa_1063) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1065 = ffma ssa_1055.x, ssa_1064, ssa_1047 /* succs: block_94 */ } else { block block_93: /* preds: block_91 */ /* succs: block_94 */ } block block_94: /* preds: block_92 block_93 */ vec1 32 ssa_1066 = phi block_92: ssa_1059, block_93: ssa_1045 vec1 32 ssa_1067 = phi block_92: ssa_1062, block_93: ssa_1046 vec1 32 ssa_1068 = phi 
block_92: ssa_1065, block_93: ssa_1047 vec1 32 ssa_1069 = phi block_92: ssa_1056, block_93: ssa_1048 vec1 32 ssa_1070 = frcp ssa_1069 vec1 32 ssa_1071 = fmul.sat ssa_1066, ssa_1070 vec1 32 ssa_1072 = fmul.sat ssa_1067, ssa_1070 vec1 32 ssa_1073 = fmul.sat ssa_1068, ssa_1070 vec1 32 ssa_1074 = fadd ssa_61, -ssa_1071 vec1 32 ssa_1075 = fadd ssa_61, -ssa_1072 vec1 32 ssa_1076 = fadd ssa_61, -ssa_1073 vec1 32 ssa_1077 = load_const (0x41160a50 /* 9.377518 */) vec1 32 ssa_1078 = fmul ssa_1077, ssa_1074 vec1 32 ssa_1079 = fmul ssa_1077, ssa_1075 vec1 32 ssa_1080 = fmul ssa_1077, ssa_1076 vec1 32 ssa_1081 = fexp2 ssa_1078 vec1 32 ssa_1082 = fexp2 ssa_1079 vec1 32 ssa_1083 = fexp2 ssa_1080 vec1 32 ssa_1084 = fadd ssa_16, ssa_1081 vec1 32 ssa_1085 = fadd ssa_16, ssa_1082 vec1 32 ssa_1086 = fadd ssa_16, ssa_1083 vec1 32 ssa_1087 = frcp ssa_1084 vec1 32 ssa_1088 = frcp ssa_1085 vec1 32 ssa_1089 = frcp ssa_1086 vec1 32 ssa_1090 = fadd ssa_1087, ssa_62 vec1 32 ssa_1091 = fadd ssa_1088, ssa_62 vec1 32 ssa_1092 = fadd ssa_1089, ssa_62 vec1 32 ssa_1093 = load_const (0x3f9a9b5f /* 1.207867 */) vec1 32 ssa_1094 = fmul.sat ssa_1090, ssa_1093 vec1 32 ssa_1095 = fmul.sat ssa_1091, ssa_1093 vec1 32 ssa_1096 = fmul.sat ssa_1092, ssa_1093 vec1 32 ssa_1097 = fpow ssa_1094, ssa_63 vec1 32 ssa_1098 = fpow ssa_1095, ssa_63 vec1 32 ssa_1099 = fpow ssa_1096, ssa_63 vec1 32 ssa_1100 = undefined vec4 32 ssa_1101 = vec4 ssa_86, ssa_87, ssa_1100, ssa_1100 vec4 32 ssa_1102 = vec4 ssa_1097, ssa_1098, ssa_1099, ssa_16 intrinsic image_store (ssa_0, ssa_1101, ssa_1100, ssa_1102) (1, 0, 34842, 8) /* image_dim=2D */ /* image_dim=true */ /* format=34842 */ /* access=8 */ /* succs: block_95 */ block block_95: } NIR (final form) for compute shader: shader: MESA_SHADER_COMPUTE name: GLSL19 local-size: 32, 8, 1 shared-size: 5040 inputs: 0 outputs: 0 uniforms: 80 shared: 0 decl_var ubo INTERP_MODE_NONE vec2 tex_scale0 (7, 0, 0) decl_var ubo INTERP_MODE_NONE vec2 out_scale (6, 0, 0) decl_var ubo INTERP_MODE_NONE vec2 pixel_size0 (5, 0, 0) decl_var ubo INTERP_MODE_NONE vec2 texture_off0 (4, 0, 0) decl_var ubo INTERP_MODE_NONE mat2 texture_rot0 (3, 0, 0) decl_var ubo INTERP_MODE_NONE vec2 texture_size0 (2, 0, 0) decl_var ubo INTERP_MODE_NONE vec3 dst_luma (1, 0, 0) decl_var ubo INTERP_MODE_NONE vec3 src_luma (0, 0, 0) decl_var uniform INTERP_MODE_NONE sampler1D lut (8, 0, 0) decl_var uniform INTERP_MODE_NONE writeonly GL_RGBA16F image2D out_image (9, 0, 0) decl_var uniform INTERP_MODE_NONE sampler2D texture0 (10, 0, 1) decl_function main (0 params) impl main { decl_reg vec1 32 r0 decl_reg vec1 32 r1 decl_reg vec1 32 r2 decl_reg vec1 32 r3 decl_reg vec1 32 r4 decl_reg vec1 32 r5 decl_reg vec1 32 r6 decl_reg vec1 32 r7 decl_reg vec1 32 r8 decl_reg vec1 32 r9 decl_reg vec1 32 r10 decl_reg vec1 32 r11 decl_reg vec1 32 r12 decl_reg vec1 32 r13 decl_reg vec1 32 r14 decl_reg vec1 32 r15 decl_reg vec1 32 r16 decl_reg vec1 32 r17 decl_reg vec1 32 r18 decl_reg vec1 32 r19 decl_reg vec1 32 r20 decl_reg vec1 32 r21 decl_reg vec1 32 r22 block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x00000040 /* 0.000000 */) vec1 32 ssa_2 = load_const (0x00000060 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_4 = load_const (0x00000020 /* 0.000000 */) vec1 32 ssa_5 = load_const (0xbf000000 /* -0.500000 */) vec1 32 ssa_6 = load_const (0x0000000e /* 0.000000 */) vec1 32 ssa_7 = load_const (0x0000001e /* 0.000000 */) vec1 32 ssa_8 = load_const (0xfffffffd /* -nan */) vec1 
32 ssa_9 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_10 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_11 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_12 = load_const (0xc0400000 /* -3.000000 */) vec1 32 ssa_13 = load_const (0x404217e3 /* 3.032708 */) vec1 32 ssa_14 = load_const (0x3c000000 /* 0.007812 */) vec1 32 ssa_15 = load_const (0x3f7e0000 /* 0.992188 */) vec1 32 ssa_16 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_17 = load_const (0x0000001f /* 0.000000 */) vec1 32 ssa_18 = load_const (0xc0000000 /* -2.000000 */) vec1 32 ssa_19 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_20 = load_const (0x00000021 /* 0.000000 */) vec1 32 ssa_21 = load_const (0x00000022 /* 0.000000 */) vec1 32 ssa_22 = load_const (0x00000023 /* 0.000000 */) vec1 32 ssa_23 = load_const (0x40000000 /* 2.000000 */) vec1 32 ssa_24 = load_const (0x00000024 /* 0.000000 */) vec1 32 ssa_25 = load_const (0x40400000 /* 3.000000 */) vec1 32 ssa_26 = load_const (0x0000003d /* 0.000000 */) vec1 32 ssa_27 = load_const (0x0000003e /* 0.000000 */) vec1 32 ssa_28 = load_const (0x0000003f /* 0.000000 */) vec1 32 ssa_29 = load_const (0x00000041 /* 0.000000 */) vec1 32 ssa_30 = load_const (0x00000042 /* 0.000000 */) vec1 32 ssa_31 = load_const (0x0000005a /* 0.000000 */) vec1 32 ssa_32 = load_const (0x0000005b /* 0.000000 */) vec1 32 ssa_33 = load_const (0x0000005c /* 0.000000 */) vec1 32 ssa_34 = load_const (0x0000005d /* 0.000000 */) vec1 32 ssa_35 = load_const (0x0000005e /* 0.000000 */) vec1 32 ssa_36 = load_const (0x0000005f /* 0.000000 */) vec1 32 ssa_37 = load_const (0x00000061 /* 0.000000 */) vec1 32 ssa_38 = load_const (0x40800000 /* 4.000000 */) vec1 32 ssa_39 = load_const (0x00000078 /* 0.000000 */) vec1 32 ssa_40 = load_const (0x00000079 /* 0.000000 */) vec1 32 ssa_41 = load_const (0x0000007a /* 0.000000 */) vec1 32 ssa_42 = load_const (0x0000007b /* 0.000000 */) vec1 32 ssa_43 = load_const (0x0000007c /* 0.000000 */) vec1 32 ssa_44 = load_const (0x0000007d /* 0.000000 */) vec1 32 ssa_45 = load_const (0x0000007e /* 0.000000 */) vec1 32 ssa_46 = load_const (0x0000007f /* 0.000000 */) vec1 32 ssa_47 = load_const (0x00000097 /* 0.000000 */) vec1 32 ssa_48 = load_const (0x00000098 /* 0.000000 */) vec1 32 ssa_49 = load_const (0x00000099 /* 0.000000 */) vec1 32 ssa_50 = load_const (0x0000009a /* 0.000000 */) vec1 32 ssa_51 = load_const (0x0000009b /* 0.000000 */) vec1 32 ssa_52 = load_const (0x0000009c /* 0.000000 */) vec1 32 ssa_53 = load_const (0x000000b5 /* 0.000000 */) vec1 32 ssa_54 = load_const (0x000000b6 /* 0.000000 */) vec1 32 ssa_55 = load_const (0x000000b7 /* 0.000000 */) vec1 32 ssa_56 = load_const (0x000000b8 /* 0.000000 */) vec1 32 ssa_57 = load_const (0x000000b9 /* 0.000000 */) vec1 32 ssa_58 = load_const (0x000000ba /* 0.000000 */) vec1 32 ssa_59 = load_const (0x000000d5 /* 0.000000 */) vec1 32 ssa_60 = load_const (0x000000d6 /* 0.000000 */) vec1 32 ssa_61 = load_const (0x3f400000 /* 0.750000 */) vec1 32 ssa_62 = load_const (0xbbf8487c /* -0.007577 */) vec1 32 ssa_63 = load_const (0x3ed55555 /* 0.416667 */) vec1 32 ssa_64 = load_const (0x00000030 /* 0.000000 */) vec2 32 ssa_65 = intrinsic load_ubo (ssa_0, ssa_64) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec2 32 ssa_66 = intrinsic load_ubo (ssa_0, ssa_1) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_67 = load_const (0x00000068 /* 0.000000 */) vec2 32 ssa_68 = intrinsic load_ubo (ssa_0, ssa_67) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec2 32 ssa_69 = intrinsic load_ubo (ssa_0, ssa_2) (8, 0) /* 
align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_70 = load_const (0x00000058 /* 0.000000 */) vec2 32 ssa_71 = intrinsic load_ubo (ssa_0, ssa_70) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_72 = load_const (0x00000050 /* 0.000000 */) vec2 32 ssa_73 = intrinsic load_ubo (ssa_0, ssa_72) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec3 32 ssa_74 = intrinsic load_work_group_id () () vec1 32 ssa_75 = intrinsic load_subgroup_id () () vec1 32 ssa_76 = ishl ssa_75, ssa_9 vec1 32 ssa_77 = intrinsic load_subgroup_invocation () () vec1 32 ssa_78 = iadd ssa_77, ssa_76 r0 = iand ssa_78, ssa_17 vec1 32 ssa_80 = load_const (0x00000005 /* 0.000000 */) vec1 32 ssa_81 = ushr ssa_78, ssa_80 vec1 32 ssa_82 = load_const (0x00000007 /* 0.000000 */) r1 = iand ssa_81, ssa_82 vec1 32 ssa_84 = ishl ssa_74.x, ssa_80 vec1 32 ssa_85 = ishl ssa_74.y, ssa_11 vec1 32 ssa_86 = iadd ssa_84, r0 vec1 32 ssa_87 = iadd ssa_85, r1 vec1 32 ssa_88 = u2f32 ssa_86 vec1 32 ssa_89 = u2f32 ssa_87 vec1 32 ssa_90 = fadd ssa_88, ssa_3 vec1 32 ssa_91 = fadd ssa_89, ssa_3 vec1 32 ssa_92 = fmul ssa_69.x, ssa_90 vec1 32 ssa_93 = fmul ssa_69.y, ssa_91 vec1 32 ssa_94 = fmul ssa_68.x, ssa_92 vec1 32 ssa_95 = fmul ssa_68.y, ssa_93 vec1 32 ssa_96 = fmul ssa_66.x, ssa_95 vec1 32 ssa_97 = fmul ssa_66.y, ssa_95 vec1 32 ssa_98 = ffma ssa_65.x, ssa_94, ssa_96 vec1 32 ssa_99 = ffma ssa_65.y, ssa_94, ssa_97 vec1 32 ssa_100 = ffma ssa_71.x, ssa_73.x, ssa_98 vec1 32 ssa_101 = ffma ssa_71.y, ssa_73.y, ssa_99 vec1 32 ssa_102 = u2f32 ssa_84 vec1 32 ssa_103 = u2f32 ssa_85 vec1 32 ssa_104 = fadd ssa_102, ssa_3 vec1 32 ssa_105 = fadd ssa_103, ssa_3 vec1 32 ssa_106 = fmul ssa_69.x, ssa_104 vec1 32 ssa_107 = fmul ssa_69.y, ssa_105 vec1 32 ssa_108 = fmul ssa_68.x, ssa_106 vec1 32 ssa_109 = fmul ssa_68.y, ssa_107 vec1 32 ssa_110 = fmul ssa_66.x, ssa_109 vec1 32 ssa_111 = fmul ssa_66.y, ssa_109 vec1 32 ssa_112 = ffma ssa_65.x, ssa_108, ssa_110 vec1 32 ssa_113 = ffma ssa_65.y, ssa_108, ssa_111 vec1 32 ssa_114 = ffma ssa_71.x, ssa_73.x, ssa_112 vec1 32 ssa_115 = ffma ssa_71.y, ssa_73.y, ssa_113 vec2 32 ssa_116 = intrinsic load_ubo (ssa_0, ssa_4) (8, 0) /* align_mul=8 */ /* align_offset=0 */ vec1 32 ssa_117 = ffma ssa_114, ssa_116.x, ssa_5 vec1 32 ssa_118 = ffma ssa_115, ssa_116.y, ssa_5 vec1 32 ssa_119 = ffract ssa_117 vec1 32 ssa_120 = ffract ssa_118 vec1 32 ssa_121 = ffma -ssa_71.x, ssa_119, ssa_114 vec1 32 ssa_122 = ffma -ssa_71.y, ssa_120, ssa_115 vec1 32 ssa_123 = ffma ssa_100, ssa_116.x, ssa_5 vec1 32 ssa_124 = ffma ssa_101, ssa_116.y, ssa_5 vec1 32 ssa_125 = ffract ssa_123 vec1 32 ssa_126 = ffract ssa_124 vec1 32 ssa_127 = ffma -ssa_71.x, ssa_125, ssa_100 vec1 32 ssa_128 = ffma -ssa_71.y, ssa_126, ssa_101 vec1 32 ssa_129 = fadd ssa_127, -ssa_121 vec1 32 ssa_130 = fadd ssa_128, -ssa_122 vec1 32 ssa_131 = fmul ssa_129, ssa_116.x vec1 32 ssa_132 = fmul ssa_130, ssa_116.y vec1 32 ssa_133 = fround_even ssa_131 vec1 32 ssa_134 = fround_even ssa_132 vec1 32 ssa_135 = f2i32 ssa_133 vec1 32 ssa_136 = f2i32 ssa_134 /* succs: block_1 */ loop { block block_1: /* preds: block_0 block_9 */ vec1 32 ssa_138 = ige32 r1, ssa_6 /* succs: block_2 block_3 */ if ssa_138 { block block_2: /* preds: block_1 */ break /* succs: block_10 */ } else { block block_3: /* preds: block_1 */ /* succs: block_4 */ } block block_4: /* preds: block_3 */ r2 = imov r0 /* succs: block_5 */ loop { block block_5: /* preds: block_4 block_8 */ vec1 32 ssa_140 = ige32 r2, ssa_7 /* succs: block_6 block_7 */ if ssa_140 { block block_6: /* preds: block_5 */ break /* succs: block_9 */ } else { 
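/* annotation (not compiler output): blocks 1-9 form the cooperative preload. The 32x8 workgroup strides over a 30x14 source tile (inner bound ssa_7 = 0x1e, outer bound ssa_6 = 0xe, strides 0x20 and 0x8), sampling texture0 and scattering the .x/.y/.z components into the three shared arrays at index 30*y + x; the group_memory_barrier + barrier in block_10 then publishes the tile before the gather phase, which reads only shared memory and the lut. */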
block block_7: /* preds: block_5 */ /* succs: block_8 */ } block block_8: /* preds: block_7 */ vec1 32 ssa_141 = iadd r2, ssa_8 vec1 32 ssa_142 = i2f32 ssa_141 vec1 32 ssa_143 = iadd r1, ssa_8 vec1 32 ssa_144 = i2f32 ssa_143 vec1 32 ssa_145 = ffma ssa_71.x, ssa_142, ssa_121 vec1 32 ssa_146 = ffma ssa_71.y, ssa_144, ssa_122 vec2 32 ssa_147 = vec2 ssa_145, ssa_146 vec4 32 ssa_148 = tex ssa_147 (coord), ssa_0 (lod), 1 (texture), 1 (sampler), vec1 32 ssa_149 = imul ssa_7, r1 vec1 32 ssa_150 = iadd ssa_149, r2 vec1 32 ssa_151 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_152 = ishl ssa_150, ssa_151 vec1 32 ssa_153 = imov ssa_148.x intrinsic store_shared (ssa_153, ssa_152) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_154 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_155 = iadd ssa_154, ssa_152 vec1 32 ssa_156 = imov ssa_148.y intrinsic store_shared (ssa_156, ssa_155) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_157 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_158 = iadd ssa_157, ssa_152 vec1 32 ssa_159 = imov ssa_148.z intrinsic store_shared (ssa_159, ssa_158) (0, 1, 4, 0) /* base=0 */ /* wrmask=x */ /* align_mul=4 */ /* align_offset=0 */ r2 = iadd r2, ssa_4 /* succs: block_5 */ } block block_9: /* preds: block_6 */ r1 = iadd r1, ssa_10 /* succs: block_1 */ } block block_10: /* preds: block_2 */ intrinsic group_memory_barrier () () intrinsic barrier () () vec1 32 ssa_162 = imul ssa_7, ssa_136 vec1 32 ssa_163 = iadd ssa_162, ssa_135 vec1 32 ssa_164 = iadd ssa_163, ssa_11 vec1 32 ssa_165 = fadd ssa_12, -ssa_126 vec1 32 ssa_166 = fmul ssa_165, ssa_165 vec1 32 ssa_167 = ffma ssa_125, ssa_125, ssa_166 vec1 32 ssa_168 = fsqrt ssa_167 vec1 32 ssa_169 = flt32 ssa_168, ssa_13 /* succs: block_11 block_12 */ if ssa_169 { block block_11: /* preds: block_10 */ vec1 32 ssa_170 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_171 = fmul ssa_168, ssa_170 vec1 32 ssa_172 = flrp ssa_14, ssa_15, ssa_171 vec4 32 ssa_173 = tex ssa_172 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = imov ssa_173.x vec1 32 ssa_175 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_176 = ishl ssa_164, ssa_175 vec1 32 ssa_177 = intrinsic load_shared (ssa_176) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = fmul ssa_173.x, ssa_177 vec1 32 ssa_179 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_180 = iadd ssa_179, ssa_176 vec1 32 ssa_181 = intrinsic load_shared (ssa_180) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = fmul ssa_173.x, ssa_181 vec1 32 ssa_183 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_184 = iadd ssa_183, ssa_176 vec1 32 ssa_185 = intrinsic load_shared (ssa_184) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = fmul ssa_173.x, ssa_185 /* succs: block_13 */ } else { block block_12: /* preds: block_10 */ r3 = imov ssa_0 r6 = imov r3 r5 = imov r6 r4 = imov r5 /* succs: block_13 */ } block block_13: /* preds: block_11 block_12 */ vec1 32 ssa_191 = iadd ssa_163, ssa_9 vec1 32 ssa_192 = fadd ssa_16, -ssa_125 vec1 32 ssa_193 = ffma ssa_192, ssa_192, ssa_166 vec1 32 ssa_194 = fsqrt ssa_193 vec1 32 ssa_195 = flt32 ssa_194, ssa_13 /* succs: block_14 block_15 */ if ssa_195 { block block_14: /* preds: block_13 */ vec1 32 ssa_196 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_197 = fmul ssa_194, ssa_196 vec1 32 ssa_198 = flrp ssa_14, ssa_15, ssa_197 vec4 32 ssa_199 = tex ssa_198 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd 
r3, ssa_199.x vec1 32 ssa_201 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_202 = ishl ssa_191, ssa_201 vec1 32 ssa_203 = intrinsic load_shared (ssa_202) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_199.x, ssa_203, r4 vec1 32 ssa_205 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_206 = iadd ssa_205, ssa_202 vec1 32 ssa_207 = intrinsic load_shared (ssa_206) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_199.x, ssa_207, r5 vec1 32 ssa_209 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_210 = iadd ssa_209, ssa_202 vec1 32 ssa_211 = intrinsic load_shared (ssa_210) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_199.x, ssa_211, r6 /* succs: block_16 */ } else { block block_15: /* preds: block_13 */ /* succs: block_16 */ } block block_16: /* preds: block_14 block_15 */ vec1 32 ssa_217 = iadd ssa_163, ssa_17 vec1 32 ssa_218 = fadd ssa_18, -ssa_125 vec1 32 ssa_219 = fadd ssa_18, -ssa_126 vec1 32 ssa_220 = fmul ssa_219, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = fsqrt ssa_221 vec1 32 ssa_223 = flt32 ssa_222, ssa_13 /* succs: block_17 block_18 */ if ssa_223 { block block_17: /* preds: block_16 */ vec1 32 ssa_224 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_225 = fmul ssa_222, ssa_224 vec1 32 ssa_226 = flrp ssa_14, ssa_15, ssa_225 vec4 32 ssa_227 = tex ssa_226 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_227.x vec1 32 ssa_229 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_230 = ishl ssa_217, ssa_229 vec1 32 ssa_231 = intrinsic load_shared (ssa_230) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_227.x, ssa_231, r4 vec1 32 ssa_233 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_234 = iadd ssa_233, ssa_230 vec1 32 ssa_235 = intrinsic load_shared (ssa_234) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_227.x, ssa_235, r5 vec1 32 ssa_237 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_238 = iadd ssa_237, ssa_230 vec1 32 ssa_239 = intrinsic load_shared (ssa_238) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_227.x, ssa_239, r6 /* succs: block_19 */ } else { block block_18: /* preds: block_16 */ /* succs: block_19 */ } block block_19: /* preds: block_17 block_18 */ vec1 32 ssa_245 = iadd ssa_163, ssa_4 vec1 32 ssa_246 = fadd ssa_19, -ssa_125 vec1 32 ssa_247 = ffma ssa_246, ssa_246, ssa_220 vec1 32 ssa_248 = fsqrt ssa_247 vec1 32 ssa_249 = flt32 ssa_248, ssa_13 /* succs: block_20 block_21 */ if ssa_249 { block block_20: /* preds: block_19 */ vec1 32 ssa_250 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_251 = fmul ssa_248, ssa_250 vec1 32 ssa_252 = flrp ssa_14, ssa_15, ssa_251 vec4 32 ssa_253 = tex ssa_252 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_253.x vec1 32 ssa_255 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_256 = ishl ssa_245, ssa_255 vec1 32 ssa_257 = intrinsic load_shared (ssa_256) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_253.x, ssa_257, r4 vec1 32 ssa_259 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_260 = iadd ssa_259, ssa_256 vec1 32 ssa_261 = intrinsic load_shared (ssa_260) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_253.x, ssa_261, r5 vec1 32 ssa_263 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_264 = iadd ssa_263, ssa_256 vec1 32 ssa_265 = intrinsic load_shared (ssa_264) (0, 4, 0) /* base=0 */ /* align_mul=4 */ 
/* align_offset=0 */ r6 = ffma ssa_253.x, ssa_265, r6 /* succs: block_22 */ } else { block block_21: /* preds: block_19 */ /* succs: block_22 */ } block block_22: /* preds: block_20 block_21 */ vec1 32 ssa_271 = iadd ssa_163, ssa_20 vec1 32 ssa_272 = ffma ssa_125, ssa_125, ssa_220 vec1 32 ssa_273 = fsqrt ssa_272 vec1 32 ssa_274 = flt32 ssa_273, ssa_13 /* succs: block_23 block_24 */ if ssa_274 { block block_23: /* preds: block_22 */ vec1 32 ssa_275 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_276 = fmul ssa_273, ssa_275 vec1 32 ssa_277 = flrp ssa_14, ssa_15, ssa_276 vec4 32 ssa_278 = tex ssa_277 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_278.x vec1 32 ssa_280 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_281 = ishl ssa_271, ssa_280 vec1 32 ssa_282 = intrinsic load_shared (ssa_281) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_278.x, ssa_282, r4 vec1 32 ssa_284 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_285 = iadd ssa_284, ssa_281 vec1 32 ssa_286 = intrinsic load_shared (ssa_285) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_278.x, ssa_286, r5 vec1 32 ssa_288 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_289 = iadd ssa_288, ssa_281 vec1 32 ssa_290 = intrinsic load_shared (ssa_289) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_278.x, ssa_290, r6 /* succs: block_25 */ } else { block block_24: /* preds: block_22 */ /* succs: block_25 */ } block block_25: /* preds: block_23 block_24 */ vec1 32 ssa_296 = iadd ssa_163, ssa_21 vec1 32 ssa_297 = ffma ssa_192, ssa_192, ssa_220 vec1 32 ssa_298 = fsqrt ssa_297 vec1 32 ssa_299 = flt32 ssa_298, ssa_13 /* succs: block_26 block_27 */ if ssa_299 { block block_26: /* preds: block_25 */ vec1 32 ssa_300 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_301 = fmul ssa_298, ssa_300 vec1 32 ssa_302 = flrp ssa_14, ssa_15, ssa_301 vec4 32 ssa_303 = tex ssa_302 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_303.x vec1 32 ssa_305 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_306 = ishl ssa_296, ssa_305 vec1 32 ssa_307 = intrinsic load_shared (ssa_306) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_303.x, ssa_307, r4 vec1 32 ssa_309 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_310 = iadd ssa_309, ssa_306 vec1 32 ssa_311 = intrinsic load_shared (ssa_310) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_303.x, ssa_311, r5 vec1 32 ssa_313 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_314 = iadd ssa_313, ssa_306 vec1 32 ssa_315 = intrinsic load_shared (ssa_314) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_303.x, ssa_315, r6 /* succs: block_28 */ } else { block block_27: /* preds: block_25 */ /* succs: block_28 */ } block block_28: /* preds: block_26 block_27 */ vec1 32 ssa_321 = iadd ssa_163, ssa_22 vec1 32 ssa_322 = fadd ssa_23, -ssa_125 vec1 32 ssa_323 = ffma ssa_322, ssa_322, ssa_220 vec1 32 ssa_324 = fsqrt ssa_323 vec1 32 ssa_325 = flt32 ssa_324, ssa_13 /* succs: block_29 block_30 */ if ssa_325 { block block_29: /* preds: block_28 */ vec1 32 ssa_326 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_327 = fmul ssa_324, ssa_326 vec1 32 ssa_328 = flrp ssa_14, ssa_15, ssa_327 vec4 32 ssa_329 = tex ssa_328 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_329.x vec1 32 ssa_331 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_332 = ishl ssa_321, ssa_331 vec1 32 ssa_333 = intrinsic 
load_shared (ssa_332) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_329.x, ssa_333, r4 vec1 32 ssa_335 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_336 = iadd ssa_335, ssa_332 vec1 32 ssa_337 = intrinsic load_shared (ssa_336) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_329.x, ssa_337, r5 vec1 32 ssa_339 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_340 = iadd ssa_339, ssa_332 vec1 32 ssa_341 = intrinsic load_shared (ssa_340) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_329.x, ssa_341, r6 /* succs: block_31 */ } else { block block_30: /* preds: block_28 */ /* succs: block_31 */ } block block_31: /* preds: block_29 block_30 */ vec1 32 ssa_347 = iadd ssa_163, ssa_24 vec1 32 ssa_348 = fadd ssa_25, -ssa_125 vec1 32 ssa_349 = ffma ssa_348, ssa_348, ssa_220 vec1 32 ssa_350 = fsqrt ssa_349 vec1 32 ssa_351 = flt32 ssa_350, ssa_13 /* succs: block_32 block_33 */ if ssa_351 { block block_32: /* preds: block_31 */ vec1 32 ssa_352 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_353 = fmul ssa_350, ssa_352 vec1 32 ssa_354 = flrp ssa_14, ssa_15, ssa_353 vec4 32 ssa_355 = tex ssa_354 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_355.x vec1 32 ssa_357 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_358 = ishl ssa_347, ssa_357 vec1 32 ssa_359 = intrinsic load_shared (ssa_358) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_355.x, ssa_359, r4 vec1 32 ssa_361 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_362 = iadd ssa_361, ssa_358 vec1 32 ssa_363 = intrinsic load_shared (ssa_362) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_355.x, ssa_363, r5 vec1 32 ssa_365 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_366 = iadd ssa_365, ssa_358 vec1 32 ssa_367 = intrinsic load_shared (ssa_366) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_355.x, ssa_367, r6 /* succs: block_34 */ } else { block block_33: /* preds: block_31 */ /* succs: block_34 */ } block block_34: /* preds: block_32 block_33 */ vec1 32 ssa_373 = iadd ssa_163, ssa_26 vec1 32 ssa_374 = fadd ssa_19, -ssa_126 vec1 32 ssa_375 = fmul ssa_374, ssa_374 vec1 32 ssa_376 = ffma ssa_218, ssa_218, ssa_375 vec1 32 ssa_377 = fsqrt ssa_376 vec1 32 ssa_378 = flt32 ssa_377, ssa_13 /* succs: block_35 block_36 */ if ssa_378 { block block_35: /* preds: block_34 */ vec1 32 ssa_379 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_380 = fmul ssa_377, ssa_379 vec1 32 ssa_381 = flrp ssa_14, ssa_15, ssa_380 vec4 32 ssa_382 = tex ssa_381 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r3 = fadd r3, ssa_382.x vec1 32 ssa_384 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_385 = ishl ssa_373, ssa_384 vec1 32 ssa_386 = intrinsic load_shared (ssa_385) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r4 = ffma ssa_382.x, ssa_386, r4 vec1 32 ssa_388 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_389 = iadd ssa_388, ssa_385 vec1 32 ssa_390 = intrinsic load_shared (ssa_389) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r5 = ffma ssa_382.x, ssa_390, r5 vec1 32 ssa_392 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_393 = iadd ssa_392, ssa_385 vec1 32 ssa_394 = intrinsic load_shared (ssa_393) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r6 = ffma ssa_382.x, ssa_394, r6 /* succs: block_37 */ } else { block block_36: /* preds: block_34 */ /* succs: block_37 */ } block block_37: /* preds: 
block_35 block_36 */ vec1 32 ssa_400 = iadd ssa_163, ssa_27 vec1 32 ssa_401 = ffma ssa_246, ssa_246, ssa_375 vec1 32 ssa_402 = fsqrt ssa_401 vec1 32 ssa_403 = load_const (0x3e9e1b5f /* 0.308803 */) vec1 32 ssa_404 = fmul ssa_402, ssa_403 vec1 32 ssa_405 = flrp ssa_14, ssa_15, ssa_404 vec4 32 ssa_406 = tex ssa_405 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_407 = fadd r3, ssa_406.x vec1 32 ssa_408 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_409 = ishl ssa_400, ssa_408 vec1 32 ssa_410 = intrinsic load_shared (ssa_409) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_411 = ffma ssa_406.x, ssa_410, r4 vec1 32 ssa_412 = load_const (0x00000690 /* 0.000000 */) vec1 32 ssa_413 = iadd ssa_412, ssa_409 vec1 32 ssa_414 = intrinsic load_shared (ssa_413) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_415 = ffma ssa_406.x, ssa_414, r5 vec1 32 ssa_416 = load_const (0x00000d20 /* 0.000000 */) vec1 32 ssa_417 = iadd ssa_416, ssa_409 vec1 32 ssa_418 = intrinsic load_shared (ssa_417) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_419 = ffma ssa_406.x, ssa_418, r6 vec1 32 ssa_420 = iadd ssa_163, ssa_28 vec1 32 ssa_421 = ffma ssa_125, ssa_125, ssa_375 vec1 32 ssa_422 = fsqrt ssa_421 vec1 32 ssa_423 = fmul ssa_422, ssa_403 vec1 32 ssa_424 = flrp ssa_14, ssa_15, ssa_423 vec4 32 ssa_425 = tex ssa_424 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_426 = fadd ssa_407, ssa_425.x vec1 32 ssa_427 = ishl ssa_420, ssa_408 vec1 32 ssa_428 = intrinsic load_shared (ssa_427) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_429 = ffma ssa_425.x, ssa_428, ssa_411 vec1 32 ssa_430 = iadd ssa_412, ssa_427 vec1 32 ssa_431 = intrinsic load_shared (ssa_430) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_432 = ffma ssa_425.x, ssa_431, ssa_415 vec1 32 ssa_433 = iadd ssa_416, ssa_427 vec1 32 ssa_434 = intrinsic load_shared (ssa_433) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_435 = ffma ssa_425.x, ssa_434, ssa_419 vec1 32 ssa_436 = iadd ssa_163, ssa_1 vec1 32 ssa_437 = ffma ssa_192, ssa_192, ssa_375 vec1 32 ssa_438 = fsqrt ssa_437 vec1 32 ssa_439 = fmul ssa_438, ssa_403 vec1 32 ssa_440 = flrp ssa_14, ssa_15, ssa_439 vec4 32 ssa_441 = tex ssa_440 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_442 = fadd ssa_426, ssa_441.x vec1 32 ssa_443 = ishl ssa_436, ssa_408 vec1 32 ssa_444 = intrinsic load_shared (ssa_443) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_445 = ffma ssa_441.x, ssa_444, ssa_429 vec1 32 ssa_446 = iadd ssa_412, ssa_443 vec1 32 ssa_447 = intrinsic load_shared (ssa_446) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_448 = ffma ssa_441.x, ssa_447, ssa_432 vec1 32 ssa_449 = iadd ssa_416, ssa_443 vec1 32 ssa_450 = intrinsic load_shared (ssa_449) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_451 = ffma ssa_441.x, ssa_450, ssa_435 vec1 32 ssa_452 = iadd ssa_163, ssa_29 vec1 32 ssa_453 = ffma ssa_322, ssa_322, ssa_375 vec1 32 ssa_454 = fsqrt ssa_453 vec1 32 ssa_455 = fmul ssa_454, ssa_403 vec1 32 ssa_456 = flrp ssa_14, ssa_15, ssa_455 vec4 32 ssa_457 = tex ssa_456 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r7 = fadd ssa_442, ssa_457.x vec1 32 ssa_459 = ishl ssa_452, ssa_408 vec1 32 ssa_460 = intrinsic load_shared (ssa_459) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r8 = ffma ssa_457.x, ssa_460, ssa_445 vec1 32 
ssa_462 = iadd ssa_412, ssa_459 vec1 32 ssa_463 = intrinsic load_shared (ssa_462) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r9 = ffma ssa_457.x, ssa_463, ssa_448 vec1 32 ssa_465 = iadd ssa_416, ssa_459 vec1 32 ssa_466 = intrinsic load_shared (ssa_465) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r10 = ffma ssa_457.x, ssa_466, ssa_451 vec1 32 ssa_468 = iadd ssa_163, ssa_30 vec1 32 ssa_469 = ffma ssa_348, ssa_348, ssa_375 vec1 32 ssa_470 = fsqrt ssa_469 vec1 32 ssa_471 = flt32 ssa_470, ssa_13 /* succs: block_38 block_39 */ if ssa_471 { block block_38: /* preds: block_37 */ vec1 32 ssa_472 = fmul ssa_470, ssa_403 vec1 32 ssa_473 = flrp ssa_14, ssa_15, ssa_472 vec4 32 ssa_474 = tex ssa_473 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r7 = fadd r7, ssa_474.x vec1 32 ssa_476 = ishl ssa_468, ssa_408 vec1 32 ssa_477 = intrinsic load_shared (ssa_476) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r8 = ffma ssa_474.x, ssa_477, r8 vec1 32 ssa_479 = iadd ssa_412, ssa_476 vec1 32 ssa_480 = intrinsic load_shared (ssa_479) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r9 = ffma ssa_474.x, ssa_480, r9 vec1 32 ssa_482 = iadd ssa_416, ssa_476 vec1 32 ssa_483 = intrinsic load_shared (ssa_482) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r10 = ffma ssa_474.x, ssa_483, r10 /* succs: block_40 */ } else { block block_39: /* preds: block_37 */ /* succs: block_40 */ } block block_40: /* preds: block_38 block_39 */ vec1 32 ssa_489 = iadd ssa_163, ssa_31 vec1 32 ssa_490 = fadd ssa_12, -ssa_125 vec1 32 ssa_491 = fmul ssa_126, ssa_126 vec1 32 ssa_492 = ffma ssa_490, ssa_490, ssa_491 vec1 32 ssa_493 = fsqrt ssa_492 vec1 32 ssa_494 = flt32 ssa_493, ssa_13 /* succs: block_41 block_42 */ if ssa_494 { block block_41: /* preds: block_40 */ vec1 32 ssa_495 = fmul ssa_493, ssa_403 vec1 32 ssa_496 = flrp ssa_14, ssa_15, ssa_495 vec4 32 ssa_497 = tex ssa_496 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r7 = fadd r7, ssa_497.x vec1 32 ssa_499 = ishl ssa_489, ssa_408 vec1 32 ssa_500 = intrinsic load_shared (ssa_499) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r8 = ffma ssa_497.x, ssa_500, r8 vec1 32 ssa_502 = iadd ssa_412, ssa_499 vec1 32 ssa_503 = intrinsic load_shared (ssa_502) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r9 = ffma ssa_497.x, ssa_503, r9 vec1 32 ssa_505 = iadd ssa_416, ssa_499 vec1 32 ssa_506 = intrinsic load_shared (ssa_505) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r10 = ffma ssa_497.x, ssa_506, r10 /* succs: block_43 */ } else { block block_42: /* preds: block_40 */ /* succs: block_43 */ } block block_43: /* preds: block_41 block_42 */ vec1 32 ssa_512 = iadd ssa_163, ssa_32 vec1 32 ssa_513 = ffma ssa_218, ssa_218, ssa_491 vec1 32 ssa_514 = fsqrt ssa_513 vec1 32 ssa_515 = flt32 ssa_514, ssa_13 /* succs: block_44 block_45 */ if ssa_515 { block block_44: /* preds: block_43 */ vec1 32 ssa_516 = fmul ssa_514, ssa_403 vec1 32 ssa_517 = flrp ssa_14, ssa_15, ssa_516 vec4 32 ssa_518 = tex ssa_517 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r7 = fadd r7, ssa_518.x vec1 32 ssa_520 = ishl ssa_512, ssa_408 vec1 32 ssa_521 = intrinsic load_shared (ssa_520) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r8 = ffma ssa_518.x, ssa_521, r8 vec1 32 ssa_523 = iadd ssa_412, ssa_520 vec1 32 ssa_524 = intrinsic load_shared (ssa_523) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r9 = ffma ssa_518.x, ssa_524, r9 vec1 32 ssa_526 = iadd ssa_416, ssa_520 vec1 
32 ssa_527 = intrinsic load_shared (ssa_526) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r10 = ffma ssa_518.x, ssa_527, r10 /* succs: block_46 */ } else { block block_45: /* preds: block_43 */ /* succs: block_46 */ } block block_46: /* preds: block_44 block_45 */ vec1 32 ssa_533 = iadd ssa_163, ssa_33 vec1 32 ssa_534 = ffma ssa_246, ssa_246, ssa_491 vec1 32 ssa_535 = fsqrt ssa_534 vec1 32 ssa_536 = fmul ssa_535, ssa_403 vec1 32 ssa_537 = flrp ssa_14, ssa_15, ssa_536 vec4 32 ssa_538 = tex ssa_537 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_539 = fadd r7, ssa_538.x vec1 32 ssa_540 = ishl ssa_533, ssa_408 vec1 32 ssa_541 = intrinsic load_shared (ssa_540) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_542 = ffma ssa_538.x, ssa_541, r8 vec1 32 ssa_543 = iadd ssa_412, ssa_540 vec1 32 ssa_544 = intrinsic load_shared (ssa_543) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_545 = ffma ssa_538.x, ssa_544, r9 vec1 32 ssa_546 = iadd ssa_416, ssa_540 vec1 32 ssa_547 = intrinsic load_shared (ssa_546) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_548 = ffma ssa_538.x, ssa_547, r10 vec1 32 ssa_549 = iadd ssa_163, ssa_34 vec1 32 ssa_550 = ffma ssa_125, ssa_125, ssa_491 vec1 32 ssa_551 = fsqrt ssa_550 vec1 32 ssa_552 = fmul ssa_551, ssa_403 vec1 32 ssa_553 = flrp ssa_14, ssa_15, ssa_552 vec4 32 ssa_554 = tex ssa_553 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_555 = fadd ssa_539, ssa_554.x vec1 32 ssa_556 = ishl ssa_549, ssa_408 vec1 32 ssa_557 = intrinsic load_shared (ssa_556) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_558 = ffma ssa_554.x, ssa_557, ssa_542 vec1 32 ssa_559 = iadd ssa_412, ssa_556 vec1 32 ssa_560 = intrinsic load_shared (ssa_559) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_561 = ffma ssa_554.x, ssa_560, ssa_545 vec1 32 ssa_562 = iadd ssa_416, ssa_556 vec1 32 ssa_563 = intrinsic load_shared (ssa_562) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_564 = ffma ssa_554.x, ssa_563, ssa_548 vec1 32 ssa_565 = iadd ssa_163, ssa_35 vec1 32 ssa_566 = ffma ssa_192, ssa_192, ssa_491 vec1 32 ssa_567 = fsqrt ssa_566 vec1 32 ssa_568 = fmul ssa_567, ssa_403 vec1 32 ssa_569 = flrp ssa_14, ssa_15, ssa_568 vec4 32 ssa_570 = tex ssa_569 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_571 = fadd ssa_555, ssa_570.x vec1 32 ssa_572 = ishl ssa_565, ssa_408 vec1 32 ssa_573 = intrinsic load_shared (ssa_572) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_574 = ffma ssa_570.x, ssa_573, ssa_558 vec1 32 ssa_575 = iadd ssa_412, ssa_572 vec1 32 ssa_576 = intrinsic load_shared (ssa_575) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_577 = ffma ssa_570.x, ssa_576, ssa_561 vec1 32 ssa_578 = iadd ssa_416, ssa_572 vec1 32 ssa_579 = intrinsic load_shared (ssa_578) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_580 = ffma ssa_570.x, ssa_579, ssa_564 vec1 32 ssa_581 = iadd ssa_163, ssa_36 vec1 32 ssa_582 = ffma ssa_322, ssa_322, ssa_491 vec1 32 ssa_583 = fsqrt ssa_582 vec1 32 ssa_584 = fmul ssa_583, ssa_403 vec1 32 ssa_585 = flrp ssa_14, ssa_15, ssa_584 vec4 32 ssa_586 = tex ssa_585 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd ssa_571, ssa_586.x vec1 32 ssa_588 = ishl ssa_581, ssa_408 vec1 32 ssa_589 = intrinsic load_shared (ssa_588) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma 
ssa_586.x, ssa_589, ssa_574 vec1 32 ssa_591 = iadd ssa_412, ssa_588 vec1 32 ssa_592 = intrinsic load_shared (ssa_591) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r13 = ffma ssa_586.x, ssa_592, ssa_577 vec1 32 ssa_594 = iadd ssa_416, ssa_588 vec1 32 ssa_595 = intrinsic load_shared (ssa_594) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_586.x, ssa_595, ssa_580 vec1 32 ssa_597 = iadd ssa_163, ssa_2 vec1 32 ssa_598 = ffma ssa_348, ssa_348, ssa_491 vec1 32 ssa_599 = fsqrt ssa_598 vec1 32 ssa_600 = flt32 ssa_599, ssa_13 /* succs: block_47 block_48 */ if ssa_600 { block block_47: /* preds: block_46 */ vec1 32 ssa_601 = fmul ssa_599, ssa_403 vec1 32 ssa_602 = flrp ssa_14, ssa_15, ssa_601 vec4 32 ssa_603 = tex ssa_602 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd r11, ssa_603.x vec1 32 ssa_605 = ishl ssa_597, ssa_408 vec1 32 ssa_606 = intrinsic load_shared (ssa_605) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma ssa_603.x, ssa_606, r12 vec1 32 ssa_608 = iadd ssa_412, ssa_605 vec1 32 ssa_609 = intrinsic load_shared (ssa_608) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r13 = ffma ssa_603.x, ssa_609, r13 vec1 32 ssa_611 = iadd ssa_416, ssa_605 vec1 32 ssa_612 = intrinsic load_shared (ssa_611) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_603.x, ssa_612, r14 /* succs: block_49 */ } else { block block_48: /* preds: block_46 */ /* succs: block_49 */ } block block_49: /* preds: block_47 block_48 */ vec1 32 ssa_618 = iadd ssa_163, ssa_37 vec1 32 ssa_619 = fadd ssa_38, -ssa_125 vec1 32 ssa_620 = ffma ssa_619, ssa_619, ssa_491 vec1 32 ssa_621 = fsqrt ssa_620 vec1 32 ssa_622 = flt32 ssa_621, ssa_13 /* succs: block_50 block_51 */ if ssa_622 { block block_50: /* preds: block_49 */ vec1 32 ssa_623 = fmul ssa_621, ssa_403 vec1 32 ssa_624 = flrp ssa_14, ssa_15, ssa_623 vec4 32 ssa_625 = tex ssa_624 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd r11, ssa_625.x vec1 32 ssa_627 = ishl ssa_618, ssa_408 vec1 32 ssa_628 = intrinsic load_shared (ssa_627) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma ssa_625.x, ssa_628, r12 vec1 32 ssa_630 = iadd ssa_412, ssa_627 vec1 32 ssa_631 = intrinsic load_shared (ssa_630) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r13 = ffma ssa_625.x, ssa_631, r13 vec1 32 ssa_633 = iadd ssa_416, ssa_627 vec1 32 ssa_634 = intrinsic load_shared (ssa_633) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_625.x, ssa_634, r14 /* succs: block_52 */ } else { block block_51: /* preds: block_49 */ /* succs: block_52 */ } block block_52: /* preds: block_50 block_51 */ vec1 32 ssa_640 = iadd ssa_163, ssa_39 vec1 32 ssa_641 = fadd ssa_16, -ssa_126 vec1 32 ssa_642 = fmul ssa_641, ssa_641 vec1 32 ssa_643 = ffma ssa_490, ssa_490, ssa_642 vec1 32 ssa_644 = fsqrt ssa_643 vec1 32 ssa_645 = flt32 ssa_644, ssa_13 /* succs: block_53 block_54 */ if ssa_645 { block block_53: /* preds: block_52 */ vec1 32 ssa_646 = fmul ssa_644, ssa_403 vec1 32 ssa_647 = flrp ssa_14, ssa_15, ssa_646 vec4 32 ssa_648 = tex ssa_647 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd r11, ssa_648.x vec1 32 ssa_650 = ishl ssa_640, ssa_408 vec1 32 ssa_651 = intrinsic load_shared (ssa_650) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma ssa_648.x, ssa_651, r12 vec1 32 ssa_653 = iadd ssa_412, ssa_650 vec1 32 ssa_654 = intrinsic load_shared (ssa_653) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* 
align_offset=0 */ r13 = ffma ssa_648.x, ssa_654, r13 vec1 32 ssa_656 = iadd ssa_416, ssa_650 vec1 32 ssa_657 = intrinsic load_shared (ssa_656) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_648.x, ssa_657, r14 /* succs: block_55 */ } else { block block_54: /* preds: block_52 */ /* succs: block_55 */ } block block_55: /* preds: block_53 block_54 */ vec1 32 ssa_663 = iadd ssa_163, ssa_40 vec1 32 ssa_664 = ffma ssa_218, ssa_218, ssa_642 vec1 32 ssa_665 = fsqrt ssa_664 vec1 32 ssa_666 = flt32 ssa_665, ssa_13 /* succs: block_56 block_57 */ if ssa_666 { block block_56: /* preds: block_55 */ vec1 32 ssa_667 = fmul ssa_665, ssa_403 vec1 32 ssa_668 = flrp ssa_14, ssa_15, ssa_667 vec4 32 ssa_669 = tex ssa_668 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r11 = fadd r11, ssa_669.x vec1 32 ssa_671 = ishl ssa_663, ssa_408 vec1 32 ssa_672 = intrinsic load_shared (ssa_671) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r12 = ffma ssa_669.x, ssa_672, r12 vec1 32 ssa_674 = iadd ssa_412, ssa_671 vec1 32 ssa_675 = intrinsic load_shared (ssa_674) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r13 = ffma ssa_669.x, ssa_675, r13 vec1 32 ssa_677 = iadd ssa_416, ssa_671 vec1 32 ssa_678 = intrinsic load_shared (ssa_677) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r14 = ffma ssa_669.x, ssa_678, r14 /* succs: block_58 */ } else { block block_57: /* preds: block_55 */ /* succs: block_58 */ } block block_58: /* preds: block_56 block_57 */ vec1 32 ssa_684 = iadd ssa_163, ssa_41 vec1 32 ssa_685 = ffma ssa_246, ssa_246, ssa_642 vec1 32 ssa_686 = fsqrt ssa_685 vec1 32 ssa_687 = fmul ssa_686, ssa_403 vec1 32 ssa_688 = flrp ssa_14, ssa_15, ssa_687 vec4 32 ssa_689 = tex ssa_688 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_690 = fadd r11, ssa_689.x vec1 32 ssa_691 = ishl ssa_684, ssa_408 vec1 32 ssa_692 = intrinsic load_shared (ssa_691) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_693 = ffma ssa_689.x, ssa_692, r12 vec1 32 ssa_694 = iadd ssa_412, ssa_691 vec1 32 ssa_695 = intrinsic load_shared (ssa_694) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_696 = ffma ssa_689.x, ssa_695, r13 vec1 32 ssa_697 = iadd ssa_416, ssa_691 vec1 32 ssa_698 = intrinsic load_shared (ssa_697) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_699 = ffma ssa_689.x, ssa_698, r14 vec1 32 ssa_700 = iadd ssa_163, ssa_42 vec1 32 ssa_701 = ffma ssa_125, ssa_125, ssa_642 vec1 32 ssa_702 = fsqrt ssa_701 vec1 32 ssa_703 = fmul ssa_702, ssa_403 vec1 32 ssa_704 = flrp ssa_14, ssa_15, ssa_703 vec4 32 ssa_705 = tex ssa_704 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_706 = fadd ssa_690, ssa_705.x vec1 32 ssa_707 = ishl ssa_700, ssa_408 vec1 32 ssa_708 = intrinsic load_shared (ssa_707) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_709 = ffma ssa_705.x, ssa_708, ssa_693 vec1 32 ssa_710 = iadd ssa_412, ssa_707 vec1 32 ssa_711 = intrinsic load_shared (ssa_710) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_712 = ffma ssa_705.x, ssa_711, ssa_696 vec1 32 ssa_713 = iadd ssa_416, ssa_707 vec1 32 ssa_714 = intrinsic load_shared (ssa_713) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_715 = ffma ssa_705.x, ssa_714, ssa_699 vec1 32 ssa_716 = iadd ssa_163, ssa_43 vec1 32 ssa_717 = ffma ssa_192, ssa_192, ssa_642 vec1 32 ssa_718 = fsqrt ssa_717 vec1 32 ssa_719 = fmul ssa_718, ssa_403 vec1 32 ssa_720 = flrp 
ssa_14, ssa_15, ssa_719 vec4 32 ssa_721 = tex ssa_720 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_722 = fadd ssa_706, ssa_721.x vec1 32 ssa_723 = ishl ssa_716, ssa_408 vec1 32 ssa_724 = intrinsic load_shared (ssa_723) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_725 = ffma ssa_721.x, ssa_724, ssa_709 vec1 32 ssa_726 = iadd ssa_412, ssa_723 vec1 32 ssa_727 = intrinsic load_shared (ssa_726) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_728 = ffma ssa_721.x, ssa_727, ssa_712 vec1 32 ssa_729 = iadd ssa_416, ssa_723 vec1 32 ssa_730 = intrinsic load_shared (ssa_729) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_731 = ffma ssa_721.x, ssa_730, ssa_715 vec1 32 ssa_732 = iadd ssa_163, ssa_44 vec1 32 ssa_733 = ffma ssa_322, ssa_322, ssa_642 vec1 32 ssa_734 = fsqrt ssa_733 vec1 32 ssa_735 = fmul ssa_734, ssa_403 vec1 32 ssa_736 = flrp ssa_14, ssa_15, ssa_735 vec4 32 ssa_737 = tex ssa_736 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r15 = fadd ssa_722, ssa_737.x vec1 32 ssa_739 = ishl ssa_732, ssa_408 vec1 32 ssa_740 = intrinsic load_shared (ssa_739) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r16 = ffma ssa_737.x, ssa_740, ssa_725 vec1 32 ssa_742 = iadd ssa_412, ssa_739 vec1 32 ssa_743 = intrinsic load_shared (ssa_742) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r17 = ffma ssa_737.x, ssa_743, ssa_728 vec1 32 ssa_745 = iadd ssa_416, ssa_739 vec1 32 ssa_746 = intrinsic load_shared (ssa_745) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r18 = ffma ssa_737.x, ssa_746, ssa_731 vec1 32 ssa_748 = iadd ssa_163, ssa_45 vec1 32 ssa_749 = ffma ssa_348, ssa_348, ssa_642 vec1 32 ssa_750 = fsqrt ssa_749 vec1 32 ssa_751 = flt32 ssa_750, ssa_13 /* succs: block_59 block_60 */ if ssa_751 { block block_59: /* preds: block_58 */ vec1 32 ssa_752 = fmul ssa_750, ssa_403 vec1 32 ssa_753 = flrp ssa_14, ssa_15, ssa_752 vec4 32 ssa_754 = tex ssa_753 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r15 = fadd r15, ssa_754.x vec1 32 ssa_756 = ishl ssa_748, ssa_408 vec1 32 ssa_757 = intrinsic load_shared (ssa_756) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r16 = ffma ssa_754.x, ssa_757, r16 vec1 32 ssa_759 = iadd ssa_412, ssa_756 vec1 32 ssa_760 = intrinsic load_shared (ssa_759) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r17 = ffma ssa_754.x, ssa_760, r17 vec1 32 ssa_762 = iadd ssa_416, ssa_756 vec1 32 ssa_763 = intrinsic load_shared (ssa_762) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r18 = ffma ssa_754.x, ssa_763, r18 /* succs: block_61 */ } else { block block_60: /* preds: block_58 */ /* succs: block_61 */ } block block_61: /* preds: block_59 block_60 */ vec1 32 ssa_769 = iadd ssa_163, ssa_46 vec1 32 ssa_770 = ffma ssa_619, ssa_619, ssa_642 vec1 32 ssa_771 = fsqrt ssa_770 vec1 32 ssa_772 = flt32 ssa_771, ssa_13 /* succs: block_62 block_63 */ if ssa_772 { block block_62: /* preds: block_61 */ vec1 32 ssa_773 = fmul ssa_771, ssa_403 vec1 32 ssa_774 = flrp ssa_14, ssa_15, ssa_773 vec4 32 ssa_775 = tex ssa_774 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r15 = fadd r15, ssa_775.x vec1 32 ssa_777 = ishl ssa_769, ssa_408 vec1 32 ssa_778 = intrinsic load_shared (ssa_777) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r16 = ffma ssa_775.x, ssa_778, r16 vec1 32 ssa_780 = iadd ssa_412, ssa_777 vec1 32 ssa_781 = intrinsic load_shared (ssa_780) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r17 = 
ffma ssa_775.x, ssa_781, r17 vec1 32 ssa_783 = iadd ssa_416, ssa_777 vec1 32 ssa_784 = intrinsic load_shared (ssa_783) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r18 = ffma ssa_775.x, ssa_784, r18 /* succs: block_64 */ } else { block block_63: /* preds: block_61 */ /* succs: block_64 */ } block block_64: /* preds: block_62 block_63 */ vec1 32 ssa_790 = iadd ssa_163, ssa_47 vec1 32 ssa_791 = fadd ssa_23, -ssa_126 vec1 32 ssa_792 = fmul ssa_791, ssa_791 vec1 32 ssa_793 = ffma ssa_218, ssa_218, ssa_792 vec1 32 ssa_794 = fsqrt ssa_793 vec1 32 ssa_795 = flt32 ssa_794, ssa_13 /* succs: block_65 block_66 */ if ssa_795 { block block_65: /* preds: block_64 */ vec1 32 ssa_796 = fmul ssa_794, ssa_403 vec1 32 ssa_797 = flrp ssa_14, ssa_15, ssa_796 vec4 32 ssa_798 = tex ssa_797 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r15 = fadd r15, ssa_798.x vec1 32 ssa_800 = ishl ssa_790, ssa_408 vec1 32 ssa_801 = intrinsic load_shared (ssa_800) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r16 = ffma ssa_798.x, ssa_801, r16 vec1 32 ssa_803 = iadd ssa_412, ssa_800 vec1 32 ssa_804 = intrinsic load_shared (ssa_803) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r17 = ffma ssa_798.x, ssa_804, r17 vec1 32 ssa_806 = iadd ssa_416, ssa_800 vec1 32 ssa_807 = intrinsic load_shared (ssa_806) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r18 = ffma ssa_798.x, ssa_807, r18 /* succs: block_67 */ } else { block block_66: /* preds: block_64 */ /* succs: block_67 */ } block block_67: /* preds: block_65 block_66 */ vec1 32 ssa_813 = iadd ssa_163, ssa_48 vec1 32 ssa_814 = ffma ssa_246, ssa_246, ssa_792 vec1 32 ssa_815 = fsqrt ssa_814 vec1 32 ssa_816 = fmul ssa_815, ssa_403 vec1 32 ssa_817 = flrp ssa_14, ssa_15, ssa_816 vec4 32 ssa_818 = tex ssa_817 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_819 = fadd r15, ssa_818.x vec1 32 ssa_820 = ishl ssa_813, ssa_408 vec1 32 ssa_821 = intrinsic load_shared (ssa_820) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_822 = ffma ssa_818.x, ssa_821, r16 vec1 32 ssa_823 = iadd ssa_412, ssa_820 vec1 32 ssa_824 = intrinsic load_shared (ssa_823) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_825 = ffma ssa_818.x, ssa_824, r17 vec1 32 ssa_826 = iadd ssa_416, ssa_820 vec1 32 ssa_827 = intrinsic load_shared (ssa_826) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_828 = ffma ssa_818.x, ssa_827, r18 vec1 32 ssa_829 = iadd ssa_163, ssa_49 vec1 32 ssa_830 = ffma ssa_125, ssa_125, ssa_792 vec1 32 ssa_831 = fsqrt ssa_830 vec1 32 ssa_832 = fmul ssa_831, ssa_403 vec1 32 ssa_833 = flrp ssa_14, ssa_15, ssa_832 vec4 32 ssa_834 = tex ssa_833 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_835 = fadd ssa_819, ssa_834.x vec1 32 ssa_836 = ishl ssa_829, ssa_408 vec1 32 ssa_837 = intrinsic load_shared (ssa_836) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_838 = ffma ssa_834.x, ssa_837, ssa_822 vec1 32 ssa_839 = iadd ssa_412, ssa_836 vec1 32 ssa_840 = intrinsic load_shared (ssa_839) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_841 = ffma ssa_834.x, ssa_840, ssa_825 vec1 32 ssa_842 = iadd ssa_416, ssa_836 vec1 32 ssa_843 = intrinsic load_shared (ssa_842) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_844 = ffma ssa_834.x, ssa_843, ssa_828 vec1 32 ssa_845 = iadd ssa_163, ssa_50 vec1 32 ssa_846 = ffma ssa_192, ssa_192, ssa_792 vec1 32 ssa_847 = fsqrt ssa_846 vec1 32 
ssa_848 = fmul ssa_847, ssa_403 vec1 32 ssa_849 = flrp ssa_14, ssa_15, ssa_848 vec4 32 ssa_850 = tex ssa_849 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), vec1 32 ssa_851 = fadd ssa_835, ssa_850.x vec1 32 ssa_852 = ishl ssa_845, ssa_408 vec1 32 ssa_853 = intrinsic load_shared (ssa_852) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_854 = ffma ssa_850.x, ssa_853, ssa_838 vec1 32 ssa_855 = iadd ssa_412, ssa_852 vec1 32 ssa_856 = intrinsic load_shared (ssa_855) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_857 = ffma ssa_850.x, ssa_856, ssa_841 vec1 32 ssa_858 = iadd ssa_416, ssa_852 vec1 32 ssa_859 = intrinsic load_shared (ssa_858) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_860 = ffma ssa_850.x, ssa_859, ssa_844 vec1 32 ssa_861 = iadd ssa_163, ssa_51 vec1 32 ssa_862 = ffma ssa_322, ssa_322, ssa_792 vec1 32 ssa_863 = fsqrt ssa_862 vec1 32 ssa_864 = fmul ssa_863, ssa_403 vec1 32 ssa_865 = flrp ssa_14, ssa_15, ssa_864 vec4 32 ssa_866 = tex ssa_865 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd ssa_851, ssa_866.x vec1 32 ssa_868 = ishl ssa_861, ssa_408 vec1 32 ssa_869 = intrinsic load_shared (ssa_868) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_866.x, ssa_869, ssa_854 vec1 32 ssa_871 = iadd ssa_412, ssa_868 vec1 32 ssa_872 = intrinsic load_shared (ssa_871) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_866.x, ssa_872, ssa_857 vec1 32 ssa_874 = iadd ssa_416, ssa_868 vec1 32 ssa_875 = intrinsic load_shared (ssa_874) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_866.x, ssa_875, ssa_860 vec1 32 ssa_877 = iadd ssa_163, ssa_52 vec1 32 ssa_878 = ffma ssa_348, ssa_348, ssa_792 vec1 32 ssa_879 = fsqrt ssa_878 vec1 32 ssa_880 = flt32 ssa_879, ssa_13 /* succs: block_68 block_69 */ if ssa_880 { block block_68: /* preds: block_67 */ vec1 32 ssa_881 = fmul ssa_879, ssa_403 vec1 32 ssa_882 = flrp ssa_14, ssa_15, ssa_881 vec4 32 ssa_883 = tex ssa_882 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_883.x vec1 32 ssa_885 = ishl ssa_877, ssa_408 vec1 32 ssa_886 = intrinsic load_shared (ssa_885) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_883.x, ssa_886, r20 vec1 32 ssa_888 = iadd ssa_412, ssa_885 vec1 32 ssa_889 = intrinsic load_shared (ssa_888) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_883.x, ssa_889, r21 vec1 32 ssa_891 = iadd ssa_416, ssa_885 vec1 32 ssa_892 = intrinsic load_shared (ssa_891) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_883.x, ssa_892, r22 /* succs: block_70 */ } else { block block_69: /* preds: block_67 */ /* succs: block_70 */ } block block_70: /* preds: block_68 block_69 */ vec1 32 ssa_898 = iadd ssa_163, ssa_53 vec1 32 ssa_899 = fadd ssa_25, -ssa_126 vec1 32 ssa_900 = fmul ssa_899, ssa_899 vec1 32 ssa_901 = ffma ssa_218, ssa_218, ssa_900 vec1 32 ssa_902 = fsqrt ssa_901 vec1 32 ssa_903 = flt32 ssa_902, ssa_13 /* succs: block_71 block_72 */ if ssa_903 { block block_71: /* preds: block_70 */ vec1 32 ssa_904 = fmul ssa_902, ssa_403 vec1 32 ssa_905 = flrp ssa_14, ssa_15, ssa_904 vec4 32 ssa_906 = tex ssa_905 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_906.x vec1 32 ssa_908 = ishl ssa_898, ssa_408 vec1 32 ssa_909 = intrinsic load_shared (ssa_908) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_906.x, ssa_909, r20 vec1 32 ssa_911 = iadd 
ssa_412, ssa_908 vec1 32 ssa_912 = intrinsic load_shared (ssa_911) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_906.x, ssa_912, r21 vec1 32 ssa_914 = iadd ssa_416, ssa_908 vec1 32 ssa_915 = intrinsic load_shared (ssa_914) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_906.x, ssa_915, r22 /* succs: block_73 */ } else { block block_72: /* preds: block_70 */ /* succs: block_73 */ } block block_73: /* preds: block_71 block_72 */ vec1 32 ssa_921 = iadd ssa_163, ssa_54 vec1 32 ssa_922 = ffma ssa_246, ssa_246, ssa_900 vec1 32 ssa_923 = fsqrt ssa_922 vec1 32 ssa_924 = flt32 ssa_923, ssa_13 /* succs: block_74 block_75 */ if ssa_924 { block block_74: /* preds: block_73 */ vec1 32 ssa_925 = fmul ssa_923, ssa_403 vec1 32 ssa_926 = flrp ssa_14, ssa_15, ssa_925 vec4 32 ssa_927 = tex ssa_926 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_927.x vec1 32 ssa_929 = ishl ssa_921, ssa_408 vec1 32 ssa_930 = intrinsic load_shared (ssa_929) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_927.x, ssa_930, r20 vec1 32 ssa_932 = iadd ssa_412, ssa_929 vec1 32 ssa_933 = intrinsic load_shared (ssa_932) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_927.x, ssa_933, r21 vec1 32 ssa_935 = iadd ssa_416, ssa_929 vec1 32 ssa_936 = intrinsic load_shared (ssa_935) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_927.x, ssa_936, r22 /* succs: block_76 */ } else { block block_75: /* preds: block_73 */ /* succs: block_76 */ } block block_76: /* preds: block_74 block_75 */ vec1 32 ssa_942 = iadd ssa_163, ssa_55 vec1 32 ssa_943 = ffma ssa_125, ssa_125, ssa_900 vec1 32 ssa_944 = fsqrt ssa_943 vec1 32 ssa_945 = flt32 ssa_944, ssa_13 /* succs: block_77 block_78 */ if ssa_945 { block block_77: /* preds: block_76 */ vec1 32 ssa_946 = fmul ssa_944, ssa_403 vec1 32 ssa_947 = flrp ssa_14, ssa_15, ssa_946 vec4 32 ssa_948 = tex ssa_947 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_948.x vec1 32 ssa_950 = ishl ssa_942, ssa_408 vec1 32 ssa_951 = intrinsic load_shared (ssa_950) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_948.x, ssa_951, r20 vec1 32 ssa_953 = iadd ssa_412, ssa_950 vec1 32 ssa_954 = intrinsic load_shared (ssa_953) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_948.x, ssa_954, r21 vec1 32 ssa_956 = iadd ssa_416, ssa_950 vec1 32 ssa_957 = intrinsic load_shared (ssa_956) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_948.x, ssa_957, r22 /* succs: block_79 */ } else { block block_78: /* preds: block_76 */ /* succs: block_79 */ } block block_79: /* preds: block_77 block_78 */ vec1 32 ssa_963 = iadd ssa_163, ssa_56 vec1 32 ssa_964 = ffma ssa_192, ssa_192, ssa_900 vec1 32 ssa_965 = fsqrt ssa_964 vec1 32 ssa_966 = flt32 ssa_965, ssa_13 /* succs: block_80 block_81 */ if ssa_966 { block block_80: /* preds: block_79 */ vec1 32 ssa_967 = fmul ssa_965, ssa_403 vec1 32 ssa_968 = flrp ssa_14, ssa_15, ssa_967 vec4 32 ssa_969 = tex ssa_968 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_969.x vec1 32 ssa_971 = ishl ssa_963, ssa_408 vec1 32 ssa_972 = intrinsic load_shared (ssa_971) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_969.x, ssa_972, r20 vec1 32 ssa_974 = iadd ssa_412, ssa_971 vec1 32 ssa_975 = intrinsic load_shared (ssa_974) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_969.x, 
ssa_975, r21 vec1 32 ssa_977 = iadd ssa_416, ssa_971 vec1 32 ssa_978 = intrinsic load_shared (ssa_977) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_969.x, ssa_978, r22 /* succs: block_82 */ } else { block block_81: /* preds: block_79 */ /* succs: block_82 */ } block block_82: /* preds: block_80 block_81 */ vec1 32 ssa_984 = iadd ssa_163, ssa_57 vec1 32 ssa_985 = ffma ssa_322, ssa_322, ssa_900 vec1 32 ssa_986 = fsqrt ssa_985 vec1 32 ssa_987 = flt32 ssa_986, ssa_13 /* succs: block_83 block_84 */ if ssa_987 { block block_83: /* preds: block_82 */ vec1 32 ssa_988 = fmul ssa_986, ssa_403 vec1 32 ssa_989 = flrp ssa_14, ssa_15, ssa_988 vec4 32 ssa_990 = tex ssa_989 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_990.x vec1 32 ssa_992 = ishl ssa_984, ssa_408 vec1 32 ssa_993 = intrinsic load_shared (ssa_992) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_990.x, ssa_993, r20 vec1 32 ssa_995 = iadd ssa_412, ssa_992 vec1 32 ssa_996 = intrinsic load_shared (ssa_995) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_990.x, ssa_996, r21 vec1 32 ssa_998 = iadd ssa_416, ssa_992 vec1 32 ssa_999 = intrinsic load_shared (ssa_998) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_990.x, ssa_999, r22 /* succs: block_85 */ } else { block block_84: /* preds: block_82 */ /* succs: block_85 */ } block block_85: /* preds: block_83 block_84 */ vec1 32 ssa_1005 = iadd ssa_163, ssa_58 vec1 32 ssa_1006 = ffma ssa_348, ssa_348, ssa_900 vec1 32 ssa_1007 = fsqrt ssa_1006 vec1 32 ssa_1008 = flt32 ssa_1007, ssa_13 /* succs: block_86 block_87 */ if ssa_1008 { block block_86: /* preds: block_85 */ vec1 32 ssa_1009 = fmul ssa_1007, ssa_403 vec1 32 ssa_1010 = flrp ssa_14, ssa_15, ssa_1009 vec4 32 ssa_1011 = tex ssa_1010 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_1011.x vec1 32 ssa_1013 = ishl ssa_1005, ssa_408 vec1 32 ssa_1014 = intrinsic load_shared (ssa_1013) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_1011.x, ssa_1014, r20 vec1 32 ssa_1016 = iadd ssa_412, ssa_1013 vec1 32 ssa_1017 = intrinsic load_shared (ssa_1016) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_1011.x, ssa_1017, r21 vec1 32 ssa_1019 = iadd ssa_416, ssa_1013 vec1 32 ssa_1020 = intrinsic load_shared (ssa_1019) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_1011.x, ssa_1020, r22 /* succs: block_88 */ } else { block block_87: /* preds: block_85 */ /* succs: block_88 */ } block block_88: /* preds: block_86 block_87 */ vec1 32 ssa_1026 = iadd ssa_163, ssa_59 vec1 32 ssa_1027 = fadd ssa_38, -ssa_126 vec1 32 ssa_1028 = fmul ssa_1027, ssa_1027 vec1 32 ssa_1029 = ffma ssa_125, ssa_125, ssa_1028 vec1 32 ssa_1030 = fsqrt ssa_1029 vec1 32 ssa_1031 = flt32 ssa_1030, ssa_13 /* succs: block_89 block_90 */ if ssa_1031 { block block_89: /* preds: block_88 */ vec1 32 ssa_1032 = fmul ssa_1030, ssa_403 vec1 32 ssa_1033 = flrp ssa_14, ssa_15, ssa_1032 vec4 32 ssa_1034 = tex ssa_1033 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_1034.x vec1 32 ssa_1036 = ishl ssa_1026, ssa_408 vec1 32 ssa_1037 = intrinsic load_shared (ssa_1036) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_1034.x, ssa_1037, r20 vec1 32 ssa_1039 = iadd ssa_412, ssa_1036 vec1 32 ssa_1040 = intrinsic load_shared (ssa_1039) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_1034.x, ssa_1040, 
r21 vec1 32 ssa_1042 = iadd ssa_416, ssa_1036 vec1 32 ssa_1043 = intrinsic load_shared (ssa_1042) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_1034.x, ssa_1043, r22 /* succs: block_91 */ } else { block block_90: /* preds: block_88 */ /* succs: block_91 */ } block block_91: /* preds: block_89 block_90 */ vec1 32 ssa_1049 = iadd ssa_163, ssa_60 vec1 32 ssa_1050 = ffma ssa_192, ssa_192, ssa_1028 vec1 32 ssa_1051 = fsqrt ssa_1050 vec1 32 ssa_1052 = flt32 ssa_1051, ssa_13 /* succs: block_92 block_93 */ if ssa_1052 { block block_92: /* preds: block_91 */ vec1 32 ssa_1053 = fmul ssa_1051, ssa_403 vec1 32 ssa_1054 = flrp ssa_14, ssa_15, ssa_1053 vec4 32 ssa_1055 = tex ssa_1054 (coord), ssa_0 (lod), 0 (texture), 0 (sampler), r19 = fadd r19, ssa_1055.x vec1 32 ssa_1057 = ishl ssa_1049, ssa_408 vec1 32 ssa_1058 = intrinsic load_shared (ssa_1057) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r20 = ffma ssa_1055.x, ssa_1058, r20 vec1 32 ssa_1060 = iadd ssa_412, ssa_1057 vec1 32 ssa_1061 = intrinsic load_shared (ssa_1060) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r21 = ffma ssa_1055.x, ssa_1061, r21 vec1 32 ssa_1063 = iadd ssa_416, ssa_1057 vec1 32 ssa_1064 = intrinsic load_shared (ssa_1063) (0, 4, 0) /* base=0 */ /* align_mul=4 */ /* align_offset=0 */ r22 = ffma ssa_1055.x, ssa_1064, r22 /* succs: block_94 */ } else { block block_93: /* preds: block_91 */ /* succs: block_94 */ } block block_94: /* preds: block_92 block_93 */ vec1 32 ssa_1070 = frcp r19 vec1 32 ssa_1071 = fmul.sat r20, ssa_1070 vec1 32 ssa_1072 = fmul.sat r21, ssa_1070 vec1 32 ssa_1073 = fmul.sat r22, ssa_1070 vec1 32 ssa_1074 = fadd ssa_61, -ssa_1071 vec1 32 ssa_1075 = fadd ssa_61, -ssa_1072 vec1 32 ssa_1076 = fadd ssa_61, -ssa_1073 vec1 32 ssa_1077 = load_const (0x41160a50 /* 9.377518 */) vec1 32 ssa_1078 = fmul ssa_1077, ssa_1074 vec1 32 ssa_1079 = fmul ssa_1077, ssa_1075 vec1 32 ssa_1080 = fmul ssa_1077, ssa_1076 vec1 32 ssa_1081 = fexp2 ssa_1078 vec1 32 ssa_1082 = fexp2 ssa_1079 vec1 32 ssa_1083 = fexp2 ssa_1080 vec1 32 ssa_1084 = fadd ssa_16, ssa_1081 vec1 32 ssa_1085 = fadd ssa_16, ssa_1082 vec1 32 ssa_1086 = fadd ssa_16, ssa_1083 vec1 32 ssa_1087 = frcp ssa_1084 vec1 32 ssa_1088 = frcp ssa_1085 vec1 32 ssa_1089 = frcp ssa_1086 vec1 32 ssa_1090 = fadd ssa_1087, ssa_62 vec1 32 ssa_1091 = fadd ssa_1088, ssa_62 vec1 32 ssa_1092 = fadd ssa_1089, ssa_62 vec1 32 ssa_1093 = load_const (0x3f9a9b5f /* 1.207867 */) vec1 32 ssa_1094 = fmul.sat ssa_1090, ssa_1093 vec1 32 ssa_1095 = fmul.sat ssa_1091, ssa_1093 vec1 32 ssa_1096 = fmul.sat ssa_1092, ssa_1093 vec1 32 ssa_1097 = fpow ssa_1094, ssa_63 vec1 32 ssa_1098 = fpow ssa_1095, ssa_63 vec1 32 ssa_1099 = fpow ssa_1096, ssa_63 vec1 32 ssa_1100 = undefined vec4 32 ssa_1101 = vec4 ssa_86, ssa_87, ssa_1100, ssa_1100 vec4 32 ssa_1102 = vec4 ssa_1097, ssa_1098, ssa_1099, ssa_16 intrinsic image_store (ssa_0, ssa_1101, ssa_1100, ssa_1102) (1, 0, 34842, 8) /* image_dim=2D */ /* image_dim=true */ /* format=34842 */ /* access=8 */ /* succs: block_95 */ block block_95: }
Native code for unnamed compute shader GLSL19
SIMD16 shader: 963 instructions. 2 loops. 89260 cycles. 0:0 spills:fills. Promoted 3 constants.
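
A note on what the NIR above computes (a reconstruction from the constants and control flow, with my own names; none of this is printed by the compiler): the two nested loops at the top are a cooperative load in which the 32x8 workgroup (the ishl by 5 and 3 of the workgroup ID) strides across a 30x14 tile of source texels, the bounds and strides showing up as 30D/14D and 32D/8D in the native code below, and scatters the .x/.y/.z of each sample into shared memory at byte offsets 0, 0x690 and 0xd20, i.e. three 420-float planes. After the barrier, blocks 10 to 93 walk a fully unrolled 8x8 filter footprint (offsets -3..+4 in x and y around the fractional position ssa_125/ssa_126): taps that can land outside the kernel radius are wrapped in "flt32 ..., ssa_13" guards, always-inside taps are not, and each live tap fetches a weight from the 1D LUT (the scalar-coordinate tex, texture 0) and accumulates a weight sum (r3, r7, r11, r15, r19) plus LUT-weighted planes from shared memory. The constants fit a polar (EWA) jinc-based scaler: 0.308803 is about 1/3.23832 (the jinc kernel radius, used to normalize the LUT coordinate) and 3.032708 is the distance cutoff. A minimal GLSL sketch of one tap under those assumptions:

    uniform sampler1D lut;           // the 1D weight LUT (texture/sampler 0)

    const float R      = 3.2383154;  // 1/0.308803: the LUT covers distances [0, R]
    const float CUTOFF = 3.032708;   // 0x404217e3: weights beyond this are negligible

    float tap_weight(float d) {
        if (d >= CUTOFF)
            return 0.0;              // the flt32/if guard around a corner tap
        // flrp 0.007812 .. 0.992188 is mix(1.0/128.0, 127.0/128.0, t): it maps
        // [0,1] onto the texel centers of what looks like a 64-entry LUT
        return textureLod(lut, mix(1.0 / 128.0, 127.0 / 128.0, d / R), 0.0).x;
    }

Blocks 94 and 95 then normalize and convert: frcp/fmul.sat divide the three accumulated planes by the weight sum and clamp; the fexp2 chain is a logistic curve whose constants match mpv's sigmoid un-mapping at the defaults --sigmoid-center=0.75 and --sigmoid-slope=6.5 (9.377518 is 6.5/ln 2, since the hardware exponential is base 2, and 0.007577/1.207867 are the offset and reciprocal scale that renormalize the curve to [0,1]); fpow by 0.416667, roughly 1/2.4, de-linearizes the result; and image_store writes it with alpha 1.0 at the invocation's coordinates (ssa_86, ssa_87). The same epilogue as a hedged sketch:

    vec3 finish(vec3 color, float wsum) {
        vec3 v = clamp(color * (1.0 / wsum), 0.0, 1.0);   // frcp + fmul.sat
        v = 1.0 / (1.0 + exp2(9.377518 * (0.75 - v)));    // logistic curve, base-2 form
        v = clamp((v - 0.007577) * 1.207867, 0.0, 1.0);   // fadd ssa_62, fmul.sat ssa_1093
        return pow(v, vec3(0.416667));                    // fpow ssa_63: ~1/2.4 gamma encode
    }
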
Compacted 15408 to 11536 bytes (25%) START B0 (516 cycles) mov(8) g10<1>UW 0x76543210V { align1 WE_all 1Q }; mov(16) g2<1>UD g0.1<0,1,0>UD { align1 1H compacted }; mov(16) g4<1>UD g0.6<0,1,0>UD { align1 1H }; mov(8) g7<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g8<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; shl(16) g50<1>D g1<0,1,0>D 0x00000004UD { align1 1H }; mov(1) g9<1>F 0x3f000000F /* 0.5F */ { align1 WE_all 1N }; add(8) g10.8<1>UW g10<8,8,1>UW 0x0008UW { align1 WE_all 1Q }; shl(16) g58<1>D g2<8,8,1>D 0x00000005UD { align1 1H }; shl(16) g60<1>D g4<8,8,1>D 0x00000003UD { align1 1H }; mov(1) g7.2<1>UD 0x00000000UD { align1 WE_all 1N compacted }; mov(1) g8.2<1>UD 0x00000004UD { align1 WE_all 1N compacted }; mov(16) g52<1>D g10<8,8,1>UW { align1 1H }; mov(16) g86<1>F g58<8,8,1>UD { align1 1H compacted }; mov(16) g88<1>F g60<8,8,1>UD { align1 1H compacted }; send(16) g5<1>UD g7<0,1,0>UD 0x02280303 const MsgDesc: (3, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; send(16) g3<1>UD g8<0,1,0>UD 0x02280303 const MsgDesc: (3, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; add(16) g54<1>D g52<8,8,1>D g50<8,8,1>D { align1 1H compacted }; add(16) g90<1>F g86<8,8,1>F 0x3f000000F /* 0.5F */ { align1 1H }; add(16) g92<1>F g88<8,8,1>F 0x3f000000F /* 0.5F */ { align1 1H }; and(16) g10<1>UD g54<8,8,1>UD 0x0000001fUD { align1 1H compacted }; shr(16) g56<1>UD g54<8,8,1>UD 0x00000005UD { align1 1H compacted }; add(16) g7<1>D g58<8,8,1>D g10<8,8,1>D { align1 1H compacted }; and(16) g12<1>UD g56<8,8,1>UD 0x00000007UD { align1 1H compacted }; mov(16) g62<1>F g7<8,8,1>UD { align1 1H compacted }; add(16) g1<1>D g60<8,8,1>D g12<8,8,1>D { align1 1H compacted }; add(16) g65<1>F g62<8,8,1>F 0x3f000000F /* 0.5F */ { align1 1H }; mov(16) g63<1>F g1<8,8,1>UD { align1 1H compacted }; mul(16) g68<1>F g4<0,1,0>F g65<8,8,1>F { align1 1H compacted }; mul(16) g96<1>F g4.1<0,1,0>F g92<8,8,1>F { align1 1H compacted }; mul(16) g94<1>F g4<0,1,0>F g90<8,8,1>F { align1 1H compacted }; add(16) g66<1>F g63<8,8,1>F 0x3f000000F /* 0.5F */ { align1 1H }; mul(16) g71<1>F g4.2<0,1,0>F g68<8,8,1>F { align1 1H compacted }; mul(16) g100<1>F g4.3<0,1,0>F g96<8,8,1>F { align1 1H compacted }; mul(16) g98<1>F g4.2<0,1,0>F g94<8,8,1>F { align1 1H compacted }; mul(16) g69<1>F g4.1<0,1,0>F g66<8,8,1>F { align1 1H compacted }; mul(16) g102<1>F g3<0,1,0>F g100<8,8,1>F { align1 1H compacted }; mul(16) g104<1>F g3.1<0,1,0>F g100<8,8,1>F { align1 1H compacted }; mul(16) g73<1>F g4.3<0,1,0>F g69<8,8,1>F { align1 1H compacted }; mad(16) g106<1>F g102<4,4,1>F g98<4,4,1>F g6.4<0,1,0>F { align16 1H }; mad(16) g108<1>F g104<4,4,1>F g98<4,4,1>F g6.5<0,1,0>F { align16 1H }; mul(16) g75<1>F g3<0,1,0>F g73<8,8,1>F { align1 1H compacted }; mul(16) g77<1>F g3.1<0,1,0>F g73<8,8,1>F { align1 1H compacted }; mad(16) g110<1>F g106<4,4,1>F g3.4<0,1,0>F g3.6<0,1,0>F { align16 1H }; mad(16) g79<1>F g75<4,4,1>F g71<4,4,1>F g6.4<0,1,0>F { align16 1H }; mad(16) g112<1>F g108<4,4,1>F g3.5<0,1,0>F g3.7<0,1,0>F { align16 1H }; mad(16) g81<1>F g77<4,4,1>F g71<4,4,1>F g6.5<0,1,0>F { align16 1H }; mad(16) g114<1>F -g9.0<0,1,0>F g6.0<0,1,0>F g110<4,4,1>F { align16 1H }; mad(16) g83<1>F g79<4,4,1>F g3.4<0,1,0>F g3.6<0,1,0>F { align16 1H }; mad(16) g116<1>F -g9.0<0,1,0>F g6.1<0,1,0>F g112<4,4,1>F { align16 1H }; mad(16) g85<1>F g81<4,4,1>F g3.5<0,1,0>F g3.7<0,1,0>F { align16 1H }; frc(16) g118<1>F g114<8,8,1>F { align1 1H compacted }; mad(16) g126<1>F -g9.0<0,1,0>F g6.0<0,1,0>F g83<4,4,1>F { align16 1H }; frc(16) g120<1>F g116<8,8,1>F { align1 1H compacted }; mad(16) 
g14<1>F -g9.0<0,1,0>F g6.1<0,1,0>F g85<4,4,1>F { align16 1H }; mad(16) g122<1>F g110<4,4,1>F g118<4,4,1>F -g3.6<0,1,0>F { align16 1H }; mad(16) g124<1>F g112<4,4,1>F g120<4,4,1>F -g3.7<0,1,0>F { align16 1H }; frc(16) g51<1>F g126<8,8,1>F { align1 1H compacted }; frc(16) g53<1>F g14<8,8,1>F { align1 1H compacted }; mad(16) g55<1>F g83<4,4,1>F g51<4,4,1>F -g3.6<0,1,0>F { align16 1H }; mad(16) g57<1>F g85<4,4,1>F g53<4,4,1>F -g3.7<0,1,0>F { align16 1H }; add(16) g59<1>F g55<8,8,1>F -g122<8,8,1>F { align1 1H compacted }; add(16) g61<1>F g57<8,8,1>F -g124<8,8,1>F { align1 1H compacted }; mul(16) g63<1>F g59<8,8,1>F g6<0,1,0>F { align1 1H compacted }; mul(16) g65<1>F g61<8,8,1>F g6.1<0,1,0>F { align1 1H compacted }; rnde(16) g67<1>F g63<8,8,1>F { align1 1H compacted }; rnde(16) g69<1>F g65<8,8,1>F { align1 1H compacted }; mov(16) g71<1>D g67<8,8,1>F { align1 1H compacted }; mov(16) g73<1>D g69<8,8,1>F { align1 1H compacted }; END B0 ->B1 START B2 <-B1 <-B7 (22 cycles) cmp.ge.f0.0(16) null<1>D g12<8,8,1>D 14D { align1 1H compacted }; END B1 ->B2 ->B8 (+f0.0) break(16) JIP: 256 UIP: 256 { align1 1H }; END B2 ->B1 ->B3 START B3 <-B2 (4 cycles) mov(16) g14<1>D g10<8,8,1>D { align1 1H compacted }; END B3 ->B4 START B5 <-B4 <-B6 (22 cycles) cmp.ge.f0.0(16) null<1>D g14<8,8,1>D 30D { align1 1H compacted }; END B4 ->B5 ->B7 (+f0.0) break(16) JIP: 200 UIP: 200 { align1 1H }; END B5 ->B4 ->B6 START B6 <-B5 (322 cycles) add(16) g75<1>D g14<8,8,1>D -3D { align1 1H compacted }; add(16) g79<1>D g12<8,8,1>D -3D { align1 1H compacted }; mul(16) g83<1>D g12<8,8,1>D 30D { align1 1H compacted }; mov(16) g77<1>F g75<8,8,1>D { align1 1H compacted }; mov(16) g81<1>F g79<8,8,1>D { align1 1H compacted }; add(16) g85<1>D g83<8,8,1>D g14<8,8,1>D { align1 1H compacted }; mad(16) g22<1>F g122<4,4,1>F g77<4,4,1>F g3.6<0,1,0>F { align16 1H }; mad(16) g24<1>F g124<4,4,1>F g81<4,4,1>F g3.7<0,1,0>F { align16 1H }; shl(16) g5<1>D g85<8,8,1>D 0x00000002UD { align1 1H }; send(16) g16<1>UW g22<8,8,1>UD 0x08658102 sampler MsgDesc: sample_lz SIMD16 Surface = 2 Sampler = 1 mlen 4 rlen 6 { align1 1H }; sends(16) nullUD g5UD g16UD 0x04025efe 0x00000080 dp data 1 MsgDesc: ( DC untyped surface write, Surface = 254, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H }; add(16) g22<1>D g5<8,8,1>D 1680D { align1 1H compacted }; sends(16) nullUD g22UD g18UD 0x04025efe 0x00000080 dp data 1 MsgDesc: ( DC untyped surface write, Surface = 254, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H }; add(16) g23<1>D g5<8,8,1>D 3360D { align1 1H compacted }; sends(16) nullUD g23UD g20UD 0x04025efe 0x00000080 dp data 1 MsgDesc: ( DC untyped surface write, Surface = 254, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H }; add(16) g14<1>D g14<8,8,1>D 32D { align1 1H compacted }; while(16) JIP: -208 { align1 1H }; END B6 ->B5 START B7 <-B4 (8 cycles) add(16) g12<1>D g12<8,8,1>D 8D { align1 1H compacted }; while(16) JIP: -264 { align1 1H }; END B7 ->B2 START B8 <-B1 (142 cycles) send(1) g86<1>UW g86<0,1,0>UW 0x0209c000 data MsgDesc: ( DC mfence, 0, 0) mlen 1 rlen 0 { align1 WE_all 1N }; mov(8) g3<1>UD 0x00000000UD { align1 WE_all 1Q compacted }; and(1) g3.2<1>UD g0.2<0,1,0>UD 0x8f000000UD { align1 WE_all 1N }; send(16) null<1>UW g3<0,1,0>UD 0x02008004 gateway MsgDesc: (barrier msg) mlen 1 rlen 0 { align1 WE_all 1H }; wait(1) n0<0,1,0>UD { align1 WE_all 1N }; mul(16) g3<1>D g73<8,8,1>D 30D { align1 1H compacted }; add(16) g11<1>F -g53<8,8,1>F 0xc0400000F /* -3F */ { align1 1H }; mov(1) g9.1<1>F 0x3f7e0000F /* 0.992188F */ { align1 WE_all 1N 
}; add(16) g5<1>D g3<8,8,1>D g71<8,8,1>D { align1 1H compacted }; mul(16) g13<1>F g11<8,8,1>F g11<8,8,1>F { align1 1H compacted }; mov(1) g9.2<1>F 0x3c000000F /* 0.0078125F */ { align1 WE_all 1N }; add(16) g10<1>D g5<8,8,1>D 3D { align1 1H compacted }; mad(16) g15<1>F g13<4,4,1>F g51<4,4,1>F g51<4,4,1>F { align16 1H compacted }; math sqrt(16) g17<1>F g15<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g17<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 192 UIP: 224 { align1 1H }; END B8 ->B9 ->B10 START B9 <-B8 (1172 cycles) mul(16) g19<1>F g17<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g26<1>D g10<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g24<1>F g19<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g3<1>UW g24<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; mov(16) g15<1>D g3<8,8,1>D { align1 1H compacted }; send(16) g20<1>UW g26<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g28<1>D g26<8,8,1>D 1680D { align1 1H compacted }; mul(16) g17<1>F g3<8,8,1>F g20<8,8,1>F { align1 1H compacted }; send(16) g21<1>UW g28<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g29<1>D g26<8,8,1>D 3360D { align1 1H compacted }; mul(16) g19<1>F g3<8,8,1>F g21<8,8,1>F { align1 1H compacted }; send(16) g23<1>UW g29<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mul(16) g21<1>F g3<8,8,1>F g23<8,8,1>F { align1 1H compacted }; else(16) JIP: 48 UIP: 48 { align1 1H }; END B9 ->B11 START B10 <-B8 (16 cycles) mov(16) g15<1>UD 0x00000000UD { align1 1H compacted }; mov(16) g21<1>UD 0x00000000UD { align1 1H compacted }; mov(16) g19<1>UD 0x00000000UD { align1 1H compacted }; mov(16) g17<1>UD 0x00000000UD { align1 1H compacted }; END B10 ->B11 START B11 <-B10 <-B9 (90 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g24<1>D g5<8,8,1>D 4D { align1 1H compacted }; add(16) g55<1>F -g51<8,8,1>F 0x3f800000F /* 1F */ { align1 1H }; mad(16) g57<1>F g13<4,4,1>F g55<4,4,1>F g55<4,4,1>F { align16 1H compacted }; math sqrt(16) g59<1>F g57<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g59<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B11 ->B12 ->B13 START B12 <-B11 (1170 cycles) mul(16) g61<1>F g59<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g32<1>D g24<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g30<1>F g61<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g10<1>UW g30<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g15<1>F g15<8,8,1>F g10<8,8,1>F { align1 1H compacted }; send(16) g62<1>UW g32<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g34<1>D g32<8,8,1>D 1680D { align1 1H compacted }; mad(16) g17<1>F g17<4,4,1>F g62<4,4,1>F g10<4,4,1>F { align16 1H compacted }; send(16) g63<1>UW g34<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g35<1>D g32<8,8,1>D 3360D { align1 1H compacted }; mad(16) g19<1>F g19<4,4,1>F g63<4,4,1>F g10<4,4,1>F { align16 1H compacted }; send(16) g64<1>UW g35<8,8,1>UD 0x04205efe dp data 1 
MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g21<1>F g21<4,4,1>F g64<4,4,1>F g10<4,4,1>F { align16 1H compacted }; END B12 ->B13 START B13 <-B11 <-B12 (112 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g65<1>D g5<8,8,1>D 31D { align1 1H compacted }; add(16) g67<1>F -g51<8,8,1>F 0xc0000000F /* -2F */ { align1 1H }; add(16) g69<1>F -g53<8,8,1>F 0xc0000000F /* -2F */ { align1 1H }; mul(16) g71<1>F g69<8,8,1>F g69<8,8,1>F { align1 1H compacted }; mad(16) g73<1>F g71<4,4,1>F g67<4,4,1>F g67<4,4,1>F { align16 1H compacted }; math sqrt(16) g75<1>F g73<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g75<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B13 ->B14 ->B15 START B14 <-B13 (1168 cycles) mul(16) g77<1>F g75<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g38<1>D g65<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g36<1>F g77<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g11<1>UW g36<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g15<1>F g15<8,8,1>F g11<8,8,1>F { align1 1H compacted }; send(16) g78<1>UW g38<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g40<1>D g38<8,8,1>D 1680D { align1 1H compacted }; mad(16) g17<1>F g17<4,4,1>F g78<4,4,1>F g11<4,4,1>F { align16 1H compacted }; send(16) g79<1>UW g40<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g41<1>D g38<8,8,1>D 3360D { align1 1H compacted }; mad(16) g19<1>F g19<4,4,1>F g79<4,4,1>F g11<4,4,1>F { align16 1H compacted }; send(16) g80<1>UW g41<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g21<1>F g21<4,4,1>F g80<4,4,1>F g11<4,4,1>F { align16 1H compacted }; END B14 ->B15 START B15 <-B13 <-B14 (90 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g81<1>D g5<8,8,1>D 32D { align1 1H compacted }; add(16) g83<1>F -g51<8,8,1>F 0xbf800000F /* -1F */ { align1 1H }; mad(16) g85<1>F g71<4,4,1>F g83<4,4,1>F g83<4,4,1>F { align16 1H compacted }; math sqrt(16) g87<1>F g85<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g87<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B15 ->B16 ->B17 START B16 <-B15 (1168 cycles) mul(16) g89<1>F g87<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g44<1>D g81<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g42<1>F g89<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g12<1>UW g42<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g15<1>F g15<8,8,1>F g12<8,8,1>F { align1 1H compacted }; send(16) g90<1>UW g44<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g46<1>D g44<8,8,1>D 1680D { align1 1H compacted }; mad(16) g17<1>F g17<4,4,1>F g90<4,4,1>F g12<4,4,1>F { align16 1H compacted }; send(16) g91<1>UW g46<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g47<1>D g44<8,8,1>D 3360D { align1 1H compacted }; mad(16) g19<1>F g19<4,4,1>F g91<4,4,1>F g12<4,4,1>F { align16 1H compacted }; send(16) g92<1>UW g47<8,8,1>UD 
0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g21<1>F g21<4,4,1>F g92<4,4,1>F g12<4,4,1>F { align16 1H compacted }; END B16 ->B17 START B17 <-B15 <-B16 (72 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g93<1>D g5<8,8,1>D 33D { align1 1H compacted }; mad(16) g95<1>F g71<4,4,1>F g51<4,4,1>F g51<4,4,1>F { align16 1H compacted }; math sqrt(16) g97<1>F g95<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g97<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B17 ->B18 ->B19 START B18 <-B17 (1168 cycles) mul(16) g99<1>F g97<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g57<1>D g93<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g48<1>F g99<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g13<1>UW g48<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g15<1>F g15<8,8,1>F g13<8,8,1>F { align1 1H compacted }; send(16) g100<1>UW g57<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g59<1>D g57<8,8,1>D 1680D { align1 1H compacted }; mad(16) g17<1>F g17<4,4,1>F g100<4,4,1>F g13<4,4,1>F { align16 1H compacted }; send(16) g101<1>UW g59<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g60<1>D g57<8,8,1>D 3360D { align1 1H compacted }; mad(16) g19<1>F g19<4,4,1>F g101<4,4,1>F g13<4,4,1>F { align16 1H compacted }; send(16) g102<1>UW g60<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g21<1>F g21<4,4,1>F g102<4,4,1>F g13<4,4,1>F { align16 1H compacted }; END B18 ->B19 START B19 <-B17 <-B18 (72 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g103<1>D g5<8,8,1>D 34D { align1 1H compacted }; mad(16) g105<1>F g71<4,4,1>F g55<4,4,1>F g55<4,4,1>F { align16 1H compacted }; math sqrt(16) g107<1>F g105<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g107<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B19 ->B20 ->B21 START B20 <-B19 (1168 cycles) mul(16) g109<1>F g107<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g63<1>D g103<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g61<1>F g109<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g23<1>UW g61<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g15<1>F g15<8,8,1>F g23<8,8,1>F { align1 1H compacted }; send(16) g110<1>UW g63<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g65<1>D g63<8,8,1>D 1680D { align1 1H compacted }; mad(16) g17<1>F g17<4,4,1>F g110<4,4,1>F g23<4,4,1>F { align16 1H compacted }; send(16) g111<1>UW g65<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g69<1>D g63<8,8,1>D 3360D { align1 1H compacted }; mad(16) g19<1>F g19<4,4,1>F g111<4,4,1>F g23<4,4,1>F { align16 1H compacted }; send(16) g112<1>UW g69<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g21<1>F g21<4,4,1>F g112<4,4,1>F g23<4,4,1>F { align16 1H compacted }; END B20 ->B21 START B21 <-B19 
<-B20 (90 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g113<1>D g5<8,8,1>D 35D { align1 1H compacted }; add(16) g115<1>F -g51<8,8,1>F 0x40000000F /* 2F */ { align1 1H }; mad(16) g117<1>F g71<4,4,1>F g115<4,4,1>F g115<4,4,1>F { align16 1H compacted }; math sqrt(16) g119<1>F g117<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g119<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B21 ->B22 ->B23 START B22 <-B21 (1168 cycles) mul(16) g121<1>F g119<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g75<1>D g113<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g73<1>F g121<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g24<1>UW g73<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g15<1>F g15<8,8,1>F g24<8,8,1>F { align1 1H compacted }; send(16) g122<1>UW g75<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g77<1>D g75<8,8,1>D 1680D { align1 1H compacted }; mad(16) g17<1>F g17<4,4,1>F g122<4,4,1>F g24<4,4,1>F { align16 1H compacted }; send(16) g123<1>UW g77<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g78<1>D g75<8,8,1>D 3360D { align1 1H compacted }; mad(16) g19<1>F g19<4,4,1>F g123<4,4,1>F g24<4,4,1>F { align16 1H compacted }; send(16) g124<1>UW g78<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g21<1>F g21<4,4,1>F g124<4,4,1>F g24<4,4,1>F { align16 1H compacted }; END B22 ->B23 START B23 <-B21 <-B22 (90 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g125<1>D g5<8,8,1>D 36D { align1 1H compacted }; add(16) g3<1>F -g51<8,8,1>F 0x40400000F /* 3F */ { align1 1H }; mad(16) g10<1>F g71<4,4,1>F g3<4,4,1>F g3<4,4,1>F { align16 1H compacted }; math sqrt(16) g12<1>F g10<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g12<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B23 ->B24 ->B25 START B24 <-B23 (1172 cycles) mul(16) g23<1>F g12<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g81<1>D g125<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g79<1>F g23<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g28<1>UW g79<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g15<1>F g15<8,8,1>F g28<8,8,1>F { align1 1H compacted }; send(16) g24<1>UW g81<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g85<1>D g81<8,8,1>D 1680D { align1 1H compacted }; mad(16) g17<1>F g17<4,4,1>F g24<4,4,1>F g28<4,4,1>F { align16 1H compacted }; send(16) g25<1>UW g85<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g86<1>D g81<8,8,1>D 3360D { align1 1H compacted }; mad(16) g19<1>F g19<4,4,1>F g25<4,4,1>F g28<4,4,1>F { align16 1H compacted }; send(16) g26<1>UW g86<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g21<1>F g21<4,4,1>F g26<4,4,1>F g28<4,4,1>F { align16 1H compacted }; END B24 ->B25 START B25 <-B23 <-B24 (108 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g27<1>D g5<8,8,1>D 61D { align1 1H 
compacted }; add(16) g29<1>F -g53<8,8,1>F 0xbf800000F /* -1F */ { align1 1H }; mul(16) g31<1>F g29<8,8,1>F g29<8,8,1>F { align1 1H compacted }; mad(16) g33<1>F g31<4,4,1>F g67<4,4,1>F g67<4,4,1>F { align16 1H compacted }; math sqrt(16) g35<1>F g33<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g35<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B25 ->B26 ->B27 START B26 <-B25 (1170 cycles) mul(16) g37<1>F g35<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g89<1>D g27<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g87<1>F g37<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g29<1>UW g87<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g15<1>F g15<8,8,1>F g29<8,8,1>F { align1 1H compacted }; send(16) g38<1>UW g89<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g91<1>D g89<8,8,1>D 1680D { align1 1H compacted }; mad(16) g17<1>F g17<4,4,1>F g38<4,4,1>F g29<4,4,1>F { align16 1H compacted }; send(16) g39<1>UW g91<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g92<1>D g89<8,8,1>D 3360D { align1 1H compacted }; mad(16) g19<1>F g19<4,4,1>F g39<4,4,1>F g29<4,4,1>F { align16 1H compacted }; send(16) g40<1>UW g92<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g21<1>F g21<4,4,1>F g40<4,4,1>F g29<4,4,1>F { align16 1H compacted }; END B26 ->B27 START B27 <-B25 <-B26 (4624 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g41<1>D g5<8,8,1>D 62D { align1 1H compacted }; mad(16) g43<1>F g31<4,4,1>F g83<4,4,1>F g83<4,4,1>F { align16 1H compacted }; shl(16) g95<1>D g41<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g45<1>F g43<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g47<1>F g45<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g93<1>F g47<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g33<1>UW g93<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g48<1>F g15<8,8,1>F g33<8,8,1>F { align1 1H compacted }; send(16) g57<1>UW g95<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g97<1>D g95<8,8,1>D 1680D { align1 1H compacted }; mad(16) g59<1>F g17<4,4,1>F g57<4,4,1>F g33<4,4,1>F { align16 1H compacted }; send(16) g61<1>UW g97<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g98<1>D g95<8,8,1>D 3360D { align1 1H compacted }; mad(16) g63<1>F g19<4,4,1>F g61<4,4,1>F g33<4,4,1>F { align16 1H compacted }; send(16) g65<1>UW g98<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g71<1>D g5<8,8,1>D 63D { align1 1H compacted }; mad(16) g73<1>F g31<4,4,1>F g51<4,4,1>F g51<4,4,1>F { align16 1H compacted }; shl(16) g101<1>D g71<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g75<1>F g73<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g77<1>F g75<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g99<1>F g77<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g69<1>F g21<4,4,1>F g65<4,4,1>F g33<4,4,1>F { align16 1H compacted }; send(16) 
g34<1>UW g99<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g78<1>F g48<8,8,1>F g34<8,8,1>F { align1 1H compacted }; send(16) g80<1>UW g101<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g103<1>D g101<8,8,1>D 1680D { align1 1H compacted }; mad(16) g85<1>F g59<4,4,1>F g80<4,4,1>F g34<4,4,1>F { align16 1H compacted }; send(16) g87<1>UW g103<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g104<1>D g101<8,8,1>D 3360D { align1 1H compacted }; mad(16) g89<1>F g63<4,4,1>F g87<4,4,1>F g34<4,4,1>F { align16 1H compacted }; send(16) g91<1>UW g104<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g95<1>D g5<8,8,1>D 64D { align1 1H compacted }; mad(16) g97<1>F g31<4,4,1>F g55<4,4,1>F g55<4,4,1>F { align16 1H compacted }; shl(16) g110<1>D g95<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g99<1>F g97<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g101<1>F g99<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g105<1>F g101<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g93<1>F g69<4,4,1>F g91<4,4,1>F g34<4,4,1>F { align16 1H compacted }; send(16) g35<1>UW g105<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g102<1>F g78<8,8,1>F g35<8,8,1>F { align1 1H compacted }; send(16) g104<1>UW g110<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g112<1>D g110<8,8,1>D 1680D { align1 1H compacted }; mad(16) g106<1>F g85<4,4,1>F g104<4,4,1>F g35<4,4,1>F { align16 1H compacted }; send(16) g108<1>UW g112<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g117<1>D g110<8,8,1>D 3360D { align1 1H compacted }; mad(16) g110<1>F g89<4,4,1>F g108<4,4,1>F g35<4,4,1>F { align16 1H compacted }; send(16) g112<1>UW g117<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g119<1>D g5<8,8,1>D 65D { align1 1H compacted }; mad(16) g121<1>F g31<4,4,1>F g115<4,4,1>F g115<4,4,1>F { align16 1H compacted }; shl(16) g12<1>D g119<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g123<1>F g121<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g125<1>F g123<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g119<1>F g125<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g117<1>F g93<4,4,1>F g112<4,4,1>F g35<4,4,1>F { align16 1H compacted }; send(16) g36<1>UW g119<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g22<1>F g102<8,8,1>F g36<8,8,1>F { align1 1H compacted }; send(16) g126<1>UW g12<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g14<1>D g12<8,8,1>D 1680D { align1 1H compacted }; mad(16) g24<1>F g106<4,4,1>F g126<4,4,1>F g36<4,4,1>F { align16 1H compacted }; send(16) g10<1>UW g14<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g15<1>D g12<8,8,1>D 3360D { align1 1H compacted }; mad(16) g26<1>F g110<4,4,1>F 
g10<4,4,1>F g36<4,4,1>F { align16 1H compacted }; send(16) g11<1>UW g15<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g13<1>D g5<8,8,1>D 66D { align1 1H compacted }; mad(16) g15<1>F g31<4,4,1>F g3<4,4,1>F g3<4,4,1>F { align16 1H compacted }; math sqrt(16) g17<1>F g15<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g17<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; mad(16) g28<1>F g117<4,4,1>F g11<4,4,1>F g36<4,4,1>F { align16 1H compacted }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B27 ->B28 ->B29 START B28 <-B27 (1170 cycles) mul(16) g19<1>F g17<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g32<1>D g13<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g16<1>F g19<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g37<1>UW g16<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g22<1>F g22<8,8,1>F g37<8,8,1>F { align1 1H compacted }; send(16) g20<1>UW g32<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g34<1>D g32<8,8,1>D 1680D { align1 1H compacted }; mad(16) g24<1>F g24<4,4,1>F g20<4,4,1>F g37<4,4,1>F { align16 1H compacted }; send(16) g30<1>UW g34<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g35<1>D g32<8,8,1>D 3360D { align1 1H compacted }; mad(16) g26<1>F g26<4,4,1>F g30<4,4,1>F g37<4,4,1>F { align16 1H compacted }; send(16) g31<1>UW g35<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g28<1>F g28<4,4,1>F g31<4,4,1>F g37<4,4,1>F { align16 1H compacted }; END B28 ->B29 START B29 <-B27 <-B28 (94 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g32<1>D g5<8,8,1>D 90D { align1 1H compacted }; add(16) g37<1>F -g51<8,8,1>F 0xc0400000F /* -3F */ { align1 1H }; mul(16) g39<1>F g53<8,8,1>F g53<8,8,1>F { align1 1H compacted }; mad(16) g41<1>F g39<4,4,1>F g37<4,4,1>F g37<4,4,1>F { align16 1H compacted }; math sqrt(16) g43<1>F g41<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g43<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B29 ->B30 ->B31 START B30 <-B29 (1170 cycles) mul(16) g45<1>F g43<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g49<1>D g32<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g43<1>F g45<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g41<1>UW g43<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g22<1>F g22<8,8,1>F g41<8,8,1>F { align1 1H compacted }; send(16) g46<1>UW g49<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g57<1>D g49<8,8,1>D 1680D { align1 1H compacted }; mad(16) g24<1>F g24<4,4,1>F g46<4,4,1>F g41<4,4,1>F { align16 1H compacted }; send(16) g47<1>UW g57<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g58<1>D g49<8,8,1>D 3360D { align1 1H compacted }; mad(16) g26<1>F g26<4,4,1>F g47<4,4,1>F g41<4,4,1>F { align16 1H compacted }; send(16) g48<1>UW g58<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; 
mad(16) g28<1>F g28<4,4,1>F g48<4,4,1>F g41<4,4,1>F { align16 1H compacted }; END B30 ->B31 START B31 <-B29 <-B30 (72 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g49<1>D g5<8,8,1>D 91D { align1 1H compacted }; mad(16) g57<1>F g39<4,4,1>F g67<4,4,1>F g67<4,4,1>F { align16 1H compacted }; math sqrt(16) g59<1>F g57<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g59<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B31 ->B32 ->B33 START B32 <-B31 (1170 cycles) mul(16) g61<1>F g59<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g65<1>D g49<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g59<1>F g61<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g42<1>UW g59<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g22<1>F g22<8,8,1>F g42<8,8,1>F { align1 1H compacted }; send(16) g62<1>UW g65<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g69<1>D g65<8,8,1>D 1680D { align1 1H compacted }; mad(16) g24<1>F g24<4,4,1>F g62<4,4,1>F g42<4,4,1>F { align16 1H compacted }; send(16) g63<1>UW g69<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g70<1>D g65<8,8,1>D 3360D { align1 1H compacted }; mad(16) g26<1>F g26<4,4,1>F g63<4,4,1>F g42<4,4,1>F { align16 1H compacted }; send(16) g64<1>UW g70<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g28<1>F g28<4,4,1>F g64<4,4,1>F g42<4,4,1>F { align16 1H compacted }; END B32 ->B33 START B33 <-B31 <-B32 (4658 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g65<1>D g5<8,8,1>D 92D { align1 1H compacted }; mad(16) g69<1>F g39<4,4,1>F g83<4,4,1>F g83<4,4,1>F { align16 1H compacted }; shl(16) g85<1>D g65<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g71<1>F g69<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g73<1>F g71<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g71<1>F g73<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g43<1>UW g71<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g74<1>F g22<8,8,1>F g43<8,8,1>F { align1 1H compacted }; send(16) g76<1>UW g85<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g87<1>D g85<8,8,1>D 1680D { align1 1H compacted }; mad(16) g78<1>F g24<4,4,1>F g76<4,4,1>F g43<4,4,1>F { align16 1H compacted }; send(16) g80<1>UW g87<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g89<1>D g85<8,8,1>D 3360D { align1 1H compacted }; mad(16) g85<1>F g26<4,4,1>F g80<4,4,1>F g43<4,4,1>F { align16 1H compacted }; send(16) g87<1>UW g89<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g91<1>D g5<8,8,1>D 93D { align1 1H compacted }; mad(16) g93<1>F g39<4,4,1>F g51<4,4,1>F g51<4,4,1>F { align16 1H compacted }; shl(16) g106<1>D g91<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g95<1>F g93<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g97<1>F g95<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g91<1>F g97<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g89<1>F 
g28<4,4,1>F g87<4,4,1>F g43<4,4,1>F { align16 1H compacted }; send(16) g44<1>UW g91<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g98<1>F g74<8,8,1>F g44<8,8,1>F { align1 1H compacted }; send(16) g100<1>UW g106<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g108<1>D g106<8,8,1>D 1680D { align1 1H compacted }; mad(16) g102<1>F g78<4,4,1>F g100<4,4,1>F g44<4,4,1>F { align16 1H compacted }; send(16) g104<1>UW g108<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g110<1>D g106<8,8,1>D 3360D { align1 1H compacted }; mad(16) g106<1>F g85<4,4,1>F g104<4,4,1>F g44<4,4,1>F { align16 1H compacted }; send(16) g108<1>UW g110<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g112<1>D g5<8,8,1>D 94D { align1 1H compacted }; mad(16) g117<1>F g39<4,4,1>F g55<4,4,1>F g55<4,4,1>F { align16 1H compacted }; shl(16) g12<1>D g112<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g119<1>F g117<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g121<1>F g119<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g112<1>F g121<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g110<1>F g89<4,4,1>F g108<4,4,1>F g44<4,4,1>F { align16 1H compacted }; send(16) g45<1>UW g112<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g122<1>F g98<8,8,1>F g45<8,8,1>F { align1 1H compacted }; send(16) g124<1>UW g12<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g14<1>D g12<8,8,1>D 1680D { align1 1H compacted }; mad(16) g126<1>F g102<4,4,1>F g124<4,4,1>F g45<4,4,1>F { align16 1H compacted }; send(16) g10<1>UW g14<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g16<1>D g12<8,8,1>D 3360D { align1 1H compacted }; mad(16) g12<1>F g106<4,4,1>F g10<4,4,1>F g45<4,4,1>F { align16 1H compacted }; send(16) g14<1>UW g16<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g18<1>D g5<8,8,1>D 95D { align1 1H compacted }; mad(16) g20<1>F g39<4,4,1>F g115<4,4,1>F g115<4,4,1>F { align16 1H compacted }; shl(16) g33<1>D g18<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g22<1>F g20<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g24<1>F g22<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g18<1>F g24<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g16<1>F g110<4,4,1>F g14<4,4,1>F g45<4,4,1>F { align16 1H compacted }; send(16) g46<1>UW g18<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g29<1>F g122<8,8,1>F g46<8,8,1>F { align1 1H compacted }; send(16) g25<1>UW g33<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g35<1>D g33<8,8,1>D 1680D { align1 1H compacted }; mad(16) g31<1>F g126<4,4,1>F g25<4,4,1>F g46<4,4,1>F { align16 1H compacted }; send(16) g26<1>UW g35<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g41<1>D g33<8,8,1>D 3360D 
{ align1 1H compacted }; mad(16) g33<1>F g12<4,4,1>F g26<4,4,1>F g46<4,4,1>F { align16 1H compacted }; send(16) g27<1>UW g41<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g41<1>D g5<8,8,1>D 96D { align1 1H compacted }; mad(16) g43<1>F g39<4,4,1>F g3<4,4,1>F g3<4,4,1>F { align16 1H compacted }; mad(16) g35<1>F g16<4,4,1>F g27<4,4,1>F g46<4,4,1>F { align16 1H compacted }; math sqrt(16) g45<1>F g43<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g45<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B33 ->B34 ->B35 START B34 <-B33 (1172 cycles) mul(16) g47<1>F g45<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g44<1>D g41<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g42<1>F g47<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g59<1>UW g42<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g29<1>F g29<8,8,1>F g59<8,8,1>F { align1 1H compacted }; send(16) g48<1>UW g44<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g46<1>D g44<8,8,1>D 1680D { align1 1H compacted }; mad(16) g31<1>F g31<4,4,1>F g48<4,4,1>F g59<4,4,1>F { align16 1H compacted }; send(16) g49<1>UW g46<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g47<1>D g44<8,8,1>D 3360D { align1 1H compacted }; mad(16) g33<1>F g33<4,4,1>F g49<4,4,1>F g59<4,4,1>F { align16 1H compacted }; send(16) g57<1>UW g47<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g35<1>F g35<4,4,1>F g57<4,4,1>F g59<4,4,1>F { align16 1H compacted }; END B34 ->B35 START B35 <-B33 <-B34 (90 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g58<1>D g5<8,8,1>D 97D { align1 1H compacted }; add(16) g60<1>F -g51<8,8,1>F 0x40800000F /* 4F */ { align1 1H }; mad(16) g62<1>F g39<4,4,1>F g60<4,4,1>F g60<4,4,1>F { align16 1H compacted }; math sqrt(16) g64<1>F g62<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g64<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B35 ->B36 ->B37 START B36 <-B35 (1168 cycles) mul(16) g69<1>F g64<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g64<1>D g58<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g48<1>F g69<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g62<1>UW g48<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g29<1>F g29<8,8,1>F g62<8,8,1>F { align1 1H compacted }; send(16) g70<1>UW g64<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g73<1>D g64<8,8,1>D 1680D { align1 1H compacted }; mad(16) g31<1>F g31<4,4,1>F g70<4,4,1>F g62<4,4,1>F { align16 1H compacted }; send(16) g71<1>UW g73<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g74<1>D g64<8,8,1>D 3360D { align1 1H compacted }; mad(16) g33<1>F g33<4,4,1>F g71<4,4,1>F g62<4,4,1>F { align16 1H compacted }; send(16) g72<1>UW g74<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) 
g35<1>F g35<4,4,1>F g72<4,4,1>F g62<4,4,1>F { align16 1H compacted }; END B36 ->B37 START B37 <-B35 <-B36 (108 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g73<1>D g5<8,8,1>D 120D { align1 1H compacted }; add(16) g75<1>F -g53<8,8,1>F 0x3f800000F /* 1F */ { align1 1H }; mul(16) g77<1>F g75<8,8,1>F g75<8,8,1>F { align1 1H compacted }; mad(16) g79<1>F g77<4,4,1>F g37<4,4,1>F g37<4,4,1>F { align16 1H compacted }; math sqrt(16) g81<1>F g79<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g81<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B37 ->B38 ->B39 START B38 <-B37 (1168 cycles) mul(16) g85<1>F g81<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g79<1>D g73<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g75<1>F g85<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g63<1>UW g75<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g29<1>F g29<8,8,1>F g63<8,8,1>F { align1 1H compacted }; send(16) g86<1>UW g79<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g81<1>D g79<8,8,1>D 1680D { align1 1H compacted }; mad(16) g31<1>F g31<4,4,1>F g86<4,4,1>F g63<4,4,1>F { align16 1H compacted }; send(16) g87<1>UW g81<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g85<1>D g79<8,8,1>D 3360D { align1 1H compacted }; mad(16) g33<1>F g33<4,4,1>F g87<4,4,1>F g63<4,4,1>F { align16 1H compacted }; send(16) g88<1>UW g85<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g35<1>F g35<4,4,1>F g88<4,4,1>F g63<4,4,1>F { align16 1H compacted }; END B38 ->B39 START B39 <-B37 <-B38 (72 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g89<1>D g5<8,8,1>D 121D { align1 1H compacted }; mad(16) g91<1>F g77<4,4,1>F g67<4,4,1>F g67<4,4,1>F { align16 1H compacted }; math sqrt(16) g93<1>F g91<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g93<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B39 ->B40 ->B41 START B40 <-B39 (1172 cycles) mul(16) g95<1>F g93<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g91<1>D g89<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g86<1>F g95<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g64<1>UW g86<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g29<1>F g29<8,8,1>F g64<8,8,1>F { align1 1H compacted }; send(16) g96<1>UW g91<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g93<1>D g91<8,8,1>D 1680D { align1 1H compacted }; mad(16) g31<1>F g31<4,4,1>F g96<4,4,1>F g64<4,4,1>F { align16 1H compacted }; send(16) g97<1>UW g93<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g94<1>D g91<8,8,1>D 3360D { align1 1H compacted }; mad(16) g33<1>F g33<4,4,1>F g97<4,4,1>F g64<4,4,1>F { align16 1H compacted }; send(16) g98<1>UW g94<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g35<1>F g35<4,4,1>F g98<4,4,1>F g64<4,4,1>F { align16 1H compacted }; END B40 ->B41 START B41 <-B39 <-B40 (4488 
cycles) endif(16) JIP: 16 { align1 1H }; add(16) g99<1>D g5<8,8,1>D 122D { align1 1H compacted }; mad(16) g101<1>F g77<4,4,1>F g83<4,4,1>F g83<4,4,1>F { align16 1H compacted }; shl(16) g97<1>D g99<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g103<1>F g101<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g105<1>F g103<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g95<1>F g105<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g65<1>UW g95<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g106<1>F g29<8,8,1>F g65<8,8,1>F { align1 1H compacted }; send(16) g108<1>UW g97<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g99<1>D g97<8,8,1>D 1680D { align1 1H compacted }; mad(16) g110<1>F g31<4,4,1>F g108<4,4,1>F g65<4,4,1>F { align16 1H compacted }; send(16) g112<1>UW g99<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g100<1>D g97<8,8,1>D 3360D { align1 1H compacted }; mad(16) g117<1>F g33<4,4,1>F g112<4,4,1>F g65<4,4,1>F { align16 1H compacted }; send(16) g119<1>UW g100<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g123<1>D g5<8,8,1>D 123D { align1 1H compacted }; mad(16) g125<1>F g77<4,4,1>F g51<4,4,1>F g51<4,4,1>F { align16 1H compacted }; shl(16) g103<1>D g123<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g10<1>F g125<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g12<1>F g10<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g101<1>F g12<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g69<1>UW g101<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g13<1>F g106<8,8,1>F g69<8,8,1>F { align1 1H compacted }; mad(16) g121<1>F g35<4,4,1>F g119<4,4,1>F g65<4,4,1>F { align16 1H compacted }; send(16) g15<1>UW g103<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g105<1>D g103<8,8,1>D 1680D { align1 1H compacted }; mad(16) g17<1>F g110<4,4,1>F g15<4,4,1>F g69<4,4,1>F { align16 1H compacted }; send(16) g19<1>UW g105<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g106<1>D g103<8,8,1>D 3360D { align1 1H compacted }; mad(16) g21<1>F g117<4,4,1>F g19<4,4,1>F g69<4,4,1>F { align16 1H compacted }; send(16) g23<1>UW g106<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g27<1>D g5<8,8,1>D 124D { align1 1H compacted }; mad(16) g29<1>F g77<4,4,1>F g55<4,4,1>F g55<4,4,1>F { align16 1H compacted }; shl(16) g109<1>D g27<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g31<1>F g29<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g33<1>F g31<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g107<1>F g33<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g25<1>F g121<4,4,1>F g23<4,4,1>F g69<4,4,1>F { align16 1H compacted }; send(16) g70<1>UW g107<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g34<1>F g13<8,8,1>F g70<8,8,1>F { align1 1H compacted }; send(16) g36<1>UW g109<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface 
read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g111<1>D g109<8,8,1>D 1680D { align1 1H compacted }; mad(16) g40<1>F g17<4,4,1>F g36<4,4,1>F g70<4,4,1>F { align16 1H compacted }; send(16) g42<1>UW g111<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g112<1>D g109<8,8,1>D 3360D { align1 1H compacted }; mad(16) g44<1>F g21<4,4,1>F g42<4,4,1>F g70<4,4,1>F { align16 1H compacted }; send(16) g46<1>UW g112<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g57<1>D g5<8,8,1>D 125D { align1 1H compacted }; mad(16) g62<1>F g77<4,4,1>F g115<4,4,1>F g115<4,4,1>F { align16 1H compacted }; shl(16) g117<1>D g57<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g64<1>F g62<8,8,1>F null<8,8,1>F { align1 1H compacted }; mad(16) g48<1>F g25<4,4,1>F g46<4,4,1>F g70<4,4,1>F { align16 1H compacted }; mul(16) g69<1>F g64<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g113<1>F g69<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g81<1>UW g113<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g36<1>F g34<8,8,1>F g81<8,8,1>F { align1 1H compacted }; send(16) g70<1>UW g117<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g119<1>D g117<8,8,1>D 1680D { align1 1H compacted }; mad(16) g38<1>F g40<4,4,1>F g70<4,4,1>F g81<4,4,1>F { align16 1H compacted }; send(16) g71<1>UW g119<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g120<1>D g117<8,8,1>D 3360D { align1 1H compacted }; mad(16) g40<1>F g44<4,4,1>F g71<4,4,1>F g81<4,4,1>F { align16 1H compacted }; send(16) g72<1>UW g120<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g74<1>D g5<8,8,1>D 126D { align1 1H compacted }; mad(16) g79<1>F g77<4,4,1>F g3<4,4,1>F g3<4,4,1>F { align16 1H compacted }; mad(16) g42<1>F g48<4,4,1>F g72<4,4,1>F g81<4,4,1>F { align16 1H compacted }; math sqrt(16) g81<1>F g79<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g81<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B41 ->B42 ->B43 START B42 <-B41 (1172 cycles) mul(16) g85<1>F g81<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g123<1>D g74<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g121<1>F g85<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g90<1>UW g121<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g36<1>F g36<8,8,1>F g90<8,8,1>F { align1 1H compacted }; send(16) g86<1>UW g123<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g125<1>D g123<8,8,1>D 1680D { align1 1H compacted }; mad(16) g38<1>F g38<4,4,1>F g86<4,4,1>F g90<4,4,1>F { align16 1H compacted }; send(16) g87<1>UW g125<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g126<1>D g123<8,8,1>D 3360D { align1 1H compacted }; mad(16) g40<1>F g40<4,4,1>F g87<4,4,1>F g90<4,4,1>F { align16 1H compacted }; send(16) g88<1>UW g126<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( 
untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g42<1>F g42<4,4,1>F g88<4,4,1>F g90<4,4,1>F { align16 1H compacted }; END B42 ->B43 START B43 <-B41 <-B42 (72 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g89<1>D g5<8,8,1>D 127D { align1 1H compacted }; mad(16) g91<1>F g77<4,4,1>F g60<4,4,1>F g60<4,4,1>F { align16 1H compacted }; math sqrt(16) g93<1>F g91<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g93<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B43 ->B44 ->B45 START B44 <-B43 (1170 cycles) mul(16) g95<1>F g93<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g12<1>D g89<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g10<1>F g95<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g91<1>UW g10<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g36<1>F g36<8,8,1>F g91<8,8,1>F { align1 1H compacted }; send(16) g96<1>UW g12<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g14<1>D g12<8,8,1>D 1680D { align1 1H compacted }; mad(16) g38<1>F g38<4,4,1>F g96<4,4,1>F g91<4,4,1>F { align16 1H compacted }; send(16) g97<1>UW g14<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g15<1>D g12<8,8,1>D 3360D { align1 1H compacted }; mad(16) g40<1>F g40<4,4,1>F g97<4,4,1>F g91<4,4,1>F { align16 1H compacted }; send(16) g98<1>UW g15<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) g42<1>F g42<4,4,1>F g98<4,4,1>F g91<4,4,1>F { align16 1H compacted }; END B44 ->B45 START B45 <-B43 <-B44 (108 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g99<1>D g5<8,8,1>D 151D { align1 1H compacted }; add(16) g101<1>F -g53<8,8,1>F 0x40000000F /* 2F */ { align1 1H }; mul(16) g103<1>F g101<8,8,1>F g101<8,8,1>F { align1 1H compacted }; mad(16) g105<1>F g103<4,4,1>F g67<4,4,1>F g67<4,4,1>F { align16 1H compacted }; math sqrt(16) g107<1>F g105<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g107<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B45 ->B46 ->B47 START B46 <-B45 (1172 cycles) mul(16) g109<1>F g107<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g18<1>D g99<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g16<1>F g109<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g92<1>UW g16<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g36<1>F g36<8,8,1>F g92<8,8,1>F { align1 1H compacted }; send(16) g110<1>UW g18<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g20<1>D g18<8,8,1>D 1680D { align1 1H compacted }; mad(16) g38<1>F g38<4,4,1>F g110<4,4,1>F g92<4,4,1>F { align16 1H compacted }; send(16) g111<1>UW g20<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g21<1>D g18<8,8,1>D 3360D { align1 1H compacted }; mad(16) g40<1>F g40<4,4,1>F g111<4,4,1>F g92<4,4,1>F { align16 1H compacted }; send(16) g112<1>UW g21<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; mad(16) 
g42<1>F g42<4,4,1>F g112<4,4,1>F g92<4,4,1>F { align16 1H compacted }; END B46 ->B47 START B47 <-B45 <-B46 (4622 cycles) endif(16) JIP: 16 { align1 1H }; add(16) g113<1>D g5<8,8,1>D 152D { align1 1H compacted }; mad(16) g117<1>F g103<4,4,1>F g83<4,4,1>F g83<4,4,1>F { align16 1H compacted }; shl(16) g24<1>D g113<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g119<1>F g117<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g121<1>F g119<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g22<1>F g121<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g93<1>UW g22<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g122<1>F g36<8,8,1>F g93<8,8,1>F { align1 1H compacted }; send(16) g124<1>UW g24<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g26<1>D g24<8,8,1>D 1680D { align1 1H compacted }; mad(16) g126<1>F g38<4,4,1>F g124<4,4,1>F g93<4,4,1>F { align16 1H compacted }; send(16) g10<1>UW g26<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g27<1>D g24<8,8,1>D 3360D { align1 1H compacted }; mad(16) g12<1>F g40<4,4,1>F g10<4,4,1>F g93<4,4,1>F { align16 1H compacted }; send(16) g14<1>UW g27<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g18<1>D g5<8,8,1>D 153D { align1 1H compacted }; mad(16) g20<1>F g103<4,4,1>F g51<4,4,1>F g51<4,4,1>F { align16 1H compacted }; shl(16) g33<1>D g18<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g22<1>F g20<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g24<1>F g22<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g28<1>F g24<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g16<1>F g42<4,4,1>F g14<4,4,1>F g93<4,4,1>F { align16 1H compacted }; send(16) g94<1>UW g28<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g25<1>F g122<8,8,1>F g94<8,8,1>F { align1 1H compacted }; send(16) g27<1>UW g33<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g35<1>D g33<8,8,1>D 1680D { align1 1H compacted }; mad(16) g29<1>F g126<4,4,1>F g27<4,4,1>F g94<4,4,1>F { align16 1H compacted }; send(16) g31<1>UW g35<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g37<1>D g33<8,8,1>D 3360D { align1 1H compacted }; mad(16) g33<1>F g12<4,4,1>F g31<4,4,1>F g94<4,4,1>F { align16 1H compacted }; send(16) g35<1>UW g37<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g39<1>D g5<8,8,1>D 154D { align1 1H compacted }; mad(16) g41<1>F g103<4,4,1>F g55<4,4,1>F g55<4,4,1>F { align16 1H compacted }; shl(16) g61<1>D g39<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g43<1>F g41<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g45<1>F g43<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g39<1>F g45<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g37<1>F g16<4,4,1>F g35<4,4,1>F g94<4,4,1>F { align16 1H compacted }; send(16) g95<1>UW g39<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g46<1>F g25<8,8,1>F g95<8,8,1>F { align1 1H 
compacted }; send(16) g48<1>UW g61<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g63<1>D g61<8,8,1>D 1680D { align1 1H compacted }; mad(16) g57<1>F g29<4,4,1>F g48<4,4,1>F g95<4,4,1>F { align16 1H compacted }; send(16) g59<1>UW g63<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g65<1>D g61<8,8,1>D 3360D { align1 1H compacted }; mad(16) g61<1>F g33<4,4,1>F g59<4,4,1>F g95<4,4,1>F { align16 1H compacted }; send(16) g63<1>UW g65<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g69<1>D g5<8,8,1>D 155D { align1 1H compacted }; mad(16) g71<1>F g103<4,4,1>F g115<4,4,1>F g115<4,4,1>F { align16 1H compacted }; shl(16) g79<1>D g69<8,8,1>D 0x00000002UD { align1 1H }; math sqrt(16) g73<1>F g71<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g75<1>F g73<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; lrp(16) g69<1>F g75<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; mad(16) g65<1>F g37<4,4,1>F g63<4,4,1>F g95<4,4,1>F { align16 1H compacted }; send(16) g96<1>UW g69<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g43<1>F g46<8,8,1>F g96<8,8,1>F { align1 1H compacted }; send(16) g76<1>UW g79<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g81<1>D g79<8,8,1>D 1680D { align1 1H compacted }; mad(16) g45<1>F g57<4,4,1>F g76<4,4,1>F g96<4,4,1>F { align16 1H compacted }; send(16) g77<1>UW g81<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g85<1>D g79<8,8,1>D 3360D { align1 1H compacted }; mad(16) g47<1>F g61<4,4,1>F g77<4,4,1>F g96<4,4,1>F { align16 1H compacted }; send(16) g78<1>UW g85<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g80<1>D g5<8,8,1>D 156D { align1 1H compacted }; mad(16) g85<1>F g103<4,4,1>F g3<4,4,1>F g3<4,4,1>F { align16 1H compacted }; math sqrt(16) g87<1>F g85<8,8,1>F null<8,8,1>F { align1 1H compacted }; cmp.l.f0.0(16) null<1>F g87<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H }; mad(16) g49<1>F g65<4,4,1>F g78<4,4,1>F g96<4,4,1>F { align16 1H compacted }; (+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H }; END B47 ->B48 ->B49 START B48 <-B47 (1170 cycles) mul(16) g89<1>F g87<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H }; shl(16) g93<1>D g80<8,8,1>D 0x00000002UD { align1 1H }; lrp(16) g86<1>F g89<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H }; send(16) g97<1>UW g86<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H }; add(16) g43<1>F g43<8,8,1>F g97<8,8,1>F { align1 1H compacted }; send(16) g90<1>UW g93<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g95<1>D g93<8,8,1>D 1680D { align1 1H compacted }; mad(16) g45<1>F g45<4,4,1>F g90<4,4,1>F g97<4,4,1>F { align16 1H compacted }; send(16) g91<1>UW g95<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H }; add(16) g99<1>D g93<8,8,1>D 3360D { align1 1H compacted }; mad(16) g47<1>F g47<4,4,1>F g91<4,4,1>F g97<4,4,1>F { align16 1H compacted 
};
send(16) g92<1>UW g99<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
mad(16) g49<1>F g49<4,4,1>F g92<4,4,1>F g97<4,4,1>F { align16 1H compacted };
END B48 ->B49
START B49 <-B47 <-B48 (108 cycles)
endif(16) JIP: 16 { align1 1H };
add(16) g93<1>D g5<8,8,1>D 181D { align1 1H compacted };
add(16) g95<1>F -g53<8,8,1>F 0x40400000F /* 3F */ { align1 1H };
mul(16) g97<1>F g95<8,8,1>F g95<8,8,1>F { align1 1H compacted };
mad(16) g99<1>F g97<4,4,1>F g67<4,4,1>F g67<4,4,1>F { align16 1H compacted };
math sqrt(16) g101<1>F g99<8,8,1>F null<8,8,1>F { align1 1H compacted };
cmp.l.f0.0(16) null<1>F g101<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H };
(+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H };
END B49 ->B50 ->B51
START B50 <-B49 (1170 cycles)
mul(16) g103<1>F g101<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H };
shl(16) g107<1>D g93<8,8,1>D 0x00000002UD { align1 1H };
lrp(16) g101<1>F g103<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H };
send(16) g99<1>UW g101<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H };
add(16) g43<1>F g43<8,8,1>F g99<8,8,1>F { align1 1H compacted };
send(16) g104<1>UW g107<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g109<1>D g107<8,8,1>D 1680D { align1 1H compacted };
mad(16) g45<1>F g45<4,4,1>F g104<4,4,1>F g99<4,4,1>F { align16 1H compacted };
send(16) g105<1>UW g109<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g110<1>D g107<8,8,1>D 3360D { align1 1H compacted };
mad(16) g47<1>F g47<4,4,1>F g105<4,4,1>F g99<4,4,1>F { align16 1H compacted };
send(16) g106<1>UW g110<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
mad(16) g49<1>F g49<4,4,1>F g106<4,4,1>F g99<4,4,1>F { align16 1H compacted };
END B50 ->B51
START B51 <-B49 <-B50 (72 cycles)
endif(16) JIP: 16 { align1 1H };
add(16) g107<1>D g5<8,8,1>D 182D { align1 1H compacted };
mad(16) g109<1>F g97<4,4,1>F g83<4,4,1>F g83<4,4,1>F { align16 1H compacted };
math sqrt(16) g111<1>F g109<8,8,1>F null<8,8,1>F { align1 1H compacted };
cmp.l.f0.0(16) null<1>F g111<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H };
(+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H };
END B51 ->B52 ->B53
START B52 <-B51 (1170 cycles)
mul(16) g113<1>F g111<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H };
shl(16) g120<1>D g107<8,8,1>D 0x00000002UD { align1 1H };
lrp(16) g111<1>F g113<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H };
send(16) g100<1>UW g111<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H };
add(16) g43<1>F g43<8,8,1>F g100<8,8,1>F { align1 1H compacted };
send(16) g117<1>UW g120<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g122<1>D g120<8,8,1>D 1680D { align1 1H compacted };
mad(16) g45<1>F g45<4,4,1>F g117<4,4,1>F g100<4,4,1>F { align16 1H compacted };
send(16) g118<1>UW g122<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g123<1>D g120<8,8,1>D 3360D { align1 1H compacted };
mad(16) g47<1>F g47<4,4,1>F g118<4,4,1>F g100<4,4,1>F { align16 1H compacted };
send(16) g119<1>UW g123<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
mad(16) g49<1>F g49<4,4,1>F g119<4,4,1>F g100<4,4,1>F { align16 1H compacted };
END B52 ->B53
START B53 <-B51 <-B52 (72 cycles)
endif(16) JIP: 16 { align1 1H };
add(16) g120<1>D g5<8,8,1>D 183D { align1 1H compacted };
mad(16) g122<1>F g97<4,4,1>F g51<4,4,1>F g51<4,4,1>F { align16 1H compacted };
math sqrt(16) g124<1>F g122<8,8,1>F null<8,8,1>F { align1 1H compacted };
cmp.l.f0.0(16) null<1>F g124<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H };
(+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H };
END B53 ->B54 ->B55
START B54 <-B53 (1168 cycles)
mul(16) g126<1>F g124<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H };
shl(16) g13<1>D g120<8,8,1>D 0x00000002UD { align1 1H };
lrp(16) g124<1>F g126<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H };
send(16) g101<1>UW g124<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H };
add(16) g43<1>F g43<8,8,1>F g101<8,8,1>F { align1 1H compacted };
send(16) g10<1>UW g13<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g15<1>D g13<8,8,1>D 1680D { align1 1H compacted };
mad(16) g45<1>F g45<4,4,1>F g10<4,4,1>F g101<4,4,1>F { align16 1H compacted };
send(16) g11<1>UW g15<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g16<1>D g13<8,8,1>D 3360D { align1 1H compacted };
mad(16) g47<1>F g47<4,4,1>F g11<4,4,1>F g101<4,4,1>F { align16 1H compacted };
send(16) g12<1>UW g16<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
mad(16) g49<1>F g49<4,4,1>F g12<4,4,1>F g101<4,4,1>F { align16 1H compacted };
END B54 ->B55
START B55 <-B53 <-B54 (72 cycles)
endif(16) JIP: 16 { align1 1H };
add(16) g13<1>D g5<8,8,1>D 184D { align1 1H compacted };
mad(16) g15<1>F g97<4,4,1>F g55<4,4,1>F g55<4,4,1>F { align16 1H compacted };
math sqrt(16) g17<1>F g15<8,8,1>F null<8,8,1>F { align1 1H compacted };
cmp.l.f0.0(16) null<1>F g17<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H };
(+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H };
END B55 ->B56 ->B57
START B56 <-B55 (1168 cycles)
mul(16) g19<1>F g17<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H };
shl(16) g23<1>D g13<8,8,1>D 0x00000002UD { align1 1H };
lrp(16) g17<1>F g19<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H };
send(16) g102<1>UW g17<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H };
add(16) g43<1>F g43<8,8,1>F g102<8,8,1>F { align1 1H compacted };
send(16) g20<1>UW g23<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g25<1>D g23<8,8,1>D 1680D { align1 1H compacted };
mad(16) g45<1>F g45<4,4,1>F g20<4,4,1>F g102<4,4,1>F { align16 1H compacted };
send(16) g21<1>UW g25<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g26<1>D g23<8,8,1>D 3360D { align1 1H compacted };
mad(16) g47<1>F g47<4,4,1>F g21<4,4,1>F g102<4,4,1>F { align16 1H compacted };
send(16) g22<1>UW g26<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
mad(16) g49<1>F g49<4,4,1>F g22<4,4,1>F g102<4,4,1>F { align16 1H compacted };
END B56 ->B57
START B57 <-B55 <-B56 (72 cycles)
endif(16) JIP: 16 { align1 1H };
add(16) g23<1>D g5<8,8,1>D 185D { align1 1H compacted };
mad(16) g25<1>F g97<4,4,1>F g115<4,4,1>F g115<4,4,1>F { align16 1H compacted };
math sqrt(16) g27<1>F g25<8,8,1>F null<8,8,1>F { align1 1H compacted };
cmp.l.f0.0(16) null<1>F g27<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H };
(+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H };
END B57 ->B58 ->B59
START B58 <-B57 (1168 cycles)
mul(16) g29<1>F g27<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H };
shl(16) g33<1>D g23<8,8,1>D 0x00000002UD { align1 1H };
lrp(16) g27<1>F g29<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H };
send(16) g103<1>UW g27<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H };
add(16) g43<1>F g43<8,8,1>F g103<8,8,1>F { align1 1H compacted };
send(16) g30<1>UW g33<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g35<1>D g33<8,8,1>D 1680D { align1 1H compacted };
mad(16) g45<1>F g45<4,4,1>F g30<4,4,1>F g103<4,4,1>F { align16 1H compacted };
send(16) g31<1>UW g35<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g36<1>D g33<8,8,1>D 3360D { align1 1H compacted };
mad(16) g47<1>F g47<4,4,1>F g31<4,4,1>F g103<4,4,1>F { align16 1H compacted };
send(16) g32<1>UW g36<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
mad(16) g49<1>F g49<4,4,1>F g32<4,4,1>F g103<4,4,1>F { align16 1H compacted };
END B58 ->B59
START B59 <-B57 <-B58 (72 cycles)
endif(16) JIP: 16 { align1 1H };
add(16) g33<1>D g5<8,8,1>D 186D { align1 1H compacted };
mad(16) g35<1>F g97<4,4,1>F g3<4,4,1>F g3<4,4,1>F { align16 1H compacted };
math sqrt(16) g37<1>F g35<8,8,1>F null<8,8,1>F { align1 1H compacted };
cmp.l.f0.0(16) null<1>F g37<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H };
(+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H };
END B59 ->B60 ->B61
START B60 <-B59 (1168 cycles)
mul(16) g39<1>F g37<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H };
shl(16) g57<1>D g33<8,8,1>D 0x00000002UD { align1 1H };
lrp(16) g37<1>F g39<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H };
send(16) g104<1>UW g37<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H };
add(16) g43<1>F g43<8,8,1>F g104<8,8,1>F { align1 1H compacted };
send(16) g40<1>UW g57<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g59<1>D g57<8,8,1>D 1680D { align1 1H compacted };
mad(16) g45<1>F g45<4,4,1>F g40<4,4,1>F g104<4,4,1>F { align16 1H compacted };
send(16) g41<1>UW g59<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g60<1>D g57<8,8,1>D 3360D { align1 1H compacted };
mad(16) g47<1>F g47<4,4,1>F g41<4,4,1>F g104<4,4,1>F { align16 1H compacted };
send(16) g57<1>UW g60<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
mad(16) g49<1>F g49<4,4,1>F g57<4,4,1>F g104<4,4,1>F { align16 1H compacted };
END B60 ->B61
START B61 <-B59 <-B60 (108 cycles)
endif(16) JIP: 16 { align1 1H };
add(16) g58<1>D g5<8,8,1>D 213D { align1 1H compacted };
add(16) g60<1>F -g53<8,8,1>F 0x40800000F /* 4F */ { align1 1H };
mul(16) g62<1>F g60<8,8,1>F g60<8,8,1>F { align1 1H compacted };
mad(16) g64<1>F g62<4,4,1>F g51<4,4,1>F g51<4,4,1>F { align16 1H compacted };
math sqrt(16) g66<1>F g64<8,8,1>F null<8,8,1>F { align1 1H compacted };
cmp.l.f0.0(16) null<1>F g66<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H };
(+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H };
END B61 ->B62 ->B63
START B62 <-B61 (1172 cycles)
mul(16) g68<1>F g66<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H };
shl(16) g66<1>D g58<8,8,1>D 0x00000002UD { align1 1H };
lrp(16) g64<1>F g68<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H };
send(16) g105<1>UW g64<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H };
add(16) g43<1>F g43<8,8,1>F g105<8,8,1>F { align1 1H compacted };
send(16) g69<1>UW g66<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g72<1>D g66<8,8,1>D 1680D { align1 1H compacted };
mad(16) g45<1>F g45<4,4,1>F g69<4,4,1>F g105<4,4,1>F { align16 1H compacted };
send(16) g70<1>UW g72<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g73<1>D g66<8,8,1>D 3360D { align1 1H compacted };
mad(16) g47<1>F g47<4,4,1>F g70<4,4,1>F g105<4,4,1>F { align16 1H compacted };
send(16) g71<1>UW g73<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
mad(16) g49<1>F g49<4,4,1>F g71<4,4,1>F g105<4,4,1>F { align16 1H compacted };
END B62 ->B63
START B63 <-B61 <-B62 (72 cycles)
endif(16) JIP: 16 { align1 1H };
add(16) g72<1>D g5<8,8,1>D 214D { align1 1H compacted };
mad(16) g74<1>F g62<4,4,1>F g55<4,4,1>F g55<4,4,1>F { align16 1H compacted };
math sqrt(16) g76<1>F g74<8,8,1>F null<8,8,1>F { align1 1H compacted };
cmp.l.f0.0(16) null<1>F g76<8,8,1>F 0x404217e3F /* 3.03271F */ { align1 1H };
(+f0.0) if(16) JIP: 176 UIP: 176 { align1 1H };
END B63 ->B64 ->B65
START B64 <-B63 (1170 cycles)
mul(16) g78<1>F g76<8,8,1>F 0x3e9e1b5fF /* 0.308803F */ { align1 1H };
shl(16) g76<1>D g72<8,8,1>D 0x00000002UD { align1 1H };
lrp(16) g74<1>F g78<4,4,1>F g9.1<0,1,0>F g9.2<0,1,0>F { align16 1H };
send(16) g106<1>UW g74<8,8,1>UD 0x04258001 sampler MsgDesc: sample_lz SIMD16 Surface = 1 Sampler = 0 mlen 2 rlen 2 { align1 1H };
add(16) g43<1>F g43<8,8,1>F g106<8,8,1>F { align1 1H compacted };
send(16) g79<1>UW g76<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g82<1>D g76<8,8,1>D 1680D { align1 1H compacted };
mad(16) g45<1>F g45<4,4,1>F g79<4,4,1>F g106<4,4,1>F { align16 1H compacted };
send(16) g80<1>UW g82<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
add(16) g83<1>D g76<8,8,1>D 3360D { align1 1H compacted };
mad(16) g47<1>F g47<4,4,1>F g80<4,4,1>F g106<4,4,1>F { align16 1H compacted };
send(16) g81<1>UW g83<8,8,1>UD 0x04205efe dp data 1 MsgDesc: ( untyped surface read, Surface = 254, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
mad(16) g49<1>F g49<4,4,1>F g81<4,4,1>F g106<4,4,1>F { align16 1H compacted };
END B64 ->B65
START B65 <-B63 <-B64 (274 cycles)
endif(16) JIP: 16 { align1 1H };
math inv(16) g82<1>F g43<8,8,1>F null<8,8,1>F { align1 1H compacted };
mov(8) g13<1>UD g7<8,8,1>UD { align1 1Q compacted };
mov(8) g14<1>UD g1<8,8,1>UD { align1 1Q compacted };
mov(8) g6<1>D 1065353216D { align1 1Q };
mul.sat(16) g84<1>F g45<8,8,1>F g82<8,8,1>F { align1 1H compacted };
mul.sat(16) g86<1>F g47<8,8,1>F g82<8,8,1>F { align1 1H compacted };
mul.sat(16) g88<1>F g49<8,8,1>F g82<8,8,1>F { align1 1H compacted };
add(16) g90<1>F -g84<8,8,1>F 0x3f400000F /* 0.75F */ { align1 1H };
add(16) g92<1>F -g86<8,8,1>F 0x3f400000F /* 0.75F */ { align1 1H };
add(16) g94<1>F -g88<8,8,1>F 0x3f400000F /* 0.75F */ { align1 1H };
mul(16) g96<1>F g90<8,8,1>F 0x41160a50F /* 9.37752F */ { align1 1H };
mul(16) g98<1>F g92<8,8,1>F 0x41160a50F /* 9.37752F */ { align1 1H };
mul(16) g100<1>F g94<8,8,1>F 0x41160a50F /* 9.37752F */ { align1 1H };
math exp(16) g102<1>F g96<8,8,1>F null<8,8,1>F { align1 1H compacted };
math exp(16) g104<1>F g98<8,8,1>F null<8,8,1>F { align1 1H compacted };
math exp(16) g106<1>F g100<8,8,1>F null<8,8,1>F { align1 1H compacted };
add(16) g108<1>F g102<8,8,1>F 0x3f800000F /* 1F */ { align1 1H };
add(16) g110<1>F g104<8,8,1>F 0x3f800000F /* 1F */ { align1 1H };
add(16) g112<1>F g106<8,8,1>F 0x3f800000F /* 1F */ { align1 1H };
math inv(16) g114<1>F g108<8,8,1>F null<8,8,1>F { align1 1H compacted };
math inv(16) g116<1>F g110<8,8,1>F null<8,8,1>F { align1 1H compacted };
math inv(16) g118<1>F g112<8,8,1>F null<8,8,1>F { align1 1H compacted };
add(16) g120<1>F g114<8,8,1>F 0xbbf8487cF /* -0.007577F */ { align1 1H };
add(16) g122<1>F g116<8,8,1>F 0xbbf8487cF /* -0.007577F */ { align1 1H };
add(16) g124<1>F g118<8,8,1>F 0xbbf8487cF /* -0.007577F */ { align1 1H };
mul.sat(16) g126<1>F g120<8,8,1>F 0x3f9a9b5fF /* 1.20787F */ { align1 1H };
mul.sat(16) g9<1>F g122<8,8,1>F 0x3f9a9b5fF /* 1.20787F */ { align1 1H };
mul.sat(16) g11<1>F g124<8,8,1>F 0x3f9a9b5fF /* 1.20787F */ { align1 1H };
math pow(16) g17<1>F g126<8,8,1>F 0x3ed55555F /* 0.416667F */ { align1 1H };
nop ;
math pow(16) g19<1>F g9<8,8,1>F 0x3ed55555F /* 0.416667F */ { align1 1H };
nop ;
math pow(16) g21<1>F g11<8,8,1>F 0x3ed55555F /* 0.416667F */ { align1 1H };
mov(8) g3<1>D g17<8,8,1>D { align1 1Q compacted };
mov(8) g4<1>D g19<8,8,1>D { align1 1Q compacted };
mov(8) g5<1>D g21<8,8,1>D { align1 1Q compacted };
sends(8) nullUD g13UD g3UD 0x04035004 0x00000100 dp data 1 MsgDesc: ( DC typed surface write, Surface = 4, SIMD16, Mask = 0x0) mlen 2 ex_mlen 4 rlen 0 { align1 1Q };
mov(8) g15<1>UD g8<8,8,1>UD { align1 2Q compacted };
mov(8) g16<1>UD g2<8,8,1>UD { align1 2Q compacted };
mov(8) g7<1>D g18<8,8,1>D { align1 2Q compacted };
mov(8) g9<1>D g22<8,8,1>D { align1 2Q compacted };
mov(8) g10<1>D 1065353216D { align1 2Q };
mov(8) g8<1>D g20<8,8,1>D { align1 2Q compacted };
sends(8) nullUD g15UD g7UD 0x04036004 0x00000100 dp data 1 MsgDesc: ( DC typed surface write, Surface = 4, SIMD8, Mask = 0x0) mlen 2 ex_mlen 4 rlen 0 { align1 2Q };
mov(8) g127<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted };
send(16) null<1>UW g127<8,8,1>UW 0x82000010 thread_spawner MsgDesc: mlen 1 rlen 0 { align1 WE_all 1H EOT };
END B65
AV: 00:00:00 / 00:00:08 (0%) A-V: -0.017 DS: 1.000/0
[... repeated terminal status-line updates elided: playback advances through the 8-second clip while the frame-drop counter climbs steadily from Dropped: 1 to Dropped: 123 ...]
AV: 00:00:07 / 00:00:08 (99%) A-V: 0.000 DS: 0.940/37 Dropped: 123
Exiting... (End of file)