Number of devices: 1 GPU: AMD RADV POLARIS10 (LLVM 9.0.0) --> /home/hakzsam/programming/vkpipeline-db-private/no_man_sky/640.pipeline_test shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var shader_in INTERP_MODE_NONE vec4 mkLocalPositionVec4 (VERT_ATTRIB_GENERIC0, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 .field0 (VARYING_SLOT_POS, 0, 0) decl_var shader_out INTERP_MODE_NONE float Out.field1 (VARYING_SLOT_VAR0.x, 0, 0) decl_var shader_out INTERP_MODE_NONE float Out.field1@0 (VARYING_SLOT_VAR0.y, 0, 0) decl_var shader_out INTERP_MODE_NONE float Out.field1@1 (VARYING_SLOT_VAR0.z, 0, 0) decl_var shader_out INTERP_MODE_NONE float Out.field3 (VARYING_SLOT_VAR0.w, 0, 0) decl_var shader_out INTERP_MODE_NONE float Out.field3@2 (VARYING_SLOT_VAR1.x, 0, 0) decl_var shader_out INTERP_MODE_NONE float Out.field3@3 (VARYING_SLOT_VAR1.y, 0, 0) decl_function main (0 params) impl main { decl_var INTERP_MODE_NONE vec4 out@Out.field3-temp decl_var INTERP_MODE_NONE vec4 out@.field0-temp decl_var INTERP_MODE_NONE vec4 in@mkLocalPositionVec4-temp decl_var INTERP_MODE_NONE vec4 out@Out.field2-temp decl_var INTERP_MODE_NONE vec4 out@Out.field1-temp decl_var INTERP_MODE_NONE float Out.field2 decl_var INTERP_MODE_NONE float Out.field2@4 decl_var INTERP_MODE_NONE float Out.field2@5 decl_var INTERP_MODE_NONE float Out.field1@6 decl_var INTERP_MODE_NONE float Out.field3@7 decl_var INTERP_MODE_NONE float Out.field2@8 block block_0: /* preds: */ vec1 32 ssa_0 = deref_var &mkLocalPositionVec4 (shader_in vec4) vec4 32 ssa_1 = intrinsic load_deref (ssa_0) (0) /* access=0 */ vec1 32 ssa_2 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_3 = intrinsic vulkan_resource_index (ssa_2) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_4 = load_const (0x00000460 /* 0.000000 */) vec1 32 ssa_5 = load_const (0x00000470 /* 0.000000 */) vec1 32 ssa_6 = load_const (0x00000480 /* 0.000000 */) vec1 32 ssa_7 = load_const (0x00000490 /* 0.000000 */) vec4 32 ssa_8 = intrinsic load_ubo (ssa_3, ssa_4) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_9 = fmul ssa_8.x, ssa_1.x vec1 32 ssa_10 = fmul ssa_8.y, ssa_1.x vec1 32 ssa_11 = fmul ssa_8.z, ssa_1.x vec1 32 ssa_12 = fmul ssa_8.w, ssa_1.x vec4 32 ssa_13 = intrinsic load_ubo (ssa_3, ssa_5) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_14 = fmul ssa_13.x, ssa_1.y vec1 32 ssa_15 = fmul ssa_13.y, ssa_1.y vec1 32 ssa_16 = fmul ssa_13.z, ssa_1.y vec1 32 ssa_17 = fmul ssa_13.w, ssa_1.y vec1 32 ssa_18 = fadd ssa_9, ssa_14 vec1 32 ssa_19 = fadd ssa_10, ssa_15 vec1 32 ssa_20 = fadd ssa_11, ssa_16 vec1 32 ssa_21 = fadd ssa_12, ssa_17 vec4 32 ssa_22 = intrinsic load_ubo (ssa_3, ssa_6) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_23 = fmul ssa_22.x, ssa_1.z vec1 32 ssa_24 = fmul ssa_22.y, ssa_1.z vec1 32 ssa_25 = fmul ssa_22.z, ssa_1.z vec1 32 ssa_26 = fmul ssa_22.w, ssa_1.z vec1 32 ssa_27 = fadd ssa_18, ssa_23 vec1 32 ssa_28 = fadd ssa_19, ssa_24 vec1 32 ssa_29 = fadd ssa_20, ssa_25 vec1 32 ssa_30 = fadd ssa_21, ssa_26 vec4 32 ssa_31 = intrinsic load_ubo (ssa_3, ssa_7) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_32 = fadd ssa_27, ssa_31.x vec1 32 ssa_33 = fadd ssa_28, ssa_31.y vec1 32 ssa_34 = fadd ssa_29, ssa_31.z vec1 32 ssa_35 = fadd ssa_30, ssa_31.w vec1 32 ssa_36 = load_const (0x000004a0 /* 0.000000 */) vec1 32 ssa_37 = load_const (0x000004b0 /* 0.000000 */) vec1 32 ssa_38 = load_const (0x000004c0 /* 0.000000 */) vec1 32 
ssa_39 = load_const (0x000004d0 /* 0.000000 */) vec4 32 ssa_40 = intrinsic load_ubo (ssa_3, ssa_36) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_41 = fmul ssa_40.x, ssa_32 vec1 32 ssa_42 = fmul ssa_40.y, ssa_32 vec1 32 ssa_43 = fmul ssa_40.z, ssa_32 vec1 32 ssa_44 = fmul ssa_40.w, ssa_32 vec4 32 ssa_45 = intrinsic load_ubo (ssa_3, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_46 = fmul ssa_45.x, ssa_33 vec1 32 ssa_47 = fmul ssa_45.y, ssa_33 vec1 32 ssa_48 = fmul ssa_45.z, ssa_33 vec1 32 ssa_49 = fmul ssa_45.w, ssa_33 vec1 32 ssa_50 = fadd ssa_41, ssa_46 vec1 32 ssa_51 = fadd ssa_42, ssa_47 vec1 32 ssa_52 = fadd ssa_43, ssa_48 vec1 32 ssa_53 = fadd ssa_44, ssa_49 vec4 32 ssa_54 = intrinsic load_ubo (ssa_3, ssa_38) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_55 = fmul ssa_54.x, ssa_34 vec1 32 ssa_56 = fmul ssa_54.y, ssa_34 vec1 32 ssa_57 = fmul ssa_54.z, ssa_34 vec1 32 ssa_58 = fmul ssa_54.w, ssa_34 vec1 32 ssa_59 = fadd ssa_50, ssa_55 vec1 32 ssa_60 = fadd ssa_51, ssa_56 vec1 32 ssa_61 = fadd ssa_52, ssa_57 vec1 32 ssa_62 = fadd ssa_53, ssa_58 vec4 32 ssa_63 = intrinsic load_ubo (ssa_3, ssa_39) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_64 = fmul ssa_63.x, ssa_35 vec1 32 ssa_65 = fmul ssa_63.y, ssa_35 vec1 32 ssa_66 = fmul ssa_63.z, ssa_35 vec1 32 ssa_67 = fmul ssa_63.w, ssa_35 vec1 32 ssa_68 = fadd ssa_59, ssa_64 vec1 32 ssa_69 = fadd ssa_60, ssa_65 vec1 32 ssa_70 = fadd ssa_61, ssa_66 vec1 32 ssa_71 = fadd ssa_62, ssa_67 vec1 32 ssa_72 = deref_var &Out.field1 (shader_out float) intrinsic store_deref (ssa_72, ssa_32) (1, 0) /* wrmask=x */ /* access=0 */ vec1 32 ssa_73 = deref_var &Out.field1@0 (shader_out float) intrinsic store_deref (ssa_73, ssa_33) (1, 0) /* wrmask=x */ /* access=0 */ vec1 32 ssa_74 = deref_var &Out.field1@1 (shader_out float) intrinsic store_deref (ssa_74, ssa_34) (1, 0) /* wrmask=x */ /* access=0 */ vec4 32 ssa_75 = vec4 ssa_68, ssa_69, ssa_70, ssa_71 vec1 32 ssa_76 = deref_var &Out.field3 (shader_out float) intrinsic store_deref (ssa_76, ssa_68) (1, 0) /* wrmask=x */ /* access=0 */ vec1 32 ssa_77 = deref_var &Out.field3@2 (shader_out float) intrinsic store_deref (ssa_77, ssa_69) (1, 0) /* wrmask=x */ /* access=0 */ vec1 32 ssa_78 = deref_var &Out.field3@3 (shader_out float) intrinsic store_deref (ssa_78, ssa_71) (1, 0) /* wrmask=x */ /* access=0 */ vec1 32 ssa_79 = deref_var &.field0 (shader_out vec4) intrinsic store_deref (ssa_79, ssa_75) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_1 */ block block_1: } shader: MESA_SHADER_FRAGMENT inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE sampler2D gBufferMap (429, 0, 0) decl_var shader_in INTERP_MODE_NONE float In.field1 (VARYING_SLOT_VAR0.x, 0, 0) decl_var shader_in INTERP_MODE_NONE float In.field1@0 (VARYING_SLOT_VAR0.y, 0, 0) decl_var shader_in INTERP_MODE_NONE float In.field1@1 (VARYING_SLOT_VAR0.z, 0, 0) decl_var shader_in INTERP_MODE_NONE float In.field3 (VARYING_SLOT_VAR0.w, 0, 0) decl_var shader_in INTERP_MODE_NONE float In.field3@2 (VARYING_SLOT_VAR1.x, 0, 0) decl_var shader_in INTERP_MODE_NONE float In.field3@3 (VARYING_SLOT_VAR1.y, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 out_color0 (FRAG_RESULT_DATA0, 0, 0) decl_function main (0 params) impl main { decl_var INTERP_MODE_NONE vec4 out@out_color0-temp block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */) 
vec1 32 ssa_2 = load_const (0x3f8a3d71 /* 1.080000 */) vec1 32 ssa_3 = load_const (0x3f666666 /* 0.900000 */) vec1 32 ssa_4 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_5 = load_const (0x3f733333 /* 0.950000 */) vec1 32 ssa_6 = load_const (0x3f400000 /* 0.750000 */) vec1 32 ssa_7 = load_const (0x3727c5ac /* 0.000010 */) vec1 32 ssa_8 = load_const (0x3e99999a /* 0.300000 */) vec1 32 ssa_9 = load_const (0x41980000 /* 19.000000 */) vec1 32 ssa_10 = load_const (0x41f80000 /* 31.000000 */) vec1 32 ssa_11 = load_const (0x41500000 /* 13.000000 */) vec1 32 ssa_12 = load_const (0x41b80000 /* 23.000000 */) vec1 32 ssa_13 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_14 = load_const (0x3e800000 /* 0.250000 */) vec1 32 ssa_15 = load_const (0x3951b717 /* 0.000200 */) vec1 32 ssa_16 = load_const (0x3fb6db6e /* 1.428571 */) vec1 32 ssa_17 = load_const (0x365fb23b /* 0.000003 */) vec1 32 ssa_18 = load_const (0x42140000 /* 37.000000 */) vec1 32 ssa_19 = load_const (0x41880000 /* 17.000000 */) vec1 32 ssa_20 = load_const (0x3f19999a /* 0.600000 */) vec1 32 ssa_21 = load_const (0x422c0000 /* 43.000000 */) vec1 32 ssa_22 = load_const (0x40e00000 /* 7.000000 */) vec1 32 ssa_23 = load_const (0x41a00000 /* 20.000000 */) vec1 32 ssa_24 = load_const (0x41300000 /* 11.000000 */) vec1 32 ssa_25 = load_const (0x42c80000 /* 100.000000 */) vec1 32 ssa_26 = load_const (0x358637bd /* 0.000001 */) vec1 32 ssa_27 = load_const (0x00000014 /* 0.000000 */) vec1 32 ssa_28 = load_const (0x3f7fbe77 /* 0.999000 */) vec1 32 ssa_29 = load_const (0x40000000 /* 2.000000 */) vec1 32 ssa_30 = load_const (0x3a83126f /* 0.001000 */) vec1 32 ssa_31 = load_const (0x3fb33333 /* 1.400000 */) vec1 32 ssa_32 = load_const (0x3f99999a /* 1.200000 */) vec1 32 ssa_33 = load_const (0x3fe66666 /* 1.800000 */) vec1 32 ssa_34 = intrinsic vulkan_resource_index (ssa_0) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_35 = load_const (0x000003c0 /* 0.000000 */) vec1 32 ssa_36 = intrinsic load_ubo (ssa_34, ssa_35) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_37 = load_const (0x000003c4 /* 0.000000 */) vec1 32 ssa_38 = intrinsic load_ubo (ssa_34, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_39 = load_const (0x000003c8 /* 0.000000 */) vec1 32 ssa_40 = load_const (0x000003cc /* 0.000000 */) vec1 32 ssa_41 = intrinsic load_ubo (ssa_34, ssa_40) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_42 = load_const (0x00000450 /* 0.000000 */) vec1 32 ssa_43 = load_const (0x0000045c /* 0.000000 */) vec1 32 ssa_44 = intrinsic load_ubo (ssa_34, ssa_43) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_45 = load_const (0x00000020 /* 0.000000 */) vec1 32 ssa_46 = load_const (0x00000250 /* 0.000000 */) vec4 32 ssa_47 = intrinsic load_ubo (ssa_34, ssa_46) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_48 = fmul ssa_47.x, ssa_47.x vec1 32 ssa_49 = fmul ssa_47.y, ssa_47.y vec1 32 ssa_50 = fadd ssa_48, ssa_49 vec1 32 ssa_51 = fmul ssa_47.z, ssa_47.z vec1 32 ssa_52 = fadd ssa_50, ssa_51 vec1 32 ssa_53 = frsq ssa_52 vec1 32 ssa_54 = fmul ssa_47.x, ssa_53 vec1 32 ssa_55 = fmul ssa_47.y, ssa_53 vec1 32 ssa_56 = fmul ssa_47.z, ssa_53 vec1 32 ssa_57 = load_const (0x00000470 /* 0.000000 */) vec4 32 ssa_58 = intrinsic load_ubo (ssa_34, ssa_57) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_59 = fmul ssa_58.x, ssa_58.x vec1 32 ssa_60 = fmul ssa_58.y, ssa_58.y vec1 32 
ssa_61 = fadd ssa_59, ssa_60 vec1 32 ssa_62 = fmul ssa_58.z, ssa_58.z vec1 32 ssa_63 = fadd ssa_61, ssa_62 vec1 32 ssa_64 = frsq ssa_63 vec1 32 ssa_65 = fmul ssa_58.x, ssa_64 vec1 32 ssa_66 = fmul ssa_58.y, ssa_64 vec1 32 ssa_67 = fmul ssa_58.z, ssa_64 vec1 32 ssa_68 = deref_var &In.field1 (shader_in float) vec1 32 ssa_69 = intrinsic load_deref (ssa_68) (0) /* access=0 */ vec1 32 ssa_70 = deref_var &In.field1@0 (shader_in float) vec1 32 ssa_71 = intrinsic load_deref (ssa_70) (0) /* access=0 */ vec1 32 ssa_72 = deref_var &In.field1@1 (shader_in float) vec1 32 ssa_73 = intrinsic load_deref (ssa_72) (0) /* access=0 */ vec3 32 ssa_74 = intrinsic load_ubo (ssa_34, ssa_45) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_75 = fneg ssa_74.x vec1 32 ssa_76 = fadd ssa_69, ssa_75 vec1 32 ssa_77 = fneg ssa_74.y vec1 32 ssa_78 = fadd ssa_71, ssa_77 vec1 32 ssa_79 = fneg ssa_74.z vec1 32 ssa_80 = fadd ssa_73, ssa_79 vec1 32 ssa_81 = fmul ssa_76, ssa_76 vec1 32 ssa_82 = fmul ssa_78, ssa_78 vec1 32 ssa_83 = fadd ssa_81, ssa_82 vec1 32 ssa_84 = fmul ssa_80, ssa_80 vec1 32 ssa_85 = fadd ssa_83, ssa_84 vec1 32 ssa_86 = frsq ssa_85 vec1 32 ssa_87 = fmul ssa_76, ssa_86 vec1 32 ssa_88 = fmul ssa_78, ssa_86 vec1 32 ssa_89 = fmul ssa_80, ssa_86 vec1 32 ssa_90 = fmul ssa_65, ssa_87 vec1 32 ssa_91 = fmul ssa_66, ssa_88 vec1 32 ssa_92 = fadd ssa_90, ssa_91 vec1 32 ssa_93 = fmul ssa_67, ssa_89 vec1 32 ssa_94 = fadd ssa_92, ssa_93 vec1 32 ssa_95 = flt32 ssa_94, ssa_0 vec1 32 ssa_96 = fneg ssa_65 vec1 32 ssa_97 = fneg ssa_66 vec1 32 ssa_98 = fneg ssa_67 vec1 32 ssa_99 = fneg ssa_94 vec1 32 ssa_100 = b32csel ssa_95, ssa_96, ssa_65 vec1 32 ssa_101 = b32csel ssa_95, ssa_97, ssa_66 vec1 32 ssa_102 = b32csel ssa_95, ssa_98, ssa_67 vec1 32 ssa_103 = b32csel ssa_95, ssa_99, ssa_94 vec1 32 ssa_104 = feq32 ssa_103, ssa_0 intrinsic discard_if (ssa_104) () vec4 32 ssa_105 = intrinsic load_ubo (ssa_34, ssa_42) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_106 = fneg ssa_105.x vec1 32 ssa_107 = fadd ssa_74.x, ssa_106 vec1 32 ssa_108 = fneg ssa_105.y vec1 32 ssa_109 = fadd ssa_74.y, ssa_108 vec1 32 ssa_110 = fneg ssa_105.z vec1 32 ssa_111 = fadd ssa_74.z, ssa_110 vec1 32 ssa_112 = fmul ssa_107, ssa_100 vec1 32 ssa_113 = fmul ssa_109, ssa_101 vec1 32 ssa_114 = fadd ssa_112, ssa_113 vec1 32 ssa_115 = fmul ssa_111, ssa_102 vec1 32 ssa_116 = fadd ssa_114, ssa_115 vec1 32 ssa_117 = intrinsic load_ubo (ssa_34, ssa_39) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_118 = fmul ssa_100, ssa_117 vec1 32 ssa_119 = fmul ssa_101, ssa_117 vec1 32 ssa_120 = fmul ssa_102, ssa_117 vec1 32 ssa_121 = fneg ssa_118 vec1 32 ssa_122 = fadd ssa_105.x, ssa_121 vec1 32 ssa_123 = fneg ssa_119 vec1 32 ssa_124 = fadd ssa_105.y, ssa_123 vec1 32 ssa_125 = fneg ssa_120 vec1 32 ssa_126 = fadd ssa_105.z, ssa_125 vec1 32 ssa_127 = fneg ssa_122 vec1 32 ssa_128 = fadd ssa_74.x, ssa_127 vec1 32 ssa_129 = fneg ssa_124 vec1 32 ssa_130 = fadd ssa_74.y, ssa_129 vec1 32 ssa_131 = fneg ssa_126 vec1 32 ssa_132 = fadd ssa_74.z, ssa_131 vec1 32 ssa_133 = fmul ssa_100, ssa_128 vec1 32 ssa_134 = fmul ssa_101, ssa_130 vec1 32 ssa_135 = fadd ssa_133, ssa_134 vec1 32 ssa_136 = fmul ssa_102, ssa_132 vec1 32 ssa_137 = fadd ssa_135, ssa_136 vec1 32 ssa_138 = frcp ssa_103 vec1 32 ssa_139 = fmul ssa_137, ssa_138 vec1 32 ssa_140 = fneg ssa_139 vec1 32 ssa_141 = flt32 ssa_0, ssa_140 /* succs: block_1 block_2 */ if ssa_141 { block block_1: /* preds: block_0 */ vec1 32 ssa_142 = fmul ssa_139, ssa_87 vec1 32 
ssa_143 = fneg ssa_142 vec1 32 ssa_144 = fmul ssa_139, ssa_88 vec1 32 ssa_145 = fneg ssa_144 vec1 32 ssa_146 = fmul ssa_139, ssa_89 vec1 32 ssa_147 = fneg ssa_146 vec1 32 ssa_148 = fadd ssa_128, ssa_143 vec1 32 ssa_149 = fadd ssa_130, ssa_145 vec1 32 ssa_150 = fadd ssa_132, ssa_147 vec1 32 ssa_151 = fadd ssa_148, ssa_122 vec1 32 ssa_152 = fadd ssa_149, ssa_124 vec1 32 ssa_153 = fadd ssa_150, ssa_126 vec1 32 ssa_154 = fadd ssa_151, ssa_75 vec1 32 ssa_155 = fadd ssa_152, ssa_77 vec1 32 ssa_156 = fadd ssa_153, ssa_79 vec1 32 ssa_157 = fmul ssa_154, ssa_154 vec1 32 ssa_158 = fmul ssa_155, ssa_155 vec1 32 ssa_159 = fadd ssa_157, ssa_158 vec1 32 ssa_160 = fmul ssa_156, ssa_156 vec1 32 ssa_161 = fadd ssa_159, ssa_160 vec1 32 ssa_162 = fsqrt ssa_161 /* succs: block_3 */ } else { block block_2: /* preds: block_0 */ /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ vec1 32 ssa_163 = phi block_1: ssa_162, block_2: ssa_0 vec1 32 ssa_164 = fabs ssa_116 vec1 32 ssa_165 = flt32 ssa_164, ssa_117 /* succs: block_4 block_5 */ if ssa_165 { block block_4: /* preds: block_3 */ vec1 32 ssa_166 = fmul ssa_107, ssa_107 vec1 32 ssa_167 = fmul ssa_109, ssa_109 vec1 32 ssa_168 = fadd ssa_166, ssa_167 vec1 32 ssa_169 = fmul ssa_111, ssa_111 vec1 32 ssa_170 = fadd ssa_168, ssa_169 vec1 32 ssa_171 = fsqrt ssa_170 vec1 32 ssa_172 = fneg ssa_171 vec1 32 ssa_173 = fadd ssa_36, ssa_172 vec1 32 ssa_174 = fmax ssa_0, ssa_173 /* succs: block_9 */ } else { block block_5: /* preds: block_3 */ vec1 32 ssa_175 = fge32 ssa_0, ssa_140 intrinsic discard_if (ssa_175) () vec1 32 ssa_176 = fadd ssa_105.x, ssa_118 vec1 32 ssa_177 = fadd ssa_105.y, ssa_119 vec1 32 ssa_178 = fadd ssa_105.z, ssa_120 vec1 32 ssa_179 = fneg ssa_176 vec1 32 ssa_180 = fadd ssa_74.x, ssa_179 vec1 32 ssa_181 = fneg ssa_177 vec1 32 ssa_182 = fadd ssa_74.y, ssa_181 vec1 32 ssa_183 = fneg ssa_178 vec1 32 ssa_184 = fadd ssa_74.z, ssa_183 vec1 32 ssa_185 = fmul ssa_100, ssa_180 vec1 32 ssa_186 = fmul ssa_101, ssa_182 vec1 32 ssa_187 = fadd ssa_185, ssa_186 vec1 32 ssa_188 = fmul ssa_102, ssa_184 vec1 32 ssa_189 = fadd ssa_187, ssa_188 vec1 32 ssa_190 = fmul ssa_189, ssa_138 vec1 32 ssa_191 = fneg ssa_190 vec1 32 ssa_192 = flt32 ssa_0, ssa_191 /* succs: block_6 block_7 */ if ssa_192 { block block_6: /* preds: block_5 */ vec1 32 ssa_193 = fmul ssa_139, ssa_87 vec1 32 ssa_194 = fneg ssa_193 vec1 32 ssa_195 = fmul ssa_139, ssa_88 vec1 32 ssa_196 = fneg ssa_195 vec1 32 ssa_197 = fmul ssa_139, ssa_89 vec1 32 ssa_198 = fneg ssa_197 vec1 32 ssa_199 = fadd ssa_180, ssa_194 vec1 32 ssa_200 = fadd ssa_182, ssa_196 vec1 32 ssa_201 = fadd ssa_184, ssa_198 vec1 32 ssa_202 = fmul ssa_199, ssa_199 vec1 32 ssa_203 = fmul ssa_200, ssa_200 vec1 32 ssa_204 = fadd ssa_202, ssa_203 vec1 32 ssa_205 = fmul ssa_201, ssa_201 vec1 32 ssa_206 = fadd ssa_204, ssa_205 vec1 32 ssa_207 = fsqrt ssa_206 vec1 32 ssa_208 = fmul ssa_190, ssa_87 vec1 32 ssa_209 = fneg ssa_208 vec1 32 ssa_210 = fmul ssa_190, ssa_88 vec1 32 ssa_211 = fneg ssa_210 vec1 32 ssa_212 = fmul ssa_190, ssa_89 vec1 32 ssa_213 = fneg ssa_212 vec1 32 ssa_214 = fadd ssa_180, ssa_209 vec1 32 ssa_215 = fadd ssa_182, ssa_211 vec1 32 ssa_216 = fadd ssa_184, ssa_213 vec1 32 ssa_217 = fmul ssa_214, ssa_214 vec1 32 ssa_218 = fmul ssa_215, ssa_215 vec1 32 ssa_219 = fadd ssa_217, ssa_218 vec1 32 ssa_220 = fmul ssa_216, ssa_216 vec1 32 ssa_221 = fadd ssa_219, ssa_220 vec1 32 ssa_222 = fsqrt ssa_221 vec1 32 ssa_223 = fmin ssa_207, ssa_222 vec1 32 ssa_224 = flt32 ssa_38, ssa_223 vec1 32 ssa_225 = fmax ssa_207, ssa_222 vec1 32 
ssa_226 = flt32 ssa_225, ssa_36 vec1 32 ssa_227 = ior ssa_224, ssa_226 intrinsic discard_if (ssa_227) () /* succs: block_8 */ } else { block block_7: /* preds: block_5 */ intrinsic discard () () /* succs: block_8 */ } block block_8: /* preds: block_6 block_7 */ /* succs: block_9 */ } block block_9: /* preds: block_4 block_8 */ vec1 32 ssa_228 = phi block_4: ssa_174, block_8: ssa_163 vec1 32 ssa_229 = fmul ssa_87, ssa_228 vec1 32 ssa_230 = fmul ssa_88, ssa_228 vec1 32 ssa_231 = fmul ssa_89, ssa_228 vec1 32 ssa_232 = fadd ssa_74.x, ssa_229 vec1 32 ssa_233 = fadd ssa_74.y, ssa_230 vec1 32 ssa_234 = fadd ssa_74.z, ssa_231 vec1 32 ssa_235 = deref_var &In.field3 (shader_in float) vec1 32 ssa_236 = intrinsic load_deref (ssa_235) (0) /* access=0 */ vec1 32 ssa_237 = deref_var &In.field3@2 (shader_in float) vec1 32 ssa_238 = intrinsic load_deref (ssa_237) (0) /* access=0 */ vec1 32 ssa_239 = deref_var &In.field3@3 (shader_in float) vec1 32 ssa_240 = intrinsic load_deref (ssa_239) (0) /* access=0 */ vec1 32 ssa_241 = frcp ssa_240 vec1 32 ssa_242 = fmul ssa_236, ssa_13 vec1 32 ssa_243 = fmul ssa_242, ssa_241 vec1 32 ssa_244 = fmul ssa_238, ssa_13 vec1 32 ssa_245 = fmul ssa_244, ssa_241 vec1 32 ssa_246 = fadd ssa_243, ssa_13 vec1 32 ssa_247 = fneg ssa_245 vec1 32 ssa_248 = fadd ssa_13, ssa_247 vec2 32 ssa_249 = vec2 ssa_246, ssa_248 vec1 32 ssa_250 = deref_var &gBufferMap (uniform sampler2D) vec4 32 ssa_251 = tex ssa_250 (texture_deref), ssa_250 (sampler_deref), ssa_249 (coord), vec1 32 ssa_252 = load_const (0x00000040 /* 0.000000 */) vec4 32 ssa_253 = intrinsic load_ubo (ssa_34, ssa_252) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_254 = fmul ssa_251.x, ssa_253.y vec1 32 ssa_255 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_256 = intrinsic load_ubo (ssa_34, ssa_255) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_257 = fmul ssa_256.x, ssa_256.x vec1 32 ssa_258 = fmul ssa_256.y, ssa_256.y vec1 32 ssa_259 = fadd ssa_257, ssa_258 vec1 32 ssa_260 = fmul ssa_256.z, ssa_256.z vec1 32 ssa_261 = fadd ssa_259, ssa_260 vec1 32 ssa_262 = frsq ssa_261 vec1 32 ssa_263 = fmul ssa_256.x, ssa_262 vec1 32 ssa_264 = fmul ssa_256.y, ssa_262 vec1 32 ssa_265 = fmul ssa_256.z, ssa_262 vec1 32 ssa_266 = fmul ssa_263, ssa_87 vec1 32 ssa_267 = fneg ssa_266 vec1 32 ssa_268 = fmul ssa_264, ssa_88 vec1 32 ssa_269 = fneg ssa_268 vec1 32 ssa_270 = fadd ssa_267, ssa_269 vec1 32 ssa_271 = fmul ssa_265, ssa_89 vec1 32 ssa_272 = fneg ssa_271 vec1 32 ssa_273 = fadd ssa_270, ssa_272 vec1 32 ssa_274 = fmax ssa_273, ssa_7 vec1 32 ssa_275 = frcp ssa_274 vec1 32 ssa_276 = fmul ssa_254, ssa_275 vec1 32 ssa_277 = fmul ssa_117, ssa_6 vec1 32 ssa_278 = load_const (0xbf4ccccc /* -0.800000 */) vec1 32 ssa_279 = fmul ssa_41, ssa_278 vec1 32 ssa_280 = fadd ssa_279, ssa_33 vec1 32 ssa_281 = load_const (0x40800000 /* 4.000000 */) vec1 32 ssa_282 = fmul ssa_41, ssa_281 vec1 32 ssa_283 = fadd ssa_282, ssa_1 vec1 32 ssa_284 = load_const (0xbe800000 /* -0.250000 */) vec1 32 ssa_285 = fmul ssa_284, ssa_41 vec1 32 ssa_286 = fadd ssa_285, ssa_14 vec1 32 ssa_287 = fmul ssa_107, ssa_107 vec1 32 ssa_288 = fmul ssa_109, ssa_109 vec1 32 ssa_289 = fadd ssa_287, ssa_288 vec1 32 ssa_290 = fmul ssa_111, ssa_111 vec1 32 ssa_291 = fadd ssa_289, ssa_290 vec1 32 ssa_292 = fsqrt ssa_291 vec1 32 ssa_293 = fmul ssa_38, ssa_32 vec1 32 ssa_294 = fadd ssa_44, ssa_293 vec1 32 ssa_295 = fmul ssa_38, ssa_31 vec1 32 ssa_296 = fadd ssa_44, ssa_295 vec1 32 ssa_297 = fneg ssa_294 vec1 32 ssa_298 = fadd ssa_292, 
ssa_297 vec1 32 ssa_299 = fadd ssa_296, ssa_297 vec1 32 ssa_300 = frcp ssa_299 vec1 32 ssa_301 = fmul ssa_298, ssa_300 vec1 32 ssa_302 = fmax ssa_301, ssa_0 vec1 32 ssa_303 = fmin ssa_302, ssa_1 vec1 32 ssa_304 = fneg ssa_41 vec1 32 ssa_305 = fadd ssa_1, ssa_304 vec1 32 ssa_306 = fmax ssa_303, ssa_305 vec1 32 ssa_307 = flt32 ssa_30, ssa_306 /* succs: block_10 block_14 */ if ssa_307 { block block_10: /* preds: block_9 */ vec1 32 ssa_308 = fmul ssa_116, ssa_138 vec1 32 ssa_309 = fneg ssa_308 vec1 32 ssa_310 = flt32 ssa_0, ssa_309 /* succs: block_11 block_12 */ if ssa_310 { block block_11: /* preds: block_10 */ vec1 32 ssa_311 = fmul ssa_308, ssa_87 vec1 32 ssa_312 = fneg ssa_311 vec1 32 ssa_313 = fmul ssa_308, ssa_88 vec1 32 ssa_314 = fneg ssa_313 vec1 32 ssa_315 = fmul ssa_308, ssa_89 vec1 32 ssa_316 = fneg ssa_315 vec1 32 ssa_317 = fadd ssa_107, ssa_312 vec1 32 ssa_318 = fadd ssa_109, ssa_314 vec1 32 ssa_319 = fadd ssa_111, ssa_316 vec1 32 ssa_320 = fadd ssa_317, ssa_105.x vec1 32 ssa_321 = fadd ssa_318, ssa_105.y vec1 32 ssa_322 = fadd ssa_319, ssa_105.z /* succs: block_13 */ } else { block block_12: /* preds: block_10 */ /* succs: block_13 */ } block block_13: /* preds: block_11 block_12 */ vec1 32 ssa_323 = phi block_11: ssa_320, block_12: ssa_232 vec1 32 ssa_324 = phi block_11: ssa_321, block_12: ssa_233 vec1 32 ssa_325 = phi block_11: ssa_322, block_12: ssa_234 vec1 32 ssa_326 = fadd ssa_323, ssa_106 vec1 32 ssa_327 = fadd ssa_324, ssa_108 vec1 32 ssa_328 = fadd ssa_325, ssa_110 vec1 32 ssa_329 = fmul ssa_326, ssa_100 vec1 32 ssa_330 = fmul ssa_327, ssa_101 vec1 32 ssa_331 = fadd ssa_329, ssa_330 vec1 32 ssa_332 = fmul ssa_328, ssa_102 vec1 32 ssa_333 = fadd ssa_331, ssa_332 vec1 32 ssa_334 = fmul ssa_100, ssa_333 vec1 32 ssa_335 = fmul ssa_101, ssa_333 vec1 32 ssa_336 = fmul ssa_102, ssa_333 vec1 32 ssa_337 = fneg ssa_334 vec1 32 ssa_338 = fadd ssa_323, ssa_337 vec1 32 ssa_339 = fneg ssa_335 vec1 32 ssa_340 = fadd ssa_324, ssa_339 vec1 32 ssa_341 = fneg ssa_336 vec1 32 ssa_342 = fadd ssa_325, ssa_341 vec1 32 ssa_343 = fadd ssa_338, ssa_106 vec1 32 ssa_344 = fadd ssa_340, ssa_108 vec1 32 ssa_345 = fadd ssa_342, ssa_110 vec1 32 ssa_346 = fmul ssa_343, ssa_343 vec1 32 ssa_347 = fmul ssa_344, ssa_344 vec1 32 ssa_348 = fadd ssa_346, ssa_347 vec1 32 ssa_349 = fmul ssa_345, ssa_345 vec1 32 ssa_350 = fadd ssa_348, ssa_349 vec1 32 ssa_351 = fsqrt ssa_350 vec1 32 ssa_352 = fneg ssa_36 vec1 32 ssa_353 = fadd ssa_38, ssa_352 vec1 32 ssa_354 = fmul ssa_353, ssa_13 vec1 32 ssa_355 = fadd ssa_354, ssa_36 vec1 32 ssa_356 = frsq ssa_350 vec1 32 ssa_357 = fmul ssa_343, ssa_356 vec1 32 ssa_358 = fmul ssa_344, ssa_356 vec1 32 ssa_359 = fmul ssa_345, ssa_356 vec1 32 ssa_360 = fmul ssa_357, ssa_355 vec1 32 ssa_361 = fmul ssa_358, ssa_355 vec1 32 ssa_362 = fmul ssa_359, ssa_355 vec1 32 ssa_363 = fadd ssa_360, ssa_105.x vec1 32 ssa_364 = fadd ssa_361, ssa_105.y vec1 32 ssa_365 = fadd ssa_362, ssa_105.z vec1 32 ssa_366 = fneg ssa_363 vec1 32 ssa_367 = fadd ssa_323, ssa_366 vec1 32 ssa_368 = fneg ssa_364 vec1 32 ssa_369 = fadd ssa_324, ssa_368 vec1 32 ssa_370 = fneg ssa_365 vec1 32 ssa_371 = fadd ssa_325, ssa_370 vec1 32 ssa_372 = fmul ssa_367, ssa_367 vec1 32 ssa_373 = fmul ssa_369, ssa_369 vec1 32 ssa_374 = fadd ssa_372, ssa_373 vec1 32 ssa_375 = fmul ssa_371, ssa_371 vec1 32 ssa_376 = fadd ssa_374, ssa_375 vec1 32 ssa_377 = fsqrt ssa_376 vec1 32 ssa_378 = fneg ssa_323 vec1 32 ssa_379 = fadd ssa_74.x, ssa_378 vec1 32 ssa_380 = fneg ssa_324 vec1 32 ssa_381 = fadd ssa_74.y, ssa_380 vec1 32 ssa_382 = fneg 
ssa_325 vec1 32 ssa_383 = fadd ssa_74.z, ssa_382 vec1 32 ssa_384 = fmul ssa_379, ssa_379 vec1 32 ssa_385 = fmul ssa_381, ssa_381 vec1 32 ssa_386 = fadd ssa_384, ssa_385 vec1 32 ssa_387 = fmul ssa_383, ssa_383 vec1 32 ssa_388 = fadd ssa_386, ssa_387 vec1 32 ssa_389 = fsqrt ssa_388 vec1 32 ssa_390 = load_const (0x000003b0 /* 0.000000 */) vec1 32 ssa_391 = load_const (0x000003d0 /* 0.000000 */) vec1 32 ssa_392 = load_const (0x000003e0 /* 0.000000 */) vec1 32 ssa_393 = fmul ssa_351, ssa_26 vec4 32 ssa_394 = intrinsic load_ubo (ssa_34, ssa_390) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_395 = fadd ssa_393, ssa_394.w vec1 32 ssa_396 = fadd ssa_395, ssa_25 vec1 32 ssa_397 = fmul ssa_396, ssa_394.x vec1 32 ssa_398 = fmul ssa_397, ssa_24 vec1 32 ssa_399 = fmul ssa_397, ssa_11 vec1 32 ssa_400 = fcos ssa_399 vec1 32 ssa_401 = fadd ssa_398, ssa_400 vec1 32 ssa_402 = fmul ssa_397, ssa_10 vec1 32 ssa_403 = fsin ssa_402 vec1 32 ssa_404 = fadd ssa_401, ssa_403 vec1 32 ssa_405 = fsin ssa_404 vec1 32 ssa_406 = fmul ssa_405, ssa_13 vec1 32 ssa_407 = fadd ssa_406, ssa_13 vec4 32 ssa_408 = intrinsic load_ubo (ssa_34, ssa_391) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_409 = fmul ssa_408.x, ssa_407 vec1 32 ssa_410 = fmul ssa_408.y, ssa_407 vec1 32 ssa_411 = fmul ssa_408.z, ssa_407 vec1 32 ssa_412 = load_const (0x40400000 /* 3.000000 */) vec1 32 ssa_413 = load_const (0x3ecccccd /* 0.400000 */) vec1 32 ssa_414 = fadd ssa_413, ssa_406 vec1 32 ssa_415 = load_const (0x41200000 /* 10.000000 */) vec1 32 ssa_416 = fmul ssa_414, ssa_415 vec1 32 ssa_417 = fmax ssa_416, ssa_0 vec1 32 ssa_418 = fmin ssa_417, ssa_1 vec1 32 ssa_419 = fmul ssa_29, ssa_418 vec1 32 ssa_420 = fneg ssa_419 vec1 32 ssa_421 = fadd ssa_412, ssa_420 vec1 32 ssa_422 = fmul ssa_418, ssa_421 vec1 32 ssa_423 = fmul ssa_418, ssa_13 vec1 32 ssa_424 = fmul ssa_423, ssa_422 vec1 32 ssa_425 = fmul ssa_396, ssa_394.y vec1 32 ssa_426 = fadd ssa_425, ssa_23 vec1 32 ssa_427 = fmul ssa_426, ssa_19 vec1 32 ssa_428 = fmul ssa_426, ssa_22 vec1 32 ssa_429 = fcos ssa_428 vec1 32 ssa_430 = fadd ssa_427, ssa_429 vec1 32 ssa_431 = fmul ssa_426, ssa_21 vec1 32 ssa_432 = fsin ssa_431 vec1 32 ssa_433 = fadd ssa_430, ssa_432 vec1 32 ssa_434 = fsin ssa_433 vec1 32 ssa_435 = fmul ssa_434, ssa_13 vec1 32 ssa_436 = fadd ssa_435, ssa_13 vec4 32 ssa_437 = intrinsic load_ubo (ssa_34, ssa_392) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_438 = fmul ssa_437.x, ssa_436 vec1 32 ssa_439 = fmul ssa_437.y, ssa_436 vec1 32 ssa_440 = fmul ssa_437.z, ssa_436 vec1 32 ssa_441 = fadd ssa_409, ssa_438 vec1 32 ssa_442 = fadd ssa_410, ssa_439 vec1 32 ssa_443 = fadd ssa_411, ssa_440 vec1 32 ssa_444 = fadd ssa_413, ssa_435 vec1 32 ssa_445 = load_const (0x409fffff /* 5.000000 */) vec1 32 ssa_446 = fmul ssa_444, ssa_445 vec1 32 ssa_447 = fmax ssa_446, ssa_0 vec1 32 ssa_448 = fmin ssa_447, ssa_1 vec1 32 ssa_449 = fmul ssa_29, ssa_448 vec1 32 ssa_450 = fneg ssa_449 vec1 32 ssa_451 = fadd ssa_412, ssa_450 vec1 32 ssa_452 = fmul ssa_448, ssa_451 vec1 32 ssa_453 = fmul ssa_448, ssa_20 vec1 32 ssa_454 = fmul ssa_453, ssa_452 vec1 32 ssa_455 = fadd ssa_424, ssa_454 vec1 32 ssa_456 = fmul ssa_396, ssa_408.w vec1 32 ssa_457 = fmul ssa_456, ssa_9 vec1 32 ssa_458 = fmul ssa_456, ssa_19 vec1 32 ssa_459 = fcos ssa_458 vec1 32 ssa_460 = fadd ssa_457, ssa_459 vec1 32 ssa_461 = fsin ssa_460 vec1 32 ssa_462 = fmul ssa_456, ssa_18 vec1 32 ssa_463 = fmul ssa_456, ssa_12 vec1 32 ssa_464 = fsin ssa_463 vec1 32 ssa_465 = fadd 
ssa_462, ssa_464 vec1 32 ssa_466 = fsin ssa_465 vec1 32 ssa_467 = fmul ssa_461, ssa_13 vec1 32 ssa_468 = fmul ssa_467, ssa_466 vec1 32 ssa_469 = fadd ssa_468, ssa_13 vec1 32 ssa_470 = load_const (0xc8435000 /* -200000.000000 */) vec1 32 ssa_471 = fadd ssa_389, ssa_470 vec1 32 ssa_472 = fmul ssa_471, ssa_17 vec1 32 ssa_473 = fmax ssa_472, ssa_0 vec1 32 ssa_474 = fmin ssa_473, ssa_1 vec1 32 ssa_475 = fmul ssa_100, ssa_100 vec1 32 ssa_476 = fmul ssa_101, ssa_101 vec1 32 ssa_477 = fadd ssa_475, ssa_476 vec1 32 ssa_478 = fmul ssa_102, ssa_102 vec1 32 ssa_479 = fadd ssa_477, ssa_478 vec1 32 ssa_480 = frsq ssa_479 vec1 32 ssa_481 = fmul ssa_100, ssa_480 vec1 32 ssa_482 = fmul ssa_101, ssa_480 vec1 32 ssa_483 = fmul ssa_102, ssa_480 vec1 32 ssa_484 = frsq ssa_388 vec1 32 ssa_485 = fmul ssa_379, ssa_484 vec1 32 ssa_486 = fmul ssa_381, ssa_484 vec1 32 ssa_487 = fmul ssa_383, ssa_484 vec1 32 ssa_488 = fmul ssa_481, ssa_485 vec1 32 ssa_489 = fmul ssa_482, ssa_486 vec1 32 ssa_490 = fadd ssa_488, ssa_489 vec1 32 ssa_491 = fmul ssa_483, ssa_487 vec1 32 ssa_492 = fadd ssa_490, ssa_491 vec1 32 ssa_493 = fabs ssa_492 vec1 32 ssa_494 = load_const (0xbe99999a /* -0.300000 */) vec1 32 ssa_495 = fadd ssa_493, ssa_494 vec1 32 ssa_496 = fmul ssa_495, ssa_16 vec1 32 ssa_497 = fmax ssa_496, ssa_0 vec1 32 ssa_498 = fmin ssa_497, ssa_1 vec1 32 ssa_499 = load_const (0xc59c4000 /* -5000.000000 */) vec1 32 ssa_500 = fadd ssa_389, ssa_499 vec1 32 ssa_501 = fmul ssa_500, ssa_15 vec1 32 ssa_502 = fmax ssa_501, ssa_0 vec1 32 ssa_503 = fmin ssa_502, ssa_1 vec1 32 ssa_504 = fneg ssa_498 vec1 32 ssa_505 = fadd ssa_1, ssa_504 vec1 32 ssa_506 = fmul ssa_505, ssa_503 vec1 32 ssa_507 = fmul ssa_469, ssa_14 vec1 32 ssa_508 = fadd ssa_507, ssa_14 vec1 32 ssa_509 = fmax ssa_506, ssa_474 vec1 32 ssa_510 = fneg ssa_469 vec1 32 ssa_511 = fadd ssa_508, ssa_510 vec1 32 ssa_512 = fmul ssa_509, ssa_511 vec1 32 ssa_513 = fadd ssa_512, ssa_469 vec1 32 ssa_514 = fadd ssa_513, ssa_13 vec1 32 ssa_515 = fmul ssa_441, ssa_514 vec1 32 ssa_516 = fmul ssa_442, ssa_514 vec1 32 ssa_517 = fmul ssa_443, ssa_514 vec1 32 ssa_518 = fadd ssa_513, ssa_394.z vec1 32 ssa_519 = fmul ssa_455, ssa_518 vec1 32 ssa_520 = fmul ssa_396, ssa_437.w vec1 32 ssa_521 = fmul ssa_520, ssa_12 vec1 32 ssa_522 = fmul ssa_520, ssa_11 vec1 32 ssa_523 = fcos ssa_522 vec1 32 ssa_524 = fadd ssa_521, ssa_523 vec1 32 ssa_525 = fsin ssa_524 vec1 32 ssa_526 = fmul ssa_520, ssa_10 vec1 32 ssa_527 = fmul ssa_520, ssa_9 vec1 32 ssa_528 = fcos ssa_527 vec1 32 ssa_529 = fadd ssa_526, ssa_528 vec1 32 ssa_530 = fsin ssa_529 vec1 32 ssa_531 = fmul ssa_525, ssa_530 vec1 32 ssa_532 = fmul ssa_531, ssa_8 vec1 32 ssa_533 = load_const (0xc7435000 /* -50000.000000 */) vec1 32 ssa_534 = fadd ssa_389, ssa_533 vec1 32 ssa_535 = fmul ssa_534, ssa_7 vec1 32 ssa_536 = fmax ssa_535, ssa_0 vec1 32 ssa_537 = fmin ssa_536, ssa_1 vec1 32 ssa_538 = fmax ssa_537, ssa_506 vec1 32 ssa_539 = fmul ssa_532, ssa_538 vec1 32 ssa_540 = fneg ssa_539 vec1 32 ssa_541 = fadd ssa_540, ssa_532 vec1 32 ssa_542 = fadd ssa_515, ssa_541 vec1 32 ssa_543 = fadd ssa_516, ssa_541 vec1 32 ssa_544 = fadd ssa_517, ssa_541 vec1 32 ssa_545 = load_const (0xbf400000 /* -0.750000 */) vec1 32 ssa_546 = fadd ssa_519, ssa_545 vec1 32 ssa_547 = fmul ssa_537, ssa_546 vec1 32 ssa_548 = fadd ssa_547, ssa_6 vec1 32 ssa_549 = fmul ssa_326, ssa_54 vec1 32 ssa_550 = fmul ssa_327, ssa_55 vec1 32 ssa_551 = fadd ssa_549, ssa_550 vec1 32 ssa_552 = fmul ssa_328, ssa_56 vec1 32 ssa_553 = fadd ssa_551, ssa_552 vec1 32 ssa_554 = fmul ssa_326, ssa_326 vec1 32 
ssa_555 = fmul ssa_327, ssa_327 vec1 32 ssa_556 = fadd ssa_554, ssa_555 vec1 32 ssa_557 = fmul ssa_328, ssa_328 vec1 32 ssa_558 = fadd ssa_556, ssa_557 vec1 32 ssa_559 = fmul ssa_44, ssa_44 vec1 32 ssa_560 = fneg ssa_559 vec1 32 ssa_561 = fadd ssa_558, ssa_560 vec1 32 ssa_562 = fmin ssa_561, ssa_553 vec1 32 ssa_563 = flt32 ssa_0, ssa_562 vec1 32 ssa_564 = b32csel ssa_563, ssa_0, ssa_5 vec1 32 ssa_565 = fmul ssa_553, ssa_553 vec1 32 ssa_566 = flt32 ssa_565, ssa_561 vec1 32 ssa_567 = b32csel ssa_566, ssa_0, ssa_564 vec1 32 ssa_568 = fneg ssa_567 vec1 32 ssa_569 = fadd ssa_1, ssa_568 vec1 32 ssa_570 = fmax ssa_569, ssa_3 vec1 32 ssa_571 = fneg ssa_570 vec1 32 ssa_572 = fadd ssa_569, ssa_571 vec1 32 ssa_573 = fmul ssa_41, ssa_572 vec1 32 ssa_574 = fadd ssa_573, ssa_570 vec1 32 ssa_575 = fmul ssa_542, ssa_574 vec1 32 ssa_576 = fmul ssa_543, ssa_574 vec1 32 ssa_577 = fmul ssa_544, ssa_574 vec1 32 ssa_578 = frcp ssa_354 vec1 32 ssa_579 = fmul ssa_377, ssa_578 vec1 32 ssa_580 = fmax ssa_579, ssa_0 vec1 32 ssa_581 = fmin ssa_580, ssa_1 vec1 32 ssa_582 = fneg ssa_581 vec1 32 ssa_583 = fadd ssa_1, ssa_582 vec1 32 ssa_584 = fabs ssa_333 vec1 32 ssa_585 = frcp ssa_117 vec1 32 ssa_586 = fmul ssa_584, ssa_585 vec1 32 ssa_587 = fmax ssa_586, ssa_0 vec1 32 ssa_588 = fmin ssa_587, ssa_1 vec1 32 ssa_589 = fneg ssa_588 vec1 32 ssa_590 = fadd ssa_1, ssa_589 vec1 32 ssa_591 = fmul ssa_590, ssa_590 vec1 32 ssa_592 = fmul ssa_591, ssa_583 vec1 32 ssa_593 = fmul ssa_592, ssa_548 vec1 32 ssa_594 = fge32 ssa_276, ssa_228 vec1 32 ssa_595 = b32csel ssa_594, ssa_593, ssa_0 vec1 32 ssa_596 = fmax ssa_575, ssa_0 vec1 32 ssa_597 = fmax ssa_576, ssa_0 vec1 32 ssa_598 = fmax ssa_577, ssa_0 vec1 32 ssa_599 = fmin ssa_596, ssa_1 vec1 32 ssa_600 = fmin ssa_597, ssa_1 vec1 32 ssa_601 = fmin ssa_598, ssa_1 vec1 32 ssa_602 = fmul ssa_599, ssa_593 vec1 32 ssa_603 = fmul ssa_600, ssa_593 vec1 32 ssa_604 = fmul ssa_601, ssa_593 vec1 32 ssa_605 = fmul ssa_595, ssa_29 /* succs: block_15 */ } else { block block_14: /* preds: block_9 */ /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ vec1 32 ssa_606 = phi block_13: ssa_605, block_14: ssa_0 vec1 32 ssa_607 = phi block_13: ssa_602, block_14: ssa_0 vec1 32 ssa_608 = phi block_13: ssa_603, block_14: ssa_0 vec1 32 ssa_609 = phi block_13: ssa_604, block_14: ssa_0 vec1 32 ssa_610 = phi block_13: ssa_593, block_14: ssa_0 vec1 32 ssa_611 = flt32 ssa_306, ssa_28 /* succs: block_16 block_25 */ if ssa_611 { block block_16: /* preds: block_15 */ /* succs: block_17 */ loop { block block_17: /* preds: block_16 block_23 */ vec1 32 ssa_612 = phi block_16: ssa_0, block_23: ssa_917 vec1 32 ssa_613 = phi block_16: ssa_0, block_23: ssa_916 vec1 32 ssa_614 = phi block_16: ssa_0, block_23: ssa_913 vec1 32 ssa_615 = phi block_16: ssa_0, block_23: ssa_914 vec1 32 ssa_616 = phi block_16: ssa_0, block_23: ssa_915 vec1 32 ssa_617 = phi block_16: ssa_0, block_23: ssa_912 vec1 32 ssa_618 = phi block_16: ssa_0, block_23: ssa_918 vec1 32 ssa_619 = ige32 ssa_618, ssa_27 /* succs: block_18 block_19 */ if ssa_619 { block block_18: /* preds: block_17 */ break /* succs: block_24 */ } else { block block_19: /* preds: block_17 */ /* succs: block_20 */ } block block_20: /* preds: block_19 */ vec1 32 ssa_620 = fmul ssa_87, ssa_612 vec1 32 ssa_621 = fmul ssa_88, ssa_612 vec1 32 ssa_622 = fmul ssa_89, ssa_612 vec1 32 ssa_623 = fadd ssa_232, ssa_620 vec1 32 ssa_624 = fadd ssa_233, ssa_621 vec1 32 ssa_625 = fadd ssa_234, ssa_622 vec1 32 ssa_626 = fadd ssa_623, ssa_106 vec1 32 ssa_627 = fadd ssa_624, ssa_108 
vec1 32 ssa_628 = fadd ssa_625, ssa_110 vec1 32 ssa_629 = fmul ssa_626, ssa_100 vec1 32 ssa_630 = fmul ssa_627, ssa_101 vec1 32 ssa_631 = fadd ssa_629, ssa_630 vec1 32 ssa_632 = fmul ssa_628, ssa_102 vec1 32 ssa_633 = fadd ssa_631, ssa_632 vec1 32 ssa_634 = fmul ssa_100, ssa_633 vec1 32 ssa_635 = fmul ssa_101, ssa_633 vec1 32 ssa_636 = fmul ssa_102, ssa_633 vec1 32 ssa_637 = fneg ssa_634 vec1 32 ssa_638 = fadd ssa_623, ssa_637 vec1 32 ssa_639 = fneg ssa_635 vec1 32 ssa_640 = fadd ssa_624, ssa_639 vec1 32 ssa_641 = fneg ssa_636 vec1 32 ssa_642 = fadd ssa_625, ssa_641 vec1 32 ssa_643 = fneg ssa_36 vec1 32 ssa_644 = fadd ssa_38, ssa_643 vec1 32 ssa_645 = fmul ssa_644, ssa_13 vec1 32 ssa_646 = fadd ssa_645, ssa_36 vec1 32 ssa_647 = fadd ssa_638, ssa_106 vec1 32 ssa_648 = fadd ssa_640, ssa_108 vec1 32 ssa_649 = fadd ssa_642, ssa_110 vec1 32 ssa_650 = fmul ssa_647, ssa_647 vec1 32 ssa_651 = fmul ssa_648, ssa_648 vec1 32 ssa_652 = fadd ssa_650, ssa_651 vec1 32 ssa_653 = fmul ssa_649, ssa_649 vec1 32 ssa_654 = fadd ssa_652, ssa_653 vec1 32 ssa_655 = fsqrt ssa_654 vec1 32 ssa_656 = frsq ssa_654 vec1 32 ssa_657 = fmul ssa_647, ssa_656 vec1 32 ssa_658 = fmul ssa_648, ssa_656 vec1 32 ssa_659 = fmul ssa_649, ssa_656 vec1 32 ssa_660 = fmul ssa_657, ssa_646 vec1 32 ssa_661 = fmul ssa_658, ssa_646 vec1 32 ssa_662 = fmul ssa_659, ssa_646 vec1 32 ssa_663 = fadd ssa_660, ssa_105.x vec1 32 ssa_664 = fadd ssa_661, ssa_105.y vec1 32 ssa_665 = fadd ssa_662, ssa_105.z vec1 32 ssa_666 = fneg ssa_663 vec1 32 ssa_667 = fadd ssa_623, ssa_666 vec1 32 ssa_668 = fneg ssa_664 vec1 32 ssa_669 = fadd ssa_624, ssa_668 vec1 32 ssa_670 = fneg ssa_665 vec1 32 ssa_671 = fadd ssa_625, ssa_670 vec1 32 ssa_672 = fmul ssa_667, ssa_667 vec1 32 ssa_673 = fmul ssa_669, ssa_669 vec1 32 ssa_674 = fadd ssa_672, ssa_673 vec1 32 ssa_675 = fmul ssa_671, ssa_671 vec1 32 ssa_676 = fadd ssa_674, ssa_675 vec1 32 ssa_677 = fsqrt ssa_676 vec1 32 ssa_678 = frcp ssa_645 vec1 32 ssa_679 = fmul ssa_677, ssa_678 vec1 32 ssa_680 = fmax ssa_679, ssa_0 vec1 32 ssa_681 = fmin ssa_680, ssa_1 vec1 32 ssa_682 = fneg ssa_681 vec1 32 ssa_683 = fadd ssa_1, ssa_682 vec1 32 ssa_684 = fabs ssa_633 vec1 32 ssa_685 = frcp ssa_117 vec1 32 ssa_686 = fmul ssa_684, ssa_685 vec1 32 ssa_687 = fmax ssa_686, ssa_0 vec1 32 ssa_688 = fmin ssa_687, ssa_1 vec1 32 ssa_689 = fneg ssa_688 vec1 32 ssa_690 = fadd ssa_1, ssa_689 vec1 32 ssa_691 = fmul ssa_690, ssa_690 vec1 32 ssa_692 = fmul ssa_691, ssa_683 vec1 32 ssa_693 = flt32 ssa_0, ssa_692 /* succs: block_21 block_22 */ if ssa_693 { block block_21: /* preds: block_20 */ vec1 32 ssa_694 = fneg ssa_623 vec1 32 ssa_695 = fadd ssa_74.x, ssa_694 vec1 32 ssa_696 = fneg ssa_624 vec1 32 ssa_697 = fadd ssa_74.y, ssa_696 vec1 32 ssa_698 = fneg ssa_625 vec1 32 ssa_699 = fadd ssa_74.z, ssa_698 vec1 32 ssa_700 = fmul ssa_695, ssa_695 vec1 32 ssa_701 = fmul ssa_697, ssa_697 vec1 32 ssa_702 = fadd ssa_700, ssa_701 vec1 32 ssa_703 = fmul ssa_699, ssa_699 vec1 32 ssa_704 = fadd ssa_702, ssa_703 vec1 32 ssa_705 = fsqrt ssa_704 vec1 32 ssa_706 = load_const (0x000003b0 /* 0.000000 */) vec1 32 ssa_707 = load_const (0x000003d0 /* 0.000000 */) vec1 32 ssa_708 = load_const (0x000003e0 /* 0.000000 */) vec1 32 ssa_709 = fmul ssa_655, ssa_26 vec4 32 ssa_710 = intrinsic load_ubo (ssa_34, ssa_706) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_711 = fadd ssa_709, ssa_710.w vec1 32 ssa_712 = fadd ssa_711, ssa_25 vec1 32 ssa_713 = fmul ssa_712, ssa_710.x vec1 32 ssa_714 = fmul ssa_713, ssa_24 vec1 32 ssa_715 = fmul ssa_713, 
ssa_11 vec1 32 ssa_716 = fcos ssa_715 vec1 32 ssa_717 = fadd ssa_714, ssa_716 vec1 32 ssa_718 = fmul ssa_713, ssa_10 vec1 32 ssa_719 = fsin ssa_718 vec1 32 ssa_720 = fadd ssa_717, ssa_719 vec1 32 ssa_721 = fsin ssa_720 vec1 32 ssa_722 = fmul ssa_721, ssa_13 vec1 32 ssa_723 = fadd ssa_722, ssa_13 vec4 32 ssa_724 = intrinsic load_ubo (ssa_34, ssa_707) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_725 = fmul ssa_724.x, ssa_723 vec1 32 ssa_726 = fmul ssa_724.y, ssa_723 vec1 32 ssa_727 = fmul ssa_724.z, ssa_723 vec1 32 ssa_728 = load_const (0x40400000 /* 3.000000 */) vec1 32 ssa_729 = load_const (0x3ecccccd /* 0.400000 */) vec1 32 ssa_730 = fadd ssa_729, ssa_722 vec1 32 ssa_731 = load_const (0x41200000 /* 10.000000 */) vec1 32 ssa_732 = fmul ssa_730, ssa_731 vec1 32 ssa_733 = fmax ssa_732, ssa_0 vec1 32 ssa_734 = fmin ssa_733, ssa_1 vec1 32 ssa_735 = fmul ssa_29, ssa_734 vec1 32 ssa_736 = fneg ssa_735 vec1 32 ssa_737 = fadd ssa_728, ssa_736 vec1 32 ssa_738 = fmul ssa_734, ssa_737 vec1 32 ssa_739 = fmul ssa_734, ssa_13 vec1 32 ssa_740 = fmul ssa_739, ssa_738 vec1 32 ssa_741 = fmul ssa_712, ssa_710.y vec1 32 ssa_742 = fadd ssa_741, ssa_23 vec1 32 ssa_743 = fmul ssa_742, ssa_19 vec1 32 ssa_744 = fmul ssa_742, ssa_22 vec1 32 ssa_745 = fcos ssa_744 vec1 32 ssa_746 = fadd ssa_743, ssa_745 vec1 32 ssa_747 = fmul ssa_742, ssa_21 vec1 32 ssa_748 = fsin ssa_747 vec1 32 ssa_749 = fadd ssa_746, ssa_748 vec1 32 ssa_750 = fsin ssa_749 vec1 32 ssa_751 = fmul ssa_750, ssa_13 vec1 32 ssa_752 = fadd ssa_751, ssa_13 vec4 32 ssa_753 = intrinsic load_ubo (ssa_34, ssa_708) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_754 = fmul ssa_753.x, ssa_752 vec1 32 ssa_755 = fmul ssa_753.y, ssa_752 vec1 32 ssa_756 = fmul ssa_753.z, ssa_752 vec1 32 ssa_757 = fadd ssa_725, ssa_754 vec1 32 ssa_758 = fadd ssa_726, ssa_755 vec1 32 ssa_759 = fadd ssa_727, ssa_756 vec1 32 ssa_760 = fadd ssa_729, ssa_751 vec1 32 ssa_761 = load_const (0x409fffff /* 5.000000 */) vec1 32 ssa_762 = fmul ssa_760, ssa_761 vec1 32 ssa_763 = fmax ssa_762, ssa_0 vec1 32 ssa_764 = fmin ssa_763, ssa_1 vec1 32 ssa_765 = fmul ssa_29, ssa_764 vec1 32 ssa_766 = fneg ssa_765 vec1 32 ssa_767 = fadd ssa_728, ssa_766 vec1 32 ssa_768 = fmul ssa_764, ssa_767 vec1 32 ssa_769 = fmul ssa_764, ssa_20 vec1 32 ssa_770 = fmul ssa_769, ssa_768 vec1 32 ssa_771 = fadd ssa_740, ssa_770 vec1 32 ssa_772 = fmul ssa_712, ssa_724.w vec1 32 ssa_773 = fmul ssa_772, ssa_9 vec1 32 ssa_774 = fmul ssa_772, ssa_19 vec1 32 ssa_775 = fcos ssa_774 vec1 32 ssa_776 = fadd ssa_773, ssa_775 vec1 32 ssa_777 = fsin ssa_776 vec1 32 ssa_778 = fmul ssa_772, ssa_18 vec1 32 ssa_779 = fmul ssa_772, ssa_12 vec1 32 ssa_780 = fsin ssa_779 vec1 32 ssa_781 = fadd ssa_778, ssa_780 vec1 32 ssa_782 = fsin ssa_781 vec1 32 ssa_783 = fmul ssa_777, ssa_13 vec1 32 ssa_784 = fmul ssa_783, ssa_782 vec1 32 ssa_785 = fadd ssa_784, ssa_13 vec1 32 ssa_786 = load_const (0xc8435000 /* -200000.000000 */) vec1 32 ssa_787 = fadd ssa_705, ssa_786 vec1 32 ssa_788 = fmul ssa_787, ssa_17 vec1 32 ssa_789 = fmax ssa_788, ssa_0 vec1 32 ssa_790 = fmin ssa_789, ssa_1 vec1 32 ssa_791 = fmul ssa_100, ssa_100 vec1 32 ssa_792 = fmul ssa_101, ssa_101 vec1 32 ssa_793 = fadd ssa_791, ssa_792 vec1 32 ssa_794 = fmul ssa_102, ssa_102 vec1 32 ssa_795 = fadd ssa_793, ssa_794 vec1 32 ssa_796 = frsq ssa_795 vec1 32 ssa_797 = fmul ssa_100, ssa_796 vec1 32 ssa_798 = fmul ssa_101, ssa_796 vec1 32 ssa_799 = fmul ssa_102, ssa_796 vec1 32 ssa_800 = frsq ssa_704 vec1 32 ssa_801 = fmul ssa_695, ssa_800 
vec1 32 ssa_802 = fmul ssa_697, ssa_800 vec1 32 ssa_803 = fmul ssa_699, ssa_800 vec1 32 ssa_804 = fmul ssa_797, ssa_801 vec1 32 ssa_805 = fmul ssa_798, ssa_802 vec1 32 ssa_806 = fadd ssa_804, ssa_805 vec1 32 ssa_807 = fmul ssa_799, ssa_803 vec1 32 ssa_808 = fadd ssa_806, ssa_807 vec1 32 ssa_809 = fabs ssa_808 vec1 32 ssa_810 = load_const (0xbe99999a /* -0.300000 */) vec1 32 ssa_811 = fadd ssa_809, ssa_810 vec1 32 ssa_812 = fmul ssa_811, ssa_16 vec1 32 ssa_813 = fmax ssa_812, ssa_0 vec1 32 ssa_814 = fmin ssa_813, ssa_1 vec1 32 ssa_815 = load_const (0xc59c4000 /* -5000.000000 */) vec1 32 ssa_816 = fadd ssa_705, ssa_815 vec1 32 ssa_817 = fmul ssa_816, ssa_15 vec1 32 ssa_818 = fmax ssa_817, ssa_0 vec1 32 ssa_819 = fmin ssa_818, ssa_1 vec1 32 ssa_820 = fneg ssa_814 vec1 32 ssa_821 = fadd ssa_1, ssa_820 vec1 32 ssa_822 = fmul ssa_821, ssa_819 vec1 32 ssa_823 = fmul ssa_785, ssa_14 vec1 32 ssa_824 = fadd ssa_823, ssa_14 vec1 32 ssa_825 = fmax ssa_822, ssa_790 vec1 32 ssa_826 = fneg ssa_785 vec1 32 ssa_827 = fadd ssa_824, ssa_826 vec1 32 ssa_828 = fmul ssa_825, ssa_827 vec1 32 ssa_829 = fadd ssa_828, ssa_785 vec1 32 ssa_830 = fadd ssa_829, ssa_13 vec1 32 ssa_831 = fmul ssa_757, ssa_830 vec1 32 ssa_832 = fmul ssa_758, ssa_830 vec1 32 ssa_833 = fmul ssa_759, ssa_830 vec1 32 ssa_834 = fadd ssa_829, ssa_710.z vec1 32 ssa_835 = fmul ssa_771, ssa_834 vec1 32 ssa_836 = fmul ssa_712, ssa_753.w vec1 32 ssa_837 = fmul ssa_836, ssa_12 vec1 32 ssa_838 = fmul ssa_836, ssa_11 vec1 32 ssa_839 = fcos ssa_838 vec1 32 ssa_840 = fadd ssa_837, ssa_839 vec1 32 ssa_841 = fsin ssa_840 vec1 32 ssa_842 = fmul ssa_836, ssa_10 vec1 32 ssa_843 = fmul ssa_836, ssa_9 vec1 32 ssa_844 = fcos ssa_843 vec1 32 ssa_845 = fadd ssa_842, ssa_844 vec1 32 ssa_846 = fsin ssa_845 vec1 32 ssa_847 = fmul ssa_841, ssa_846 vec1 32 ssa_848 = fmul ssa_847, ssa_8 vec1 32 ssa_849 = load_const (0xc7435000 /* -50000.000000 */) vec1 32 ssa_850 = fadd ssa_705, ssa_849 vec1 32 ssa_851 = fmul ssa_850, ssa_7 vec1 32 ssa_852 = fmax ssa_851, ssa_0 vec1 32 ssa_853 = fmin ssa_852, ssa_1 vec1 32 ssa_854 = fmax ssa_853, ssa_822 vec1 32 ssa_855 = fmul ssa_848, ssa_854 vec1 32 ssa_856 = fneg ssa_855 vec1 32 ssa_857 = fadd ssa_856, ssa_848 vec1 32 ssa_858 = fadd ssa_831, ssa_857 vec1 32 ssa_859 = fadd ssa_832, ssa_857 vec1 32 ssa_860 = fadd ssa_833, ssa_857 vec1 32 ssa_861 = load_const (0xbf400000 /* -0.750000 */) vec1 32 ssa_862 = fadd ssa_835, ssa_861 vec1 32 ssa_863 = fmul ssa_853, ssa_862 vec1 32 ssa_864 = fadd ssa_863, ssa_6 vec1 32 ssa_865 = fmul ssa_692, ssa_864 vec1 32 ssa_866 = fmul ssa_626, ssa_54 vec1 32 ssa_867 = fmul ssa_627, ssa_55 vec1 32 ssa_868 = fadd ssa_866, ssa_867 vec1 32 ssa_869 = fmul ssa_628, ssa_56 vec1 32 ssa_870 = fadd ssa_868, ssa_869 vec1 32 ssa_871 = fmul ssa_626, ssa_626 vec1 32 ssa_872 = fmul ssa_627, ssa_627 vec1 32 ssa_873 = fadd ssa_871, ssa_872 vec1 32 ssa_874 = fmul ssa_628, ssa_628 vec1 32 ssa_875 = fadd ssa_873, ssa_874 vec1 32 ssa_876 = fmul ssa_44, ssa_44 vec1 32 ssa_877 = fneg ssa_876 vec1 32 ssa_878 = fadd ssa_875, ssa_877 vec1 32 ssa_879 = fmin ssa_878, ssa_870 vec1 32 ssa_880 = flt32 ssa_0, ssa_879 vec1 32 ssa_881 = b32csel ssa_880, ssa_0, ssa_5 vec1 32 ssa_882 = fmul ssa_870, ssa_870 vec1 32 ssa_883 = flt32 ssa_882, ssa_878 vec1 32 ssa_884 = b32csel ssa_883, ssa_0, ssa_881 vec1 32 ssa_885 = fneg ssa_884 vec1 32 ssa_886 = fadd ssa_1, ssa_885 vec1 32 ssa_887 = fmax ssa_886, ssa_3 vec1 32 ssa_888 = fneg ssa_886 vec1 32 ssa_889 = fadd ssa_887, ssa_888 vec1 32 ssa_890 = fmul ssa_41, ssa_889 vec1 32 ssa_891 = fadd ssa_890, 
ssa_886 vec1 32 ssa_892 = fmul ssa_858, ssa_891 vec1 32 ssa_893 = fmul ssa_859, ssa_891 vec1 32 ssa_894 = fmul ssa_860, ssa_891 vec1 32 ssa_895 = fadd ssa_612, ssa_228 vec1 32 ssa_896 = fge32 ssa_276, ssa_895 vec1 32 ssa_897 = fadd ssa_613, ssa_865 vec1 32 ssa_898 = b32csel ssa_896, ssa_897, ssa_613 vec1 32 ssa_899 = fmax ssa_892, ssa_0 vec1 32 ssa_900 = fmax ssa_893, ssa_0 vec1 32 ssa_901 = fmax ssa_894, ssa_0 vec1 32 ssa_902 = fmin ssa_899, ssa_1 vec1 32 ssa_903 = fmin ssa_900, ssa_1 vec1 32 ssa_904 = fmin ssa_901, ssa_1 vec1 32 ssa_905 = fmul ssa_902, ssa_865 vec1 32 ssa_906 = fmul ssa_903, ssa_865 vec1 32 ssa_907 = fmul ssa_904, ssa_865 vec1 32 ssa_908 = fadd ssa_614, ssa_905 vec1 32 ssa_909 = fadd ssa_615, ssa_906 vec1 32 ssa_910 = fadd ssa_616, ssa_907 vec1 32 ssa_911 = fadd ssa_617, ssa_865 /* succs: block_23 */ } else { block block_22: /* preds: block_20 */ /* succs: block_23 */ } block block_23: /* preds: block_21 block_22 */ vec1 32 ssa_912 = phi block_22: ssa_617, block_21: ssa_911 vec1 32 ssa_913 = phi block_22: ssa_614, block_21: ssa_908 vec1 32 ssa_914 = phi block_22: ssa_615, block_21: ssa_909 vec1 32 ssa_915 = phi block_22: ssa_616, block_21: ssa_910 vec1 32 ssa_916 = phi block_22: ssa_613, block_21: ssa_898 vec1 32 ssa_917 = fadd ssa_612, ssa_277 vec1 32 ssa_918 = iadd ssa_618, ssa_4 /* succs: block_17 */ } block block_24: /* preds: block_18 */ /* succs: block_26 */ } else { block block_25: /* preds: block_15 */ /* succs: block_26 */ } block block_26: /* preds: block_24 block_25 */ vec1 32 ssa_919 = phi block_24: ssa_613, block_25: ssa_0 vec1 32 ssa_920 = phi block_24: ssa_614, block_25: ssa_0 vec1 32 ssa_921 = phi block_24: ssa_615, block_25: ssa_0 vec1 32 ssa_922 = phi block_24: ssa_616, block_25: ssa_0 vec1 32 ssa_923 = phi block_24: ssa_617, block_25: ssa_0 vec1 32 ssa_924 = fmax ssa_923, ssa_610 vec1 32 ssa_925 = fge32 ssa_0, ssa_924 intrinsic discard_if (ssa_925) () vec1 32 ssa_926 = fneg ssa_920 vec1 32 ssa_927 = fadd ssa_607, ssa_926 vec1 32 ssa_928 = fmul ssa_306, ssa_927 vec1 32 ssa_929 = fadd ssa_928, ssa_920 vec1 32 ssa_930 = fneg ssa_921 vec1 32 ssa_931 = fadd ssa_608, ssa_930 vec1 32 ssa_932 = fmul ssa_306, ssa_931 vec1 32 ssa_933 = fadd ssa_932, ssa_921 vec1 32 ssa_934 = fneg ssa_922 vec1 32 ssa_935 = fadd ssa_609, ssa_934 vec1 32 ssa_936 = fmul ssa_306, ssa_935 vec1 32 ssa_937 = fadd ssa_936, ssa_922 vec1 32 ssa_938 = fneg ssa_923 vec1 32 ssa_939 = fadd ssa_610, ssa_938 vec1 32 ssa_940 = fmul ssa_306, ssa_939 vec1 32 ssa_941 = fadd ssa_940, ssa_923 vec1 32 ssa_942 = fneg ssa_919 vec1 32 ssa_943 = fadd ssa_606, ssa_942 vec1 32 ssa_944 = fmul ssa_306, ssa_943 vec1 32 ssa_945 = fadd ssa_944, ssa_919 vec1 32 ssa_946 = fmul ssa_929, ssa_280 vec1 32 ssa_947 = fmul ssa_933, ssa_280 vec1 32 ssa_948 = fmul ssa_937, ssa_280 vec1 32 ssa_949 = frcp ssa_941 vec1 32 ssa_950 = fmul ssa_946, ssa_949 vec1 32 ssa_951 = fmul ssa_947, ssa_949 vec1 32 ssa_952 = fmul ssa_948, ssa_949 vec1 32 ssa_953 = fmax ssa_286, ssa_950 vec1 32 ssa_954 = fmax ssa_286, ssa_951 vec1 32 ssa_955 = fmax ssa_286, ssa_952 vec1 32 ssa_956 = fmul ssa_945, ssa_283 vec1 32 ssa_957 = fmax ssa_956, ssa_0 vec1 32 ssa_958 = fmin ssa_957, ssa_1 vec1 32 ssa_959 = fmin ssa_958, ssa_3 vec1 32 ssa_960 = fadd ssa_105.x, ssa_75 vec1 32 ssa_961 = fadd ssa_105.y, ssa_77 vec1 32 ssa_962 = fadd ssa_105.z, ssa_79 vec1 32 ssa_963 = fmul ssa_960, ssa_960 vec1 32 ssa_964 = fmul ssa_961, ssa_961 vec1 32 ssa_965 = fadd ssa_963, ssa_964 vec1 32 ssa_966 = fmul ssa_962, ssa_962 vec1 32 ssa_967 = fadd ssa_965, ssa_966 vec1 32 
ssa_968 = fsqrt ssa_967 vec1 32 ssa_969 = frsq ssa_967 vec1 32 ssa_970 = fmul ssa_960, ssa_969 vec1 32 ssa_971 = fmul ssa_961, ssa_969 vec1 32 ssa_972 = fmul ssa_962, ssa_969 vec1 32 ssa_973 = fmul ssa_970, ssa_87 vec1 32 ssa_974 = fneg ssa_973 vec1 32 ssa_975 = fmul ssa_971, ssa_88 vec1 32 ssa_976 = fneg ssa_975 vec1 32 ssa_977 = fadd ssa_974, ssa_976 vec1 32 ssa_978 = fmul ssa_972, ssa_89 vec1 32 ssa_979 = fneg ssa_978 vec1 32 ssa_980 = fadd ssa_977, ssa_979 vec1 32 ssa_981 = fmul ssa_972, ssa_101 vec1 32 ssa_982 = fmul ssa_970, ssa_102 vec1 32 ssa_983 = fmul ssa_971, ssa_100 vec1 32 ssa_984 = fmul ssa_971, ssa_102 vec1 32 ssa_985 = fmul ssa_972, ssa_100 vec1 32 ssa_986 = fmul ssa_970, ssa_101 vec1 32 ssa_987 = fneg ssa_981 vec1 32 ssa_988 = fadd ssa_984, ssa_987 vec1 32 ssa_989 = fneg ssa_982 vec1 32 ssa_990 = fadd ssa_985, ssa_989 vec1 32 ssa_991 = fneg ssa_983 vec1 32 ssa_992 = fadd ssa_986, ssa_991 vec1 32 ssa_993 = fmul ssa_988, ssa_988 vec1 32 ssa_994 = fmul ssa_990, ssa_990 vec1 32 ssa_995 = fadd ssa_993, ssa_994 vec1 32 ssa_996 = fmul ssa_992, ssa_992 vec1 32 ssa_997 = fadd ssa_995, ssa_996 vec1 32 ssa_998 = frsq ssa_997 vec1 32 ssa_999 = fmul ssa_988, ssa_998 vec1 32 ssa_1000 = fmul ssa_990, ssa_998 vec1 32 ssa_1001 = fmul ssa_992, ssa_998 vec1 32 ssa_1002 = fmul ssa_999, ssa_44 vec1 32 ssa_1003 = fmul ssa_1000, ssa_44 vec1 32 ssa_1004 = fmul ssa_1001, ssa_44 vec1 32 ssa_1005 = fadd ssa_105.x, ssa_1002 vec1 32 ssa_1006 = fadd ssa_105.y, ssa_1003 vec1 32 ssa_1007 = fadd ssa_105.z, ssa_1004 vec1 32 ssa_1008 = fadd ssa_1005, ssa_75 vec1 32 ssa_1009 = fadd ssa_1006, ssa_77 vec1 32 ssa_1010 = fadd ssa_1007, ssa_79 vec1 32 ssa_1011 = fmul ssa_1008, ssa_1008 vec1 32 ssa_1012 = fmul ssa_1009, ssa_1009 vec1 32 ssa_1013 = fadd ssa_1011, ssa_1012 vec1 32 ssa_1014 = fmul ssa_1010, ssa_1010 vec1 32 ssa_1015 = fadd ssa_1013, ssa_1014 vec1 32 ssa_1016 = frsq ssa_1015 vec1 32 ssa_1017 = fmul ssa_1008, ssa_1016 vec1 32 ssa_1018 = fmul ssa_1009, ssa_1016 vec1 32 ssa_1019 = fmul ssa_1010, ssa_1016 vec1 32 ssa_1020 = fmul ssa_1002, ssa_2 vec1 32 ssa_1021 = fmul ssa_1003, ssa_2 vec1 32 ssa_1022 = fmul ssa_1004, ssa_2 vec1 32 ssa_1023 = fadd ssa_105.x, ssa_1020 vec1 32 ssa_1024 = fadd ssa_105.y, ssa_1021 vec1 32 ssa_1025 = fadd ssa_105.z, ssa_1022 vec1 32 ssa_1026 = fadd ssa_1023, ssa_75 vec1 32 ssa_1027 = fadd ssa_1024, ssa_77 vec1 32 ssa_1028 = fadd ssa_1025, ssa_79 vec1 32 ssa_1029 = fmul ssa_1026, ssa_1026 vec1 32 ssa_1030 = fmul ssa_1027, ssa_1027 vec1 32 ssa_1031 = fadd ssa_1029, ssa_1030 vec1 32 ssa_1032 = fmul ssa_1028, ssa_1028 vec1 32 ssa_1033 = fadd ssa_1031, ssa_1032 vec1 32 ssa_1034 = frsq ssa_1033 vec1 32 ssa_1035 = fmul ssa_1026, ssa_1034 vec1 32 ssa_1036 = fmul ssa_1027, ssa_1034 vec1 32 ssa_1037 = fmul ssa_1028, ssa_1034 vec1 32 ssa_1038 = fmul ssa_970, ssa_1017 vec1 32 ssa_1039 = fneg ssa_1038 vec1 32 ssa_1040 = fmul ssa_971, ssa_1018 vec1 32 ssa_1041 = fneg ssa_1040 vec1 32 ssa_1042 = fadd ssa_1039, ssa_1041 vec1 32 ssa_1043 = fmul ssa_972, ssa_1019 vec1 32 ssa_1044 = fneg ssa_1043 vec1 32 ssa_1045 = fadd ssa_1042, ssa_1044 vec1 32 ssa_1046 = fmul ssa_970, ssa_1035 vec1 32 ssa_1047 = fneg ssa_1046 vec1 32 ssa_1048 = fmul ssa_971, ssa_1036 vec1 32 ssa_1049 = fneg ssa_1048 vec1 32 ssa_1050 = fadd ssa_1047, ssa_1049 vec1 32 ssa_1051 = fmul ssa_972, ssa_1037 vec1 32 ssa_1052 = fneg ssa_1051 vec1 32 ssa_1053 = fadd ssa_1050, ssa_1052 vec1 32 ssa_1054 = fneg ssa_1045 vec1 32 ssa_1055 = fadd ssa_980, ssa_1054 vec1 32 ssa_1056 = fadd ssa_1053, ssa_1054 vec1 32 ssa_1057 = frcp ssa_1056 vec1 
32 ssa_1058 = fmul ssa_1055, ssa_1057 vec1 32 ssa_1059 = fmax ssa_1058, ssa_0 vec1 32 ssa_1060 = fmin ssa_1059, ssa_1 vec1 32 ssa_1061 = fmul ssa_229, ssa_970 vec1 32 ssa_1062 = fmul ssa_230, ssa_971 vec1 32 ssa_1063 = fadd ssa_1061, ssa_1062 vec1 32 ssa_1064 = fmul ssa_231, ssa_972 vec1 32 ssa_1065 = fadd ssa_1063, ssa_1064 vec1 32 ssa_1066 = flt32 ssa_968, ssa_1065 vec1 32 ssa_1067 = fmul ssa_953, ssa_1060 vec1 32 ssa_1068 = fmul ssa_954, ssa_1060 vec1 32 ssa_1069 = fmul ssa_955, ssa_1060 vec1 32 ssa_1070 = fmul ssa_959, ssa_1060 vec1 32 ssa_1071 = b32csel ssa_1066, ssa_1067, ssa_953 vec1 32 ssa_1072 = b32csel ssa_1066, ssa_1068, ssa_954 vec1 32 ssa_1073 = b32csel ssa_1066, ssa_1069, ssa_955 vec1 32 ssa_1074 = b32csel ssa_1066, ssa_1070, ssa_959 vec1 32 ssa_1075 = deref_var &out_color0 (shader_out vec4) vec4 32 ssa_1076 = vec4 ssa_1071, ssa_1072, ssa_1073, ssa_1074 intrinsic store_deref (ssa_1075, ssa_1076) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_27 */ block block_27: } ; ModuleID = 'mesa-shader' source_filename = "mesa-shader" target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7" target triple = "amdgcn-mesa-mesa3d" define amdgpu_ps void @main([0 x i8] addrspace(6)* inreg noalias dereferenceable(18446744073709551615), [0 x i8] addrspace(6)* inreg noalias dereferenceable(18446744073709551615), [0 x i8] addrspace(6)* inreg noalias dereferenceable(18446744073709551615), i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, i32, i32, i32, i32) { main_body: %20 = bitcast <2 x i32> %5 to <2 x float> %21 = extractelement <2 x float> %20, i32 0 %22 = extractelement <2 x float> %20, i32 1 %23 = call float @llvm.amdgcn.interp.p1(float %21, i32 0, i32 0, i32 %3) #1 %24 = call float @llvm.amdgcn.interp.p2(float %23, float %22, i32 0, i32 0, i32 %3) #1 %25 = call float @llvm.amdgcn.interp.p1(float %21, i32 1, i32 0, i32 %3) #1 %26 = call float @llvm.amdgcn.interp.p2(float %25, float %22, i32 1, i32 0, i32 %3) #1 %27 = call float @llvm.amdgcn.interp.p1(float %21, i32 2, i32 0, i32 %3) #1 %28 = call float @llvm.amdgcn.interp.p2(float %27, float %22, i32 2, i32 0, i32 %3) #1 %29 = call float @llvm.amdgcn.interp.p1(float %21, i32 3, i32 0, i32 %3) #1 %30 = call float @llvm.amdgcn.interp.p2(float %29, float %22, i32 3, i32 0, i32 %3) #1 %31 = call float @llvm.amdgcn.interp.p1(float %21, i32 0, i32 1, i32 %3) #1 %32 = call float @llvm.amdgcn.interp.p2(float %31, float %22, i32 0, i32 1, i32 %3) #1 %33 = call float @llvm.amdgcn.interp.p1(float %21, i32 1, i32 1, i32 %3) #1 %34 = call float @llvm.amdgcn.interp.p2(float %33, float %22, i32 1, i32 1, i32 %3) #1 %35 = bitcast [0 x i8] addrspace(6)* %2 to <4 x i32> addrspace(6)*, !amdgpu.uniform !0 %36 = load <4 x i32>, <4 x i32> addrspace(6)* %35, align 16, !invariant.load !0 %37 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 960, i32 0) #1 %38 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 964, i32 0) #1 %39 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 972, i32 0) #1 %40 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1116, i32 0) #1 %41 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 592, i32 0) #1 %42 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 596, i32 0) #1 %43 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 600, i32 
0) #1 %44 = fmul float %41, %41 %45 = fmul float %42, %42 %46 = fadd float %44, %45 %47 = fmul float %43, %43 %48 = fadd float %46, %47 %49 = call float @llvm.sqrt.f32(float %48) #1 %50 = fdiv float 1.000000e+00, %49 %51 = fmul float %41, %50 %52 = fmul float %42, %50 %53 = fmul float %43, %50 %54 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1136, i32 0) #1 %55 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1140, i32 0) #1 %56 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1144, i32 0) #1 %57 = fmul float %54, %54 %58 = fmul float %55, %55 %59 = fadd float %57, %58 %60 = fmul float %56, %56 %61 = fadd float %59, %60 %62 = call float @llvm.sqrt.f32(float %61) #1 %63 = fdiv float 1.000000e+00, %62 %64 = fmul float %54, %63 %65 = fmul float %55, %63 %66 = fmul float %56, %63 %67 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 32, i32 0) #1 %68 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 36, i32 0) #1 %69 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 40, i32 0) #1 %70 = fsub float %24, %67 %71 = fsub float %26, %68 %72 = fsub float %28, %69 %73 = fmul float %70, %70 %74 = fmul float %71, %71 %75 = fadd float %73, %74 %76 = fmul float %72, %72 %77 = fadd float %75, %76 %78 = call float @llvm.sqrt.f32(float %77) #1 %79 = fdiv float 1.000000e+00, %78 %80 = fmul float %70, %79 %81 = fmul float %71, %79 %82 = fmul float %72, %79 %83 = fmul float %64, %80 %84 = fmul float %65, %81 %85 = fadd float %83, %84 %86 = fmul float %66, %82 %87 = fadd float %85, %86 %88 = fcmp olt float %87, 0.000000e+00 %89 = fsub float -0.000000e+00, %64 %90 = fsub float -0.000000e+00, %65 %91 = fsub float -0.000000e+00, %66 %92 = fsub float -0.000000e+00, %87 %.v = select i1 %88, float %89, float %64 %.v12 = select i1 %88, float %90, float %65 %.v13 = select i1 %88, float %91, float %66 %.v14 = select i1 %88, float %92, float %87 %93 = fcmp une float %.v14, 0.000000e+00 call void @llvm.amdgcn.kill(i1 %93) #2 %94 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1104, i32 0) #1 %95 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1108, i32 0) #1 %96 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1112, i32 0) #1 %97 = fsub float %67, %94 %98 = fsub float %68, %95 %99 = fsub float %69, %96 %100 = fmul float %97, %.v %101 = fmul float %98, %.v12 %102 = fadd float %100, %101 %103 = fmul float %99, %.v13 %104 = fadd float %102, %103 %105 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 968, i32 0) #1 %106 = fmul float %.v, %105 %107 = fmul float %.v12, %105 %108 = fmul float %.v13, %105 %109 = fsub float %94, %106 %110 = fsub float %95, %107 %111 = fsub float %96, %108 %112 = fsub float %67, %109 %113 = fsub float %68, %110 %114 = fsub float %69, %111 %115 = fmul float %.v, %112 %116 = fmul float %.v12, %113 %117 = fadd float %115, %116 %118 = fmul float %.v13, %114 %119 = fadd float %117, %118 %120 = fdiv float 1.000000e+00, %.v14 %121 = fmul float %119, %120 %122 = fcmp olt float %121, 0.000000e+00 br i1 %122, label %if1, label %endif1 if1: ; preds = %main_body %123 = fmul float %121, %80 %124 = fmul float %121, %81 %125 = fmul float %121, %82 %126 = fsub float %112, %123 %127 = fsub float %113, %124 %128 = fsub float %114, %125 %129 = fadd float %126, %109 %130 = fadd float %127, %110 %131 = fadd float %128, %111 %132 = fsub float %129, %67 %133 = fsub float %130, %68 %134 = fsub float %131, %69 %135 = fmul float %132, %132 %136 = fmul float %133, %133 %137 = fadd float 
%135, %136 %138 = fmul float %134, %134 %139 = fadd float %137, %138 %140 = call float @llvm.sqrt.f32(float %139) #1 br label %endif1 endif1: ; preds = %main_body, %if1 %141 = phi float [ %140, %if1 ], [ 0.000000e+00, %main_body ] %142 = call float @llvm.fabs.f32(float %104) #1 %143 = fcmp olt float %142, %105 br i1 %143, label %if4, label %else5 if4: ; preds = %endif1 %144 = fmul float %97, %97 %145 = fmul float %98, %98 %146 = fadd float %144, %145 %147 = fmul float %99, %99 %148 = fadd float %146, %147 %149 = call float @llvm.sqrt.f32(float %148) #1 %150 = fsub float %37, %149 %151 = call float @llvm.maxnum.f32(float %150, float 0.000000e+00) #1 %152 = call float @llvm.canonicalize.f32(float %151) #1 br label %endif4 else5: ; preds = %endif1 %153 = fcmp ult float %121, 0.000000e+00 call void @llvm.amdgcn.kill(i1 %153) #2 %154 = fadd float %94, %106 %155 = fadd float %95, %107 %156 = fadd float %96, %108 %157 = fsub float %67, %154 %158 = fsub float %68, %155 %159 = fsub float %69, %156 %160 = fmul float %.v, %157 %161 = fmul float %.v12, %158 %162 = fadd float %160, %161 %163 = fmul float %.v13, %159 %164 = fadd float %162, %163 %165 = fmul float %164, %120 %166 = fcmp olt float %165, 0.000000e+00 br i1 %166, label %if6, label %else7 if6: ; preds = %else5 %167 = fmul float %121, %80 %168 = fmul float %121, %81 %169 = fmul float %121, %82 %170 = fsub float %157, %167 %171 = fsub float %158, %168 %172 = fsub float %159, %169 %173 = fmul float %170, %170 %174 = fmul float %171, %171 %175 = fadd float %173, %174 %176 = fmul float %172, %172 %177 = fadd float %175, %176 %178 = call float @llvm.sqrt.f32(float %177) #1 %179 = fmul float %165, %80 %180 = fmul float %165, %81 %181 = fmul float %165, %82 %182 = fsub float %157, %179 %183 = fsub float %158, %180 %184 = fsub float %159, %181 %185 = fmul float %182, %182 %186 = fmul float %183, %183 %187 = fadd float %185, %186 %188 = fmul float %184, %184 %189 = fadd float %187, %188 %190 = call float @llvm.sqrt.f32(float %189) #1 %191 = call float @llvm.minnum.f32(float %178, float %190) #1 %192 = call float @llvm.canonicalize.f32(float %191) #1 %193 = fcmp uge float %38, %192 %194 = call float @llvm.maxnum.f32(float %178, float %190) #1 %195 = call float @llvm.canonicalize.f32(float %194) #1 %196 = fcmp uge float %195, %37 %197 = and i1 %196, %193 call void @llvm.amdgcn.kill(i1 %197) #2 br label %endif4 else7: ; preds = %else5 call void @llvm.amdgcn.kill(i1 false) #2 br label %endif4 endif4: ; preds = %if6, %else7, %if4 %198 = phi float [ %152, %if4 ], [ %141, %else7 ], [ %141, %if6 ] %199 = fmul float %80, %198 %200 = fmul float %81, %198 %201 = fmul float %82, %198 %202 = fadd float %67, %199 %203 = fadd float %68, %200 %204 = fadd float %69, %201 %205 = fdiv float 1.000000e+00, %34 %206 = fmul float %30, 5.000000e-01 %207 = fmul float %206, %205 %208 = fmul float %32, 5.000000e-01 %209 = fmul float %208, %205 %210 = fadd float %207, 5.000000e-01 %211 = fsub float 5.000000e-01, %209 %212 = bitcast [0 x i8] addrspace(6)* %1 to <8 x i32> addrspace(6)* %213 = load <8 x i32>, <8 x i32> addrspace(6)* %212, align 32, !invariant.load !0 %214 = getelementptr [0 x i8], [0 x i8] addrspace(6)* %1, i32 0, i32 64 %215 = bitcast i8 addrspace(6)* %214 to <4 x i32> addrspace(6)* %216 = load <4 x i32>, <4 x i32> addrspace(6)* %215, align 16, !invariant.load !0 %217 = call float @llvm.amdgcn.image.sample.2d.f32.f32(i32 1, float %210, float %211, <8 x i32> %213, <4 x i32> %216, i1 false, i32 0, i32 0) %218 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> 
%36, i32 68, i32 0) #1 %219 = fmul float %217, %218 %220 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 128, i32 0) #1 %221 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 132, i32 0) #1 %222 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 136, i32 0) #1 %223 = fmul float %220, %220 %224 = fmul float %221, %221 %225 = fadd float %223, %224 %226 = fmul float %222, %222 %227 = fadd float %225, %226 %228 = call float @llvm.sqrt.f32(float %227) #1 %229 = fdiv float 1.000000e+00, %228 %230 = fmul float %220, %229 %231 = fmul float %221, %229 %232 = fmul float %222, %229 %233 = fmul float %230, %80 %234 = fmul float %231, %81 %235 = fsub float -0.000000e+00, %234 %236 = fsub float %235, %233 %237 = fmul float %232, %82 %238 = fsub float %236, %237 %239 = call float @llvm.maxnum.f32(float %238, float 0x3EE4F8B580000000) #1 %240 = call float @llvm.canonicalize.f32(float %239) #1 %241 = fdiv float 1.000000e+00, %240 %242 = fmul float %219, %241 %243 = fmul float %105, 7.500000e-01 %244 = fmul float %39, 0xBFE9999980000000 %245 = fadd float %244, 0x3FFCCCCCC0000000 %246 = fmul float %39, 4.000000e+00 %247 = fadd float %246, 1.000000e+00 %248 = fmul float %39, -2.500000e-01 %249 = fadd float %248, 2.500000e-01 %250 = fmul float %97, %97 %251 = fmul float %98, %98 %252 = fadd float %250, %251 %253 = fmul float %99, %99 %254 = fadd float %252, %253 %255 = call float @llvm.sqrt.f32(float %254) #1 %256 = fmul float %38, 0x3FF3333340000000 %257 = fadd float %40, %256 %258 = fmul float %38, 0x3FF6666660000000 %259 = fadd float %40, %258 %260 = fsub float %255, %257 %261 = fsub float %259, %257 %262 = fdiv float 1.000000e+00, %261 %263 = fmul float %260, %262 %264 = call float @llvm.maxnum.f32(float %263, float 0.000000e+00) #1 %265 = call float @llvm.canonicalize.f32(float %264) #1 %266 = call float @llvm.minnum.f32(float %265, float 1.000000e+00) #1 %267 = call float @llvm.canonicalize.f32(float %266) #1 %268 = fsub float 1.000000e+00, %39 %269 = call float @llvm.maxnum.f32(float %267, float %268) #1 %270 = call float @llvm.canonicalize.f32(float %269) #1 %271 = fcmp ogt float %270, 0x3F50624DE0000000 br i1 %271, label %if10, label %endif10 if10: ; preds = %endif4 %272 = fmul float %104, %120 %273 = fcmp olt float %272, 0.000000e+00 br i1 %273, label %if11, label %endif11 if11: ; preds = %if10 %274 = fmul float %272, %80 %275 = fmul float %272, %81 %276 = fmul float %272, %82 %277 = fsub float %97, %274 %278 = fsub float %98, %275 %279 = fsub float %99, %276 %280 = fadd float %277, %94 %281 = fadd float %278, %95 %282 = fadd float %279, %96 br label %endif11 endif11: ; preds = %if10, %if11 %.in = phi float [ %280, %if11 ], [ %202, %if10 ] %.in19 = phi float [ %281, %if11 ], [ %203, %if10 ] %.in20 = phi float [ %282, %if11 ], [ %204, %if10 ] %283 = fsub float %.in, %94 %284 = fsub float %.in19, %95 %285 = fsub float %.in20, %96 %286 = fmul float %283, %.v %287 = fmul float %284, %.v12 %288 = fadd float %286, %287 %289 = fmul float %285, %.v13 %290 = fadd float %288, %289 %291 = fmul float %.v, %290 %292 = fmul float %.v12, %290 %293 = fmul float %.v13, %290 %294 = fsub float %.in, %291 %295 = fsub float %.in19, %292 %296 = fsub float %.in20, %293 %297 = fsub float %294, %94 %298 = fsub float %295, %95 %299 = fsub float %296, %96 %300 = fmul float %297, %297 %301 = fmul float %298, %298 %302 = fadd float %300, %301 %303 = fmul float %299, %299 %304 = fadd float %302, %303 %305 = call float @llvm.sqrt.f32(float %304) #1 %306 = fsub float %38, %37 %307 = fmul 
float %306, 5.000000e-01 %308 = fadd float %307, %37 %309 = fdiv float 1.000000e+00, %305 %310 = fmul float %297, %309 %311 = fmul float %298, %309 %312 = fmul float %299, %309 %313 = fmul float %310, %308 %314 = fmul float %311, %308 %315 = fmul float %312, %308 %316 = fadd float %313, %94 %317 = fadd float %314, %95 %318 = fadd float %315, %96 %319 = fsub float %.in, %316 %320 = fsub float %.in19, %317 %321 = fsub float %.in20, %318 %322 = fmul float %319, %319 %323 = fmul float %320, %320 %324 = fadd float %322, %323 %325 = fmul float %321, %321 %326 = fadd float %324, %325 %327 = call float @llvm.sqrt.f32(float %326) #1 %328 = fsub float %67, %.in %329 = fsub float %68, %.in19 %330 = fsub float %69, %.in20 %331 = fmul float %328, %328 %332 = fmul float %329, %329 %333 = fadd float %331, %332 %334 = fmul float %330, %330 %335 = fadd float %333, %334 %336 = call float @llvm.sqrt.f32(float %335) #1 %337 = fmul float %305, 0x3EB0C6F7A0000000 %338 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 944, i32 0) #1 %339 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 948, i32 0) #1 %340 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 952, i32 0) #1 %341 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 956, i32 0) #1 %342 = fadd float %337, %341 %343 = fadd float %342, 1.000000e+02 %344 = fmul float %343, %338 %345 = fmul float %344, 1.100000e+01 %346 = fmul float %344, 1.300000e+01 %347 = call float @llvm.cos.f32(float %346) #1 %348 = fadd float %345, %347 %349 = fmul float %344, 3.100000e+01 %350 = call float @llvm.sin.f32(float %349) #1 %351 = fadd float %348, %350 %352 = call float @llvm.sin.f32(float %351) #1 %353 = fmul float %352, 5.000000e-01 %354 = fadd float %353, 5.000000e-01 %355 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 976, i32 0) #1 %356 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 980, i32 0) #1 %357 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 984, i32 0) #1 %358 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 988, i32 0) #1 %359 = fmul float %355, %354 %360 = fmul float %356, %354 %361 = fmul float %357, %354 %362 = fadd float %353, 0x3FD99999A0000000 %363 = fmul float %362, 1.000000e+01 %364 = call float @llvm.maxnum.f32(float %363, float 0.000000e+00) #1 %365 = call float @llvm.canonicalize.f32(float %364) #1 %366 = call float @llvm.minnum.f32(float %365, float 1.000000e+00) #1 %367 = call float @llvm.canonicalize.f32(float %366) #1 %368 = fmul float %367, -2.000000e+00 %369 = fadd float %368, 3.000000e+00 %370 = fmul float %367, %369 %371 = fmul float %367, 5.000000e-01 %372 = fmul float %371, %370 %373 = fmul float %343, %339 %374 = fadd float %373, 2.000000e+01 %375 = fmul float %374, 1.700000e+01 %376 = fmul float %374, 7.000000e+00 %377 = call float @llvm.cos.f32(float %376) #1 %378 = fadd float %375, %377 %379 = fmul float %374, 4.300000e+01 %380 = call float @llvm.sin.f32(float %379) #1 %381 = fadd float %378, %380 %382 = call float @llvm.sin.f32(float %381) #1 %383 = fmul float %382, 5.000000e-01 %384 = fadd float %383, 5.000000e-01 %385 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 992, i32 0) #1 %386 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 996, i32 0) #1 %387 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1000, i32 0) #1 %388 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1004, i32 0) #1 %389 = fmul float %385, %384 %390 = fmul float %386, %384 %391 = fmul float %387, 
%384 %392 = fadd float %359, %389 %393 = fadd float %360, %390 %394 = fadd float %361, %391 %395 = fadd float %383, 0x3FD99999A0000000 %396 = fmul float %395, 0x4013FFFFE0000000 %397 = call float @llvm.maxnum.f32(float %396, float 0.000000e+00) #1 %398 = call float @llvm.canonicalize.f32(float %397) #1 %399 = call float @llvm.minnum.f32(float %398, float 1.000000e+00) #1 %400 = call float @llvm.canonicalize.f32(float %399) #1 %401 = fmul float %400, -2.000000e+00 %402 = fadd float %401, 3.000000e+00 %403 = fmul float %400, %402 %404 = fmul float %400, 0x3FE3333340000000 %405 = fmul float %404, %403 %406 = fadd float %372, %405 %407 = fmul float %343, %358 %408 = fmul float %407, 1.900000e+01 %409 = fmul float %407, 1.700000e+01 %410 = call float @llvm.cos.f32(float %409) #1 %411 = fadd float %408, %410 %412 = call float @llvm.sin.f32(float %411) #1 %413 = fmul float %407, 3.700000e+01 %414 = fmul float %407, 2.300000e+01 %415 = call float @llvm.sin.f32(float %414) #1 %416 = fadd float %413, %415 %417 = call float @llvm.sin.f32(float %416) #1 %418 = fmul float %412, 5.000000e-01 %419 = fmul float %418, %417 %420 = fadd float %419, 5.000000e-01 %421 = fadd float %336, -2.000000e+05 %422 = fmul float %421, 0x3ECBF64760000000 %423 = call float @llvm.maxnum.f32(float %422, float 0.000000e+00) #1 %424 = call float @llvm.canonicalize.f32(float %423) #1 %425 = call float @llvm.minnum.f32(float %424, float 1.000000e+00) #1 %426 = call float @llvm.canonicalize.f32(float %425) #1 %427 = fmul float %.v, %.v %428 = fmul float %.v12, %.v12 %429 = fadd float %427, %428 %430 = fmul float %.v13, %.v13 %431 = fadd float %429, %430 %432 = call float @llvm.sqrt.f32(float %431) #1 %433 = fdiv float 1.000000e+00, %432 %434 = fmul float %.v, %433 %435 = fmul float %.v12, %433 %436 = fmul float %.v13, %433 %437 = fdiv float 1.000000e+00, %336 %438 = fmul float %328, %437 %439 = fmul float %329, %437 %440 = fmul float %330, %437 %441 = fmul float %434, %438 %442 = fmul float %435, %439 %443 = fadd float %441, %442 %444 = fmul float %436, %440 %445 = fadd float %443, %444 %446 = call float @llvm.fabs.f32(float %445) #1 %447 = fadd float %446, 0xBFD3333340000000 %448 = fmul float %447, 0x3FF6DB6DC0000000 %449 = call float @llvm.maxnum.f32(float %448, float 0.000000e+00) #1 %450 = call float @llvm.canonicalize.f32(float %449) #1 %451 = call float @llvm.minnum.f32(float %450, float 1.000000e+00) #1 %452 = call float @llvm.canonicalize.f32(float %451) #1 %453 = fadd float %336, -5.000000e+03 %454 = fmul float %453, 0x3F2A36E2E0000000 %455 = call float @llvm.maxnum.f32(float %454, float 0.000000e+00) #1 %456 = call float @llvm.canonicalize.f32(float %455) #1 %457 = call float @llvm.minnum.f32(float %456, float 1.000000e+00) #1 %458 = call float @llvm.canonicalize.f32(float %457) #1 %459 = fsub float 1.000000e+00, %452 %460 = fmul float %459, %458 %461 = fmul float %420, 2.500000e-01 %462 = fadd float %461, 2.500000e-01 %463 = call float @llvm.maxnum.f32(float %460, float %426) #1 %464 = call float @llvm.canonicalize.f32(float %463) #1 %465 = fsub float %462, %420 %466 = fmul float %464, %465 %467 = fadd float %466, %420 %468 = fadd float %467, 5.000000e-01 %469 = fmul float %392, %468 %470 = fmul float %393, %468 %471 = fmul float %394, %468 %472 = fadd float %467, %340 %473 = fmul float %406, %472 %474 = fmul float %343, %388 %475 = fmul float %474, 2.300000e+01 %476 = fmul float %474, 1.300000e+01 %477 = call float @llvm.cos.f32(float %476) #1 %478 = fadd float %475, %477 %479 = call float @llvm.sin.f32(float %478) #1 
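; The scalar chains above repeat two idioms throughout this shader:
; %427..%436 is a 3-component vector normalize (sum of squares, sqrt,
; reciprocal, scale), and each maxnum/minnum pair bracketed by
; llvm.canonicalize is a saturate to [0,1]. A minimal C sketch of both,
; assuming ordinary vec3 semantics (helper names are illustrative, not
; part of this pipeline):
;
;   #include <math.h>
;   typedef struct { float x, y, z; } vec3;
;
;   static vec3 v3_normalize(vec3 v) {
;       /* %431 = dot(v, v); %432 = sqrt; %433 = 1/sqrt; %434..%436 scale */
;       float inv = 1.0f / sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
;       return (vec3){ v.x * inv, v.y * inv, v.z * inv };
;   }
;
;   static float saturate(float x) {
;       /* maxnum(x, 0) then minnum(., 1), as in %423..%426 */
;       return fminf(fmaxf(x, 0.0f), 1.0f);
;   }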
%480 = fmul float %474, 3.100000e+01 %481 = fmul float %474, 1.900000e+01 %482 = call float @llvm.cos.f32(float %481) #1 %483 = fadd float %480, %482 %484 = call float @llvm.sin.f32(float %483) #1 %485 = fmul float %479, %484 %486 = fmul float %485, 0x3FD3333340000000 %487 = fadd float %336, -5.000000e+04 %488 = fmul float %487, 0x3EE4F8B580000000 %489 = call float @llvm.maxnum.f32(float %488, float 0.000000e+00) #1 %490 = call float @llvm.canonicalize.f32(float %489) #1 %491 = call float @llvm.minnum.f32(float %490, float 1.000000e+00) #1 %492 = call float @llvm.canonicalize.f32(float %491) #1 %493 = call float @llvm.maxnum.f32(float %492, float %460) #1 %494 = call float @llvm.canonicalize.f32(float %493) #1 %495 = fmul float %486, %494 %496 = fsub float %486, %495 %497 = fadd float %469, %496 %498 = fadd float %470, %496 %499 = fadd float %471, %496 %500 = fadd float %473, -7.500000e-01 %501 = fmul float %492, %500 %502 = fadd float %501, 7.500000e-01 %503 = fmul float %283, %51 %504 = fmul float %284, %52 %505 = fadd float %503, %504 %506 = fmul float %285, %53 %507 = fadd float %505, %506 %508 = fmul float %283, %283 %509 = fmul float %284, %284 %510 = fadd float %508, %509 %511 = fmul float %285, %285 %512 = fadd float %510, %511 %513 = fmul float %40, %40 %514 = fsub float %512, %513 %515 = call float @llvm.minnum.f32(float %514, float %507) #1 %516 = call float @llvm.canonicalize.f32(float %515) #1 %517 = fcmp ogt float %516, 0.000000e+00 %518 = fmul float %507, %507 %519 = fcmp olt float %518, %514 %520 = or i1 %519, %517 %521 = select i1 %520, float 1.000000e+00, float 0x3FA9999A00000000 %522 = call float @llvm.maxnum.f32(float %521, float 0x3FECCCCCC0000000) #1 %523 = call float @llvm.canonicalize.f32(float %522) #1 %524 = fsub float %521, %523 %525 = fmul float %39, %524 %526 = fadd float %525, %523 %527 = fmul float %497, %526 %528 = fmul float %498, %526 %529 = fmul float %499, %526 %530 = fdiv float 1.000000e+00, %307 %531 = fmul float %327, %530 %532 = call float @llvm.maxnum.f32(float %531, float 0.000000e+00) #1 %533 = call float @llvm.canonicalize.f32(float %532) #1 %534 = call float @llvm.minnum.f32(float %533, float 1.000000e+00) #1 %535 = call float @llvm.canonicalize.f32(float %534) #1 %536 = fsub float 1.000000e+00, %535 %537 = call float @llvm.fabs.f32(float %290) #1 %538 = fdiv float 1.000000e+00, %105 %539 = fmul float %537, %538 %540 = call float @llvm.maxnum.f32(float %539, float 0.000000e+00) #1 %541 = call float @llvm.canonicalize.f32(float %540) #1 %542 = call float @llvm.minnum.f32(float %541, float 1.000000e+00) #1 %543 = call float @llvm.canonicalize.f32(float %542) #1 %544 = fsub float 1.000000e+00, %543 %545 = fmul float %544, %544 %546 = fmul float %545, %536 %547 = fmul float %546, %502 %548 = fcmp oge float %242, %198 %549 = call float @llvm.maxnum.f32(float %527, float 0.000000e+00) #1 %550 = call float @llvm.canonicalize.f32(float %549) #1 %551 = call float @llvm.maxnum.f32(float %528, float 0.000000e+00) #1 %552 = call float @llvm.canonicalize.f32(float %551) #1 %553 = call float @llvm.maxnum.f32(float %529, float 0.000000e+00) #1 %554 = call float @llvm.canonicalize.f32(float %553) #1 %555 = call float @llvm.minnum.f32(float %550, float 1.000000e+00) #1 %556 = call float @llvm.canonicalize.f32(float %555) #1 %557 = call float @llvm.minnum.f32(float %552, float 1.000000e+00) #1 %558 = call float @llvm.canonicalize.f32(float %557) #1 %559 = call float @llvm.minnum.f32(float %554, float 1.000000e+00) #1 %560 = call float @llvm.canonicalize.f32(float 
%559) #1 %561 = fmul float %556, %547 %562 = fmul float %558, %547 %563 = fmul float %560, %547 %.op = fmul float %547, 2.000000e+00 %564 = select i1 %548, float %.op, float 0.000000e+00 br label %endif10 endif10: ; preds = %endif4, %endif11 %565 = phi float [ %564, %endif11 ], [ 0.000000e+00, %endif4 ] %566 = phi float [ %561, %endif11 ], [ 0.000000e+00, %endif4 ] %567 = phi float [ %562, %endif11 ], [ 0.000000e+00, %endif4 ] %568 = phi float [ %563, %endif11 ], [ 0.000000e+00, %endif4 ] %569 = phi float [ %547, %endif11 ], [ 0.000000e+00, %endif4 ] %570 = fcmp olt float %270, 0x3FEFF7CEE0000000 br i1 %570, label %if16, label %endif16 if16: ; preds = %endif10 %571 = fsub float %38, %37 %572 = fmul float %571, 5.000000e-01 %573 = fadd float %572, %37 %574 = fdiv float 1.000000e+00, %572 %575 = fdiv float 1.000000e+00, %105 %576 = fmul float %.v, %.v %577 = fmul float %.v12, %.v12 %578 = fadd float %576, %577 %579 = fmul float %.v13, %.v13 %580 = fadd float %578, %579 %581 = call float @llvm.sqrt.f32(float %580) #1 %582 = fdiv float 1.000000e+00, %581 %583 = fmul float %.v, %582 %584 = fmul float %.v12, %582 %585 = fmul float %.v13, %582 %586 = fmul float %40, %40 br label %loop17 loop17: ; preds = %endif21, %if16 %587 = phi float [ 0.000000e+00, %if16 ], [ %881, %endif21 ] %588 = phi i32 [ 0, %if16 ], [ %880, %endif21 ] %589 = phi float [ 0.000000e+00, %if16 ], [ %877, %endif21 ] %590 = phi float [ 0.000000e+00, %if16 ], [ %878, %endif21 ] %591 = phi float [ 0.000000e+00, %if16 ], [ %879, %endif21 ] %592 = phi float [ 0.000000e+00, %if16 ], [ %876, %endif21 ] %593 = phi i32 [ 0, %if16 ], [ %882, %endif21 ] %594 = icmp sgt i32 %593, 19 br i1 %594, label %endif16, label %endif18 endif18: ; preds = %loop17 %595 = fmul float %80, %587 %596 = fmul float %81, %587 %597 = fmul float %82, %587 %598 = fadd float %202, %595 %599 = fadd float %203, %596 %600 = fadd float %204, %597 %601 = fsub float %598, %94 %602 = fsub float %599, %95 %603 = fsub float %600, %96 %604 = fmul float %601, %.v %605 = fmul float %602, %.v12 %606 = fadd float %604, %605 %607 = fmul float %603, %.v13 %608 = fadd float %606, %607 %609 = fmul float %.v, %608 %610 = fmul float %.v12, %608 %611 = fmul float %.v13, %608 %612 = fsub float %598, %609 %613 = fsub float %599, %610 %614 = fsub float %600, %611 %615 = fsub float %612, %94 %616 = fsub float %613, %95 %617 = fsub float %614, %96 %618 = fmul float %615, %615 %619 = fmul float %616, %616 %620 = fadd float %618, %619 %621 = fmul float %617, %617 %622 = fadd float %620, %621 %623 = call float @llvm.sqrt.f32(float %622) #1 %624 = fdiv float 1.000000e+00, %623 %625 = fmul float %615, %624 %626 = fmul float %616, %624 %627 = fmul float %617, %624 %628 = fmul float %625, %573 %629 = fmul float %626, %573 %630 = fmul float %627, %573 %631 = fadd float %628, %94 %632 = fadd float %629, %95 %633 = fadd float %630, %96 %634 = fsub float %598, %631 %635 = fsub float %599, %632 %636 = fsub float %600, %633 %637 = fmul float %634, %634 %638 = fmul float %635, %635 %639 = fadd float %637, %638 %640 = fmul float %636, %636 %641 = fadd float %639, %640 %642 = call float @llvm.sqrt.f32(float %641) #1 %643 = fmul float %642, %574 %644 = call float @llvm.maxnum.f32(float %643, float 0.000000e+00) #1 %645 = call float @llvm.canonicalize.f32(float %644) #1 %646 = call float @llvm.minnum.f32(float %645, float 1.000000e+00) #1 %647 = call float @llvm.canonicalize.f32(float %646) #1 %648 = fsub float 1.000000e+00, %647 %649 = call float @llvm.fabs.f32(float %608) #1 %650 = fmul float %649, 
%575 %651 = call float @llvm.maxnum.f32(float %650, float 0.000000e+00) #1 %652 = call float @llvm.canonicalize.f32(float %651) #1 %653 = call float @llvm.minnum.f32(float %652, float 1.000000e+00) #1 %654 = call float @llvm.canonicalize.f32(float %653) #1 %655 = fsub float 1.000000e+00, %654 %656 = fmul float %655, %655 %657 = fmul float %656, %648 %658 = fcmp ogt float %657, 0.000000e+00 br i1 %658, label %if21, label %endif21 if21: ; preds = %endif18 %659 = fsub float %67, %598 %660 = fsub float %68, %599 %661 = fsub float %69, %600 %662 = fmul float %659, %659 %663 = fmul float %660, %660 %664 = fadd float %662, %663 %665 = fmul float %661, %661 %666 = fadd float %664, %665 %667 = call float @llvm.sqrt.f32(float %666) #1 %668 = fmul float %623, 0x3EB0C6F7A0000000 %669 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 944, i32 0) #1 %670 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 948, i32 0) #1 %671 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 952, i32 0) #1 %672 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 956, i32 0) #1 %673 = fadd float %668, %672 %674 = fadd float %673, 1.000000e+02 %675 = fmul float %674, %669 %676 = fmul float %675, 1.100000e+01 %677 = fmul float %675, 1.300000e+01 %678 = call float @llvm.cos.f32(float %677) #1 %679 = fadd float %676, %678 %680 = fmul float %675, 3.100000e+01 %681 = call float @llvm.sin.f32(float %680) #1 %682 = fadd float %679, %681 %683 = call float @llvm.sin.f32(float %682) #1 %684 = fmul float %683, 5.000000e-01 %685 = fadd float %684, 5.000000e-01 %686 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 976, i32 0) #1 %687 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 980, i32 0) #1 %688 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 984, i32 0) #1 %689 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 988, i32 0) #1 %690 = fmul float %686, %685 %691 = fmul float %687, %685 %692 = fmul float %688, %685 %693 = fadd float %684, 0x3FD99999A0000000 %694 = fmul float %693, 1.000000e+01 %695 = call float @llvm.maxnum.f32(float %694, float 0.000000e+00) #1 %696 = call float @llvm.canonicalize.f32(float %695) #1 %697 = call float @llvm.minnum.f32(float %696, float 1.000000e+00) #1 %698 = call float @llvm.canonicalize.f32(float %697) #1 %699 = fmul float %698, -2.000000e+00 %700 = fadd float %699, 3.000000e+00 %701 = fmul float %698, %700 %702 = fmul float %698, 5.000000e-01 %703 = fmul float %702, %701 %704 = fmul float %674, %670 %705 = fadd float %704, 2.000000e+01 %706 = fmul float %705, 1.700000e+01 %707 = fmul float %705, 7.000000e+00 %708 = call float @llvm.cos.f32(float %707) #1 %709 = fadd float %706, %708 %710 = fmul float %705, 4.300000e+01 %711 = call float @llvm.sin.f32(float %710) #1 %712 = fadd float %709, %711 %713 = call float @llvm.sin.f32(float %712) #1 %714 = fmul float %713, 5.000000e-01 %715 = fadd float %714, 5.000000e-01 %716 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 992, i32 0) #1 %717 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 996, i32 0) #1 %718 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1000, i32 0) #1 %719 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %36, i32 1004, i32 0) #1 %720 = fmul float %716, %715 %721 = fmul float %717, %715 %722 = fmul float %718, %715 %723 = fadd float %690, %720 %724 = fadd float %691, %721 %725 = fadd float %692, %722 %726 = fadd float %714, 0x3FD99999A0000000 %727 = fmul float %726, 0x4013FFFFE0000000 
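; %694..%703 above is a weighted Hermite smoothstep kernel: after the
; clamp, %701 = t * (3 - 2t) and %703 = (0.5 * t) * %701, i.e.
; 0.5 * t^2 * (3 - 2t); the %728..%736 copy below applies a weight of
; about 0.6 instead of 0.5. A one-line C sketch of the kernel, assuming
; the caller has already clamped t to [0,1] as the maxnum/minnum pair
; does here:
;
;   static float hermite01(float t) {
;       return t * t * (3.0f - 2.0f * t);   /* %699..%701, times the extra t */
;   }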
%728 = call float @llvm.maxnum.f32(float %727, float 0.000000e+00) #1 %729 = call float @llvm.canonicalize.f32(float %728) #1 %730 = call float @llvm.minnum.f32(float %729, float 1.000000e+00) #1 %731 = call float @llvm.canonicalize.f32(float %730) #1 %732 = fmul float %731, -2.000000e+00 %733 = fadd float %732, 3.000000e+00 %734 = fmul float %731, %733 %735 = fmul float %731, 0x3FE3333340000000 %736 = fmul float %735, %734 %737 = fadd float %703, %736 %738 = fmul float %674, %689 %739 = fmul float %738, 1.900000e+01 %740 = fmul float %738, 1.700000e+01 %741 = call float @llvm.cos.f32(float %740) #1 %742 = fadd float %739, %741 %743 = call float @llvm.sin.f32(float %742) #1 %744 = fmul float %738, 3.700000e+01 %745 = fmul float %738, 2.300000e+01 %746 = call float @llvm.sin.f32(float %745) #1 %747 = fadd float %744, %746 %748 = call float @llvm.sin.f32(float %747) #1 %749 = fmul float %743, 5.000000e-01 %750 = fmul float %749, %748 %751 = fadd float %750, 5.000000e-01 %752 = fadd float %667, -2.000000e+05 %753 = fmul float %752, 0x3ECBF64760000000 %754 = call float @llvm.maxnum.f32(float %753, float 0.000000e+00) #1 %755 = call float @llvm.canonicalize.f32(float %754) #1 %756 = call float @llvm.minnum.f32(float %755, float 1.000000e+00) #1 %757 = call float @llvm.canonicalize.f32(float %756) #1 %758 = fdiv float 1.000000e+00, %667 %759 = fmul float %659, %758 %760 = fmul float %660, %758 %761 = fmul float %661, %758 %762 = fmul float %583, %759 %763 = fmul float %584, %760 %764 = fadd float %762, %763 %765 = fmul float %585, %761 %766 = fadd float %764, %765 %767 = call float @llvm.fabs.f32(float %766) #1 %768 = fadd float %767, 0xBFD3333340000000 %769 = fmul float %768, 0x3FF6DB6DC0000000 %770 = call float @llvm.maxnum.f32(float %769, float 0.000000e+00) #1 %771 = call float @llvm.canonicalize.f32(float %770) #1 %772 = call float @llvm.minnum.f32(float %771, float 1.000000e+00) #1 %773 = call float @llvm.canonicalize.f32(float %772) #1 %774 = fadd float %667, -5.000000e+03 %775 = fmul float %774, 0x3F2A36E2E0000000 %776 = call float @llvm.maxnum.f32(float %775, float 0.000000e+00) #1 %777 = call float @llvm.canonicalize.f32(float %776) #1 %778 = call float @llvm.minnum.f32(float %777, float 1.000000e+00) #1 %779 = call float @llvm.canonicalize.f32(float %778) #1 %780 = fsub float 1.000000e+00, %773 %781 = fmul float %780, %779 %782 = fmul float %751, 2.500000e-01 %783 = fadd float %782, 2.500000e-01 %784 = call float @llvm.maxnum.f32(float %781, float %757) #1 %785 = call float @llvm.canonicalize.f32(float %784) #1 %786 = fsub float %783, %751 %787 = fmul float %785, %786 %788 = fadd float %787, %751 %789 = fadd float %788, 5.000000e-01 %790 = fmul float %723, %789 %791 = fmul float %724, %789 %792 = fmul float %725, %789 %793 = fadd float %788, %671 %794 = fmul float %737, %793 %795 = fmul float %674, %719 %796 = fmul float %795, 2.300000e+01 %797 = fmul float %795, 1.300000e+01 %798 = call float @llvm.cos.f32(float %797) #1 %799 = fadd float %796, %798 %800 = call float @llvm.sin.f32(float %799) #1 %801 = fmul float %795, 3.100000e+01 %802 = fmul float %795, 1.900000e+01 %803 = call float @llvm.cos.f32(float %802) #1 %804 = fadd float %801, %803 %805 = call float @llvm.sin.f32(float %804) #1 %806 = fmul float %800, %805 %807 = fmul float %806, 0x3FD3333340000000 %808 = fadd float %667, -5.000000e+04 %809 = fmul float %808, 0x3EE4F8B580000000 %810 = call float @llvm.maxnum.f32(float %809, float 0.000000e+00) #1 %811 = call float @llvm.canonicalize.f32(float %810) #1 %812 = call float 
@llvm.minnum.f32(float %811, float 1.000000e+00) #1 %813 = call float @llvm.canonicalize.f32(float %812) #1 %814 = call float @llvm.maxnum.f32(float %813, float %781) #1 %815 = call float @llvm.canonicalize.f32(float %814) #1 %816 = fmul float %807, %815 %817 = fsub float %807, %816 %818 = fadd float %790, %817 %819 = fadd float %791, %817 %820 = fadd float %792, %817 %821 = fadd float %794, -7.500000e-01 %822 = fmul float %813, %821 %823 = fadd float %822, 7.500000e-01 %824 = fmul float %657, %823 %825 = fmul float %601, %51 %826 = fmul float %602, %52 %827 = fadd float %825, %826 %828 = fmul float %603, %53 %829 = fadd float %827, %828 %830 = fmul float %601, %601 %831 = fmul float %602, %602 %832 = fadd float %830, %831 %833 = fmul float %603, %603 %834 = fadd float %832, %833 %835 = fsub float %834, %586 %836 = call float @llvm.minnum.f32(float %835, float %829) #1 %837 = call float @llvm.canonicalize.f32(float %836) #1 %838 = fcmp ogt float %837, 0.000000e+00 %839 = fmul float %829, %829 %840 = fcmp olt float %839, %835 %841 = or i1 %840, %838 %842 = select i1 %841, float 1.000000e+00, float 0x3FA9999A00000000 %843 = call float @llvm.maxnum.f32(float %842, float 0x3FECCCCCC0000000) #1 %844 = call float @llvm.canonicalize.f32(float %843) #1 %845 = fsub float %844, %842 %846 = fmul float %39, %845 %847 = fadd float %846, %842 %848 = fmul float %818, %847 %849 = fmul float %819, %847 %850 = fmul float %820, %847 %851 = fadd float %587, %198 %852 = fcmp oge float %242, %851 %853 = bitcast i32 %588 to float %854 = fadd float %824, %853 %855 = bitcast float %854 to i32 %856 = select i1 %852, i32 %855, i32 %588 %857 = call float @llvm.maxnum.f32(float %848, float 0.000000e+00) #1 %858 = call float @llvm.canonicalize.f32(float %857) #1 %859 = call float @llvm.maxnum.f32(float %849, float 0.000000e+00) #1 %860 = call float @llvm.canonicalize.f32(float %859) #1 %861 = call float @llvm.maxnum.f32(float %850, float 0.000000e+00) #1 %862 = call float @llvm.canonicalize.f32(float %861) #1 %863 = call float @llvm.minnum.f32(float %858, float 1.000000e+00) #1 %864 = call float @llvm.canonicalize.f32(float %863) #1 %865 = call float @llvm.minnum.f32(float %860, float 1.000000e+00) #1 %866 = call float @llvm.canonicalize.f32(float %865) #1 %867 = call float @llvm.minnum.f32(float %862, float 1.000000e+00) #1 %868 = call float @llvm.canonicalize.f32(float %867) #1 %869 = fmul float %864, %824 %870 = fmul float %866, %824 %871 = fmul float %868, %824 %872 = fadd float %589, %869 %873 = fadd float %590, %870 %874 = fadd float %591, %871 %875 = fadd float %592, %824 br label %endif21 endif21: ; preds = %endif18, %if21 %876 = phi float [ %875, %if21 ], [ %592, %endif18 ] %877 = phi float [ %872, %if21 ], [ %589, %endif18 ] %878 = phi float [ %873, %if21 ], [ %590, %endif18 ] %879 = phi float [ %874, %if21 ], [ %591, %endif18 ] %880 = phi i32 [ %856, %if21 ], [ %588, %endif18 ] %881 = fadd float %587, %243 %882 = add i32 %593, 1 br label %loop17 endif16: ; preds = %endif10, %loop17 %883 = phi i32 [ %588, %loop17 ], [ 0, %endif10 ] %884 = phi float [ %589, %loop17 ], [ 0.000000e+00, %endif10 ] %885 = phi float [ %590, %loop17 ], [ 0.000000e+00, %endif10 ] %886 = phi float [ %591, %loop17 ], [ 0.000000e+00, %endif10 ] %887 = phi float [ %592, %loop17 ], [ 0.000000e+00, %endif10 ] %888 = call float @llvm.maxnum.f32(float %887, float %569) #1 %889 = call float @llvm.canonicalize.f32(float %888) #1 %890 = fcmp ugt float %889, 0.000000e+00 call void @llvm.amdgcn.kill(i1 %890) #2 %891 = fsub float %566, %884 %892 = 
fmul float %270, %891 %893 = fadd float %892, %884 %894 = fsub float %567, %885 %895 = fmul float %270, %894 %896 = fadd float %895, %885 %897 = fsub float %568, %886 %898 = fmul float %270, %897 %899 = fadd float %898, %886 %900 = fsub float %569, %887 %901 = fmul float %270, %900 %902 = fadd float %901, %887 %903 = bitcast i32 %883 to float %904 = fsub float %565, %903 %905 = fmul float %270, %904 %906 = fadd float %905, %903 %907 = fmul float %893, %245 %908 = fmul float %896, %245 %909 = fmul float %899, %245 %910 = fdiv float 1.000000e+00, %902 %911 = fmul float %907, %910 %912 = fmul float %908, %910 %913 = fmul float %909, %910 %914 = call float @llvm.maxnum.f32(float %249, float %911) #1 %915 = call float @llvm.canonicalize.f32(float %914) #1 %916 = call float @llvm.maxnum.f32(float %249, float %912) #1 %917 = call float @llvm.canonicalize.f32(float %916) #1 %918 = call float @llvm.maxnum.f32(float %249, float %913) #1 %919 = call float @llvm.canonicalize.f32(float %918) #1 %920 = fmul float %906, %247 %921 = call float @llvm.maxnum.f32(float %920, float 0.000000e+00) #1 %922 = call float @llvm.canonicalize.f32(float %921) #1 %923 = call float @llvm.minnum.f32(float %922, float 1.000000e+00) #1 %924 = call float @llvm.canonicalize.f32(float %923) #1 %925 = call float @llvm.minnum.f32(float %924, float 0x3FECCCCCC0000000) #1 %926 = call float @llvm.canonicalize.f32(float %925) #1 %927 = fsub float %94, %67 %928 = fsub float %95, %68 %929 = fsub float %96, %69 %930 = fmul float %927, %927 %931 = fmul float %928, %928 %932 = fadd float %930, %931 %933 = fmul float %929, %929 %934 = fadd float %932, %933 %935 = call float @llvm.sqrt.f32(float %934) #1 %936 = fdiv float 1.000000e+00, %935 %937 = fmul float %927, %936 %938 = fmul float %928, %936 %939 = fmul float %929, %936 %940 = fmul float %937, %80 %941 = fmul float %938, %81 %942 = fsub float -0.000000e+00, %941 %943 = fsub float %942, %940 %944 = fmul float %939, %82 %945 = fsub float %943, %944 %946 = fmul float %939, %.v12 %947 = fmul float %937, %.v13 %948 = fmul float %938, %.v %949 = fmul float %938, %.v13 %950 = fmul float %939, %.v %951 = fmul float %937, %.v12 %952 = fsub float %949, %946 %953 = fsub float %950, %947 %954 = fsub float %951, %948 %955 = fmul float %952, %952 %956 = fmul float %953, %953 %957 = fadd float %955, %956 %958 = fmul float %954, %954 %959 = fadd float %957, %958 %960 = call float @llvm.sqrt.f32(float %959) #1 %961 = fdiv float 1.000000e+00, %960 %962 = fmul float %952, %961 %963 = fmul float %953, %961 %964 = fmul float %954, %961 %965 = fmul float %962, %40 %966 = fmul float %963, %40 %967 = fmul float %964, %40 %968 = fadd float %94, %965 %969 = fadd float %95, %966 %970 = fadd float %96, %967 %971 = fsub float %968, %67 %972 = fsub float %969, %68 %973 = fsub float %970, %69 %974 = fmul float %971, %971 %975 = fmul float %972, %972 %976 = fadd float %974, %975 %977 = fmul float %973, %973 %978 = fadd float %976, %977 %979 = call float @llvm.sqrt.f32(float %978) #1 %980 = fdiv float 1.000000e+00, %979 %981 = fmul float %971, %980 %982 = fmul float %972, %980 %983 = fmul float %973, %980 %984 = fmul float %965, 0x3FF147AE20000000 %985 = fmul float %966, 0x3FF147AE20000000 %986 = fmul float %967, 0x3FF147AE20000000 %987 = fadd float %94, %984 %988 = fadd float %95, %985 %989 = fadd float %96, %986 %990 = fsub float %987, %67 %991 = fsub float %988, %68 %992 = fsub float %989, %69 %993 = fmul float %990, %990 %994 = fmul float %991, %991 %995 = fadd float %993, %994 %996 = fmul float %992, %992 %997 
= fadd float %995, %996 %998 = call float @llvm.sqrt.f32(float %997) #1 %999 = fdiv float 1.000000e+00, %998 %1000 = fmul float %990, %999 %1001 = fmul float %991, %999 %1002 = fmul float %992, %999 %1003 = fmul float %937, %981 %1004 = fmul float %938, %982 %1005 = fsub float -0.000000e+00, %1004 %1006 = fsub float %1005, %1003 %1007 = fmul float %939, %983 %1008 = fsub float %1006, %1007 %1009 = fmul float %937, %1000 %1010 = fmul float %938, %1001 %1011 = fsub float -0.000000e+00, %1010 %1012 = fsub float %1011, %1009 %1013 = fmul float %939, %1002 %1014 = fsub float %1012, %1013 %1015 = fsub float %945, %1008 %1016 = fsub float %1014, %1008 %1017 = fdiv float 1.000000e+00, %1016 %1018 = fmul float %1015, %1017 %1019 = call float @llvm.maxnum.f32(float %1018, float 0.000000e+00) #1 %1020 = call float @llvm.canonicalize.f32(float %1019) #1 %1021 = call float @llvm.minnum.f32(float %1020, float 1.000000e+00) #1 %1022 = call float @llvm.canonicalize.f32(float %1021) #1 %1023 = fmul float %199, %937 %1024 = fmul float %200, %938 %1025 = fadd float %1023, %1024 %1026 = fmul float %201, %939 %1027 = fadd float %1025, %1026 %1028 = fcmp olt float %935, %1027 %1029 = fmul float %915, %1022 %1030 = fmul float %917, %1022 %1031 = fmul float %919, %1022 %1032 = fmul float %926, %1022 %.v15 = select i1 %1028, float %1029, float %915 %.v16 = select i1 %1028, float %1030, float %917 %.v17 = select i1 %1028, float %1031, float %919 %.v18 = select i1 %1028, float %1032, float %926 %1033 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %.v15, float %.v16) #1 %1034 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %.v17, float %.v18) #1 %1035 = bitcast <2 x half> %1033 to <2 x i16> %1036 = bitcast <2 x half> %1034 to <2 x i16> call void @llvm.amdgcn.exp.compr.v2i16(i32 0, i32 5, <2 x i16> %1035, <2 x i16> %1036, i1 true, i1 true) #2 ret void } ; Function Attrs: nounwind readnone speculatable declare i8 addrspace(4)* @llvm.amdgcn.implicit.buffer.ptr() #0 ; Function Attrs: nounwind readnone speculatable declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #0 ; Function Attrs: nounwind readnone speculatable declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #0 ; Function Attrs: nounwind readnone declare float @llvm.amdgcn.s.buffer.load.f32(<4 x i32>, i32, i32 immarg) #1 ; Function Attrs: nounwind readnone speculatable declare float @llvm.sqrt.f32(float) #0 ; Function Attrs: nounwind declare void @llvm.amdgcn.kill(i1) #2 ; Function Attrs: nounwind readnone speculatable declare float @llvm.fabs.f32(float) #0 ; Function Attrs: nounwind readnone speculatable declare float @llvm.maxnum.f32(float, float) #0 ; Function Attrs: nounwind readnone speculatable declare float @llvm.canonicalize.f32(float) #0 ; Function Attrs: nounwind readnone speculatable declare float @llvm.minnum.f32(float, float) #0 ; Function Attrs: nounwind readonly declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #3 ; Function Attrs: nounwind readnone speculatable declare float @llvm.cos.f32(float) #0 ; Function Attrs: nounwind readnone speculatable declare float @llvm.sin.f32(float) #0 ; Function Attrs: nounwind readnone speculatable declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #0 ; Function Attrs: nounwind declare void @llvm.amdgcn.exp.compr.v2i16(i32 immarg, i32 immarg, <2 x i16>, <2 x i16>, i1 immarg, i1 immarg) #2 ; Function Attrs: nounwind readonly declare float @llvm.amdgcn.image.sample.2d.f32.f32(i32 immarg, float, 
float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #3 attributes #0 = { nounwind readnone speculatable } attributes #1 = { nounwind readnone } attributes #2 = { nounwind } attributes #3 = { nounwind readonly } !0 = !{} disasm: main: BB14_0: s_mov_b64 s[18:19], exec ; BE92017E s_wqm_b64 exec, exec ; BEFE077E s_mov_b32 s0, s4 ; BE800004 s_mov_b32 s1, 0 ; BE810080 s_mov_b32 s20, s3 ; BE940003 s_load_dwordx4 s[12:15], s[0:1], 0x0 ; C00A0300 00000000 s_mov_b32 m0, s5 ; BEFC0005 v_interp_p1_f32_e32 v3, v0, attr0.y ; D40C0100 v_interp_p1_f32_e32 v2, v0, attr0.x ; D4080000 v_interp_p1_f32_e32 v4, v0, attr0.z ; D4100200 s_waitcnt lgkmcnt(0) ; BF8C007F s_buffer_load_dwordx2 s[2:3], s[12:15], 0x20 ; C0260086 00000020 s_buffer_load_dword s4, s[12:15], 0x28 ; C0220106 00000028 s_buffer_load_dwordx2 s[0:1], s[12:15], 0x470 ; C0260006 00000470 s_buffer_load_dword s6, s[12:15], 0x478 ; C0220186 00000478 v_interp_p2_f32_e32 v3, v1, attr0.y ; D40D0101 v_interp_p2_f32_e32 v2, v1, attr0.x ; D4090001 v_interp_p2_f32_e32 v4, v1, attr0.z ; D4110201 s_waitcnt lgkmcnt(0) ; BF8C007F v_subrev_f32_e32 v3, s3, v3 ; 06060603 v_mul_f32_e64 v5, s1, s1 ; D1050005 00000201 v_subrev_f32_e32 v6, s4, v4 ; 060C0804 v_subrev_f32_e32 v2, s2, v2 ; 06040402 v_mul_f32_e32 v4, v3, v3 ; 0A080703 v_mac_f32_e64 v5, s0, s0 ; D1160005 00000000 v_mac_f32_e32 v4, v2, v2 ; 2C080502 v_mac_f32_e64 v5, s6, s6 ; D1160005 00000C06 v_mac_f32_e32 v4, v6, v6 ; 2C080D06 v_rsq_f32_e32 v5, v5 ; 7E0A4905 v_rsq_f32_e32 v7, v4 ; 7E0E4904 v_mov_b32_e32 v11, 0 ; 7E160280 v_mul_f32_e32 v9, s1, v5 ; 0A120A01 v_mul_f32_e32 v4, v2, v7 ; 0A080F02 v_mul_f32_e32 v2, v3, v7 ; 0A040F03 v_mul_f32_e32 v8, s0, v5 ; 0A100A00 v_mul_f32_e32 v10, s6, v5 ; 0A140A06 v_mul_f32_e32 v5, v9, v2 ; 0A0A0509 v_mul_f32_e32 v3, v6, v7 ; 0A060F06 v_mac_f32_e32 v5, v8, v4 ; 2C0A0908 v_bfrev_b32_e32 v6, 1 ; 7E0C5881 v_mac_f32_e32 v5, v10, v3 ; 2C0A070A v_xor_b32_e32 v7, v8, v6 ; 2A0E0D08 v_cmp_gt_f32_e32 vcc, 0, v5 ; 7C880A80 v_xor_b32_e32 v12, v9, v6 ; 2A180D09 v_xor_b32_e32 v13, v10, v6 ; 2A1A0D0A v_xor_b32_e32 v6, v5, v6 ; 2A0C0D05 v_cndmask_b32_e32 v8, v8, v7, vcc ; 00100F08 v_cndmask_b32_e32 v9, v9, v12, vcc ; 00121909 v_cndmask_b32_e32 v5, v5, v6, vcc ; 000A0D05 v_cndmask_b32_e32 v10, v10, v13, vcc ; 00141B0A v_cmpx_neq_f32_e32 vcc, 0, v5 ; 7CBA0A80 s_cbranch_execnz BB14_2 ; BF890000 exp null off, off, off, off done vm ; C4001890 00000000 s_endpgm ; BF810000 BB14_2: s_buffer_load_dword s11, s[12:15], 0x3c8 ; C02202C6 000003C8 s_buffer_load_dwordx2 s[8:9], s[12:15], 0x450 ; C0260206 00000450 s_buffer_load_dword s6, s[12:15], 0x458 ; C0220186 00000458 v_rcp_f32_e32 v29, v5 ; 7E3A4505 s_waitcnt lgkmcnt(0) ; BF8C007F v_mov_b32_e32 v22, s11 ; 7E2C020B v_mov_b32_e32 v5, s9 ; 7E0A0209 v_mov_b32_e32 v7, s8 ; 7E0E0208 v_mad_f32 v17, -v9, s11, v5 ; D1C10011 24141709 v_mov_b32_e32 v6, s6 ; 7E0C0206 v_mad_f32 v16, -v8, s11, v7 ; D1C10010 241C1708 v_sub_f32_e32 v20, s3, v17 ; 04282203 v_mad_f32 v18, -v10, s11, v6 ; D1C10012 2418170A v_sub_f32_e32 v19, s2, v16 ; 04262002 v_mul_f32_e32 v12, v9, v20 ; 0A182909 v_sub_f32_e32 v21, s4, v18 ; 042A2404 v_mac_f32_e32 v12, v8, v19 ; 2C182708 v_mac_f32_e32 v12, v10, v21 ; 2C182B0A v_mul_f32_e32 v12, v12, v29 ; 0A183B0C v_mul_f32_e32 v13, s11, v8 ; 0A1A100B v_mul_f32_e32 v14, s11, v9 ; 0A1C120B v_mul_f32_e32 v15, s11, v10 ; 0A1E140B v_cmp_gt_f32_e32 vcc, 0, v12 ; 7C881880 s_and_saveexec_b64 s[0:1], vcc ; BE80206A s_cbranch_execz BB14_4 ; BF880000 BB14_3: v_mad_f32 v11, -v12, v4, v19 ; D1C1000B 244E090C v_mad_f32 v19, -v12, v2, v20 ; D1C10013 
2452050C v_add_f32_e32 v11, v11, v16 ; 0216210B v_add_f32_e32 v16, v19, v17 ; 02202313 v_subrev_f32_e32 v16, s3, v16 ; 06202003 v_mad_f32 v20, -v12, v3, v21 ; D1C10014 2456070C v_add_f32_e32 v17, v20, v18 ; 02222514 v_subrev_f32_e32 v11, s2, v11 ; 06161602 v_mul_f32_e32 v16, v16, v16 ; 0A202110 v_subrev_f32_e32 v17, s4, v17 ; 06222204 v_mac_f32_e32 v16, v11, v11 ; 2C20170B v_mac_f32_e32 v16, v17, v17 ; 2C202311 v_sqrt_f32_e32 v11, v16 ; 7E164F10 BB14_4: s_or_b64 exec, exec, s[0:1] ; 87FE007E s_buffer_load_dwordx2 s[16:17], s[12:15], 0x3c0 ; C0260406 000003C0 v_sub_f32_e32 v32, s3, v5 ; 04400A03 v_sub_f32_e32 v30, s2, v7 ; 043C0E02 v_mul_f32_e32 v34, v32, v9 ; 0A441320 v_sub_f32_e32 v33, s4, v6 ; 04420C04 v_mac_f32_e32 v34, v30, v8 ; 2C44111E v_mac_f32_e32 v34, v33, v10 ; 2C441521 s_waitcnt lgkmcnt(0) ; BF8C007F v_mov_b32_e32 v23, s17 ; 7E2E0211 v_cmp_nlt_f32_e64 s[0:1], |v34|, s11 ; D04E0100 00001722 s_and_saveexec_b64 s[22:23], s[0:1] ; BE962000 s_xor_b64 s[22:23], exec, s[22:23] ; 8896167E s_cbranch_execz BB14_11 ; BF880000 BB14_5: v_cmpx_nle_f32_e32 vcc, 0, v12 ; 7CB81880 v_add_f32_e32 v14, s9, v14 ; 021C1C09 v_add_f32_e32 v13, s8, v13 ; 021A1A08 v_sub_f32_e32 v14, s3, v14 ; 041C1C03 v_add_f32_e32 v15, s6, v15 ; 021E1E06 v_sub_f32_e32 v13, s2, v13 ; 041A1A02 v_mul_f32_e32 v16, v9, v14 ; 0A201D09 v_sub_f32_e32 v15, s4, v15 ; 041E1E04 v_mac_f32_e32 v16, v8, v13 ; 2C201B08 v_mac_f32_e32 v16, v10, v15 ; 2C201F0A v_mul_f32_e32 v16, v16, v29 ; 0A203B10 v_cmp_ngt_f32_e32 vcc, 0, v16 ; 7C962080 s_and_saveexec_b64 s[0:1], vcc ; BE80206A s_xor_b64 s[0:1], exec, s[0:1] ; 8880007E s_cbranch_execz BB14_8 ; BF880000 BB14_7: s_mov_b64 exec, 0 ; BEFE0180 BB14_8: s_or_saveexec_b64 s[24:25], s[0:1] ; BE982100 s_xor_b64 exec, exec, s[24:25] ; 88FE187E s_cbranch_execz BB14_10 ; BF880000 BB14_9: v_mad_f32 v18, -v12, v2, v14 ; D1C10012 243A050C v_mad_f32 v14, -v16, v2, v14 ; D1C1000E 243A0510 v_mad_f32 v17, -v12, v4, v13 ; D1C10011 2436090C v_mul_f32_e32 v18, v18, v18 ; 0A242512 v_mad_f32 v13, -v16, v4, v13 ; D1C1000D 24360910 v_mul_f32_e32 v14, v14, v14 ; 0A1C1D0E v_mad_f32 v12, -v12, v3, v15 ; D1C1000C 243E070C v_mac_f32_e32 v18, v17, v17 ; 2C242311 v_mad_f32 v15, -v16, v3, v15 ; D1C1000F 243E0710 v_mac_f32_e32 v14, v13, v13 ; 2C1C1B0D v_mac_f32_e32 v18, v12, v12 ; 2C24190C v_mac_f32_e32 v14, v15, v15 ; 2C1C1F0F v_sqrt_f32_e32 v12, v18 ; 7E184F12 v_sqrt_f32_e32 v13, v14 ; 7E1A4F0E v_min_f32_e32 v14, v12, v13 ; 141C1B0C v_max_f32_e32 v12, v12, v13 ; 16181B0C v_cmp_nlt_f32_e32 vcc, s17, v14 ; 7C9C1C11 v_cmp_ngt_f32_e64 s[0:1], s16, v12 ; D04B0000 00021810 s_and_b64 s[0:1], s[0:1], vcc ; 86806A00 s_and_b64 exec, exec, s[0:1] ; 86FE007E BB14_10: s_or_b64 exec, exec, s[24:25] ; 87FE187E BB14_11: s_or_saveexec_b64 s[0:1], s[22:23] ; BE802116 s_xor_b64 exec, exec, s[0:1] ; 88FE007E s_cbranch_execz BB14_13 ; BF880000 BB14_12: v_mul_f32_e32 v11, v32, v32 ; 0A164120 v_mac_f32_e32 v11, v30, v30 ; 2C163D1E v_mac_f32_e32 v11, v33, v33 ; 2C164321 v_sqrt_f32_e32 v11, v11 ; 7E164F0B v_sub_f32_e32 v11, s16, v11 ; 04161610 v_max_f32_e32 v11, 0, v11 ; 16161680 BB14_13: s_or_b64 exec, exec, s[0:1] ; 87FE007E s_buffer_load_dwordx2 s[0:1], s[12:15], 0x250 ; C0260006 00000250 s_buffer_load_dword s17, s[12:15], 0x258 ; C0220446 00000258 s_mov_b32 m0, s5 ; BEFC0005 s_buffer_load_dword s10, s[12:15], 0x3cc ; C0220286 000003CC s_buffer_load_dword s7, s[12:15], 0x45c ; C02201C6 0000045C s_waitcnt lgkmcnt(0) ; BF8C007F v_mul_f32_e64 v13, s1, s1 ; D105000D 00000201 v_interp_p1_f32_e32 v18, v0, attr1.y ; D4480500 v_mac_f32_e64 v13, s0, 
s0 ; D116000D 00000000 v_interp_p2_f32_e32 v18, v1, attr1.y ; D4490501 s_mov_b32 s21, 0 ; BE950080 v_mac_f32_e64 v13, s17, s17 ; D116000D 00002211 v_interp_p1_f32_e32 v20, v0, attr0.w ; D4500300 v_interp_p1_f32_e32 v21, v0, attr1.x ; D4540400 v_rsq_f32_e32 v0, v13 ; 7E00490D v_rcp_f32_e32 v24, v18 ; 7E304512 s_load_dwordx8 s[24:31], s[20:21], 0x0 ; C00E060A 00000000 s_add_i32 s20, s20, 64 ; 8114C014 s_load_dwordx4 s[20:23], s[20:21], 0x0 ; C00A050A 00000000 v_interp_p2_f32_e32 v20, v1, attr0.w ; D4510301 v_interp_p2_f32_e32 v21, v1, attr1.x ; D4550401 v_mul_f32_e32 v20, 0.5, v20 ; 0A2828F0 v_mul_f32_e32 v21, -0.5, v21 ; 0A2A2AF1 v_mul_f32_e32 v12, v4, v11 ; 0A181704 v_mov_b32_e32 v31, s10 ; 7E3E020A v_mov_b32_e32 v13, s7 ; 7E1A0207 v_mul_f32_e32 v14, s0, v0 ; 0A1C0000 v_mul_f32_e32 v15, s1, v0 ; 0A1E0001 v_mul_f32_e32 v16, s17, v0 ; 0A200011 v_mul_f32_e32 v1, v2, v11 ; 0A021702 v_mul_f32_e32 v0, v3, v11 ; 0A001703 v_mad_f32 v17, v4, v11, s2 ; D1C10011 000A1704 v_mad_f32 v18, v2, v11, s3 ; D1C10012 000E1702 v_mad_f32 v19, v3, v11, s4 ; D1C10013 00121703 v_mad_f32 v20, v20, v24, 0.5 ; D1C10014 03C23114 v_mad_f32 v21, v21, v24, 0.5 ; D1C10015 03C23115 s_and_b64 exec, exec, s[18:19] ; 86FE127E s_waitcnt lgkmcnt(0) ; BF8C007F image_sample v21, v[20:21], s[24:31], s[20:23] dmask:0x1 ; F0800100 00A61514 s_buffer_load_dword s5, s[12:15], 0x44 ; C0220146 00000044 s_buffer_load_dwordx2 s[0:1], s[12:15], 0x80 ; C0260006 00000080 s_buffer_load_dword s17, s[12:15], 0x88 ; C0220446 00000088 v_mul_f32_e32 v20, v32, v32 ; 0A284120 v_mov_b32_e32 v35, s7 ; 7E460207 v_mov_b32_e32 v36, s7 ; 7E480207 s_waitcnt lgkmcnt(0) ; BF8C007F v_mul_f32_e64 v38, s1, s1 ; D1050026 00000201 v_mac_f32_e32 v20, v30, v30 ; 2C283D1E v_mac_f32_e64 v38, s0, s0 ; D1160026 00000000 v_mac_f32_e32 v20, v33, v33 ; 2C284321 v_mac_f32_e64 v38, s17, s17 ; D1160026 00002211 v_mac_f32_e32 v35, 0x3f99999a, v23 ; 2C462EFF 3F99999A v_mac_f32_e32 v36, 0x3fb33333, v23 ; 2C482EFF 3FB33333 v_sub_f32_e32 v36, v36, v35 ; 04484724 v_sqrt_f32_e32 v20, v20 ; 7E284F14 v_rsq_f32_e32 v38, v38 ; 7E4C4926 v_rcp_f32_e32 v36, v36 ; 7E484524 v_sub_f32_e64 v37, 1.0, s10 ; D1020025 000014F2 v_sub_f32_e32 v20, v20, v35 ; 04284714 v_mul_f32_e32 v35, s0, v38 ; 0A464C00 v_mul_f32_e64 v20, v20, v36 clamp ; D1058014 00024914 v_mul_f32_e32 v36, s1, v38 ; 0A484C01 v_mul_f32_e32 v35, v35, v4 ; 0A460923 v_max_f32_e32 v20, v20, v37 ; 16284B14 v_mul_f32_e32 v37, s17, v38 ; 0A4A4C11 v_mad_f32 v35, v36, -v2, -v35 ; D1C10023 C48E0524 v_mad_f32 v35, -v37, v3, v35 ; D1C10023 248E0725 v_max_f32_e32 v35, 0x3727c5ac, v35 ; 164646FF 3727C5AC v_rcp_f32_e32 v35, v35 ; 7E464523 s_mov_b32 s18, 0x3a83126f ; BE9200FF 3A83126F v_mov_b32_e32 v24, 0 ; 7E300280 v_cmp_nlt_f32_e32 vcc, s18, v20 ; 7C9C2812 v_mov_b32_e32 v26, v24 ; 7E340318 v_mov_b32_e32 v27, v24 ; 7E360318 v_mov_b32_e32 v28, v24 ; 7E380318 v_mov_b32_e32 v25, v24 ; 7E320318 s_and_b64 vcc, exec, vcc ; 86EA6A7E s_waitcnt vmcnt(0) ; BF8C0F70 v_mul_f32_e32 v21, s5, v21 ; 0A2A2A05 v_mul_f32_e32 v21, v21, v35 ; 0A2A4715 s_cbranch_vccnz BB14_17 ; BF870000 v_mul_f32_e32 v27, v34, v29 ; 0A363B22 v_cmp_gt_f32_e32 vcc, 0, v27 ; 7C883680 v_mov_b32_e32 v24, v17 ; 7E300311 v_mov_b32_e32 v25, v18 ; 7E320312 v_mov_b32_e32 v26, v19 ; 7E340313 s_and_saveexec_b64 s[0:1], vcc ; BE80206A s_cbranch_execz BB14_16 ; BF880000 BB14_15: v_mad_f32 v24, -v27, v4, v30 ; D1C10018 247A091B v_mad_f32 v25, -v27, v2, v32 ; D1C10019 2482051B v_mad_f32 v26, -v27, v3, v33 ; D1C1001A 2486071B v_add_f32_e32 v24, s8, v24 ; 02303008 v_add_f32_e32 v25, s9, v25 ; 02323209 
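; The backend folds the IR's multiply/add and subtract-of-product chains
; into single VALU ops: v_mac_f32 accumulates into its destination, and a
; v_mad_f32 with a negated source, e.g. v_mad_f32 v24, -v27, v4, v30,
; computes v30 - v27*v4 in one instruction (the fsub(x, fmul(t, d)) pairs
; such as %274/%277). Likewise v_rsq_f32 fuses the IR's sqrt-then-
; reciprocal. A hedged C model of these forms (names illustrative):
;
;   #include <math.h>
;   static float v_mad(float a, float b, float c) { return a * b + c; }
;   static float v_mac(float a, float b, float d) { return d + a * b; }
;   static float v_rsq(float x) { return 1.0f / sqrtf(x); }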
v_add_f32_e32 v26, s6, v26 ; 02343406 BB14_16: s_or_b64 exec, exec, s[0:1] ; 87FE007E v_subrev_f32_e32 v28, s9, v25 ; 06383209 v_subrev_f32_e32 v27, s8, v24 ; 06363008 v_mul_f32_e32 v30, v28, v9 ; 0A3C131C v_subrev_f32_e32 v29, s6, v26 ; 063A3406 v_mac_f32_e32 v30, v27, v8 ; 2C3C111B v_mac_f32_e32 v30, v29, v10 ; 2C3C151D v_mad_f32 v33, -v9, v30, v25 ; D1C10021 24663D09 v_mad_f32 v32, -v8, v30, v24 ; D1C10020 24623D08 v_subrev_f32_e32 v33, s9, v33 ; 06424209 v_mad_f32 v34, -v10, v30, v26 ; D1C10022 246A3D0A v_subrev_f32_e32 v32, s8, v32 ; 06404008 v_mul_f32_e32 v35, v33, v33 ; 0A464321 s_buffer_load_dwordx4 s[20:23], s[12:15], 0x3b0 ; C02A0506 000003B0 v_subrev_f32_e32 v34, s6, v34 ; 06444406 v_mac_f32_e32 v35, v32, v32 ; 2C464120 v_mac_f32_e32 v35, v34, v34 ; 2C464522 v_sqrt_f32_e32 v36, v35 ; 7E484F23 s_buffer_load_dwordx4 s[24:27], s[12:15], 0x3d0 ; C02A0606 000003D0 s_waitcnt lgkmcnt(0) ; BF8C007F v_mov_b32_e32 v37, s23 ; 7E4A0217 v_mov_b32_e32 v40, 0x41a00000 ; 7E5002FF 41A00000 v_mac_f32_e32 v37, 0x358637bd, v36 ; 2C4A48FF 358637BD v_add_f32_e32 v36, 0x42c80000, v37 ; 02484AFF 42C80000 v_mac_f32_e32 v40, s21, v36 ; 2C504815 v_mul_f32_e32 v41, 0x40e00000, v40 ; 0A5250FF 40E00000 v_mul_f32_e32 v41, 0.15915494, v41 ; 0A5252F8 v_mul_f32_e32 v42, 0x422c0000, v40 ; 0A5450FF 422C0000 v_fract_f32_e32 v41, v41 ; 7E523729 v_mul_f32_e32 v42, 0.15915494, v42 ; 0A5454F8 v_fract_f32_e32 v42, v42 ; 7E54372A s_mov_b32 s0, 0x41500000 ; BE8000FF 41500000 v_mul_f32_e32 v37, s20, v36 ; 0A4A4814 v_cos_f32_e32 v41, v41 ; 7E525529 v_mul_f32_e32 v38, s0, v37 ; 0A4C4A00 s_mov_b32 s1, 0x41f80000 ; BE8100FF 41F80000 v_sin_f32_e32 v42, v42 ; 7E54532A v_mul_f32_e32 v38, 0.15915494, v38 ; 0A4C4CF8 v_mul_f32_e32 v39, s1, v37 ; 0A4E4A01 v_fract_f32_e32 v38, v38 ; 7E4C3726 v_mul_f32_e32 v39, 0.15915494, v39 ; 0A4E4EF8 s_mov_b32 s5, 0x41880000 ; BE8500FF 41880000 v_cos_f32_e32 v38, v38 ; 7E4C5526 v_fract_f32_e32 v39, v39 ; 7E4E3727 v_mac_f32_e32 v41, s5, v40 ; 2C525005 v_add_f32_e32 v40, v41, v42 ; 02505529 v_sin_f32_e32 v39, v39 ; 7E4E5327 v_mul_f32_e32 v40, 0.15915494, v40 ; 0A5050F8 v_fract_f32_e32 v40, v40 ; 7E503728 v_mac_f32_e32 v38, 0x41300000, v37 ; 2C4C4AFF 41300000 v_sin_f32_e32 v40, v40 ; 7E505328 v_add_f32_e32 v37, v38, v39 ; 024A4F26 v_mul_f32_e32 v37, 0.15915494, v37 ; 0A4A4AF8 v_mov_b32_e32 v38, 0x3ecccccd ; 7E4C02FF 3ECCCCCD v_fract_f32_e32 v37, v37 ; 7E4A3725 v_mac_f32_e32 v38, 0.5, v40 ; 2C4C50F0 s_mov_b32 s17, 0x409fffff ; BE9100FF 409FFFFF v_sub_f32_e32 v43, s3, v25 ; 04563203 v_sin_f32_e32 v37, v37 ; 7E4A5325 v_mul_f32_e32 v46, v9, v9 ; 0A5C1309 v_mul_f32_e64 v38, v38, s17 clamp ; D1058026 00002326 v_mov_b32_e32 v39, 0x40400000 ; 7E4E02FF 40400000 v_sub_f32_e32 v42, s2, v24 ; 04543002 v_mul_f32_e32 v44, v43, v43 ; 0A58572B v_mac_f32_e32 v39, -2.0, v38 ; 2C4E4CF5 v_mac_f32_e32 v46, v8, v8 ; 2C5C1108 v_mac_f32_e32 v44, v42, v42 ; 2C58552A v_sub_f32_e32 v45, s4, v26 ; 045A3404 v_mac_f32_e32 v46, v10, v10 ; 2C5C150A v_mul_f32_e32 v39, v38, v39 ; 0A4E4F26 v_mul_f32_e32 v38, 0x3f19999a, v38 ; 0A4C4CFF 3F19999A v_mac_f32_e32 v44, v45, v45 ; 2C585B2D v_mul_f32_e32 v38, v38, v39 ; 0A4C4F26 v_madak_f32 v39, 0.5, v37, 0x3ecccccd ; 304E4AF0 3ECCCCCD s_mov_b32 s17, 0x41200000 ; BE9100FF 41200000 v_rsq_f32_e32 v47, v44 ; 7E5E492C v_rsq_f32_e32 v46, v46 ; 7E5C492E v_mul_f32_e64 v39, v39, s17 clamp ; D1058027 00002327 v_madak_f32 v41, -2.0, v39, 0x40400000 ; 30524EF5 40400000 v_mul_f32_e32 v41, v39, v41 ; 0A525327 v_mul_f32_e32 v39, 0.5, v39 ; 0A4E4EF0 v_mac_f32_e32 v38, v39, v41 ; 2C4C5327 v_mul_f32_e32 v39, 
v43, v47 ; 0A4E5F2B v_mul_f32_e32 v41, v9, v46 ; 0A525D09 v_mul_f32_e32 v39, v41, v39 ; 0A4E4F29 v_mul_f32_e32 v41, v42, v47 ; 0A525F2A v_mul_f32_e32 v42, v8, v46 ; 0A545D08 v_mac_f32_e32 v39, v42, v41 ; 2C4E532A v_mul_f32_e32 v41, s27, v36 ; 0A52481B v_mul_f32_e32 v42, s5, v41 ; 0A545205 s_mov_b32 s5, 0x41b80000 ; BE8500FF 41B80000 v_mul_f32_e32 v42, 0.15915494, v42 ; 0A5454F8 v_mul_f32_e32 v43, s5, v41 ; 0A565205 v_fract_f32_e32 v42, v42 ; 7E54372A v_mul_f32_e32 v43, 0.15915494, v43 ; 0A5656F8 v_fract_f32_e32 v43, v43 ; 7E56372B v_cos_f32_e32 v42, v42 ; 7E54552A v_sin_f32_e32 v43, v43 ; 7E56532B s_mov_b32 s17, 0x41980000 ; BE9100FF 41980000 v_mul_f32_e32 v45, v45, v47 ; 0A5A5F2D v_mac_f32_e32 v42, s17, v41 ; 2C545211 v_mac_f32_e32 v43, 0x42140000, v41 ; 2C5652FF 42140000 v_mul_f32_e32 v41, 0.15915494, v42 ; 0A5254F8 v_mul_f32_e32 v42, 0.15915494, v43 ; 0A5456F8 v_fract_f32_e32 v41, v41 ; 7E523729 v_sin_f32_e32 v41, v41 ; 7E525329 v_fract_f32_e32 v42, v42 ; 7E54372A v_sin_f32_e32 v42, v42 ; 7E54532A v_mul_f32_e32 v43, v10, v46 ; 0A565D0A v_mul_f32_e32 v41, 0.5, v41 ; 0A5252F0 v_mac_f32_e32 v39, v43, v45 ; 2C4E5B2B v_mad_f32 v41, v41, v42, 0.5 ; D1C10029 03C25529 v_sqrt_f32_e32 v42, v44 ; 7E544F2C s_mov_b32 s18, 0xbe99999a ; BE9200FF BE99999A v_add_f32_e64 v39, |v39|, s18 ; D1010127 00002527 s_mov_b32 s18, 0x3fb6db6e ; BE9200FF 3FB6DB6E s_buffer_load_dwordx4 s[28:31], s[12:15], 0x3e0 ; C02A0706 000003E0 v_mul_f32_e64 v39, v39, s18 clamp ; D1058027 00002527 s_mov_b32 s18, 0x3951b717 ; BE9200FF 3951B717 v_add_f32_e32 v43, 0xc59c4000, v42 ; 025654FF C59C4000 v_mul_f32_e64 v43, v43, s18 clamp ; D105802B 0000252B v_sub_f32_e32 v39, 1.0, v39 ; 044E4EF2 v_mov_b32_e32 v44, 0x3e800000 ; 7E5802FF 3E800000 v_mul_f32_e32 v39, v39, v43 ; 0A4E5727 s_mov_b32 s18, 0x365fb23b ; BE9200FF 365FB23B v_add_f32_e32 v43, 0xc8435000, v42 ; 025654FF C8435000 v_mul_f32_e64 v43, v43, s18 clamp ; D105802B 0000252B v_mac_f32_e32 v44, v41, v44 ; 2C585929 s_waitcnt lgkmcnt(0) ; BF8C007F v_mul_f32_e32 v36, s31, v36 ; 0A48481F v_max_f32_e32 v43, v39, v43 ; 16565727 v_sub_f32_e32 v44, v44, v41 ; 0458532C v_mul_f32_e32 v45, s17, v36 ; 0A5A4811 v_mac_f32_e32 v41, v43, v44 ; 2C52592B v_mul_f32_e32 v44, s0, v36 ; 0A584800 v_mul_f32_e32 v44, 0.15915494, v44 ; 0A5858F8 v_mul_f32_e32 v45, 0.15915494, v45 ; 0A5A5AF8 v_fract_f32_e32 v44, v44 ; 7E58372C v_fract_f32_e32 v45, v45 ; 7E5A372D v_cos_f32_e32 v44, v44 ; 7E58552C v_cos_f32_e32 v45, v45 ; 7E5A552D v_mad_f32 v40, v40, 0.5, 0.5 ; D1C10028 03C1E128 v_mad_f32 v37, v37, 0.5, 0.5 ; D1C10025 03C1E125 v_mac_f32_e32 v44, s5, v36 ; 2C584805 v_mac_f32_e32 v45, s1, v36 ; 2C5A4801 v_mul_f32_e32 v36, 0.15915494, v44 ; 0A4858F8 v_mul_f32_e32 v44, 0.15915494, v45 ; 0A585AF8 v_fract_f32_e32 v36, v36 ; 7E483724 v_fract_f32_e32 v44, v44 ; 7E58372C v_sin_f32_e32 v36, v36 ; 7E485324 v_sin_f32_e32 v44, v44 ; 7E58532C v_mul_f32_e32 v43, s28, v40 ; 0A56501C v_mul_f32_e32 v46, s29, v40 ; 0A5C501D v_mul_f32_e32 v40, s30, v40 ; 0A50501E v_rsq_f32_e32 v35, v35 ; 7E464923 v_mac_f32_e32 v43, s24, v37 ; 2C564A18 v_mac_f32_e32 v46, s25, v37 ; 2C5C4A19 v_mac_f32_e32 v40, s26, v37 ; 2C504A1A v_add_f32_e32 v37, 0xc7435000, v42 ; 024A54FF C7435000 s_mov_b32 s1, 0x3727c5ac ; BE8100FF 3727C5AC v_mul_f32_e64 v37, v37, s1 clamp ; D1058025 00000325 v_mul_f32_e32 v36, v36, v44 ; 0A485924 s_mov_b32 s0, 0x3e99999a ; BE8000FF 3E99999A v_max_f32_e32 v39, v37, v39 ; 164E4F25 v_mul_f32_e32 v42, s0, v36 ; 0A544800 v_mul_f32_e32 v39, v42, v39 ; 0A4E4F2A v_mad_f32 v36, v36, s0, -v39 ; D1C10024 849C0124 v_add_f32_e32 v39, 0.5, 
v41 ; 024E52F0 v_mul_f32_e32 v32, v32, v35 ; 0A404720 v_mul_f32_e32 v33, v33, v35 ; 0A424721 v_mul_f32_e32 v34, v34, v35 ; 0A444722 v_subrev_f32_e32 v35, s16, v23 ; 06462E10 v_mad_f32 v42, v43, v39, v36 ; D1C1002A 04924F2B v_mad_f32 v43, v46, v39, v36 ; D1C1002B 04924F2E v_mac_f32_e32 v36, v40, v39 ; 2C484F28 v_mad_f32 v39, v35, 0.5, s16 ; D1C10027 0041E123 v_mad_f32 v33, v33, v39, s9 ; D1C10021 00264F21 v_sub_f32_e32 v25, v25, v33 ; 04324319 v_mad_f32 v32, v32, v39, s8 ; D1C10020 00224F20 v_sub_f32_e32 v24, v24, v32 ; 04304118 v_mul_f32_e32 v25, v25, v25 ; 0A323319 v_mac_f32_e32 v25, v24, v24 ; 2C323118 v_add_f32_e32 v24, s22, v41 ; 02305216 v_madak_f32 v24, v38, v24, 0xbf400000 ; 30303126 BF400000 v_madak_f32 v32, v37, v24, 0x3f400000 ; 30403125 3F400000 v_mul_f32_e32 v24, v28, v15 ; 0A301F1C v_mul_f32_e32 v28, v28, v28 ; 0A38391C v_mac_f32_e32 v28, v27, v27 ; 2C38371B v_mac_f32_e32 v24, v27, v14 ; 2C301D1B v_mac_f32_e32 v28, v29, v29 ; 2C383B1D v_mac_f32_e32 v24, v29, v16 ; 2C30211D v_mad_f32 v27, -s7, s7, v28 ; D1C1001B 24700E07 v_min_f32_e32 v28, v27, v24 ; 1438311B v_mul_f32_e32 v24, v24, v24 ; 0A303118 v_cmp_lt_f32_e64 s[0:1], v24, v27 ; D0410000 00023718 v_cmp_lt_f32_e32 vcc, 0, v28 ; 7C823880 s_or_b64 s[0:1], s[0:1], vcc ; 87806A00 v_mov_b32_e32 v24, 0x3d4cccd0 ; 7E3002FF 3D4CCCD0 v_mad_f32 v34, v34, v39, s6 ; D1C10022 001A4F22 v_cndmask_b32_e64 v24, v24, 1.0, s[0:1] ; D1000018 0001E518 v_max_f32_e32 v27, 0x3f666666, v24 ; 163630FF 3F666666 v_sub_f32_e32 v26, v26, v34 ; 0434451A v_mac_f32_e32 v25, v26, v26 ; 2C32351A v_sub_f32_e32 v24, v24, v27 ; 04303718 v_mul_f32_e32 v26, 0.5, v35 ; 0A3446F0 v_mac_f32_e32 v27, s10, v24 ; 2C36300A v_rcp_f32_e32 v24, v26 ; 7E30451A v_sqrt_f32_e32 v25, v25 ; 7E324F19 v_rcp_f32_e32 v29, s11 ; 7E3A440B v_mul_f32_e64 v28, v42, v27 clamp ; D105801C 0002372A v_mul_f32_e64 v26, v43, v27 clamp ; D105801A 0002372B v_mul_f32_e64 v24, v25, v24 clamp ; D1058018 00023119 v_mul_f32_e64 v25, |v30|, v29 clamp ; D1058119 00023B1E v_sub_f32_e32 v25, 1.0, v25 ; 043232F2 v_sub_f32_e32 v24, 1.0, v24 ; 043030F2 v_mul_f32_e32 v25, v25, v25 ; 0A323319 v_mul_f32_e32 v25, v25, v24 ; 0A323119 v_mul_f32_e32 v24, v25, v32 ; 0A304119 v_mul_f32_e64 v33, v36, v27 clamp ; D1058021 00023724 v_mad_f32 v25, v25, v32, v24 ; D1C10019 04624119 v_cmp_ge_f32_e32 vcc, v21, v11 ; 7C8C1715 v_mul_f32_e32 v27, v26, v24 ; 0A36311A v_mul_f32_e32 v28, v28, v24 ; 0A38311C v_mul_f32_e32 v26, v33, v24 ; 0A343121 v_cndmask_b32_e32 v25, 0, v25, vcc ; 00323280 BB14_17: v_mov_b32_e32 v29, 0x3fe66666 ; 7E3A02FF 3FE66666 v_mul_f32_e32 v32, 0x3f400000, v22 ; 0A402CFF 3F400000 v_mov_b32_e32 v22, 0x3e800000 ; 7E2C02FF 3E800000 s_mov_b32 s0, 0x3f7fbe77 ; BE8000FF 3F7FBE77 v_cmp_ngt_f32_e32 vcc, s0, v20 ; 7C962800 v_mac_f32_e32 v29, 0xbf4ccccc, v31 ; 2C3A3EFF BF4CCCCC v_mac_f32_e32 v22, 0xbe800000, v31 ; 2C2C3EFF BE800000 v_mov_b32_e32 v31, 0 ; 7E3E0280 v_mad_f32 v30, s10, 4.0, 1.0 ; D1C1001E 03C9EC0A s_and_b64 vcc, exec, vcc ; 86EA6A7E v_mov_b32_e32 v36, 0 ; 7E480280 v_mov_b32_e32 v33, v31 ; 7E42031F v_mov_b32_e32 v34, v31 ; 7E44031F v_mov_b32_e32 v35, v31 ; 7E46031F s_cbranch_vccnz BB14_24 ; BF870000 v_mul_f32_e32 v33, v9, v9 ; 0A421309 v_mac_f32_e32 v33, v8, v8 ; 2C421108 v_mac_f32_e32 v33, v10, v10 ; 2C42150A v_subrev_f32_e32 v23, s16, v23 ; 062E2E10 v_rsq_f32_e32 v33, v33 ; 7E424921 v_mul_f32_e32 v31, 0.5, v23 ; 0A3E2EF0 v_rcp_f32_e32 v38, s11 ; 7E4C440B v_rcp_f32_e32 v37, v31 ; 7E4A451F v_mov_b32_e32 v43, 0 ; 7E560280 v_mov_b32_e32 v49, 0 ; 7E620280 v_mad_f32 v23, v23, 0.5, s16 ; D1C10017 0041E117 
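; The llvm.sin/llvm.cos calls are range-reduced for the hardware units in
; the v_fract/v_sin sequences above: multiply by 0.15915494 (1/(2*pi)) to
; convert radians to revolutions, v_fract_f32 to wrap into [0,1), then
; v_sin_f32 / v_cos_f32, which take their argument in revolutions. A
; hedged C model of that lowering:
;
;   #include <math.h>
;   static float gcn_sin(float x) {
;       float r = x * 0.15915494f;       /* radians -> revolutions */
;       r = r - floorf(r);               /* v_fract_f32 */
;       return sinf(r * 6.2831853f);     /* v_sin_f32 semantics */
;   }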
v_mul_f32_e32 v39, v8, v33 ; 0A4E4308
v_mul_f32_e32 v40, v9, v33 ; 0A504309
v_mul_f32_e32 v41, v10, v33 ; 0A52430A
v_mul_f32_e64 v42, s7, s7 ; D105002A 00000E07
s_mov_b32 s5, 0x41500000 ; BE8500FF 41500000
s_mov_b32 s11, 0x41f80000 ; BE8B00FF 41F80000
s_mov_b32 s16, 0x41200000 ; BE9000FF 41200000
s_mov_b32 s17, 0x41880000 ; BE9100FF 41880000
s_mov_b32 s18, 0x409fffff ; BE9200FF 409FFFFF
s_mov_b32 s19, 0x41980000 ; BE9300FF 41980000
s_mov_b32 s20, 0x41b80000 ; BE9400FF 41B80000
s_mov_b32 s21, 0x365fb23b ; BE9500FF 365FB23B
s_mov_b32 s22, 0xbe99999a ; BE9600FF BE99999A
s_mov_b32 s23, 0x3fb6db6e ; BE9700FF 3FB6DB6E
s_mov_b32 s24, 0x3951b717 ; BE9800FF 3951B717
s_mov_b32 s25, 0x3e99999a ; BE9900FF 3E99999A
s_mov_b32 s26, 0x3727c5ac ; BE9A00FF 3727C5AC
v_mov_b32_e32 v50, v43 ; 7E64032B
v_mov_b32_e32 v51, v43 ; 7E66032B
v_mov_b32_e32 v53, v43 ; 7E6A032B
v_mov_b32_e32 v52, v43 ; 7E68032B
v_mov_b32_e32 v44, v49 ; 7E580331
BB14_19:
v_cmp_lt_i32_e32 vcc, 19, v44 ; 7D825893
v_mov_b32_e32 v31, v52 ; 7E3E0334
v_mov_b32_e32 v33, v53 ; 7E420335
v_mov_b32_e32 v34, v51 ; 7E440333
v_mov_b32_e32 v35, v50 ; 7E460332
v_mov_b32_e32 v36, v49 ; 7E480331
s_and_b64 vcc, exec, vcc ; 86EA6A7E
s_or_b64 s[0:1], s[0:1], exec ; 87807E00
s_cbranch_vccnz BB14_23 ; BF870000
v_mad_f32 v55, v2, v43, v18 ; D1C10037 044A5702
v_mad_f32 v54, v4, v43, v17 ; D1C10036 04465704
v_subrev_f32_e32 v46, s9, v55 ; 065C6E09
v_mad_f32 v56, v3, v43, v19 ; D1C10038 044E5703
v_subrev_f32_e32 v45, s8, v54 ; 065A6C08
v_mul_f32_e32 v48, v46, v9 ; 0A60132E
v_mac_f32_e32 v48, v45, v8 ; 2C60112D
v_subrev_f32_e32 v47, s6, v56 ; 065E7006
v_mac_f32_e32 v48, v47, v10 ; 2C60152F
v_mad_f32 v50, -v9, v48, v55 ; D1C10032 24DE6109
v_mad_f32 v49, -v8, v48, v54 ; D1C10031 24DA6108
v_subrev_f32_e32 v50, s9, v50 ; 06646409
v_mad_f32 v51, -v10, v48, v56 ; D1C10033 24E2610A
v_subrev_f32_e32 v49, s8, v49 ; 06626208
v_mul_f32_e32 v52, v50, v50 ; 0A686532
v_subrev_f32_e32 v51, s6, v51 ; 06666606
v_mac_f32_e32 v52, v49, v49 ; 2C686331
v_mac_f32_e32 v52, v51, v51 ; 2C686733
v_rsq_f32_e32 v53, v52 ; 7E6A4934
v_mul_f32_e64 v48, |v48|, v38 clamp ; D1058130 00024D30
v_sub_f32_e32 v48, 1.0, v48 ; 046060F2
v_sqrt_f32_e32 v57, v52 ; 7E724F34
v_mul_f32_e32 v50, v50, v53 ; 0A646B32
v_mul_f32_e32 v49, v49, v53 ; 0A626B31
v_mad_f32 v50, v50, v23, s9 ; D1C10032 00262F32
v_mad_f32 v49, v49, v23, s8 ; D1C10031 00222F31
v_sub_f32_e32 v50, v55, v50 ; 04646537
v_mul_f32_e32 v51, v51, v53 ; 0A666B33
v_sub_f32_e32 v49, v54, v49 ; 04626336
v_mul_f32_e32 v50, v50, v50 ; 0A646532
v_mac_f32_e32 v50, v49, v49 ; 2C646331
v_mad_f32 v49, v51, v23, s6 ; D1C10031 001A2F33
v_sub_f32_e32 v49, v56, v49 ; 04626338
v_mac_f32_e32 v50, v49, v49 ; 2C646331
v_sqrt_f32_e32 v49, v50 ; 7E624F32
v_mul_f32_e32 v48, v48, v48 ; 0A606130
v_mov_b32_e32 v52, v31 ; 7E68031F
v_mov_b32_e32 v50, v35 ; 7E640323
v_mul_f32_e64 v49, v49, v37 clamp ; D1058031 00024B31
v_sub_f32_e32 v49, 1.0, v49 ; 046262F2
v_mul_f32_e32 v48, v48, v49 ; 0A606330
v_cmp_lt_f32_e32 vcc, 0, v48 ; 7C826080
v_mov_b32_e32 v51, v34 ; 7E660322
v_mov_b32_e32 v53, v33 ; 7E6A0321
v_mov_b32_e32 v49, v36 ; 7E620324
s_and_saveexec_b64 s[28:29], vcc ; BE9C206A
s_cbranch_execz BB14_22 ; BF880000
BB14_21:
s_buffer_load_dwordx4 s[32:35], s[12:15], 0x3b0 ; C02A0806 000003B0
s_buffer_load_dwordx4 s[36:39], s[12:15], 0x3d0 ; C02A0906 000003D0
s_buffer_load_dwordx4 s[40:43], s[12:15], 0x3e0 ; C02A0A06 000003E0
v_sub_f32_e32 v49, s2, v54 ; 04626C02
v_sub_f32_e32 v50, s3, v55 ; 04646E03
s_waitcnt lgkmcnt(0) ; BF8C007F
v_mov_b32_e32 v53, s35 ; 7E6A0223
v_mac_f32_e32 v53, 0x358637bd, v57 ; 2C6A72FF 358637BD
v_add_f32_e32 v53, 0x42c80000, v53 ; 026A6AFF 42C80000
v_mov_b32_e32 v57, 0x41a00000 ; 7E7202FF 41A00000
v_mac_f32_e32 v57, s33, v53 ; 2C726A21
v_mul_f32_e32 v58, 0x40e00000, v57 ; 0A7472FF 40E00000
v_mul_f32_e32 v58, 0.15915494, v58 ; 0A7474F8
v_mul_f32_e32 v59, 0x422c0000, v57 ; 0A7672FF 422C0000
v_fract_f32_e32 v58, v58 ; 7E74373A
v_mul_f32_e32 v59, 0.15915494, v59 ; 0A7676F8
v_cos_f32_e32 v58, v58 ; 7E74553A
v_fract_f32_e32 v59, v59 ; 7E76373B
v_sin_f32_e32 v59, v59 ; 7E76533B
v_mul_f32_e32 v54, s32, v53 ; 0A6C6A20
v_mac_f32_e32 v58, s17, v57 ; 2C747211
v_mul_f32_e32 v55, s5, v54 ; 0A6E6C05
v_add_f32_e32 v57, v58, v59 ; 0272773A
v_mul_f32_e32 v59, s39, v53 ; 0A766A27
v_mul_f32_e32 v60, s17, v59 ; 0A787611
v_mul_f32_e32 v61, s20, v59 ; 0A7A7614
v_mul_f32_e32 v55, 0.15915494, v55 ; 0A6E6EF8
v_mul_f32_e32 v60, 0.15915494, v60 ; 0A7878F8
v_mul_f32_e32 v61, 0.15915494, v61 ; 0A7A7AF8
v_sub_f32_e32 v51, s4, v56 ; 04667004
v_mul_f32_e32 v56, s11, v54 ; 0A706C0B
v_fract_f32_e32 v55, v55 ; 7E6E3737
v_mul_f32_e32 v56, 0.15915494, v56 ; 0A7070F8
v_fract_f32_e32 v60, v60 ; 7E78373C
v_fract_f32_e32 v61, v61 ; 7E7A373D
v_cos_f32_e32 v55, v55 ; 7E6E5537
v_fract_f32_e32 v56, v56 ; 7E703738
v_cos_f32_e32 v60, v60 ; 7E78553C
v_sin_f32_e32 v61, v61 ; 7E7A533D
v_sin_f32_e32 v56, v56 ; 7E705338
v_mul_f32_e32 v52, v50, v50 ; 0A686532
v_mul_f32_e32 v57, 0.15915494, v57 ; 0A7272F8
v_mac_f32_e32 v52, v49, v49 ; 2C686331
v_fract_f32_e32 v57, v57 ; 7E723739
v_mac_f32_e32 v55, 0x41300000, v54 ; 2C6E6CFF 41300000
v_mac_f32_e32 v52, v51, v51 ; 2C686733
v_sin_f32_e32 v57, v57 ; 7E725339
v_mac_f32_e32 v60, s19, v59 ; 2C787613
v_mac_f32_e32 v61, 0x42140000, v59 ; 2C7A76FF 42140000
v_add_f32_e32 v54, v55, v56 ; 026C7137
v_mul_f32_e32 v59, 0.15915494, v60 ; 0A7678F8
v_mul_f32_e32 v60, 0.15915494, v61 ; 0A787AF8
v_rsq_f32_e32 v61, v52 ; 7E7A4934
v_mul_f32_e32 v54, 0.15915494, v54 ; 0A6C6CF8
v_mov_b32_e32 v55, 0x3ecccccd ; 7E6E02FF 3ECCCCCD
v_fract_f32_e32 v54, v54 ; 7E6C3736
v_mac_f32_e32 v55, 0.5, v57 ; 2C6E72F0
v_sin_f32_e32 v54, v54 ; 7E6C5336
v_mul_f32_e32 v50, v50, v61 ; 0A647B32
v_mul_f32_e64 v55, v55, s18 clamp ; D1058037 00002537
v_mov_b32_e32 v56, 0x40400000 ; 7E7002FF 40400000
v_mac_f32_e32 v56, -2.0, v55 ; 2C706EF5
v_mul_f32_e32 v49, v49, v61 ; 0A627B31
v_mul_f32_e32 v50, v40, v50 ; 0A646528
v_fract_f32_e32 v59, v59 ; 7E76373B
v_mul_f32_e32 v56, v55, v56 ; 0A707137
v_mul_f32_e32 v55, 0x3f19999a, v55 ; 0A6E6EFF 3F19999A
v_mac_f32_e32 v50, v39, v49 ; 2C646327
v_sqrt_f32_e32 v49, v52 ; 7E624F34
v_fract_f32_e32 v60, v60 ; 7E78373C
v_mul_f32_e32 v55, v55, v56 ; 0A6E7137
v_madak_f32 v56, 0.5, v54, 0x3ecccccd ; 30706CF0 3ECCCCCD
v_mul_f32_e32 v51, v51, v61 ; 0A667B33
v_sin_f32_e32 v59, v59 ; 7E76533B
v_mac_f32_e32 v50, v41, v51 ; 2C646729
v_mul_f32_e64 v56, v56, s16 clamp ; D1058038 00002138
v_sin_f32_e32 v60, v60 ; 7E78533C
v_madak_f32 v58, -2.0, v56, 0x40400000 ; 307470F5 40400000
v_add_f32_e64 v50, |v50|, s22 ; D1010132 00002D32
v_mul_f32_e64 v50, v50, s23 clamp ; D1058032 00002F32
v_add_f32_e32 v51, 0xc59c4000, v49 ; 026662FF C59C4000
v_mul_f32_e32 v58, v56, v58 ; 0A747538
v_mul_f32_e32 v56, 0.5, v56 ; 0A7070F0
v_mac_f32_e32 v55, v56, v58 ; 2C6E7538
v_mul_f32_e32 v56, 0.5, v59 ; 0A7076F0
v_mul_f32_e64 v51, v51, s24 clamp ; D1058033 00003133
v_sub_f32_e32 v50, 1.0, v50 ; 046464F2
v_mul_f32_e32 v50, v50, v51 ; 0A646732
v_add_f32_e32 v51, 0xc8435000, v49 ; 026662FF C8435000
v_mad_f32 v56, v56, v60, 0.5 ; D1C10038 03C27938
v_mov_b32_e32 v52, 0x3e800000 ; 7E6802FF 3E800000
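Another idiom worth noting above: the dot-product accumulation into v52 (one v_mul_f32 followed by two v_mac_f32) feeding v_rsq_f32_e32 v61, v52 and three per-component multiplies is the usual strength-reduced vec3 normalize, with the divide by length replaced by a reciprocal square root. Sketched in C (exact math here; the hardware v_rsq is an approximation):

    #include <math.h>

    typedef struct { float x, y, z; } vec3;

    /* Model of the normalize idiom in the listing:
     *   v_mul_f32  d, y, y
     *   v_mac_f32  d, x, x
     *   v_mac_f32  d, z, z
     *   v_rsq_f32  r, d
     *   v_mul_f32  x, x, r   (and likewise for y and z) */
    vec3 normalize3(vec3 v)
    {
        float d = v.y * v.y;        /* v_mul_f32 */
        d += v.x * v.x;             /* v_mac_f32 */
        d += v.z * v.z;             /* v_mac_f32 */
        float r = 1.0f / sqrtf(d);  /* v_rsq_f32 */
        return (vec3){ v.x * r, v.y * r, v.z * r };
    }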
v_mul_f32_e64 v51, v51, s21 clamp ; D1058033 00002B33
v_mac_f32_e32 v52, v56, v52 ; 2C686938
v_mul_f32_e32 v53, s43, v53 ; 0A6A6A2B
v_max_f32_e32 v51, v50, v51 ; 16666732
v_sub_f32_e32 v52, v52, v56 ; 04687134
v_mac_f32_e32 v56, v51, v52 ; 2C706933
v_mad_f32 v52, v57, 0.5, 0.5 ; D1C10034 03C1E139
v_mul_f32_e32 v57, s5, v53 ; 0A726A05
v_mul_f32_e32 v58, s19, v53 ; 0A746A13
v_mul_f32_e32 v57, 0.15915494, v57 ; 0A7272F8
v_mul_f32_e32 v58, 0.15915494, v58 ; 0A7474F8
v_fract_f32_e32 v57, v57 ; 7E723739
v_fract_f32_e32 v58, v58 ; 7E74373A
v_cos_f32_e32 v57, v57 ; 7E725539
v_cos_f32_e32 v58, v58 ; 7E74553A
v_add_f32_e32 v49, 0xc7435000, v49 ; 026262FF C7435000
v_mad_f32 v51, v54, 0.5, 0.5 ; D1C10033 03C1E136
v_mac_f32_e32 v57, s20, v53 ; 2C726A14
v_mac_f32_e32 v58, s11, v53 ; 2C746A0B
v_mul_f32_e32 v53, 0.15915494, v57 ; 0A6A72F8
v_mul_f32_e32 v57, 0.15915494, v58 ; 0A7274F8
v_fract_f32_e32 v53, v53 ; 7E6A3735
v_fract_f32_e32 v57, v57 ; 7E723739
v_sin_f32_e32 v53, v53 ; 7E6A5335
v_sin_f32_e32 v57, v57 ; 7E725339
v_mul_f32_e32 v54, s40, v52 ; 0A6C6828
v_mul_f32_e32 v59, s41, v52 ; 0A766829
v_mul_f32_e32 v52, s42, v52 ; 0A68682A
v_mul_f32_e64 v49, v49, s26 clamp ; D1058031 00003531
v_mac_f32_e32 v54, s36, v51 ; 2C6C6624
v_mac_f32_e32 v59, s37, v51 ; 2C766625
v_mac_f32_e32 v52, s38, v51 ; 2C686626
v_mul_f32_e32 v51, v53, v57 ; 0A667335
v_max_f32_e32 v50, v49, v50 ; 16646531
v_mul_f32_e32 v53, s25, v51 ; 0A6A6619
v_mul_f32_e32 v50, v53, v50 ; 0A646535
v_mad_f32 v50, v51, s25, -v50 ; D1C10032 84C83333
v_add_f32_e32 v51, 0.5, v56 ; 026670F0
v_mad_f32 v53, v54, v51, v50 ; D1C10035 04CA6736
v_mad_f32 v54, v59, v51, v50 ; D1C10036 04CA673B
v_mac_f32_e32 v50, v52, v51 ; 2C646734
v_add_f32_e32 v51, s34, v56 ; 02667022
v_madak_f32 v51, v55, v51, 0xbf400000 ; 30666737 BF400000
v_madak_f32 v49, v49, v51, 0x3f400000 ; 30626731 3F400000
v_mul_f32_e32 v51, v46, v15 ; 0A661F2E
v_mul_f32_e32 v46, v46, v46 ; 0A5C5D2E
v_mac_f32_e32 v51, v45, v14 ; 2C661D2D
v_mac_f32_e32 v46, v45, v45 ; 2C5C5B2D
v_mac_f32_e32 v46, v47, v47 ; 2C5C5F2F
v_mac_f32_e32 v51, v47, v16 ; 2C66212F
v_sub_f32_e32 v45, v46, v42 ; 045A552E
v_mul_f32_e32 v46, v51, v51 ; 0A5C6733
v_cmp_lt_f32_e32 vcc, v46, v45 ; 7C825B2E
v_min_f32_e32 v45, v45, v51 ; 145A672D
v_cmp_lt_f32_e64 s[0:1], 0, v45 ; D0410000 00025A80
v_mov_b32_e32 v45, 0x3d4cccd0 ; 7E5A02FF 3D4CCCD0
s_or_b64 s[0:1], vcc, s[0:1] ; 8780006A
v_cndmask_b32_e64 v45, v45, 1.0, s[0:1] ; D100002D 0001E52D
v_max_f32_e32 v46, 0x3f666666, v45 ; 165C5AFF 3F666666
v_sub_f32_e32 v46, v46, v45 ; 045C5B2E
v_mac_f32_e32 v45, s10, v46 ; 2C5A5C0A
v_mul_f32_e64 v46, v53, v45 clamp ; D105802E 00025B35
v_mul_f32_e32 v52, v48, v49 ; 0A686330
v_mul_f32_e64 v47, v54, v45 clamp ; D105802F 00025B36
v_mul_f32_e64 v45, v50, v45 clamp ; D105802D 00025B32
v_mad_f32 v50, v46, v52, v35 ; D1C10032 048E692E
v_add_f32_e32 v46, v43, v11 ; 025C172B
v_mad_f32 v53, v45, v52, v33 ; D1C10035 0486692D
v_mad_f32 v45, v48, v49, v36 ; D1C1002D 04926330
v_cmp_ge_f32_e32 vcc, v21, v46 ; 7C8C5D15
v_mad_f32 v51, v47, v52, v34 ; D1C10033 048A692F
v_mad_f32 v52, v48, v49, v31 ; D1C10034 047E6330
v_cndmask_b32_e32 v49, v36, v45, vcc ; 00625B24
BB14_22:
s_or_b64 exec, exec, s[28:29] ; 87FE1C7E
v_add_f32_e32 v43, v43, v32 ; 0256412B
v_add_u32_e32 v44, vcc, 1, v44 ; 32585881
s_mov_b64 s[0:1], 0 ; BE800180
s_and_b64 vcc, exec, s[0:1] ; 86EA007E
s_cbranch_vccz BB14_19 ; BF860000
s_branch BB14_24 ; BF820000
BB14_23:
s_and_b64 vcc, exec, s[0:1] ; 86EA007E
s_cbranch_vccz BB14_19 ; BF860000
BB14_24:
v_max_f32_e32 v11, v31, v24 ; 1616311F
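The s_and_saveexec_b64 s[28:29], vcc / s_cbranch_execz BB14_22 pair before BB14_21, closed by s_or_b64 exec, exec, s[28:29] at BB14_22, is the standard GCN encoding of a divergent if: save the execution mask, restrict it to the lanes where the condition holds, skip the body when no lane remains, and re-enable the saved lanes afterwards. A scalar C sketch of the mask bookkeeping (exec is really a 64-bit lane mask, one bit per invocation):

    #include <stdint.h>

    typedef uint64_t lane_mask;

    /* Scalar model of the divergent-if idiom around BB14_21/BB14_22. */
    lane_mask divergent_if(lane_mask exec, lane_mask vcc)
    {
        lane_mask saved = exec;   /* s_and_saveexec_b64 s[28:29], vcc */
        exec &= vcc;
        if (exec != 0) {          /* s_cbranch_execz BB14_22 skips the body */
            /* BB14_21: executes only for lanes still set in exec */
        }
        return exec | saved;      /* s_or_b64 exec, exec, s[28:29] */
    }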
v_mul_f32_e32 v11, 1.0, v11 ; 0A1616F2
v_cmpx_nge_f32_e32 vcc, 0, v11 ; 7CB21680
s_cbranch_execnz BB14_26 ; BF890000
exp null off, off, off, off done vm ; C4001890 00000000
s_endpgm ; BF810000
BB14_26:
v_sub_f32_e32 v11, v28, v35 ; 0416471C
v_mac_f32_e32 v35, v20, v11 ; 2C461714
v_sub_f32_e32 v11, v27, v34 ; 0416451B
v_mac_f32_e32 v34, v20, v11 ; 2C441714
v_sub_f32_e32 v11, v26, v33 ; 0416431A
v_mac_f32_e32 v33, v20, v11 ; 2C421714
v_sub_f32_e32 v11, v24, v31 ; 04163F18
v_mac_f32_e32 v31, v20, v11 ; 2C3E1714
v_rcp_f32_e32 v14, v31 ; 7E1C451F
v_mul_f32_e32 v15, v35, v29 ; 0A1E3B23
v_mul_f32_e32 v16, v34, v29 ; 0A203B22
v_subrev_f32_e32 v18, s3, v5 ; 06240A03
v_mul_f32_e32 v17, v33, v29 ; 0A223B21
v_sub_f32_e32 v11, v25, v36 ; 04164919
v_mul_f32_e32 v15, v15, v14 ; 0A1E1D0F
v_mul_f32_e32 v16, v16, v14 ; 0A201D10
v_mul_f32_e32 v14, v17, v14 ; 0A1C1D11
v_subrev_f32_e32 v17, s2, v7 ; 06220E02
v_mul_f32_e32 v19, v18, v18 ; 0A262512
v_mac_f32_e32 v36, v20, v11 ; 2C481714
v_mac_f32_e32 v19, v17, v17 ; 2C262311
v_subrev_f32_e32 v20, s4, v6 ; 06280C04
v_mac_f32_e32 v19, v20, v20 ; 2C262914
v_rsq_f32_e32 v21, v19 ; 7E2A4913
s_mov_b32 s0, 0x3f8a3d71 ; BE8000FF 3F8A3D71
v_mul_f32_e32 v11, v36, v30 ; 0A163D24
v_max_f32_e32 v14, v22, v14 ; 161C1D16
v_mul_f32_e32 v20, v20, v21 ; 0A282B14
v_mul_f32_e32 v17, v17, v21 ; 0A222B11
v_mul_f32_e32 v18, v18, v21 ; 0A242B12
v_mul_f32_e32 v21, v20, v9 ; 0A2A1314
v_mad_f32 v21, v18, v10, -v21 ; D1C10015 84561512
v_mul_f32_e32 v10, v17, v10 ; 0A141511
v_mad_f32 v10, v20, v8, -v10 ; D1C1000A 842A1114
v_mul_f32_e32 v8, v18, v8 ; 0A101112
v_mad_f32 v8, v17, v9, -v8 ; D1C10008 84221311
v_mul_f32_e32 v9, v10, v10 ; 0A12150A
v_mac_f32_e32 v9, v21, v21 ; 2C122B15
v_mac_f32_e32 v9, v8, v8 ; 2C121108
v_rsq_f32_e32 v9, v9 ; 7E124909
v_max_f32_e32 v15, v22, v15 ; 161E1F16
v_max_f32_e32 v16, v22, v16 ; 16202116
v_max_f32_e32 v11, 0, v11 ; 16161680
v_mul_f32_e32 v21, v21, v9 ; 0A2A1315
v_mul_f32_e32 v10, v10, v9 ; 0A14130A
v_mul_f32_e32 v8, v8, v9 ; 0A101308
v_mul_f32_e32 v9, s7, v21 ; 0A122A07
v_mad_f32 v7, s0, v9, v7 ; D1C10007 041E1200
v_mul_f32_e32 v9, s7, v10 ; 0A121407
v_mad_f32 v10, v10, v13, s9 ; D1C1000A 00261B0A
v_mad_f32 v21, v21, v13, s8 ; D1C10015 00221B15
v_mov_b32_e32 v22, 0x3f666666 ; 7E2C02FF 3F666666
v_subrev_f32_e32 v10, s3, v10 ; 06141403
v_mad_f32 v13, v8, v13, s6 ; D1C1000D 001A1B08
v_min3_f32 v11, v11, 1.0, v22 ; D1D0000B 0459E50B
v_subrev_f32_e32 v21, s2, v21 ; 062A2A02
v_mul_f32_e32 v22, v10, v10 ; 0A2C150A
v_subrev_f32_e32 v13, s4, v13 ; 061A1A04
v_mac_f32_e32 v22, v21, v21 ; 2C2C2B15
v_mac_f32_e32 v22, v13, v13 ; 2C2C1B0D
v_rsq_f32_e32 v22, v22 ; 7E2C4916
v_mul_f32_e32 v8, s7, v8 ; 0A101007
v_mad_f32 v5, s0, v9, v5 ; D1C10005 04161200
v_mad_f32 v6, s0, v8, v6 ; D1C10006 041A1000
v_mul_f32_e32 v8, v21, v22 ; 0A102D15
v_mul_f32_e64 v10, v10, -v22 ; D105000A 40022D0A
v_mul_f32_e32 v8, v17, v8 ; 0A101111
v_subrev_f32_e32 v5, s3, v5 ; 060A0A03
v_mad_f32 v8, v18, v10, -v8 ; D1C10008 84221512
v_subrev_f32_e32 v7, s2, v7 ; 060E0E02
v_mul_f32_e32 v10, v5, v5 ; 0A140B05
v_subrev_f32_e32 v6, s4, v6 ; 060C0C04
v_mac_f32_e32 v10, v7, v7 ; 2C140F07
v_mac_f32_e32 v10, v6, v6 ; 2C140D06
v_rsq_f32_e32 v10, v10 ; 7E14490A
v_mul_f32_e32 v9, v13, v22 ; 0A122D0D
v_mad_f32 v8, -v20, v9, v8 ; D1C10008 24221314
v_mul_f32_e32 v4, v17, v4 ; 0A080911
v_mul_f32_e32 v7, v7, v10 ; 0A0E1507
v_mul_f32_e32 v7, v17, v7 ; 0A0E0F11
v_mul_f32_e64 v5, v5, -v10 ; D1050005 40021505
v_mul_f32_e32 v6, v6, v10 ; 0A0C1506
v_mad_f32 v5, v18, v5, -v7 ; D1C10005 841E0B12
v_mad_f32 v5, -v20, v6, v5 ; D1C10005 24160D14
v_sub_f32_e32 v5, v5, v8 ; 040A1105
v_rcp_f32_e32 v5, v5 ; 7E0A4505
v_mad_f32 v2, v18, -v2, -v4 ; D1C10002 C4120512
v_mul_f32_e32 v13, 1.0, v15 ; 0A1A1EF2
v_mul_f32_e32 v15, 1.0, v16 ; 0A1E20F2
v_sqrt_f32_e32 v16, v19 ; 7E204F13
v_mad_f32 v2, -v20, v3, v2 ; D1C10002 240A0714
v_mul_f32_e32 v1, v1, v18 ; 0A022501
v_sub_f32_e32 v2, v2, v8 ; 04041102
v_mac_f32_e32 v1, v12, v17 ; 2C02230C
v_mul_f32_e64 v2, v2, v5 clamp ; D1058002 00020B02
v_mac_f32_e32 v1, v0, v20 ; 2C022900
v_mul_f32_e32 v14, 1.0, v14 ; 0A1C1CF2
v_mul_f32_e32 v0, v13, v2 ; 0A00050D
v_cmp_lt_f32_e32 vcc, v16, v1 ; 7C820310
v_mul_f32_e32 v3, v15, v2 ; 0A06050F
v_mul_f32_e32 v4, v14, v2 ; 0A08050E
v_mul_f32_e32 v2, v11, v2 ; 0A04050B
v_cndmask_b32_e32 v1, v15, v3, vcc ; 0002070F
v_cndmask_b32_e32 v0, v13, v0, vcc ; 0000010D
v_cndmask_b32_e32 v3, v14, v4, vcc ; 0006090E
v_cndmask_b32_e32 v2, v11, v2, vcc ; 0004050B
v_cvt_pkrtz_f16_f32 v0, v0, v1 ; D2960000 00020300
v_cvt_pkrtz_f16_f32 v1, v3, v2 ; D2960001 00020503
exp mrt0 v0, off, v1, off done compr vm ; C4001C05 00000100
s_endpgm ; BF810000

; ModuleID = 'mesa-shader'
source_filename = "mesa-shader"
target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
target triple = "amdgcn-mesa-mesa3d"

define amdgpu_vs void @main([0 x i8] addrspace(6)* inreg noalias dereferenceable(18446744073709551615), [0 x i8] addrspace(6)* inreg noalias dereferenceable(18446744073709551615), [0 x <4 x i32>] addrspace(6)* inreg noalias dereferenceable(18446744073709551615), i32 inreg, i32 inreg, i32, i32, i32, i32) {
main_body:
  %9 = add i32 %5, %3
  %10 = getelementptr inbounds [0 x <4 x i32>], [0 x <4 x i32>] addrspace(6)* %2, i32 0, i32 0, !amdgpu.uniform !0
  %11 = load <4 x i32>, <4 x i32> addrspace(6)* %10, align 16, !invariant.load !0
  %12 = call <4 x i32> @llvm.amdgcn.struct.tbuffer.load.v4i32(<4 x i32> %11, i32 %9, i32 0, i32 0, i32 124, i32 0) #2
  %13 = bitcast [0 x i8] addrspace(6)* %1 to <4 x i32> addrspace(6)*, !amdgpu.uniform !0
  %14 = load <4 x i32>, <4 x i32> addrspace(6)* %13, align 16, !invariant.load !0
  %15 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1120, i32 0) #2
  %16 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1124, i32 0) #2
  %17 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1128, i32 0) #2
  %18 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1132, i32 0) #2
  %bc = bitcast <4 x i32> %12 to <4 x float>
  %19 = extractelement <4 x float> %bc, i32 0
  %20 = fmul float %15, %19
  %21 = fmul float %16, %19
  %22 = fmul float %17, %19
  %23 = fmul float %18, %19
  %24 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1136, i32 0) #2
  %25 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1140, i32 0) #2
  %26 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1144, i32 0) #2
  %27 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1148, i32 0) #2
  %bc44 = bitcast <4 x i32> %12 to <4 x float>
  %28 = extractelement <4 x float> %bc44, i32 1
  %29 = fmul float %24, %28
  %30 = fmul float %25, %28
  %31 = fmul float %26, %28
  %32 = fmul float %27, %28
  %33 = fadd float %20, %29
  %34 = fadd float %21, %30
  %35 = fadd float %22, %31
  %36 = fadd float %23, %32
  %37 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1152, i32 0) #2
  %38 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1156, i32 0) #2
  %39 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1160, i32 0) #2
  %40 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1164, i32 0) #2
  %bc45 = bitcast <4 x i32> %12 to <4 x float>
  %41 = extractelement <4 x float> %bc45, i32 2
  %42 = fmul float %37, %41
  %43 = fmul float %38, %41
  %44 = fmul float %39, %41
  %45 = fmul float %40, %41
  %46 = fadd float %33, %42
  %47 = fadd float %34, %43
  %48 = fadd float %35, %44
  %49 = fadd float %36, %45
  %50 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1168, i32 0) #2
  %51 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1172, i32 0) #2
  %52 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1176, i32 0) #2
  %53 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1180, i32 0) #2
  %54 = fadd float %46, %50
  %55 = fadd float %47, %51
  %56 = fadd float %48, %52
  %57 = fadd float %49, %53
  %58 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1184, i32 0) #2
  %59 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1188, i32 0) #2
  %60 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1192, i32 0) #2
  %61 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1196, i32 0) #2
  %62 = fmul float %58, %54
  %63 = fmul float %59, %54
  %64 = fmul float %60, %54
  %65 = fmul float %61, %54
  %66 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1200, i32 0) #2
  %67 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1204, i32 0) #2
  %68 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1208, i32 0) #2
  %69 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1212, i32 0) #2
  %70 = fmul float %66, %55
  %71 = fmul float %67, %55
  %72 = fmul float %68, %55
  %73 = fmul float %69, %55
  %74 = fadd float %62, %70
  %75 = fadd float %63, %71
  %76 = fadd float %64, %72
  %77 = fadd float %65, %73
  %78 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1216, i32 0) #2
  %79 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1220, i32 0) #2
  %80 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1224, i32 0) #2
  %81 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1228, i32 0) #2
  %82 = fmul float %78, %56
  %83 = fmul float %79, %56
  %84 = fmul float %80, %56
  %85 = fmul float %81, %56
  %86 = fadd float %74, %82
  %87 = fadd float %75, %83
  %88 = fadd float %76, %84
  %89 = fadd float %77, %85
  %90 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1232, i32 0) #2
  %91 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1236, i32 0) #2
  %92 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1240, i32 0) #2
  %93 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %14, i32 1244, i32 0) #2
  %94 = fmul float %90, %57
  %95 = fmul float %91, %57
  %96 = fmul float %92, %57
  %97 = fmul float %93, %57
  %98 = fadd float %86, %94
  %99 = fadd float %87, %95
  %100 = fadd float %88, %96
  %101 = fadd float %89, %97
  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %98, float %99, float %100, float %101, i1 true, i1 false) #3
  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %54, float %55, float %56, float %98, i1 false, i1 false) #3
  call void @llvm.amdgcn.exp.f32(i32 33, i32 3, float %99, float %101, float undef, float undef, i1 false, i1 false) #3
  ret void
}

; Function Attrs: nounwind readnone speculatable
declare i8 addrspace(4)* @llvm.amdgcn.implicit.buffer.ptr() #0

; Function Attrs: nounwind readonly
declare <4 x i32> @llvm.amdgcn.struct.tbuffer.load.v4i32(<4 x i32>, i32, i32, i32, i32 immarg, i32 immarg) #1

; Function Attrs: nounwind readnone
declare float @llvm.amdgcn.s.buffer.load.f32(<4 x i32>, i32, i32 immarg) #2

; Function Attrs: nounwind
declare void @llvm.amdgcn.exp.f32(i32 immarg, i32 immarg, float, float, float, float, i1 immarg, i1 immarg) #3

attributes #0 = { nounwind readnone speculatable }
attributes #1 = { nounwind readonly }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind }

!0 = !{}

disasm:
main:
BB15_0:
s_mov_b32 s2, s3 ; BE820003
s_mov_b32 s3, 0 ; BE830080
s_mov_b32 s0, s5 ; BE800005
s_mov_b32 s5, s3 ; BE850003
s_load_dwordx4 s[8:11], s[2:3], 0x0 ; C00A0201 00000000
s_load_dwordx4 s[4:7], s[4:5], 0x0 ; C00A0102 00000000
v_add_u32_e32 v0, vcc, s0, v0 ; 32000000
s_waitcnt lgkmcnt(0) ; BF8C007F
tbuffer_load_format_xyzw v[0:3], v0, s[4:7], dfmt:12, nfmt:7, 0 idxen ; EBE1A000 80010000
s_buffer_load_dwordx4 s[4:7], s[8:11], 0x460 ; C02A0104 00000460
s_buffer_load_dwordx4 s[12:15], s[8:11], 0x470 ; C02A0304 00000470
s_buffer_load_dwordx4 s[16:19], s[8:11], 0x480 ; C02A0404 00000480
s_buffer_load_dwordx4 s[20:23], s[8:11], 0x490 ; C02A0504 00000490
s_buffer_load_dwordx4 s[24:27], s[8:11], 0x4a0 ; C02A0604 000004A0
s_buffer_load_dwordx4 s[28:31], s[8:11], 0x4b0 ; C02A0704 000004B0
s_buffer_load_dwordx4 s[32:35], s[8:11], 0x4c0 ; C02A0804 000004C0
s_buffer_load_dwordx4 s[0:3], s[8:11], 0x4d0 ; C02A0004 000004D0
s_waitcnt vmcnt(0) lgkmcnt(0) ; BF8C0070
v_mul_f32_e32 v3, s13, v1 ; 0A06020D
v_mul_f32_e32 v4, s12, v1 ; 0A08020C
v_mul_f32_e32 v5, s14, v1 ; 0A0A020E
v_mul_f32_e32 v1, s15, v1 ; 0A02020F
v_mac_f32_e32 v3, s5, v0 ; 2C060005
v_mac_f32_e32 v4, s4, v0 ; 2C080004
v_mac_f32_e32 v5, s6, v0 ; 2C0A0006
v_mac_f32_e32 v1, s7, v0 ; 2C020007
v_mac_f32_e32 v3, s17, v2 ; 2C060411
v_mac_f32_e32 v1, s19, v2 ; 2C020413
v_mac_f32_e32 v4, s16, v2 ; 2C080410
v_mac_f32_e32 v5, s18, v2 ; 2C0A0412
v_add_f32_e32 v2, s21, v3 ; 02040615
v_add_f32_e32 v0, s20, v4 ; 02000814
v_mul_f32_e32 v4, s28, v2 ; 0A08041C
v_mul_f32_e32 v6, s30, v2 ; 0A0C041E
v_mul_f32_e32 v7, s31, v2 ; 0A0E041F
v_add_f32_e32 v3, s22, v5 ; 02060A16
v_mul_f32_e32 v5, s29, v2 ; 0A0A041D
v_mac_f32_e32 v4, s24, v0 ; 2C080018
v_mac_f32_e32 v5, s25, v0 ; 2C0A0019
v_mac_f32_e32 v6, s26, v0 ; 2C0C001A
v_mac_f32_e32 v7, s27, v0 ; 2C0E001B
v_add_f32_e32 v1, s23, v1 ; 02020217
v_mac_f32_e32 v4, s32, v3 ; 2C080620
v_mac_f32_e32 v5, s33, v3 ; 2C0A0621
v_mac_f32_e32 v6, s34, v3 ; 2C0C0622
v_mac_f32_e32 v7, s35, v3 ; 2C0E0623
v_mac_f32_e32 v4, s0, v1 ; 2C080200
v_mac_f32_e32 v5, s1, v1 ; 2C0A0201
v_mac_f32_e32 v6, s2, v1 ; 2C0C0202
v_mac_f32_e32 v7, s3, v1 ; 2C0E0203
exp pos0 v4, v5, v6, v7 done ; C40008CF 07060504
exp param0 v0, v2, v3, v4 ; C400020F 04030200
exp param1 v5, v7, off, off ; C4000213 00000705
s_endpgm ; BF810000

/home/hakzsam/programming/vkpipeline-db-private/no_man_sky/640.pipeline_test (VS) - Shader Stats: SGPRS: 40 VGPRS: 8 Code Size: 292 LDS: 0 Scratch: 0 Max Waves: 8 Spilled SGPRs: 0 Spilled VGPRs: 0 PrivMem VGPRs: 0
/home/hakzsam/programming/vkpipeline-db-private/no_man_sky/640.pipeline_test (FS) - Shader Stats: SGPRS: 48 VGPRS: 64 Code Size: 4404 LDS: 0 Scratch: 0 Max Waves: 4 Spilled SGPRs: 0 Spilled VGPRs: 0 PrivMem VGPRs: 0
Thread 0 took 2.84 seconds and compiled 1 pipelines
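In the LLVM IR above, the @llvm.amdgcn.s.buffer.load.f32 calls at byte offsets 1120-1244 (0x460-0x4DC, matching the NIR's UBO offsets) fetch two groups of four vec4s, and the long fmul/fadd chains apply them as mat4 columns: the first matrix is scaled by pos.xyz with its fourth column simply added (w is implicitly 1), and the result is then run through the second matrix with all four components. A C sketch of that arithmetic, assuming the UBO holds two column-major mat4s (the names here are illustrative; only the offsets are from the dump):

    typedef struct { float x, y, z, w; } vec4;

    /* acc + col * s, the fmul/fadd pattern repeated per column. */
    static vec4 mad4(vec4 acc, vec4 col, float s)
    {
        return (vec4){ acc.x + col.x * s, acc.y + col.y * s,
                       acc.z + col.z * s, acc.w + col.w * s };
    }

    /* %54..%57: world = m1 * (pos.xyz, 1), the w column only added.
     * %98..%101: clip = m2 * world, all four components this time.  */
    vec4 transform(const vec4 m1[4], const vec4 m2[4], vec4 pos)
    {
        vec4 world = mad4(mad4(mad4(m1[3], m1[0], pos.x),
                               m1[1], pos.y), m1[2], pos.z);
        vec4 zero = { 0.0f, 0.0f, 0.0f, 0.0f };
        return mad4(mad4(mad4(mad4(zero, m2[0], world.x), m2[1], world.y),
                         m2[2], world.z), m2[3], world.w);
    }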
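The Max Waves figures in the stats lines follow from register pressure: a GCN SIMD has 256 VGPRs shared by its resident waves, allocated in granules of four, and Polaris parts additionally cap residency at 8 waves per SIMD, so the FS at 64 VGPRs is limited to 256/64 = 4 waves while the 8-VGPR VS hits the 8-wave cap. A back-of-the-envelope check, as a sketch under those assumptions:

    #include <stdio.h>

    /* Occupancy bound implied by the stats above: 256 VGPRs per SIMD,
     * VGPR allocation granularity of 4, assumed 8-wave cap on Polaris. */
    static unsigned max_waves(unsigned vgprs, unsigned hw_cap)
    {
        unsigned granule = (vgprs + 3) & ~3u;   /* round up to a multiple of 4 */
        unsigned by_vgprs = 256 / (granule ? granule : 4);
        return by_vgprs < hw_cap ? by_vgprs : hw_cap;
    }

    int main(void)
    {
        printf("VS: %u waves\n", max_waves(8, 8));   /* 8, matches the report */
        printf("FS: %u waves\n", max_waves(64, 8));  /* 4, matches the report */
        return 0;
    }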