Using breakpad crash handler
Setting breakpad minidump AppID = 570
Forcing breakpad minidump interfaces to load
Looking up breakpad interfaces from steamclient
Calling BreakpadMiniDumpSystemInit
Looking up breakpad interfaces from steamclient
Calling BreakpadMiniDumpSystemInit
Steam_SetMinidumpSteamID: Caching Steam ID: 76561198040191507 [API loaded yes]
Steam_SetMinidumpSteamID: Setting Steam ID: 76561198040191507
Setting breakpad minidump AppID = 373300
Fontconfig warning: "/home/jason/.local/share/Steam/steamapps/common/dota 2 beta/game/core/panorama/fonts/conf.d/41-repl-os-win.conf", line 148: Having multiple values in isn't supported and may not work as expected
Fontconfig warning: "/home/jason/.local/share/Steam/steamapps/common/dota 2 beta/game/core/panorama/fonts/conf.d/41-repl-os-win.conf", line 160: Having multiple values in isn't supported and may not work as expected

UBO Ranges:
[0]: block 0 start 3 length 4
[1]: block 1 start 0 length 2
[2]: block 1 start 12 length 1
[3]: (none)

466 NIR instructions

NIR (SSA form) for vertex shader:
shader: MESA_SHADER_VERTEX
inputs: 0
outputs: 0
uniforms: 0
shared: 0
decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268)
decl_var uniform INTERP_MODE_NONE sampler @1 (0, 0, 76)
decl_var shader_in INTERP_MODE_NONE vec3 @2 (VERT_ATTRIB_GENERIC0.xyz, 16, 0)
decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0)
decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0)
decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0)
decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0)
decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0)
decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0)
decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0)
decl_function main returning void
impl main {
block block_0:
/* preds: */
vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */)
vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */)
vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */)
vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec1 32 ssa_5 = load_const (0x437f0000 /* 255.000000 */)
vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */)
vec2 32 ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */)
vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */
vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */
vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */
vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */
vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0)
/* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, 
ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, -ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, 
-ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, 
ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 = load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo (ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 */) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const (0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output (ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 
*/
vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203
intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */
vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163
intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */
vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w
intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */
vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293
intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */
vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0
intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */
intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */
vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283
intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */
/* succs: block_0 */
block block_0:
}

NIR (final form) for vertex shader:
shader: MESA_SHADER_VERTEX
inputs: 0
outputs: 0
uniforms: 0
shared: 0
decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268)
decl_var uniform INTERP_MODE_NONE sampler @1 (0, 0, 76)
decl_var shader_in INTERP_MODE_NONE vec3 @2 (VERT_ATTRIB_GENERIC0.xyz, 16, 0)
decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0)
decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0)
decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0)
decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0)
decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0)
decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0)
decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0)
decl_function main returning void
impl main {
block block_0:
/* preds: */
vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */)
vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */)
vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */)
vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec1 32 ssa_5 = load_const (0x437f0000 /* 255.000000 */)
vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */)
vec2 32 ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */)
vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */
vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */
vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */
vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /*
base=3 */ /* component=0 */ vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 
ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, -ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 
vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, -ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 
ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 = load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo (ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 */) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const (0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output 
(ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */
vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203
intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */
vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163
intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */
vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w
intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */
vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293
intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */
vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0
intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */
intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */
vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283
intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */
/* succs: block_0 */
block block_0:
}

VS Output VUE map (11 slots, SSO)
[0] VARYING_SLOT_PSIZ
[1] VARYING_SLOT_POS
[2] VARYING_SLOT_CLIP_DIST0
[3] VARYING_SLOT_CLIP_DIST1
[4] VARYING_SLOT_VAR0
[5] VARYING_SLOT_VAR1
[6] VARYING_SLOT_VAR2
[7] VARYING_SLOT_VAR3
[8] VARYING_SLOT_VAR4
[9] VARYING_SLOT_VAR5
[10] VARYING_SLOT_VAR6

Native code for unnamed vertex shader (null)
SIMD8 shader: 308 instructions. 0 loops. 600 cycles. 0:0 spills:fills. Promoted 4 constants. Compacted 4928 to 3184 bytes (35%)
START B0 (600 cycles)
vec1 32 ssa_19 = iadd ssa_13.x, ssa_6
add(8) g12<1>D g21<8,8,1>D 2D { align1 1Q compacted };
vec1 32 ssa_33 = iadd ssa_13.y, ssa_6
add(8) g67<1>D g22<8,8,1>D 2D { align1 1Q compacted };
vec1 32 ssa_69 = iadd ssa_13.z, ssa_6
add(8) g54<1>D g23<8,8,1>D 2D { align1 1Q compacted };
vec1 32 ssa_93 = iadd ssa_13.w, ssa_6
add(8) g121<1>D g24<8,8,1>D 2D { align1 1Q compacted };
vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler)
mov(8) g31<1>F g29<8,8,1>F { align1 1Q compacted };
mov(8) g32<1>F g30<8,8,1>F { align1 1Q compacted };
mov(1) g107<1>F 128F { align1 WE_all 1N };
vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y
mul(8) g59<1>F g3.1<0,1,0>F g18<8,8,1>F { align1 1Q compacted };
vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y
mul(8) g73<1>F g3.3<0,1,0>F g18<8,8,1>F { align1 1Q compacted };
vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y
mul(8) g77<1>F g4.5<0,1,0>F g18<8,8,1>F { align1 1Q compacted };
vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y
mul(8) g79<1>F g4.7<0,1,0>F g18<8,8,1>F { align1 1Q };
vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () ()
mov(8) g24<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted };
vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () ()
mov(8) g37<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted };
vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler)
mov(8) g35<1>F g29<8,8,1>F { align1 1Q compacted };
mov(8) g36<1>F g30<8,8,1>F { align1 1Q compacted };
vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () ()
mov(8) g38<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted };
URB write
mov(8) g18<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted };
vec1 32 ssa_20 = ishl ssa_19, ssa_6
shl(8) g23<1>D g12<8,8,1>D 0x00000002UD { align1 1Q };
vec1 32 ssa_34 = ishl ssa_33, ssa_6
shl(8) g68<1>D g67<8,8,1>D 0x00000002UD { align1 1Q };
vec1 32 ssa_70 = ishl ssa_69, ssa_6
shl(8) g55<1>D
g54<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_94 = ishl ssa_93, ssa_6 shl(8) g122<1>D g121<8,8,1>D 0x00000002UD { align1 1Q }; vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g31<1>UW g31<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 3 { align1 1Q }; mov(1) g107.1<1>F 255F { align1 WE_all 1N }; vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 mad(8) g60<1>F g59<4,4,1>F g17<4,4,1>F g3.0<0,1,0>F { align16 1Q }; vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 mad(8) g74<1>F g73<4,4,1>F g17<4,4,1>F g3.2<0,1,0>F { align16 1Q }; vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 mad(8) g78<1>F g77<4,4,1>F g17<4,4,1>F g4.4<0,1,0>F { align16 1Q }; vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 mad(8) g80<1>F g79<4,4,1>F g17<4,4,1>F g4.6<0,1,0>F { align16 1Q }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () mov(1) g24.2<1>UD 0x00000014UD { align1 WE_all 1N compacted }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () mov(1) g37.2<1>UD 0x00000010UD { align1 WE_all 1N compacted }; vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) mov(8) g34<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g34.2<1>UD 0x00000300UD { align1 WE_all 1N compacted }; send(8) g49<1>UW g34<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () mov(1) g38.2<1>UD 0x00000004UD { align1 WE_all 1N compacted }; vec1 32 ssa_21 = i2f32 ssa_20 mov(8) g34<1>F g23<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_35 = i2f32 ssa_34 mov(8) g69<1>F g68<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_71 = i2f32 ssa_70 mov(8) g56<1>F g55<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_95 = i2f32 ssa_94 mov(8) g123<1>F g122<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z mul(8) g100<1>F g9<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z mul(8) g99<1>F g10<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z mul(8) g101<1>F g11<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 mad(8) g67<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g13<4,4,1>F { align16 1Q }; vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 mad(8) g68<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g14<4,4,1>F { align16 1Q }; vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 mad(8) g103<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g15<4,4,1>F { align16 1Q }; vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 mad(8) g104<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g16<4,4,1>F { align16 1Q }; vec1 32 ssa_249 = fadd ssa_244, ssa_248.x add(8) g10<1>F g60<8,8,1>F g2.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_250 = fadd ssa_246, ssa_248.y add(8) g11<1>F g74<8,8,1>F g2.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_259 = fadd ssa_254, ssa_258.x add(8) g12<1>F g78<8,8,1>F g4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_260 = fadd ssa_256, ssa_258.y add(8) g13<1>F g80<8,8,1>F g4.1<0,1,0>F { align1 1Q compacted }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () send(16) g23<1>UD g24<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () send(16) g97<1>UD g37<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () send(16) g2<1>UD g38<8,8,1>UD const (2, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec1 32 ssa_22 = 
fadd ssa_21, ssa_9 add(8) g35<1>F g34<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_36 = fadd ssa_35, ssa_9 add(8) g70<1>F g69<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_72 = fadd ssa_71, ssa_9 add(8) g57<1>F g56<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_96 = fadd ssa_95, ssa_9 add(8) g124<1>F g123<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_123 = flt ssa_119, ssa_0 cmp.l.f0(8) g69<1>F g67<8,8,1>F 0F { align1 1Q compacted }; mov(1) g107.2<1>F 1F { align1 WE_all 1N }; vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x mul(8) g4<1>F g2.4<0,1,0>F g5.4<0,1,0>F { align1 1Q }; vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y mul(8) g31<1>F g2.5<0,1,0>F g5.5<0,1,0>F { align1 1Q }; vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z mul(8) g32<1>F g2.6<0,1,0>F g5.6<0,1,0>F { align1 1Q }; vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x mad(8) g65<1>F g29<4,4,1>F g8.6<0,1,0>F g35<4,4,1>F { align16 1Q }; vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x mad(8) g72<1>F g29<4,4,1>F g8.6<0,1,0>F g70<4,4,1>F { align16 1Q }; vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x mad(8) g60<1>F g29<4,4,1>F g8.6<0,1,0>F g57<4,4,1>F { align16 1Q }; vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x mad(8) g125<1>F g29<4,4,1>F g8.6<0,1,0>F g124<4,4,1>F { align16 1Q }; vec1 32 ssa_125 = flt ssa_120, ssa_0 cmp.l.f0(8) g71<1>F g68<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_124 = b2f ssa_123 mov(8) g70<1>F -g69<8,8,1>D { align1 1Q compacted }; mov(1) g107.3<1>F 2F { align1 WE_all 1N }; vec1 32 ssa_309 = fmul ssa_304, ssa_308 mul(8) g45<1>F g4<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_310 = fmul ssa_305, ssa_308 mul(8) g46<1>F g31<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_311 = fmul ssa_306, ssa_308 mul(8) g47<1>F g32<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_24 = ffloor ssa_23 rndd(8) g66<1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_38 = ffloor ssa_37 rndd(8) g61<1>F g72<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_74 = ffloor ssa_73 rndd(8) g73<1>F g60<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_98 = ffloor ssa_97 rndd(8) g126<1>F g125<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_127 = flt ssa_121, ssa_0 cmp.l.f0(8) g63<1>F g103<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_126 = b2f ssa_125 mov(8) g62<1>F -g71<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 add(8) g105<1>F (abs)g67<8,8,1>F -g70<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 mad(8) g113<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g70<4,4,1>F { align16 1Q }; vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 lrp(8) g36<1>F g5.7<0,1,0>F g49<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 lrp(8) g37<1>F g5.7<0,1,0>F g50<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 lrp(8) g38<1>F g5.7<0,1,0>F g51<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_25 = fadd ssa_23, -ssa_24 add(8) g70<1>F g65<8,8,1>F -g66<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y mad(8) g71<1>F g30<4,4,1>F g8.7<0,1,0>F g66<4,4,1>F { align16 1Q }; vec1 32 ssa_39 = fadd ssa_37, -ssa_38 add(8) g58<1>F g72<8,8,1>F -g61<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y mad(8) g59<1>F g30<4,4,1>F g8.7<0,1,0>F g61<4,4,1>F { align16 1Q }; vec1 32 ssa_75 = fadd ssa_73, -ssa_74 add(8) g82<1>F g60<8,8,1>F -g73<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y mad(8) g83<1>F g30<4,4,1>F g8.7<0,1,0>F g73<4,4,1>F { align16 1Q }; vec1 32 ssa_99 = fadd 
ssa_97, -ssa_98 add(8) g94<1>F g125<8,8,1>F -g126<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y mad(8) g95<1>F g30<4,4,1>F g8.7<0,1,0>F g126<4,4,1>F { align16 1Q }; vec1 32 ssa_129 = flt ssa_122, ssa_0 cmp.l.f0(8) g110<1>F g104<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_128 = b2f ssa_127 mov(8) g108<1>F -g63<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 add(8) g106<1>F (abs)g68<8,8,1>F -g62<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_136 = fadd ssa_131, ssa_135 add(8) g109<1>F g105<8,8,1>F -64F { align1 1Q }; vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g2<1>UW g70<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g65<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g69<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g61<1>UW g58<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g53<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g57<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g73<1>UW g82<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g77<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g81<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g85<1>UW g94<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g89<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_108 = txl 
ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g93<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec1 32 ssa_140 = flt ssa_131, -ssa_135 cmp.l.f0(8) g124<1>F g105<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_130 = b2f ssa_129 mov(8) g111<1>F -g110<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 add(8) g102<1>F (abs)g103<8,8,1>F -g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 mad(8) g114<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g108<4,4,1>F { align16 1Q }; vec1 32 ssa_137 = fadd ssa_132, ssa_135 add(8) g110<1>F g106<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y mul(8) g103<1>F g61<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y mul(8) g105<1>F g62<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y mul(8) g112<1>F g63<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y mul(8) g115<1>F g64<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y mul(8) g120<1>F g53<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y mul(8) g121<1>F g54<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y mul(8) g122<1>F g55<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y mul(8) g123<1>F g56<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y mul(8) g8<1>F g57<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y mul(8) g29<1>F g58<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y mul(8) g30<1>F g59<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y mul(8) g61<1>F g60<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_142 = flt ssa_132, -ssa_135 cmp.l.f0(8) g54<1>F g106<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_141 = b2f ssa_140 mov(8) g26<1>F -g124<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 add(8) g108<1>F (abs)g104<8,8,1>F -g111<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 mad(8) g35<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g111<4,4,1>F { align16 1Q }; vec1 32 ssa_138 = fadd ssa_133, ssa_135 add(8) g111<1>F g102<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 mad(8) g116<1>F g103<4,4,1>F g25<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 mad(8) g117<1>F g105<4,4,1>F g25<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 mad(8) g118<1>F g112<4,4,1>F g25<4,4,1>F g4<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 mad(8) g119<1>F g115<4,4,1>F g25<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 mad(8) g124<1>F g120<4,4,1>F g25<4,4,1>F g65<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 mad(8) g125<1>F g121<4,4,1>F g25<4,4,1>F g66<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 mad(8) g126<1>F g122<4,4,1>F g25<4,4,1>F g67<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 mad(8) g127<1>F g123<4,4,1>F 
g25<4,4,1>F g68<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 mad(8) g62<1>F g8<4,4,1>F g25<4,4,1>F g69<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 mad(8) g63<1>F g29<4,4,1>F g25<4,4,1>F g70<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 mad(8) g64<1>F g30<4,4,1>F g25<4,4,1>F g71<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 mad(8) g53<1>F g61<4,4,1>F g25<4,4,1>F g72<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_144 = flt ssa_133, -ssa_135 cmp.l.f0(8) g56<1>F g102<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_143 = b2f ssa_142 mov(8) g55<1>F -g54<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 add(8) g60<1>F (abs)g109<8,8,1>F -g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 mad(8) g112<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g26<4,4,1>F { align16 1Q }; vec1 32 ssa_139 = fadd ssa_134, ssa_135 add(8) g121<1>F g108<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 mad(8) g103<1>F g116<4,4,1>F g27<4,4,1>F g73<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 mad(8) g104<1>F g117<4,4,1>F g27<4,4,1>F g74<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 mad(8) g105<1>F g118<4,4,1>F g27<4,4,1>F g75<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 mad(8) g106<1>F g119<4,4,1>F g27<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 mad(8) g102<1>F g124<4,4,1>F g27<4,4,1>F g77<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 mad(8) g109<1>F g125<4,4,1>F g27<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 mad(8) g115<1>F g126<4,4,1>F g27<4,4,1>F g79<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 mad(8) g116<1>F g127<4,4,1>F g27<4,4,1>F g80<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 mad(8) g117<1>F g62<4,4,1>F g27<4,4,1>F g81<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 mad(8) g118<1>F g63<4,4,1>F g27<4,4,1>F g82<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 mad(8) g119<1>F g64<4,4,1>F g27<4,4,1>F g83<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 mad(8) g120<1>F g53<4,4,1>F g27<4,4,1>F g84<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_146 = flt ssa_134, -ssa_135 cmp.l.f0(8) g58<1>F g108<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_145 = b2f ssa_144 mov(8) g57<1>F -g56<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 add(8) g73<1>F (abs)g110<8,8,1>F -g55<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 mad(8) g80<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g55<4,4,1>F { align16 1Q }; vec1 32 ssa_153 = fmul ssa_148, ssa_152 mul(8) g76<1>F g60<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 mad(8) g127<1>F g103<4,4,1>F g28<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 mad(8) g2<1>F g104<4,4,1>F g28<4,4,1>F g86<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 mad(8) g3<1>F g105<4,4,1>F g28<4,4,1>F g87<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 mad(8) g4<1>F g106<4,4,1>F 
g28<4,4,1>F g88<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 mad(8) g5<1>F g102<4,4,1>F g28<4,4,1>F g89<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 mad(8) g8<1>F g109<4,4,1>F g28<4,4,1>F g90<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 mad(8) g25<1>F g115<4,4,1>F g28<4,4,1>F g91<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 mad(8) g26<1>F g116<4,4,1>F g28<4,4,1>F g92<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 mad(8) g27<1>F g117<4,4,1>F g28<4,4,1>F g93<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 mad(8) g29<1>F g118<4,4,1>F g28<4,4,1>F g94<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 mad(8) g30<1>F g119<4,4,1>F g28<4,4,1>F g95<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 mad(8) g65<1>F g120<4,4,1>F g28<4,4,1>F g96<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_147 = b2f ssa_146 mov(8) g59<1>F -g58<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 add(8) g74<1>F (abs)g111<8,8,1>F -g57<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 mad(8) g81<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g57<4,4,1>F { align16 1Q }; vec1 32 ssa_154 = fmul ssa_149, ssa_152 mul(8) g77<1>F g73<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_164 = fadd ssa_1, -ssa_153 add(8) g84<1>F -g76<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_229 = fmul ssa_104, ssa_227 mul(8) g71<1>F g2<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_233 = fmul ssa_110, ssa_227 mul(8) g63<1>F g8<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_237 = fmul ssa_114, ssa_227 mul(8) g55<1>F g29<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 add(8) g75<1>F (abs)g121<8,8,1>F -g59<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 mad(8) g82<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g59<4,4,1>F { align16 1Q }; vec1 32 ssa_155 = fmul ssa_150, ssa_152 mul(8) g78<1>F g74<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_166 = fmul ssa_154, ssa_154 mul(8) g86<1>F g77<8,8,1>F g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_165 = fadd ssa_164, -ssa_154 add(8) g85<1>F g84<8,8,1>F -g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 mad(8) g72<1>F g71<4,4,1>F g100<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 mad(8) g64<1>F g63<4,4,1>F g100<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 mad(8) g56<1>F g55<4,4,1>F g100<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_156 = fmul ssa_151, ssa_152 mul(8) g79<1>F g75<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_176 = fadd ssa_1, -ssa_155 add(8) g96<1>F -g78<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 mad(8) g87<1>F g86<4,4,1>F g76<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 mad(8) g61<1>F g72<4,4,1>F g101<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 mad(8) g53<1>F g64<4,4,1>F g101<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 mad(8) g57<1>F g56<4,4,1>F g101<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_178 = fmul ssa_156, ssa_156 mul(8) g99<1>F 
g79<8,8,1>F g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_177 = fadd ssa_176, -ssa_156 add(8) g100<1>F g96<8,8,1>F -g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 mad(8) g88<1>F g87<4,4,1>F g85<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_232 = fadd ssa_231, ssa_106 add(8) g41<1>F g61<8,8,1>F g4<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_236 = fadd ssa_235, ssa_112 add(8) g42<1>F g53<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_240 = fadd ssa_239, ssa_116 add(8) g43<1>F g57<8,8,1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 mad(8) g101<1>F g99<4,4,1>F g78<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_169 = frsq ssa_168 math rsq(8) g89<1>F g88<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x add(8) g102<1>F g41<8,8,1>F -g98.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 mul(8) g83<1>F g6.4<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 mul(8) g84<1>F g6.5<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 mul(8) g86<1>F g6.6<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 mul(8) g87<1>F g6.7<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y add(8) g108<1>F g42<8,8,1>F -g98.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z add(8) g109<1>F g43<8,8,1>F -g98.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 mad(8) g107<1>F g101<4,4,1>F g100<4,4,1>F g100<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_170 = fmul ssa_153, ssa_169 mul(8) g90<1>F g76<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_171 = fmul ssa_154, ssa_169 mul(8) g91<1>F g77<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_172 = fmul ssa_165, ssa_169 mul(8) g92<1>F g85<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 mad(8) g88<1>F g83<4,4,1>F g41<4,4,1>F g6.0<0,1,0>F { align16 1Q }; vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 mad(8) g89<1>F g84<4,4,1>F g41<4,4,1>F g6.1<0,1,0>F { align16 1Q }; vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 mad(8) g96<1>F g86<4,4,1>F g41<4,4,1>F g6.2<0,1,0>F { align16 1Q }; vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 mad(8) g97<1>F g87<4,4,1>F g41<4,4,1>F g6.3<0,1,0>F { align16 1Q }; vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 mul(8) g110<1>F g23.1<0,1,0>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_181 = frsq ssa_180 math rsq(8) g103<1>F g107<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_173 = fmul ssa_170, ssa_157 mul(8) g93<1>F g90<8,8,1>F g112<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_174 = fmul ssa_171, ssa_158 mul(8) g94<1>F g91<8,8,1>F g80<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_175 = fmul ssa_172, ssa_161 mul(8) g95<1>F g92<8,8,1>F g113<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 mad(8) g98<1>F g88<4,4,1>F g43<4,4,1>F g7.0<0,1,0>F { align16 1Q }; vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 mad(8) g99<1>F g89<4,4,1>F g43<4,4,1>F g7.1<0,1,0>F { align16 1Q }; vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 mad(8) g101<1>F g96<4,4,1>F g43<4,4,1>F g7.2<0,1,0>F { align16 1Q }; vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 mad(8) g107<1>F g97<4,4,1>F g43<4,4,1>F g7.3<0,1,0>F { align16 1Q }; vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 mad(8) g111<1>F 
g110<4,4,1>F g102<4,4,1>F g23.0<0,1,0>F { align16 1Q }; vec1 32 ssa_182 = fmul ssa_155, ssa_181 mul(8) g104<1>F g78<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_183 = fmul ssa_156, ssa_181 mul(8) g105<1>F g79<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_184 = fmul ssa_177, ssa_181 mul(8) g106<1>F g100<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_188 = fmul ssa_104, ssa_174 mul(8) g110<1>F g2<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_191 = fmul ssa_110, ssa_174 mul(8) g113<1>F g8<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_194 = fmul ssa_114, ssa_174 mul(8) g116<1>F g29<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_280 = fadd ssa_276, ssa_267.x add(8) g19<1>F g98<8,8,1>F g7.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_281 = fadd ssa_277, ssa_267.y add(8) g20<1>F g99<8,8,1>F g7.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_282 = fadd ssa_278, ssa_267.z add(8) g21<1>F g101<8,8,1>F g7.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_283 = fadd ssa_279, ssa_267.w add(8) g22<1>F g107<8,8,1>F g7.7<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 mad(8) g44<1>F g111<4,4,1>F g109<4,4,1>F g23.2<0,1,0>F { align16 1Q }; vec1 32 ssa_185 = fmul ssa_182, ssa_159 mul(8) g102<1>F g104<8,8,1>F g81<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_186 = fmul ssa_183, ssa_160 mul(8) g108<1>F g105<8,8,1>F g82<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_187 = fmul ssa_184, ssa_162 mul(8) g109<1>F g106<8,8,1>F g114<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 mad(8) g111<1>F g110<4,4,1>F g93<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 mad(8) g114<1>F g113<4,4,1>F g93<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 mad(8) g117<1>F g116<4,4,1>F g93<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_204 = fmul ssa_104, ssa_186 mul(8) g126<1>F g2<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_207 = fmul ssa_110, ssa_186 mul(8) g4<1>F g8<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_210 = fmul ssa_114, ssa_186 mul(8) g7<1>F g29<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 mad(8) g112<1>F g111<4,4,1>F g95<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 mad(8) g115<1>F g114<4,4,1>F g95<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 mad(8) g118<1>F g117<4,4,1>F g95<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 mad(8) g127<1>F g126<4,4,1>F g102<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 mad(8) g5<1>F g4<4,4,1>F g102<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 mad(8) g8<1>F g7<4,4,1>F g102<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_197 = fmul ssa_193, ssa_193 mul(8) g119<1>F g115<8,8,1>F g115<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 mad(8) g2<1>F g127<4,4,1>F g109<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 mad(8) g6<1>F g5<4,4,1>F g109<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 mad(8) g23<1>F g8<4,4,1>F g109<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 
mad(8) g120<1>F g119<4,4,1>F g112<4,4,1>F g112<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 mad(8) g121<1>F g120<4,4,1>F g118<4,4,1>F g118<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_200 = frsq ssa_199 math rsq(8) g122<1>F g121<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_201 = fmul ssa_190, ssa_200 mul(8) g14<1>F g112<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_202 = fmul ssa_193, ssa_200 mul(8) g15<1>F g115<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_203 = fmul ssa_196, ssa_200 mul(8) g16<1>F g118<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_213 = fmul ssa_209, ssa_202 mul(8) g24<1>F g6<8,8,1>F g15<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 mad(8) g25<1>F g24<4,4,1>F g14<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 mad(8) g26<1>F g25<4,4,1>F g16<4,4,1>F g23<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 mad(8) g27<1>F g2<4,4,1>F g26<4,4,1>F -g14<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 mad(8) g28<1>F g6<4,4,1>F g26<4,4,1>F -g15<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 mad(8) g29<1>F g23<4,4,1>F g26<4,4,1>F -g16<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_219 = fmul ssa_217, ssa_217 mul(8) g30<1>F g28<8,8,1>F g28<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 mad(8) g65<1>F g30<4,4,1>F g27<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 mad(8) g66<1>F g65<4,4,1>F g29<4,4,1>F g29<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_222 = frsq ssa_221 math rsq(8) g67<1>F g66<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_223 = fmul ssa_216, ssa_222 mul(8) g32<1>F g27<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_224 = fmul ssa_217, ssa_222 mul(8) g33<1>F g28<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_225 = fmul ssa_218, ssa_222 mul(8) g34<1>F g29<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; URB write send(8) null<1>F g18<8,8,1>F urb 1 SIMD8 write mlen 5 rlen 0 { align1 1Q }; mov(8) g9<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; send(8) null<1>F g9<8,8,1>F urb 4 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g31<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g39<1>F g52<8,8,1>F { align1 1Q compacted }; send(8) null<1>F g31<8,8,1>F urb 6 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g40<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g48<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; send(8) null<1>F g40<8,8,1>F urb 8 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g123<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g124<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g125<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g126<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g127<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; send(8) null<1>F g123<8,8,1>F urb 10 SIMD8 write mlen 5 rlen 0 { align1 1Q EOT }; END B0 UBO Ranges: [0]: block 0 start 3 length 4 [1]: block 1 start 0 length 2 [2]: block 1 start 12 length 1 [3]: (none) 466 NIR instructions NIR (SSA form) for vertex shader: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268) decl_var uniform INTERP_MODE_NONE sampler @1 (0, 0, 76) decl_var shader_in INTERP_MODE_NONE vec3 @2 
(VERT_ATTRIB_GENERIC0.xyz, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0) decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0) decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0) decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0) decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0) decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0) decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0) decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */) vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_5 = load_const (0x437f0000 /* 255.000000 */) vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */) vec2 32 ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */) vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, 
ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma 
ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, -ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, -ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma 
ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul 
ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 = load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo (ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 */) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const (0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output (ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203 intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */ vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163 intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293 intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0 intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */ intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283 intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* succs: block_0 */ block block_0: } NIR (final form) for vertex shader: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268) decl_var uniform 
INTERP_MODE_NONE sampler @1 (0, 0, 76) decl_var shader_in INTERP_MODE_NONE vec3 @2 (VERT_ATTRIB_GENERIC0.xyz, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0) decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0) decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0) decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0) decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0) decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0) decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0) decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */) vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_5 = load_const (0x437f0000 /* 255.000000 */) vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */) vec2 32 ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */) vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor 
ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 
vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, -ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, -ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, 
ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, 
ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 = load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo (ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 */) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const (0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output (ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203 intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */ vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163 intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293 intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0 intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */ intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283 intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* succs: block_0 */ block block_0: } VS Output VUE map (11 slots, SSO) [0] VARYING_SLOT_PSIZ [1] VARYING_SLOT_POS [2] VARYING_SLOT_CLIP_DIST0 [3] 
VARYING_SLOT_CLIP_DIST1 [4] VARYING_SLOT_VAR0 [5] VARYING_SLOT_VAR1 [6] VARYING_SLOT_VAR2 [7] VARYING_SLOT_VAR3 [8] VARYING_SLOT_VAR4 [9] VARYING_SLOT_VAR5 [10] VARYING_SLOT_VAR6 Native code for unnamed vertex shader (null) SIMD8 shader: 308 instructions. 0 loops. 600 cycles. 0:0 spills:fills. Promoted 4 constants. Compacted 4928 to 3184 bytes (35%) START B0 (600 cycles) vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 add(8) g12<1>D g21<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 add(8) g67<1>D g22<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 add(8) g54<1>D g23<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 add(8) g121<1>D g24<8,8,1>D 2D { align1 1Q compacted }; vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) mov(8) g31<1>F g29<8,8,1>F { align1 1Q compacted }; mov(8) g32<1>F g30<8,8,1>F { align1 1Q compacted }; mov(1) g107<1>F 128F { align1 WE_all 1N }; vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y mul(8) g59<1>F g3.1<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y mul(8) g73<1>F g3.3<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y mul(8) g77<1>F g4.5<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y mul(8) g79<1>F g4.7<0,1,0>F g18<8,8,1>F { align1 1Q }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () mov(8) g24<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () mov(8) g37<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) mov(8) g35<1>F g29<8,8,1>F { align1 1Q compacted }; mov(8) g36<1>F g30<8,8,1>F { align1 1Q compacted }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () mov(8) g38<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; URB write mov(8) g18<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; vec1 32 ssa_20 = ishl ssa_19, ssa_6 shl(8) g23<1>D g12<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_34 = ishl ssa_33, ssa_6 shl(8) g68<1>D g67<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_70 = ishl ssa_69, ssa_6 shl(8) g55<1>D g54<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_94 = ishl ssa_93, ssa_6 shl(8) g122<1>D g121<8,8,1>D 0x00000002UD { align1 1Q }; vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g31<1>UW g31<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 3 { align1 1Q }; mov(1) g107.1<1>F 255F { align1 WE_all 1N }; vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 mad(8) g60<1>F g59<4,4,1>F g17<4,4,1>F g3.0<0,1,0>F { align16 1Q }; vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 mad(8) g74<1>F g73<4,4,1>F g17<4,4,1>F g3.2<0,1,0>F { align16 1Q }; vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 mad(8) g78<1>F g77<4,4,1>F g17<4,4,1>F g4.4<0,1,0>F { align16 1Q }; vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 mad(8) g80<1>F g79<4,4,1>F g17<4,4,1>F g4.6<0,1,0>F { align16 1Q }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () mov(1) g24.2<1>UD 0x00000014UD { align1 WE_all 1N compacted }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () mov(1) g37.2<1>UD 0x00000010UD { align1 WE_all 1N compacted }; vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) mov(8) g34<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g34.2<1>UD 0x00000300UD { align1 WE_all 1N compacted }; 
send(8) g49<1>UW g34<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () mov(1) g38.2<1>UD 0x00000004UD { align1 WE_all 1N compacted }; vec1 32 ssa_21 = i2f32 ssa_20 mov(8) g34<1>F g23<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_35 = i2f32 ssa_34 mov(8) g69<1>F g68<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_71 = i2f32 ssa_70 mov(8) g56<1>F g55<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_95 = i2f32 ssa_94 mov(8) g123<1>F g122<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z mul(8) g100<1>F g9<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z mul(8) g99<1>F g10<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z mul(8) g101<1>F g11<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 mad(8) g67<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g13<4,4,1>F { align16 1Q }; vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 mad(8) g68<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g14<4,4,1>F { align16 1Q }; vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 mad(8) g103<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g15<4,4,1>F { align16 1Q }; vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 mad(8) g104<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g16<4,4,1>F { align16 1Q }; vec1 32 ssa_249 = fadd ssa_244, ssa_248.x add(8) g10<1>F g60<8,8,1>F g2.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_250 = fadd ssa_246, ssa_248.y add(8) g11<1>F g74<8,8,1>F g2.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_259 = fadd ssa_254, ssa_258.x add(8) g12<1>F g78<8,8,1>F g4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_260 = fadd ssa_256, ssa_258.y add(8) g13<1>F g80<8,8,1>F g4.1<0,1,0>F { align1 1Q compacted }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () send(16) g23<1>UD g24<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () send(16) g97<1>UD g37<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () send(16) g2<1>UD g38<8,8,1>UD const (2, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec1 32 ssa_22 = fadd ssa_21, ssa_9 add(8) g35<1>F g34<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_36 = fadd ssa_35, ssa_9 add(8) g70<1>F g69<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_72 = fadd ssa_71, ssa_9 add(8) g57<1>F g56<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_96 = fadd ssa_95, ssa_9 add(8) g124<1>F g123<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_123 = flt ssa_119, ssa_0 cmp.l.f0(8) g69<1>F g67<8,8,1>F 0F { align1 1Q compacted }; mov(1) g107.2<1>F 1F { align1 WE_all 1N }; vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x mul(8) g4<1>F g2.4<0,1,0>F g5.4<0,1,0>F { align1 1Q }; vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y mul(8) g31<1>F g2.5<0,1,0>F g5.5<0,1,0>F { align1 1Q }; vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z mul(8) g32<1>F g2.6<0,1,0>F g5.6<0,1,0>F { align1 1Q }; vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x mad(8) g65<1>F g29<4,4,1>F g8.6<0,1,0>F g35<4,4,1>F { align16 1Q }; vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x mad(8) g72<1>F g29<4,4,1>F g8.6<0,1,0>F g70<4,4,1>F { align16 1Q }; vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x mad(8) g60<1>F g29<4,4,1>F g8.6<0,1,0>F g57<4,4,1>F { align16 1Q }; vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x mad(8) g125<1>F g29<4,4,1>F g8.6<0,1,0>F g124<4,4,1>F { align16 1Q }; vec1 32 ssa_125 = flt ssa_120, ssa_0 cmp.l.f0(8) g71<1>F g68<8,8,1>F 0F { align1 1Q 
compacted }; vec1 32 ssa_124 = b2f ssa_123 mov(8) g70<1>F -g69<8,8,1>D { align1 1Q compacted }; mov(1) g107.3<1>F 2F { align1 WE_all 1N }; vec1 32 ssa_309 = fmul ssa_304, ssa_308 mul(8) g45<1>F g4<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_310 = fmul ssa_305, ssa_308 mul(8) g46<1>F g31<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_311 = fmul ssa_306, ssa_308 mul(8) g47<1>F g32<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_24 = ffloor ssa_23 rndd(8) g66<1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_38 = ffloor ssa_37 rndd(8) g61<1>F g72<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_74 = ffloor ssa_73 rndd(8) g73<1>F g60<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_98 = ffloor ssa_97 rndd(8) g126<1>F g125<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_127 = flt ssa_121, ssa_0 cmp.l.f0(8) g63<1>F g103<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_126 = b2f ssa_125 mov(8) g62<1>F -g71<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 add(8) g105<1>F (abs)g67<8,8,1>F -g70<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 mad(8) g113<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g70<4,4,1>F { align16 1Q }; vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 lrp(8) g36<1>F g5.7<0,1,0>F g49<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 lrp(8) g37<1>F g5.7<0,1,0>F g50<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 lrp(8) g38<1>F g5.7<0,1,0>F g51<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_25 = fadd ssa_23, -ssa_24 add(8) g70<1>F g65<8,8,1>F -g66<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y mad(8) g71<1>F g30<4,4,1>F g8.7<0,1,0>F g66<4,4,1>F { align16 1Q }; vec1 32 ssa_39 = fadd ssa_37, -ssa_38 add(8) g58<1>F g72<8,8,1>F -g61<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y mad(8) g59<1>F g30<4,4,1>F g8.7<0,1,0>F g61<4,4,1>F { align16 1Q }; vec1 32 ssa_75 = fadd ssa_73, -ssa_74 add(8) g82<1>F g60<8,8,1>F -g73<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y mad(8) g83<1>F g30<4,4,1>F g8.7<0,1,0>F g73<4,4,1>F { align16 1Q }; vec1 32 ssa_99 = fadd ssa_97, -ssa_98 add(8) g94<1>F g125<8,8,1>F -g126<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y mad(8) g95<1>F g30<4,4,1>F g8.7<0,1,0>F g126<4,4,1>F { align16 1Q }; vec1 32 ssa_129 = flt ssa_122, ssa_0 cmp.l.f0(8) g110<1>F g104<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_128 = b2f ssa_127 mov(8) g108<1>F -g63<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 add(8) g106<1>F (abs)g68<8,8,1>F -g62<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_136 = fadd ssa_131, ssa_135 add(8) g109<1>F g105<8,8,1>F -64F { align1 1Q }; vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g2<1>UW g70<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g65<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) 
g69<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g61<1>UW g58<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g53<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g57<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g73<1>UW g82<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g77<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g81<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g85<1>UW g94<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g89<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g93<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec1 32 ssa_140 = flt ssa_131, -ssa_135 cmp.l.f0(8) g124<1>F g105<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_130 = b2f ssa_129 mov(8) g111<1>F -g110<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 add(8) g102<1>F (abs)g103<8,8,1>F -g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 mad(8) g114<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g108<4,4,1>F { align16 1Q }; vec1 32 ssa_137 = fadd ssa_132, ssa_135 add(8) g110<1>F g106<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y mul(8) g103<1>F g61<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y mul(8) g105<1>F g62<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y mul(8) g112<1>F g63<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y mul(8) g115<1>F g64<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y mul(8) g120<1>F g53<8,8,1>F 
g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y mul(8) g121<1>F g54<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y mul(8) g122<1>F g55<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y mul(8) g123<1>F g56<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y mul(8) g8<1>F g57<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y mul(8) g29<1>F g58<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y mul(8) g30<1>F g59<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y mul(8) g61<1>F g60<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_142 = flt ssa_132, -ssa_135 cmp.l.f0(8) g54<1>F g106<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_141 = b2f ssa_140 mov(8) g26<1>F -g124<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 add(8) g108<1>F (abs)g104<8,8,1>F -g111<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 mad(8) g35<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g111<4,4,1>F { align16 1Q }; vec1 32 ssa_138 = fadd ssa_133, ssa_135 add(8) g111<1>F g102<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 mad(8) g116<1>F g103<4,4,1>F g25<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 mad(8) g117<1>F g105<4,4,1>F g25<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 mad(8) g118<1>F g112<4,4,1>F g25<4,4,1>F g4<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 mad(8) g119<1>F g115<4,4,1>F g25<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 mad(8) g124<1>F g120<4,4,1>F g25<4,4,1>F g65<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 mad(8) g125<1>F g121<4,4,1>F g25<4,4,1>F g66<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 mad(8) g126<1>F g122<4,4,1>F g25<4,4,1>F g67<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 mad(8) g127<1>F g123<4,4,1>F g25<4,4,1>F g68<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 mad(8) g62<1>F g8<4,4,1>F g25<4,4,1>F g69<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 mad(8) g63<1>F g29<4,4,1>F g25<4,4,1>F g70<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 mad(8) g64<1>F g30<4,4,1>F g25<4,4,1>F g71<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 mad(8) g53<1>F g61<4,4,1>F g25<4,4,1>F g72<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_144 = flt ssa_133, -ssa_135 cmp.l.f0(8) g56<1>F g102<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_143 = b2f ssa_142 mov(8) g55<1>F -g54<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 add(8) g60<1>F (abs)g109<8,8,1>F -g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 mad(8) g112<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g26<4,4,1>F { align16 1Q }; vec1 32 ssa_139 = fadd ssa_134, ssa_135 add(8) g121<1>F g108<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 mad(8) g103<1>F g116<4,4,1>F g27<4,4,1>F g73<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 mad(8) g104<1>F g117<4,4,1>F g27<4,4,1>F g74<4,4,1>F { align16 1Q 
compacted }; vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 mad(8) g105<1>F g118<4,4,1>F g27<4,4,1>F g75<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 mad(8) g106<1>F g119<4,4,1>F g27<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 mad(8) g102<1>F g124<4,4,1>F g27<4,4,1>F g77<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 mad(8) g109<1>F g125<4,4,1>F g27<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 mad(8) g115<1>F g126<4,4,1>F g27<4,4,1>F g79<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 mad(8) g116<1>F g127<4,4,1>F g27<4,4,1>F g80<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 mad(8) g117<1>F g62<4,4,1>F g27<4,4,1>F g81<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 mad(8) g118<1>F g63<4,4,1>F g27<4,4,1>F g82<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 mad(8) g119<1>F g64<4,4,1>F g27<4,4,1>F g83<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 mad(8) g120<1>F g53<4,4,1>F g27<4,4,1>F g84<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_146 = flt ssa_134, -ssa_135 cmp.l.f0(8) g58<1>F g108<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_145 = b2f ssa_144 mov(8) g57<1>F -g56<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 add(8) g73<1>F (abs)g110<8,8,1>F -g55<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 mad(8) g80<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g55<4,4,1>F { align16 1Q }; vec1 32 ssa_153 = fmul ssa_148, ssa_152 mul(8) g76<1>F g60<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 mad(8) g127<1>F g103<4,4,1>F g28<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 mad(8) g2<1>F g104<4,4,1>F g28<4,4,1>F g86<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 mad(8) g3<1>F g105<4,4,1>F g28<4,4,1>F g87<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 mad(8) g4<1>F g106<4,4,1>F g28<4,4,1>F g88<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 mad(8) g5<1>F g102<4,4,1>F g28<4,4,1>F g89<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 mad(8) g8<1>F g109<4,4,1>F g28<4,4,1>F g90<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 mad(8) g25<1>F g115<4,4,1>F g28<4,4,1>F g91<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 mad(8) g26<1>F g116<4,4,1>F g28<4,4,1>F g92<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 mad(8) g27<1>F g117<4,4,1>F g28<4,4,1>F g93<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 mad(8) g29<1>F g118<4,4,1>F g28<4,4,1>F g94<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 mad(8) g30<1>F g119<4,4,1>F g28<4,4,1>F g95<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 mad(8) g65<1>F g120<4,4,1>F g28<4,4,1>F g96<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_147 = b2f ssa_146 mov(8) g59<1>F -g58<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 add(8) g74<1>F (abs)g111<8,8,1>F -g57<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_159 = 
ffma -ssa_145, ssa_3, ssa_1 mad(8) g81<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g57<4,4,1>F { align16 1Q }; vec1 32 ssa_154 = fmul ssa_149, ssa_152 mul(8) g77<1>F g73<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_164 = fadd ssa_1, -ssa_153 add(8) g84<1>F -g76<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_229 = fmul ssa_104, ssa_227 mul(8) g71<1>F g2<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_233 = fmul ssa_110, ssa_227 mul(8) g63<1>F g8<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_237 = fmul ssa_114, ssa_227 mul(8) g55<1>F g29<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 add(8) g75<1>F (abs)g121<8,8,1>F -g59<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 mad(8) g82<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g59<4,4,1>F { align16 1Q }; vec1 32 ssa_155 = fmul ssa_150, ssa_152 mul(8) g78<1>F g74<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_166 = fmul ssa_154, ssa_154 mul(8) g86<1>F g77<8,8,1>F g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_165 = fadd ssa_164, -ssa_154 add(8) g85<1>F g84<8,8,1>F -g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 mad(8) g72<1>F g71<4,4,1>F g100<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 mad(8) g64<1>F g63<4,4,1>F g100<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 mad(8) g56<1>F g55<4,4,1>F g100<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_156 = fmul ssa_151, ssa_152 mul(8) g79<1>F g75<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_176 = fadd ssa_1, -ssa_155 add(8) g96<1>F -g78<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 mad(8) g87<1>F g86<4,4,1>F g76<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 mad(8) g61<1>F g72<4,4,1>F g101<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 mad(8) g53<1>F g64<4,4,1>F g101<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 mad(8) g57<1>F g56<4,4,1>F g101<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_178 = fmul ssa_156, ssa_156 mul(8) g99<1>F g79<8,8,1>F g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_177 = fadd ssa_176, -ssa_156 add(8) g100<1>F g96<8,8,1>F -g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 mad(8) g88<1>F g87<4,4,1>F g85<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_232 = fadd ssa_231, ssa_106 add(8) g41<1>F g61<8,8,1>F g4<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_236 = fadd ssa_235, ssa_112 add(8) g42<1>F g53<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_240 = fadd ssa_239, ssa_116 add(8) g43<1>F g57<8,8,1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 mad(8) g101<1>F g99<4,4,1>F g78<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_169 = frsq ssa_168 math rsq(8) g89<1>F g88<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x add(8) g102<1>F g41<8,8,1>F -g98.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 mul(8) g83<1>F g6.4<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 mul(8) g84<1>F g6.5<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 mul(8) g86<1>F g6.6<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 mul(8) g87<1>F 
g6.7<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y add(8) g108<1>F g42<8,8,1>F -g98.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z add(8) g109<1>F g43<8,8,1>F -g98.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 mad(8) g107<1>F g101<4,4,1>F g100<4,4,1>F g100<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_170 = fmul ssa_153, ssa_169 mul(8) g90<1>F g76<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_171 = fmul ssa_154, ssa_169 mul(8) g91<1>F g77<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_172 = fmul ssa_165, ssa_169 mul(8) g92<1>F g85<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 mad(8) g88<1>F g83<4,4,1>F g41<4,4,1>F g6.0<0,1,0>F { align16 1Q }; vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 mad(8) g89<1>F g84<4,4,1>F g41<4,4,1>F g6.1<0,1,0>F { align16 1Q }; vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 mad(8) g96<1>F g86<4,4,1>F g41<4,4,1>F g6.2<0,1,0>F { align16 1Q }; vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 mad(8) g97<1>F g87<4,4,1>F g41<4,4,1>F g6.3<0,1,0>F { align16 1Q }; vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 mul(8) g110<1>F g23.1<0,1,0>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_181 = frsq ssa_180 math rsq(8) g103<1>F g107<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_173 = fmul ssa_170, ssa_157 mul(8) g93<1>F g90<8,8,1>F g112<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_174 = fmul ssa_171, ssa_158 mul(8) g94<1>F g91<8,8,1>F g80<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_175 = fmul ssa_172, ssa_161 mul(8) g95<1>F g92<8,8,1>F g113<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 mad(8) g98<1>F g88<4,4,1>F g43<4,4,1>F g7.0<0,1,0>F { align16 1Q }; vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 mad(8) g99<1>F g89<4,4,1>F g43<4,4,1>F g7.1<0,1,0>F { align16 1Q }; vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 mad(8) g101<1>F g96<4,4,1>F g43<4,4,1>F g7.2<0,1,0>F { align16 1Q }; vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 mad(8) g107<1>F g97<4,4,1>F g43<4,4,1>F g7.3<0,1,0>F { align16 1Q }; vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 mad(8) g111<1>F g110<4,4,1>F g102<4,4,1>F g23.0<0,1,0>F { align16 1Q }; vec1 32 ssa_182 = fmul ssa_155, ssa_181 mul(8) g104<1>F g78<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_183 = fmul ssa_156, ssa_181 mul(8) g105<1>F g79<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_184 = fmul ssa_177, ssa_181 mul(8) g106<1>F g100<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_188 = fmul ssa_104, ssa_174 mul(8) g110<1>F g2<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_191 = fmul ssa_110, ssa_174 mul(8) g113<1>F g8<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_194 = fmul ssa_114, ssa_174 mul(8) g116<1>F g29<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_280 = fadd ssa_276, ssa_267.x add(8) g19<1>F g98<8,8,1>F g7.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_281 = fadd ssa_277, ssa_267.y add(8) g20<1>F g99<8,8,1>F g7.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_282 = fadd ssa_278, ssa_267.z add(8) g21<1>F g101<8,8,1>F g7.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_283 = fadd ssa_279, ssa_267.w add(8) g22<1>F g107<8,8,1>F g7.7<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 mad(8) g44<1>F g111<4,4,1>F g109<4,4,1>F g23.2<0,1,0>F { align16 1Q }; vec1 32 ssa_185 = fmul ssa_182, ssa_159 mul(8) g102<1>F 
g104<8,8,1>F g81<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_186 = fmul ssa_183, ssa_160 mul(8) g108<1>F g105<8,8,1>F g82<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_187 = fmul ssa_184, ssa_162 mul(8) g109<1>F g106<8,8,1>F g114<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 mad(8) g111<1>F g110<4,4,1>F g93<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 mad(8) g114<1>F g113<4,4,1>F g93<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 mad(8) g117<1>F g116<4,4,1>F g93<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_204 = fmul ssa_104, ssa_186 mul(8) g126<1>F g2<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_207 = fmul ssa_110, ssa_186 mul(8) g4<1>F g8<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_210 = fmul ssa_114, ssa_186 mul(8) g7<1>F g29<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 mad(8) g112<1>F g111<4,4,1>F g95<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 mad(8) g115<1>F g114<4,4,1>F g95<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 mad(8) g118<1>F g117<4,4,1>F g95<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 mad(8) g127<1>F g126<4,4,1>F g102<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 mad(8) g5<1>F g4<4,4,1>F g102<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 mad(8) g8<1>F g7<4,4,1>F g102<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_197 = fmul ssa_193, ssa_193 mul(8) g119<1>F g115<8,8,1>F g115<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 mad(8) g2<1>F g127<4,4,1>F g109<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 mad(8) g6<1>F g5<4,4,1>F g109<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 mad(8) g23<1>F g8<4,4,1>F g109<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 mad(8) g120<1>F g119<4,4,1>F g112<4,4,1>F g112<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 mad(8) g121<1>F g120<4,4,1>F g118<4,4,1>F g118<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_200 = frsq ssa_199 math rsq(8) g122<1>F g121<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_201 = fmul ssa_190, ssa_200 mul(8) g14<1>F g112<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_202 = fmul ssa_193, ssa_200 mul(8) g15<1>F g115<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_203 = fmul ssa_196, ssa_200 mul(8) g16<1>F g118<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_213 = fmul ssa_209, ssa_202 mul(8) g24<1>F g6<8,8,1>F g15<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 mad(8) g25<1>F g24<4,4,1>F g14<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 mad(8) g26<1>F g25<4,4,1>F g16<4,4,1>F g23<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 mad(8) g27<1>F g2<4,4,1>F g26<4,4,1>F -g14<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 mad(8) g28<1>F g6<4,4,1>F g26<4,4,1>F -g15<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 mad(8) 
g29<1>F g23<4,4,1>F g26<4,4,1>F -g16<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_219 = fmul ssa_217, ssa_217 mul(8) g30<1>F g28<8,8,1>F g28<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 mad(8) g65<1>F g30<4,4,1>F g27<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 mad(8) g66<1>F g65<4,4,1>F g29<4,4,1>F g29<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_222 = frsq ssa_221 math rsq(8) g67<1>F g66<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_223 = fmul ssa_216, ssa_222 mul(8) g32<1>F g27<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_224 = fmul ssa_217, ssa_222 mul(8) g33<1>F g28<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_225 = fmul ssa_218, ssa_222 mul(8) g34<1>F g29<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; URB write send(8) null<1>F g18<8,8,1>F urb 1 SIMD8 write mlen 5 rlen 0 { align1 1Q }; mov(8) g9<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; send(8) null<1>F g9<8,8,1>F urb 4 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g31<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g39<1>F g52<8,8,1>F { align1 1Q compacted }; send(8) null<1>F g31<8,8,1>F urb 6 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g40<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g48<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; send(8) null<1>F g40<8,8,1>F urb 8 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g123<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g124<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g125<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g126<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g127<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; send(8) null<1>F g123<8,8,1>F urb 10 SIMD8 write mlen 5 rlen 0 { align1 1Q EOT };
END B0

UBO Ranges: [0]: block 0 start 3 length 4 [1]: block 1 start 0 length 2 [2]: block 1 start 12 length 1 [3]: (none)
466 NIR instructions
NIR (SSA form) for vertex shader:
shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268) decl_var uniform INTERP_MODE_NONE sampler @1 (0, 0, 76) decl_var shader_in INTERP_MODE_NONE vec3 @2 (VERT_ATTRIB_GENERIC0.xyz, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0) decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0) decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0) decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0) decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0) decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0) decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0) decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 
0.000000 */) vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */) vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_5 = load_const (0x437f0000 /* 255.000000 */) vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */) vec2 32 ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */) vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, 
ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, 
-ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, -ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = 
fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 = load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo 
(ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 */) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const (0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output (ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203 intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */ vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163 intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293 intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0 intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */ intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283 intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* succs: block_0 */ block block_0: } NIR (final form) for vertex shader: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268) decl_var uniform INTERP_MODE_NONE sampler @1 (0, 0, 76) decl_var shader_in INTERP_MODE_NONE vec3 @2 (VERT_ATTRIB_GENERIC0.xyz, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0) decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0) decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0) decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0) decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0) decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0) decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0) decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x3f800000 /* 
1.000000 */) vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */) vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_5 = load_const (0x437f0000 /* 255.000000 */) vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */) vec2 32 ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */) vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 
ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = 
flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, -ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, -ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = 
fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 
= load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo (ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 */) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const (0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output (ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203 intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */ vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163 intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293 intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0 intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */ intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283 intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* succs: block_0 */ block block_0: }
VS Output VUE map (11 slots, SSO) [0] VARYING_SLOT_PSIZ [1] VARYING_SLOT_POS [2] VARYING_SLOT_CLIP_DIST0 [3] VARYING_SLOT_CLIP_DIST1 [4] VARYING_SLOT_VAR0 [5] VARYING_SLOT_VAR1 [6] VARYING_SLOT_VAR2 [7] VARYING_SLOT_VAR3 [8] VARYING_SLOT_VAR4 [9] VARYING_SLOT_VAR5 [10] VARYING_SLOT_VAR6

Native code for unnamed vertex shader (null)
SIMD8 shader: 308 instructions. 0 loops. 600 cycles. 0:0 spills:fills. Promoted 4 constants. 
Compacted 4928 to 3184 bytes (35%) START B0 (600 cycles) vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 add(8) g12<1>D g21<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 add(8) g67<1>D g22<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 add(8) g54<1>D g23<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 add(8) g121<1>D g24<8,8,1>D 2D { align1 1Q compacted }; vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) mov(8) g31<1>F g29<8,8,1>F { align1 1Q compacted }; mov(8) g32<1>F g30<8,8,1>F { align1 1Q compacted }; mov(1) g107<1>F 128F { align1 WE_all 1N }; vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y mul(8) g59<1>F g3.1<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y mul(8) g73<1>F g3.3<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y mul(8) g77<1>F g4.5<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y mul(8) g79<1>F g4.7<0,1,0>F g18<8,8,1>F { align1 1Q }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () mov(8) g24<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () mov(8) g37<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) mov(8) g35<1>F g29<8,8,1>F { align1 1Q compacted }; mov(8) g36<1>F g30<8,8,1>F { align1 1Q compacted }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () mov(8) g38<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; URB write mov(8) g18<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; vec1 32 ssa_20 = ishl ssa_19, ssa_6 shl(8) g23<1>D g12<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_34 = ishl ssa_33, ssa_6 shl(8) g68<1>D g67<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_70 = ishl ssa_69, ssa_6 shl(8) g55<1>D g54<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_94 = ishl ssa_93, ssa_6 shl(8) g122<1>D g121<8,8,1>D 0x00000002UD { align1 1Q }; vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g31<1>UW g31<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 3 { align1 1Q }; mov(1) g107.1<1>F 255F { align1 WE_all 1N }; vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 mad(8) g60<1>F g59<4,4,1>F g17<4,4,1>F g3.0<0,1,0>F { align16 1Q }; vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 mad(8) g74<1>F g73<4,4,1>F g17<4,4,1>F g3.2<0,1,0>F { align16 1Q }; vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 mad(8) g78<1>F g77<4,4,1>F g17<4,4,1>F g4.4<0,1,0>F { align16 1Q }; vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 mad(8) g80<1>F g79<4,4,1>F g17<4,4,1>F g4.6<0,1,0>F { align16 1Q }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () mov(1) g24.2<1>UD 0x00000014UD { align1 WE_all 1N compacted }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () mov(1) g37.2<1>UD 0x00000010UD { align1 WE_all 1N compacted }; vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) mov(8) g34<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g34.2<1>UD 0x00000300UD { align1 WE_all 1N compacted }; send(8) g49<1>UW g34<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () mov(1) g38.2<1>UD 0x00000004UD { align1 WE_all 1N compacted }; vec1 32 ssa_21 = i2f32 ssa_20 mov(8) g34<1>F g23<8,8,1>D { align1 1Q compacted }; vec1 32 
ssa_35 = i2f32 ssa_34 mov(8) g69<1>F g68<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_71 = i2f32 ssa_70 mov(8) g56<1>F g55<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_95 = i2f32 ssa_94 mov(8) g123<1>F g122<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z mul(8) g100<1>F g9<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z mul(8) g99<1>F g10<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z mul(8) g101<1>F g11<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 mad(8) g67<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g13<4,4,1>F { align16 1Q }; vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 mad(8) g68<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g14<4,4,1>F { align16 1Q }; vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 mad(8) g103<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g15<4,4,1>F { align16 1Q }; vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 mad(8) g104<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g16<4,4,1>F { align16 1Q }; vec1 32 ssa_249 = fadd ssa_244, ssa_248.x add(8) g10<1>F g60<8,8,1>F g2.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_250 = fadd ssa_246, ssa_248.y add(8) g11<1>F g74<8,8,1>F g2.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_259 = fadd ssa_254, ssa_258.x add(8) g12<1>F g78<8,8,1>F g4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_260 = fadd ssa_256, ssa_258.y add(8) g13<1>F g80<8,8,1>F g4.1<0,1,0>F { align1 1Q compacted }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () send(16) g23<1>UD g24<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () send(16) g97<1>UD g37<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () send(16) g2<1>UD g38<8,8,1>UD const (2, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec1 32 ssa_22 = fadd ssa_21, ssa_9 add(8) g35<1>F g34<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_36 = fadd ssa_35, ssa_9 add(8) g70<1>F g69<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_72 = fadd ssa_71, ssa_9 add(8) g57<1>F g56<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_96 = fadd ssa_95, ssa_9 add(8) g124<1>F g123<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_123 = flt ssa_119, ssa_0 cmp.l.f0(8) g69<1>F g67<8,8,1>F 0F { align1 1Q compacted }; mov(1) g107.2<1>F 1F { align1 WE_all 1N }; vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x mul(8) g4<1>F g2.4<0,1,0>F g5.4<0,1,0>F { align1 1Q }; vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y mul(8) g31<1>F g2.5<0,1,0>F g5.5<0,1,0>F { align1 1Q }; vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z mul(8) g32<1>F g2.6<0,1,0>F g5.6<0,1,0>F { align1 1Q }; vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x mad(8) g65<1>F g29<4,4,1>F g8.6<0,1,0>F g35<4,4,1>F { align16 1Q }; vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x mad(8) g72<1>F g29<4,4,1>F g8.6<0,1,0>F g70<4,4,1>F { align16 1Q }; vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x mad(8) g60<1>F g29<4,4,1>F g8.6<0,1,0>F g57<4,4,1>F { align16 1Q }; vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x mad(8) g125<1>F g29<4,4,1>F g8.6<0,1,0>F g124<4,4,1>F { align16 1Q }; vec1 32 ssa_125 = flt ssa_120, ssa_0 cmp.l.f0(8) g71<1>F g68<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_124 = b2f ssa_123 mov(8) g70<1>F -g69<8,8,1>D { align1 1Q compacted }; mov(1) g107.3<1>F 2F { align1 WE_all 1N }; vec1 32 ssa_309 = fmul ssa_304, ssa_308 mul(8) g45<1>F g4<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_310 = fmul ssa_305, ssa_308 mul(8) g46<1>F g31<8,8,1>F 
g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_311 = fmul ssa_306, ssa_308 mul(8) g47<1>F g32<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_24 = ffloor ssa_23 rndd(8) g66<1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_38 = ffloor ssa_37 rndd(8) g61<1>F g72<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_74 = ffloor ssa_73 rndd(8) g73<1>F g60<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_98 = ffloor ssa_97 rndd(8) g126<1>F g125<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_127 = flt ssa_121, ssa_0 cmp.l.f0(8) g63<1>F g103<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_126 = b2f ssa_125 mov(8) g62<1>F -g71<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 add(8) g105<1>F (abs)g67<8,8,1>F -g70<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 mad(8) g113<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g70<4,4,1>F { align16 1Q }; vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 lrp(8) g36<1>F g5.7<0,1,0>F g49<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 lrp(8) g37<1>F g5.7<0,1,0>F g50<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 lrp(8) g38<1>F g5.7<0,1,0>F g51<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_25 = fadd ssa_23, -ssa_24 add(8) g70<1>F g65<8,8,1>F -g66<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y mad(8) g71<1>F g30<4,4,1>F g8.7<0,1,0>F g66<4,4,1>F { align16 1Q }; vec1 32 ssa_39 = fadd ssa_37, -ssa_38 add(8) g58<1>F g72<8,8,1>F -g61<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y mad(8) g59<1>F g30<4,4,1>F g8.7<0,1,0>F g61<4,4,1>F { align16 1Q }; vec1 32 ssa_75 = fadd ssa_73, -ssa_74 add(8) g82<1>F g60<8,8,1>F -g73<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y mad(8) g83<1>F g30<4,4,1>F g8.7<0,1,0>F g73<4,4,1>F { align16 1Q }; vec1 32 ssa_99 = fadd ssa_97, -ssa_98 add(8) g94<1>F g125<8,8,1>F -g126<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y mad(8) g95<1>F g30<4,4,1>F g8.7<0,1,0>F g126<4,4,1>F { align16 1Q }; vec1 32 ssa_129 = flt ssa_122, ssa_0 cmp.l.f0(8) g110<1>F g104<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_128 = b2f ssa_127 mov(8) g108<1>F -g63<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 add(8) g106<1>F (abs)g68<8,8,1>F -g62<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_136 = fadd ssa_131, ssa_135 add(8) g109<1>F g105<8,8,1>F -64F { align1 1Q }; vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g2<1>UW g70<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g65<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g69<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g61<1>UW g58<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_51 = txl ssa_41 (coord), 
ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g53<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g57<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g73<1>UW g82<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g77<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g81<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g85<1>UW g94<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g89<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g93<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec1 32 ssa_140 = flt ssa_131, -ssa_135 cmp.l.f0(8) g124<1>F g105<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_130 = b2f ssa_129 mov(8) g111<1>F -g110<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 add(8) g102<1>F (abs)g103<8,8,1>F -g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 mad(8) g114<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g108<4,4,1>F { align16 1Q }; vec1 32 ssa_137 = fadd ssa_132, ssa_135 add(8) g110<1>F g106<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y mul(8) g103<1>F g61<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y mul(8) g105<1>F g62<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y mul(8) g112<1>F g63<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y mul(8) g115<1>F g64<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y mul(8) g120<1>F g53<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y mul(8) g121<1>F g54<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y mul(8) g122<1>F g55<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y mul(8) g123<1>F g56<8,8,1>F 
g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y mul(8) g8<1>F g57<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y mul(8) g29<1>F g58<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y mul(8) g30<1>F g59<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y mul(8) g61<1>F g60<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_142 = flt ssa_132, -ssa_135 cmp.l.f0(8) g54<1>F g106<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_141 = b2f ssa_140 mov(8) g26<1>F -g124<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 add(8) g108<1>F (abs)g104<8,8,1>F -g111<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 mad(8) g35<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g111<4,4,1>F { align16 1Q }; vec1 32 ssa_138 = fadd ssa_133, ssa_135 add(8) g111<1>F g102<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 mad(8) g116<1>F g103<4,4,1>F g25<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 mad(8) g117<1>F g105<4,4,1>F g25<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 mad(8) g118<1>F g112<4,4,1>F g25<4,4,1>F g4<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 mad(8) g119<1>F g115<4,4,1>F g25<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 mad(8) g124<1>F g120<4,4,1>F g25<4,4,1>F g65<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 mad(8) g125<1>F g121<4,4,1>F g25<4,4,1>F g66<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 mad(8) g126<1>F g122<4,4,1>F g25<4,4,1>F g67<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 mad(8) g127<1>F g123<4,4,1>F g25<4,4,1>F g68<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 mad(8) g62<1>F g8<4,4,1>F g25<4,4,1>F g69<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 mad(8) g63<1>F g29<4,4,1>F g25<4,4,1>F g70<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 mad(8) g64<1>F g30<4,4,1>F g25<4,4,1>F g71<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 mad(8) g53<1>F g61<4,4,1>F g25<4,4,1>F g72<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_144 = flt ssa_133, -ssa_135 cmp.l.f0(8) g56<1>F g102<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_143 = b2f ssa_142 mov(8) g55<1>F -g54<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 add(8) g60<1>F (abs)g109<8,8,1>F -g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 mad(8) g112<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g26<4,4,1>F { align16 1Q }; vec1 32 ssa_139 = fadd ssa_134, ssa_135 add(8) g121<1>F g108<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 mad(8) g103<1>F g116<4,4,1>F g27<4,4,1>F g73<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 mad(8) g104<1>F g117<4,4,1>F g27<4,4,1>F g74<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 mad(8) g105<1>F g118<4,4,1>F g27<4,4,1>F g75<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 mad(8) g106<1>F g119<4,4,1>F g27<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 mad(8) 
g102<1>F g124<4,4,1>F g27<4,4,1>F g77<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 mad(8) g109<1>F g125<4,4,1>F g27<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 mad(8) g115<1>F g126<4,4,1>F g27<4,4,1>F g79<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 mad(8) g116<1>F g127<4,4,1>F g27<4,4,1>F g80<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 mad(8) g117<1>F g62<4,4,1>F g27<4,4,1>F g81<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 mad(8) g118<1>F g63<4,4,1>F g27<4,4,1>F g82<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 mad(8) g119<1>F g64<4,4,1>F g27<4,4,1>F g83<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 mad(8) g120<1>F g53<4,4,1>F g27<4,4,1>F g84<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_146 = flt ssa_134, -ssa_135 cmp.l.f0(8) g58<1>F g108<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_145 = b2f ssa_144 mov(8) g57<1>F -g56<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 add(8) g73<1>F (abs)g110<8,8,1>F -g55<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 mad(8) g80<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g55<4,4,1>F { align16 1Q }; vec1 32 ssa_153 = fmul ssa_148, ssa_152 mul(8) g76<1>F g60<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 mad(8) g127<1>F g103<4,4,1>F g28<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 mad(8) g2<1>F g104<4,4,1>F g28<4,4,1>F g86<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 mad(8) g3<1>F g105<4,4,1>F g28<4,4,1>F g87<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 mad(8) g4<1>F g106<4,4,1>F g28<4,4,1>F g88<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 mad(8) g5<1>F g102<4,4,1>F g28<4,4,1>F g89<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 mad(8) g8<1>F g109<4,4,1>F g28<4,4,1>F g90<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 mad(8) g25<1>F g115<4,4,1>F g28<4,4,1>F g91<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 mad(8) g26<1>F g116<4,4,1>F g28<4,4,1>F g92<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 mad(8) g27<1>F g117<4,4,1>F g28<4,4,1>F g93<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 mad(8) g29<1>F g118<4,4,1>F g28<4,4,1>F g94<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 mad(8) g30<1>F g119<4,4,1>F g28<4,4,1>F g95<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 mad(8) g65<1>F g120<4,4,1>F g28<4,4,1>F g96<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_147 = b2f ssa_146 mov(8) g59<1>F -g58<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 add(8) g74<1>F (abs)g111<8,8,1>F -g57<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 mad(8) g81<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g57<4,4,1>F { align16 1Q }; vec1 32 ssa_154 = fmul ssa_149, ssa_152 mul(8) g77<1>F g73<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_164 = fadd ssa_1, -ssa_153 add(8) g84<1>F -g76<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_229 = fmul ssa_104, ssa_227 
mul(8) g71<1>F g2<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_233 = fmul ssa_110, ssa_227 mul(8) g63<1>F g8<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_237 = fmul ssa_114, ssa_227 mul(8) g55<1>F g29<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 add(8) g75<1>F (abs)g121<8,8,1>F -g59<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 mad(8) g82<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g59<4,4,1>F { align16 1Q }; vec1 32 ssa_155 = fmul ssa_150, ssa_152 mul(8) g78<1>F g74<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_166 = fmul ssa_154, ssa_154 mul(8) g86<1>F g77<8,8,1>F g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_165 = fadd ssa_164, -ssa_154 add(8) g85<1>F g84<8,8,1>F -g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 mad(8) g72<1>F g71<4,4,1>F g100<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 mad(8) g64<1>F g63<4,4,1>F g100<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 mad(8) g56<1>F g55<4,4,1>F g100<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_156 = fmul ssa_151, ssa_152 mul(8) g79<1>F g75<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_176 = fadd ssa_1, -ssa_155 add(8) g96<1>F -g78<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 mad(8) g87<1>F g86<4,4,1>F g76<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 mad(8) g61<1>F g72<4,4,1>F g101<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 mad(8) g53<1>F g64<4,4,1>F g101<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 mad(8) g57<1>F g56<4,4,1>F g101<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_178 = fmul ssa_156, ssa_156 mul(8) g99<1>F g79<8,8,1>F g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_177 = fadd ssa_176, -ssa_156 add(8) g100<1>F g96<8,8,1>F -g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 mad(8) g88<1>F g87<4,4,1>F g85<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_232 = fadd ssa_231, ssa_106 add(8) g41<1>F g61<8,8,1>F g4<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_236 = fadd ssa_235, ssa_112 add(8) g42<1>F g53<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_240 = fadd ssa_239, ssa_116 add(8) g43<1>F g57<8,8,1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 mad(8) g101<1>F g99<4,4,1>F g78<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_169 = frsq ssa_168 math rsq(8) g89<1>F g88<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x add(8) g102<1>F g41<8,8,1>F -g98.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 mul(8) g83<1>F g6.4<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 mul(8) g84<1>F g6.5<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 mul(8) g86<1>F g6.6<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 mul(8) g87<1>F g6.7<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y add(8) g108<1>F g42<8,8,1>F -g98.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z add(8) g109<1>F g43<8,8,1>F -g98.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 mad(8) g107<1>F 
g101<4,4,1>F g100<4,4,1>F g100<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_170 = fmul ssa_153, ssa_169 mul(8) g90<1>F g76<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_171 = fmul ssa_154, ssa_169 mul(8) g91<1>F g77<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_172 = fmul ssa_165, ssa_169 mul(8) g92<1>F g85<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 mad(8) g88<1>F g83<4,4,1>F g41<4,4,1>F g6.0<0,1,0>F { align16 1Q }; vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 mad(8) g89<1>F g84<4,4,1>F g41<4,4,1>F g6.1<0,1,0>F { align16 1Q }; vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 mad(8) g96<1>F g86<4,4,1>F g41<4,4,1>F g6.2<0,1,0>F { align16 1Q }; vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 mad(8) g97<1>F g87<4,4,1>F g41<4,4,1>F g6.3<0,1,0>F { align16 1Q }; vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 mul(8) g110<1>F g23.1<0,1,0>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_181 = frsq ssa_180 math rsq(8) g103<1>F g107<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_173 = fmul ssa_170, ssa_157 mul(8) g93<1>F g90<8,8,1>F g112<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_174 = fmul ssa_171, ssa_158 mul(8) g94<1>F g91<8,8,1>F g80<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_175 = fmul ssa_172, ssa_161 mul(8) g95<1>F g92<8,8,1>F g113<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 mad(8) g98<1>F g88<4,4,1>F g43<4,4,1>F g7.0<0,1,0>F { align16 1Q }; vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 mad(8) g99<1>F g89<4,4,1>F g43<4,4,1>F g7.1<0,1,0>F { align16 1Q }; vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 mad(8) g101<1>F g96<4,4,1>F g43<4,4,1>F g7.2<0,1,0>F { align16 1Q }; vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 mad(8) g107<1>F g97<4,4,1>F g43<4,4,1>F g7.3<0,1,0>F { align16 1Q }; vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 mad(8) g111<1>F g110<4,4,1>F g102<4,4,1>F g23.0<0,1,0>F { align16 1Q }; vec1 32 ssa_182 = fmul ssa_155, ssa_181 mul(8) g104<1>F g78<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_183 = fmul ssa_156, ssa_181 mul(8) g105<1>F g79<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_184 = fmul ssa_177, ssa_181 mul(8) g106<1>F g100<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_188 = fmul ssa_104, ssa_174 mul(8) g110<1>F g2<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_191 = fmul ssa_110, ssa_174 mul(8) g113<1>F g8<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_194 = fmul ssa_114, ssa_174 mul(8) g116<1>F g29<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_280 = fadd ssa_276, ssa_267.x add(8) g19<1>F g98<8,8,1>F g7.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_281 = fadd ssa_277, ssa_267.y add(8) g20<1>F g99<8,8,1>F g7.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_282 = fadd ssa_278, ssa_267.z add(8) g21<1>F g101<8,8,1>F g7.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_283 = fadd ssa_279, ssa_267.w add(8) g22<1>F g107<8,8,1>F g7.7<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 mad(8) g44<1>F g111<4,4,1>F g109<4,4,1>F g23.2<0,1,0>F { align16 1Q }; vec1 32 ssa_185 = fmul ssa_182, ssa_159 mul(8) g102<1>F g104<8,8,1>F g81<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_186 = fmul ssa_183, ssa_160 mul(8) g108<1>F g105<8,8,1>F g82<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_187 = fmul ssa_184, ssa_162 mul(8) g109<1>F g106<8,8,1>F g114<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 mad(8) g111<1>F 
g110<4,4,1>F g93<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 mad(8) g114<1>F g113<4,4,1>F g93<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 mad(8) g117<1>F g116<4,4,1>F g93<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_204 = fmul ssa_104, ssa_186 mul(8) g126<1>F g2<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_207 = fmul ssa_110, ssa_186 mul(8) g4<1>F g8<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_210 = fmul ssa_114, ssa_186 mul(8) g7<1>F g29<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 mad(8) g112<1>F g111<4,4,1>F g95<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 mad(8) g115<1>F g114<4,4,1>F g95<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 mad(8) g118<1>F g117<4,4,1>F g95<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 mad(8) g127<1>F g126<4,4,1>F g102<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 mad(8) g5<1>F g4<4,4,1>F g102<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 mad(8) g8<1>F g7<4,4,1>F g102<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_197 = fmul ssa_193, ssa_193 mul(8) g119<1>F g115<8,8,1>F g115<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 mad(8) g2<1>F g127<4,4,1>F g109<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 mad(8) g6<1>F g5<4,4,1>F g109<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 mad(8) g23<1>F g8<4,4,1>F g109<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 mad(8) g120<1>F g119<4,4,1>F g112<4,4,1>F g112<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 mad(8) g121<1>F g120<4,4,1>F g118<4,4,1>F g118<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_200 = frsq ssa_199 math rsq(8) g122<1>F g121<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_201 = fmul ssa_190, ssa_200 mul(8) g14<1>F g112<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_202 = fmul ssa_193, ssa_200 mul(8) g15<1>F g115<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_203 = fmul ssa_196, ssa_200 mul(8) g16<1>F g118<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_213 = fmul ssa_209, ssa_202 mul(8) g24<1>F g6<8,8,1>F g15<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 mad(8) g25<1>F g24<4,4,1>F g14<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 mad(8) g26<1>F g25<4,4,1>F g16<4,4,1>F g23<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 mad(8) g27<1>F g2<4,4,1>F g26<4,4,1>F -g14<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 mad(8) g28<1>F g6<4,4,1>F g26<4,4,1>F -g15<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 mad(8) g29<1>F g23<4,4,1>F g26<4,4,1>F -g16<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_219 = fmul ssa_217, ssa_217 mul(8) g30<1>F g28<8,8,1>F g28<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 mad(8) g65<1>F g30<4,4,1>F g27<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_221 = ffma 
ssa_218, ssa_218, ssa_220
mad(8) g66<1>F g65<4,4,1>F g29<4,4,1>F g29<4,4,1>F { align16 1Q compacted };
vec1 32 ssa_222 = frsq ssa_221
math rsq(8) g67<1>F g66<8,8,1>F null<8,8,1>F { align1 1Q compacted };
vec1 32 ssa_223 = fmul ssa_216, ssa_222
mul(8) g32<1>F g27<8,8,1>F g67<8,8,1>F { align1 1Q compacted };
vec1 32 ssa_224 = fmul ssa_217, ssa_222
mul(8) g33<1>F g28<8,8,1>F g67<8,8,1>F { align1 1Q compacted };
vec1 32 ssa_225 = fmul ssa_218, ssa_222
mul(8) g34<1>F g29<8,8,1>F g67<8,8,1>F { align1 1Q compacted };
URB write
send(8) null<1>F g18<8,8,1>F urb 1 SIMD8 write mlen 5 rlen 0 { align1 1Q };
mov(8) g9<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted };
send(8) null<1>F g9<8,8,1>F urb 4 SIMD8 write mlen 9 rlen 0 { align1 1Q };
mov(8) g31<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted };
mov(8) g39<1>F g52<8,8,1>F { align1 1Q compacted };
send(8) null<1>F g31<8,8,1>F urb 6 SIMD8 write mlen 9 rlen 0 { align1 1Q };
mov(8) g40<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted };
mov(8) g48<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted };
send(8) null<1>F g40<8,8,1>F urb 8 SIMD8 write mlen 9 rlen 0 { align1 1Q };
mov(8) g123<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted };
mov(8) g124<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted };
mov(8) g125<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted };
mov(8) g126<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted };
mov(8) g127<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted };
send(8) null<1>F g123<8,8,1>F urb 10 SIMD8 write mlen 5 rlen 0 { align1 1Q EOT };
END B0
UBO Ranges:
[0]: block 0 start 3 length 4
[1]: block 1 start 0 length 2
[2]: block 1 start 12 length 1
[3]: (none)
466 NIR instructions
NIR (SSA form) for vertex shader:
shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0
decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268)
decl_var uniform INTERP_MODE_NONE sampler @1 (0, 0, 76)
decl_var shader_in INTERP_MODE_NONE vec3 @2 (VERT_ATTRIB_GENERIC0.xyz, 16, 0)
decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0)
decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0)
decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0)
decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0)
decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0)
decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0)
decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0)
decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0)
decl_function main returning void
impl main {
block block_0:
/* preds: */
vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */)
vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */)
vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */)
vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */)
vec1 32 ssa_5 = load_const (0x437f0000 /* 255.000000 */)
vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */)
vec2 32
ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */) vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 
ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, -ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 
32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, -ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, 
ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 = load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo (ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 */) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const 
(0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output (ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203 intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */ vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163 intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293 intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0 intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */ intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283 intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* succs: block_0 */ block block_0: } NIR (final form) for vertex shader: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268) decl_var uniform INTERP_MODE_NONE sampler @1 (0, 0, 76) decl_var shader_in INTERP_MODE_NONE vec3 @2 (VERT_ATTRIB_GENERIC0.xyz, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0) decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0) decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0) decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0) decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0) decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0) decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0) decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */) vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_5 = load_const 
(0x437f0000 /* 255.000000 */) vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */) vec2 32 ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */) vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, 
ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, -ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 
vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, -ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, 
ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 = load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo (ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 
*/) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const (0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output (ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203 intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */ vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163 intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293 intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0 intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */ intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283 intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* succs: block_0 */ block block_0: } VS Output VUE map (11 slots, SSO) [0] VARYING_SLOT_PSIZ [1] VARYING_SLOT_POS [2] VARYING_SLOT_CLIP_DIST0 [3] VARYING_SLOT_CLIP_DIST1 [4] VARYING_SLOT_VAR0 [5] VARYING_SLOT_VAR1 [6] VARYING_SLOT_VAR2 [7] VARYING_SLOT_VAR3 [8] VARYING_SLOT_VAR4 [9] VARYING_SLOT_VAR5 [10] VARYING_SLOT_VAR6 Native code for unnamed vertex shader (null) SIMD8 shader: 308 instructions. 0 loops. 600 cycles. 0:0 spills:fills. Promoted 4 constants. 
Compacted 4928 to 3184 bytes (35%) START B0 (600 cycles) vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 add(8) g12<1>D g21<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 add(8) g67<1>D g22<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 add(8) g54<1>D g23<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 add(8) g121<1>D g24<8,8,1>D 2D { align1 1Q compacted }; vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) mov(8) g31<1>F g29<8,8,1>F { align1 1Q compacted }; mov(8) g32<1>F g30<8,8,1>F { align1 1Q compacted }; mov(1) g107<1>F 128F { align1 WE_all 1N }; vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y mul(8) g59<1>F g3.1<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y mul(8) g73<1>F g3.3<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y mul(8) g77<1>F g4.5<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y mul(8) g79<1>F g4.7<0,1,0>F g18<8,8,1>F { align1 1Q }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () mov(8) g24<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () mov(8) g37<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) mov(8) g35<1>F g29<8,8,1>F { align1 1Q compacted }; mov(8) g36<1>F g30<8,8,1>F { align1 1Q compacted }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () mov(8) g38<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; URB write mov(8) g18<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; vec1 32 ssa_20 = ishl ssa_19, ssa_6 shl(8) g23<1>D g12<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_34 = ishl ssa_33, ssa_6 shl(8) g68<1>D g67<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_70 = ishl ssa_69, ssa_6 shl(8) g55<1>D g54<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_94 = ishl ssa_93, ssa_6 shl(8) g122<1>D g121<8,8,1>D 0x00000002UD { align1 1Q }; vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g31<1>UW g31<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 3 { align1 1Q }; mov(1) g107.1<1>F 255F { align1 WE_all 1N }; vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 mad(8) g60<1>F g59<4,4,1>F g17<4,4,1>F g3.0<0,1,0>F { align16 1Q }; vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 mad(8) g74<1>F g73<4,4,1>F g17<4,4,1>F g3.2<0,1,0>F { align16 1Q }; vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 mad(8) g78<1>F g77<4,4,1>F g17<4,4,1>F g4.4<0,1,0>F { align16 1Q }; vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 mad(8) g80<1>F g79<4,4,1>F g17<4,4,1>F g4.6<0,1,0>F { align16 1Q }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () mov(1) g24.2<1>UD 0x00000014UD { align1 WE_all 1N compacted }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () mov(1) g37.2<1>UD 0x00000010UD { align1 WE_all 1N compacted }; vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) mov(8) g34<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g34.2<1>UD 0x00000300UD { align1 WE_all 1N compacted }; send(8) g49<1>UW g34<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () mov(1) g38.2<1>UD 0x00000004UD { align1 WE_all 1N compacted }; vec1 32 ssa_21 = i2f32 ssa_20 mov(8) g34<1>F g23<8,8,1>D { align1 1Q compacted }; vec1 32 
ssa_35 = i2f32 ssa_34 mov(8) g69<1>F g68<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_71 = i2f32 ssa_70 mov(8) g56<1>F g55<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_95 = i2f32 ssa_94 mov(8) g123<1>F g122<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z mul(8) g100<1>F g9<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z mul(8) g99<1>F g10<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z mul(8) g101<1>F g11<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 mad(8) g67<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g13<4,4,1>F { align16 1Q }; vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 mad(8) g68<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g14<4,4,1>F { align16 1Q }; vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 mad(8) g103<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g15<4,4,1>F { align16 1Q }; vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 mad(8) g104<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g16<4,4,1>F { align16 1Q }; vec1 32 ssa_249 = fadd ssa_244, ssa_248.x add(8) g10<1>F g60<8,8,1>F g2.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_250 = fadd ssa_246, ssa_248.y add(8) g11<1>F g74<8,8,1>F g2.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_259 = fadd ssa_254, ssa_258.x add(8) g12<1>F g78<8,8,1>F g4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_260 = fadd ssa_256, ssa_258.y add(8) g13<1>F g80<8,8,1>F g4.1<0,1,0>F { align1 1Q compacted }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () send(16) g23<1>UD g24<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () send(16) g97<1>UD g37<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () send(16) g2<1>UD g38<8,8,1>UD const (2, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec1 32 ssa_22 = fadd ssa_21, ssa_9 add(8) g35<1>F g34<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_36 = fadd ssa_35, ssa_9 add(8) g70<1>F g69<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_72 = fadd ssa_71, ssa_9 add(8) g57<1>F g56<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_96 = fadd ssa_95, ssa_9 add(8) g124<1>F g123<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_123 = flt ssa_119, ssa_0 cmp.l.f0(8) g69<1>F g67<8,8,1>F 0F { align1 1Q compacted }; mov(1) g107.2<1>F 1F { align1 WE_all 1N }; vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x mul(8) g4<1>F g2.4<0,1,0>F g5.4<0,1,0>F { align1 1Q }; vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y mul(8) g31<1>F g2.5<0,1,0>F g5.5<0,1,0>F { align1 1Q }; vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z mul(8) g32<1>F g2.6<0,1,0>F g5.6<0,1,0>F { align1 1Q }; vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x mad(8) g65<1>F g29<4,4,1>F g8.6<0,1,0>F g35<4,4,1>F { align16 1Q }; vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x mad(8) g72<1>F g29<4,4,1>F g8.6<0,1,0>F g70<4,4,1>F { align16 1Q }; vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x mad(8) g60<1>F g29<4,4,1>F g8.6<0,1,0>F g57<4,4,1>F { align16 1Q }; vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x mad(8) g125<1>F g29<4,4,1>F g8.6<0,1,0>F g124<4,4,1>F { align16 1Q }; vec1 32 ssa_125 = flt ssa_120, ssa_0 cmp.l.f0(8) g71<1>F g68<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_124 = b2f ssa_123 mov(8) g70<1>F -g69<8,8,1>D { align1 1Q compacted }; mov(1) g107.3<1>F 2F { align1 WE_all 1N }; vec1 32 ssa_309 = fmul ssa_304, ssa_308 mul(8) g45<1>F g4<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_310 = fmul ssa_305, ssa_308 mul(8) g46<1>F g31<8,8,1>F 
g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_311 = fmul ssa_306, ssa_308 mul(8) g47<1>F g32<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_24 = ffloor ssa_23 rndd(8) g66<1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_38 = ffloor ssa_37 rndd(8) g61<1>F g72<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_74 = ffloor ssa_73 rndd(8) g73<1>F g60<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_98 = ffloor ssa_97 rndd(8) g126<1>F g125<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_127 = flt ssa_121, ssa_0 cmp.l.f0(8) g63<1>F g103<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_126 = b2f ssa_125 mov(8) g62<1>F -g71<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 add(8) g105<1>F (abs)g67<8,8,1>F -g70<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 mad(8) g113<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g70<4,4,1>F { align16 1Q }; vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 lrp(8) g36<1>F g5.7<0,1,0>F g49<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 lrp(8) g37<1>F g5.7<0,1,0>F g50<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 lrp(8) g38<1>F g5.7<0,1,0>F g51<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_25 = fadd ssa_23, -ssa_24 add(8) g70<1>F g65<8,8,1>F -g66<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y mad(8) g71<1>F g30<4,4,1>F g8.7<0,1,0>F g66<4,4,1>F { align16 1Q }; vec1 32 ssa_39 = fadd ssa_37, -ssa_38 add(8) g58<1>F g72<8,8,1>F -g61<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y mad(8) g59<1>F g30<4,4,1>F g8.7<0,1,0>F g61<4,4,1>F { align16 1Q }; vec1 32 ssa_75 = fadd ssa_73, -ssa_74 add(8) g82<1>F g60<8,8,1>F -g73<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y mad(8) g83<1>F g30<4,4,1>F g8.7<0,1,0>F g73<4,4,1>F { align16 1Q }; vec1 32 ssa_99 = fadd ssa_97, -ssa_98 add(8) g94<1>F g125<8,8,1>F -g126<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y mad(8) g95<1>F g30<4,4,1>F g8.7<0,1,0>F g126<4,4,1>F { align16 1Q }; vec1 32 ssa_129 = flt ssa_122, ssa_0 cmp.l.f0(8) g110<1>F g104<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_128 = b2f ssa_127 mov(8) g108<1>F -g63<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 add(8) g106<1>F (abs)g68<8,8,1>F -g62<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_136 = fadd ssa_131, ssa_135 add(8) g109<1>F g105<8,8,1>F -64F { align1 1Q }; vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g2<1>UW g70<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g65<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g69<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g61<1>UW g58<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_51 = txl ssa_41 (coord), 
ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g53<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g57<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g73<1>UW g82<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g77<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g81<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g85<1>UW g94<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g89<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g93<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec1 32 ssa_140 = flt ssa_131, -ssa_135 cmp.l.f0(8) g124<1>F g105<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_130 = b2f ssa_129 mov(8) g111<1>F -g110<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 add(8) g102<1>F (abs)g103<8,8,1>F -g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 mad(8) g114<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g108<4,4,1>F { align16 1Q }; vec1 32 ssa_137 = fadd ssa_132, ssa_135 add(8) g110<1>F g106<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y mul(8) g103<1>F g61<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y mul(8) g105<1>F g62<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y mul(8) g112<1>F g63<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y mul(8) g115<1>F g64<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y mul(8) g120<1>F g53<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y mul(8) g121<1>F g54<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y mul(8) g122<1>F g55<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y mul(8) g123<1>F g56<8,8,1>F 
g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y mul(8) g8<1>F g57<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y mul(8) g29<1>F g58<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y mul(8) g30<1>F g59<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y mul(8) g61<1>F g60<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_142 = flt ssa_132, -ssa_135 cmp.l.f0(8) g54<1>F g106<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_141 = b2f ssa_140 mov(8) g26<1>F -g124<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 add(8) g108<1>F (abs)g104<8,8,1>F -g111<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 mad(8) g35<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g111<4,4,1>F { align16 1Q }; vec1 32 ssa_138 = fadd ssa_133, ssa_135 add(8) g111<1>F g102<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 mad(8) g116<1>F g103<4,4,1>F g25<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 mad(8) g117<1>F g105<4,4,1>F g25<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 mad(8) g118<1>F g112<4,4,1>F g25<4,4,1>F g4<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 mad(8) g119<1>F g115<4,4,1>F g25<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 mad(8) g124<1>F g120<4,4,1>F g25<4,4,1>F g65<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 mad(8) g125<1>F g121<4,4,1>F g25<4,4,1>F g66<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 mad(8) g126<1>F g122<4,4,1>F g25<4,4,1>F g67<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 mad(8) g127<1>F g123<4,4,1>F g25<4,4,1>F g68<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 mad(8) g62<1>F g8<4,4,1>F g25<4,4,1>F g69<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 mad(8) g63<1>F g29<4,4,1>F g25<4,4,1>F g70<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 mad(8) g64<1>F g30<4,4,1>F g25<4,4,1>F g71<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 mad(8) g53<1>F g61<4,4,1>F g25<4,4,1>F g72<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_144 = flt ssa_133, -ssa_135 cmp.l.f0(8) g56<1>F g102<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_143 = b2f ssa_142 mov(8) g55<1>F -g54<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 add(8) g60<1>F (abs)g109<8,8,1>F -g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 mad(8) g112<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g26<4,4,1>F { align16 1Q }; vec1 32 ssa_139 = fadd ssa_134, ssa_135 add(8) g121<1>F g108<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 mad(8) g103<1>F g116<4,4,1>F g27<4,4,1>F g73<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 mad(8) g104<1>F g117<4,4,1>F g27<4,4,1>F g74<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 mad(8) g105<1>F g118<4,4,1>F g27<4,4,1>F g75<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 mad(8) g106<1>F g119<4,4,1>F g27<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 mad(8) 
g102<1>F g124<4,4,1>F g27<4,4,1>F g77<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 mad(8) g109<1>F g125<4,4,1>F g27<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 mad(8) g115<1>F g126<4,4,1>F g27<4,4,1>F g79<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 mad(8) g116<1>F g127<4,4,1>F g27<4,4,1>F g80<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 mad(8) g117<1>F g62<4,4,1>F g27<4,4,1>F g81<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 mad(8) g118<1>F g63<4,4,1>F g27<4,4,1>F g82<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 mad(8) g119<1>F g64<4,4,1>F g27<4,4,1>F g83<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 mad(8) g120<1>F g53<4,4,1>F g27<4,4,1>F g84<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_146 = flt ssa_134, -ssa_135 cmp.l.f0(8) g58<1>F g108<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_145 = b2f ssa_144 mov(8) g57<1>F -g56<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 add(8) g73<1>F (abs)g110<8,8,1>F -g55<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 mad(8) g80<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g55<4,4,1>F { align16 1Q }; vec1 32 ssa_153 = fmul ssa_148, ssa_152 mul(8) g76<1>F g60<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 mad(8) g127<1>F g103<4,4,1>F g28<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 mad(8) g2<1>F g104<4,4,1>F g28<4,4,1>F g86<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 mad(8) g3<1>F g105<4,4,1>F g28<4,4,1>F g87<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 mad(8) g4<1>F g106<4,4,1>F g28<4,4,1>F g88<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 mad(8) g5<1>F g102<4,4,1>F g28<4,4,1>F g89<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 mad(8) g8<1>F g109<4,4,1>F g28<4,4,1>F g90<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 mad(8) g25<1>F g115<4,4,1>F g28<4,4,1>F g91<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 mad(8) g26<1>F g116<4,4,1>F g28<4,4,1>F g92<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 mad(8) g27<1>F g117<4,4,1>F g28<4,4,1>F g93<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 mad(8) g29<1>F g118<4,4,1>F g28<4,4,1>F g94<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 mad(8) g30<1>F g119<4,4,1>F g28<4,4,1>F g95<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 mad(8) g65<1>F g120<4,4,1>F g28<4,4,1>F g96<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_147 = b2f ssa_146 mov(8) g59<1>F -g58<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 add(8) g74<1>F (abs)g111<8,8,1>F -g57<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 mad(8) g81<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g57<4,4,1>F { align16 1Q }; vec1 32 ssa_154 = fmul ssa_149, ssa_152 mul(8) g77<1>F g73<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_164 = fadd ssa_1, -ssa_153 add(8) g84<1>F -g76<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_229 = fmul ssa_104, ssa_227 
mul(8) g71<1>F g2<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_233 = fmul ssa_110, ssa_227 mul(8) g63<1>F g8<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_237 = fmul ssa_114, ssa_227 mul(8) g55<1>F g29<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 add(8) g75<1>F (abs)g121<8,8,1>F -g59<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 mad(8) g82<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g59<4,4,1>F { align16 1Q }; vec1 32 ssa_155 = fmul ssa_150, ssa_152 mul(8) g78<1>F g74<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_166 = fmul ssa_154, ssa_154 mul(8) g86<1>F g77<8,8,1>F g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_165 = fadd ssa_164, -ssa_154 add(8) g85<1>F g84<8,8,1>F -g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 mad(8) g72<1>F g71<4,4,1>F g100<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 mad(8) g64<1>F g63<4,4,1>F g100<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 mad(8) g56<1>F g55<4,4,1>F g100<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_156 = fmul ssa_151, ssa_152 mul(8) g79<1>F g75<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_176 = fadd ssa_1, -ssa_155 add(8) g96<1>F -g78<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 mad(8) g87<1>F g86<4,4,1>F g76<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 mad(8) g61<1>F g72<4,4,1>F g101<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 mad(8) g53<1>F g64<4,4,1>F g101<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 mad(8) g57<1>F g56<4,4,1>F g101<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_178 = fmul ssa_156, ssa_156 mul(8) g99<1>F g79<8,8,1>F g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_177 = fadd ssa_176, -ssa_156 add(8) g100<1>F g96<8,8,1>F -g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 mad(8) g88<1>F g87<4,4,1>F g85<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_232 = fadd ssa_231, ssa_106 add(8) g41<1>F g61<8,8,1>F g4<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_236 = fadd ssa_235, ssa_112 add(8) g42<1>F g53<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_240 = fadd ssa_239, ssa_116 add(8) g43<1>F g57<8,8,1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 mad(8) g101<1>F g99<4,4,1>F g78<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_169 = frsq ssa_168 math rsq(8) g89<1>F g88<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x add(8) g102<1>F g41<8,8,1>F -g98.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 mul(8) g83<1>F g6.4<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 mul(8) g84<1>F g6.5<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 mul(8) g86<1>F g6.6<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 mul(8) g87<1>F g6.7<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y add(8) g108<1>F g42<8,8,1>F -g98.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z add(8) g109<1>F g43<8,8,1>F -g98.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 mad(8) g107<1>F 
g101<4,4,1>F g100<4,4,1>F g100<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_170 = fmul ssa_153, ssa_169 mul(8) g90<1>F g76<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_171 = fmul ssa_154, ssa_169 mul(8) g91<1>F g77<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_172 = fmul ssa_165, ssa_169 mul(8) g92<1>F g85<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 mad(8) g88<1>F g83<4,4,1>F g41<4,4,1>F g6.0<0,1,0>F { align16 1Q }; vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 mad(8) g89<1>F g84<4,4,1>F g41<4,4,1>F g6.1<0,1,0>F { align16 1Q }; vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 mad(8) g96<1>F g86<4,4,1>F g41<4,4,1>F g6.2<0,1,0>F { align16 1Q }; vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 mad(8) g97<1>F g87<4,4,1>F g41<4,4,1>F g6.3<0,1,0>F { align16 1Q }; vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 mul(8) g110<1>F g23.1<0,1,0>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_181 = frsq ssa_180 math rsq(8) g103<1>F g107<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_173 = fmul ssa_170, ssa_157 mul(8) g93<1>F g90<8,8,1>F g112<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_174 = fmul ssa_171, ssa_158 mul(8) g94<1>F g91<8,8,1>F g80<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_175 = fmul ssa_172, ssa_161 mul(8) g95<1>F g92<8,8,1>F g113<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 mad(8) g98<1>F g88<4,4,1>F g43<4,4,1>F g7.0<0,1,0>F { align16 1Q }; vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 mad(8) g99<1>F g89<4,4,1>F g43<4,4,1>F g7.1<0,1,0>F { align16 1Q }; vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 mad(8) g101<1>F g96<4,4,1>F g43<4,4,1>F g7.2<0,1,0>F { align16 1Q }; vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 mad(8) g107<1>F g97<4,4,1>F g43<4,4,1>F g7.3<0,1,0>F { align16 1Q }; vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 mad(8) g111<1>F g110<4,4,1>F g102<4,4,1>F g23.0<0,1,0>F { align16 1Q }; vec1 32 ssa_182 = fmul ssa_155, ssa_181 mul(8) g104<1>F g78<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_183 = fmul ssa_156, ssa_181 mul(8) g105<1>F g79<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_184 = fmul ssa_177, ssa_181 mul(8) g106<1>F g100<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_188 = fmul ssa_104, ssa_174 mul(8) g110<1>F g2<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_191 = fmul ssa_110, ssa_174 mul(8) g113<1>F g8<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_194 = fmul ssa_114, ssa_174 mul(8) g116<1>F g29<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_280 = fadd ssa_276, ssa_267.x add(8) g19<1>F g98<8,8,1>F g7.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_281 = fadd ssa_277, ssa_267.y add(8) g20<1>F g99<8,8,1>F g7.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_282 = fadd ssa_278, ssa_267.z add(8) g21<1>F g101<8,8,1>F g7.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_283 = fadd ssa_279, ssa_267.w add(8) g22<1>F g107<8,8,1>F g7.7<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 mad(8) g44<1>F g111<4,4,1>F g109<4,4,1>F g23.2<0,1,0>F { align16 1Q }; vec1 32 ssa_185 = fmul ssa_182, ssa_159 mul(8) g102<1>F g104<8,8,1>F g81<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_186 = fmul ssa_183, ssa_160 mul(8) g108<1>F g105<8,8,1>F g82<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_187 = fmul ssa_184, ssa_162 mul(8) g109<1>F g106<8,8,1>F g114<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 mad(8) g111<1>F 
g110<4,4,1>F g93<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 mad(8) g114<1>F g113<4,4,1>F g93<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 mad(8) g117<1>F g116<4,4,1>F g93<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_204 = fmul ssa_104, ssa_186 mul(8) g126<1>F g2<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_207 = fmul ssa_110, ssa_186 mul(8) g4<1>F g8<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_210 = fmul ssa_114, ssa_186 mul(8) g7<1>F g29<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 mad(8) g112<1>F g111<4,4,1>F g95<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 mad(8) g115<1>F g114<4,4,1>F g95<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 mad(8) g118<1>F g117<4,4,1>F g95<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 mad(8) g127<1>F g126<4,4,1>F g102<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 mad(8) g5<1>F g4<4,4,1>F g102<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 mad(8) g8<1>F g7<4,4,1>F g102<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_197 = fmul ssa_193, ssa_193 mul(8) g119<1>F g115<8,8,1>F g115<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 mad(8) g2<1>F g127<4,4,1>F g109<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 mad(8) g6<1>F g5<4,4,1>F g109<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 mad(8) g23<1>F g8<4,4,1>F g109<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 mad(8) g120<1>F g119<4,4,1>F g112<4,4,1>F g112<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 mad(8) g121<1>F g120<4,4,1>F g118<4,4,1>F g118<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_200 = frsq ssa_199 math rsq(8) g122<1>F g121<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_201 = fmul ssa_190, ssa_200 mul(8) g14<1>F g112<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_202 = fmul ssa_193, ssa_200 mul(8) g15<1>F g115<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_203 = fmul ssa_196, ssa_200 mul(8) g16<1>F g118<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_213 = fmul ssa_209, ssa_202 mul(8) g24<1>F g6<8,8,1>F g15<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 mad(8) g25<1>F g24<4,4,1>F g14<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 mad(8) g26<1>F g25<4,4,1>F g16<4,4,1>F g23<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 mad(8) g27<1>F g2<4,4,1>F g26<4,4,1>F -g14<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 mad(8) g28<1>F g6<4,4,1>F g26<4,4,1>F -g15<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 mad(8) g29<1>F g23<4,4,1>F g26<4,4,1>F -g16<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_219 = fmul ssa_217, ssa_217 mul(8) g30<1>F g28<8,8,1>F g28<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 mad(8) g65<1>F g30<4,4,1>F g27<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_221 = ffma 
ssa_218, ssa_218, ssa_220 mad(8) g66<1>F g65<4,4,1>F g29<4,4,1>F g29<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_222 = frsq ssa_221 math rsq(8) g67<1>F g66<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_223 = fmul ssa_216, ssa_222 mul(8) g32<1>F g27<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_224 = fmul ssa_217, ssa_222 mul(8) g33<1>F g28<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_225 = fmul ssa_218, ssa_222 mul(8) g34<1>F g29<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; URB write send(8) null<1>F g18<8,8,1>F urb 1 SIMD8 write mlen 5 rlen 0 { align1 1Q }; mov(8) g9<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; send(8) null<1>F g9<8,8,1>F urb 4 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g31<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g39<1>F g52<8,8,1>F { align1 1Q compacted }; send(8) null<1>F g31<8,8,1>F urb 6 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g40<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g48<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; send(8) null<1>F g40<8,8,1>F urb 8 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g123<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g124<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g125<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g126<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g127<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; send(8) null<1>F g123<8,8,1>F urb 10 SIMD8 write mlen 5 rlen 0 { align1 1Q EOT }; END B0 UBO Ranges: [0]: block 0 start 3 length 4 [1]: block 1 start 0 length 2 [2]: block 1 start 12 length 1 [3]: (none) 466 NIR instructions NIR (SSA form) for vertex shader: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268) decl_var uniform INTERP_MODE_NONE sampler @1 (0, 0, 76) decl_var shader_in INTERP_MODE_NONE vec3 @2 (VERT_ATTRIB_GENERIC0.xyz, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0) decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0) decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0) decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0) decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0) decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0) decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0) decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */) vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_5 = load_const (0x437f0000 /* 255.000000 */) vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */) vec2 32 
ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */) vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 
ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, -ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 
32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, -ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, 
ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 = load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo (ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 */) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const 
(0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output (ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203 intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */ vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163 intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293 intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0 intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */ intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283 intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* succs: block_0 */ block block_0: } NIR (final form) for vertex shader: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE sampler2D @0 (0, 0, 268) decl_var uniform INTERP_MODE_NONE sampler @1 (0, 0, 76) decl_var shader_in INTERP_MODE_NONE vec3 @2 (VERT_ATTRIB_GENERIC0.xyz, 16, 0) decl_var shader_in INTERP_MODE_NONE vec4 @3 (VERT_ATTRIB_GENERIC1, 17, 0) decl_var shader_in INTERP_MODE_NONE vec4 @4 (VERT_ATTRIB_GENERIC2, 18, 0) decl_var shader_in INTERP_MODE_NONE vec2 @5 (VERT_ATTRIB_GENERIC3.xy, 19, 0) decl_var shader_in INTERP_MODE_NONE uvec4 @6 (VERT_ATTRIB_GENERIC4, 20, 0) decl_var shader_in INTERP_MODE_NONE vec4 @7 (VERT_ATTRIB_GENERIC5, 21, 0) decl_var shader_in INTERP_MODE_NONE vec2 @8 (VERT_ATTRIB_GENERIC6.xy, 22, 0) decl_var shader_out INTERP_MODE_NONE vec4 @9 (VARYING_SLOT_VAR0, 31, 0) decl_var shader_out INTERP_MODE_NONE vec3 @10 (VARYING_SLOT_VAR1.xyz, 32, 0) decl_var shader_out INTERP_MODE_NONE vec4 @11 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_out INTERP_MODE_NONE vec4 @12 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_out INTERP_MODE_NONE vec4 @13 (VARYING_SLOT_VAR4, 35, 0) decl_var shader_out INTERP_MODE_NONE vec4 @14 (VARYING_SLOT_VAR5, 36, 0) decl_var shader_out INTERP_MODE_NONE vec4 @15 (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec4 @16 (VARYING_SLOT_POS, 0, 0) decl_function main returning void impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_2 = load_const (0x00000003 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_3 = load_const (0x40000000 /* 2.000000 */) vec4 32 ssa_4 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_5 = load_const 
(0x437f0000 /* 255.000000 */) vec1 32 ssa_6 = load_const (0x00000002 /* 0.000000 */) vec2 32 ssa_7 = load_const (0x00000002 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_8 = load_const (0x00000001 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_9 = load_const (0x3f000000 /* 0.500000 */) vec3 32 ssa_10 = intrinsic load_input (ssa_0) () (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_11 = intrinsic load_input (ssa_0) () (1, 0) /* base=1 */ /* component=0 */ vec2 32 ssa_12 = intrinsic load_input (ssa_0) () (2, 0) /* base=2 */ /* component=0 */ vec4 32 ssa_13 = intrinsic load_input (ssa_0) () (3, 0) /* base=3 */ /* component=0 */ vec4 32 ssa_14 = intrinsic load_input (ssa_0) () (4, 0) /* base=4 */ /* component=0 */ vec2 32 ssa_15 = intrinsic load_input (ssa_0) () (5, 0) /* base=5 */ /* component=0 */ vec1 32 ssa_16 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_17 = load_const (0x00000198 /* 0.000000 */) vec1 32 ssa_18 = intrinsic load_ubo (ssa_16, ssa_17) () () vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 vec1 32 ssa_20 = ishl ssa_19, ssa_6 vec1 32 ssa_21 = i2f32 ssa_20 vec1 32 ssa_22 = fadd ssa_21, ssa_9 vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x vec1 32 ssa_24 = ffloor ssa_23 vec1 32 ssa_25 = fadd ssa_23, -ssa_24 vec1 32 ssa_26 = load_const (0x0000019c /* 0.000000 */) vec1 32 ssa_27 = intrinsic load_ubo (ssa_16, ssa_26) () () vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y vec2 32 ssa_29 = vec2 ssa_25, ssa_28 vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 vec1 32 ssa_34 = ishl ssa_33, ssa_6 vec1 32 ssa_35 = i2f32 ssa_34 vec1 32 ssa_36 = fadd ssa_35, ssa_9 vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x vec1 32 ssa_38 = ffloor ssa_37 vec1 32 ssa_39 = fadd ssa_37, -ssa_38 vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y vec2 32 ssa_41 = vec2 ssa_39, ssa_40 vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 vec4 32 ssa_51 = txl ssa_41 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 vec1 32 ssa_70 = ishl ssa_69, ssa_6 vec1 32 ssa_71 = i2f32 ssa_70 vec1 32 ssa_72 = fadd ssa_71, ssa_9 vec1 32 ssa_73 = ffma ssa_72, 
ssa_18, ssa_15.x vec1 32 ssa_74 = ffloor ssa_73 vec1 32 ssa_75 = fadd ssa_73, -ssa_74 vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y vec2 32 ssa_77 = vec2 ssa_75, ssa_76 vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 vec1 32 ssa_94 = ishl ssa_93, ssa_6 vec1 32 ssa_95 = i2f32 ssa_94 vec1 32 ssa_96 = fadd ssa_95, ssa_9 vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x vec1 32 ssa_98 = ffloor ssa_97 vec1 32 ssa_99 = fadd ssa_97, -ssa_98 vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y vec2 32 ssa_101 = vec2 ssa_99, ssa_100 vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) vec1 32 ssa_118 = load_const (0xc3000000 /* -128.000000 */) vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 vec1 32 ssa_123 = flt ssa_119, ssa_0 vec1 32 ssa_124 = b2f ssa_123 vec1 32 ssa_125 = flt ssa_120, ssa_0 vec1 32 ssa_126 = b2f ssa_125 vec1 32 ssa_127 = flt ssa_121, ssa_0 vec1 32 ssa_128 = b2f ssa_127 vec1 32 ssa_129 = flt ssa_122, ssa_0 vec1 32 ssa_130 = b2f ssa_129 vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 vec1 32 ssa_135 = load_const (0xc2800000 /* -64.000000 */) vec1 32 ssa_136 = fadd ssa_131, ssa_135 vec1 32 ssa_137 = fadd ssa_132, ssa_135 vec1 32 ssa_138 = fadd ssa_133, ssa_135 vec1 32 ssa_139 = fadd ssa_134, ssa_135 vec1 32 ssa_140 = flt ssa_131, -ssa_135 vec1 32 ssa_141 = b2f ssa_140 vec1 32 ssa_142 = flt ssa_132, -ssa_135 vec1 32 ssa_143 = b2f ssa_142 vec1 32 ssa_144 = flt ssa_133, -ssa_135 vec1 32 ssa_145 = b2f ssa_144 vec1 32 ssa_146 = flt ssa_134, -ssa_135 vec1 32 ssa_147 = b2f ssa_146 vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 
vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 vec1 32 ssa_152 = load_const (0x3c820821 /* 0.015873 */) vec1 32 ssa_153 = fmul ssa_148, ssa_152 vec1 32 ssa_154 = fmul ssa_149, ssa_152 vec1 32 ssa_155 = fmul ssa_150, ssa_152 vec1 32 ssa_156 = fmul ssa_151, ssa_152 vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 vec1 32 ssa_164 = fadd ssa_1, -ssa_153 vec1 32 ssa_165 = fadd ssa_164, -ssa_154 vec1 32 ssa_166 = fmul ssa_154, ssa_154 vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 vec1 32 ssa_169 = frsq ssa_168 vec1 32 ssa_170 = fmul ssa_153, ssa_169 vec1 32 ssa_171 = fmul ssa_154, ssa_169 vec1 32 ssa_172 = fmul ssa_165, ssa_169 vec1 32 ssa_173 = fmul ssa_170, ssa_157 vec1 32 ssa_174 = fmul ssa_171, ssa_158 vec1 32 ssa_175 = fmul ssa_172, ssa_161 vec1 32 ssa_176 = fadd ssa_1, -ssa_155 vec1 32 ssa_177 = fadd ssa_176, -ssa_156 vec1 32 ssa_178 = fmul ssa_156, ssa_156 vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 vec1 32 ssa_181 = frsq ssa_180 vec1 32 ssa_182 = fmul ssa_155, ssa_181 vec1 32 ssa_183 = fmul ssa_156, ssa_181 vec1 32 ssa_184 = fmul ssa_177, ssa_181 vec1 32 ssa_185 = fmul ssa_182, ssa_159 vec1 32 ssa_186 = fmul ssa_183, ssa_160 vec1 32 ssa_187 = fmul ssa_184, ssa_162 vec1 32 ssa_188 = fmul ssa_104, ssa_174 vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 vec1 32 ssa_191 = fmul ssa_110, ssa_174 vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 vec1 32 ssa_194 = fmul ssa_114, ssa_174 vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 vec1 32 ssa_197 = fmul ssa_193, ssa_193 vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 vec1 32 ssa_200 = frsq ssa_199 vec1 32 ssa_201 = fmul ssa_190, ssa_200 vec1 32 ssa_202 = fmul ssa_193, ssa_200 vec1 32 ssa_203 = fmul ssa_196, ssa_200 vec1 32 ssa_204 = fmul ssa_104, ssa_186 vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 vec1 32 ssa_207 = fmul ssa_110, ssa_186 vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 vec1 32 ssa_210 = fmul ssa_114, ssa_186 vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 vec1 32 ssa_213 = fmul ssa_209, ssa_202 vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 vec1 32 ssa_219 = fmul ssa_217, ssa_217 vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 vec1 32 ssa_221 = ffma ssa_218, ssa_218, ssa_220 vec1 32 ssa_222 = frsq ssa_221 vec1 32 ssa_223 = fmul ssa_216, ssa_222 vec1 32 ssa_224 = fmul ssa_217, ssa_222 vec1 32 ssa_225 = fmul ssa_218, ssa_222 vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z vec1 32 ssa_229 = fmul ssa_104, ssa_227 vec1 32 ssa_230 = ffma ssa_103, 
ssa_226, ssa_229 vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 vec1 32 ssa_232 = fadd ssa_231, ssa_106 vec1 32 ssa_233 = fmul ssa_110, ssa_227 vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 vec1 32 ssa_236 = fadd ssa_235, ssa_112 vec1 32 ssa_237 = fmul ssa_114, ssa_227 vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 vec1 32 ssa_240 = fadd ssa_239, ssa_116 vec1 32 ssa_241 = load_const (0x00000080 /* 0.000000 */) vec4 32 ssa_242 = intrinsic load_ubo (ssa_0, ssa_241) () () vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 vec1 32 ssa_247 = load_const (0x00000070 /* 0.000000 */) vec2 32 ssa_248 = intrinsic load_ubo (ssa_0, ssa_247) () () vec1 32 ssa_249 = fadd ssa_244, ssa_248.x vec1 32 ssa_250 = fadd ssa_246, ssa_248.y vec1 32 ssa_251 = load_const (0x000000b0 /* 0.000000 */) vec4 32 ssa_252 = intrinsic load_ubo (ssa_0, ssa_251) () () vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 vec1 32 ssa_257 = load_const (0x000000a0 /* 0.000000 */) vec2 32 ssa_258 = intrinsic load_ubo (ssa_0, ssa_257) () () vec1 32 ssa_259 = fadd ssa_254, ssa_258.x vec1 32 ssa_260 = fadd ssa_256, ssa_258.y vec4 32 ssa_261 = intrinsic load_ubo (ssa_16, ssa_0) () () vec1 32 ssa_262 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_263 = intrinsic load_ubo (ssa_16, ssa_262) () () vec1 32 ssa_264 = load_const (0x00000020 /* 0.000000 */) vec4 32 ssa_265 = intrinsic load_ubo (ssa_16, ssa_264) () () vec1 32 ssa_266 = load_const (0x00000030 /* 0.000000 */) vec4 32 ssa_267 = intrinsic load_ubo (ssa_16, ssa_266) () () vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 vec1 32 ssa_280 = fadd ssa_276, ssa_267.x vec1 32 ssa_281 = fadd ssa_277, ssa_267.y vec1 32 ssa_282 = fadd ssa_278, ssa_267.z vec1 32 ssa_283 = fadd ssa_279, ssa_267.w vec1 32 ssa_284 = load_const (0x00000140 /* 0.000000 */) vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () vec1 32 ssa_286 = load_const (0x00000130 /* 0.000000 */) vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) vec1 32 ssa_295 = load_const (0x000000dc /* 0.000000 */) vec1 32 ssa_296 = intrinsic load_ubo (ssa_0, ssa_295) () () vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 vec1 32 ssa_300 = load_const (0x00000050 /* 0.000000 
*/) vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () vec1 32 ssa_302 = load_const (0x000000d0 /* 0.000000 */) vec3 32 ssa_303 = intrinsic load_ubo (ssa_0, ssa_302) () () vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z vec1 32 ssa_307 = load_const (0x000000c8 /* 0.000000 */) vec1 32 ssa_308 = intrinsic load_ubo (ssa_0, ssa_307) () () vec1 32 ssa_309 = fmul ssa_304, ssa_308 vec1 32 ssa_310 = fmul ssa_305, ssa_308 vec1 32 ssa_311 = fmul ssa_306, ssa_308 vec4 32 ssa_312 = vec4 ssa_249, ssa_250, ssa_259, ssa_260 intrinsic store_output (ssa_312, ssa_0) () (31, 15, 0) /* base=31 */ /* wrmask=xyzw */ /* component=0 */ vec3 32 ssa_313 = vec3 ssa_201, ssa_202, ssa_203 intrinsic store_output (ssa_313, ssa_0) () (32, 7, 0) /* base=32 */ /* wrmask=xyz */ /* component=0 */ vec4 32 ssa_314 = vec4 ssa_223, ssa_224, ssa_225, ssa_163 intrinsic store_output (ssa_314, ssa_0) () (33, 15, 0) /* base=33 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_315 = vec4 ssa_297, ssa_298, ssa_299, ssa_294.w intrinsic store_output (ssa_315, ssa_0) () (34, 15, 0) /* base=34 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_316 = vec4 ssa_232, ssa_236, ssa_240, ssa_293 intrinsic store_output (ssa_316, ssa_0) () (35, 15, 0) /* base=35 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_317 = vec4 ssa_309, ssa_310, ssa_311, ssa_0 intrinsic store_output (ssa_317, ssa_0) () (36, 15, 0) /* base=36 */ /* wrmask=xyzw */ /* component=0 */ intrinsic store_output (ssa_4, ssa_0) () (37, 15, 0) /* base=37 */ /* wrmask=xyzw */ /* component=0 */ vec4 32 ssa_318 = vec4 ssa_280, ssa_281, ssa_282, ssa_283 intrinsic store_output (ssa_318, ssa_0) () (0, 15, 0) /* base=0 */ /* wrmask=xyzw */ /* component=0 */ /* succs: block_0 */ block block_0: } VS Output VUE map (11 slots, SSO) [0] VARYING_SLOT_PSIZ [1] VARYING_SLOT_POS [2] VARYING_SLOT_CLIP_DIST0 [3] VARYING_SLOT_CLIP_DIST1 [4] VARYING_SLOT_VAR0 [5] VARYING_SLOT_VAR1 [6] VARYING_SLOT_VAR2 [7] VARYING_SLOT_VAR3 [8] VARYING_SLOT_VAR4 [9] VARYING_SLOT_VAR5 [10] VARYING_SLOT_VAR6 Native code for unnamed vertex shader (null) SIMD8 shader: 308 instructions. 0 loops. 600 cycles. 0:0 spills:fills. Promoted 4 constants. 
Compacted 4928 to 3184 bytes (35%) START B0 (600 cycles) vec1 32 ssa_19 = iadd ssa_13.x, ssa_6 add(8) g12<1>D g21<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_33 = iadd ssa_13.y, ssa_6 add(8) g67<1>D g22<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_69 = iadd ssa_13.z, ssa_6 add(8) g54<1>D g23<8,8,1>D 2D { align1 1Q compacted }; vec1 32 ssa_93 = iadd ssa_13.w, ssa_6 add(8) g121<1>D g24<8,8,1>D 2D { align1 1Q compacted }; vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) mov(8) g31<1>F g29<8,8,1>F { align1 1Q compacted }; mov(8) g32<1>F g30<8,8,1>F { align1 1Q compacted }; mov(1) g107<1>F 128F { align1 WE_all 1N }; vec1 32 ssa_243 = fmul ssa_242.y, ssa_12.y mul(8) g59<1>F g3.1<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_245 = fmul ssa_242.w, ssa_12.y mul(8) g73<1>F g3.3<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_253 = fmul ssa_252.y, ssa_12.y mul(8) g77<1>F g4.5<0,1,0>F g18<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_255 = fmul ssa_252.w, ssa_12.y mul(8) g79<1>F g4.7<0,1,0>F g18<8,8,1>F { align1 1Q }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () mov(8) g24<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () mov(8) g37<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) mov(8) g35<1>F g29<8,8,1>F { align1 1Q compacted }; mov(8) g36<1>F g30<8,8,1>F { align1 1Q compacted }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () mov(8) g38<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; URB write mov(8) g18<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; vec1 32 ssa_20 = ishl ssa_19, ssa_6 shl(8) g23<1>D g12<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_34 = ishl ssa_33, ssa_6 shl(8) g68<1>D g67<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_70 = ishl ssa_69, ssa_6 shl(8) g55<1>D g54<8,8,1>D 0x00000002UD { align1 1Q }; vec1 32 ssa_94 = ishl ssa_93, ssa_6 shl(8) g122<1>D g121<8,8,1>D 0x00000002UD { align1 1Q }; vec4 32 ssa_117 = txl ssa_15 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g31<1>UW g31<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 3 { align1 1Q }; mov(1) g107.1<1>F 255F { align1 WE_all 1N }; vec1 32 ssa_244 = ffma ssa_242.x, ssa_12.x, ssa_243 mad(8) g60<1>F g59<4,4,1>F g17<4,4,1>F g3.0<0,1,0>F { align16 1Q }; vec1 32 ssa_246 = ffma ssa_242.z, ssa_12.x, ssa_245 mad(8) g74<1>F g73<4,4,1>F g17<4,4,1>F g3.2<0,1,0>F { align16 1Q }; vec1 32 ssa_254 = ffma ssa_252.x, ssa_12.x, ssa_253 mad(8) g78<1>F g77<4,4,1>F g17<4,4,1>F g4.4<0,1,0>F { align16 1Q }; vec1 32 ssa_256 = ffma ssa_252.z, ssa_12.x, ssa_255 mad(8) g80<1>F g79<4,4,1>F g17<4,4,1>F g4.6<0,1,0>F { align16 1Q }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () mov(1) g24.2<1>UD 0x00000014UD { align1 WE_all 1N compacted }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () mov(1) g37.2<1>UD 0x00000010UD { align1 WE_all 1N compacted }; vec4 32 ssa_294 = txl ssa_15 (coord), ssa_0 (lod), ssa_2 (offset), 3 (texture) 0 (sampler) mov(8) g34<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g34.2<1>UD 0x00000300UD { align1 WE_all 1N compacted }; send(8) g49<1>UW g34<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () mov(1) g38.2<1>UD 0x00000004UD { align1 WE_all 1N compacted }; vec1 32 ssa_21 = i2f32 ssa_20 mov(8) g34<1>F g23<8,8,1>D { align1 1Q compacted }; vec1 32 
ssa_35 = i2f32 ssa_34 mov(8) g69<1>F g68<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_71 = i2f32 ssa_70 mov(8) g56<1>F g55<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_95 = i2f32 ssa_94 mov(8) g123<1>F g122<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_226 = fmul ssa_10.x, ssa_117.z mul(8) g100<1>F g9<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_227 = fmul ssa_10.y, ssa_117.z mul(8) g99<1>F g10<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_228 = fmul ssa_10.z, ssa_117.z mul(8) g101<1>F g11<8,8,1>F g33<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_119 = ffma ssa_11.x, ssa_5, ssa_118 mad(8) g67<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g13<4,4,1>F { align16 1Q }; vec1 32 ssa_120 = ffma ssa_11.y, ssa_5, ssa_118 mad(8) g68<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g14<4,4,1>F { align16 1Q }; vec1 32 ssa_121 = ffma ssa_11.z, ssa_5, ssa_118 mad(8) g103<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g15<4,4,1>F { align16 1Q }; vec1 32 ssa_122 = ffma ssa_11.w, ssa_5, ssa_118 mad(8) g104<1>F -g107.0<0,1,0>F g107.1<0,1,0>F g16<4,4,1>F { align16 1Q }; vec1 32 ssa_249 = fadd ssa_244, ssa_248.x add(8) g10<1>F g60<8,8,1>F g2.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_250 = fadd ssa_246, ssa_248.y add(8) g11<1>F g74<8,8,1>F g2.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_259 = fadd ssa_254, ssa_258.x add(8) g12<1>F g78<8,8,1>F g4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_260 = fadd ssa_256, ssa_258.y add(8) g13<1>F g80<8,8,1>F g4.1<0,1,0>F { align1 1Q compacted }; vec3 32 ssa_285 = intrinsic load_ubo (ssa_16, ssa_284) () () send(16) g23<1>UD g24<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_287 = intrinsic load_ubo (ssa_16, ssa_286) () () send(16) g97<1>UD g37<8,8,1>UD const (1, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec3 32 ssa_301 = intrinsic load_ubo (ssa_6, ssa_300) () () send(16) g2<1>UD g38<8,8,1>UD const (2, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; vec1 32 ssa_22 = fadd ssa_21, ssa_9 add(8) g35<1>F g34<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_36 = fadd ssa_35, ssa_9 add(8) g70<1>F g69<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_72 = fadd ssa_71, ssa_9 add(8) g57<1>F g56<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_96 = fadd ssa_95, ssa_9 add(8) g124<1>F g123<8,8,1>F 0.5F { align1 1Q }; vec1 32 ssa_123 = flt ssa_119, ssa_0 cmp.l.f0(8) g69<1>F g67<8,8,1>F 0F { align1 1Q compacted }; mov(1) g107.2<1>F 1F { align1 WE_all 1N }; vec1 32 ssa_304 = fmul ssa_301.x, ssa_303.x mul(8) g4<1>F g2.4<0,1,0>F g5.4<0,1,0>F { align1 1Q }; vec1 32 ssa_305 = fmul ssa_301.y, ssa_303.y mul(8) g31<1>F g2.5<0,1,0>F g5.5<0,1,0>F { align1 1Q }; vec1 32 ssa_306 = fmul ssa_301.z, ssa_303.z mul(8) g32<1>F g2.6<0,1,0>F g5.6<0,1,0>F { align1 1Q }; vec1 32 ssa_23 = ffma ssa_22, ssa_18, ssa_15.x mad(8) g65<1>F g29<4,4,1>F g8.6<0,1,0>F g35<4,4,1>F { align16 1Q }; vec1 32 ssa_37 = ffma ssa_36, ssa_18, ssa_15.x mad(8) g72<1>F g29<4,4,1>F g8.6<0,1,0>F g70<4,4,1>F { align16 1Q }; vec1 32 ssa_73 = ffma ssa_72, ssa_18, ssa_15.x mad(8) g60<1>F g29<4,4,1>F g8.6<0,1,0>F g57<4,4,1>F { align16 1Q }; vec1 32 ssa_97 = ffma ssa_96, ssa_18, ssa_15.x mad(8) g125<1>F g29<4,4,1>F g8.6<0,1,0>F g124<4,4,1>F { align16 1Q }; vec1 32 ssa_125 = flt ssa_120, ssa_0 cmp.l.f0(8) g71<1>F g68<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_124 = b2f ssa_123 mov(8) g70<1>F -g69<8,8,1>D { align1 1Q compacted }; mov(1) g107.3<1>F 2F { align1 WE_all 1N }; vec1 32 ssa_309 = fmul ssa_304, ssa_308 mul(8) g45<1>F g4<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_310 = fmul ssa_305, ssa_308 mul(8) g46<1>F g31<8,8,1>F 
g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_311 = fmul ssa_306, ssa_308 mul(8) g47<1>F g32<8,8,1>F g5.2<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_24 = ffloor ssa_23 rndd(8) g66<1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_38 = ffloor ssa_37 rndd(8) g61<1>F g72<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_74 = ffloor ssa_73 rndd(8) g73<1>F g60<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_98 = ffloor ssa_97 rndd(8) g126<1>F g125<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_127 = flt ssa_121, ssa_0 cmp.l.f0(8) g63<1>F g103<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_126 = b2f ssa_125 mov(8) g62<1>F -g71<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_131 = fadd abs(ssa_119), -ssa_124 add(8) g105<1>F (abs)g67<8,8,1>F -g70<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_161 = ffma -ssa_124, ssa_3, ssa_1 mad(8) g113<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g70<4,4,1>F { align16 1Q }; vec1 32 ssa_297 = flrp ssa_1, ssa_294.x, ssa_296 lrp(8) g36<1>F g5.7<0,1,0>F g49<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_298 = flrp ssa_1, ssa_294.y, ssa_296 lrp(8) g37<1>F g5.7<0,1,0>F g50<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_299 = flrp ssa_1, ssa_294.z, ssa_296 lrp(8) g38<1>F g5.7<0,1,0>F g51<4,4,1>F g107.2<0,1,0>F { align16 1Q }; vec1 32 ssa_25 = fadd ssa_23, -ssa_24 add(8) g70<1>F g65<8,8,1>F -g66<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_28 = ffma ssa_24, ssa_27, ssa_15.y mad(8) g71<1>F g30<4,4,1>F g8.7<0,1,0>F g66<4,4,1>F { align16 1Q }; vec1 32 ssa_39 = fadd ssa_37, -ssa_38 add(8) g58<1>F g72<8,8,1>F -g61<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_40 = ffma ssa_38, ssa_27, ssa_15.y mad(8) g59<1>F g30<4,4,1>F g8.7<0,1,0>F g61<4,4,1>F { align16 1Q }; vec1 32 ssa_75 = fadd ssa_73, -ssa_74 add(8) g82<1>F g60<8,8,1>F -g73<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_76 = ffma ssa_74, ssa_27, ssa_15.y mad(8) g83<1>F g30<4,4,1>F g8.7<0,1,0>F g73<4,4,1>F { align16 1Q }; vec1 32 ssa_99 = fadd ssa_97, -ssa_98 add(8) g94<1>F g125<8,8,1>F -g126<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_100 = ffma ssa_98, ssa_27, ssa_15.y mad(8) g95<1>F g30<4,4,1>F g8.7<0,1,0>F g126<4,4,1>F { align16 1Q }; vec1 32 ssa_129 = flt ssa_122, ssa_0 cmp.l.f0(8) g110<1>F g104<8,8,1>F 0F { align1 1Q compacted }; vec1 32 ssa_128 = b2f ssa_127 mov(8) g108<1>F -g63<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_132 = fadd abs(ssa_120), -ssa_126 add(8) g106<1>F (abs)g68<8,8,1>F -g62<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_136 = fadd ssa_131, ssa_135 add(8) g109<1>F g105<8,8,1>F -64F { align1 1Q }; vec4 32 ssa_30 = txl ssa_29 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g2<1>UW g70<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_31 = txl ssa_29 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g65<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_32 = txl ssa_29 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g69<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g69.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g69<1>UW g69<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_42 = txl ssa_41 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g61<1>UW g58<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_51 = txl ssa_41 (coord), 
ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g53<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_52 = txl ssa_41 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g57<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g57.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g57<1>UW g57<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_78 = txl ssa_77 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g73<1>UW g82<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_83 = txl ssa_77 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g77<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_84 = txl ssa_77 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g81<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g81.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g81<1>UW g81<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_102 = txl ssa_101 (coord), ssa_0 (lod), 3 (texture) 0 (sampler) send(8) g85<1>UW g94<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; vec4 32 ssa_107 = txl ssa_101 (coord), ssa_0 (lod), ssa_8 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000100UD { align1 WE_all 1N compacted }; send(8) g89<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec4 32 ssa_108 = txl ssa_101 (coord), ssa_0 (lod), ssa_7 (offset), 3 (texture) 0 (sampler) mov(8) g93<1>F g0<8,8,1>F { align1 WE_all 1Q compacted }; mov(1) g93.2<1>UD 0x00000200UD { align1 WE_all 1N compacted }; send(8) g93<1>UW g93<8,8,1>F sampler sample_lz SIMD8 Surface = 3 Sampler = 0 mlen 3 rlen 4 { align1 1Q }; vec1 32 ssa_140 = flt ssa_131, -ssa_135 cmp.l.f0(8) g124<1>F g105<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_130 = b2f ssa_129 mov(8) g111<1>F -g110<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_133 = fadd abs(ssa_121), -ssa_128 add(8) g102<1>F (abs)g103<8,8,1>F -g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_162 = ffma -ssa_128, ssa_3, ssa_1 mad(8) g114<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g108<4,4,1>F { align16 1Q }; vec1 32 ssa_137 = fadd ssa_132, ssa_135 add(8) g110<1>F g106<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_43 = fmul ssa_42.x, ssa_14.y mul(8) g103<1>F g61<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_44 = fmul ssa_42.y, ssa_14.y mul(8) g105<1>F g62<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_45 = fmul ssa_42.z, ssa_14.y mul(8) g112<1>F g63<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_46 = fmul ssa_42.w, ssa_14.y mul(8) g115<1>F g64<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_53 = fmul ssa_51.x, ssa_14.y mul(8) g120<1>F g53<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_54 = fmul ssa_51.y, ssa_14.y mul(8) g121<1>F g54<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_55 = fmul ssa_51.z, ssa_14.y mul(8) g122<1>F g55<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_56 = fmul ssa_51.w, ssa_14.y mul(8) g123<1>F g56<8,8,1>F 
g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_61 = fmul ssa_52.x, ssa_14.y mul(8) g8<1>F g57<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_62 = fmul ssa_52.y, ssa_14.y mul(8) g29<1>F g58<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_63 = fmul ssa_52.z, ssa_14.y mul(8) g30<1>F g59<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_64 = fmul ssa_52.w, ssa_14.y mul(8) g61<1>F g60<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_142 = flt ssa_132, -ssa_135 cmp.l.f0(8) g54<1>F g106<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_141 = b2f ssa_140 mov(8) g26<1>F -g124<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_134 = fadd abs(ssa_122), -ssa_130 add(8) g108<1>F (abs)g104<8,8,1>F -g111<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_163 = ffma -ssa_130, ssa_3, ssa_1 mad(8) g35<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g111<4,4,1>F { align16 1Q }; vec1 32 ssa_138 = fadd ssa_133, ssa_135 add(8) g111<1>F g102<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_47 = ffma ssa_30.x, ssa_14.x, ssa_43 mad(8) g116<1>F g103<4,4,1>F g25<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_48 = ffma ssa_30.y, ssa_14.x, ssa_44 mad(8) g117<1>F g105<4,4,1>F g25<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_49 = ffma ssa_30.z, ssa_14.x, ssa_45 mad(8) g118<1>F g112<4,4,1>F g25<4,4,1>F g4<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_50 = ffma ssa_30.w, ssa_14.x, ssa_46 mad(8) g119<1>F g115<4,4,1>F g25<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_57 = ffma ssa_31.x, ssa_14.x, ssa_53 mad(8) g124<1>F g120<4,4,1>F g25<4,4,1>F g65<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_58 = ffma ssa_31.y, ssa_14.x, ssa_54 mad(8) g125<1>F g121<4,4,1>F g25<4,4,1>F g66<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_59 = ffma ssa_31.z, ssa_14.x, ssa_55 mad(8) g126<1>F g122<4,4,1>F g25<4,4,1>F g67<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_60 = ffma ssa_31.w, ssa_14.x, ssa_56 mad(8) g127<1>F g123<4,4,1>F g25<4,4,1>F g68<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_65 = ffma ssa_32.x, ssa_14.x, ssa_61 mad(8) g62<1>F g8<4,4,1>F g25<4,4,1>F g69<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_66 = ffma ssa_32.y, ssa_14.x, ssa_62 mad(8) g63<1>F g29<4,4,1>F g25<4,4,1>F g70<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_67 = ffma ssa_32.z, ssa_14.x, ssa_63 mad(8) g64<1>F g30<4,4,1>F g25<4,4,1>F g71<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_68 = ffma ssa_32.w, ssa_14.x, ssa_64 mad(8) g53<1>F g61<4,4,1>F g25<4,4,1>F g72<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_144 = flt ssa_133, -ssa_135 cmp.l.f0(8) g56<1>F g102<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_143 = b2f ssa_142 mov(8) g55<1>F -g54<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_148 = fadd abs(ssa_136), -ssa_141 add(8) g60<1>F (abs)g109<8,8,1>F -g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_157 = ffma -ssa_141, ssa_3, ssa_1 mad(8) g112<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g26<4,4,1>F { align16 1Q }; vec1 32 ssa_139 = fadd ssa_134, ssa_135 add(8) g121<1>F g108<8,8,1>F -64F { align1 1Q }; vec1 32 ssa_79 = ffma ssa_78.x, ssa_14.z, ssa_47 mad(8) g103<1>F g116<4,4,1>F g27<4,4,1>F g73<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_80 = ffma ssa_78.y, ssa_14.z, ssa_48 mad(8) g104<1>F g117<4,4,1>F g27<4,4,1>F g74<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_81 = ffma ssa_78.z, ssa_14.z, ssa_49 mad(8) g105<1>F g118<4,4,1>F g27<4,4,1>F g75<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_82 = ffma ssa_78.w, ssa_14.z, ssa_50 mad(8) g106<1>F g119<4,4,1>F g27<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_85 = ffma ssa_83.x, ssa_14.z, ssa_57 mad(8) 
g102<1>F g124<4,4,1>F g27<4,4,1>F g77<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_86 = ffma ssa_83.y, ssa_14.z, ssa_58 mad(8) g109<1>F g125<4,4,1>F g27<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_87 = ffma ssa_83.z, ssa_14.z, ssa_59 mad(8) g115<1>F g126<4,4,1>F g27<4,4,1>F g79<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_88 = ffma ssa_83.w, ssa_14.z, ssa_60 mad(8) g116<1>F g127<4,4,1>F g27<4,4,1>F g80<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_89 = ffma ssa_84.x, ssa_14.z, ssa_65 mad(8) g117<1>F g62<4,4,1>F g27<4,4,1>F g81<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_90 = ffma ssa_84.y, ssa_14.z, ssa_66 mad(8) g118<1>F g63<4,4,1>F g27<4,4,1>F g82<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_91 = ffma ssa_84.z, ssa_14.z, ssa_67 mad(8) g119<1>F g64<4,4,1>F g27<4,4,1>F g83<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_92 = ffma ssa_84.w, ssa_14.z, ssa_68 mad(8) g120<1>F g53<4,4,1>F g27<4,4,1>F g84<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_146 = flt ssa_134, -ssa_135 cmp.l.f0(8) g58<1>F g108<8,8,1>F 64F { align1 1Q }; vec1 32 ssa_145 = b2f ssa_144 mov(8) g57<1>F -g56<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_149 = fadd abs(ssa_137), -ssa_143 add(8) g73<1>F (abs)g110<8,8,1>F -g55<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_158 = ffma -ssa_143, ssa_3, ssa_1 mad(8) g80<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g55<4,4,1>F { align16 1Q }; vec1 32 ssa_153 = fmul ssa_148, ssa_152 mul(8) g76<1>F g60<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_103 = ffma ssa_102.x, ssa_14.w, ssa_79 mad(8) g127<1>F g103<4,4,1>F g28<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_104 = ffma ssa_102.y, ssa_14.w, ssa_80 mad(8) g2<1>F g104<4,4,1>F g28<4,4,1>F g86<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_105 = ffma ssa_102.z, ssa_14.w, ssa_81 mad(8) g3<1>F g105<4,4,1>F g28<4,4,1>F g87<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_106 = ffma ssa_102.w, ssa_14.w, ssa_82 mad(8) g4<1>F g106<4,4,1>F g28<4,4,1>F g88<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_109 = ffma ssa_107.x, ssa_14.w, ssa_85 mad(8) g5<1>F g102<4,4,1>F g28<4,4,1>F g89<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_110 = ffma ssa_107.y, ssa_14.w, ssa_86 mad(8) g8<1>F g109<4,4,1>F g28<4,4,1>F g90<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_111 = ffma ssa_107.z, ssa_14.w, ssa_87 mad(8) g25<1>F g115<4,4,1>F g28<4,4,1>F g91<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_112 = ffma ssa_107.w, ssa_14.w, ssa_88 mad(8) g26<1>F g116<4,4,1>F g28<4,4,1>F g92<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_113 = ffma ssa_108.x, ssa_14.w, ssa_89 mad(8) g27<1>F g117<4,4,1>F g28<4,4,1>F g93<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_114 = ffma ssa_108.y, ssa_14.w, ssa_90 mad(8) g29<1>F g118<4,4,1>F g28<4,4,1>F g94<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_115 = ffma ssa_108.z, ssa_14.w, ssa_91 mad(8) g30<1>F g119<4,4,1>F g28<4,4,1>F g95<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_116 = ffma ssa_108.w, ssa_14.w, ssa_92 mad(8) g65<1>F g120<4,4,1>F g28<4,4,1>F g96<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_147 = b2f ssa_146 mov(8) g59<1>F -g58<8,8,1>D { align1 1Q compacted }; vec1 32 ssa_150 = fadd abs(ssa_138), -ssa_145 add(8) g74<1>F (abs)g111<8,8,1>F -g57<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_159 = ffma -ssa_145, ssa_3, ssa_1 mad(8) g81<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g57<4,4,1>F { align16 1Q }; vec1 32 ssa_154 = fmul ssa_149, ssa_152 mul(8) g77<1>F g73<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_164 = fadd ssa_1, -ssa_153 add(8) g84<1>F -g76<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_229 = fmul ssa_104, ssa_227 
mul(8) g71<1>F g2<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_233 = fmul ssa_110, ssa_227 mul(8) g63<1>F g8<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_237 = fmul ssa_114, ssa_227 mul(8) g55<1>F g29<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_151 = fadd abs(ssa_139), -ssa_147 add(8) g75<1>F (abs)g121<8,8,1>F -g59<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_160 = ffma -ssa_147, ssa_3, ssa_1 mad(8) g82<1>F g107.2<0,1,0>F g107.3<0,1,0>F -g59<4,4,1>F { align16 1Q }; vec1 32 ssa_155 = fmul ssa_150, ssa_152 mul(8) g78<1>F g74<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_166 = fmul ssa_154, ssa_154 mul(8) g86<1>F g77<8,8,1>F g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_165 = fadd ssa_164, -ssa_154 add(8) g85<1>F g84<8,8,1>F -g77<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_230 = ffma ssa_103, ssa_226, ssa_229 mad(8) g72<1>F g71<4,4,1>F g100<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_234 = ffma ssa_109, ssa_226, ssa_233 mad(8) g64<1>F g63<4,4,1>F g100<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_238 = ffma ssa_113, ssa_226, ssa_237 mad(8) g56<1>F g55<4,4,1>F g100<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_156 = fmul ssa_151, ssa_152 mul(8) g79<1>F g75<8,8,1>F 0.015873F { align1 1Q }; vec1 32 ssa_176 = fadd ssa_1, -ssa_155 add(8) g96<1>F -g78<8,8,1>F 1F { align1 1Q }; vec1 32 ssa_167 = ffma ssa_153, ssa_153, ssa_166 mad(8) g87<1>F g86<4,4,1>F g76<4,4,1>F g76<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_231 = ffma ssa_105, ssa_228, ssa_230 mad(8) g61<1>F g72<4,4,1>F g101<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_235 = ffma ssa_111, ssa_228, ssa_234 mad(8) g53<1>F g64<4,4,1>F g101<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_239 = ffma ssa_115, ssa_228, ssa_238 mad(8) g57<1>F g56<4,4,1>F g101<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_178 = fmul ssa_156, ssa_156 mul(8) g99<1>F g79<8,8,1>F g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_177 = fadd ssa_176, -ssa_156 add(8) g100<1>F g96<8,8,1>F -g79<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_168 = ffma ssa_165, ssa_165, ssa_167 mad(8) g88<1>F g87<4,4,1>F g85<4,4,1>F g85<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_232 = fadd ssa_231, ssa_106 add(8) g41<1>F g61<8,8,1>F g4<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_236 = fadd ssa_235, ssa_112 add(8) g42<1>F g53<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_240 = fadd ssa_239, ssa_116 add(8) g43<1>F g57<8,8,1>F g65<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_179 = ffma ssa_155, ssa_155, ssa_178 mad(8) g101<1>F g99<4,4,1>F g78<4,4,1>F g78<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_169 = frsq ssa_168 math rsq(8) g89<1>F g88<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_288 = fadd ssa_232, -ssa_287.x add(8) g102<1>F g41<8,8,1>F -g98.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_268 = fmul ssa_263.x, ssa_236 mul(8) g83<1>F g6.4<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_269 = fmul ssa_263.y, ssa_236 mul(8) g84<1>F g6.5<0,1,0>F g42<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_270 = fmul ssa_263.z, ssa_236 mul(8) g86<1>F g6.6<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_271 = fmul ssa_263.w, ssa_236 mul(8) g87<1>F g6.7<0,1,0>F g42<8,8,1>F { align1 1Q }; vec1 32 ssa_289 = fadd ssa_236, -ssa_287.y add(8) g108<1>F g42<8,8,1>F -g98.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_290 = fadd ssa_240, -ssa_287.z add(8) g109<1>F g43<8,8,1>F -g98.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_180 = ffma ssa_177, ssa_177, ssa_179 mad(8) g107<1>F 
g101<4,4,1>F g100<4,4,1>F g100<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_170 = fmul ssa_153, ssa_169 mul(8) g90<1>F g76<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_171 = fmul ssa_154, ssa_169 mul(8) g91<1>F g77<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_172 = fmul ssa_165, ssa_169 mul(8) g92<1>F g85<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_272 = ffma ssa_261.x, ssa_232, ssa_268 mad(8) g88<1>F g83<4,4,1>F g41<4,4,1>F g6.0<0,1,0>F { align16 1Q }; vec1 32 ssa_273 = ffma ssa_261.y, ssa_232, ssa_269 mad(8) g89<1>F g84<4,4,1>F g41<4,4,1>F g6.1<0,1,0>F { align16 1Q }; vec1 32 ssa_274 = ffma ssa_261.z, ssa_232, ssa_270 mad(8) g96<1>F g86<4,4,1>F g41<4,4,1>F g6.2<0,1,0>F { align16 1Q }; vec1 32 ssa_275 = ffma ssa_261.w, ssa_232, ssa_271 mad(8) g97<1>F g87<4,4,1>F g41<4,4,1>F g6.3<0,1,0>F { align16 1Q }; vec1 32 ssa_291 = fmul ssa_285.y, ssa_289 mul(8) g110<1>F g23.1<0,1,0>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_181 = frsq ssa_180 math rsq(8) g103<1>F g107<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_173 = fmul ssa_170, ssa_157 mul(8) g93<1>F g90<8,8,1>F g112<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_174 = fmul ssa_171, ssa_158 mul(8) g94<1>F g91<8,8,1>F g80<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_175 = fmul ssa_172, ssa_161 mul(8) g95<1>F g92<8,8,1>F g113<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_276 = ffma ssa_265.x, ssa_240, ssa_272 mad(8) g98<1>F g88<4,4,1>F g43<4,4,1>F g7.0<0,1,0>F { align16 1Q }; vec1 32 ssa_277 = ffma ssa_265.y, ssa_240, ssa_273 mad(8) g99<1>F g89<4,4,1>F g43<4,4,1>F g7.1<0,1,0>F { align16 1Q }; vec1 32 ssa_278 = ffma ssa_265.z, ssa_240, ssa_274 mad(8) g101<1>F g96<4,4,1>F g43<4,4,1>F g7.2<0,1,0>F { align16 1Q }; vec1 32 ssa_279 = ffma ssa_265.w, ssa_240, ssa_275 mad(8) g107<1>F g97<4,4,1>F g43<4,4,1>F g7.3<0,1,0>F { align16 1Q }; vec1 32 ssa_292 = ffma ssa_285.x, ssa_288, ssa_291 mad(8) g111<1>F g110<4,4,1>F g102<4,4,1>F g23.0<0,1,0>F { align16 1Q }; vec1 32 ssa_182 = fmul ssa_155, ssa_181 mul(8) g104<1>F g78<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_183 = fmul ssa_156, ssa_181 mul(8) g105<1>F g79<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_184 = fmul ssa_177, ssa_181 mul(8) g106<1>F g100<8,8,1>F g103<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_188 = fmul ssa_104, ssa_174 mul(8) g110<1>F g2<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_191 = fmul ssa_110, ssa_174 mul(8) g113<1>F g8<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_194 = fmul ssa_114, ssa_174 mul(8) g116<1>F g29<8,8,1>F g94<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_280 = fadd ssa_276, ssa_267.x add(8) g19<1>F g98<8,8,1>F g7.4<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_281 = fadd ssa_277, ssa_267.y add(8) g20<1>F g99<8,8,1>F g7.5<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_282 = fadd ssa_278, ssa_267.z add(8) g21<1>F g101<8,8,1>F g7.6<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_283 = fadd ssa_279, ssa_267.w add(8) g22<1>F g107<8,8,1>F g7.7<0,1,0>F { align1 1Q compacted }; vec1 32 ssa_293 = ffma ssa_285.z, ssa_290, ssa_292 mad(8) g44<1>F g111<4,4,1>F g109<4,4,1>F g23.2<0,1,0>F { align16 1Q }; vec1 32 ssa_185 = fmul ssa_182, ssa_159 mul(8) g102<1>F g104<8,8,1>F g81<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_186 = fmul ssa_183, ssa_160 mul(8) g108<1>F g105<8,8,1>F g82<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_187 = fmul ssa_184, ssa_162 mul(8) g109<1>F g106<8,8,1>F g114<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_189 = ffma ssa_103, ssa_173, ssa_188 mad(8) g111<1>F 
g110<4,4,1>F g93<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_192 = ffma ssa_109, ssa_173, ssa_191 mad(8) g114<1>F g113<4,4,1>F g93<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_195 = ffma ssa_113, ssa_173, ssa_194 mad(8) g117<1>F g116<4,4,1>F g93<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_204 = fmul ssa_104, ssa_186 mul(8) g126<1>F g2<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_207 = fmul ssa_110, ssa_186 mul(8) g4<1>F g8<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_210 = fmul ssa_114, ssa_186 mul(8) g7<1>F g29<8,8,1>F g108<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_190 = ffma ssa_105, ssa_175, ssa_189 mad(8) g112<1>F g111<4,4,1>F g95<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_193 = ffma ssa_111, ssa_175, ssa_192 mad(8) g115<1>F g114<4,4,1>F g95<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_196 = ffma ssa_115, ssa_175, ssa_195 mad(8) g118<1>F g117<4,4,1>F g95<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_205 = ffma ssa_103, ssa_185, ssa_204 mad(8) g127<1>F g126<4,4,1>F g102<4,4,1>F g127<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_208 = ffma ssa_109, ssa_185, ssa_207 mad(8) g5<1>F g4<4,4,1>F g102<4,4,1>F g5<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_211 = ffma ssa_113, ssa_185, ssa_210 mad(8) g8<1>F g7<4,4,1>F g102<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_197 = fmul ssa_193, ssa_193 mul(8) g119<1>F g115<8,8,1>F g115<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_206 = ffma ssa_105, ssa_187, ssa_205 mad(8) g2<1>F g127<4,4,1>F g109<4,4,1>F g3<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_209 = ffma ssa_111, ssa_187, ssa_208 mad(8) g6<1>F g5<4,4,1>F g109<4,4,1>F g25<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_212 = ffma ssa_115, ssa_187, ssa_211 mad(8) g23<1>F g8<4,4,1>F g109<4,4,1>F g30<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_198 = ffma ssa_190, ssa_190, ssa_197 mad(8) g120<1>F g119<4,4,1>F g112<4,4,1>F g112<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_199 = ffma ssa_196, ssa_196, ssa_198 mad(8) g121<1>F g120<4,4,1>F g118<4,4,1>F g118<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_200 = frsq ssa_199 math rsq(8) g122<1>F g121<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_201 = fmul ssa_190, ssa_200 mul(8) g14<1>F g112<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_202 = fmul ssa_193, ssa_200 mul(8) g15<1>F g115<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_203 = fmul ssa_196, ssa_200 mul(8) g16<1>F g118<8,8,1>F g122<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_213 = fmul ssa_209, ssa_202 mul(8) g24<1>F g6<8,8,1>F g15<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_214 = ffma ssa_206, ssa_201, ssa_213 mad(8) g25<1>F g24<4,4,1>F g14<4,4,1>F g2<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_215 = ffma ssa_212, ssa_203, ssa_214 mad(8) g26<1>F g25<4,4,1>F g16<4,4,1>F g23<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_216 = ffma -ssa_201, ssa_215, ssa_206 mad(8) g27<1>F g2<4,4,1>F g26<4,4,1>F -g14<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_217 = ffma -ssa_202, ssa_215, ssa_209 mad(8) g28<1>F g6<4,4,1>F g26<4,4,1>F -g15<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_218 = ffma -ssa_203, ssa_215, ssa_212 mad(8) g29<1>F g23<4,4,1>F g26<4,4,1>F -g16<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_219 = fmul ssa_217, ssa_217 mul(8) g30<1>F g28<8,8,1>F g28<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_220 = ffma ssa_216, ssa_216, ssa_219 mad(8) g65<1>F g30<4,4,1>F g27<4,4,1>F g27<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_221 = ffma 
ssa_218, ssa_218, ssa_220 mad(8) g66<1>F g65<4,4,1>F g29<4,4,1>F g29<4,4,1>F { align16 1Q compacted }; vec1 32 ssa_222 = frsq ssa_221 math rsq(8) g67<1>F g66<8,8,1>F null<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_223 = fmul ssa_216, ssa_222 mul(8) g32<1>F g27<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_224 = fmul ssa_217, ssa_222 mul(8) g33<1>F g28<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; vec1 32 ssa_225 = fmul ssa_218, ssa_222 mul(8) g34<1>F g29<8,8,1>F g67<8,8,1>F { align1 1Q compacted }; URB write send(8) null<1>F g18<8,8,1>F urb 1 SIMD8 write mlen 5 rlen 0 { align1 1Q }; mov(8) g9<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; send(8) null<1>F g9<8,8,1>F urb 4 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g31<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g39<1>F g52<8,8,1>F { align1 1Q compacted }; send(8) null<1>F g31<8,8,1>F urb 6 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g40<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g48<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; send(8) null<1>F g40<8,8,1>F urb 8 SIMD8 write mlen 9 rlen 0 { align1 1Q }; mov(8) g123<1>UD g1<8,8,1>UD { align1 WE_all 1Q compacted }; mov(8) g124<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g125<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g126<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; mov(8) g127<1>F [0F, 0F, 0F, 0F]VF { align1 1Q compacted }; send(8) null<1>F g123<8,8,1>F urb 10 SIMD8 write mlen 5 rlen 0 { align1 1Q EOT }; END B0 /home/jason/rundota.sh: line 39: 30765 Killed "${DOTA2_BIN}" ${FLAGS}