000b:fixme:winediag:start_process Wine Staging 4.4 is a testing version containing experimental patches.
000b:fixme:winediag:start_process Please mention your exact version when filing bug reports on winehq.org.
002a:fixme:heap:RtlSetHeapInformation 0x310000 0 0x22e730 4 stub
002a:fixme:ntdll:NtQuerySystemInformation returning fake driver list
002a:fixme:heap:RtlSetHeapInformation 0x440000 0 0x22dce0 4 stub
002a:fixme:thread:create_user_shared_data_thread Creating user shared data update thread.
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002f:fixme:ntdll:NtQuerySystemInformation returning fake driver list
002f:fixme:ntdll:NtQuerySystemInformation returning fake driver list
002f:fixme:ntdll:NtQuerySystemInformation returning fake driver list
002f:fixme:ntdll:NtQuerySystemInformation returning fake driver list
002f:fixme:ntdll:NtQuerySystemInformation returning fake driver list
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002f:fixme:d3d:wined3d_adapter_init_gl_caps A set of 3336696 devices is not supported.
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002f:fixme:heap:GetPhysicallyInstalledSystemMemory stub: 0x32f640
002f:fixme:winsock:WSAEnumNameSpaceProvidersW (0x31a0a0 0x32dd64) Stub!
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0031:fixme:ntdll:NtQuerySystemInformation returning fake driver list
0031:fixme:ntdll:NtQuerySystemInformation returning fake driver list
0031:fixme:ntdll:NtQuerySystemInformation returning fake driver list
0031:fixme:ntdll:NtQuerySystemInformation returning fake driver list
0031:fixme:ntdll:NtQuerySystemInformation returning fake driver list
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0031:fixme:kernelbase:AppPolicyGetProcessTerminationMethod 0xfffffffa, 0x32fe2c
002f:fixme:shcore:SetCurrentProcessExplicitAppUserModelID L"Ubisoft.UPC.MainProcess": stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
003d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0036:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0036:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0036:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0036:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0036:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0036:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0036:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
0036:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002d:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
002f:fixme:process:SetProcessDEPPolicy (3): stub
002f:fixme:heap:RtlSetHeapInformation (nil) 1 (nil) 0 stub
002f:fixme:ntdll:EtwEventRegister ({d2d578d9-2936-45b6-a09f-30e32715f42d}, 0x462abc0, 0x7fcf0d0, 0x800df48) stub.
0040:fixme:dwrite:get_name_record_codepage encoding 20 not handled, platform 1.
0040:fixme:dwrite:get_name_record_codepage encoding 20 not handled, platform 1.
0040:fixme:nls:GetThreadPreferredUILanguages 00000038, 0xca6fcc4, (nil) 0xca6fcc0
0040:fixme:nls:get_dummy_preferred_ui_language (0x38 0xca6fcc4 (nil) 0xca6fcc0) returning a dummy value (current locale)
0040:fixme:nls:GetThreadPreferredUILanguages 00000038, 0xca6fcc4, 0x885e9c8 0xca6fcc0
0040:fixme:nls:get_dummy_preferred_ui_language (0x38 0xca6fcc4 0x885e9c8 0xca6fcc0) returning a dummy value (current locale)
0040:fixme:winsock:WSALookupServiceBeginW (0xca6fb00 0x00000ff0 0xca6fb3c) Stub!
[0325/233723.021:ERROR:network_change_notifier_win.cc(157)] WSALookupServiceBegin failed with: 8
0040:fixme:iphlpapi:NotifyAddrChange (Handle 0xca6fd1c, overlapped 0x885f2dc): stub
0040:fixme:win:RegisterDeviceNotificationW (hwnd=0x30066, filter=0xca6fc84,flags=0x00000000) returns a fake device notification handle!
0040:fixme:win:RegisterDeviceNotificationW (hwnd=0x30066, filter=0xca6fc84,flags=0x00000000) returns a fake device notification handle!
0040:fixme:win:GetDisplayConfigBufferSizes (0x2 0xca6fa3c 0xca6fa38): stub
0047:fixme:file:FindFirstFileExW flags not implemented 0x00000002
0047:fixme:file:FindFirstFileExW flags not implemented 0x00000002
0040:fixme:win:GetDisplayConfigBufferSizes (0x2 0xca6f75c 0xca6f758): stub
003f:fixme:wlanapi:WlanEnumInterfaces (0x1, (nil), 0xc95f798) semi-stub
004c:fixme:ntdll:NtLockFile I/O completion on lock not implemented yet
0040:fixme:rawinput:RegisterRawInputDevices Unhandled flags 0x100 for device 0.
0040:fixme:rawinput:RegisterRawInputDevices Unhandled flags 0x2100 for device 1.
0040:fixme:rawinput:RegisterRawInputDevices Unhandled flags 0x100 for device 2.
0053:fixme:process:SetProcessDEPPolicy (3): stub
0053:fixme:heap:RtlSetHeapInformation (nil) 1 (nil) 0 stub
0053:fixme:ntdll:EtwEventRegister ({d2d578d9-2936-45b6-a09f-30e32715f42d}, 0x114dabc0, 0x14e7f0d0, 0x14ebdf48) stub.
[0325/233723.297:ERROR:viz_main_impl.cc(201)] Exiting GPU process due to errors during initialization
0053:fixme:ntdll:EtwEventUnregister (deadbeef) stub.
0053:fixme:kernelbase:AppPolicyGetProcessTerminationMethod 0xfffffffa, 0x32fe2c
005d:fixme:process:SetProcessDEPPolicy (3): stub
005d:fixme:heap:RtlSetHeapInformation (nil) 1 (nil) 0 stub
005d:fixme:ntdll:EtwEventRegister ({d2d578d9-2936-45b6-a09f-30e32715f42d}, 0x114dabc0, 0x14e7f0d0, 0x14ebdf48) stub.
005f:fixme:process:SetProcessDEPPolicy (3): stub
005f:fixme:heap:RtlSetHeapInformation (nil) 1 (nil) 0 stub
005f:fixme:ntdll:EtwEventRegister ({d2d578d9-2936-45b6-a09f-30e32715f42d}, 0x114dabc0, 0x14e7f0d0, 0x14ebdf48) stub.
006b:fixme:process:SetProcessDEPPolicy (3): stub
006b:fixme:heap:RtlSetHeapInformation (nil) 1 (nil) 0 stub
006b:fixme:ntdll:EtwEventRegister ({d2d578d9-2936-45b6-a09f-30e32715f42d}, 0x114dabc0, 0x14e7f0d0, 0x14ebdf48) stub.
[0325/233723.543:ERROR:viz_main_impl.cc(201)] Exiting GPU process due to errors during initialization
006b:fixme:ntdll:EtwEventUnregister (deadbeef) stub.
006b:fixme:kernelbase:AppPolicyGetProcessTerminationMethod 0xfffffffa, 0x32fe2c
[0325/233723.557:ERROR:browser_gpu_channel_host_factory.cc(119)] Failed to launch GPU process.
[0325/233723.557:ERROR:gpu_process_transport_factory.cc(1026)] Lost UI shared context.
004a:fixme:file:ReplaceFileW Ignoring flags 2
004a:fixme:file:FindFirstFileExW flags not implemented 0x00000002
004a:fixme:file:FindFirstFileExW flags not implemented 0x00000002
0039:fixme:winhttp:get_system_proxy_autoconfig_url no support on this platform
0039:fixme:winhttp:WinHttpDetectAutoProxyConfigUrl discovery via DHCP not supported
009a:fixme:kernelbase:AppPolicyGetThreadInitializationType 0xfffffffa, 0xf7efea4
00f5:fixme:ntdll:NtQuerySystemInformation returning fake driver list
00f5:fixme:ntdll:NtQuerySystemInformation returning fake driver list
00f5:fixme:ntdll:NtQuerySystemInformation returning fake driver list
00f5:fixme:ntdll:NtQuerySystemInformation returning fake driver list
00f5:fixme:ntdll:NtQuerySystemInformation returning fake driver list
00f5:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
00f5:fixme:hnetcfg:netfw_rules_Remove 0x151a38, L"AssassinsCreedOdyssey-TCP"
00f5:err:ole:apartment_getclassobject DllGetClassObject returned error 0x80040111 for dll L"C:\\windows\\system32\\hnetcfg.dll"
00f5:err:ole:CoGetClassObject no class object {2c5bc43e-3369-4c33-ab0c-be9469677af4} could be created for context 0x1
00f5:fixme:hnetcfg:netfw_rules_Remove 0x151a38, L"AssassinsCreedOdyssey-TCP"
00f5:err:ole:apartment_getclassobject DllGetClassObject returned error 0x80040111 for dll L"C:\\windows\\system32\\hnetcfg.dll"
00f5:err:ole:CoGetClassObject no class object {2c5bc43e-3369-4c33-ab0c-be9469677af4} could be created for context 0x1
00f5:fixme:kernelbase:AppPolicyGetProcessTerminationMethod 0xfffffffa, 0x32fe2c
0147:fixme:ntdll:NtQuerySystemInformation returning fake driver list
0147:fixme:ntdll:NtQuerySystemInformation returning fake driver list
0147:fixme:ntdll:NtQuerySystemInformation returning fake driver list
0147:fixme:ntdll:NtQuerySystemInformation returning fake driver list
0147:fixme:ntdll:NtQuerySystemInformation returning fake driver list
0147:fixme:ole:CoInitializeSecurity ((nil),-1,(nil),(nil),0,3,(nil),0,(nil)) - stub!
0147:fixme:wbemprox:client_security_SetBlanket 0x7c4f1958, 0x14e770, 10, 0, (null), 3, 3, (nil), 0x00000000
0147:fixme:wbemprox:client_security_Release 0x7c4f1958
info: Game: UplayService.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Required Vulkan extension VK_KHR_get_physical_device_properties2 not supported
info: Required Vulkan extension VK_KHR_surface not supported
info: Required Vulkan extension VK_KHR_win32_surface not supported
err: DxvkInstance: Failed to create instance
0147:fixme:kernelbase:AppPolicyGetProcessTerminationMethod 0xfffffffa, 0x32fe2c
00bf:fixme:heap:RtlSetHeapInformation 0x320000 0 0x22e730 4 stub
00bf:fixme:ntdll:NtQuerySystemInformation returning fake driver list
00bf:fixme:heap:RtlSetHeapInformation 0x450000 0 0x22dce0 4 stub
2019-03-25 23:37:44 [ 191] [DEBUG] Hooks.cpp (36) : --- Overlay started --- (C:\Program Files (x86)\Ubisoft\Ubisoft Game Launcher\games\Assassin's Creed Odyssey\ACOdyssey.exe)
2019-03-25 23:37:44 [ 191] [INFO ] Hooks.cpp (47) : UI disabled via command line
2019-03-25 23:37:44 [ 191] [DEBUG] PlatformHooks.cpp (113) : 'kernel32.dll' hooked
2019-03-25 23:37:44 [ 191] [DEBUG] PlatformHooks.cpp (113) : 'kernel32.dll' hooked
2019-03-25 23:37:44 [ 191] [DEBUG] PlatformHooks.cpp (113) : 'shell32.dll' hooked
00bf:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
00c1:fixme:kernelbase:AppPolicyGetProcessTerminationMethod 0xfffffffffffffffa, 0x22fd00
002d:fixme:ntdll:NtQueryInformationJobObject stub: 0x140 9 0x74f884 112 (nil)
00bf:fixme:thread:create_user_shared_data_thread Creating user shared data update thread.
00bf:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
00bf:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
00bf:fixme:ntdll:NtQueryInformationToken QueryInformationToken( ..., TokenSessionId, ...) semi-stub
00c4:fixme:kernelbase:AppPolicyGetThreadInitializationType 0xfffffffffffffffa, 0x218fdc0
00cf:fixme:ver:GetCurrentPackageId (0x24bfdc0 (nil)): stub
00bf:fixme:ole:CoInitializeSecurity ((nil),-1,(nil),(nil),0,3,(nil),0,(nil)) - stub!
00bf:fixme:wbemprox:wbem_locator_ConnectServer unsupported flags
00bf:fixme:wbemprox:client_security_SetBlanket 0x7f611cc1d280, 0x392c2a0, 10, 0, (null), 3, 3, (nil), 0x00000000
00bf:fixme:wbemprox:client_security_Release 0x7f611cc1d280
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
00bf:fixme:nls:get_dummy_preferred_ui_language (0x8 0x22f270 0x22f290 0x22f274) returning a dummy value (current locale)
00e5:fixme:win:RegisterDeviceNotificationA (hwnd=0x300e8, filter=0x1012f270,flags=0x00000000) returns a fake device notification handle!
00e5:fixme:wtsapi:WTSRegisterSessionNotification Stub 0x300e8 0x00000001
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
00bf:fixme:win:GetDisplayConfigBufferSizes (0x2 0x22eb40 0x22eb50): stub
00bf:fixme:win:QueryDisplayConfig (00000002 0x22eb40 (nil) 0x22eb50 (nil) (nil))
00bf:fixme:nvapi:unimplemented_stub function 0x694d52e is unimplemented!
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
00bf:fixme:win:GetDisplayConfigBufferSizes (0x2 0x22eb40 0x22eb50): stub
00bf:fixme:win:QueryDisplayConfig (00000002 0x22eb40 (nil) 0x22eb50 (nil) (nil))
00bf:fixme:ole:CoInitializeSecurity ((nil),-1,(nil),(nil),0,3,(nil),0,(nil)) - stub!
00bf:fixme:wbemprox:wbem_locator_ConnectServer unsupported flags
00bf:fixme:wbemprox:client_security_SetBlanket 0x7f611cc1d280, 0x39ce930, 10, 0, (null), 3, 3, (nil), 0x00000000
00bf:fixme:wbemprox:client_security_Release 0x7f611cc1d280
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
00bf:fixme:nvapi:unimplemented_stub function 0x2ddfb66e is unimplemented!
00bf:fixme:nvapi:unimplemented_stub function 0x1be0b8e5 is unimplemented!
00bf:fixme:nvapi:unimplemented_stub function 0xdcb616c3 is unimplemented!
warn: DxgiFactory::QueryInterface: Unknown interface query
warn: 7632e1f5-ee65-4dca-87fd-84cd75f8838d
info: D3D11CoreCreateDevice: Probing D3D_FEATURE_LEVEL_11_0
info: D3D11CoreCreateDevice: Using feature level D3D_FEATURE_LEVEL_11_0
info: Enabled device extensions:
info:   VK_EXT_shader_viewport_index_layer
info:   VK_EXT_transform_feedback
info:   VK_EXT_vertex_attribute_divisor
info:   VK_KHR_dedicated_allocation
info:   VK_KHR_descriptor_update_template
info:   VK_KHR_driver_properties
info:   VK_KHR_get_memory_requirements2
info:   VK_KHR_image_format_list
info:   VK_KHR_maintenance1
info:   VK_KHR_maintenance2
info:   VK_KHR_sampler_mirror_clamp_to_edge
info:   VK_KHR_shader_draw_parameters
info:   VK_KHR_swapchain
00bf:fixme:vulkan:wine_vk_device_convert_create_info Application requested a linked structure of type 0x3b9db032.
00bf:fixme:vulkan:wine_vk_device_convert_create_info Application requested a linked structure of type 0x3b9b3760.
info: DXVK: Read 1342 valid state cache entries
info: DXVK: Using 16 compiler threads
warn: DXGI: VK_FORMAT_D24_UNORM_S8_UINT -> VK_FORMAT_D32_SFLOAT_S8_UINT
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: D3D11DeviceContext::QueryInterface: Unknown interface query
warn: 420d5b32-b90c-4da4-bef0-359f6a24a83a
warn: D3D11DXGIDevice::QueryInterface: Unknown interface query
warn: 9d06dffa-d1e5-4d07-83a8-1bb123f2f841
warn: DxbcCompiler: Unsupported custom data block: DxbcCustomDataClass::Opaque
warn: DxbcCompiler: Unsupported custom data block: DxbcCustomDataClass::Opaque
warn: DxbcCompiler: Unsupported custom data block: DxbcCustomDataClass::Opaque
warn: DxbcCompiler: Unsupported custom data block: DxbcCustomDataClass::Opaque
warn: DxbcCompiler: Unsupported custom data block: DxbcCustomDataClass::Opaque
warn: DxbcCompiler: Unsupported custom data block: DxbcCustomDataClass::Opaque
warn: DxbcCompiler: Unsupported custom data block: DxbcCustomDataClass::Opaque
warn: DxbcCompiler: Unsupported custom data block: DxbcCustomDataClass::Opaque
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info: Presenter: Actual swap chain properties:
info:   Format: VK_FORMAT_B8G8R8A8_UNORM
info:   Present mode: VK_PRESENT_MODE_IMMEDIATE_KHR
info:   Buffer size: 1272x693
info:   Image count: 3
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: D3D11CoreCreateDevice: Probing D3D_FEATURE_LEVEL_10_0
info: D3D11CoreCreateDevice: Using feature level D3D_FEATURE_LEVEL_10_0
info: Enabled device extensions:
info:   VK_EXT_shader_viewport_index_layer
info:   VK_EXT_transform_feedback
info:   VK_EXT_vertex_attribute_divisor
info:   VK_KHR_dedicated_allocation
info:   VK_KHR_descriptor_update_template
info:   VK_KHR_driver_properties
info:   VK_KHR_get_memory_requirements2
info:   VK_KHR_image_format_list
info:   VK_KHR_maintenance1
00e5:fixme:system:SystemParametersInfoW Unimplemented action: 59 (SPI_SETSTICKYKEYS)
00e5:fixme:system:SystemParametersInfoW Unimplemented action: 53 (SPI_SETTOGGLEKEYS)
00e5:fixme:system:SystemParametersInfoW Unimplemented action: 51 (SPI_SETFILTERKEYS)
info:   VK_KHR_maintenance2
info:   VK_KHR_sampler_mirror_clamp_to_edge
info:   VK_KHR_shader_draw_parameters
info:   VK_KHR_swapchain
0107:fixme:vulkan:wine_vk_device_convert_create_info Application requested a linked structure of type 0x3b9db032.
0107:fixme:vulkan:wine_vk_device_convert_create_info Application requested a linked structure of type 0x3b9b3760.
warn: DXGI: MakeWindowAssociation: Ignoring flags
warn: D3D11Device::GetDeviceRemovedReason: Stub
warn: DXGI: MakeWindowAssociation: Ignoring flags
info: DXVK: Read 1342 valid state cache entries
info: Presenter: Actual swap chain properties:
info:   Format: VK_FORMAT_B8G8R8A8_UNORM
info:   Present mode: VK_PRESENT_MODE_IMMEDIATE_KHR
info:   Buffer size: 1272x693
info:   Image count: 3
info: DXVK: Using 16 compiler threads
warn: DXGI: VK_FORMAT_D24_UNORM_S8_UINT -> VK_FORMAT_D32_SFLOAT_S8_UINT
warn: DXGI: MakeWindowAssociation: Ignoring flags
info: Presenter: Actual swap chain properties:
info:   Format: VK_FORMAT_B8G8R8A8_UNORM
info:   Present mode: VK_PRESENT_MODE_IMMEDIATE_KHR
info:   Buffer size: 1272x723
info:   Image count: 3
0107:fixme:wbemprox:wbem_services_CreateInstanceEnum unsupported flags 0x00000030
0107:fixme:wbemprox:enum_class_object_Next timeout not supported
mesa: for the -simplifycfg-sink-common option: may only occur zero or one times!
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
0107:fixme:ddraw:ddraw7_Initialize Ignoring guid {aeb2cdd4-6e41-43ea-941c-8361cc760781}.
ALSA lib seq_hw.c:466:(snd_seq_hw_open) open /dev/snd/seq failed: No such file or directory
0107:err:winediag:MIDIMAP_drvOpen No software synthesizer midi port found, Midi sound output probably won't work.
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: D3D11CoreCreateDevice: Probing D3D_FEATURE_LEVEL_10_0
info: D3D11CoreCreateDevice: Using feature level D3D_FEATURE_LEVEL_10_0
info: Enabled device extensions:
info:   VK_EXT_shader_viewport_index_layer
info:   VK_EXT_transform_feedback
info:   VK_EXT_vertex_attribute_divisor
info:   VK_KHR_dedicated_allocation
info:   VK_KHR_descriptor_update_template
info:   VK_KHR_driver_properties
info:   VK_KHR_get_memory_requirements2
info:   VK_KHR_image_format_list
info:   VK_KHR_maintenance1
info:   VK_KHR_maintenance2
info:   VK_KHR_sampler_mirror_clamp_to_edge
info:   VK_KHR_shader_draw_parameters
info:   VK_KHR_swapchain
0107:fixme:vulkan:wine_vk_device_convert_create_info Application requested a linked structure of type 0x3b9db032.
0107:fixme:vulkan:wine_vk_device_convert_create_info Application requested a linked structure of type 0x3b9b3760.
info: DXVK: Read 1342 valid state cache entries
info: DXVK: Using 16 compiler threads
warn: DXGI: VK_FORMAT_D24_UNORM_S8_UINT -> VK_FORMAT_D32_SFLOAT_S8_UINT
0107:fixme:wbemprox:wbem_services_CreateInstanceEnum unsupported flags 0x00000030
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
0107:fixme:ddraw:ddraw7_Initialize Ignoring guid {aeb2cdd4-6e41-43ea-941c-8361cc760781}.
0107:fixme:wbemprox:client_security_SetBlanket 0x7f611cc1d280, 0x3cbe420, 10, 0, (null), 3, 3, (nil), 0x00000000
0107:fixme:wbemprox:client_security_Release 0x7f611cc1d280
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
0107:fixme:wbemprox:wbem_services_CreateInstanceEnum unsupported flags 0x00000030
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
0107:fixme:ddraw:ddraw7_Initialize Ignoring guid {aeb2cdd4-6e41-43ea-941c-8361cc760781}.
0107:fixme:wbemprox:client_security_SetBlanket 0x7f611cc1d280, 0x1e38940, 10, 0, (null), 3, 3, (nil), 0x00000000
0107:fixme:wbemprox:client_security_Release 0x7f611cc1d280
info: Game: ACOdyssey.exe
info: DXVK: v1.0.1
warn: OpenVR: Failed to locate module
info: Enabled instance extensions:
info:   VK_KHR_get_physical_device_properties2
info:   VK_KHR_surface
info:   VK_KHR_win32_surface
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
info: AMD RADV VEGA10 (LLVM 8.0.0):
info:   Driver: 19.0.99
info:   Vulkan: 1.1.90
info:   Memory Heap[0]:
info:     Size: 8176 MiB
info:     Flags: 0x1
info:     Memory Type[1]: Property Flags = 0x7
info:   Memory Heap[1]:
info:     Size: 8176 MiB
info:     Flags: 0x0
info:     Memory Type[0]: Property Flags = 0x6
info:     Memory Type[2]: Property Flags = 0xe
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
NIR validation failed after nir_split_array_vars 5 errors: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE samplerBuffer t80 (429, 0, 2) decl_var NIR validation failed after nir_lower_clip_cull_distance_arrays 6 errors: shader_in INTERP_MODE_NONE uint v0 (VERT_ATTRIB_GENERIC0.x, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o0 (VARYING_SLOT_VAR0, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o1 (VARYING_SLOT_VAR1, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o2 (VARYING_SLOT_VAR2, 0, 0)shader: MESA_SHADER_VERTEX inputs: 0 decl_var shader_out INTERP_MODE_NONE vec4 o3 (VARYING_SLOT_VAR3, 0, 0) decl_var shader_out INTERP_MODE_NONE outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE samplerBuffer t80 (429, 0, 2) vec4 o4 (VARYING_SLOT_VAR4, 0, 0) decl_var decl_var shader_in INTERP_MODE_NONE uint v0 (VERT_ATTRIB_GENERIC0.x, 0, 0) decl_var shader_out INTERP_MODE_NONE shader_out INTERP_MODE_NONE vec4 o5 (VARYING_SLOT_VAR5, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o0 (VARYING_SLOT_VAR0, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o6 (VARYING_SLOT_VAR6, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o7 (VARYING_SLOT_VAR7, 0, 0) decl_var shader_out INTERP_MODE_NONE float o8 (VARYING_SLOT_VAR8.x, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_vertex_out.field0 (VARYING_SLOT_POS, 0, 0) decl_function main (0 params) impl main { decl_var INTERP_MODE_NONE vec4 r2 decl_var INTERP_MODE_NONE vec4 o1 (VARYING_SLOT_VAR1, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o2 (VARYING_SLOT_VAR2, 0, 0) vec4 out@o0-temp decl_var INTERP_MODE_NONE vec4 r3 decl_var shader_out INTERP_MODE_NONE decl_var INTERP_MODE_NONE vec4 r12 decl_var INTERP_MODE_NONE vec4 r10 vec4 o3 (VARYING_SLOT_VAR3, 0, 0) decl_var INTERP_MODE_NONE vec4 out@o1-temp decl_var INTERP_MODE_NONE vec4 r7 decl_var INTERP_MODE_NONE vec4 out@o2-temp decl_var INTERP_MODE_NONE vec4 out@o4-temp decl_var INTERP_MODE_NONE uint in@v0-temp decl_var INTERP_MODE_NONE vec4 r5 decl_var INTERP_MODE_NONE vec4 r1 decl_var INTERP_MODE_NONE vec4 out@o3-temp decl_var INTERP_MODE_NONE vec4 out@o6-temp decl_var INTERP_MODE_NONE vec4 r0 decl_var INTERP_MODE_NONE vec4 r11 decl_var INTERP_MODE_NONE vec4 r6 decl_var INTERP_MODE_NONE float out@o8-temp decl_var INTERP_MODE_NONE vec4 r9 decl_var INTERP_MODE_NONE vec4 out@o5-temp decl_var INTERP_MODE_NONE Z shader_in decl_var INTERP_MODE_NONE vec4 r4 decl_var INTERP_MODE_NONE vec4 out@vs_vertex_out.field0-temp decl_var INTERP_MODE_NONE vec4 r8 decl_var INTERP_MODE_NONE vec4 out@o7-temp block block_0: /* preds: */ vec1 32 ssa_1893 = deref_var &v0 (shader_in uint) vec1 32 ssa_1894 = deref_var &in@v0-temp (function_temp uint) intrinsic copy_deref (ssa_1894, ssa_1893) () vec1 32 ssa_0 = deref_var &in@v0-temp (function_temp uint) vec1 32 ssa_1 = intrinsic load_deref (ssa_0) (0) /* access=0 */ vec1 32 ssa_2NIR validation failed after nir_remove_dead_variables 1 errors: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE samplerBuffer t80 (429, 0, 2) decl_var shader_in INTERP_MODE_NONE uint v0 (VERT_ATTRIB_GENERIC0.x, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o0 (VARYING_SLOT_VAR0, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o1 (VARYING_SLOT_VAR1, 0, 0) decl_var = deref_var &shader_in (function_temp Z) NIR validation failed after nir_lower_clip_cull_distance_arrays 13 errors: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE 
samplerBuffer t80 (429, 0, 2) decl_var shader_in INTERP_MODE_NONE vec4 v0 (VERT_ATTRIB_GENERIC0, 0, 0) decl_var shader_in INTERP_MODE_NONE vec4 v1 (VERT_ATTRIB_GENERIC1, 0, 0) decl_var shader_in INTERP_MODE_NONE uvec4 v2 (VERT_ATTRIB_GENERIC2, 0, 0) decl_var decl_var shader_out INTERP_MODE_NONE shader_in INTERP_MODE_NONE vec4 v3 (VERT_ATTRIB_GENERIC3, 0, 0) decl_var shader_in INTERP_MODE_NONE vec1 32 ssa_3 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_4 = deref_array &(*ssa_2)vec4 o2 (VARYING_SLOT_VAR2, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o3 (VARYING_SLOT_VAR3, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o4 (VARYING_SLOT_VAR4, 0, 0) vec2 v4 (VERT_ATTRIB_GENERIC4.xy, 0, 0) decl_var decl_var shader_out INTERP_MODE_NONE vec4 o5 (VARYING_SLOT_VAR5, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o6 (VARYING_SLOT_VAR6, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o7 (VARYING_SLOT_VAR7, 0, 0) NIR validation failed after nir_split_array_vars 6 errors: [0] (function_temp vec4) decl_var shader_out INTERP_MODE_NONE float o8 (VARYING_SLOT_VAR8.x, 0, 0) decl_var /* &shader_out INTERP_MODE_NONE shader_out INTERP_MODE_NONE vec4 o0 (VARYING_SLOT_VAR0, 0, 0) decl_var shader_out INTERP_MODE_NONE shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE samplerBuffer t80shader_out INTERP_MODE_NONE vec4 vs_vertex_out.field0 (VARYING_SLOT_POS, 0, 0) decl_var INTERP_MODE_NONE Z shader_in decl_var INTERP_MODE_NONE vec4 r0 decl_var INTERP_MODE_NONE vec4 r1 vec4 o4 (429, 0, 2)shader_invec4 o1 (VARYING_SLOT_VAR1, 0, 0) decl_var shader_in INTERP_MODE_NONE decl_var INTERP_MODE_NONE vec4 r2[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_5 = uint v0intrinsic load_deref (decl_var shader_out INTERP_MODE_NONE vec4 o2 (VARYING_SLOT_VAR2, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o3 (VARYING_SLOT_VAR3, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o4 (VARYING_SLOT_VAR4, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o5 (VARYING_SLOT_VAR5, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o6 (VARYING_SLOT_VAR6, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o7 (VARYING_SLOT_VAR7, 0, 0) decl_var shader_out INTERP_MODE_NONE float o8 (VARYING_SLOT_VAR8.x, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_vertex_out.field0 (VARYING_SLOT_POS, 0, 0) decl_var INTERP_MODE_NONE Z shader_in decl_var INTERP_MODE_NONE vec4 r0 decl_var INTERP_MODE_NONE vec4 r1 decl_var INTERP_MODE_NONE vec4 r2 decl_var INTERP_MODE_NONE vec4 r3 decl_var INTERP_MODE_NONE vec4 r4 decl_var INTERP_MODE_NONE vec4 r5 decl_var INTERP_MODE_NONE vec4 r6 decl_var INTERP_MODE_NONE vec4 r7 decl_var INTERP_MODE_NONE vec4 r8 decl_var INTERP_MODE_NONE vec4 r9 decl_var INTERP_MODE_NONE vec4 r10 decl_var INTERP_MODE_NONE vec4 r11 decl_function main (0 params) impl main { decl_var INTERP_MODE_NONE vec2 phi decl_var INTERP_MODE_NONE vec2 phi@0 decl_var INTERP_MODE_NONE vec4 phi@1 decl_var INTERP_MODE_NONE vec4 phi@2 decl_var INTERP_MODE_NONE vec4 phi@3 NIR validation failed after nir_split_var_copies 8 errors: shader: MESA_SHADER_VERTEX inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var uniform INTERP_MODE_NONE samplerBuffer t80 (429, 0, 2) decl_var shader_in INTERP_MODE_NONE uint v0 (VERT_ATTRIB_GENERIC0.x, 0, 0) decl_var shader_out INTERP_MODE_NONE (null) vs_vertex_out (VARYING_SLOT_POS, 0, 0) error: var->num_members == glsl_get_length(without_array) (../src/compiler/nir/nir_validate.c:1033) decl_var shader_out 
INTERP_MODE_NONE vec4 o0 (VARYING_SLOT_VAR0, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o1 (VARYING_SLOT_VAR1, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o2 (VARYING_SLOT_VAR2, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o3 (VARYING_SLOT_VAR3, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o4 (VARYING_SLOT_VAR4, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o5 (VARYING_SLOT_VAR5, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o6 (VARYING_SLOT_VAR6, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o7 (VARYING_SLOT_VAR7, 0, 0) decl_var shader_out INTERP_MODE_NONE float o8 (VARYING_SLOT_VAR8.x, 0, 0) decl_var INTERP_MODE_NONE (null) shader_in decl_var INTERP_MODE_NONE vec4 r0 decl_var INTERP_MODE_NONE vec4 r1 decl_var INTERP_MODE_NONE vec4 r2 decl_var INTERP_MODE_NONE vec4 r3 decl_var INTERP_MODE_NONE vec4 r4 decl_var INTERP_MODE_NONE vec4 r5 decl_var INTERP_MODE_NONE vec4 r6 decl_var INTERP_MODE_NONE vec4 r7 (VERT_ATTRIB_GENERIC0.x, 0, 0) (VARYING_SLOT_VAR4, 0, 0) decl_var shader_out INTERP_MODE_NONE decl_var decl_var ssa_4) (decl_var decl_var shader_out INTERP_MODE_NONE vec4 o0 (VARYING_SLOT_VAR0, 0, 0)0) /* INTERP_MODE_NONE vec4 phi@4 decl_var INTERP_MODE_NONE vec4 phi@5 decl_var INTERP_MODE_NONE INTERP_MODE_NONE vec4 o5 (VARYING_SLOT_VAR5, 0, 0) INTERP_MODE_NONE vec4 r8 decl_var INTERP_MODE_NONE vec4 r9 decl_var INTERP_MODE_NONE decl_var decl_var shader_out INTERP_MODE_NONE vec4 r3vec4 r10 shader_out INTERP_MODE_NONE access=0 */ vec4 32 ssa_6 = vec4 ssa_1, ssa_5.y, decl_var INTERP_MODE_NONE vec4 r4 decl_var INTERP_MODE_NONE vec4 r5 decl_var ssa_5.vec4 o6 decl_var INTERP_MODE_NONE vec4 phi@6 vec4 o1 (VARYING_SLOT_VAR1, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o2 (VARYING_SLOT_VAR2, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o3 (VARYING_SLOT_VAR3, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o4 (VARYING_SLOT_VAR4, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o5 (VARYING_SLOT_VAR5, 0, 0) decl_var shader_out INTERP_MODE_NONE z, INTERP_MODE_NONE block block_0: /* preds: */ vec1 32 ssa_0 = deref_var &v0 (shader_in vec4) vec4 32 ssa_1 = intrinsic load_deref (ssa_0) (0) /*vec4 r6 (VARYING_SLOT_VAR6, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o7 (VARYING_SLOT_VAR7, 0, 0) decl_var shader_out INTERP_MODE_NONE float o8 (VARYING_SLOT_VAR8.x, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_vertex_out.field0ssa_5.w intrinsic store_deref (ssa_4 access=0 */ vec1 32 ssa_2 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_3 = load_const (0x00000000 /* 0.000000 */) decl_var INTERP_MODE_NONE vec4 r7 decl_var INTERP_MODE_NONE vec4 r8 decl_var (VARYING_SLOT_POS, 0, 0) decl_var INTERP_MODE_NONE vec1 32 ssa_4 = deref_array &vec4 r11 decl_var INTERP_MODE_NONE vec4 r9 decl_var INTERP_MODE_NONE vec4 r10 (null) shader_in decl_var INTERP_MODE_NONE vec4 r0 decl_var INTERP_MODE_NONE decl_var INTERP_MODE_NONE vec4 r12 decl_var INTERP_MODE_NONE vec4 r13 decl_var INTERP_MODE_NONE vec4 r14 decl_var vec4 o6(*ssa_2), system INTERP_MODE_NONE (VARYING_SLOT_VAR6, 0, 0) INTERP_MODE_NONE vec4 r11 decl_var INTERP_MODE_NONE vec4 r12 decl_var INTERP_MODE_NONE vec4 r13 ssa_6) (15, 0) /* wrmask=xyzw */ /* access=0 */ [0] (shader_temp vec4) /* &shader_in[0] */decl_var INTERP_MODE_NONE vec4 r1 decl_var INTERP_MODE_NONE vec4 r2 decl_var INTERP_MODE_NONE vec4 r3 decl_var vec1 32 ssa_1865 error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) intrinsic store_deref (ssa_4, ssa_1 = intrinsic load_first_vertex (decl_var shader_out INTERP_MODE_NONE vec4 o7 
(VARYING_SLOT_VAR7, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 o8 (VARYING_SLOT_VAR8, 0, 0) decl_var shader_out INTERP_MODE_NONE uint vs_vertex_index ) ()) (15, 0)decl_var system INTERP_MODE_NONE uint vs_base_vertex /*vec4 r14 decl_var INTERP_MODE_NONE vec4 r15 decl_var INTERP_MODE_NONE vec4 r4 decl_var INTERP_MODE_NONE INTERP_MODE_NONE vec4 r16 decl_var INTERP_MODE_NONE vec4 o9 (VARYING_SLOT_VAR9, 0, 0) wrmask=xyzw */ /* access=0 */ vec1 32 ssa_5vec4 r5decl_function main (0 params)decl_var shader_out INTERP_MODE_NONE float o10 (VARYING_SLOT_VAR10.x, 0, 0) decl_var shader_out INTERP_MODE_NONE vec4 vs_vertex_out.field0 (VARYING_SLOT_POS, 0, 0) decl_function main (0 params) impl main vec4 r17 = deref_var &v1 (shader_in vec4) { decl_var INTERP_MODE_NONE impl main { decl_var INTERP_MODE_NONE vec4 r6 decl_var INTERP_MODE_NONE vec4 out@o4-tempdecl_var system INTERP_MODE_NONE decl_var INTERP_MODE_NONE vec2 phi decl_var INTERP_MODE_NONE float phi@0 decl_var INTERP_MODE_NONE vec4 32 ssa_6 = vec1 32 ssa_1866vec4 r6 decl_var INTERP_MODE_NONE uint vs_vertex_index = decl_var INTERP_MODE_NONE vec4 r7intrinsic load_deref (ssa_5intrinsic load_first_vertex () () vec1 32 ssa_1867vec4 phi@1 decl_var INTERP_MODE_NONE vec2 phi@2 decl_var INTERP_MODE_NONE ) (0) /* access=0 */ = intrinsic load_vertex_id_zero_base () () vec1 32 ssa_1868 = iadd ssa_1867, ssa_1866vec4 r4 decl_var INTERP_MODE_NONE vec1 32 ssa_11 = isub ssa_1868, ssa_1865 vec1 32 ssa_12 = deref_var &shader_in (function_temp Z) vec1 32 ssa_13 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_14 = deref_array &(*ssa_12)[1] (function_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_15 = intrinsic load_deref (ssa_14) (0) /*vec4 r8 decl_var INTERP_MODE_NONE decl_var system INTERP_MODE_NONE access=0 */ decl_var INTERP_MODE_NONE vec1 32 ssa_7 = deref_var &shader_invec4 phi@3 decl_var INTERP_MODE_NONE vec4 phi@4 decl_var INTERP_MODE_NONE vec4 phi@5 decl_var INTERP_MODE_NONE vec4 32 ssa_16 = vec4 ssa_11vec4 phi@6 (shader_temp Z) vec4 r9 decl_var uint in@v0-temp, ssa_15.y, ssa_15.vec1 32 ssa_8 decl_var INTERP_MODE_NONE vec4 r10 decl_var INTERP_MODE_NONE decl_var INTERP_MODE_NONE = load_const ( INTERP_MODE_NONE vec4 r11uint vs_base_vertex 0x00000001 /* 0.000000 */) vec1 32 ssa_9 = deref_array &(*ssa_7) decl_var decl_function main (0 params) z, ssa_15.w vec4 out@o3-temp decl_var INTERP_MODE_NONE vec4 r10 decl_var INTERP_MODE_NONE vec4 r12 vec4 phi@7 impl main [1] (shader_temp vec4) decl_var INTERP_MODE_NONE { decl_var INTERP_MODE_NONE vec4 out@o8-tempintrinsic store_deref ( INTERP_MODE_NONE vec2 phi decl_var INTERP_MODE_NONE float phi@0ssa_14/* &shader_in[1] */ vec4 r12 decl_var INTERP_MODE_NONE vec4 r13 decl_var INTERP_MODE_NONE vec4 r14 decl_function main (0 params) impl main { decl_var INTERP_MODE_NONE vec2 phi decl_var decl_var INTERP_MODE_NONE INTERP_MODE_NONE float phi@0 decl_var INTERP_MODE_NONE vec4 r1 decl_var INTERP_MODE_NONE vec4 out@o1-temp decl_var INTERP_MODE_NONE vec4 phi@1 decl_var INTERP_MODE_NONE error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) , ssa_16decl_var INTERP_MODE_NONE decl_var float out@o10-temp) ( INTERP_MODE_NONE vec4 phi@8 decl_var INTERP_MODE_NONE vec4 r0 decl_var INTERP_MODE_NONE vec4 out@o2-temp block block_0: /* preds: 15, 0) /**/ vec4 phi@1 intrinsic store_deref ( vec1 32 ssa_0 = deref_var &v0 (shader_in uint) vec1 32 ssa_1 = intrinsic load_deref (ssa_0) ( wrmask=xyzw */ /* 
access=0 */ vec1 32 ssa_1915decl_var INTERP_MODE_NONE 0) /*vec2 phi@2 decl_var INTERP_MODE_NONE vec4 phi@3 decl_var INTERP_MODE_NONE vec4 phi@4 decl_var INTERP_MODE_NONE vec4 phi@5 decl_var INTERP_MODE_NONE vec4 phi@6 decl_var vec2 phi@2 decl_var INTERP_MODE_NONE INTERP_MODE_NONE vec4 phi@7 ssa_9, ssa_6) (15vec4 phi@3 decl_var INTERP_MODE_NONE = load_const (0x00000000 /* 0.000000 */) , 0) /* wrmask=xyzw access=0 */ decl_var INTERP_MODE_NONE decl_var INTERP_MODE_NONE vec1 32 ssa_2vec4 r3 decl_var INTERP_MODE_NONE vec4 out@o5-temp decl_var vec1 32 ssa_1916 = load_const (vec4 phi@4 = deref_var & */ INTERP_MODE_NONE vec4 out@vs_vertex_out.field0-temp decl_var INTERP_MODE_NONE vec4 r2 decl_var INTERP_MODE_NONE /* access=0 */ decl_var INTERP_MODE_NONE Z shader_in0x00000000 /* 0.000000 */) vec1 32 ssa_1917 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1918 = load_const (0x00000000 /* 0.000000 */)shader_in (shader_temp Z) vec4 phi@5 decl_var INTERP_MODE_NONE vec4 phi@6 decl_var vec1 32 ssa_10 = deref_var &v2 (shader_in uvec4) vec4 32 ssa_1919 = vec4 ssa_1915, ssa_1916, ssa_1917, ssa_1918 vec1 32 ssa_1920 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1921 vec1 32 ssa_3 = load_const ( INTERP_MODE_NONE vec4 phi@7 decl_var INTERP_MODE_NONE 0x00000000 /* 0.000000 */) vec4 phi@8 vec4 32 ssa_11 = vec1 32 ssa_4 = deref_array &(*ssa_2intrinsic load_deref (ssa_10) (0 decl_var INTERP_MODE_NONE vec4 phi@8 )[0] (shader_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_5 = ) /* access=0 */block block_0: /* preds: */ vec1 32 ssa_0 = deref_var &v0 (shader_in uint) block block_0: /* preds: */ vec1 32 ssa_0 = deref_var & vec1 32 ssa_12 = deref_var &shader_in (shader_temp Z) = load_const (vec4 r8v0 (shader_in uint) intrinsic load_deref (vec1 32 ssa_1 = intrinsic load_deref (ssa_0) (0ssa_4 vec1 32 ssa_13 = load_const (0x00000002 /* 0.000000 */) ) (0) /* access=0 */ vec4 32 ssa_60x00000000 /* 0.000000 */) vec1 32 ssa_1922 = load_const (0x00000000 /* 0.000000 */) decl_var INTERP_MODE_NONE vec4 r9 decl_var INTERP_MODE_NONE vec4 r14 vec1 32 ssa_14 = deref_array & = vec4 ssa_1, ssa_5.y(vec1 32 ssa_1923 ) /* access=0 */*ssa_12)[2] (shader_temp vec4) /* &shader_in[2] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) intrinsic store_deref (ssa_14, , = load_const (0x00000000 /* 0.000000 */) ssa_5 .vec1 32 ssa_2ssa_11 vec4 32 ssa_1924 = deref_var &) (15, 0) /* wrmask=xyzshader_in (shader_temp Z) vec1 32 ssa_3 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_4 = deref_array &(*ssa_2)[0] (shader_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_5 = intrinsic load_deref (ssa_4) (0) /* access=0 */ vec4 32 ssa_6 = vec4 ssa_1, ssa_5.y, ssa_5.z, ssa_5.w intrinsic store_deref (ssa_4, ssa_6) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2072 = intrinsic load_first_vertex () () vec1 32 ssa_2073 = intrinsic load_first_vertex () () vec1 32 ssa_2074 = intrinsic load_vertex_id_zero_base () () vec1 32 ssa_2075 = iadd ssa_2074, ssa_2073 vec1 32 ssa_11 = isub ssa_2075, ssa_2072 vec1 32 ssa_12 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_13 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_14 = deref_array &(*ssa_12)[1] (shader_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_15 = 
intrinsic load_deref (ssa_14) (0) /* access=0 */ vec4 32 ssa_16 = vec4 ssa_11, ssa_15.y, ssa_15.z, ssa_15.w intrinsic store_deref (ssa_14, ssa_16) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec4 32 ssa_17 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_18 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_19 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_20 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_21 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_22 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_23 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_24 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_25 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_26 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_27 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_28 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_29 = load_const (true) vec1 1 ssa_30 = load_const (true) vec3 32 ssa_31 = load_const (0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */) vec3 32 ssa_32 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_33 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_34 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_35 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_36 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_37 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_38 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_39 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */) vec2 32 ssa_40 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */) vec4 32 ssa_41 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec4 32 ssa_42 = load_const (0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */) vec4 32 ssa_43 = load_const (0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */) vec4 32 ssa_44 = load_const (0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */) vec3 32 ssa_45 = load_const (0x00000008 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000018 /* 0.000000 */) vec1 32 ssa_46 = load_const (0x3f800000 /* 1.000000 */) vec4 32 ssa_47 = load_const (0x3b808081 /* 0.003922 */, 0x3b808081 /* 0.003922 */, 0x3b808081 /* 0.003922 */, 0x3b808081 /* 0.003922 */) vec4 32 ssa_48 = load_const (0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */) vec3 32 ssa_49 = load_const (0x00000008 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000018 /* 0.000000 */) vec1 32 ssa_50 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_51 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_52 = load_const (0x00000002 /* 0.000000 */) vec1 32 
ssa_53 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_54 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_55 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_56 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_57 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_58 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_59 = load_const (0x3a000080 /* 0.000488 */, 0x3a000080 /* 0.000488 */) vec2 32 ssa_60 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_61 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_62 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_63 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_64 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_65 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_66 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_67 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_68 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_69 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_70 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_71 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_72 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_73 = load_const (true) vec1 1 ssa_74 = load_const (true) vec1 1 ssa_75 = load_const (true) vec4 32 ssa_76 = load_const (0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */) vec1 1 ssa_77 = load_const (true) vec1 1 ssa_78 = load_const (true) vec1 32 ssa_79 = load_const (0x00000004 /* 0.000000 */) vec3 32 ssa_80 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_81 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec3 32 ssa_82 = load_const (0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */) vec3 32 ssa_83 = load_const (0xffffffff /* -nan */, 0xffffffff /* -nan */, 0xffffffff /* -nan */) vec3 32 ssa_84 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_85 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_86 = load_const (0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000004 /* 0.000000 */) vec1 32 ssa_87 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_88 = load_const (0x3f7fff00 /* 0.999985 */) vec3 32 ssa_89 = load_const (0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */) vec4 32 ssa_90 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec2 32 ssa_91 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_92 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_93 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_94 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_95 = load_const (0x00000000 /* 0.000000 */) vec3 32 ssa_96 = load_const (0x00000001 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000001 /* 0.000000 */) vec1 32 ssa_97 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_98 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_99 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_100 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_101 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_102 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_103 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_104 = load_const (0x00000000 /* 0.000000 */) vec1 32 
ssa_105 = load_const (0x00000001 /* 0.000000 */) vec1 1 ssa_106 = load_const (true) vec1 1 ssa_107 = load_const (true) vec1 1 ssa_108 = load_const (true) vec1 32 ssa_109 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_110 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_111 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_112 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_113 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_114 = deref_array &(*ssa_112)[0] (shader_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_115 = intrinsic load_deref (ssa_114) (0) /* access=0 */ vec1 32 ssa_116 = imov ssa_115.x vec1 32 ssa_117 = ishl ssa_116, ssa_111 vec1 32 ssa_118 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_119 = intrinsic load_deref (ssa_118) (0) /* access=0 */ vec4 32 ssa_120 = vec4 ssa_117, ssa_119.y, ssa_119.z, ssa_119.w vec1 32 ssa_121 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_121, ssa_120) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_122 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_123 = intrinsic load_deref (ssa_122) (0) /* access=0 */ vec1 32 ssa_124 = imov ssa_123.x vec1 32 ssa_125 = ushr ssa_124, ssa_110 vec1 32 ssa_126 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_127 = intrinsic load_deref (ssa_126) (0) /* access=0 */ vec4 32 ssa_128 = vec4 ssa_125, ssa_127.y, ssa_127.z, ssa_127.w vec1 32 ssa_129 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_129, ssa_128) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_130 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_131 = intrinsic load_deref (ssa_130) (0) /* access=0 */ vec1 32 ssa_132 = imov ssa_131.x vec1 32 ssa_133 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_134 = intrinsic vulkan_resource_index (ssa_133) (0, 1, 6) /* desc-set=0 */ /* binding=1 */ /* desc_type=UBO */ vec1 32 ssa_135 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_136 = load_const (0x00000130 /* 0.000000 */) vec1 32 ssa_137 = iadd ssa_135, ssa_136 vec1 32 ssa_138 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_139 = iadd ssa_137, ssa_138 vec1 32 ssa_140 = intrinsic load_ubo (ssa_134, ssa_139) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_141 = iadd ssa_132, ssa_140 vec1 32 ssa_142 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_143 = intrinsic load_deref (ssa_142) (0) /* access=0 */ vec4 32 ssa_144 = vec4 ssa_141, ssa_143.y, ssa_143.z, ssa_143.w vec1 32 ssa_145 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_145, ssa_144) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_146 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_147 = intrinsic load_deref (ssa_146) (0) /* access=0 */ vec1 32 ssa_148 = imov ssa_147.x vec1 32 ssa_149 = iadd ssa_148, ssa_109 vec1 32 ssa_150 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_151 = intrinsic load_deref (ssa_150) (0) /* access=0 */ vec4 32 ssa_152 = vec4 ssa_151.x, ssa_149, ssa_151.z, ssa_151.w vec1 32 ssa_153 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_153, ssa_152) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_154 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_155 = intrinsic load_deref (ssa_154) (0) /* access=0 */ vec4 32 ssa_156 = vec4 ssa_155.x, ssa_155.x, ssa_155.x, ssa_155.x vec1 32 ssa_157 = imov ssa_156.x /* succs: block_1 block_2 */ if ssa_108 { block block_1: /* preds: block_0 */ vec1 32 ssa_158 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_159 = txf ssa_158 (texture_deref), ssa_157 (coord), 0 
(sampler), vec2 32 ssa_160 = vec2 ssa_159.y, ssa_159.z vec1 32 ssa_161 = deref_var &phi (function_temp vec2) intrinsic store_deref (ssa_161, ssa_160) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_3 */ } else { block block_2: /* preds: block_0 */ vec1 32 ssa_162 = deref_var &phi (function_temp vec2) intrinsic store_deref (ssa_162, ssa_26) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ vec1 32 ssa_163 = deref_var &phi (function_temp vec2) vec2 32 ssa_164 = intrinsic load_deref (ssa_163) (0) /* access=0 */ vec1 32 ssa_165 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_166 = intrinsic load_deref (ssa_165) (0) /* access=0 */ vec4 32 ssa_167 = vec4 ssa_164.x, ssa_166.y, ssa_164.y, ssa_166.w vec1 32 ssa_168 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_168, ssa_167) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_169 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_170 = intrinsic load_deref (ssa_169) (0) /* access=0 */ vec4 32 ssa_171 = vec4 ssa_170.y, ssa_170.y, ssa_170.y, ssa_170.y vec1 32 ssa_172 = imov ssa_171.x /* succs: block_4 block_5 */ if ssa_107 { block block_4: /* preds: block_3 */ vec1 32 ssa_173 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_174 = txf ssa_173 (texture_deref), ssa_172 (coord), 0 (sampler), vec1 32 ssa_175 = imov ssa_174.w vec1 32 ssa_176 = deref_var &phi@0 (function_temp float) intrinsic store_deref (ssa_176, ssa_175) (1, 0) /* wrmask=x */ /* access=0 */ /* succs: block_6 */ } else { block block_5: /* preds: block_3 */ vec1 32 ssa_177 = deref_var &phi@0 (function_temp float) intrinsic store_deref (ssa_177, ssa_25) (1, 0) /* wrmask=x */ /* access=0 */ /* succs: block_6 */ } block block_6: /* preds: block_4 block_5 */ vec1 32 ssa_178 = deref_var &phi@0 (function_temp float) vec1 32 ssa_179 = intrinsic load_deref (ssa_178) (0) /* access=0 */ vec1 32 ssa_180 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_181 = intrinsic load_deref (ssa_180) (0) /* access=0 */ vec4 32 ssa_182 = vec4 ssa_181.x, ssa_179, ssa_181.z, ssa_181.w vec1 32 ssa_183 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_183, ssa_182) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_184 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_185 = intrinsic load_deref (ssa_184) (0) /* access=0 */ vec4 32 ssa_186 = vec4 ssa_185.y, ssa_185.y, ssa_185.y, ssa_185.y vec1 32 ssa_187 = imov ssa_186.x /* succs: block_7 block_8 */ if ssa_106 { block block_7: /* preds: block_6 */ vec1 32 ssa_188 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_189 = txf ssa_188 (texture_deref), ssa_187 (coord), 0 (sampler), vec1 32 ssa_190 = deref_var &phi@1 (function_temp vec4) intrinsic store_deref (ssa_190, ssa_189) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ vec1 32 ssa_191 = deref_var &phi@1 (function_temp vec4) intrinsic store_deref (ssa_191, ssa_24) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ vec1 32 ssa_192 = deref_var &phi@1 (function_temp vec4) vec4 32 ssa_193 = intrinsic load_deref (ssa_192) (0) /* access=0 */ vec1 32 ssa_194 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_194, ssa_193) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_195 = deref_var &shader_in (shader_temp Z)
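The error line that repeats through these dumps is NIR's validator firing on every &shader_in[i] deref: the deref's type must equal the element type of its parent's array type, and shader_in's type prints as "Z" / "(null)" here, so the comparison can never hold. A toy restatement of the invariant in C, using stand-in types rather than Mesa's actual definitions (only the quoted condition and the nir_validate.c location come from the log):

    #include <assert.h>

    /* Stand-in for Mesa's glsl_type, just to state the invariant from
     * "error: instr->type == glsl_get_array_element(parent->type)". */
    typedef struct glsl_type {
        const struct glsl_type *array_element; /* element type when this is an array type */
    } glsl_type;

    static const glsl_type *glsl_get_array_element(const glsl_type *t)
    {
        return t->array_element;
    }

    /* For an array deref, the validator requires exact type identity with the
     * parent's element type; a corrupted/unknown parent type fails this. */
    static void validate_array_deref(const glsl_type *deref_type,
                                     const glsl_type *parent_type)
    {
        assert(deref_type == glsl_get_array_element(parent_type));
    }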
vec1 32 ssa_196 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_197 = deref_array &(*ssa_195)[1] (shader_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_198 = intrinsic load_deref (ssa_197) (0) /* access=0 */ vec1 32 ssa_199 = imov ssa_198.x vec1 32 ssa_200 = ishl ssa_199, ssa_105 vec1 32 ssa_201 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_202 = intrinsic load_deref (ssa_201) (0) /* access=0 */ vec4 32 ssa_203 = vec4 ssa_202.x, ssa_200, ssa_202.z, ssa_202.w vec1 32 ssa_204 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_204, ssa_203) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec4 32 ssa_1924 = vec4 ssa_1920, ssa_1921, ssa_1922, ssa_1923 vec1 32 ssa_1925 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1926 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1927 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1928 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_1929 = vec4 ssa_1925, ssa_1926, ssa_1927, ssa_1928 vec1 32 ssa_1930 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1931 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1932 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1933 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_1934 = vec4 ssa_1930, ssa_1931, ssa_1932, ssa_1933 vec1 32 ssa_1935 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1936 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1937 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1938 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_1939 = vec4 ssa_1935, ssa_1936, ssa_1937, ssa_1938 vec1 32 ssa_1940 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1941 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_15 = deref_var &v3 (shader_in vec4) vec4 32 ssa_16 = intrinsic load_deref (ssa_15) (0) /* access=0 */ vec1 32 ssa_17 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_18 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_19 = deref_array &(*ssa_17)[3] (shader_temp vec4) /* &shader_in[3] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) intrinsic store_deref (ssa_19, ssa_16) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_20 = deref_var &v4 (shader_in vec2) vec2 32 ssa_21 = intrinsic load_deref (ssa_20) (0) /* access=0 */ vec1 32 ssa_22 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_23 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_24 = deref_array &(*ssa_22)[4] (shader_temp vec4) /* &shader_in[4] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_25 = intrinsic load_deref (ssa_24) (0) /* access=0 */ vec4 32 ssa_26 = vec4 ssa_21.x, ssa_21.y, ssa_25.z, ssa_25.w intrinsic store_deref (ssa_24, ssa_26) (15, 0) /* wrmask=xyzw */ /* access=0 */ decl_var INTERP_MODE_NONE vec4 out@o0-temp decl_var INTERP_MODE_NONE vec4 out@o6-temp decl_var INTERP_MODE_NONE vec4 out@o7-temp decl_var INTERP_MODE_NONE vec4 out@o9-temp decl_var INTERP_MODE_NONE vec4 r5 decl_var INTERP_MODE_NONE vec4 r7 decl_var INTERP_MODE_NONE vec4 r11 decl_var INTERP_MODE_NONE vec4 r13 block block_0: /* preds: */ vec1 32 ssa_2203 = deref_var &v0 (shader_in uint) vec1 32 ssa_2204 = deref_var &in@v0-temp (function_temp uint) intrinsic copy_deref (ssa_2204, ssa_2203) () vec1 32 ssa_0 = deref_var &in@v0-temp (function_temp uint) vec1 32 ssa_1 = intrinsic load_deref (ssa_0) (0) /* access=0 */ vec1 32 ssa_2 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_3 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_4 = deref_array &(*ssa_2)[0] (shader_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_5 = intrinsic load_deref (ssa_4) (0) /* access=0 */ vec4 32 ssa_6 = vec4 ssa_1, ssa_5.y, ssa_5.z, ssa_5.w intrinsic store_deref (ssa_4, ssa_6) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_7 = deref_var &vs_base_vertex (system uint) vec1 32 ssa_8 = intrinsic load_deref (ssa_7) (0) /* access=0 */ vec1 32 ssa_9 = deref_var &vs_vertex_index (system uint)
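Stripped of the deref/load/store traffic, the prologue these dumps keep repeating is a small piece of integer arithmetic on shader_in[0].x: shift left by 3, then logical shift right by 2 (the load_const 3 and 2 feeding the ishl/ushr pair). A plain-C sketch with an invented variable name:

    #include <stdint.h>

    /* r0.x = (shader_in[0].x << 3) >> 2, as in the ishl/ushr pair at the top
     * of each dump; both shifts operate on 32-bit unsigned values. */
    static uint32_t prologue_index(uint32_t shader_in0_x)
    {
        uint32_t x = shader_in0_x << 3; /* ishl by load_const 3 */
        return x >> 2;                  /* ushr by load_const 2 */
    }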
vec1 32 ssa_10 = intrinsic load_deref (ssa_9) (0) /* access=0 */ vec1 32 ssa_11 = isub ssa_10, ssa_8 vec1 32 ssa_12 = deref_var &shader_in (shader_temp (null)) vec1 32 ssa_13 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_14 = deref_array &(*ssa_12)[1] (shader_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_15 = intrinsic load_deref (ssa_14) (0) /* access=0 */ vec4 32 ssa_16 = vec4 ssa_11, ssa_15.y, ssa_15.z, ssa_15.w intrinsic store_deref (ssa_14, ssa_16) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec4 32 ssa_17 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_18 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_19 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_20 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_21 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_22 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_23 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_24 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_25 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_26 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_27 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_28 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_29 = load_const (true) vec1 1 ssa_30 = load_const (true) vec3 32 ssa_31 = load_const (0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */) vec3 32 ssa_32 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_33 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_34 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_35 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_36 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_37 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_38 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_39 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */) vec2 32 ssa_40 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */) vec4 32 ssa_41 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */,
0x3f800000 /* 1.000000 */) vec4 32 ssa_42 = load_const (0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */) vec4 32 ssa_43 = load_const (0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */) vec4 32 ssa_44 = load_const (0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */) vec3 32 ssa_45 = load_const (0x00000008 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000018 /* 0.000000 */) vec1 32 ssa_46 = load_const (0x3f800000 /* 1.000000 */) vec4 32 ssa_47 = load_const (0x3b808081 /* 0.003922 */, 0x3b808081 /* 0.003922 */, 0x3b808081 /* 0.003922 */, 0x3b808081 /* 0.003922 */) vec4 32 ssa_48 = load_const (0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */) vec3 32 ssa_49 = load_const (0x00000008 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000018 /* 0.000000 */) vec1 32 ssa_50 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_51 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_52 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_53 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_54 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_55 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_56 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_57 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_58 = load_const (0x3a000080 /* 0.000488 */, 0x3a000080 /* 0.000488 */) vec2 32 ssa_59 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_60 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_61 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_62 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_63 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_64 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_65 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_66 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_67 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_68 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_69 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_70 = load_const (0x3a000080 /* 0.000488 */, 0x3a000080 /* 0.000488 */) vec2 32 ssa_71 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_72 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_73 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_74 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_75 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_76 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_77 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_78 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_79 = load_const (true) vec1 1 ssa_80 = load_const (true) vec1 1 ssa_81 = load_const (true) vec4 32 ssa_82 = load_const (0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */) vec1 1 ssa_83 = load_const (true) vec1 1 ssa_84 = load_const (true) vec1 32 ssa_85 = load_const (0x00000004 /* 0.000000 */) vec3 32 ssa_86 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_87 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec3 32 ssa_88 = load_const (0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */) vec3 32 ssa_89 = load_const (0xffffffff /* -nan */, 0xffffffff /* -nan */, 0xffffffff /* -nan */) vec3 32 ssa_90 = load_const 
(0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_91 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_92 = load_const (0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000004 /* 0.000000 */) vec1 32 ssa_93 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_94 = load_const (0x3f7fff00 /* 0.999985 */) vec3 32 ssa_95 = load_const (0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */) vec4 32 ssa_96 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec2 32 ssa_97 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_98 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_99 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_100 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_101 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_102 = load_const (0x00000001 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */) vec1 32 ssa_103 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_104 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_105 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_106 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_107 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_108 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_109 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_110 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_111 = load_const (0x00000001 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000002 /* 0.000000 */) vec1 1 ssa_112 = load_const (true) vec1 1 ssa_113 = load_const (true) vec1 1 ssa_114 = load_const (true) vec1 32 ssa_115 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_116 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_117 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_118 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_119 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_120 = deref_array &(*ssa_118)[0] (shader_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_121 = intrinsic load_deref (ssa_120) (0) /* access=0 */ vec1 32 ssa_122 = imov ssa_121.x vec1 32 ssa_123 = ishl ssa_122, ssa_117 vec1 32 ssa_124 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_125 = intrinsic load_deref (ssa_124) (0) /* access=0 */ vec4 32 ssa_126 = vec4 ssa_123, ssa_125.y, ssa_125.z, ssa_125.w vec1 32 ssa_127 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_127, ssa_126) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_128 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_129 = intrinsic load_deref (ssa_128) (0) /* access=0 */ vec1 32 ssa_130 = imov ssa_129.x vec1 32 ssa_131 = ushr ssa_130, ssa_116 vec1 32 ssa_132 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_133 = intrinsic load_deref (ssa_132) (0) /* access=0 */ vec4 32 ssa_134 = vec4 ssa_131, ssa_133.y, ssa_133.z, ssa_133.w vec1 32 ssa_135 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_135, ssa_134) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_136 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_137 = intrinsic load_deref (ssa_136) (0) /* access=0 */ vec1 32 ssa_138 = imov ssa_137.x vec1 32 ssa_139 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_140 = intrinsic vulkan_resource_index (ssa_139) (0, 1, 6) /* desc-set=0 */ /* binding=1 */ /* 
desc_type=UBO */ vec1 32 ssa_141 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_142 = load_const (0x00000130 /* 0.000000 */) vec1 32 ssa_143 = iadd ssa_141, ssa_142 vec1 32 ssa_144 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_145 = iadd ssa_143, ssa_144 vec1 32 ssa_146 = intrinsic load_ubo (ssa_140, ssa_145) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_147 = iadd ssa_138, ssa_146 vec1 32 ssa_148 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_149 = intrinsic load_deref (ssa_148) (0) /* access=0 */ vec4 32 ssa_150 = vec4 ssa_147, ssa_149.y, ssa_149.z, ssa_149.w vec1 32 ssa_151 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_151, ssa_150) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_152 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_153 = intrinsic load_deref (ssa_152) (0) /* access=0 */ vec1 32 ssa_154 = imov ssa_153.x vec1 32 ssa_155 = iadd ssa_154, ssa_115 vec1 32 ssa_156 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_157 = intrinsic load_deref (ssa_156) (0) /* access=0 */ vec4 32 ssa_158 = vec4 ssa_157.x, ssa_155, ssa_157.z, ssa_157.w vec1 32 ssa_159 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_159, ssa_158) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_160 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_161 = intrinsic load_deref (ssa_160) (0) /* access=0 */ vec4 32 ssa_162 = vec4 ssa_161.x, ssa_161.x, ssa_161.x, ssa_161.x vec1 32 ssa_163 = imov ssa_162.x /* succs: block_1 block_2 */ if ssa_114 { block block_1: /* preds: block_0 */ vec1 32 ssa_164 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_165 = txf ssa_164 (texture_deref), ssa_163 (coord), 0 (sampler), vec2 32 ssa_166 = vec2 ssa_165.y, ssa_165.z vec1 32 ssa_167 = deref_var &phi (function_temp vec2) intrinsic store_deref (ssa_167, ssa_166) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_3 */ } else { block block_2: /* preds: block_0 */ vec1 32 ssa_168 = deref_var &phi (function_temp vec2) intrinsic store_deref (ssa_168, ssa_26) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ vec1 32 ssa_169 = deref_var &phi (function_temp vec2) vec2 32 ssa_170 = intrinsic load_deref (ssa_169) (0) /* access=0 */ vec1 32 ssa_171 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_172 = intrinsic load_deref (ssa_171) (0) /* access=0 */ vec4 32 ssa_173 = vec4 ssa_170.x, ssa_172.y, ssa_170.y, ssa_172.w vec1 32 ssa_174 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_174, ssa_173) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_175 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_176 = intrinsic load_deref (ssa_175) (0) /* access=0 */ vec4 32 ssa_177 = vec4 ssa_176.y, ssa_176.y, ssa_176.y, ssa_176.y vec1 32 ssa_178 = imov ssa_177.x /* succs: block_4 block_5 */ if ssa_113 { block block_4: /* preds: block_3 */ vec1 32 ssa_179 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_180 = txf ssa_179 (texture_deref), ssa_178 (coord), 0 (sampler), vec1 32 ssa_181 = imov ssa_180.w vec1 32 ssa_182 = deref_var &phi@0 (function_temp float) intrinsic store_deref (ssa_182, ssa_181) (1, 0) /* wrmask=x */ /* access=0 */ /* succs: block_6 */ } else { block block_5: /* preds: block_3 */ vec1 32 ssa_183 = deref_var &phi@0 (function_temp float) intrinsic store_deref (ssa_183, ssa_25) (1, 0) /* wrmask=x */ /* access=0 */ /* succs: block_6 */ } block block_6: /* preds: block_4 block_5 */ vec1 32 ssa_184 = deref_var &phi@0 (function_temp float) vec1 32 ssa_185 = intrinsic load_deref (ssa_184) (0) /* access=0 */ vec1 32 ssa_186 = 
deref_var &r0 (shader_temp vec4) vec4 32 ssa_187 = intrinsic load_deref (ssa_186) (0) /* access=0 */ vec4 32 ssa_188 = vec4 ssa_187.x, ssa_185, ssa_187.z, ssa_187.w vec1 32 ssa_189 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_189, ssa_188) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_190 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_191 = intrinsic load_deref (ssa_190) (0) /* access=0 */ vec4 32 ssa_192 = vec4 ssa_191.y, ssa_191.y, ssa_191.y, ssa_191.y vec1 32 ssa_193 = imov ssa_192.x /* succs: block_7 block_8 */ if ssa_112 { block block_7: /* preds: block_6 */ vec1 32 ssa_194 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_195 = txf ssa_194 (texture_deref), ssa_193 (coord), 0 (sampler), vec1 32 ssa_196 = deref_var &phi@1 (function_temp vec4) intrinsic store_deref (ssa_196, ssa_195) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ vec1 32 ssa_197 = deref_var &phi@1 (function_temp vec4) intrinsic store_deref (ssa_197, ssa_24) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ vec1 32 ssa_198 = deref_var &phi@1 (function_temp vec4) vec4 32 ssa_199 = intrinsic load_deref (ssa_198) (0) /* access=0 */ vec1 32 ssa_200 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_200, ssa_199) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_201 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_202 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_203 = deref_array &(*ssa_201)[1] (shader_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_204 = intrinsic load_deref (ssa_203) (0) /* access=0 */ vec4 32 ssa_205 = vec4 ssa_204.x, ssa_204.x, ssa_204.x, ssa_204.x vec4 32 ssa_206 = ishl ssa_205, ssa_111 vec1 32 ssa_207 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_207, ssa_206) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_208 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_209 = intrinsic load_deref (ssa_208) (0) /* access=0 */ vec4 32 ssa_210 = vec4 ssa_209.z, ssa_209.w, ssa_209.x, ssa_209.x vec1 32 ssa_211 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_212 = intrinsic load_deref (ssa_211) (0) /* access=0 */ vec4 32 ssa_213 = iadd ssa_210, ssa_212 vec1 32 ssa_214 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_214, ssa_213) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_215 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_216 = intrinsic load_deref (ssa_215) (0) /* access=0 */ vec1 32 ssa_217 = imov ssa_216.y vec1 32 ssa_218 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_219 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_220 = deref_array &(*ssa_218)[1] (shader_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_221 = intrinsic load_deref (ssa_220) (0) /* access=0 */ vec1 32 ssa_222 = imov ssa_221.x vec1 32 ssa_223 = iadd ssa_217, ssa_222 vec1 32 ssa_224 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_225 = intrinsic load_deref (ssa_224) (0) /* access=0 */ vec4 32 ssa_226 = vec4 ssa_225.x, ssa_223, ssa_225.z, ssa_225.w vec1 32 ssa_227 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_227, ssa_226) (15, 0) /* wrmask=xyzw */ /* access=0 */
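Every texel fetch in blocks 1/2, 4/5 and 7/8 has the same shape: an if whose condition is a load_const true, a txf from the buffer texture t80 in the then-arm, an all-zero constant stored in the else-arm, and a phi variable read back afterwards. In C terms it is a guarded fetch with a zero fallback; texel_fetch() below is a made-up stand-in for txf, not a real API:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { float x, y, z, w; } vec4f;

    /* Hypothetical stand-in for "txf t80 (texture_deref), coord, 0 (sampler)". */
    extern vec4f texel_fetch(int buffer_texture, uint32_t coord);

    static vec4f guarded_fetch(int t80, uint32_t coord, bool guard)
    {
        vec4f phi = {0.0f, 0.0f, 0.0f, 0.0f}; /* the else-arm stores all zeros  */
        if (guard)                            /* the guard is load_const (true) */
            phi = texel_fetch(t80, coord);
        return phi;                           /* load_deref of the phi variable */
    }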
vec1 32 ssa_228 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_229 = intrinsic load_deref (ssa_228) (0) /* access=0 */ vec1 32 ssa_205 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_206 = intrinsic load_deref (ssa_205) (0) /* access=0 */ vec3 32 ssa_207 = vec3 ssa_206.y, ssa_206.y, ssa_206.y vec1 32 ssa_208 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_209 = intrinsic load_deref (ssa_208) (0) /* access=0 */ vec1 32 ssa_2 = deref_var &shader_in (function_temp Z) vec1 32 ssa_3 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_4 = deref_array &(*ssa_2)[0] (function_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_5 = intrinsic load_deref (ssa_4) (0) /* access=0 */ vec4 32 ssa_6 = vec4 ssa_1, ssa_5.y, ssa_5.z, ssa_5.w vec1 32 ssa_1942 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1943 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_1944 = vec4 ssa_1940, ssa_1941, ssa_1942, ssa_1943 vec1 32 ssa_23 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1945 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1946 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_1947 = vec2 ssa_1945, ssa_1946 vec1 32 ssa_25 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_26 = load_const (true) vec1 1 ssa_27 = load_const (true) vec1 32 ssa_28 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_29 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1948 = load_const (0x3c010204 /* 0.007874 */) vec1 32 ssa_1949 = load_const (0x3c010204 /* 0.007874 */) vec1 32 ssa_1950 = load_const (0x3c010204 /* 0.007874 */) vec3 32 ssa_1951 = vec3 ssa_1948, ssa_1949, ssa_1950 vec1 32 ssa_1952 = load_const (0xc2fe0000 /* -127.000000 */) vec1 32 ssa_1953 = load_const (0xc2fe0000 /* -127.000000 */) vec1 32 ssa_1954 = load_const (0xc2fe0000 /* -127.000000 */) vec3 32 ssa_1955 = vec3 ssa_1952, ssa_1953, ssa_1954 vec1 32 ssa_1956 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_1957 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_1958 = load_const (0x000000ff /* 0.000000 */) vec3 32 ssa_1959 = vec3 ssa_1956, ssa_1957, ssa_1958 vec1 32 ssa_1960 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1961 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_1962 = vec2 ssa_1960, ssa_1961 vec1 32 ssa_34 = load_const (0xffffffff /* -nan */) vec1 32 ssa_35 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_36 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_37 = load_const (0xffffffff /* -nan */) vec1 32 ssa_38 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_39 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_40 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1963 = load_const (0x3c010204 /* 0.007874 */) vec1 32 ssa_1964 = load_const (0x3c010204 /* 0.007874 */) vec1 32 ssa_1965 = load_const (0x3c010204 /* 0.007874 */) vec3 32 ssa_1966 = vec3 ssa_1963, ssa_1964, ssa_1965 vec1 32 ssa_1967 = load_const (0xc2fe0000 /* -127.000000 */) vec1 32 ssa_1968 = load_const (0xc2fe0000 /* -127.000000 */) vec1 32 ssa_1969 = load_const (0xc2fe0000 /* -127.000000 */) vec3 32 ssa_1970 = vec3 ssa_1967, ssa_1968, ssa_1969 vec1 32 ssa_1971 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_1972 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_1973 = load_const (0x000000ff /* 0.000000 */) vec3 32 ssa_1974 = vec3 ssa_1971, ssa_1972, ssa_1973 vec1 32 ssa_1975 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1976 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_1977 = vec2 ssa_1975, ssa_1976 vec1 32 ssa_45 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_46 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_47 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_48 = load_const (0x00000000 /* 0.000000 */) vec1 32
ssa_49 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_50 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_51 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_52 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_53 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_1978 = load_const (0x3b808081 /* 0.003922 */) vec1 32 ssa_1979 = load_const (0x3b808081 /* 0.003922 */) vec1 32 ssa_1980 = load_const (0x3b808081 /* 0.003922 */) vec1 32 ssa_1981 = load_const (0x3b808081 /* 0.003922 */) vec4 32 ssa_1982 = vec4 ssa_1978, ssa_1979, ssa_1980, ssa_1981 vec1 32 ssa_1983 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_1984 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_1985 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_1986 = load_const (0x000000ff /* 0.000000 */) vec4 32 ssa_1987 = vec4 ssa_1983, ssa_1984, ssa_1985, ssa_1986 vec1 32 ssa_1988 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1989 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_1990 = load_const (0x00000018 /* 0.000000 */) vec3 32 ssa_1991 = vec3 ssa_1988, ssa_1989, ssa_1990 vec1 32 ssa_57 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_58 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_59 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_60 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1992 = load_const (0x3a000080 /* 0.000488 */) vec1 32 ssa_1993 = load_const (0x3a000080 /* 0.000488 */) vec2 32 ssa_1994 = vec2 ssa_1992, ssa_1993 vec1 32 ssa_1995 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_1996 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_1997 = vec2 ssa_1995, ssa_1996 vec1 32 ssa_1998 = load_const (0x3a000080 /* 0.000488 */) vec1 32 ssa_1999 = load_const (0x3a000080 /* 0.000488 */) vec2 32 ssa_2000 = vec2 ssa_1998, ssa_1999 vec1 32 ssa_2001 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2002 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_2003 = vec2 ssa_2001, ssa_2002 vec1 32 ssa_2004 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2005 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_2006 = vec2 ssa_2004, ssa_2005 vec1 32 ssa_66 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_67 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_68 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_69 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_70 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_71 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_72 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_73 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_74 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_75 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_76 = load_const (true) vec1 1 ssa_77 = load_const (true) vec1 1 ssa_78 = load_const (true) vec1 32 ssa_2007 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2008 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_2009 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2010 = load_const (0x00000002 /* 0.000000 */) vec4 32 ssa_2011 = vec4 ssa_2007, ssa_2008, ssa_2009, ssa_2010 vec1 1 ssa_80 = load_const (true) vec1 32 ssa_2012 = load_const (0x38000000 /* 0.000031 */) vec1 32 ssa_2013 = load_const (0x38000000 /* 0.000031 */) vec1 32 ssa_2014 = load_const (0x38000000 /* 0.000031 */) vec3 32 ssa_2015 = vec3 ssa_2012, ssa_2013, ssa_2014 vec1 32 ssa_2016 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2017 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2018 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2019 = load_const (0x00000010 /* 0.000000 
*/) vec4 32 ssa_2020 = vec4 ssa_2016, ssa_2017, ssa_2018, ssa_2019 vec1 32 ssa_2021 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2022 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_2023 = vec2 ssa_2021, ssa_2022 vec1 32 ssa_84 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_85 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_86 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_87 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2024 = load_const (0x00000005 /* 0.000000 */) vec1 32 ssa_2025 = load_const (0x00000006 /* 0.000000 */) vec2 32 ssa_2026 = vec2 ssa_2024, ssa_2025 vec1 32 ssa_2027 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2028 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_2029 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_2030 = load_const (0x00000004 /* 0.000000 */) vec4 32 ssa_2031 = vec4 ssa_2027, ssa_2028, ssa_2029, ssa_2030 vec1 32 ssa_90 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_91 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_92 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_93 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_94 = load_const (0x00000007 /* 0.000000 */) vec1 1 ssa_95 = load_const (true) vec1 1 ssa_96 = load_const (true) vec1 32 ssa_97 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_98 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_99 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_100 = deref_var &shader_in (function_temp Z) vec1 32 ssa_101 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_102 = deref_array &(*ssa_100)[0] (function_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_103 = intrinsic load_deref (ssa_102) (0) /* access=0 */ vec1 32 ssa_104 = imov ssa_103.x vec1 32 ssa_105 = ishl ssa_104, ssa_99 vec1 32 ssa_106 = deref_var &r0 (function_temp vec4) vec4 32 ssa_107 = intrinsic load_deref (ssa_106) (0) /* access=0 */ vec4 32 ssa_108 = vec4 ssa_105, ssa_107.y, ssa_107.z, ssa_107.w vec1 32 ssa_109 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_109, ssa_108) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_110 = deref_var &r0 (function_temp vec4) vec4 32 ssa_111 = intrinsic load_deref (ssa_110) (0) /* access=0 */ vec1 32 ssa_112 = imov ssa_111.x vec1 32 ssa_113 = ushr ssa_112, ssa_98 vec1 32 ssa_114 = deref_var &r0 (function_temp vec4) vec4 32 ssa_115 = intrinsic load_deref (ssa_114) (0) /* access=0 */ vec4 32 ssa_116 = vec4 ssa_113, ssa_115.y, ssa_115.z, ssa_115.w vec1 32 ssa_117 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_117, ssa_116) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_118 = deref_var &r0 (function_temp vec4) vec4 32 ssa_119 = intrinsic load_deref (ssa_118) (0) /* access=0 */ vec1 32 ssa_120 = imov ssa_119.x vec1 32 ssa_121 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_122 = intrinsic vulkan_resource_index (ssa_121) (0, 1, 6) /* desc-set=0 */ /* binding=1 */ /* desc_type=UBO */ vec1 32 ssa_123 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_124 = load_const (0x00000130 /* 0.000000 */) vec1 32 ssa_125 = iadd ssa_123, ssa_124 vec1 32 ssa_126 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_127 = iadd ssa_125, ssa_126 vec1 32 ssa_128 = intrinsic load_ubo (ssa_122, ssa_127) (4, 0) /* align_mul=4 */ /* align_offset=0 */
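The vulkan_resource_index/load_ubo pairs all compute the same address: the constant byte offset 0x130 + 0x4 into the UBO at descriptor set 0, binding 1, with the fetched dword added back into r0.x. As a C sketch, where ubo_dword() is an invented helper rather than a Mesa or Vulkan API:

    #include <stdint.h>

    /* Invented helper: the 32-bit word at byte_offset in the UBO identified
     * by (set, binding), i.e. what the load_ubo above reads. */
    extern uint32_t ubo_dword(uint32_t set, uint32_t binding, uint32_t byte_offset);

    static uint32_t add_ubo_term(uint32_t r0_x)
    {
        uint32_t byte_offset = 0x130 + 0x4;        /* the two load_consts + iadd */
        uint32_t v = ubo_dword(0, 1, byte_offset); /* align_mul=4 in the dump    */
        return r0_x + v;                           /* iadd, stored back to r0.x  */
    }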
vec4 32 ssa_17 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_18 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_19 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_20 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_21 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_22 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_23 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_24 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_25 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_26 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_27 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_28 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_29 = load_const (true) vec1 1 ssa_30 = load_const (true) vec3 32 ssa_31 = load_const (0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */) vec3 32 ssa_32 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_33 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_34 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_35 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_36 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_37 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_38 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_39 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_40 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */) vec2 32 ssa_41 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */) vec4 32 ssa_42 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec4 32 ssa_43 = load_const (0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */) vec4 32 ssa_44 = load_const (0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */) vec4 32 ssa_45 = load_const (0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */, 0x000000ff /* 0.000000 */) vec3 32 ssa_46 = load_const (0x00000008 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000018 /* 0.000000 */) vec1 32 ssa_47 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_48 = load_const (0x3a000080 /* 0.000488 */, 0x3a000080 /* 0.000488 */) vec2 32 ssa_49 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_50 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_51 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_52 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_53 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_54 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_55 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_56 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_57 =
load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_58 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_59 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_60 = load_const (0x3a000080 /* 0.000488 */, 0x3a000080 /* 0.000488 */) vec2 32 ssa_61 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_62 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_63 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_64 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_65 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_66 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_67 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_68 = load_const (true) vec1 1 ssa_69 = load_const (true) vec1 1 ssa_70 = load_const (true) vec4 32 ssa_71 = load_const (0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */) vec1 1 ssa_72 = load_const (true) vec1 32 ssa_73 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_74 = load_const (true) vec1 32 ssa_75 = load_const (0x00000004 /* 0.000000 */) vec3 32 ssa_76 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_77 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec3 32 ssa_78 = load_const (0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */) vec3 32 ssa_79 = load_const (0xffffffff /* -nan */, 0xffffffff /* -nan */, 0xffffffff /* -nan */) vec3 32 ssa_80 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_81 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_82 = load_const (0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000004 /* 0.000000 */) vec1 32 ssa_83 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_84 = load_const (0x3f7fff00 /* 0.999985 */) vec3 32 ssa_85 = load_const (0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */, 0x38000080 /* 0.000031 */) vec4 32 ssa_86 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec2 32 ssa_87 = load_const (0x00000010 /* 0.000000 */, 0x00000010 /* 0.000000 */) vec1 32 ssa_88 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_89 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_90 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_91 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_92 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_93 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_94 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_95 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_96 = load_const (0x00000001 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */) vec1 32 ssa_97 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_98 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_99 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_100 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_101 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_102 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_103 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_104 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_105 = load_const (0x00000001 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000002 /* 0.000000 */) vec1 1 ssa_106 = load_const (true) vec1 1 ssa_107 = load_const (true) vec1 1 ssa_108 = load_const 
(true) vec1 32 ssa_109 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_110 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_111 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_112 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_113 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_114 = deref_array &(*ssa_112)[0] (shader_temp vec4) /* &shader_in[0] */ vec4 32 ssa_115 = intrinsic load_deref (ssa_114) (0) /* access=0 */ vec1 32 ssa_116 = imov ssa_115.x vec1 32 ssa_117 = ishl ssa_116, ssa_111 vec1 32 ssa_118 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_119 = intrinsic load_deref (ssa_118) (0) /* access=0 */ vec4 32 ssa_120 = vec4 ssa_117, ssa_119.y, ssa_119.z, ssa_119.w vec1 32 ssa_121 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_121, ssa_120) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_122 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_123 = intrinsic load_deref (ssa_122) (0) /* access=0 */ vec1 32 ssa_124 = imov ssa_123.x vec1 32 ssa_125 = ushr ssa_124, ssa_110 vec1 32 ssa_126 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_127 = intrinsic load_deref (ssa_126) (0) /* access=0 */ vec4 32 ssa_128 = vec4 ssa_125, ssa_127.y, ssa_127.z, ssa_127.w vec1 32 ssa_129 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_129, ssa_128) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_130 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_131 = intrinsic load_deref (ssa_130) (0) /* access=0 */ vec1 32 ssa_132 = imov ssa_131.x vec1 32 ssa_133 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_134 = intrinsic vulkan_resource_index (ssa_133) (0, 1, 6) /* desc-set=0 */ /* binding=1 */ /* desc_type=UBO */ vec1 32 ssa_135 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_136 = load_const (0x00000130 /* 0.000000 */) vec1 32 ssa_137 = iadd ssa_135, ssa_136 vec1 32 ssa_138 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_139 = iadd ssa_137, ssa_138 vec1 32 ssa_140 = intrinsic load_ubo (ssa_134, ssa_139) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_141 = iadd ssa_132, ssa_140 vec1 32 ssa_142 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_143 = intrinsic load_deref (ssa_142) (0) /* access=0 */ vec4 32 ssa_144 = vec4 ssa_141, ssa_143.y, ssa_143.z, ssa_143.w vec1 32 ssa_145 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_145, ssa_144) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_146 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_147 = intrinsic load_deref (ssa_146) (0) /* access=0 */ vec1 32 ssa_148 = imov ssa_147.x vec1 32 ssa_149 = iadd ssa_148, ssa_109 vec1 32 ssa_150 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_151 = intrinsic load_deref (ssa_150) (0) /* access=0 */ vec4 32 ssa_152 = vec4 ssa_151.x, ssa_149, ssa_151.z, ssa_151.w vec1 32 ssa_153 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_153, ssa_152) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_154 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_155 = intrinsic load_deref (ssa_154) (0) /* access=0 */ vec4 32 ssa_156 = vec4 ssa_155.x, ssa_155.x, ssa_155.x, ssa_155.x vec1 32 ssa_157 = imov ssa_156.x /* succs: block_1 block_2 */ if ssa_108 { block block_1: /* preds: block_0 */ vec1 32 ssa_158 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_159 = txf ssa_158 (texture_deref), ssa_157 (coord), 0 (sampler), vec2 32 ssa_160 = vec2 ssa_159.y, ssa_159.z vec1 32 ssa_161 = deref_var &phi (function_temp vec2) intrinsic store_deref (ssa_161, ssa_160) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_3 */ } else { block block_2: /* preds: 
block_0 */ vec1 32 ssa_162 = deref_var &phi (function_temp vec2) intrinsic store_deref (ssa_162, ssa_26) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ vec1 32 ssa_163 = deref_var &phi (function_temp vec2) vec2 32 ssa_164 = intrinsic load_deref (ssa_163) (0) /* access=0 */ vec1 32 ssa_165 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_166 = intrinsic load_deref (ssa_165) (0) /* access=0 */ vec4 32 ssa_167 = vec4 ssa_164.x, ssa_166.y, ssa_164.y, ssa_166.w vec1 32 ssa_168 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_168, ssa_167) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_169 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_170 = intrinsic load_deref (ssa_169) (0) /* access=0 */ vec4 32 ssa_171 = vec4 ssa_170.y, ssa_170.y, ssa_170.y, ssa_170.y vec1 32 ssa_172 = imov ssa_171.x /* succs: block_4 block_5 */ if ssa_107 { block block_4: /* preds: block_3 */ vec1 32 ssa_173 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_174 = txf ssa_173 (texture_deref), ssa_172 (coord), 0 (sampler), vec1 32 ssa_175 = imov ssa_174.w vec1 32 ssa_176 = deref_var &phi@0 (function_temp float) intrinsic store_deref (ssa_176, ssa_175) (1, 0) /* wrmask=x */ /* access=0 */ /* succs: block_6 */ } else { block block_5: /* preds: block_3 */ vec1 32 ssa_177 = deref_var &phi@0 (function_temp float) intrinsic store_deref (ssa_177, ssa_25) (1, 0) /* wrmask=x */ /* access=0 */ /* succs: block_6 */ } block block_6: /* preds: block_4 block_5 */ vec1 32 ssa_178 = deref_var &phi@0 (function_temp float) vec1 32 ssa_179 = intrinsic load_deref (ssa_178) (0) /* access=0 */ vec1 32 ssa_180 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_181 = intrinsic load_deref (ssa_180) (0) /* access=0 */ vec4 32 ssa_182 = vec4 ssa_181.x, ssa_179, ssa_181.z, ssa_181.w vec1 32 ssa_183 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_183, ssa_182) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_184 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_185 = intrinsic load_deref (ssa_184) (0) /* access=0 */ vec4 32 ssa_186 = vec4 ssa_185.y, ssa_185.y, ssa_185.y, ssa_185.y vec1 32 ssa_187 = imov ssa_186.x /* succs: block_7 block_8 */ if ssa_106 { block block_7: /* preds: block_6 */ vec1 32 ssa_188 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_189 = txf ssa_188 (texture_deref), ssa_187 (coord), 0 (sampler), vec1 32 ssa_190 = deref_var &phi@1 (function_temp vec4) intrinsic store_deref (ssa_190, ssa_189) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ vec1 32 ssa_191 = deref_var &phi@1 (function_temp vec4) intrinsic store_deref (ssa_191, ssa_24) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ vec1 32 ssa_192 = deref_var &phi@1 (function_temp vec4) vec4 32 ssa_193 = intrinsic load_deref (ssa_192) (0) /* access=0 */ vec1 32 ssa_194 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_194, ssa_193) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_195 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_196 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_197 = deref_array &(*ssa_195)[1] (shader_temp vec4) /* &shader_in[1] */ vec4 32 ssa_198 = intrinsic load_deref (ssa_197) (0) /* access=0 */ vec4 32 ssa_199 = vec4 ssa_198.x, ssa_198.x, ssa_198.x, ssa_198.x vec4 32 ssa_200 = ishl ssa_199, ssa_105 vec1 32 ssa_201 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_201, ssa_200) (15, 0) /* wrmask=xyzw */ /* access=0 */ 
vec1 32 ssa_202 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_203 = intrinsic load_deref (ssa_202) (0) /* access=0 */ vec4 32 ssa_204 = vec4 ssa_203.z, ssa_203.w, ssa_203.x, ssa_203.x vec1 32 ssa_205 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_206 = intrinsic load_deref (ssa_205) (0) /* access=0 */ vec4 32 ssa_207 = iadd ssa_204, ssa_206 vec1 32 ssa_208 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_208, ssa_207) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_209 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_210 = intrinsic load_deref (ssa_209) (0) /* access=0 */ vec1 32 ssa_211 = imov ssa_210.y vec1 32 ssa_212 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_213 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_214 = deref_array &(*ssa_212)[1] (shader_temp vec4) /* &shader_in[1] */ vec4 32 ssa_215 = intrinsic load_deref (ssa_214) (0) /* access=0 */ vec1 32 ssa_216 = imov ssa_215.x vec1 32 ssa_217 = iadd ssa_211, ssa_216 vec1 32 ssa_218 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_219 = intrinsic load_deref (ssa_218) (0) /* access=0 */ vec4 32 ssa_220 = vec4 ssa_219.x, ssa_217, ssa_219.z, ssa_219.w vec1 32 ssa_221 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_221, ssa_220) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_222 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_223 = intrinsic load_deref (ssa_222) (0) /* access=0 */ vec1 32 ssa_224 = imov ssa_223.y vec1 32 ssa_225 = ushr ssa_104, ssa_103 vec1 32 ssa_226 = imul ssa_224, ssa_102 vec1 32 ssa_227 = iadd ssa_226, ssa_225 vec1 32 ssa_228 = iadd ssa_227, ssa_101 vec1 32 ssa_229 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_230 = intrinsic vulkan_resource_index (ssa_229) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_231 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_232 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_233 = ishl ssa_228, ssa_232 vec1 32 ssa_234 = iadd ssa_231, ssa_233 vec1 32 ssa_235 = intrinsic load_ssbo (ssa_230, ssa_234) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_236 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_237 = intrinsic load_deref (ssa_236) (0) /* access=0 */ vec4 32 ssa_238 = vec4 ssa_235, ssa_237.y, ssa_237.z, ssa_237.w vec1 32 ssa_239 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_239, ssa_238) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_240 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_241 = intrinsic load_deref (ssa_240) (0) /* access=0 */ vec1 32 ssa_242 = imov ssa_241.x vec1 32 ssa_243 = ushr ssa_100, ssa_99 vec1 32 ssa_244 = imul ssa_242, ssa_98 vec1 32 ssa_245 = iadd ssa_244, ssa_243 vec1 32 ssa_246 = iadd ssa_245, ssa_97 vec1 32 ssa_247 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_248 = intrinsic vulkan_resource_index (ssa_247) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_249 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_250 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_251 = ishl ssa_246, ssa_250 vec1 32 ssa_252 = iadd ssa_249, ssa_251 vec1 32 ssa_253 = intrinsic load_ssbo (ssa_248, ssa_252) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_254 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_255 = intrinsic load_deref (ssa_254) (0) /* access=0 */ vec4 32 ssa_256 = vec4 ssa_255.x, ssa_253, ssa_255.z, ssa_255.w vec1 32 ssa_257 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_257, ssa_256) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_258 = deref_var 
&r2 (shader_temp vec4) vec4 32 ssa_259 = intrinsic load_deref (ssa_258) (0) /* access=0 */ vec4 32 ssa_260 = iadd ssa_259, ssa_96 vec1 32 ssa_261 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_261, ssa_260) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_262 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_263 = intrinsic load_deref (ssa_262) (0) /* access=0 */ vec1 32 ssa_264 = imov ssa_263.y vec1 32 ssa_265 = ushr ssa_95, ssa_94 vec1 32 ssa_266 = imul ssa_264, ssa_93 vec1 32 ssa_267 = iadd ssa_266, ssa_265 vec1 32 ssa_268 = iadd ssa_267, ssa_92 vec1 32 ssa_269 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_270 = intrinsic vulkan_resource_index (ssa_269) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_271 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_272 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_273 = ishl ssa_268, ssa_272 vec1 32 ssa_274 = iadd ssa_271, ssa_273 vec1 32 ssa_275 = intrinsic load_ssbo (ssa_270, ssa_274) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_276 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_277 = intrinsic load_deref (ssa_276) (0) /* access=0 */ vec4 32 ssa_278 = vec4 ssa_277.x, ssa_275, ssa_277.z, ssa_277.w vec1 32 ssa_279 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_279, ssa_278) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_280 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_281 = intrinsic load_deref (ssa_280) (0) /* access=0 */ vec1 32 ssa_282 = imov ssa_281.x vec1 32 ssa_283 = ushr ssa_91, ssa_90 vec1 32 ssa_284 = imul ssa_282, ssa_89 vec1 32 ssa_285 = iadd ssa_284, ssa_283 vec1 32 ssa_286 = iadd ssa_285, ssa_88 vec1 32 ssa_287 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_288 = intrinsic vulkan_resource_index (ssa_287) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_129 = iadd ssa_120, ssa_128 vec1 32 ssa_130 = deref_var &r0 (function_temp vec4) vec4 32 ssa_131 = intrinsic load_deref (ssa_130) (0) /* access=0 */ vec4 32 ssa_132 = vec4 ssa_129, ssa_131.y, ssa_131.z, ssa_131.w
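The load_ssbo sequences above (set 0, binding 4, desc_type=SSBO) reduce to indexed dword loads: the imul is by the constant 1, the ushr/iadd terms are zeros, and the ishl by 2 converts the element index into a byte offset. A sketch with an invented ssbo_dword() helper:

    #include <stdint.h>

    /* Invented helper standing in for load_ssbo on (set, binding). */
    extern uint32_t ssbo_dword(uint32_t set, uint32_t binding, uint32_t byte_offset);

    static uint32_t fetch_element(uint32_t index)
    {
        /* imul by 1 and the (0 >> 2) + 0 terms are no-ops in these dumps */
        uint32_t element = index * 1 + (0u >> 2) + 0;
        return ssbo_dword(0, 4, element << 2); /* ishl by 2: dwords -> bytes */
    }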
vec1 32 ssa_230 = imov ssa_229.y vec1 32 ssa_133 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_133, ssa_132) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1771 = intrinsic load_base_instance () () vec1 32 ssa_1772 = intrinsic load_base_instance () () vec1 32 ssa_1773 = intrinsic load_instance_id () () vec1 32 ssa_1774 = iadd ssa_1773, ssa_1772 vec1 32 ssa_31 = isub ssa_1774, ssa_1771 vec1 32 ssa_32 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_33 = load_const (0x00000006 /* 0.000000 */) vec1 32 ssa_34 = deref_array &(*ssa_32)[6] (shader_temp vec4) /* &shader_in[6] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_35 = intrinsic load_deref (ssa_34) (0) /* access=0 */ vec4 32 ssa_36 = vec4 ssa_31, ssa_35.y, ssa_35.z, ssa_35.w intrinsic store_deref (ssa_34, ssa_36) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec4 32 ssa_37 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_38 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_39 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_40 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_41 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec4 32 ssa_42 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */)
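The load_base_instance/load_instance_id cluster above computes a zero-based instance index and stores it into shader_in[6], tripping the same array-deref check. In C, with the two system-value intrinsics as stand-in functions:

    #include <stdint.h>

    /* Stand-ins for the NIR system-value intrinsics in the dump. */
    extern uint32_t load_base_instance(void);
    extern uint32_t load_instance_id(void);

    static uint32_t zero_based_instance(void)
    {
        uint32_t base = load_base_instance();      /* ssa_1771 / ssa_1772 */
        uint32_t id   = load_instance_id() + base; /* iadd -> ssa_1774    */
        return id - base;                          /* isub -> ssa_31      */
    }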
*/ /* access=0 */ vec1 32 ssa_134 = deref_var &r0ssa_110, 0x00000000 /* 0.000000 */) vec2 32 ssa_43 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_44 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_45 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_46 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_47 = load_const (true) vec1 1 ssa_48 = load_const (true) vec1 32 ssa_49 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_50 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_51 = load_const (0x3f800000 /* 1.000000 */) vec3 32 ssa_52 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_53 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec3 32 ssa_54 = load_const (0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */) vec3 32 ssa_55 = load_const (0xffffffff /* -nan */, 0xffffffff /* -nan */, 0xffffffff /* -nan */) vec3 32 ssa_56 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_57 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec3 32 ssa_58 = load_const (0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000004 /* 0.000000 */) vec1 32 ssa_59 = load_const (0x46fffe00 /* 32767.000000 */) vec1 32 ssa_60 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_61 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */) vec2 32 ssa_62 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */) vec4 32 ssa_63 = load_const (0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */, 0x3f800000 /* 1.000000 */) vec4 32 ssa_64 = load_const (0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */, 0xbf800000 /* -1.000000 */) vec4 32 ssa_65 = load_const (0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */, 0x3c010204 /* 0.007874 */) vec1 32 ssa_66 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_67 = load_const (0x3f800000 /* 1.000000 */) vec2 32 ssa_68 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec2 32 ssa_69 = load_const (0x41800000 /* 16.000000 */, 0x41800000 /* 16.000000 */) vec1 32 ssa_70 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_71 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_72 = load_const (true) vec1 1 ssa_73 = load_const (true) vec1 1 ssa_74 = load_const (true) vec4 32 ssa_75 = load_const (0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */, 0x00000001 /* 0.000000 */, 0x00000002 /* 0.000000 */) vec1 1 ssa_76 = load_const (true) vec1 1 ssa_77 = load_const (true) vec1 32 ssa_78 = load_const (0x00000004 /* 0.000000 */) vec1 1 ssa_79 = load_const (true) vec1 32 ssa_80 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_81 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_82 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_83 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_84 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_85 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_86 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_87 = load_const (0x00000006 /* 0.000000 */) vec1 32 ssa_88 = deref_array &(*ssa_86)[6] (shader_temp vec4) /* &shader_in[6] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_89 = intrinsic load_deref (ssa_88) (0) /* access=0 */ vec1 32 ssa_90 
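The line error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) that keeps appearing inside these dumps is the NIR validator rejecting the shader: a deref_array instruction must produce exactly the element type of the array it indexes, and the &shader_in[...] derefs emitted here do not, which is why the whole IR gets printed. A minimal C sketch of the invariant being checked, using stand-in structs rather than Mesa's real nir_deref_instr/glsl_type (in Mesa the types are interned, so pointer equality is the actual comparison):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical, simplified restatement of the failing nir_validate
     * assertion; these are stand-in types, not Mesa's structs. */
    struct glsl_type {
        const char *name;
        const struct glsl_type *array_element; /* NULL if not an array */
    };

    static const struct glsl_type *
    glsl_get_array_element(const struct glsl_type *t)
    {
        return t->array_element;
    }

    struct deref {
        const struct glsl_type *type;  /* type this deref produces      */
        const struct deref *parent;    /* the array deref being indexed */
    };

    /* The check at nir_validate.c:466 in the log. */
    static bool validate_deref_array(const struct deref *instr)
    {
        return instr->type == glsl_get_array_element(instr->parent->type);
    }

    int main(void)
    {
        static const struct glsl_type vec4_t   = { "vec4", NULL };
        static const struct glsl_type uvec4_t  = { "uvec4", NULL };
        static const struct glsl_type vec4_arr = { "vec4[]", &vec4_t };
        struct deref parent = { &vec4_arr, NULL };
        struct deref ok  = { &vec4_t,  &parent }; /* &arr[i] as vec4: valid */
        struct deref bad = { &uvec4_t, &parent }; /* wrong element type     */
        printf("ok=%d bad=%d\n", validate_deref_array(&ok),
               validate_deref_array(&bad)); /* ok=1 bad=0 -> validator error */
        return 0;
    }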
vec1 32 ssa_90 = imov ssa_89.x vec1 32 ssa_91 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_92 = intrinsic vulkan_resource_index (ssa_91) (0, 1, 6) /* desc-set=0 */ /* binding=1 */ /* desc_type=UBO */ vec1 32 ssa_93 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_94 = load_const (0x00000130 /* 0.000000 */) vec1 32 ssa_95 = iadd ssa_93, ssa_94 vec1 32 ssa_96 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_97 = iadd ssa_95, ssa_96 vec1 32 ssa_98 = intrinsic load_ubo (ssa_92, ssa_97) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_99 = iadd ssa_90, ssa_98 vec1 32 ssa_100 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_101 = intrinsic load_deref (ssa_100) (0) /* access=0 */ vec4 32 ssa_102 = vec4 ssa_99, ssa_101.y, ssa_101.z, ssa_101.w vec1 32 ssa_103 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_103, ssa_102) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_104 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_105 = intrinsic load_deref (ssa_104) (0) /* access=0 */ vec1 32 ssa_106 = imov ssa_105.x vec1 32 ssa_107 = ushr ssa_85, ssa_84 vec1 32 ssa_108 = imul ssa_106, ssa_83 vec1 32 ssa_109 = iadd ssa_108, ssa_107 vec1 32 ssa_110 = iadd ssa_109, ssa_82 vec1 32 ssa_111 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_112 = intrinsic vulkan_resource_index (ssa_111) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_113 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_114 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_115 = ishl ssa_110, ssa_114 vec1 32 ssa_116 = iadd ssa_113, ssa_115 vec1 32 ssa_117 = intrinsic load_ssbo (ssa_112, ssa_116) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_118 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_119 = intrinsic load_deref (ssa_118) (0) /* access=0 */ vec4 32 ssa_120 = vec4 ssa_117, ssa_119.y, ssa_119.z, ssa_119.w vec1 32 ssa_121 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_121, ssa_120) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_122 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_123 = intrinsic load_deref (ssa_122) (0) /* access=0 */ vec1 32 ssa_124 = imov ssa_123.x vec1 32 ssa_125 = ishl ssa_124, ssa_81 vec1 32 ssa_126 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_127 = intrinsic load_deref (ssa_126) (0) /* access=0 */ vec4 32 ssa_128 = vec4 ssa_127.x, ssa_125, ssa_127.z, ssa_127.w vec1 32 ssa_129 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_129, ssa_128) (15, 0) /* wrmask=xyzw */ /* access=0 */
0107:fixme:wbemprox:wbem_services_CreateInstanceEnum unsupported flags 0x00000030
vec1 32 ssa_130 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_131 = intrinsic load_deref (ssa_130) (0) /* access=0 */ vec1 32 ssa_132 = imov ssa_131.x vec1 32 ssa_133 = deref_var &o8 (shader_out float) intrinsic store_deref (ssa_133, ssa_132) (1, 0) /* wrmask=x */ /* access=0 */ vec1 32 ssa_134 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_135 = intrinsic load_deref (ssa_134) (0) /* access=0 */ vec1 32 ssa_136 = imov ssa_135.y vec1 32 ssa_137 = ushr ssa_136, ssa_80 vec1 32 ssa_138 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_139 = intrinsic load_deref (ssa_138) (0) /* access=0 */ vec4 32 ssa_140 = vec4 ssa_137, ssa_139.y, ssa_139.z, ssa_139.w vec1 32 ssa_141 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_141, ssa_140) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_142 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_143 = intrinsic load_deref (ssa_142) (0) /* access=0 */ vec1 32 ssa_144 = imov ssa_143.x
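The recurring sequence imov -> ushr -> imul -> iadd -> iadd -> vulkan_resource_index -> ishl -> iadd -> load_ssbo (ssa_105 through ssa_117 above, and again around ssa_264..ssa_275) is a raw structured-buffer read: a dword index is computed as element * stride + bias, shifted left by 2 into a byte offset, and fetched from the SSBO behind descriptor set 0, binding 3 or 4. The ushr/iadd on two constants (0 >> 2, + 0) survive only because this dump is taken before constant folding. A sketch of the same arithmetic on the CPU; the function name and the flat uint32_t view of the buffer are illustrative assumptions, not any real API:

    #include <stdint.h>

    /* Hypothetical restatement of the ssa_106..ssa_117 address math. 'buf'
     * stands in for the SSBO contents at set=0, binding=3. */
    static uint32_t load_structured_dword(const uint32_t *buf,
                                          uint32_t elem,   /* r0.x (imov)     */
                                          uint32_t stride, /* imul factor     */
                                          uint32_t base)   /* final iadd term */
    {
        uint32_t index  = elem * stride + base; /* imul + iadd (+ folded ushr) */
        uint32_t offset = index << 2;           /* ishl by 2: dwords -> bytes  */
        return buf[offset >> 2];                /* load_ssbo, align_mul=4      */
    }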
vec1 32 ssa_145 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_146 = intrinsic vulkan_resource_index (ssa_145) (0, 1, 6) /* desc-set=0 */ /* binding=1 */ /* desc_type=UBO */ vec1 32 ssa_147 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_148 = load_const (0x00000130 /* 0.000000 */) vec1 32 ssa_149 = iadd ssa_147, ssa_148 vec1 32 ssa_150 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_151 = iadd ssa_149, ssa_150 vec1 32 ssa_152 = intrinsic load_ubo (ssa_146, ssa_151) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_153 = iadd ssa_144, ssa_152 vec1 32 ssa_154 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_155 = intrinsic load_deref (ssa_154) (0) /* access=0 */ vec4 32 ssa_156 = vec4 ssa_153, ssa_155.y, ssa_155.z, ssa_155.w vec1 32 ssa_157 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_157, ssa_156) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_158 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_159 = intrinsic load_deref (ssa_158) (0) /* access=0 */ vec4 32 ssa_160 = vec4 ssa_159.x, ssa_159.x, ssa_159.x, ssa_159.x vec1 32 ssa_161 = imov ssa_160.x /* succs: block_1 block_2 */ if ssa_79 { block block_1: /* preds: block_0 */ vec1 32 ssa_162 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_163 = txf ssa_162 (texture_deref), ssa_161 (coord), 0 (sampler), vec2 32 ssa_164 = vec2 ssa_163.y, ssa_163.z vec1 32 ssa_165 = deref_var &phi (function_temp vec2) intrinsic store_deref (ssa_165, ssa_164) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_3 */ } else { block block_2: /* preds: block_0 */ vec1 32 ssa_166 = deref_var &phi (function_temp vec2) intrinsic store_deref (ssa_166, ssa_44) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ vec1 32 ssa_167 = deref_var &phi (function_temp vec2) vec2 32 ssa_168 = intrinsic load_deref (ssa_167) (0) /* access=0 */

vec3 32 ssa_211 = iadd ssa_207, ssa_210 vec1 32 ssa_212 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_213 = intrinsic load_deref (ssa_212) (0) /* access=0 */ vec4 32 ssa_214 = vec4 ssa_211.x, ssa_213.y, ssa_211.y, ssa_211.z vec1 32 ssa_215 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_215, ssa_214) (15, 0) /* wrmask=xyzw */ /* access=0 */

vec1 32 ssa_232 = imul ssa_230, ssa_108 vec4 32 ssa_135 = intrinsic load_deref (ssa_134) (0) /* access=0 */ vec1 32 ssa_136 = imov ssa_135.x vec1 32 ssa_137 = iadd ssa_136, ssa_97 vec1 32 ssa_138 = deref_var &r0 (function_temp vec4)

vec1 32 ssa_2169 = intrinsic load_first_vertex () () vec1 32 ssa_2170 = intrinsic load_first_vertex () () vec1 32 ssa_2171 = intrinsic load_vertex_id_zero_base () () vec1 32 ssa_2172 = iadd ssa_2171, ssa_2170 vec1 32 ssa_11 = isub ssa_2172, ssa_2169 vec1 32 ssa_12 = deref_var &shader_in (function_temp Z) vec1 32 ssa_13 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_14 = deref_array &(*ssa_12)[1] (function_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_15 = intrinsic load_deref (ssa_14) (0) /* access=0 */ vec4 32 ssa_16 = vec4 ssa_11, ssa_15.y, ssa_15.z, ssa_15.w intrinsic store_deref (ssa_14, ssa_16) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2229 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2230 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2231 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2232 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2233 = vec4 ssa_2229, ssa_2230, ssa_2231, ssa_2232 vec1 32 ssa_2234 =
load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2235 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2236 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2237 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2238 = vec4 ssa_2234, ssa_2235, ssa_2236, ssa_2237 vec1 32 ssa_2239 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2240 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2241 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2242 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2243 = vec4 ssa_2239, ssa_2240, ssa_2241, ssa_2242 vec1 32 ssa_2244 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2245 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2246 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2247 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2248 = vec4 ssa_2244, ssa_2245, ssa_2246, ssa_2247 vec1 32 ssa_2249 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2250 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2251 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2252 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2253 = vec4 ssa_2249, ssa_2250, ssa_2251, ssa_2252 vec1 32 ssa_2254 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2255 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2256 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2257 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2258 = vec4 ssa_2254, ssa_2255, ssa_2256, ssa_2257 vec1 32 ssa_2259 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2260 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_2261 = vec2 ssa_2259, ssa_2260 vec1 32 ssa_2262 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2263 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2264 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2265 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2266 = vec4 ssa_2262, ssa_2263, ssa_2264, ssa_2265 vec1 32 ssa_25 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2267 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2268 = load_const (0x00000000 /* 0.000000 */) vec2 32 ssa_2269 = vec2 ssa_2267, ssa_2268 vec1 32 ssa_27 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_28 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_29 = load_const (true) vec1 1 ssa_30 = load_const (true) vec1 32 ssa_2270 = load_const (0x38000080 /* 0.000031 */) vec1 32 ssa_2271 = load_const (0x38000080 /* 0.000031 */) vec1 32 ssa_2272 = load_const (0x38000080 /* 0.000031 */) vec3 32 ssa_2273 = vec3 ssa_2270, ssa_2271, ssa_2272 vec1 32 ssa_2274 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2275 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2276 = load_const (0x00000010 /* 0.000000 */) vec3 32 ssa_2277 = vec3 ssa_2274, ssa_2275, ssa_2276 vec1 32 ssa_33 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2278 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2279 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2280 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2281 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2282 = vec4 ssa_2278, ssa_2279, ssa_2280, ssa_2281 vec1 32 ssa_35 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_36 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_37 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_38 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_39 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_2283 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_2284 = load_const (0x3f000000 /* 0.500000 */) vec2 32 ssa_2285 = vec2 ssa_2283, ssa_2284 vec1 
32 ssa_2286 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_2287 = load_const (0x3f000000 /* 0.500000 */) vec2 32 ssa_2288 = vec2 ssa_2286, ssa_2287 vec1 32 ssa_2289 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_2290 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_2291 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_2292 = load_const (0x3f800000 /* 1.000000 */) vec4 32 ssa_2293 = vec4 ssa_2289, ssa_2290, ssa_2291, ssa_2292 vec1 32 ssa_2294 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_2295 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_2296 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_2297 = load_const (0xbf800000 /* -1.000000 */) vec4 32 ssa_2298 = vec4 ssa_2294, ssa_2295, ssa_2296, ssa_2297 vec1 32 ssa_2299 = load_const (0x3c010204 /* 0.007874 */) vec1 32 ssa_2300 = load_const (0x3c010204 /* 0.007874 */) vec1 32 ssa_2301 = load_const (0x3c010204 /* 0.007874 */) vec1 32 ssa_2302 = load_const (0x3c010204 /* 0.007874 */) vec4 32 ssa_2303 = vec4 ssa_2299, ssa_2300, ssa_2301, ssa_2302 vec1 32 ssa_2304 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_2305 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_2306 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_2307 = load_const (0x000000ff /* 0.000000 */) vec4 32 ssa_2308 = vec4 ssa_2304, ssa_2305, ssa_2306, ssa_2307 vec1 32 ssa_2309 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2310 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2311 = load_const (0x00000018 /* 0.000000 */) vec3 32 ssa_2312 = vec3 ssa_2309, ssa_2310, ssa_2311 vec1 32 ssa_47 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_2313 = load_const (0x3b808081 /* 0.003922 */) vec1 32 ssa_2314 = load_const (0x3b808081 /* 0.003922 */) vec1 32 ssa_2315 = load_const (0x3b808081 /* 0.003922 */) vec1 32 ssa_2316 = load_const (0x3b808081 /* 0.003922 */) vec4 32 ssa_2317 = vec4 ssa_2313, ssa_2314, ssa_2315, ssa_2316 vec1 32 ssa_2318 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_2319 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_2320 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_2321 = load_const (0x000000ff /* 0.000000 */) vec4 32 ssa_2322 = vec4 ssa_2318, ssa_2319, ssa_2320, ssa_2321 vec1 32 ssa_2323 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2324 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2325 = load_const (0x00000018 /* 0.000000 */) vec3 32 ssa_2326 = vec3 ssa_2323, ssa_2324, ssa_2325 vec1 32 ssa_51 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_52 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_53 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_54 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_55 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_56 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_57 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_58 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_59 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_2327 = load_const (0x3b808081 /* 0.003922 */) vec1 32 ssa_2328 = load_const (0x3b808081 /* 0.003922 */) vec1 32 ssa_2329 = load_const (0x3b808081 /* 0.003922 */) vec1 32 ssa_2330 = load_const (0x3b808081 /* 0.003922 */) vec4 32 ssa_2331 = vec4 ssa_2327, ssa_2328, ssa_2329, ssa_2330 vec1 32 ssa_2332 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_2333 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_2334 = load_const (0x000000ff /* 0.000000 */) vec1 32 ssa_2335 = load_const (0x000000ff /* 0.000000 */) vec4 32 ssa_2336 = vec4 ssa_2332, ssa_2333, ssa_2334, ssa_2335 vec1 32 ssa_2337 = 
load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2338 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2339 = load_const (0x00000018 /* 0.000000 */) vec3 32 ssa_2340 = vec3 ssa_2337, ssa_2338, ssa_2339 vec1 32 ssa_63 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_64 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_65 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_66 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2341 = load_const (0x3a000080 /* 0.000488 */) vec1 32 ssa_2342 = load_const (0x3a000080 /* 0.000488 */) vec2 32 ssa_2343 = vec2 ssa_2341, ssa_2342 vec1 32 ssa_2344 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2345 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_2346 = vec2 ssa_2344, ssa_2345 vec1 32 ssa_69 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_70 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_71 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_72 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_73 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_74 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_75 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_76 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_77 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_78 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2347 = load_const (0x3a000080 /* 0.000488 */) vec1 32 ssa_2348 = load_const (0x3a000080 /* 0.000488 */) vec2 32 ssa_2349 = vec2 ssa_2347, ssa_2348 vec1 32 ssa_2350 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2351 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_2352 = vec2 ssa_2350, ssa_2351 vec1 32 ssa_81 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_82 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_83 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_84 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_85 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_86 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_87 = load_const (0x3f800000 /* 1.000000 */) vec1 1 ssa_88 = load_const (true) vec1 1 ssa_89 = load_const (true) vec1 1 ssa_90 = load_const (true) vec1 32 ssa_2353 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2354 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_2355 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2356 = load_const (0x00000002 /* 0.000000 */) vec4 32 ssa_2357 = vec4 ssa_2353, ssa_2354, ssa_2355, ssa_2356 vec1 1 ssa_92 = load_const (true) vec1 1 ssa_93 = load_const (true) vec1 32 ssa_94 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_2358 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2359 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2360 = load_const (0x00000000 /* 0.000000 */) vec3 32 ssa_2361 = vec3 ssa_2358, ssa_2359, ssa_2360 vec1 32 ssa_2362 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_2363 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_2364 = load_const (0x3f800000 /* 1.000000 */) vec3 32 ssa_2365 = vec3 ssa_2362, ssa_2363, ssa_2364 vec1 32 ssa_2366 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_2367 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_2368 = load_const (0xbf800000 /* -1.000000 */) vec3 32 ssa_2369 = vec3 ssa_2366, ssa_2367, ssa_2368 vec1 32 ssa_2370 = load_const (0xffffffff /* -nan */) vec1 32 ssa_2371 = load_const (0xffffffff /* -nan */) vec1 32 ssa_2372 = load_const (0xffffffff /* -nan */) vec3 32 ssa_2373 = vec3 ssa_2370, ssa_2371, ssa_2372 vec1 32 ssa_2374 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2375 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2376 = load_const (0x00000000 /* 0.000000 */) vec3 32 ssa_2377 = vec3 ssa_2374, ssa_2375, ssa_2376 vec1 32 ssa_2378 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2379 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2380 = load_const (0x00000000 /* 0.000000 */) vec3 32 ssa_2381 = vec3 ssa_2378, ssa_2379, ssa_2380 vec1 32 ssa_2382 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2383 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_2384 = load_const (0x00000004 /* 0.000000 */) vec3 32 ssa_2385 = vec3 ssa_2382, ssa_2383, ssa_2384

vec1 32 ssa_289 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_290 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_291 = ishl ssa_286, ssa_290 vec1 32 ssa_292 = iadd ssa_289, ssa_291 vec1 32 ssa_293 = intrinsic load_ssbo (ssa_288, ssa_292) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_294 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_295 = intrinsic load_deref (ssa_294) (0) /* access=0 */ vec4 32 ssa_296 = vec4 ssa_295.x, ssa_295.y, ssa_295.z, ssa_293 vec1 32 ssa_297 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_297, ssa_296) (15, 0) /* wrmask=xyzw */ /* access=0 */

vec1 32 ssa_233 = iadd ssa_232, ssa_231 vec1 32 ssa_234 = iadd ssa_233, ssa_107

vec4 32 ssa_139 = intrinsic load_deref (ssa_138) (0) /* access=0 */

vec1 32 ssa_216 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_217 = intrinsic load_deref (ssa_216) (0) /* access=0 */

vec1 32 ssa_169 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_170 = intrinsic load_deref (ssa_169) (0) /* access=0 */ vec4 32 ssa_171 = vec4 ssa_168.x, ssa_168.y, ssa_170.z, ssa_170.w vec1 32 ssa_172 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_172, ssa_171) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_173 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_174 = intrinsic load_deref (ssa_173) (0) /* access=0 */ vec1 32 ssa_175 = imov ssa_174.x vec1 32 ssa_176 = iadd ssa_175, ssa_78 vec1 32 ssa_177 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_178 = intrinsic load_deref (ssa_177) (0) /* access=0 */ vec4 32 ssa_179 = vec4 ssa_178.x, ssa_178.y, ssa_176, ssa_178.w vec1 32 ssa_180 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_180, ssa_179) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_181 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_182 = intrinsic load_deref (ssa_181) (0) /* access=0 */ vec4 32 ssa_183 = vec4 ssa_182.z, ssa_182.z, ssa_182.z, ssa_182.z vec1 32 ssa_184 = imov ssa_183.x /* succs: block_4 block_5 */ if ssa_77 { block block_4: /* preds: block_3 */ vec1 32 ssa_185 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_186 = txf ssa_185 (texture_deref), ssa_184 (coord), 0 (sampler), vec2 32 ssa_187 = vec2 ssa_186.x, ssa_186.y vec1 32 ssa_188 = deref_var &phi@0 (function_temp vec2) intrinsic store_deref (ssa_188, ssa_187) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_6 */ } else { block block_5: /* preds: block_3 */ vec1 32 ssa_189 = deref_var &phi@0 (function_temp vec2) intrinsic store_deref (ssa_189, ssa_43) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_6 */ } block block_6: /* preds: block_4 block_5 */ vec1 32 ssa_190 = deref_var &phi@0 (function_temp vec2) vec2 32 ssa_191 = intrinsic load_deref (ssa_190) (0) /* access=0 */ vec1 32 ssa_192 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_193 = intrinsic load_deref (ssa_192) (0) /* access=0 */ vec4 32 ssa_194 = vec4 ssa_193.x, ssa_193.y, ssa_191.x, ssa_191.y vec1 32 ssa_195 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_195, ssa_194) (15, 0)
/* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_196 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_197 = intrinsic load_deref (ssa_196) (0) /* access=0 */ vec3 32 ssa_198 = vec3 ssa_197.z, ssa_197.z, ssa_197.z vec1 32 ssa_199 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_200 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_201 = deref_array &(*ssa_199)[0] (shader_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_202 = intrinsic load_deref (ssa_201) (0) /* access=0 */ vec3 32 ssa_203 = vec3 ssa_202.x, ssa_202.y, ssa_202.z vec3 32 ssa_204 = fmul ssa_198, ssa_203 vec1 32 ssa_205 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_206 = intrinsic load_deref (ssa_205) (0) /* access=0 */ vec4 32 ssa_207 = vec4 ssa_204.x, ssa_204.y, ssa_204.z, ssa_206.w vec1 32 ssa_208 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_208, ssa_207) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_209 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_210 = intrinsic load_deref (ssa_209) (0) /* access=0 */ vec3 32 ssa_211 = vec3 ssa_210.w, ssa_210.w, ssa_210.w vec1 32 ssa_212 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_213 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_214 = deref_array &(*ssa_212)[1] (shader_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_215 = intrinsic load_deref (ssa_214) (0) /* access=0 */ vec3 32 ssa_216 = vec3 ssa_215.x, ssa_215.y, ssa_215.z vec3 32 ssa_217 = fmul ssa_211, ssa_216 vec1 32 ssa_218 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_219 = intrinsic load_deref (ssa_218) (0) /* access=0 */ vec4 32 ssa_220 = vec4 ssa_217.x, ssa_217.y, ssa_217.z, ssa_219.w vec1 32 ssa_221 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_221, ssa_220) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_222 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_223 = intrinsic load_deref (ssa_222) (0) /* access=0 */ vec4 32 ssa_224 = vec4 ssa_223.x, ssa_223.x, ssa_223.x, ssa_223.x vec1 32 ssa_225 = imov ssa_224.x /* succs: block_7 block_8 */ if ssa_76 { block block_7: /* preds: block_6 */ vec1 32 ssa_226 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_227 = txf ssa_226 (texture_deref), ssa_225 (coord), 0 (sampler), vec1 32 ssa_228 = deref_var &phi@1 (function_temp vec4) intrinsic store_deref (ssa_228, ssa_227) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ vec1 32 ssa_229 = deref_var &phi@1 (function_temp vec4) intrinsic store_deref (ssa_229, ssa_42) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ vec1 32 ssa_230 = deref_var &phi@1 (function_temp vec4) vec4 32 ssa_231 = intrinsic load_deref (ssa_230) (0) /* access=0 */ vec1 32 ssa_232 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_232, ssa_231) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_233 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_234 = intrinsic load_deref (ssa_233) (0) /* access=0 */ vec1 32 ssa_235 = imov ssa_234.w vec1 32 ssa_236 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_237 = intrinsic load_deref (ssa_236) (0) /* access=0 */ vec4 32 ssa_238 = vec4 ssa_235, ssa_237.y, ssa_237.z, ssa_237.w vec1 32 ssa_239 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_239, ssa_238) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_240 = deref_var &r0 (shader_temp vec4) vec4 
32 ssa_241 = intrinsic load_deref (ssa_240) (0) /* access=0 */ vec4 32 ssa_242 = vec4 ssa_241.x, ssa_241.x, ssa_241.y, ssa_241.y vec4 32 ssa_243 = iadd ssa_242, ssa_75 vec1 32 ssa_244 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_244, ssa_243) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_245 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_246 = intrinsic load_deref (ssa_245) (0) /* access=0 */ vec4 32 ssa_247 = vec4 ssa_246.y, ssa_246.y, ssa_246.y, ssa_246.y vec1 32 ssa_248 = imov ssa_247.x /* succs: block_10 block_11 */ if ssa_74 { block block_10: /* preds: block_9 */ vec1 32 ssa_249 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_250 = txf ssa_249 (texture_deref), ssa_248 (coord), 0 (sampler), vec1 32 ssa_251 = deref_var &phi@2 (function_temp vec4) intrinsic store_deref (ssa_251, ssa_250) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_12 */ } else { block block_11: /* preds: block_9 */ vec1 32 ssa_252 = deref_var &phi@2 (function_temp vec4) intrinsic store_deref (ssa_252, ssa_41) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_12 */ } block block_12: /* preds: block_10 block_11 */ vec1 32 ssa_253 = deref_var &phi@2 (function_temp vec4) vec4 32 ssa_254 = intrinsic load_deref (ssa_253) (0) /* access=0 */ vec1 32 ssa_255 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_255, ssa_254) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_256 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_257 = intrinsic load_deref (ssa_256) (0) /* access=0 */ vec4 32 ssa_258 = vec4 ssa_257.x, ssa_257.x, ssa_257.x, ssa_257.x vec1 32 ssa_259 = imov ssa_258.x /* succs: block_13 block_14 */ if ssa_73 { block block_13: /* preds: block_12 */ vec1 32 ssa_260 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_261 = txf ssa_260 (texture_deref), ssa_259 (coord), 0 (sampler), vec1 32 ssa_262 = deref_var &phi@3 (function_temp vec4) intrinsic store_deref (ssa_262, ssa_261) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_15 */ } else { block block_14: /* preds: block_12 */ vec1 32 ssa_263 = deref_var &phi@3 (function_temp vec4) intrinsic store_deref (ssa_263, ssa_40) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ vec1 32 ssa_264 = deref_var &phi@3 (function_temp vec4) vec4 32 ssa_265 = intrinsic load_deref (ssa_264) (0) /* access=0 */ vec1 32 ssa_266 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_266, ssa_265) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_267 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_268 = intrinsic load_deref (ssa_267) (0) /* access=0 */ vec1 32 ssa_269 = imov ssa_268.w vec1 32 ssa_270 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_271 = intrinsic load_deref (ssa_270) (0) /* access=0 */ vec4 32 ssa_272 = vec4 ssa_271.x, ssa_269, ssa_271.z, ssa_271.w vec1 32 ssa_273 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_273, ssa_272) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_274 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_275 = intrinsic load_deref (ssa_274) (0) /* access=0 */ vec4 32 ssa_276 = vec4 ssa_275.y, ssa_275.y, ssa_275.y, ssa_275.y vec1 32 ssa_277 = imov ssa_276.x /* succs: block_16 block_17 */ if ssa_72 { block block_16: /* preds: block_15 */ vec1 32 ssa_278 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_279 = txf ssa_278 (texture_deref), ssa_277 (coord), 0 (sampler), vec1 32 ssa_280 = deref_var &phi@4 (function_temp vec4) intrinsic store_deref (ssa_280, ssa_279) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* 
succs: block_18 */ } else { block block_17: /* preds: block_15 */ vec1 32 ssa_281 = deref_var &phi@4 (function_temp vec4) intrinsic store_deref (ssa_281, ssa_39) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_18 */ } block block_18: /* preds: block_16 block_17 */ vec1 32 ssa_282 = deref_var &phi@4 (function_temp vec4) vec4 32 ssa_283 = intrinsic load_deref (ssa_282) (0) /* access=0 */ vec1 32 ssa_284 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_284, ssa_283) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_285 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_286 = intrinsic load_deref (ssa_285) (0) /* access=0 */ vec1 32 ssa_287 = imov ssa_286.w vec1 32 ssa_288 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_289 = intrinsic load_deref (ssa_288) (0) /* access=0 */ vec4 32 ssa_290 = vec4 ssa_289.x, ssa_289.y, ssa_287, ssa_289.w vec1 32 ssa_291 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_291, ssa_290) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_292 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_293 = intrinsic load_deref (ssa_292) (0) /* access=0 */ vec3 32 ssa_294 = vec3 ssa_293.x, ssa_293.y, ssa_293.z vec1 32 ssa_295 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_296 = intrinsic vulkan_resource_index (ssa_295) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_297 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_298 = load_const (0x00000310 /* 0.000000 */) vec1 32 ssa_299 = iadd ssa_297, ssa_298 vec1 32 ssa_300 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_301 = iadd ssa_299, ssa_300 vec1 32 ssa_302 = intrinsic load_ubo (ssa_296, ssa_301) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_303 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_304 = iadd ssa_299, ssa_303 vec1 32 ssa_305 = intrinsic load_ubo (ssa_296, ssa_304) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_306 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_307 = iadd ssa_299, ssa_306 vec1 32 ssa_308 = intrinsic load_ubo (ssa_296, ssa_307) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_309 = vec3 ssa_302, ssa_305, ssa_308 vec3 32 ssa_310 = fneg ssa_309 vec3 32 ssa_311 = fadd ssa_294, ssa_310 vec1 32 ssa_312 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_313 = intrinsic load_deref (ssa_312) (0) /* access=0 */ vec4 32 ssa_314 = vec4 ssa_311.x, ssa_311.y, ssa_311.z, ssa_313.w vec1 32 ssa_315 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_315, ssa_314) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_316 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_317 = intrinsic load_deref (ssa_316) (0) /* access=0 */ vec4 32 ssa_318 = vec4 ssa_317.x, ssa_317.y, ssa_317.z, ssa_71 vec1 32 ssa_319 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_319, ssa_318) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_320 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_321 = intrinsic load_deref (ssa_320) (0) /* access=0 */ vec1 32 ssa_322 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_323 = intrinsic vulkan_resource_index (ssa_322) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_324 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_325 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_326 = iadd ssa_324, ssa_325 vec1 32 ssa_327 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_328 = iadd ssa_326, ssa_327 vec1 32 ssa_329 = intrinsic load_ubo (ssa_323, ssa_328) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_330 = load_const (0x00000004 /* 
0.000000 */) vec1 32 ssa_331 = iadd ssa_326, ssa_330 vec1 32 ssa_332 = intrinsic load_ubo (ssa_323, ssa_331) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_333 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_334 = iadd ssa_326, ssa_333 vec1 32 ssa_335 = intrinsic load_ubo (ssa_323, ssa_334) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_336 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_337 = iadd ssa_326, ssa_336 vec1 32 ssa_338 = intrinsic load_ubo (ssa_323, ssa_337) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_339 = vec4 ssa_329, ssa_332, ssa_335, ssa_338 vec1 32 ssa_340 = fdot4 ssa_321, ssa_339 vec1 32 ssa_341 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_342 = intrinsic load_deref (ssa_341) (0) /* access=0 */ vec4 32 ssa_343 = vec4 ssa_342.x, ssa_342.y, ssa_342.z, ssa_340 vec1 32 ssa_344 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_344, ssa_343) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_345 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_346 = intrinsic load_deref (ssa_345) (0) /* access=0 */ vec1 32 ssa_347 = imov ssa_346.x vec1 32 ssa_348 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_349 = intrinsic load_deref (ssa_348) (0) /* access=0 */ vec4 32 ssa_350 = vec4 ssa_347, ssa_349.y, ssa_349.z, ssa_349.w vec1 32 ssa_351 = deref_var &r9 (shader_temp vec4) intrinsic store_deref (ssa_351, ssa_350) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_352 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_353 = intrinsic load_deref (ssa_352) (0) /* access=0 */ vec1 32 ssa_354 = imov ssa_353.x vec1 32 ssa_355 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_356 = intrinsic load_deref (ssa_355) (0) /* access=0 */ vec4 32 ssa_357 = vec4 ssa_356.x, ssa_354, ssa_356.z, ssa_356.w vec1 32 ssa_358 = deref_var &r9 (shader_temp vec4) intrinsic store_deref (ssa_358, ssa_357) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_359 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_360 = intrinsic load_deref (ssa_359) (0) /* access=0 */ vec1 32 ssa_361 = imov ssa_360.x vec1 32 ssa_362 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_363 = intrinsic load_deref (ssa_362) (0) /* access=0 */ vec4 32 ssa_364 = vec4 ssa_363.x, ssa_363.y, ssa_361, ssa_363.w vec1 32 ssa_365 = deref_var &r9 (shader_temp vec4) intrinsic store_deref (ssa_365, ssa_364) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_366 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_367 = intrinsic load_deref (ssa_366) (0) /* access=0 */ vec3 32 ssa_368 = vec3 ssa_367.x, ssa_367.y, ssa_367.z vec1 32 ssa_369 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_370 = intrinsic vulkan_resource_index (ssa_369) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_371 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_372 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_373 = iadd ssa_371, ssa_372 vec1 32 ssa_374 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_375 = iadd ssa_373, ssa_374 vec1 32 ssa_376 = intrinsic load_ubo (ssa_370, ssa_375) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_377 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_378 = iadd ssa_373, ssa_377 vec1 32 ssa_379 = intrinsic load_ubo (ssa_370, ssa_378) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_380 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_381 = iadd ssa_373, ssa_380 vec1 32 ssa_382 = intrinsic load_ubo (ssa_370, ssa_381) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_383 = vec3 ssa_376, ssa_379, ssa_382 vec1 32 ssa_384 = fdot3 
ssa_368, ssa_383 vec1 32 ssa_385 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_386 = intrinsic load_deref (ssa_385) (0) /* access=0 */ vec4 32 ssa_387 = vec4 ssa_384, ssa_386.y, ssa_386.z, ssa_386.w vec1 32 ssa_388 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_388, ssa_387) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_389 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_390 = intrinsic load_deref (ssa_389) (0) /* access=0 */ vec1 32 ssa_391 = imov ssa_390.y vec1 32 ssa_392 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_393 = intrinsic load_deref (ssa_392) (0) /* access=0 */ vec4 32 ssa_394 = vec4 ssa_391, ssa_393.y, ssa_393.z, ssa_393.w vec1 32 ssa_395 = deref_var &r10 (shader_temp vec4) intrinsic store_deref (ssa_395, ssa_394) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_396 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_397 = intrinsic load_deref (ssa_396) (0) /* access=0 */ vec1 32 ssa_398 = imov ssa_397.y vec1 32 ssa_399 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_400 = intrinsic load_deref (ssa_399) (0) /* access=0 */ vec4 32 ssa_401 = vec4 ssa_400.x, ssa_398, ssa_400.z, ssa_400.w vec1 32 ssa_402 = deref_var &r10 (shader_temp vec4) intrinsic store_deref (ssa_402, ssa_401) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_403 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_404 = intrinsic load_deref (ssa_403) (0) /* access=0 */ vec1 32 ssa_405 = imov ssa_404.y vec1 32 ssa_406 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_407 = intrinsic load_deref (ssa_406) (0) /* access=0 */ vec4 32 ssa_408 = vec4 ssa_407.x, ssa_407.y, ssa_405, ssa_407.w vec1 32 ssa_409 = deref_var &r10 (shader_temp vec4) intrinsic store_deref (ssa_409, ssa_408) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_410 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_411 = intrinsic load_deref (ssa_410) (0) /* access=0 */ vec3 32 ssa_412 = vec3 ssa_411.x, ssa_411.y, ssa_411.z vec1 32 ssa_413 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_414 = intrinsic vulkan_resource_index (ssa_413) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_415 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_416 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_417 = iadd ssa_415, ssa_416 vec1 32 ssa_418 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_419 = iadd ssa_417, ssa_418 vec1 32 ssa_420 = intrinsic load_ubo (ssa_414, ssa_419) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_421 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_422 = iadd ssa_417, ssa_421 vec1 32 ssa_423 = intrinsic load_ubo (ssa_414, ssa_422) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_424 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_425 = iadd ssa_417, ssa_424 vec1 32 ssa_426 = intrinsic load_ubo (ssa_414, ssa_425) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_427 = vec3 ssa_420, ssa_423, ssa_426 vec1 32 ssa_428 = fdot3 ssa_412, ssa_427 vec1 32 ssa_429 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_430 = intrinsic load_deref (ssa_429) (0) /* access=0 */ vec4 32 ssa_431 = vec4 ssa_430.x, ssa_428, ssa_430.z, ssa_430.w vec1 32 ssa_432 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_432, ssa_431) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_433 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_434 = intrinsic load_deref (ssa_433) (0) /* access=0 */ vec1 32 ssa_435 = imov ssa_434.z vec1 32 ssa_436 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_437 = intrinsic load_deref (ssa_436) (0) /* access=0 */ vec4 32 ssa_438 = vec4 
ssa_435, ssa_437.y, ssa_437.z, ssa_437.w vec1 32 ssa_439 = deref_var &r11 (shader_temp vec4) intrinsic store_deref (ssa_439, ssa_438) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_440 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_441 = intrinsic load_deref (ssa_440) (0) /* access=0 */ vec1 32 ssa_442 = imov ssa_441.z vec1 32 ssa_443 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_444 = intrinsic load_deref (ssa_443) (0) /* access=0 */ vec4 32 ssa_445 = vec4 ssa_444.x, ssa_442, ssa_444.z, ssa_444.w vec1 32 ssa_446 = deref_var &r11 (shader_temp vec4) intrinsic store_deref (ssa_446, ssa_445) (15, 0) /* wrmask=xyzw */ /* access=0 */

vec1 32 ssa_102 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_103 = load_const (0x3f7fff00 /* 0.999985 */) vec1 32 ssa_218 = imov ssa_217.y vec1 32 ssa_235 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_236 = intrinsic vulkan_resource_index (ssa_235) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_2386 = load_const (0x38000080 /* 0.000031 */) vec1 32 ssa_2387 = load_const (0x38000080 /* 0.000031 */) vec1 32 ssa_2388 = load_const (0x38000080 /* 0.000031 */) vec4 32 ssa_140 = vec4 ssa_139.x, ssa_137, ssa_139.z, ssa_139.w vec1 32 ssa_141 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_141, ssa_140) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_142 = deref_var &r0 (function_temp vec4) vec4 32 ssa_143 = intrinsic load_deref (ssa_142) (0) /* access=0 */

vec1 32 ssa_447 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_448 = intrinsic load_deref (ssa_447) (0) /* access=0 */ vec1 32 ssa_449 = imov ssa_448.z vec1 32 ssa_450 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_451 = intrinsic load_deref (ssa_450) (0) /* access=0 */ vec4 32 ssa_452 = vec4 ssa_451.x, ssa_451.y, ssa_449, ssa_451.w vec1 32 ssa_453 = deref_var &r11 (shader_temp vec4) intrinsic store_deref (ssa_453, ssa_452) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_454 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_455 = intrinsic load_deref (ssa_454) (0) /* access=0 */ vec3 32 ssa_456 = vec3 ssa_455.x, ssa_455.y, ssa_455.z vec1 32 ssa_457 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_458 = intrinsic vulkan_resource_index (ssa_457) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_459 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_460 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_461 = iadd ssa_459, ssa_460 vec1 32 ssa_462 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_463 = iadd ssa_461, ssa_462 vec1 32 ssa_464 = intrinsic load_ubo (ssa_458, ssa_463) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_465 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_466 = iadd ssa_461, ssa_465 vec1 32 ssa_467 = intrinsic load_ubo (ssa_458, ssa_466) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_468 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_469 = iadd ssa_461, ssa_468 vec1 32 ssa_470 = intrinsic load_ubo (ssa_458, ssa_469) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_471 = vec3 ssa_464, ssa_467, ssa_470 vec1 32 ssa_472 = fdot3 ssa_456, ssa_471 vec1 32 ssa_473 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_474 = intrinsic load_deref (ssa_473) (0) /* access=0 */ vec4 32 ssa_475 = vec4 ssa_474.x, ssa_474.y, ssa_472, ssa_474.w vec1 32 ssa_476 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_476, ssa_475) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_477 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_478 = intrinsic load_deref (ssa_477) (0) /* access=0 */
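The blocks that load three or four consecutive dwords from binding=0 at a fixed base (0x210, 0x220, 0x310) and feed them to fdot3/fdot4 against r1/r4/r9/r10/r11 (ssa_321..ssa_340 and ssa_368..ssa_384 earlier, ssa_456..ssa_472 above) are rows of matrix-vector multiplies: each dot product writes one component of the transformed vector into r8. A minimal C sketch of one row, assuming a plain byte view of the UBO; the function and parameter names are hypothetical:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical restatement of the load_ubo + fdot4 pattern: fetch a
     * row-major matrix row at a fixed byte offset in binding=0 and dot it
     * with a vec4 register (e.g. r4). Offsets +0,+4,+8,+0xc mirror the
     * four load_ubo instructions in the dump. */
    static float fdot4_ubo_row(const uint8_t *ubo,  /* binding=0 contents */
                               uint32_t row_base,   /* e.g. 0x210         */
                               const float v[4])
    {
        float row[4];
        memcpy(row, ubo + row_base, sizeof row);
        return row[0]*v[0] + row[1]*v[1] + row[2]*v[2] + row[3]*v[3];
    }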
vec4 32 ssa_479 = vec4 ssa_478.x, ssa_478.y, ssa_478.z, ssa_70 vec1 32 ssa_480 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_480, ssa_479) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_481 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_482 = intrinsic load_deref (ssa_481) (0) /* access=0 */ vec1 32 ssa_483 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_484 = intrinsic load_deref (ssa_483) (0) /* access=0 */ vec1 32 ssa_485 = fdot4 ssa_482, ssa_484 vec1 32 ssa_486 = deref_var &o0 (shader_out vec4) vec4 32 ssa_487 = intrinsic load_deref (ssa_486) (0) /* access=0 */ vec4 32 ssa_488 = vec4 ssa_485, ssa_487.y, ssa_487.z, ssa_487.w vec1 32 ssa_489 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_489, ssa_488) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_490 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_491 = intrinsic load_deref (ssa_490) (0) /* access=0 */ vec1 32 ssa_492 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_493 = intrinsic vulkan_resource_index (ssa_492) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_494 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_495 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_496 = iadd ssa_494, ssa_495 vec1 32 ssa_497 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_498 = iadd ssa_496, ssa_497 vec1 32 ssa_499 = intrinsic load_ubo (ssa_493, ssa_498) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_500 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_501 = iadd ssa_496, ssa_500 vec1 32 ssa_502 = intrinsic load_ubo (ssa_493, ssa_501) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_503 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_504 = iadd ssa_496, ssa_503 vec1 32 ssa_505 = intrinsic load_ubo (ssa_493, ssa_504) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_506 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_507 = iadd ssa_496, ssa_506 vec1 32 ssa_508 = intrinsic load_ubo (ssa_493, ssa_507) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_509 = vec4 ssa_499, ssa_502, ssa_505, ssa_508 vec1 32 ssa_510 = fdot4 ssa_491, ssa_509 vec1 32 ssa_511 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_512 = intrinsic load_deref (ssa_511) (0) /* access=0 */ vec4 32 ssa_513 = vec4 ssa_512.x, ssa_512.y, ssa_512.z, ssa_510 vec1 32 ssa_514 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_514, ssa_513) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_515 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_516 = intrinsic load_deref (ssa_515) (0) /* access=0 */ vec3 32 ssa_517 = vec3 ssa_516.x, ssa_516.y, ssa_516.z vec1 32 ssa_518 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_519 = intrinsic vulkan_resource_index (ssa_518) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_520 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_521 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_522 = iadd ssa_520, ssa_521 vec1 32 ssa_523 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_524 = iadd ssa_522, ssa_523 vec1 32 ssa_525 = intrinsic load_ubo (ssa_519, ssa_524) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_526 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_527 = iadd ssa_522, ssa_526 vec1 32 ssa_528 = intrinsic load_ubo (ssa_519, ssa_527) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_529 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_530 = iadd ssa_522, ssa_529 vec1 32 ssa_531 = intrinsic load_ubo (ssa_519, ssa_530) (4, 0) /* align_mul=4 */ /* align_offset=0 
*/ vec3 32 ssa_532 = vec3 ssa_525, ssa_528, ssa_531 vec1 32 ssa_533 = fdot3 ssa_517, ssa_532 vec1 32 ssa_534 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_535 = intrinsic load_deref (ssa_534) (0) /* access=0 */ vec4 32 ssa_536 = vec4 ssa_533, ssa_535.y, ssa_535.z, ssa_535.w vec1 32 ssa_537 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_537, ssa_536) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_538 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_539 = intrinsic load_deref (ssa_538) (0) /* access=0 */ vec3 32 ssa_540 = vec3 ssa_539.x, ssa_539.y, ssa_539.z vec1 32 ssa_541 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_542 = intrinsic vulkan_resource_index (ssa_541) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_543 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_544 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_545 = iadd ssa_543, ssa_544 vec1 32 ssa_546 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_547 = iadd ssa_545, ssa_546 vec1 32 ssa_548 = intrinsic load_ubo (ssa_542, ssa_547) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_549 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_550 = iadd ssa_545, ssa_549 vec1 32 ssa_551 = intrinsic load_ubo (ssa_542, ssa_550) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_552 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_553 = iadd ssa_545, ssa_552 vec1 32 ssa_554 = intrinsic load_ubo (ssa_542, ssa_553) (4, 0) /* align_mul=4 */ /* align_offset=0 */

vec3 32 ssa_2389 = vec3 ssa_2386, ssa_2387, ssa_2388 vec1 32 ssa_2390 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2391 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2392 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2393 = load_const (0x00000010 /* 0.000000 */) vec4 32 ssa_2394 = vec4 ssa_2390, ssa_2391, ssa_2392, ssa_2393 vec1 32 ssa_2395 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_2396 = load_const (0x00000010 /* 0.000000 */) vec2 32 ssa_2397 = vec2 ssa_2395, ssa_2396

vec1 32 ssa_219 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_220 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_221 = deref_array &(*ssa_219)[1] (shader_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_222 = intrinsic load_deref (ssa_221) (0) /* access=0 */ vec1 32 ssa_223 = imov ssa_222.x vec1 32 ssa_224 = iadd ssa_218, ssa_223 vec1 32 ssa_225 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_226 = intrinsic load_deref (ssa_225) (0) /* access=0 */ vec4 32 ssa_227 = vec4 ssa_226.x, ssa_224, ssa_226.z, ssa_226.w vec1 32 ssa_228 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_228, ssa_227) (15, 0) /* wrmask=xyzw */ /* access=0 */

vec1 32 ssa_237 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_238 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_239 = ishl ssa_234, ssa_238 vec1 32 ssa_240 = iadd ssa_237, ssa_239 vec1 32 ssa_241 = intrinsic load_ssbo (ssa_236, ssa_240) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_242 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_243 = intrinsic load_deref (ssa_242) (0) /* access=0 */ vec4 32 ssa_244 = vec4 ssa_241, ssa_243.y, ssa_243.z, ssa_243.w vec1 32 ssa_245 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_245, ssa_244) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_246 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_247 = intrinsic load_deref
(ssa_246) (0) /* access=0 */ vec1 32 ssa_248 = imov ssa_247.x vec1 32 ssa_249 = ushr ssa_106, ssa_105 vec1 32 ssa_250 = imul ssa_248, ssa_104 vec1 32 ssa_251 = iadd ssa_250, ssa_249 vec1 32 ssa_252 = iadd ssa_251, ssa_103 vec1 32 ssa_253 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_254 = intrinsic vulkan_resource_index (ssa_253) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_255 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_256 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_257 = ishl ssa_252, ssa_256 vec1 32 ssa_258 = iadd ssa_255, ssa_257 vec1 32 ssa_259 = intrinsic load_ssbo (ssa_254, ssa_258) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_260 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_261 = intrinsic load_deref (ssa_260) (0) /* access=0 */ vec4 32 ssa_262 = vec4 ssa_261.x, ssa_259, ssa_261.z, ssa_261.w vec1 32 ssa_263 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_263, ssa_262) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_264 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_265 = intrinsic load_deref (ssa_264) (0) /* access=0 */ vec4 32 ssa_266 = iadd ssa_265, ssa_102 vec1 32 ssa_267 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_267, ssa_266) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_268 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_269 = intrinsic load_deref (ssa_268) (0) /* access=0 */ vec1 32 ssa_270 = imov ssa_269.x vec1 32 ssa_271 = ushr ssa_101, ssa_100 vec1 32 ssa_272 = imul ssa_270, ssa_99 vec1 32 ssa_273 = iadd ssa_272, ssa_271 vec1 32 ssa_274 = iadd ssa_273, ssa_98 vec1 32 ssa_275 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_276 = intrinsic vulkan_resource_index (ssa_275) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_277 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_278 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_279 = ishl ssa_274, ssa_278 vec1 32 ssa_280 = iadd ssa_277, ssa_279 vec1 32 ssa_281 = intrinsic load_ssbo (ssa_276, ssa_280) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_282 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_283 = intrinsic load_deref (ssa_282) (0) /* access=0 */ vec4 32 ssa_284 = vec4 ssa_283.x, ssa_283.y, ssa_283.z, ssa_281 vec1 32 ssa_285 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_285, ssa_284) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_286 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_287 = intrinsic load_deref (ssa_286) (0) /* access=0 */ vec2 32 ssa_288 = vec2 ssa_287.y, ssa_287.w vec2 32 ssa_289 = ishl ssa_288, ssa_97 vec1 32 ssa_290 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_291 = intrinsic load_deref (ssa_290) (0) /* access=0 */ vec4 32 ssa_292 = vec4 ssa_289.x, ssa_291.y, ssa_289.y, ssa_291.w vec1 32 ssa_293 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_293, ssa_292) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_294 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_295 = intrinsic load_deref (ssa_294) (0) /* access=0 */ vec4 32 ssa_296 = ishr ssa_295, ssa_96 vec1 32 ssa_297 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_297, ssa_296) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_298 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_299 = intrinsic load_deref (ssa_298) (0) /* access=0 */ vec4 32 ssa_300 = i2f32 ssa_299 vec1 32 ssa_301 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_301, ssa_300) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 
ssa_302 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_303 = intrinsic load_deref (ssa_302) (0) /* access=0 */ vec3 32 ssa_304 = vec3 ssa_303.x, ssa_303.y, ssa_303.z vec3 32 ssa_305 = fmul ssa_304, ssa_95 vec1 32 ssa_306 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_307 = intrinsic load_deref (ssa_306) (0) /* access=0 */ vec4 32 ssa_308 = vec4 ssa_305.x, ssa_305.y, ssa_305.z, ssa_307.w vec1 32 ssa_309 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_309, ssa_308) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_310 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_311 = intrinsic load_deref (ssa_310) (0) /* access=0 */ vec1 32 ssa_312 = imov ssa_311.w vec1 32 ssa_313 = ffma ssa_312, ssa_94, ssa_93 vec1 32 ssa_314 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_315 = intrinsic load_deref (ssa_314) (0) /* access=0 */ vec4 32 ssa_316 = vec4 ssa_315.x, ssa_313, ssa_315.z, ssa_315.w vec1 32 ssa_317 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_317, ssa_316) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_318 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_319 = intrinsic load_deref (ssa_318) (0) /* access=0 */ vec1 32 ssa_320 = imov ssa_319.y vec1 32 ssa_321 = f2u32 ssa_320 vec1 32 ssa_322 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_323 = intrinsic load_deref (ssa_322) (0) /* access=0 */ vec4 32 ssa_324 = vec4 ssa_323.x, ssa_321, ssa_323.z, ssa_323.w vec1 32 ssa_325 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_325, ssa_324) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_326 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_327 = intrinsic load_deref (ssa_326) (0) /* access=0 */ vec3 32 ssa_328 = vec3 ssa_327.y, ssa_327.y, ssa_327.y vec3 32 ssa_329 = iand ssa_328, ssa_92 vec1 32 ssa_330 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_331 = intrinsic load_deref (ssa_330) (0) /* access=0 */ vec4 32 ssa_332 = vec4 ssa_329.x, ssa_329.y, ssa_329.z, ssa_331.w vec1 32 ssa_333 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_333, ssa_332) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_334 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_335 = intrinsic load_deref (ssa_334) (0) /* access=0 */ vec3 32 ssa_336 = vec3 ssa_335.x, ssa_335.y, ssa_335.z vec3 1 ssa_337 = ult ssa_91, ssa_336 vec3 32 ssa_338 = bcsel ssa_337, ssa_89, ssa_90 vec1 32 ssa_339 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_340 = intrinsic load_deref (ssa_339) (0) /* access=0 */ vec4 32 ssa_341 = vec4 ssa_338.x, ssa_338.y, ssa_338.z, ssa_340.w vec1 32 ssa_342 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_342, ssa_341) (15, 0) /* = deref_var &r0 (shader_temp vec4) vec4 32 ssa_230, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_298 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_299 = intrinsic load_deref (ssa_298) (0) /* access=0 */ vec2 32 ssa_300 = vec2 ssa_299.y, ssa_299.w vec2 32 ssa_301 = ishl ssa_300, ssa_87 vec1 32 ssa_302 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_303 = intrinsic load_deref (ssa_302) (0) /* access=0 */ vec4 32 ssa_304 = vec4 ssa_301.x, ssa_303.y, ssa_301.y, ssa_303.w vec1 32 ssa_305 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_305, ssa_304) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_306 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_307 = intrinsic load_deref (ssa_306) (0) /* access=0 */ vec4 32 ssa_308 = ishr ssa_307, ssa_86 vec1 32 ssa_309 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_309, ssa_308) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_310 = deref_var &r3 
(shader_temp vec4) vec4 32 ssa_311 = intrinsic load_deref (ssa_310) (0) /* access=0 */ vec4 32 ssa_312 = i2f32 ssa_311 vec1 32 ssa_313 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_313, ssa_312) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_314 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_315 = intrinsic load_deref (ssa_314) (0) /* access=0 */ vec3 32 ssa_316 = vec3 ssa_315.x, ssa_315.y, ssa_315.z vec3 32 ssa_317 = fmul ssa_316, ssa_85 vec1 32 ssa_318 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_319 = intrinsic load_deref (ssa_318) (0) /* access=0 */ vec4 32 ssa_320 = vec4 ssa_317.x, ssa_317.y, ssa_317.z, ssa_319.w vec1 32 ssa_321 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_321, ssa_320) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_322 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_323 = intrinsic load_deref (ssa_322) (0) /* access=0 */ vec1 32 ssa_324 = imov ssa_323.w vec1 32 ssa_325 = ffma ssa_324, ssa_84, ssa_83 vec1 32 ssa_326 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_327 = intrinsic load_deref (ssa_326) (0) /* access=0 */ vec4 32 ssa_328 = vec4 ssa_327.x, ssa_325, ssa_327.z, ssa_327.w vec1 32 ssa_329 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_329, ssa_328) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_330 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_331 = intrinsic load_deref (ssa_330) (0) /* access=0 */ vec1 32 ssa_332 = imov ssa_331.y vec1 32 ssa_333 = f2u32 ssa_332 vec1 32 ssa_334 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_335 = intrinsic load_deref (ssa_334) (0) /* access=0 */ vec4 32 ssa_336 = vec4 ssa_335.x, ssa_333, ssa_335.z, ssa_335.w vec1 32 ssa_337 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_337, ssa_336) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_338 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_339 = intrinsic load_deref (ssa_338) (0) /* access=0 */ vec3 32 ssa_340 = vec3 ssa_339.y, ssa_339.y, ssa_339.y vec3 32 ssa_341 = iand ssa_340, ssa_82 vec1 32 ssa_342 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_343 = intrinsic load_deref (ssa_342) (0) /* access=0 */ vec4 32 ssa_344 = vec4 ssa_341.x, ssa_341.y, ssa_341.z, ssa_343.w vec1 32 ssa_345 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_345, ssa_344) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_346 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_347 = intrinsic load_deref (ssa_346) (0) /* access=0 */ vec3 32 ssa_348 = vec3 ssa_347.x, ssa_347.y, ssa_347.z
[...]
vec4 32 ssa_230 = intrinsic load_deref (ssa_229) (0) /* access=0 */ vec1 32 ssa_231 = imov ssa_230.y vec1 32 ssa_232 = ushr ssa_104, ssa_103 vec1 32 ssa_233 = imul ssa_231, ssa_102 vec1 32 ssa_234 = iadd ssa_233, ssa_232 vec1 32 ssa_235 = iadd ssa_234, ssa_101 vec1 32 ssa_236 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_237 = intrinsic vulkan_resource_index (ssa_236) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_238 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_239 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_240 = ishl ssa_235, ssa_239 vec1 32 ssa_241 = iadd ssa_238, ssa_240 vec1 32 ssa_242 = intrinsic load_ssbo (ssa_237, ssa_241) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_243 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_244 = intrinsic load_deref (ssa_243) (0) /* access=0 */ vec4 32 ssa_245 = vec4 ssa_242, ssa_244.y, ssa_244.z, ssa_244.w vec1 32 ssa_246 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_246, ssa_245) (15, 0) /* wrmask=xyzw */ /* access=0
*/ vec1 32 ssa_247 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_248 = intrinsic load_deref (ssa_247) (0) /* access=0 */ vec1 32 ssa_249 = imov ssa_248.x vec1 32 ssa_250 = ushr ssa_100, ssa_99 vec1 32 ssa_251 = imul ssa_249, ssa_98 vec1 32 ssa_252 = iadd ssa_251, ssa_250 vec1 32 ssa_253 = iadd ssa_252, ssa_97 vec1 32 ssa_254 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_255 = intrinsic vulkan_resource_index (ssa_254) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_256 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_257 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_258 = ishl ssa_253, ssa_257 vec1 32 ssa_259 = iadd ssa_256, ssa_258 vec1 32 ssa_260 = intrinsic load_ssbo (ssa_255, ssa_259) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_261 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_262 = intrinsic load_deref (ssa_261) (0) /* access=0 */ vec4 32 ssa_263 = vec4 ssa_262.x, ssa_260, ssa_262.z, ssa_262.w vec1 32 ssa_264 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_264, ssa_263) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_265 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_266 = intrinsic load_deref (ssa_265) (0) /* access=0 */ vec3 32 ssa_267 = vec3 ssa_266.x, ssa_266.z, ssa_266.w vec3 32 ssa_268 = iadd ssa_267, ssa_96 vec1 32 ssa_269 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_270 = intrinsic load_deref (ssa_269) (0) /* access=0 */ vec4 32 ssa_271 = vec4 ssa_268.x, ssa_268.y, ssa_268.z, ssa_270.w vec1 32 ssa_272 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_272, ssa_271) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_273 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_274 = intrinsic load_deref (ssa_273) (0) /* access=0 */ vec1 32 ssa_275 = imov ssa_274.x vec1 32 ssa_276 = ushr ssa_95, ssa_94 vec1 32 ssa_277 = imul ssa_275, ssa_93 vec1 32 ssa_278 = iadd ssa_277, ssa_276 vec1 32 ssa_279 = iadd ssa_278, ssa_92 vec1 32 ssa_280 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_281 = intrinsic vulkan_resource_index (ssa_280) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_282 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_283 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_284 = ishl ssa_279, ssa_283 vec1 32 ssa_285 = iadd ssa_282, ssa_284 vec1 32 ssa_286 = intrinsic load_ssbo (ssa_281, ssa_285) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_287 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_288 = intrinsic load_deref (ssa_287) (0) /* access=0 */ vec4 32 ssa_289 = vec4 ssa_288.x, ssa_288.y, ssa_288.z, ssa_286 vec1 32 ssa_290 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_290, ssa_289) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_291 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_292 = intrinsic load_deref (ssa_291) (0) /* access=0 */ vec2 32 ssa_293 = vec2 ssa_292.y, ssa_292.w vec2 32 ssa_294 = ishl ssa_293, ssa_91 vec1 32 ssa_295 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_296 = intrinsic load_deref (ssa_295) (0) /* access=0 */ vec4 32 ssa_297 = vec4 ssa_294.x, ssa_296.y, ssa_294.y, ssa_296.w vec1 32 ssa_298 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_298, ssa_297) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_299 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_300 = intrinsic load_deref (ssa_299) (0) /* access=0 */ vec4 32 ssa_301 = ishr ssa_300, ssa_90 vec1 32 ssa_302 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_302, ssa_301) (15, 0) 
/* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_303 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_304 = intrinsic load_deref (ssa_303) (0) /* access=0 */ vec4 32 ssa_305 = i2f32 ssa_304 vec1 32 ssa_306 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_306, ssa_305) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_307 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_308 = intrinsic load_deref (ssa_307) (0) /* access=0 */ vec3 32 ssa_309 = vec3 ssa_308.x, ssa_308.y, ssa_308.z vec3 32 ssa_310 = fmul ssa_309, ssa_89 vec1 32 ssa_311 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_312 = intrinsic load_deref (ssa_311) (0) /* access=0 */ vec4 32 ssa_313 = vec4 ssa_310.x, ssa_310.y, ssa_310.z, ssa_312.w vec1 32 ssa_314 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_314, ssa_313) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_315 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_316 = intrinsic load_deref (ssa_315) (0) /* access=0 */ vec1 32 ssa_317 = imov ssa_316.w vec1 32 ssa_318 = ffma ssa_317, ssa_88, ssa_87 vec1 32 ssa_319 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_320 = intrinsic load_deref (ssa_319) (0) /* access=0 */ vec4 32 ssa_321 = vec4 ssa_320.x, ssa_318, ssa_320.z, ssa_320.w vec1 32 ssa_322 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_322, ssa_321) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_323 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_324 = intrinsic load_deref (ssa_323) (0) /* access=0 */ vec1 32 ssa_325 = imov ssa_324.y vec1 32 ssa_326 = f2u32 ssa_325 vec1 32 ssa_327 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_328 = intrinsic load_deref (ssa_327) (0) /* access=0 */ vec4 32 ssa_329 = vec4 ssa_328.x, ssa_326, ssa_328.z, ssa_328.w vec1 32 ssa_330 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_330, ssa_329) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_331 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_332 = intrinsic load_deref (ssa_331) (0) /* access=0 */ vec3 32 ssa_333 = vec3 ssa_332.y, ssa_332.y, ssa_332.y vec3 32 ssa_334 = iand ssa_333, ssa_86 vec1 32 ssa_335 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_336 = intrinsic load_deref (ssa_335) (0) /* access=0 */ vec4 32 ssa_337 = vec4 ssa_334.x, ssa_334.y, ssa_334.z, ssa_336.w vec1 32 ssa_338 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_338, ssa_337) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_339 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_340 = intrinsic load_deref (ssa_339) (0) /* access=0 */ vec3 32 ssa_341 = vec3 ssa_340.x, ssa_340.y, ssa_340.z vec3 1 ssa_342 = ult ssa_85, ssa_341 vec3 32 ssa_343 = bcsel ssa_342, ssa_83, ssa_84 vec1 32 ssa_344 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_345 = intrinsic load_deref (ssa_344) (0) /* access=0 */ vec4 32 ssa_346 = vec4 ssa_343.x, ssa_343.y, ssa_343.z, ssa_345.w vec1 32 ssa_347 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_347, ssa_346) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_348 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_349 = intrinsic load_deref (ssa_348) (0) /* access=0 */ vec3 32 ssa_350 = vec3 ssa_349.x, ssa_349.y, ssa_349.z vec3 1 ssa_351 = ine ssa_350, ssa_80 vec3 32 ssa_352 = bcsel ssa_351, ssa_82, ssa_81 vec1 32 ssa_353 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_354 = intrinsic load_deref (ssa_353) (0) /* access=0 */ vec4 32 ssa_355 = vec4 ssa_352.x, ssa_352.y, ssa_352.z, ssa_354.w vec1 32 ssa_356 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_356, ssa_355) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 
32 ssa_357 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_358 = intrinsic load_deref (ssa_357) (0) /* access=0 */ vec1 32 ssa_359 = imov ssa_358.x vec1 32 ssa_360 = iadd ssa_359, ssa_79 vec1 32 ssa_361 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_362 = intrinsic load_deref (ssa_361) (0) /* access=0 */ vec4 32 ssa_363 = vec4 ssa_362.x, ssa_360, ssa_362.z, ssa_362.w vec1 32 ssa_364 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_364, ssa_363) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_365 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_366 = intrinsic load_deref (ssa_365) (0) /* access=0 */ vec4 32 ssa_367 = vec4 ssa_366.y, ssa_366.y, ssa_366.y, ssa_366.y vec1 32 ssa_368 = imov ssa_367.x /* succs: block_10 block_11 */ if ssa_78 { block block_10: /* preds: block_9 */ vec1 32 ssa_369 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_370 = txf ssa_369 (texture_deref), ssa_368 (coord), 0 (sampler), vec2 32 ssa_371 = vec2 ssa_370.x, ssa_370.y vec1 32 ssa_372 = deref_var &phi@2 (function_temp vec2) intrinsic store_deref (ssa_372, ssa_371) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_12 */ } else { block block_11: /* preds: block_9 */ vec1 32 ssa_373 = deref_var &phi@2 (function_temp vec2) intrinsic store_deref (ssa_373, ssa_23) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_12 */ } block block_12: /* preds: block_10 block_11 */ vec1 32 ssa_374 = deref_var &phi@2 (function_temp vec2) vec2 32 ssa_375 = intrinsic load_deref (ssa_374) (0) /* access=0 */ vec1 32 ssa_376 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_377 = intrinsic load_deref (ssa_376) (0) /* access=0 */ vec4 32 ssa_378 = vec4 ssa_377.x, ssa_375.x, ssa_377.z
[...]
vec1 32 ssa_107 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_108 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_109 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_110 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2398 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2399 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2400 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2401 = load_const (0x00000002 /* 0.000000 */) vec4 32 ssa_2402 = vec4 ssa_2398, ssa_2399, ssa_2400, ssa_2401 vec1 32 ssa_112 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_113 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_114 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_115 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_116 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_117 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_118 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_119 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2403 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2404 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_2405 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_2406 = load_const (0x00000002 /* 0.000000 */) vec4 32 ssa_2407 = vec4 ssa_2403, ssa_2404, ssa_2405, ssa_2406 vec1 1 ssa_121 = load_const (true) vec1 1 ssa_122 = load_const (true) vec1 1 ssa_123 = load_const (true) vec1 32 ssa_124 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_125 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_126 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_127 = deref_var &shader_in (function_temp Z) vec1 32 ssa_128 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_129 = deref_array &(*ssa_127)[0] (function_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466)
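Annotation: the dump above is cut short by a NIR validation failure. The assertion at ../src/compiler/nir/nir_validate.c:466 fires because a deref_array instruction's result type does not match the element type of its parent deref: shader_in is typed as an opaque "Z" here (and "(null)" in a later occurrence) instead of an array of vec4, so &shader_in[0] (function_temp vec4) breaks the validator's type invariant. A minimal C sketch of that invariant, with simplified stand-in types rather than the real Mesa structures:

    #include <assert.h>
    #include <stdio.h>

    /* Simplified stand-ins for the GLSL type objects; hypothetical,
     * not the real Mesa structures. */
    typedef enum { TYPE_VEC4, TYPE_ARRAY_OF_VEC4, TYPE_NULL } type_t;

    /* Analogue of glsl_get_array_element(): element type of an array. */
    static type_t array_element(type_t t) {
        return t == TYPE_ARRAY_OF_VEC4 ? TYPE_VEC4 : TYPE_NULL;
    }

    /* The rule nir_validate checks for a deref_array: the result type
     * must equal the element type of the parent deref's type. With
     * shader_in typed "Z"/"(null)" instead of vec4[], this fails. */
    static void validate_deref_array(type_t instr_type, type_t parent_type) {
        assert(instr_type == array_element(parent_type));
    }

    int main(void) {
        validate_deref_array(TYPE_VEC4, TYPE_ARRAY_OF_VEC4); /* holds */
        puts("deref type invariant holds");
        return 0;
    }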
vec4 32 ssa_130 = intrinsic load_deref (ssa_129) (0) /* access=0 */ vec1 32 ssa_131 = imov ssa_130.x vec1 32 ssa_132 = ishl ssa_131, ssa_126 vec1 32 ssa_133 = deref_var &r0 (function_temp vec4) vec4 32 ssa_134 = intrinsic load_deref (ssa_133) (0) /* access=0 */ vec4 32 ssa_135 = vec4 ssa_132, ssa_134.y, ssa_134.z, ssa_134.w vec1 32 ssa_136 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_136, ssa_135) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_137 = deref_var &r0 (function_temp vec4) vec4 32 ssa_138 = intrinsic load_deref (ssa_137) (0) /* access=0 */ vec1 32 ssa_139 = imov ssa_138.x vec1 32 ssa_140 = ushr ssa_139, ssa_125 vec1 32 ssa_141 = deref_var &r0 (function_temp vec4) vec4 32 ssa_142 = intrinsic load_deref (ssa_141) (0) /* access=0 */ vec4 32 ssa_143 = vec4 ssa_140, ssa_142.y, ssa_142.z, ssa_142.w vec1 32 ssa_144 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_144, ssa_143) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_145 = deref_var &r0 (function_temp vec4) vec4 32 ssa_146 = intrinsic load_deref (ssa_145) (0) /* access=0 */ vec1 32 ssa_147 = imov ssa_146.x vec1 32 ssa_148 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_149 = intrinsic vulkan_resource_index (ssa_148) (0, 1, 6) /* desc-set=0 */ /* binding=1 */ /* desc_type=UBO */ vec1 32 ssa_150 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_151 = load_const (0x00000130 /* 0.000000 */) vec1 32 ssa_152 = iadd ssa_150, ssa_151 vec1 32 ssa_153 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_154 = iadd ssa_152, ssa_153 vec1 32 ssa_155 = intrinsic load_ubo (ssa_149, ssa_154) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_156 = iadd ssa_147, ssa_155 vec1 32 ssa_157 = deref_var &r0 (function_temp vec4) vec4 32 ssa_158 = intrinsic load_deref (ssa_157) (0) /* access=0 */ vec4 32 ssa_159 = vec4 ssa_156, ssa_158.y, ssa_158.z, ssa_158.w vec1 32 ssa_160 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_160, ssa_159) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_161 = deref_var &r0 (function_temp vec4) vec4 32 ssa_162 = intrinsic load_deref (ssa_161) (0) /* access=0 */ vec1 32 ssa_163 = imov ssa_162.x vec1 32 ssa_164 = iadd ssa_163, ssa_124 vec1 32 ssa_165 = deref_var &r0 (function_temp vec4) vec4 32 ssa_166 = intrinsic load_deref (ssa_165) (0) /* access=0 */ vec4 32 ssa_167 = vec4 ssa_166.x, ssa_164, ssa_166.z, ssa_166.w vec1 32 ssa_168 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_168, ssa_167) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_169 = deref_var &r0 (function_temp vec4) vec4 32 ssa_170 = intrinsic load_deref (ssa_169) (0) /* access=0 */ vec4 32 ssa_171 = vec4 ssa_170.x, ssa_170.x, ssa_170.x, ssa_170.x vec1 32 ssa_172 = imov ssa_171.x /* succs: block_1 block_2 */ if ssa_123 { block block_1: /* preds: block_0 */ vec1 32 ssa_173 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_2173 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_174 = txf ssa_173 (texture_deref), ssa_172 (coord), ssa_2173 (lod), 0 (sampler), vec2 32 ssa_175 = vec2 ssa_174.y, ssa_174.z /* succs: block_3 */ } else { block block_2: /* preds: block_0 */ /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ vec2 32 ssa_2193 = phi block_1: ssa_175, block_2: ssa_2269 vec2 32 ssa_2183 = imov ssa_2193 vec1 32 ssa_180 = deref_var &r0 (function_temp vec4) vec4 32 ssa_181 = intrinsic load_deref (ssa_180) (0) /* access=0 */ vec4 32 ssa_182 = vec4 ssa_2183.x, ssa_181.y, ssa_2183.y, ssa_181.w vec1 32 ssa_183 = deref_var &r0 (function_temp vec4) 
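Annotation: all UBO traffic in these dumps uses constant byte offsets: vulkan_resource_index selects the buffer (set 0, binding 1 above; binding 0 later on), then load_ubo reads 4-byte scalars at offsets such as 0x130 + 0x4 here, and later (ssa_492 through ssa_509) four loads at 0x210/0x214/0x218/0x21c are gathered into a vec4 and fed to fdot4, i.e. one row of a mat4 transform. A hedged C sketch of that addressing and dot product; the buffer layout and values are invented:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hedged sketch of load_ubo addressing: base + constant byte
     * offset, 4-byte aligned. Layout and contents are invented. */
    static float ubo_f32(const uint8_t *ubo, uint32_t off) {
        float f;
        memcpy(&f, ubo + off, sizeof f);   /* align_mul=4, align_offset=0 */
        return f;
    }

    /* fdot4 analogue: four gathered scalars dotted with a position,
     * one row of a mat4 transform. */
    static float dot4(const float a[4], const float b[4]) {
        return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3];
    }

    int main(void) {
        uint8_t ubo[0x220] = {0};
        float row[4] = {1.0f, 0.0f, 0.0f, 5.0f};   /* invented matrix row */
        memcpy(ubo + 0x210, row, sizeof row);
        float pos[4] = {2.0f, 3.0f, 4.0f, 1.0f};
        float gathered[4] = { ubo_f32(ubo, 0x210), ubo_f32(ubo, 0x214),
                              ubo_f32(ubo, 0x218), ubo_f32(ubo, 0x21c) };
        printf("%f\n", dot4(gathered, pos));       /* 1*2 + 5*1 = 7 */
        return 0;
    }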
intrinsic store_deref (ssa_183, ssa_182) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_184 = deref_var &r0 (function_temp vec4) vec4 32 ssa_185 = intrinsic load_deref (ssa_184) (0) /* access=0 */ vec4 32 ssa_186 = vec4 ssa_185.y, ssa_185.y, ssa_185.y, ssa_185.y vec1 32 ssa_187 = imov ssa_186.x /* succs: block_4 block_5 */ if ssa_122 { block block_4: /* preds: block_3 */ vec1 32 ssa_188 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_2174 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_189 = txf ssa_188 (texture_deref), ssa_187 (coord), ssa_2174 (lod), 0 (sampler), vec1 32 ssa_190 = imov ssa_189.w /* succs: block_6 */ } else { block block_5: /* preds: block_3 */ /* succs: block_6 */ } block block_6: /* preds: block_4 block_5 */ vec1 32 ssa_2194 = phi block_4: ssa_190, block_5: ssa_25 vec1 32 ssa_2184 = imov ssa_2194 vec1 32 ssa_195 = deref_var &r0 (function_temp vec4) vec4 32 ssa_196 = intrinsic load_deref (ssa_195) (0) /* access=0 */ vec4 32 ssa_197 = vec4 ssa_196.x, ssa_2184, ssa_196.z, ssa_196.w vec1 32 ssa_198 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_198, ssa_197) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_199 = deref_var &r0 (function_temp vec4) vec4 32 ssa_200 = intrinsic load_deref (ssa_199) (0) /* access=0 */ vec4 32 ssa_201 = vec4 ssa_200.y, ssa_200.y, ssa_200.y, ssa_200.y vec1 32 ssa_202 = imov ssa_201.x /* succs: block_7 block_8 */ if ssa_121 { block block_7: /* preds: block_6 */ vec1 32 ssa_203 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_2175 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_204 = txf ssa_203 (texture_deref), ssa_202 (coord), ssa_2175 (lod), 0 (sampler), /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ vec4 32 ssa_2195 = phi block_7: ssa_204, block_8: ssa_2266 vec4 32 ssa_2185 = imov ssa_2195 vec1 32 ssa_209 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_209, ssa_2185) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_210 = deref_var &shader_in (function_temp Z) vec1 32 ssa_211 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_212 = deref_array &(*ssa_210)[1] (function_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_213 = intrinsic load_deref (ssa_212) (0) /* access=0 */ vec4 32 ssa_214 = vec4 ssa_213.x, ssa_213.x, ssa_213.x, ssa_213.x vec4 32 ssa_215 = ishl ssa_214, ssa_2407 vec1 32 ssa_216 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_216, ssa_215) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_217 = deref_var &r1 (function_temp vec4) vec4 32 ssa_218 = intrinsic load_deref (ssa_217) (0) /* access=0 */ vec4 32 ssa_219 = vec4 ssa_218.z, ssa_218.w, ssa_218.x, ssa_218.x vec1 32 ssa_220 = deref_var &r2 (function_temp vec4) vec4 32 ssa_221 = intrinsic load_deref (ssa_220) (0) /* access=0 */ vec4 32 ssa_222 = iadd ssa_219, ssa_221 vec1 32 ssa_223 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_223, ssa_222) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_224 = deref_var &r1 (function_temp vec4) vec4 32 ssa_225 = intrinsic load_deref (ssa_224) (0) /* access=0 */ vec1 32 ssa_226 = imov ssa_225.y vec1 32 ssa_227 = deref_var &shader_in (function_temp (null)) vec1 32 ssa_228 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_229 = deref_array &(*ssa_227)[1] (function_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) 
(../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_230 = intrinsic load_deref (ssa_229) (0) /* access=0 */ vec1 32 ssa_231 = imov ssa_230.x vec1 32 ssa_232 = iadd ssa_226, ssa_231 vec1 32 ssa_233 = deref_var &r0 (function_temp vec4) vec4 32 ssa_234 = intrinsic load_deref (ssa_233) (0) /* access=0 */ vec4 32 ssa_235 = vec4 ssa_234.x, ssa_232, ssa_234.z, ssa_234.w vec1 32 ssa_236 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_236, ssa_235) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_237 = deref_var &r0 (function_temp vec4) vec4 32 ssa_238 = intrinsic load_deref (ssa_237) (0) /* access=0 */ vec1 32 ssa_239 = imov ssa_238.y vec1 32 ssa_240 = ushr ssa_119, ssa_118 vec1 32 ssa_241 = imul ssa_239, ssa_117 vec1 32 ssa_242 = iadd ssa_241, ssa_240 vec1 32 ssa_243 = iadd ssa_242, ssa_116 vec1 32 ssa_244 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_245 = intrinsic vulkan_resource_index (ssa_244) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_246 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_247 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_248 = ishl ssa_243, ssa_247 vec1 32 ssa_249 = iadd ssa_246, ssa_248 vec1 32 ssa_250 = intrinsic load_ssbo (ssa_245, ssa_249) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_251 = deref_var &r1 (function_temp vec4) vec4 32 ssa_252 = intrinsic load_deref (ssa_251) (0) /* access=0 */ vec4 32 ssa_253 = vec4 ssa_250, ssa_252.y, ssa_252.z, ssa_252.w vec1 32 ssa_254 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_254, ssa_253) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_255 = deref_var &r2 (function_temp vec4) vec4 32 ssa_256 = intrinsic load_deref (ssa_255) (0) /* access=0 */ vec1 32 ssa_257 = imov ssa_256.x vec1 32 ssa_258 = ushr ssa_115, ssa_114 vec1 32 ssa_259 = imul ssa_257, ssa_113 vec1 32 ssa_260 = iadd ssa_259, ssa_258 vec1 32 ssa_261 = iadd ssa_260, ssa_112 vec1 32 ssa_262 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_263 = intrinsic vulkan_resource_index (ssa_262) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_264 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_265 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_266 = ishl ssa_261, ssa_265 vec1 32 ssa_267 = iadd ssa_264, ssa_266 vec1 32 ssa_268 = intrinsic load_ssbo (ssa_263, ssa_267) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_269 = deref_var &r3 (function_temp vec4) vec4 32 ssa_270 = intrinsic load_deref (ssa_269) (0) /* access=0 */ vec4 32 ssa_271 = vec4 ssa_270.x, ssa_268, ssa_270.z, ssa_270.w vec1 32 ssa_272 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_272, ssa_271) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_273 = deref_var &r2 (function_temp vec4) vec4 32 ssa_274 = intrinsic load_deref (ssa_273) (0) /* access=0 */ vec4 32 ssa_275 = iadd ssa_274, ssa_2402 vec1 32 ssa_276 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_276, ssa_275) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_277 = deref_var &r4 (function_temp vec4) vec4 32 ssa_278 = intrinsic load_deref (ssa_277) (0) /* access=0 */ vec1 32 ssa_279 = imov ssa_278.x vec1 32 ssa_280 = ushr ssa_110, ssa_109 vec1 32 ssa_281 = imul ssa_279, ssa_108 vec1 32 ssa_282 = iadd ssa_281, ssa_280 vec1 32 ssa_283 = iadd ssa_282, ssa_107 vec1 32 ssa_284 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_285 = intrinsic vulkan_resource_index (ssa_284) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ 
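Annotation: the stretch that follows (ssa_286 through ssa_322) repeats the dump's main SSBO idiom: build an element index, ishl it by 2 to get a byte offset, load a 32-bit word from binding 4 with load_ssbo (align_mul=4), then unpack signed bitfields with an ishl/ishr pair, convert with i2f32, and rescale with fmul/ffma. A hedged C sketch of that arithmetic; the field position and the scale are assumptions, since the dump only shows them as ssa_* operands:

    #include <stdint.h>
    #include <stdio.h>

    /* ishl/ishr pair: shift the field up so its top bit lands in bit 31,
     * then arithmetic-shift back down to sign-extend. Assumes the usual
     * arithmetic right shift on signed int (true on mainstream compilers). */
    static int32_t sign_extend_field(uint32_t word, unsigned lo, unsigned bits) {
        return (int32_t)(word << (32u - lo - bits)) >> (32 - bits);
    }

    int main(void) {
        uint32_t ssbo[4] = { 0xFFF00123u, 0, 0, 0 };     /* invented data */
        uint32_t index = 0;                  /* result of the iadd chain */
        uint32_t byte_offset = 0 + (index << 2);         /* ishl by 2 */
        uint32_t word = ssbo[byte_offset / 4];           /* load_ssbo, align 4 */
        int32_t field = sign_extend_field(word, 20, 12); /* assumed 12-bit field */
        float value = (float)field * (1.0f / 2048.0f);   /* i2f32 + fmul scale */
        printf("%d -> %f\n", field, value);
        return 0;
    }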
vec1 32 ssa_286 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_287 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_288 = ishl ssa_283, ssa_287 vec1 32 ssa_289 = iadd ssa_286, ssa_288 vec1 32 ssa_290 = intrinsic load_ssbo (ssa_285, ssa_289) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_291 = deref_var &r3 (function_temp vec4) vec4 32 ssa_292 = intrinsic load_deref (ssa_291) (0) /* access=0 */ vec4 32 ssa_293 = vec4 ssa_292.x, ssa_292.y, ssa_292.z, ssa_290 vec1 32 ssa_294 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_294, ssa_293) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_295 = deref_var &r3 (function_temp vec4) vec4 32 ssa_296 = intrinsic load_deref (ssa_295) (0) /* access=0 */ vec2 32 ssa_297 = vec2 ssa_296.y, ssa_296.w vec2 32 ssa_298 = ishl ssa_297, ssa_2397 vec1 32 ssa_299 = deref_var &r3 (function_temp vec4) vec4 32 ssa_300 = intrinsic load_deref (ssa_299) (0) /* access=0 */ vec4 32 ssa_301 = vec4 ssa_298.x, ssa_300.y, ssa_298.y, ssa_300.w vec1 32 ssa_302 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_302, ssa_301) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_303 = deref_var &r3 (function_temp vec4) vec4 32 ssa_304 = intrinsic load_deref (ssa_303) (0) /* access=0 */ vec4 32 ssa_305 = ishr ssa_304, ssa_2394 vec1 32 ssa_306 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_306, ssa_305) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_307 = deref_var &r3 (function_temp vec4) vec4 32 ssa_308 = intrinsic load_deref (ssa_307) (0) /* access=0 */ vec4 32 ssa_309 = i2f32 ssa_308 vec1 32 ssa_310 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_310, ssa_309) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_311 = deref_var &r3 (function_temp vec4) vec4 32 ssa_312 = intrinsic load_deref (ssa_311) (0) /* access=0 */ vec3 32 ssa_313 = vec3 ssa_312.x, ssa_312.y, ssa_312.z vec3 32 ssa_314 = fmul ssa_313, ssa_2389 vec1 32 ssa_315 = deref_var &r3 (function_temp vec4) vec4 32 ssa_316 = intrinsic load_deref (ssa_315) (0) /* access=0 */ vec4 32 ssa_317 = vec4 ssa_314.x, ssa_314.y, ssa_314.z, ssa_316.w vec1 32 ssa_318 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_318, ssa_317) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_319 = deref_var &r3 (function_temp vec4) vec4 32 ssa_320 = intrinsic load_deref (ssa_319) (0) /* access=0 */ vec1 32 ssa_321 = imov ssa_320.w vec1 32 ssa_322 = ffma ssa_321, ssa_103, ssa_102 vec1 32 ssa_323 = deref_var &r0 (function_temp vec4) vec4 32 ssa_324 = intrinsic load_deref (ssa_323) (0) /* access=0 */ vec4 32 ssa_325 = vec4 ssa_324.x, ssa_322, ssa_324.z, ssa_324.w vec1 32 ssa_326 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_326, ssa_325) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_327 = deref_var &r0 (function_temp vec4) vec4 32 ssa_328 = intrinsic load_deref (ssa_327) (0) /* access=0 */ vec1 32 ssa_329 = imov ssa_328.y vec1 32 ssa_330 = f2u32 ssa_329 vec1 32 ssa_331 = deref_var &r0 (function_temp vec4) vec4 32 ssa_332 = intrinsic load_deref (ssa_331) (0) /* access=0 */ vec4 32 ssa_333 = vec4 ssa_332.x, ssa_330, ssa_332.z, ssa_332.w vec1 32 ssa_334 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_334, ssa_333) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_335 = deref_var &r0 (function_temp vec4) vec4 32 ssa_336 = intrinsic load_deref (ssa_335) (0) /* access=0 */ vec3 32 ssa_337 = vec3 ssa_336.y, ssa_336.y, ssa_336.y vec3 32 ssa_338 = iand ssa_337, ssa_2385 vec1 32 ssa_339 = 
deref_var &r5 (function_temp vec4) vec4 32 ssa_340 = intrinsic load_deref (ssa_339) (0) /* access=0 */ vec4 32 ssa_341 = vec4 ssa_338.x, ssa_338.y, ssa_338.z, ssa_340.w vec1 32 ssa_342 = deref_var &r5 (function_temp vec4) intrinsic store_deref (ssa_342, ssa_341) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_343 = deref_var &r5 (function_temp vec4) vec4 32 ssa_344 = intrinsic load_deref (ssa_343) (0) /* access=0 */ vec3 32 ssa_345 = vec3 ssa_344.x, ssa_344.y, ssa_344.z vec3 1 ssa_346 = ult ssa_2381, ssa_345 vec3 32 ssa_347 = bcsel ssa_346, ssa_2373, ssa_2377 vec1 32 ssa_348 = deref_var &r5 (function_temp vec4) vec4 32 ssa_349 = intrinsic load_deref (ssa_348) (0) /* access=0 */ vec4 32 ssa_350 = vec4 ssa_347.x, ssa_347.y, ssa_347.z, ssa_349.w vec1 32 ssa_351 = deref_var &r5 (function_temp vec4) intrinsic store_deref (ssa_351, ssa_350) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_352 = deref_var &r5 (function_temp vec4) vec4 32 ssa_353 = intrinsic load_deref (ssa_352) (0) /* access=0 */ vec3 32 ssa_354 = vec3 ssa_353.x, ssa_353.y, ssa_353.z vec3 1 ssa_355 = ine ssa_354, ssa_2361 vec3 32 ssa_356 = bcsel ssa_355, ssa_2369, ssa_2365 vec1 32 ssa_357 = deref_var &r5 (function_temp vec4) vec4 32 ssa_358 = intrinsic load_deref (ssa_357) (0) /* access=0 */ vec4 32 ssa_359 = vec4 ssa_356.x, ssa_356.y, ssa_356.z, ssa_358.w vec1 32 ssa_360 = deref_var &r5 (function_temp vec4) intrinsic store_deref (ssa_360, ssa_359) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_361 = deref_var &r0 (function_temp vec4) vec4 32 ssa_362 = intrinsic load_deref (ssa_361) (0) /* access=0 */ vec1 32 ssa_363 = imov ssa_362.x vec1 32 ssa_364 = iadd ssa_363, ssa_94 vec1 32 ssa_365 = deref_var &r0 (function_temp vec4) vec4 32 ssa_366 = intrinsic load_deref (ssa_365) (0) /* access=0 */ vec4 32 ssa_367 = vec4 ssa_366.x, ssa_364, ssa_366.z, ssa_366.w vec1 32 ssa_368 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_368, ssa_367) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_369 = deref_var &r0 (function_temp vec4) vec4 32 ssa_370 = intrinsic load_deref (ssa_369) (0) /* access=0 */ vec4 32 ssa_371 = vec4 ssa_370.y, ssa_370.y, ssa_370.y, ssa_370.y vec1 32 ssa_372 = imov ssa_371.x /* succs: block_10 block_11 */ if ssa_93 { block block_10: /* preds: block_9 */ vec1 32 ssa_373 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_2176 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_374 = txf ssa_373 (texture_deref), ssa_372 (coord), ssa_2176 (lod), 0 (sampler), vec2 32 ssa_375 = vec2 ssa_374.x, ssa_374.y /* succs: block_12 */ } else { block block_11: /* preds: block_9 */ /* succs: block_12 */ } block block_12: /* preds: block_10 block_11 */ vec2 32 ssa_2196 = phi block_10: ssa_375, block_11: ssa_2261 vec2 32 ssa_2186 = imov ssa_2196 vec1 32 ssa_380 = deref_var &r0 (function_temp vec4) vec4 32 ssa_381 = intrinsic load_deref (ssa_380) (0) /* access=0 */ vec4 32 ssa_382 = vec4 ssa_381.x, ssa_2186.x, ssa_381.z, ssa_2186.y vec1 32 ssa_383 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_383, ssa_382) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_384 = deref_var &r0 (function_temp vec4) vec4 32 ssa_385 = intrinsic load_deref (ssa_384) (0) /* access=0 */ vec3 32 ssa_386 = vec3 ssa_385.y, ssa_385.y, ssa_385.y vec1 32 ssa_387 = deref_var &r3 (function_temp vec4) vec4 32 ssa_388 = intrinsic load_deref (ssa_387) (0) /* access=0 */ vec3 32 ssa_389 = vec3 ssa_388.x, ssa_388.y, ssa_388.z vec3 32 ssa_390 = fmul ssa_386, ssa_389 vec1 32 ssa_391 = deref_var &r3 (function_temp vec4) vec4 
32 ssa_392 = intrinsic load_deref (ssa_391) (0) /* access=0 */ vec4 32 ssa_393 = vec4 ssa_390.x, ssa_390.y, ssa_390.z, ssa_392.w vec1 32 ssa_394 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_394, ssa_393) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_395 = deref_var &r0 (function_temp vec4) vec4 32 ssa_396 = intrinsic load_deref (ssa_395) (0) /* access=0 */ vec4 32 ssa_397 = vec4 ssa_396.x, ssa_396.x, ssa_396.x, ssa_396.x vec1 32 ssa_398 = imov ssa_397.x /* succs: block_13 block_14 */ if ssa_92 { block block_13: /* preds: block_12 */ vec1 32 ssa_399 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_2177 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_400 = txf ssa_399 (texture_deref), ssa_398 (coord), ssa_2177 (lod), 0 (sampler), /* succs: block_15 */ } else { block block_14: /* preds: block_12 */ /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ vec4 32 ssa_2197 = phi block_13: ssa_400, block_14: ssa_2258 vec4 32 ssa_2187 = imov ssa_2197 vec1 32 ssa_405 = deref_var &r6 (function_temp vec4) intrinsic store_deref (ssa_405, ssa_2187) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_406 = deref_var &r6 (function_temp vec4) vec4 32 ssa_407 = intrinsic load_deref (ssa_406) (0) /* access=0 */ vec1 32 ssa_408 = imov ssa_407.w vec1 32 ssa_409 = deref_var &r7 (function_temp vec4) vec4 32 ssa_410 = intrinsic load_deref (ssa_409) (0) /* access=0 */ vec4 32 ssa_411 = vec4 ssa_408, ssa_410.y, ssa_410.z, ssa_410.w vec1 32 ssa_412 = deref_var &r7 (function_temp vec4) intrinsic store_deref (ssa_412, ssa_411) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_413 = deref_var &r0 (function_temp vec4) vec4 32 ssa_414 = intrinsic load_deref (ssa_413) (0) /* access=0 */ vec4 32 ssa_415 = vec4 ssa_414.x, ssa_414.x, ssa_414.z, ssa_414.z vec4 32 ssa_416 = iadd ssa_415, ssa_2357 vec1 32 ssa_417 = deref_var &r8 (function_temp vec4) intrinsic store_deref (ssa_417, ssa_416) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_418 = deref_var &r0 (function_temp vec4) vec4 32 ssa_419 = intrinsic load_deref (ssa_418) (0) /* access=0 */ vec4 32 ssa_420 = vec4 ssa_419.z, ssa_419.z, ssa_419.z, ssa_419.z vec1 32 ssa_421 = imov ssa_420.x /* succs: block_16 block_17 */ if ssa_90 { block block_16: /* preds: block_15 */ vec1 32 ssa_422 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_2178 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_423 = txf ssa_422 (texture_deref), ssa_421 (coord), ssa_2178 (lod), 0 (sampler), /* succs: block_18 */ } else { block block_17: /* preds: block_15 */ /* succs: block_18 */ } block block_18: /* preds: block_16 block_17 */ vec4 32 ssa_2198 = phi block_16: ssa_423, block_17: ssa_2253 vec4 32 ssa_2188 = imov ssa_2198 vec1 32 ssa_428 = deref_var &r9 (function_temp vec4) intrinsic store_deref (ssa_428, ssa_2188) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_429 = deref_var &r8 (function_temp vec4) vec4 32 ssa_430 = intrinsic load_deref (ssa_429) (0) /* access=0 */ vec4 32 ssa_431 = vec4 ssa_430.x, ssa_430.x, ssa_430.x, ssa_430.x vec1 32 ssa_432 = imov ssa_431.x /* succs: block_19 block_20 */
[...]
vec1 32 ssa_343 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_344 = intrinsic load_deref (ssa_343) (0) /* access=0 */
[...]
vec3 32 ssa_345 = vec3 ssa_344.x, ssa_344.y, ssa_344.z vec3 1 ssa_346 = ine ssa_345, ssa_86 vec3 32 ssa_347 = bcsel ssa_346, ssa_88, ssa_87 vec1 32 ssa_348 =
deref_var &r5 (shader_temp vec4) vec4 32 ssa_349 = intrinsic load_deref (ssa_348) (0) /* access=0 */ vec4 32 ssa_350 = vec4 ssa_347.x, ssa_347.y, ssa_347.z, ssa_349.w vec1 32 ssa_351 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_351, ssa_350) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_352 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_353 = intrinsic load_deref (ssa_352) (0) /* access=0 */ vec1 32 ssa_354 = imov ssa_353.x vec1 32 ssa_355 = iadd ssa_354, ssa_85 vec1 32 ssa_356 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_357 = intrinsic load_deref (ssa_356) (0) /* access=0 */ vec4 32 ssa_358 = vec4 ssa_357.x, ssa_355, ssa_357.z, ssa_357.w vec1 32 ssa_359 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_359, ssa_358) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_360 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_361 = intrinsic load_deref (ssa_360) (0) /* access=0 */ vec4 32 ssa_362 = vec4 ssa_361.y, ssa_361.y, ssa_361.y, ssa_361.y vec1 32 ssa_363 = imov ssa_362.x /* succs: block_10 block_11 */ if ssa_84 { block block_10: /* preds: block_9 */ vec1 32 ssa_364 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_365 =
[...]
vec3 32 ssa_350 = bcsel ssa_349, ssa_79, ssa_80 vec1 32 ssa_351 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_352 = intrinsic load_deref (ssa_351) (0) /* access=0 */ vec4 32 ssa_353 = vec4 ssa_350.x, ssa_350.y, ssa_350.z, ssa_352.w vec1 32 ssa_354 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_354, ssa_353) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_355 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_356 = intrinsic load_deref (ssa_355) (0) /* access=0 */ vec3 32 ssa_357 = vec3 ssa_356.x, ssa_356.y, ssa_356.z vec3 1 ssa_358 = ine ssa_357, ssa_76 vec3 32 ssa_359 = bcsel ssa_358, ssa_78, ssa_77 vec1 32 ssa_360 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_361 = intrinsic load_deref (ssa_360) (0) /* access=0 */ vec4 32 ssa_362 = vec4 ssa_359.x, ssa_359.y, ssa_359.z, ssa_361.w vec1 32 ssa_363 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_363, ssa_362) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_364 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_365 = intrinsic load_deref (ssa_364) (0) /* access=0 */ vec1 32 ssa_366 = imov ssa_365.x vec1 32 ssa_367 = iadd ssa_366, ssa_75 vec1 32 ssa_368 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_369 = intrinsic load_deref (ssa_368) (0) /* access=0 */ vec4 32 ssa_370 = vec4 ssa_369.x, ssa_367, ssa_369.z, ssa_369.w vec1 32 ssa_371 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_371, ssa_370) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_372 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_373 = intrinsic load_deref (ssa_372) (0) /* access=0 */ vec4 32 ssa_374 = vec4 ssa_373.y, ssa_373.y, ssa_373.y, ssa_373.y vec1 32 ssa_375 = imov ssa_374.x /* succs: block_10 block_11 */ if ssa_74 { block block_10: /* preds: block_9 */ vec1 32 ssa_376 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_377 = txf ssa_376 (texture_deref), ssa_375 (coord), 0 (sampler), vec2 32 ssa_378 = vec2 ssa_377.x, ssa_377.y vec1 32 ssa_379 = deref_var &phi@2 (function_temp vec2) intrinsic store_deref (ssa_379, ssa_378) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_12 */ } else { block block_11: /* preds: block_9 */ vec1 32 ssa_380 = deref_var &phi@2 (function_temp vec2) intrinsic store_deref (ssa_380, ssa_23) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_12 */ }
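Annotation: every samplerBuffer fetch in these shaders sits behind the same guard: an if on a boolean constant with txf in the then-branch and a default in the else-branch, merged either through a real phi (as in the function_temp variant above) or, as here, through a scratch variable named phi@N that both branches store to. A hedged C sketch of the control-flow shape, with fetch_texel() standing in for txf on t80:

    #include <stdio.h>

    typedef struct { float x, y; } vec2;

    /* Stand-in for txf on the samplerBuffer t80; invented values. */
    static vec2 fetch_texel(int coord) {
        return (vec2){ (float)coord, (float)coord + 0.5f };
    }

    /* The if/else-around-txf shape: when the guard is false the shader
     * keeps a fallback instead of fetching, and the merge block reads
     * whichever side wrote the scratch variable ("phi@2" in the dump). */
    static vec2 guarded_fetch(int guard, int coord, vec2 fallback) {
        vec2 phi;
        if (guard) phi = fetch_texel(coord);   /* block_10: txf + store */
        else       phi = fallback;             /* block_11: store default */
        return phi;                            /* block_12: load_deref */
    }

    int main(void) {
        vec2 v = guarded_fetch(1, 7, (vec2){0.0f, 0.0f});
        printf("%g %g\n", v.x, v.y);
        return 0;
    }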
block block_12: /* preds: block_10 block_11 */ vec1 32 ssa_381 = deref_var &phi@2 (function_temp vec2) vec2 32 ssa_382 = intrinsic load_deref (ssa_381) (0) /* access=0 */ vec1 32 ssa_383 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_384 = intrinsic load_deref (ssa_383) (0) /* access=0 */ vec4 32 ssa_385 = vec4 ssa_384.x, ssa_382.x, ssa_384.z, ssa_382.y vec1 32 ssa_386 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_386, ssa_385) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_387 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_388 = intrinsic load_deref (ssa_387) (0) /* access=0 */ vec3 32 ssa_389 = vec3 ssa_388.y, ssa_388.y, ssa_388.y vec1 32 ssa_390 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_391 = intrinsic load_deref (ssa_390) (0) /* access=0 */ vec3 32 ssa_392 = vec3 ssa_391.x, ssa_391.y, ssa_391.z vec3 32 ssa_393 = fmul ssa_389, ssa_392 vec1 32 ssa_394 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_395 = intrinsic load_deref (ssa_394) (0) /* access=0 */ vec4 32 ssa_396 = vec4 ssa_393.x, ssa_393.y, ssa_393.z, ssa_395.w vec1 32 ssa_397 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_397, ssa_396) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_398 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_399 = intrinsic load_deref (ssa_398) (0) /* access=0 */ vec4 32 ssa_400 = vec4 ssa_399.x, ssa_399.y, ssa_399.z, ssa_73 vec1 32 ssa_401 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_401, ssa_400) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_402 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_403 = intrinsic load_deref (ssa_402) (0) /* access=0 */ vec4 32 ssa_404 = vec4 ssa_403.x, ssa_403.x, ssa_403.x, ssa_403.x vec1 32 ssa_405 = imov ssa_404.x /* succs: block_13 block_14 */ if ssa_72 { block block_13: /* preds: block_12 */ vec1 32 ssa_406 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_407 = txf ssa_406 (texture_deref), ssa_405 (coord), 0 (sampler), vec1 32 ssa_408 = deref_var &phi@3 (function_temp vec4) intrinsic store_deref (ssa_408, ssa_407) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_15 */ } else { block block_14: /* preds: block_12 */ vec1 32 ssa_409 = deref_var &phi@3 (function_temp vec4) intrinsic store_deref (ssa_409, ssa_22) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ vec1 32 ssa_410 = deref_var &phi@3 (function_temp vec4) vec4 32 ssa_411 = intrinsic load_deref (ssa_410) (0) /* access=0 */ vec1 32 ssa_412 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_412, ssa_411) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_413 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_414 = intrinsic load_deref (ssa_413) (0) /* access=0 */ vec1 32 ssa_415 = imov ssa_414.w vec1 32 ssa_416 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_417 = intrinsic load_deref (ssa_416) (0) /* access=0 */ vec4 32 ssa_418 = vec4 ssa_415, ssa_417.y, ssa_417.z, ssa_417.w vec1 32 ssa_419 = deref_var &r9 (shader_temp vec4) intrinsic store_deref (ssa_419, ssa_418) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_420 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_421 = intrinsic load_deref (ssa_420) (0) /* access=0 */ vec4 32 ssa_422 = vec4 ssa_421.x, ssa_421.x, ssa_421.z, ssa_421.z vec4 32 ssa_423 = iadd ssa_422, ssa_71 vec1 32 ssa_424 = deref_var &r10 (shader_temp vec4) intrinsic store_deref (ssa_424, ssa_423) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_425 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_426 = intrinsic load_deref (ssa_425) (0) /* 
access=0 */ vec4 32 ssa_427 = vec4 ssa_426.z, ssa_426.z, ssa_426.z, ssa_426.z vec1 32 ssa_428 = imov ssa_427.x /* succs: block_16 block_17 */ if ssa_70 { block block_16: /* preds: block_15 */ vec1 32 ssa_429 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_430 = txf ssa_429 (texture_deref), ssa_428 (coord), 0 (sampler), vec1 32 ssa_431 = deref_var &phi@4 (function_temp vec4) intrinsic store_deref (ssa_431, ssa_430) (15, 0) /* wrmask=xyzw */ /* access=0 */
[...]
txf ssa_364 (texture_deref), ssa_363 (coord), 0 (sampler), vec2 32 ssa_366 = vec2 ssa_365.x, ssa_365.y vec1 32 ssa_367 = deref_var &phi@2 (function_temp vec2) intrinsic store_deref (ssa_367, ssa_366) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_12 */ } else { block block_11: /* preds: block_9 */ vec1 32 ssa_368 = deref_var &phi@2 (function_temp vec2) intrinsic store_deref (ssa_368, ssa_23) (3, 0) /* wrmask=xy */ /* access=0 */ /* succs: block_12 */ }
[...]
block block_12: /* preds: block_10 block_11 */ vec1 32 ssa_369 = deref_var &phi@2 (function_temp vec2) vec2 32 ssa_370 = intrinsic load_deref (ssa_369) (0) /* access=0 */ vec1 32 ssa_371 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_372 = intrinsic load_deref (ssa_371) (0) /* access=0 */ vec4 32 ssa_373 = vec4 ssa_372.x, ssa_370.x, ssa_372.z, ssa_370.y vec1 32 ssa_374 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_374, ssa_373) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_375 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_376 = intrinsic load_deref (ssa_375) (0) /* access=0 */ vec3 32 ssa_377 = vec3 ssa_376.y, ssa_376.y, ssa_376.y vec1 32 ssa_378 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_379 = intrinsic load_deref (ssa_378) (0) /* access=0 */ vec3 32 ssa_380 = vec3 ssa_379.x, ssa_379.y, ssa_379.z vec3 32 ssa_381 = fmul ssa_377, ssa_380 vec1 32 ssa_382 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_383 = intrinsic load_deref (ssa_382) (0) /* access=0 */ vec4 32 ssa_384 = vec4 ssa_381.x, ssa_381.y, ssa_381.z, ssa_383.w vec1 32 ssa_385 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_385, ssa_384) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_386 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_387 = intrinsic load_deref (ssa_386) (0) /* access=0 */ vec4 32 ssa_388 = vec4 ssa_387.x, ssa_387.x, ssa_387.x, ssa_387.x vec1 32 ssa_389 = imov ssa_388.x /* succs: block_13 block_14 */ if ssa_83 { block block_13: /* preds: block_12 */ vec1 32 ssa_390 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_391 = txf ssa_390 (texture_deref), ssa_389 (coord), 0 (sampler), vec1 32 ssa_392 = deref_var &phi@3 (function_temp vec4) intrinsic store_deref (ssa_392, ssa_391) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_15 */ } else { block block_14: /* preds: block_12 */ vec1 32 ssa_393 = deref_var &phi@3 (function_temp vec4) intrinsic store_deref (ssa_393, ssa_22) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ vec1 32 ssa_394 = deref_var &phi@3 (function_temp vec4) vec4 32 ssa_395 = intrinsic load_deref (ssa_394) (0) /* access=0 */ vec1 32 ssa_396 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_396, ssa_395) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_397 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_398 = intrinsic load_deref (ssa_397) (0) /* access=0 */ vec1 32 ssa_399 = imov ssa_398.w vec1 32
ssa_400 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_401 = intrinsic load_deref (ssa_400) (0) /* access=0 */ vec4 32 ssa_402 = vec4 ssa_399, ssa_401.y, ssa_401.z, ssa_401.w vec1 32 ssa_403 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_403, ssa_402) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_404 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_405 = intrinsic load_deref (ssa_404) (0) /* access=0 */ vec4 32 ssa_406 = vec4 ssa_405.x, ssa_405.x, ssa_405.z, ssa_405.z vec4 32 ssa_407 = iadd ssa_406, ssa_82 vec1 32 ssa_408 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_408, ssa_407) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_409 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_410 = intrinsic load_deref (ssa_409) (0) /* access=0 */ vec4 32 ssa_411 = vec4 ssa_410.z, ssa_410.z, ssa_410.z, ssa_410.z vec1 32 ssa_412 = imov ssa_411.x /* succs: block_16 block_17 */ if ssa_81 { block block_16: /* preds: block_15 */ vec1 32 ssa_413 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_414 = txf ssa_413 (texture_deref), ssa_412 (coord), 0 (sampler), vec1 32 ssa_415 = deref_var &phi@4 (function_temp vec4) intrinsic store_deref (ssa_415, ssa_414) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_18 */ } else { block block_17: /* preds: block_15 */ vec1 32 ssa_416 = deref_var &phi@4 (function_temp vec4) intrinsic store_deref (ssa_416, ssa_21) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_18 */ } block block_18: /* preds: block_16 block_17 */ vec1 32 ssa_417 = deref_var &phi@4 (function_temp vec4) vec4 32 ssa_418 = intrinsic load_deref (ssa_417) (0) /* access=0 */ vec1 32 ssa_419 = deref_var &r9 (shader_temp vec4) intrinsic store_deref (ssa_419, ssa_418) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_420 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_421 = intrinsic load_deref (ssa_420) (0) /* access=0 */ vec4 32 ssa_422 = vec4 ssa_421.x, ssa_421.x, ssa_421.x, ssa_421.x vec1 32 ssa_423 = imov ssa_422.x /* succs: block_19 block_20 */ if ssa_80 { block block_19: /* preds: block_18 */ vec1 32 ssa_424 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_425 = txf ssa_424 (texture_deref), ssa_423 (coord), 0 (sampler), vec1 32 ssa_426 = deref_var &phi@5 (function_temp vec4) intrinsic store_deref (ssa_426, ssa_425) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_21 */ } else { block block_20: /* preds: block_18 */ vec1 32 ssa_427 = deref_var &phi@5 (function_temp vec4) intrinsic store_deref (ssa_427, ssa_20) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_21 */ } block block_21: /* preds: block_19 block_20 */
[...]
{ block block_19: /* preds: block_18 */ vec1 32 ssa_433 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_556
[...]
vec1 32 ssa_380 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_381 = intrinsic load_deref (ssa_380) (0) /* access=0 */ vec3 32 ssa_382 = vec3 ssa_381.y, ssa_381.y, ssa_381.y vec1 32 ssa_383 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_384 = intrinsic load_deref (ssa_383) (0) /* access=0 */ vec3 32 ssa_385 = vec3 ssa_384.x, ssa_384.y, ssa_384.z vec3 32 ssa_386 = fmul ssa_382, ssa_385 vec1 32 ssa_387 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_388 = intrinsic load_deref (ssa_387) (0) /* access=0 */ vec4 32 ssa_389 = vec4 ssa_386.x, ssa_386.y, ssa_386.z, ssa_388.w vec1 32 ssa_390 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_390, ssa_389) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_391 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_392 = intrinsic load_deref (ssa_391) (0) /*
access=0 */ vec4 32 ssa_393 = vec4 ssa_392.x, ssa_392.x, ssa_392.x, ssa_392.x vec1 32 ssa_394 = imov ssa_393.x /* succs: block_13 block_14 */ if ssa_77 { block block_13: /* preds: block_12 */ vec1 32 ssa_395 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_396 = txf ssa_395 (texture_deref), ssa_394 (coord), 0 (sampler), vec1 32 ssa_397 = deref_var &phi@3 (function_temp vec4) intrinsic store_deref (ssa_397, ssa_396) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_15 */ } else { block block_14: /* preds: block_12 */ vec1 32 ssa_398 = deref_var &phi@3 (function_temp vec4) intrinsic store_deref (ssa_398, ssa_22) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ vec1 32 ssa_399 = deref_var &phi@3 (function_temp vec4) vec4 32 ssa_400 = intrinsic load_deref (ssa_399) (0) /* access=0 */ vec1 32 ssa_401 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_401, ssa_400) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_402 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_403 = intrinsic load_deref (ssa_402) (0) /* access=0 */ vec1 32 ssa_404 = imov ssa_403.w vec1 32 ssa_405 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_406 = intrinsic load_deref (ssa_405) (0) /* access=0 */ vec4 32 ssa_407 = vec4 ssa_404, ssa_406.y, ssa_406.z, ssa_406.w vec1 32 ssa_408 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_408, ssa_407) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_409 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_410 = intrinsic load_deref (ssa_409) (0) /* access=0 */ vec4 32 ssa_411 = vec4 ssa_410.x, ssa_410.x, ssa_410.z, ssa_410.z vec4 32 ssa_412 = iadd ssa_411, ssa_76 vec1 32 ssa_413 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_413, ssa_412) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_414 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_415 = intrinsic load_deref (ssa_414) (0) /* access=0 */ vec4 32 ssa_416 = vec4 ssa_415.z, ssa_415.z, ssa_415.z, ssa_415.z vec1 32 ssa_417 = imov ssa_416.x /* succs: block_16 block_17 */ if ssa_75 { block block_16: /* preds: block_15 */ vec1 32 ssa_418 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_419 = txf ssa_418 (texture_deref), ssa_417 (coord), 0 (sampler), vec1 32 ssa_420 = deref_var &phi@4 (function_temp vec4) intrinsic store_deref (ssa_420, ssa_419) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_18 */ } else { block block_17: /* preds: block_15 */ vec1 32 ssa_421 = deref_var &phi@4 (function_temp vec4) intrinsic store_deref (ssa_421, ssa_21) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_18 */ } block block_18: /* preds: block_16 block_17 */ vec1 32 ssa_422 = deref_var &phi@4 (function_temp vec4) vec4 32 ssa_423 = intrinsic load_deref (ssa_422) (0) /* access=0 */ vec1 32 ssa_424 = deref_var &r9 (shader_temp vec4) intrinsic store_deref (ssa_424, ssa_423) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_425 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_426 = intrinsic load_deref (ssa_425) (0) /* access=0 */ vec4 32 ssa_427 = vec4 ssa_426.x, ssa_426.x, ssa_426.x, ssa_426.x vec1 32 ssa_428 = imov ssa_427.x /* succs: block_19 block_20 */ if ssa_74 { block block_19: /* preds:
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
block_18 */ vec1 32 ssa_429 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_430 = txf ssa_429 (texture_deref), ssa_428 (coord), 0 (sampler), vec1 32 ssa_431 = deref_var &phi@5 (function_temp vec4) intrinsic store_deref (ssa_431, ssa_430) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_21 */ } else { block block_20: /* preds: block_18 */ vec1 32 ssa_432 = deref_var &phi@5 (function_temp vec4) intrinsic store_deref (ssa_432, ssa_20) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_21 */ } block block_21: /* preds: block_19 block_20 */ vec1 32 ssa_433 = deref_var &phi@5 (function_temp vec4) vec4 32 ssa_434 = intrinsic load_deref (ssa_433) (0) /* access=0 */ vec1 32 ssa_435 = deref_var &r10 (shader_temp vec4) intrinsic store_deref (ssa_435, ssa_434) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_436 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_437 = intrinsic load_deref (ssa_436) (0) /* access=0 */ vec1 32 ssa_438 = imov ssa_437.w vec1 32 ssa_439 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_440 = intrinsic load_deref (ssa_439) (0) /* access=0 */ vec4 32 ssa_441 = vec4 ssa_440.x, ssa_438, ssa_440.z, ssa_440.w vec1 32 ssa_442 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_442, ssa_441) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_443 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_444 = intrinsic load_deref (ssa_443) (0) /* access=0 */ vec4 32 ssa_445 = vec4 ssa_444.y, ssa_444.y, ssa_444.y, ssa_444.y vec1 32 ssa_446 = imov ssa_445.x /* succs: block_22 block_23 */ if ssa_73 { block block_22: /* preds: block_21 */ vec1 32 ssa_447 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_448 = txf ssa_447 (texture_deref), ssa_446 (coord), 0 (sampler), vec1 32 ssa_449 = deref_var &phi@6 (function_temp vec4) intrinsic store_deref (ssa_449, ssa_448) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_24 */ } else { block block_23: /* preds: block_21 */ vec1 32 ssa_450 = deref_var &phi@6 (function_temp vec4) intrinsic store_deref (ssa_450, ssa_19) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_24 */ } block block_24: /* preds: block_22 block_23 */ vec1 32 ssa_451 = deref_var &phi@6 (function_temp vec4) vec4 32 ssa_452 = intrinsic load_deref (ssa_451) (0) /* access=0 */ vec1 32 ssa_453 = deref_var &r11 (shader_temp vec4) intrinsic store_deref (ssa_453, ssa_452) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_454 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_455 = intrinsic load_deref (ssa_454) (0) /* access=0 */ vec1 32 ssa_456 = imov ssa_455.w vec1 32 ssa_457 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_458 = intrinsic load_deref (ssa_457) (0) /* access=0 */ vec4 32 ssa_459 = vec4 ssa_458.x, ssa_458.y, ssa_456, ssa_458.w vec1 32 ssa_460 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_460, ssa_459) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_461 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_462 = intrinsic load_deref (ssa_461) (0) /* access=0 */ vec3 32 ssa_463 = vec3 ssa_462.x, ssa_462.y, ssa_462.z vec1 32 ssa_464 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_465 = intrinsic vulkan_resource_index (ssa_464) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_466 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_467 = load_const (0x00000310 /* 0.000000 */) vec1 32 ssa_468 = iadd ssa_466, ssa_467 vec1 32 ssa_469 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_470 = iadd ssa_468, ssa_469 vec1 32 ssa_471 =
intrinsic load_ubo (ssa_465, ssa_470) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_472 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_473 = iadd ssa_468, ssa_472 vec1 32 ssa_474 = intrinsic load_ubo (ssa_465, ssa_473) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_475 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_476 = iadd ssa_468, ssa_475 vec1 32 ssa_477 = intrinsic load_ubo (ssa_465, ssa_476) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_478 = vec3 ssa_471, ssa_474, ssa_477 vec3 32 ssa_479 = fneg ssa_478 vec3 32 ssa_480 = fadd ssa_463, ssa_479 vec1 32 ssa_481 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_482 = intrinsic load_deref (ssa_481) (0) /* access=0 */ vec4 32 ssa_483 = vec4 ssa_480.x, ssa_480.y, ssa_480.z, ssa_482.w vec1 32 ssa_484 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_484, ssa_483) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_485 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_486 = intrinsic load_deref (ssa_485) (0) /* access=0 */ vec4 32 ssa_487 = vec4 ssa_486.x, ssa_486.y, ssa_486.z, ssa_72 vec1 32 ssa_488 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_488, ssa_487) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_489 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_490 = intrinsic load_deref (ssa_489) (0) /* access=0 */ vec1 32 ssa_491 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_492 = intrinsic vulkan_resource_index (ssa_491) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_493 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_494 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_495 = iadd ssa_493, ssa_494 vec1 32 ssa_496 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_497 = iadd ssa_495, ssa_496 vec1 32 ssa_498 = intrinsic load_ubo (ssa_492, ssa_497) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_499 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_500 = iadd ssa_495, ssa_499 vec1 32 ssa_501 = intrinsic load_ubo (ssa_492, ssa_500) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_502 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_503 = iadd ssa_495, ssa_502 vec1 32 ssa_504 = intrinsic load_ubo (ssa_492, ssa_503) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_505 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_506 = iadd ssa_495, ssa_505 vec1 32 ssa_507 = intrinsic load_ubo (ssa_492, ssa_506) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_508 = vec4 ssa_498, ssa_501, ssa_504, ssa_507 vec1 32 ssa_509 = fdot4 ssa_490, ssa_508 vec1 32 ssa_510 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_511 = intrinsic load_deref (ssa_510) (0) /* access=0 */ vec4 32 ssa_512 = vec4 ssa_511.x, ssa_511.y, ssa_511.z, ssa_509 vec1 32 ssa_513 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_513, ssa_512) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_514 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_515 = intrinsic load_deref (ssa_514) (0) /* access=0 */ vec1 32 ssa_516 = imov ssa_515.x vec1 32 ssa_517 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_518 = intrinsic load_deref (ssa_517) (0) /* access=0 */ vec4 32 ssa_519 = vec4 ssa_516, ssa_518.y, ssa_518.z, ssa_518.w vec1 32 ssa_520 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_520, ssa_519) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_521 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_522 = intrinsic load_deref (ssa_521) (0) /* access=0 */ vec1 32 ssa_523 = imov ssa_522.x vec1 32 ssa_524 = deref_var &r0 
(shader_temp vec4) vec4 32 ssa_525 = intrinsic load_deref (ssa_524) (0) /* access=0 */ vec4 32 ssa_526 = vec4 ssa_525.x, ssa_523, ssa_525.z, ssa_525.w vec1 32 ssa_527 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_527, ssa_526) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_528 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_529 = intrinsic load_deref (ssa_528) (0) /* access=0 */ vec1 32 ssa_530 = imov ssa_529.x vec1 32 ssa_531 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_532 = intrinsic load_deref (ssa_531) (0) /* access=0 */ vec4 32 ssa_533 = vec4 ssa_532.x, ssa_532.y, ssa_530, ssa_532.w vec1 32 ssa_534 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_534, ssa_533) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_535 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_536 = intrinsic load_deref (ssa_535) (0) /* access=0 */ vec3 32 ssa_537 = vec3 ssa_536.x, ssa_536.y, ssa_536.z vec1 32 ssa_538 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_539 = intrinsic vulkan_resource_index (ssa_538) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_540 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_541 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_542 = iadd ssa_540, ssa_541 vec1 32 ssa_543 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_544 = iadd ssa_542, ssa_543 vec1 32 ssa_545 = intrinsic load_ubo (ssa_539, ssa_544) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_546 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_547 = iadd ssa_542, ssa_546 vec1 32 ssa_548 = intrinsic load_ubo (ssa_539, ssa_547) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_549 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_550 = iadd ssa_542, ssa_549 vec1 32 ssa_551 = intrinsic load_ubo (ssa_539, ssa_550) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_552 = vec3 ssa_545, ssa_548, ssa_551 vec1 32 ssa_553 = fdot3 ssa_537, ssa_552 vec1 32 ssa_554 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_555 = intrinsic load_deref (ssa_554) (0) /* access=0 */ vec4 32 ssa_556 = vec4 ssa_553, ssa_555.y, ssa_555.z, ssa_555.w vec1 32 ssa_557 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_557, ssa_556) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_558 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_559 = intrinsic load_deref (ssa_558) (0) /* access=0 */ vec1 32 ssa_560 = imov ssa_559.y vec1 32 ssa_561 = deref_var &r13 (shader_temp vec4)

block_2 */ /* succs: block_18 */ } else { block block_17: /* preds: block_15 */ vec1 32 ssa_432 = deref_var &phi@4 (function_temp vec4) intrinsic store_deref (ssa_432, ssa_21) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_18 */ } block block_18: /* preds: block_16 block_17 */ vec1 32 ssa_433 = deref_var &phi@4 (function_temp vec4) vec4 32 ssa_434 = intrinsic load_deref (ssa_433) (0) /* access=0 */ vec1 32 ssa_435 = deref_var &r11 (shader_temp vec4) intrinsic store_deref (ssa_435, ssa_434) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_436 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_437 = intrinsic load_deref (ssa_436) (0) /* access=0 */ vec4 32 ssa_438 = vec4 ssa_437.x, ssa_437.x, ssa_437.x, ssa_437.x vec1 32 ssa_439 = imov ssa_438.x

vec1 32 ssa_2179 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_434 = txf ssa_433 (texture_deref), ssa_432 (coord), ssa_2179 (lod), 0 (sampler), /* succs: block_21 */ } else { block block_20: /* preds: block_18 */ /* succs: block_21 */ } block block_21: /*
preds: block_19 block_20 */ vec4 32 ssa_2199 = phi block_19: ssa_434, block_20: ssa_2248 vec4 32 ssa_2189 = imov ssa_2199 vec1 32 ssa_439 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_439, ssa_2189) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_440 = deref_var &r10 (function_temp vec4) vec4 32 ssa_441 = intrinsic load_deref (ssa_440) (0) /* access=0 */ vec1 32 ssa_442 = imov ssa_441.w vec1 32 ssa_443 = deref_var &r7 (function_temp vec4) vec4 32 ssa_444 = intrinsic load_deref (ssa_443) (0) /* access=0 */ vec4 32 ssa_445 = vec4 ssa_444.x, ssa_442, ssa_444.z, ssa_444.w vec1 32 ssa_446 = deref_var &r7 (function_temp vec4) intrinsic store_deref (ssa_446, ssa_445) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_447 = deref_var &r8 (function_temp vec4) vec4 32 ssa_448 = intrinsic load_deref (ssa_447) (0) /* access=0 */ vec4 32 ssa_449 = vec4 ssa_448.y, ssa_448.y, ssa_448.y, ssa_448.y vec1 32 ssa_450 = imov ssa_449.x /* succs: block_22 block_23 */ if ssa_88 { block block_22: /* preds: block_21 */ vec1 32 ssa_451 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_2180 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_452 = txf ssa_451 (texture_deref), ssa_450 (coord), ssa_2180 (lod), 0 (sampler), /* succs: block_24 */ } else { block block_23: /* preds: block_21 */ /* succs: block_24 */ } block block_24: /* preds: block_22 block_23 */ vec4 32 ssa_2200 = phi block_22: ssa_452, block_23: ssa_2243 vec4 32 ssa_2190 = imov ssa_2200 vec1 32 ssa_457 = deref_var &r11 (function_temp vec4) intrinsic store_deref (ssa_457, ssa_2190) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_458 = deref_var &r11 (function_temp vec4) vec4 32 ssa_459 = intrinsic load_deref (ssa_458) (0) /* access=0 */ vec1 32 ssa_460 = imov ssa_459.w vec1 32 ssa_461 = deref_var &r7 (function_temp vec4) vec4 32 ssa_462 = intrinsic load_deref (ssa_461) (0) /* access=0 */ vec4 32 ssa_463 = vec4 ssa_462.x, ssa_462.y, ssa_460, ssa_462.w vec1 32 ssa_464 = deref_var &r7 (function_temp vec4) intrinsic store_deref (ssa_464, ssa_463) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_465 = deref_var &r7 (function_temp vec4) vec4 32 ssa_466 = intrinsic load_deref (ssa_465) (0) /* access=0 */ vec3 32 ssa_467 = vec3 ssa_466.x, ssa_466.y, ssa_466.z vec1 32 ssa_468 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_469 = intrinsic vulkan_resource_index (ssa_468) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_470 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_471 = load_const (0x00000310 /* 0.000000 */) vec1 32 ssa_472 = iadd ssa_470, ssa_471 vec1 32 ssa_473 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_474 = iadd ssa_472, ssa_473 vec1 32 ssa_475 = intrinsic load_ubo (ssa_469, ssa_474) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_476 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_477 = iadd ssa_472, ssa_476 vec1 32 ssa_478 = intrinsic load_ubo (ssa_469, ssa_477) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_479 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_480 = iadd ssa_472, ssa_479 vec1 32 ssa_481 = intrinsic load_ubo (ssa_469, ssa_480) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_482 = vec3 ssa_475, ssa_478, ssa_481 vec3 32 ssa_483 = fneg ssa_482 vec3 32 ssa_484 = fadd ssa_467, ssa_483 vec1 32 ssa_485 = deref_var &r7 (function_temp vec4) vec4 32 ssa_486 = intrinsic load_deref (ssa_485) (0) /* access=0 */ vec4 32 ssa_487 = vec4 ssa_484.x, ssa_484.y, ssa_484.z, ssa_486.w vec1 32 ssa_488 = deref_var &r7 (function_temp 
vec4) intrinsic store_deref (ssa_488, ssa_487) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_489 = deref_var &r7 (function_temp vec4) vec4 32 ssa_490 = intrinsic load_deref (ssa_489) (0) /* access=0 */ vec4 32 ssa_491 = vec4 ssa_490.x, ssa_490.y, ssa_490.z, ssa_87 vec1 32 ssa_492 = deref_var &r7 (function_temp vec4) intrinsic store_deref (ssa_492, ssa_491) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_493 = deref_var &r7 (function_temp vec4) vec4 32 ssa_494 = intrinsic load_deref (ssa_493) (0) /* access=0 */ vec1 32 ssa_495 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_496 = intrinsic vulkan_resource_index (ssa_495) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_497 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_498 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_499 = iadd ssa_497, ssa_498 vec1 32 ssa_500 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_501 = iadd ssa_499, ssa_500 vec1 32 ssa_502 = intrinsic load_ubo (ssa_496, ssa_501) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_503 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_504 = iadd ssa_499, ssa_503 vec1 32 ssa_505 = intrinsic load_ubo (ssa_496, ssa_504) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_506 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_507 = iadd ssa_499, ssa_506 vec1 32 ssa_508 = intrinsic load_ubo (ssa_496, ssa_507) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_509 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_510 = iadd ssa_499, ssa_509 vec1 32 ssa_511 = intrinsic load_ubo (ssa_496, ssa_510) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_512 = vec4 ssa_502, ssa_505, ssa_508, ssa_511 vec1 32 ssa_513 = fdot4 ssa_494, ssa_512 vec1 32 ssa_514 = deref_var &r12 (function_temp vec4) vec4 32 ssa_515 = intrinsic load_deref (ssa_514) (0) /* access=0 */ vec4 32 ssa_516 = vec4 ssa_515.x, ssa_515.y, ssa_515.z, ssa_513 vec1 32 ssa_517 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_517, ssa_516) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_518 = deref_var &r6 (function_temp vec4) vec4 32 ssa_519 = intrinsic load_deref (ssa_518) (0) /* access=0 */ vec1 32 ssa_520 = imov ssa_519.x vec1 32 ssa_521 = deref_var &r0 (function_temp vec4) vec4 32 ssa_522 = intrinsic load_deref (ssa_521) (0) /* access=0 */ vec4 32 ssa_523 = vec4 ssa_520, ssa_522.y, ssa_522.z, ssa_522.w vec1 32 ssa_524 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_524, ssa_523) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_525 = deref_var &r10 (function_temp vec4) vec4 32 ssa_526 = intrinsic load_deref (ssa_525) (0) /* access=0 */ vec1 32 ssa_527 = imov ssa_526.x vec1 32 ssa_528 = deref_var &r0 (function_temp vec4) vec4 32 ssa_529 = intrinsic load_deref (ssa_528) (0) /* access=0 */ vec4 32 ssa_530 = vec4 ssa_529.x, ssa_527, ssa_529.z, ssa_529.w vec1 32 ssa_531 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_531, ssa_530) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_532 = deref_var &r11 (function_temp vec4) vec4 32 ssa_533 = intrinsic load_deref (ssa_532) (0) /* access=0 */ vec1 32 ssa_534 = imov ssa_533.x vec1 32 ssa_535 = deref_var &r0 (function_temp vec4) vec4 32 ssa_536 = intrinsic load_deref (ssa_535) (0) /* access=0 */ vec4 32 ssa_537 = vec4 ssa_536.x, ssa_536.y, ssa_534, ssa_536.w vec1 32 ssa_538 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_538, ssa_537) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_539 = deref_var &r0 (function_temp 
vec4) vec4 32 ssa_540 = intrinsic load_deref (ssa_539) (0) /* access=0 */ vec3 32 ssa_541 = vec3 ssa_540.x, ssa_540.y, ssa_540.z vec1 32 ssa_542 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_543 = intrinsic vulkan_resource_index (ssa_542) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_544 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_545 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_546 = iadd ssa_544, ssa_545 vec1 32 ssa_547 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_548 = iadd ssa_546, ssa_547 vec1 32 ssa_549 = intrinsic load_ubo (ssa_543, ssa_548) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_550 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_551 = iadd ssa_546, ssa_550 vec1 32 ssa_552 = intrinsic load_ubo (ssa_543, ssa_551) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_553 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_554 = iadd ssa_546, ssa_553 vec1 32 ssa_555 = intrinsic load_ubo (ssa_543, ssa_554) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_556 = vec3 ssa_549, ssa_552, ssa_555 vec1 32 ssa_557 = fdot3 ssa_541, ssa_556 vec1 32 ssa_558 = deref_var &r12 (function_temp vec4) vec4 32 ssa_559 = intrinsic load_deref (ssa_558) (0) /* access=0 */ vec4 32 ssa_560 = vec4 ssa_557, ssa_559.y, ssa_559.z, ssa_559.w vec1 32 ssa_561 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_561, ssa_560) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_562 = deref_var &r6 (function_temp vec4) vec4 32 ssa_563 = intrinsic load_deref (ssa_562) (0) /* access=0 */ vec1 32 ssa_564 = imov ssa_563.y vec1 32 ssa_565 = deref_var &r13 (function_temp vec4) vec4 32 ssa_566 = intrinsic load_deref (ssa_565) (0) /* access=0 */ vec4 32 ssa_567 = vec4 ssa_564, ssa_566.y, ssa_566.z, ssa_566.w vec1 32 ssa_568 = deref_var &r13 (function_temp vec4) intrinsic store_deref (ssa_568, ssa_567) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_569 = deref_var &r10 (function_temp vec4) vec4 32 ssa_570 = intrinsic load_deref (ssa_569) (0) /* access=0 */ vec1 32 ssa_571 = imov ssa_570.y vec1 32 ssa_572 = deref_var &r13 (function_temp vec4) vec4 32 ssa_573 = intrinsic load_deref (ssa_572) (0) /* access=0 */ vec4 32 ssa_574 = vec4 ssa_573.x, ssa_571, ssa_573.z, ssa_573.w vec1 32 ssa_575 = deref_var &r13 (function_temp vec4) intrinsic store_deref (ssa_575, ssa_574) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_576 = deref_var &r11 (function_temp vec4) vec4 32 ssa_577 = intrinsic load_deref (ssa_576) (0) /* access=0 */ vec1 32 ssa_578 = imov ssa_577.y vec1 32 ssa_579 = deref_var &r13 (function_temp vec4) vec4 32 ssa_580 = intrinsic load_deref (ssa_579) (0) /* access=0 */ vec4 32 ssa_581 = vec4 ssa_580.x, ssa_580.y, ssa_578, ssa_580.w vec1 32 ssa_582 = deref_var &r13 (function_temp vec4) intrinsic store_deref (ssa_582, ssa_581) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_583 = deref_var &r13 (function_temp vec4) vec4 32 ssa_584 = intrinsic load_deref (ssa_583) (0) /* access=0 */ vec3 32 ssa_585 = vec3 ssa_584.x, ssa_584.y, ssa_584.z vec1 32 ssa_586 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_587 = intrinsic vulkan_resource_index (ssa_586) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_588 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_589 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_590 = iadd ssa_588, ssa_589 vec1 32 ssa_591 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_592 = iadd ssa_590, ssa_591 vec1 32 ssa_593 = intrinsic load_ubo 
(ssa_587, ssa_592) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_594 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_595 = iadd ssa_590, ssa_594 vec1 32 ssa_596 = intrinsic load_ubo (ssa_587, ssa_595) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_597 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_598 = iadd ssa_590, ssa_597 vec1 32 ssa_599 = intrinsic load_ubo (ssa_587, ssa_598) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_600 = vec3 ssa_593, ssa_596, ssa_599 vec1 32 ssa_601 = fdot3 ssa_585, ssa_600 vec1 32 ssa_602 = deref_var &r12 (function_temp vec4) vec4 32 ssa_603 = intrinsic load_deref (ssa_602) (0) /* access=0 */ vec4 32 ssa_604 = vec4 ssa_603.x, ssa_601, ssa_603.z, ssa_603.w vec1 32 ssa_605 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_605, ssa_604) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_606 = deref_var &r6 (function_temp vec4) vec4 32 ssa_607 = intrinsic load_deref (ssa_606) (0) /* access=0 */ vec1 32 ssa_608 = imov ssa_607.z vec1 32 ssa_609 = deref_var &r14 (function_temp vec4) vec4 32 ssa_610 = intrinsic load_deref (ssa_609) (0) /* access=0 */ vec4 32 ssa_611 = vec4 ssa_608, ssa_610.y, ssa_610.z, ssa_610.w vec1 32 ssa_612 = deref_var &r14 (function_temp vec4) intrinsic store_deref (ssa_612, ssa_611) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_613 = deref_var &r10 (function_temp vec4) vec4 32 ssa_614 = intrinsic load_deref (ssa_613) (0) /* access=0 */ vec1 32 ssa_615 = imov ssa_614.z vec1 32 ssa_616 = deref_var &r14 (function_temp vec4) vec4 32 ssa_617 = intrinsic load_deref (ssa_616) (0) /* access=0 */ vec4 32 ssa_618 = vec4 ssa_617.x, ssa_615, ssa_617.z, ssa_617.w vec1 32 ssa_619 = deref_var &r14 (function_temp vec4) intrinsic store_deref (ssa_619, ssa_618) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_620 = deref_var &r11 (function_temp vec4) vec4 32 ssa_621 = intrinsic load_deref (ssa_620) (0) /* access=0 */ vec1 32 ssa_622 = imov ssa_621.z vec1 32 ssa_623 = deref_var &r14 (function_temp vec4) vec4 32 ssa_624 = intrinsic load_deref (ssa_623) (0) /* access=0 */ vec4 32 ssa_625 = vec4 ssa_624.x, ssa_624.y, ssa_622, ssa_624.w vec1 32 ssa_626 = deref_var &r14 (function_temp vec4) intrinsic store_deref (ssa_626, ssa_625) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_627 = deref_var &r14 (function_temp vec4) vec4 32 ssa_628 = intrinsic load_deref (ssa_627) (0) /* access=0 */ vec3 32 ssa_629 = vec3 ssa_628.x, ssa_628.y, ssa_628.z vec1 32 ssa_630 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_631 = intrinsic vulkan_resource_index (ssa_630) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_632 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_633 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_634 = iadd ssa_632, ssa_633 vec1 32 ssa_635 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_636 = iadd ssa_634, ssa_635 vec1 32 ssa_637 = intrinsic load_ubo (ssa_631, ssa_636) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_638 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_639 = iadd ssa_634, ssa_638 vec1 32 ssa_640 = intrinsic load_ubo (ssa_631, ssa_639) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_641 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_642 = iadd ssa_634, ssa_641 vec1 32 ssa_643 = intrinsic load_ubo (ssa_631, ssa_642) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_644 = vec3 ssa_637, ssa_640, ssa_643 vec1 32 ssa_645 = fdot3 ssa_629, ssa_644 vec1 32 ssa_646 = deref_var &r12 (function_temp 
vec4) vec4 32 ssa_647 = intrinsic load_deref (ssa_646) (0) /* access=0 */

/* succs: block_19 block_20 */ if ssa_69 { block block_19: /* preds: block_18 */ vec1 32 ssa_440 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_441 = txf ssa_440 (texture_deref), ssa_439 (coord), 0 (sampler), vec1 32 ssa_442 = deref_var &phi@5 (function_temp vec4) intrinsic store_deref (ssa_442, ssa_441) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_21 */ } else { block block_20: /* preds: block_18 */ vec1 32 ssa_443 = deref_var &phi@5 (function_temp vec4) intrinsic store_deref (ssa_443, ssa_20) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_21 */ } block block_21: /* preds: block_19 block_20 */ vec1 32 ssa_444 = deref_var &phi@5 (function_temp vec4) vec4 32 ssa_445 = intrinsic load_deref (ssa_444) (0) /* access=0 */ vec1 32 ssa_446 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_446, ssa_445) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_447 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_448 = intrinsic load_deref (ssa_447) (0) /* access=0 */ vec1 32 ssa_449 = imov ssa_448.w vec1 32 ssa_450 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_451 = intrinsic load_deref (ssa_450) (0) /* access=0 */ vec4 32 ssa_452 = vec4 ssa_451.x, ssa_449, ssa_451.z, ssa_451.w vec1 32 ssa_453 = deref_var &r9 (shader_temp vec4) intrinsic store_deref (ssa_453, ssa_452) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_454 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_455 = intrinsic load_deref (ssa_454) (0) /* access=0 */ vec4 32 ssa_456 = vec4 ssa_455.y, ssa_455.y, ssa_455.y, ssa_455.y vec1 32 ssa_457 = imov ssa_456.x /* succs: block_22 block_23 */ if ssa_68 { block block_22: /* preds: block_21 */ vec1 32 ssa_458 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_459 = txf ssa_458 (texture_deref), ssa_457 (coord), 0 (sampler), vec1 32 ssa_460 = deref_var &phi@6 (function_temp vec4) intrinsic store_deref (ssa_460, ssa_459) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_24 */ } else { block block_23: /* preds: block_21 */ vec1 32 ssa_461 = deref_var &phi@6 (function_temp vec4) intrinsic store_deref (ssa_461, ssa_19) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_24 */ } block block_24: /* preds: block_22 block_23 */ vec1 32 ssa_462 = deref_var &phi@6 (function_temp vec4) vec4 32 ssa_463 = intrinsic load_deref (ssa_462) (0) /* access=0 */ vec1 32 ssa_464 = deref_var &r13 (shader_temp vec4) intrinsic store_deref (ssa_464, ssa_463) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_465 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_466 = intrinsic load_deref (ssa_465) (0) /* access=0 */ vec1 32 ssa_467 = imov ssa_466.w vec1 32 ssa_468 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_469 = intrinsic load_deref (ssa_468) (0) /* access=0 */ vec4 32 ssa_470 = vec4 ssa_469.x, ssa_469.y, ssa_467, ssa_469.w vec1 32 ssa_471 = deref_var &r9 (shader_temp vec4) intrinsic store_deref (ssa_471, ssa_470) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_472 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_473 = intrinsic load_deref (ssa_472) (0) /* access=0 */ vec3 32 ssa_474 = vec3 ssa_473.x, ssa_473.y, ssa_473.z vec1 32 ssa_475 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_476 = intrinsic vulkan_resource_index (ssa_475) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_477 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_478 = load_const (0x00000310 /* 0.000000 */) vec1 32 ssa_479 = iadd ssa_477, ssa_478 vec1 32 ssa_480 = load_const (0x00000000 /*
0.000000 */) vec1 32 ssa_481 = iadd ssa_479, ssa_480 vec1 32 ssa_482 = intrinsic load_ubo (ssa_476, ssa_481) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_483 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_484 = iadd ssa_479, ssa_483 vec1 32 ssa_485 = intrinsic load_ubo (ssa_476, ssa_484) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_486 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_487 = iadd ssa_479, ssa_486 vec1 32 ssa_488 = intrinsic load_ubo (ssa_476, ssa_487) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_489 = vec3 ssa_482, ssa_485, ssa_488 vec3 32 ssa_490 = fneg ssa_489 vec3 32 ssa_491 = fadd ssa_474, ssa_490 vec1 32 ssa_492 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_493 = intrinsic load_deref (ssa_492) (0) /* access=0 */ vec4 32 ssa_494 = vec4 ssa_491.x, ssa_491.y, ssa_491.z, ssa_493.w vec1 32 ssa_495 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_495, ssa_494) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_496 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_497 = intrinsic load_deref (ssa_496) (0) /* access=0 */ vec3 32 ssa_498 = vec3 ssa_497.x, ssa_497.y, ssa_497.z vec3 32 ssa_499 = fneg ssa_498 vec1 32 ssa_500 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_501 = intrinsic vulkan_resource_index (ssa_500) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_502 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_503 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_504 = iadd ssa_502, ssa_503 vec1 32 ssa_505 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_506 = iadd ssa_504, ssa_505 vec1 32 ssa_507 = intrinsic load_ubo (ssa_501, ssa_506) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_508 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_509 = iadd ssa_504, ssa_508 vec1 32 ssa_510 = intrinsic load_ubo (ssa_501, ssa_509) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_511 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_512 = iadd ssa_504, ssa_511 vec1 32 ssa_513 = intrinsic load_ubo (ssa_501, ssa_512) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_514 = vec3 ssa_507, ssa_510, ssa_513 vec3 32 ssa_515 = fadd ssa_499, ssa_514 vec1 32 ssa_516 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_517 = intrinsic load_deref (ssa_516) (0) /* access=0 */ vec4 32 ssa_518 = vec4 ssa_515.x, ssa_515.y, ssa_515.z, ssa_517.w vec1 32 ssa_519 = deref_var &r9 (shader_temp vec4) intrinsic store_deref (ssa_519, ssa_518) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_520 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_521 = intrinsic load_deref (ssa_520) (0) /* access=0 */ vec1 32 ssa_522 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_523 = intrinsic vulkan_resource_index (ssa_522) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_524 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_525 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_526 = iadd ssa_524, ssa_525 vec1 32 ssa_527 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_528 = iadd ssa_526, ssa_527 vec1 32 ssa_529 = intrinsic load_ubo (ssa_523, ssa_528) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_530 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_531 = iadd ssa_526, ssa_530 vec1 32 ssa_532 = intrinsic load_ubo (ssa_523, ssa_531) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_533 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_534 = iadd ssa_526, ssa_533 vec1 32 ssa_535 = intrinsic load_ubo (ssa_523, ssa_534) (4, 0) /* 
align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_536 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_537 = iadd ssa_526, ssa_536 vec1 32 ssa_538 = intrinsic load_ubo (ssa_523, ssa_537) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_539 = vec4 ssa_529, ssa_532, ssa_535, ssa_538 vec1 32 ssa_540 = fdot4 ssa_521, ssa_539 vec1 32 ssa_541 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_542 = intrinsic load_deref (ssa_541) (0) /* access=0 */ vec4 32 ssa_543 = vec4 ssa_542.x, ssa_542.y, ssa_542.z, ssa_540 vec1 32 ssa_544 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_544, ssa_543) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_545 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_546 = intrinsic load_deref (ssa_545) (0) /* access=0 */ vec1 32 ssa_547 = imov ssa_546.x vec1 32 ssa_548 = deref_var &r15 (shader_temp vec4) vec4 32 ssa_549 = intrinsic load_deref (ssa_548) (0) /* access=0 */ vec4 32 ssa_550 = vec4 ssa_547, ssa_549.y, ssa_549.z, ssa_549.w vec1 32 ssa_551 = deref_var &r15 (shader_temp vec4) intrinsic store_deref (ssa_551, ssa_550) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_552 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_553 = intrinsic load_deref (ssa_552) (0) /* access=0 */ vec1 32 ssa_554 = imov ssa_553.x vec1 32 ssa_555 = deref_var &r15 (shader_temp vec4) vec4 32 ssa_556 = intrinsic load_deref (ssa_555) (0) /* access=0 */ vec4 32 ssa_557 = vec4 ssa_556.x, ssa_554, ssa_556.z, ssa_556.w vec1 32 ssa_558 = deref_var &r15 (shader_temp vec4) intrinsic store_deref (ssa_558, ssa_557) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_559 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_560 = intrinsic load_deref (ssa_559) (0) /* access=0 */ vec1 32 ssa_561 = imov ssa_560.x vec1 32 ssa_562 = deref_var &r15 (shader_temp vec4) vec4 32 ssa_563 = intrinsic load_deref (ssa_562) (0) /* access=0 */ vec4 32 ssa_564 = vec4 ssa_563.x, ssa_563.y, ssa_561

vec4 32 ssa_429 = intrinsic load_deref (ssa_428) (0) /* access=0 */ vec1 32 ssa_430 = deref_var &r10 (shader_temp vec4)

vec4 32 ssa_648 = vec4 ssa_647.x, ssa_647.y, ssa_645, ssa_647.w vec1 32 ssa_649 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_649, ssa_648) (15, 0) /* wrmask=xyzw */ /* access=0 */

vec1 32 ssa_556 = fdot3 ssa_540, ssa_555 vec1 32 ssa_557 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_558 = intrinsic load_deref (ssa_557) (0) /* access=0 */ vec4 32 ssa_559 = vec4 ssa_558.x, ssa_556, ssa_558.z, ssa_558.w vec1 32 ssa_560 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_560, ssa_559) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_561 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_562 = intrinsic load_deref (ssa_561) (0) /* access=0 */ vec3 32 ssa_563 = vec3 ssa_562.x, ssa_562.y, ssa_562.z vec1 32 ssa_564 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_565 = intrinsic vulkan_resource_index (ssa_564) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_566 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_567 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_568 = iadd ssa_566, ssa_567 vec1 32 ssa_569 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_570 = iadd ssa_568, ssa_569 vec1 32 ssa_571 = intrinsic load_ubo (ssa_565, ssa_570) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_572 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_573 = iadd ssa_568, ssa_572 vec1 32 ssa_574 = intrinsic load_ubo (ssa_565, ssa_573) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_575 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_576 = iadd ssa_568, ssa_575 vec1 32 ssa_577 = intrinsic load_ubo (ssa_565, ssa_576) (4, 0)
/* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_578 = vec3 ssa_571, ssa_574, ssa_577 vec1 32 ssa_579 = fdot3 ssa_563, ssa_578 vec1 32 ssa_580 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_581 = intrinsic load_deref (ssa_580) (0) /* access=0 */ vec4 32 ssa_582 = vec4 ssa_581.x, ssa_581.y, ssa_579, ssa_581.w vec1 32 ssa_583 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_583, ssa_582) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_584 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_585 = intrinsic load_deref (ssa_584) (0) /* access=0 */ vec1 32 ssa_586 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_587 = intrinsic load_deref (ssa_586) (0) /* access=0 */ vec1 32 ssa_588 = fdot4 ssa_585, ssa_587 vec1 32 ssa_589 = deref_var &o0 (shader_out vec4) vec4 32 ssa_590 = intrinsic load_deref (ssa_589) (0) /* access=0 */ vec4 32 ssa_591 = vec4 ssa_590.x, ssa_588, ssa_590.z, ssa_590.w vec1 32 ssa_592 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_592, ssa_591) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_593 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_594 = intrinsic load_deref (ssa_593) (0) /* access=0 */ vec1 32 ssa_595 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_596 = intrinsic vulkan_resource_index (ssa_595) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_597 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_598 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_599 = iadd ssa_597, ssa_598 vec1 32 ssa_600 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_601 = iadd ssa_599, ssa_600 vec1 32 ssa_602 = intrinsic load_ubo (ssa_596, ssa_601) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_603 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_604 = iadd ssa_599, ssa_603 vec1 32 ssa_605 = intrinsic load_ubo (ssa_596, ssa_604) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_606 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_607 = iadd ssa_599, ssa_606 vec1 32 ssa_608 = intrinsic load_ubo (ssa_596, ssa_607) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_609 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_610 = iadd ssa_599, ssa_609 vec1 32 ssa_611 = intrinsic load_ubo (ssa_596, ssa_610) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_612 = vec4 ssa_602, ssa_605, ssa_608, ssa_611 vec1 32 ssa_613 = fdot4 ssa_594, ssa_612 vec1 32 ssa_614 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_615 = intrinsic load_deref (ssa_614) (0) /* access=0 */ vec4 32 ssa_616 = vec4 ssa_615.x, ssa_615.y, ssa_615.z, ssa_613 vec1 32 ssa_617 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_617, ssa_616) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_618 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_619 = intrinsic load_deref (ssa_618) (0) /* access=0 */ vec1 32 ssa_620 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_621 = intrinsic vulkan_resource_index (ssa_620) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_622 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_623 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_624 = iadd ssa_622, ssa_623 vec1 32 ssa_625 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_626 = iadd ssa_624, ssa_625 vec1 32 ssa_627 = intrinsic load_ubo (ssa_621, ssa_626) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_628 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_629 = iadd ssa_624, ssa_628 vec1 32 ssa_630 = intrinsic load_ubo (ssa_621, ssa_629) (4, 0) /* align_mul=4 */ /* align_offset=0 */ 
vec1 32 ssa_631 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_632 = iadd ssa_624, ssa_631 vec1 32 ssa_633 = intrinsic load_ubo (ssa_621, ssa_632) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_634 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_635 = iadd ssa_624, ssa_634 vec1 32 ssa_636 = intrinsic load_ubo (ssa_621, ssa_635) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_637 = vec4 ssa_627, ssa_630, ssa_633, ssa_636 vec1 32 ssa_638 = fdot4 ssa_619, ssa_637 vec1 32 ssa_639 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_640 = intrinsic load_deref (ssa_639) (0) /* access=0 */ vec4 32 ssa_641 = vec4 ssa_640.x, ssa_640.y, ssa_640.z, ssa_638 vec1 32 ssa_642 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_642, ssa_641) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_643 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_644 = intrinsic load_deref (ssa_643) (0) /* access=0 */ vec3 32 ssa_645 = vec3 ssa_644.x, ssa_644.y, ssa_644.z vec1 32 ssa_646 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_647 = intrinsic vulkan_resource_index (ssa_646) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_648 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_649 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_650 = iadd ssa_648, ssa_649 vec1 32 ssa_651 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_652 = iadd ssa_650, ssa_651 vec1 32 ssa_653 = intrinsic load_ubo (ssa_647, ssa_652) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_654 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_655 = iadd ssa_650, ssa_654 vec1 32 ssa_656 = intrinsic load_ubo (ssa_647, ssa_655) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_657 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_658 = iadd ssa_650, ssa_657 vec1 32 ssa_659 = intrinsic load_ubo (ssa_647, ssa_658) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_660 = vec3 ssa_653, ssa_656, ssa_659 vec1 32 ssa_661 = fdot3 ssa_645, ssa_660 vec1 32 ssa_662 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_663 = intrinsic load_deref (ssa_662) (0) /* access=0 */ vec4 32 ssa_664 = vec4 ssa_661, ssa_663.y, ssa_663.z, ssa_663.w vec1 32 ssa_665 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_665, ssa_664) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_666 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_667 = intrinsic load_deref (ssa_666) (0) /* access=0 */ vec3 32 ssa_668 = vec3 ssa_667.x, ssa_667.y, ssa_667.z vec1 32 ssa_669 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_670 = intrinsic vulkan_resource_index (ssa_669) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_671 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_672 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_673 = iadd ssa_671, ssa_672 vec1 32 ssa_674 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_675 = iadd ssa_673, ssa_674 vec1 32 ssa_676 = intrinsic load_ubo (ssa_670, ssa_675) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_677 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_678 = iadd ssa_673, ssa_677 vec1 32 ssa_679 = intrinsic load_ubo (ssa_670, ssa_678) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_680 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_681 = iadd ssa_673, ssa_680 vec1 32 ssa_682 = intrinsic load_ubo (ssa_670, ssa_681) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_683 = vec3 ssa_676, ssa_679, ssa_682 vec1 32 ssa_684 = fdot3 ssa_668, ssa_683 vec1 32 ssa_685 = deref_var &r4 
(shader_temp vec4) vec4 32 ssa_686 = intrinsic load_deref (ssa_685) (0) /* access=0 */ vec4 32 ssa_687 = vec4 ssa_684, ssa_686.y, ssa_686.z, ssa_686.w vec1 32 ssa_688 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_688, ssa_687) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_689 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_690 = intrinsic load_deref (ssa_689) (0) /* access=0 */ vec3 32 ssa_691 = vec3 ssa_690.x, ssa_690.y, ssa_690.z vec1 32 ssa_692 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_693 = intrinsic vulkan_resource_index (ssa_692) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_694 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_695 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_696 = iadd ssa_694, ssa_695 vec1 32 ssa_697 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_698 = iadd ssa_696, ssa_697 vec1 32 ssa_699 = intrinsic load_ubo (ssa_693, ssa_698) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_700 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_701 = iadd ssa_696, ssa_700 vec1 32 ssa_702 = intrinsic load_ubo (ssa_693, ssa_701) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_703 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_704 = iadd ssa_696, ssa_703 vec1 32 ssa_705 = intrinsic load_ubo (ssa_693, ssa_704) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_706 = vec3 ssa_699, ssa_702, ssa_705 vec1 32 ssa_707 = fdot3 ssa_691, ssa_706 vec1 32 ssa_708 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_709 = intrinsic load_deref (ssa_708) (0) /* access=0 */ vec4 32 ssa_710 = vec4 ssa_709.x, ssa_707, ssa_709.z, ssa_709.w vec1 32 ssa_711 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_711, ssa_710) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_712 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_713 = intrinsic load_deref (ssa_712) (0) /* access=0 */ vec3 32 ssa_714 = vec3 ssa_713.x, ssa_713.y, ssa_713.z vec1 32 ssa_715 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_716 = intrinsic vulkan_resource_index (ssa_715) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_717 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_718 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_719 = iadd ssa_717, ssa_718 vec1 32 ssa_720 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_721 = iadd ssa_719, ssa_720 vec1 32 ssa_722 = intrinsic load_ubo (ssa_716, ssa_721) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_723 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_724 = iadd ssa_719, ssa_723 vec1 32 ssa_725 = intrinsic load_ubo (ssa_716, ssa_724) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_726 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_727 = iadd ssa_719, ssa_726 vec1 32 ssa_728 = intrinsic load_ubo (ssa_716, ssa_727) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_729 = vec3 ssa_722, ssa_725, ssa_728 vec1 32 ssa_730 = fdot3 ssa_714, ssa_729 vec1 32 ssa_731 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_732 = intrinsic load_deref (ssa_731) (0) /* access=0 */ vec4 32 ssa_733 = vec4 ssa_732.x, ssa_730, ssa_732.z, ssa_732.w vec1 32 ssa_734 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_734, ssa_733) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_735 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_736 = intrinsic load_deref (ssa_735) (0) /* access=0 */ vec3 32 ssa_737 = vec3 ssa_736.x, ssa_736.y, ssa_736.z vec1 32 ssa_738 = load_const (0x00000000 /* 0.000000 */) vec1 32 
ssa_739 = intrinsic vulkan_resource_index (ssa_738) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_740 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_741 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_742 = iadd ssa_740, ssa_741 vec1 32 ssa_743 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_744 = iadd ssa_742, ssa_743 vec1 32 ssa_745 = intrinsic load_ubo (ssa_739, ssa_744) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_746 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_747 = iadd ssa_742, ssa_746 vec1 32 ssa_748 = intrinsic load_ubo (ssa_739, ssa_747) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_749 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_750 = iadd ssa_742, ssa_749 vec1 32 ssa_751 = intrinsic load_ubo (ssa_739, ssa_750) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_752 = vec3 ssa_745, ssa_748, ssa_751 vec1 32 ssa_753 = fdot3 ssa_737, ssa_752 vec1 32 ssa_754 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_755 = intrinsic load_deref (ssa_754) (0) /* access=0 */ vec4 32 ssa_756 = vec4 ssa_755.x, ssa_755.y, ssa_753, ssa_755.w vec1 32 ssa_757 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_757, ssa_756) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_758 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_759 = intrinsic load_deref (ssa_758) (0) /* access=0 */ vec3 32 ssa_760 = vec3 ssa_759.x, ssa_759.y, ssa_759.z vec1 32 ssa_761 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_762 = intrinsic vulkan_resource_index (ssa_761) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_763 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_764 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_765 = iadd ssa_763, ssa_764 vec1 32 ssa_766 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_767 = iadd ssa_765, ssa_766 vec1 32 ssa_768 = intrinsic load_ubo (ssa_762, ssa_767) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_769 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_770 = iadd ssa_765, ssa_769 vec1 32 ssa_771 = intrinsic load_ubo (ssa_762, ssa_770) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_772 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_773 = iadd ssa_765, ssa_772 vec1 32 ssa_774 = intrinsic load_ubo (ssa_762, ssa_773) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_775 = vec3 ssa_768, ssa_771, ssa_774 vec1 32 ssa_776 = fdot3 ssa_760, ssa_775 vec1 32 ssa_777 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_778 = intrinsic load_deref (ssa_777) (0) /* access=0 */ vec4 32 ssa_779 = vec4 ssa_778.x, ssa_778.y, ssa_776, ssa_778.w vec1 32 ssa_780 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_780, ssa_779) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_781 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_782 = intrinsic load_deref (ssa_781) (0) /* access=0 */ vec1 32 ssa_783 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_784 = intrinsic load_deref (ssa_783) (0) /* access=0 */ vec1 32 ssa_785 = fdot4 ssa_782, ssa_784 vec1 32 ssa_786 = deref_var &o0 (shader_out vec4) vec4 32 ssa_787 = intrinsic load_deref (ssa_786) (0) /* access=0 */ vec4 32 ssa_788 = vec4 ssa_787.x, ssa_787.y, ssa_787.z, ssa_785 vec1 32 ssa_789 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_789, ssa_788) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_790 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_791 = intrinsic load_deref (ssa_790) (0) /* access=0 */ vec1 32 ssa_792 = deref_var &r8 (shader_temp vec4) vec4 32 
ssa_793 = intrinsic load_deref (ssa_792) (0) /* access=0 */ vec1 32 ssa_794 = fdot4 ssa_791, ssa_793 vec1 32 ssa_795 = deref_var &o0 (shader_out vec4) vec4 32 ssa_796 = intrinsic load_deref (ssa_795) (0) /* access=0 */ vec4 32 ssa_797 = vec4 ssa_796.x, ssa_796.y, ssa_794, ssa_796.w vec1 32 ssa_798 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_798, ssa_797) (15, 0) /* wrmask=xyzw */ /* access=0 */

vec1 32 ssa_650 = deref_var &r3 (function_temp vec4)

ssa_563.w vec1 32 ssa_565 = deref_var &r15 (shader_temp vec4) intrinsic store_deref (ssa_565, ssa_564) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_566 = deref_var &r15 (shader_temp vec4) vec4 32 ssa_567 = intrinsic load_deref (ssa_566) (0) /* access=0 */ vec3 32 ssa_568 = vec3 ssa_567.x, ssa_567.y, ssa_567.z vec1 32 ssa_569 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_570 = intrinsic vulkan_resource_index (ssa_569) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_571 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_572 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_573 = iadd ssa_571, ssa_572 vec1 32 ssa_574 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_575 = iadd ssa_573, ssa_574 vec1 32 ssa_576 = intrinsic load_ubo (ssa_570, ssa_575) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_577 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_578 = iadd ssa_573, ssa_577 vec1 32 ssa_579 = intrinsic load_ubo (ssa_570, ssa_578) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_580 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_581 = iadd ssa_573, ssa_580 vec1 32 ssa_582 = intrinsic load_ubo (ssa_570, ssa_581) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_583 = vec3 ssa_576, ssa_579, ssa_582 vec1 32 ssa_584 = fdot3 ssa_568, ssa_583 vec1 32 ssa_585 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_586 = intrinsic load_deref (ssa_585) (0) /* access=0 */ vec4 32 ssa_587 = vec4 ssa_584, ssa_586.y, ssa_586.z, ssa_586.w vec1 32 ssa_588 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_588, ssa_587) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_589 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_590 = intrinsic load_deref (ssa_589) (0) /* access=0 */ vec1 32 ssa_591 = imov ssa_590.y vec1 32 ssa_592 = deref_var &r16 (shader_temp vec4) vec4 32 ssa_593 = intrinsic load_deref (ssa_592) (0) /* access=0 */ vec4 32 ssa_594 = vec4 ssa_591, ssa_593.y, ssa_593.z, ssa_593.w vec1 32 ssa_595 = deref_var &r16 (shader_temp vec4) intrinsic store_deref (ssa_595, ssa_594) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_596 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_597 = intrinsic load_deref (ssa_596) (0) /* access=0 */ vec1 32 ssa_598 = imov ssa_597.y vec1 32 ssa_599 = deref_var &r16 (shader_temp vec4) vec4 32 ssa_600 = intrinsic load_deref (ssa_599) (0) /* access=0 */ vec4 32 ssa_601 = vec4 ssa_600.x, ssa_598, ssa_600.z, ssa_600.w vec1 32 ssa_602 = deref_var &r16 (shader_temp vec4) intrinsic store_deref (ssa_602, ssa_601) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_603 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_604 = intrinsic load_deref (ssa_603) (0) /* access=0 */ vec1 32 ssa_605 = imov ssa_604.y vec1 32 ssa_606 = deref_var &r16 (shader_temp vec4) vec4 32 ssa_607 = intrinsic load_deref (ssa_606) (0) /* access=0 */ vec4 32 ssa_608 = vec4 ssa_607.x, ssa_607.y, ssa_605, ssa_607.w vec1 32 ssa_609 = deref_var &r16 (shader_temp vec4) intrinsic store_deref (ssa_609, ssa_608) (15, 0) /* wrmask=xyzw */ /*
access=0 */ vec1 32 ssa_610 = deref_var &r16 (shader_temp vec4) vec4 32 ssa_611 = intrinsic load_deref (ssa_610) (0) /* access=0 */ vec3 32 ssa_612 = vec3 ssa_611.x, ssa_611.y, ssa_611.z vec1 32 ssa_613 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_614 = intrinsic vulkan_resource_index (ssa_613) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_615 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_616 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_617 = iadd ssa_615, ssa_616 vec1 32 ssa_618 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_619 = iadd ssa_617, ssa_618 vec1 32 ssa_620 = intrinsic load_ubo (ssa_614, ssa_619) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_621 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_622 = iadd ssa_617, ssa_621 vec1 32 ssa_623 = intrinsic load_ubo (ssa_614, ssa_622) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_624 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_625 = iadd ssa_617, ssa_624 vec1 32 ssa_626 = intrinsic load_ubo (ssa_614, ssa_625) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_627 = vec3 ssa_620, ssa_623, ssa_626 vec1 32 ssa_628 = fdot3 ssa_612, ssa_627 vec1 32 ssa_629 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_630 = intrinsic load_deref (ssa_629) (0) /* access=0 */ vec4 32 ssa_631 = vec4 ssa_630.x, ssa_628, ssa_630.z, ssa_630.w vec1 32 ssa_632 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_632, ssa_631) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_633 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_634 = intrinsic load_deref (ssa_633) (0) /* access=0 */ vec1 32 ssa_635 = imov ssa_634.z vec1 32 ssa_636 = deref_var &r17 (shader_temp vec4) vec4 32 ssa_637 = intrinsic load_deref (ssa_636) (0) /* access=0 */ vec4 32 ssa_638 = vec4 ssa_635, ssa_637.y, ssa_637.z, ssa_637.w vec1 32 ssa_639 = deref_var &r17 (shader_temp vec4) intrinsic store_deref (ssa_639, ssa_638) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_640 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_641 = intrinsic load_deref (ssa_640) (0) /* access=0 */ vec1 32 ssa_642 = imov ssa_641.z vec1 32 ssa_643 = deref_var &r17 (shader_temp vec4) vec4 32 ssa_644 = intrinsic load_deref (ssa_643) (0) /* access=0 */ vec4 32 ssa_645 = vec4 ssa_644.x, ssa_642, ssa_644.z, ssa_644.w vec1 32 ssa_646 = deref_var &r17 (shader_temp vec4) intrinsic store_deref (ssa_646, ssa_645) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_647 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_648 = intrinsic load_deref (ssa_647) (0) /* access=0 */ vec1 32 ssa_649 = imov ssa_648.z vec1 32 ssa_650 = deref_var &r17 (shader_temp vec4) vec4 32 ssa_651 = intrinsic load_deref (ssa_650) (0) /* access=0 */ vec4 32 ssa_652 = vec4 ssa_651.x, ssa_651.y, ssa_649, ssa_651.w vec1 32 ssa_653 = deref_var &r17 (shader_temp vec4) intrinsic store_deref (ssa_653, ssa_652) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_654 = deref_var &r17 (shader_temp vec4) vec4 32 ssa_655 = intrinsic load_deref (ssa_654) (0) /* access=0 */ vec3 32 ssa_656 = vec3 ssa_655.x, ssa_655.y, ssa_655.z vec1 32 ssa_657 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_658 = intrinsic vulkan_resource_index (ssa_657) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_659 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_660 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_661 = iadd ssa_659, ssa_660 vec1 32 ssa_662 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_663 = iadd ssa_661, ssa_662 vec1 32 
ssa_664 = intrinsic load_ubo (ssa_658, ssa_663) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_665 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_666 = iadd ssa_661, ssa_665 vec1 32 ssa_667 = intrinsic load_ubo (ssa_658, ssa_666) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_668 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_669 = iadd ssa_661, ssa_668 vec1 32 ssa_670 = intrinsic load_ubo (ssa_658, ssa_669) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_671 = vec3 ssa_664, ssa_667, ssa_670

{ block block_1: /* preds: block_0 */ vec1 32 ssa_146 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_1869 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_147 = txf ssa_146 (texture_deref), ssa_145 (coord), ssa_1869 (lod), 0 (sampler), vec2 32 ssa_148 = vec2 ssa_147.y, ssa_147.z /* succs: block_3 */ } else {

vec4 32 ssa_562 = intrinsic load_deref (ssa_561) (0) /* access=0 */ vec4 32 ssa_563 = vec4 ssa_560, ssa_562.y, ssa_562.z, ssa_562.w vec1 32 ssa_564 = deref_var &r13 (shader_temp vec4) intrinsic store_deref (ssa_564, ssa_563) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_565 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_566 = intrinsic load_deref (ssa_565) (0) /* access=0 */ vec1 32 ssa_567 = imov ssa_566.y vec1 32 ssa_568 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_569 = intrinsic load_deref (ssa_568) (0) /* access=0 */ vec4 32 ssa_570 = vec4 ssa_569.x, ssa_567, ssa_569.z, ssa_569.w vec1 32 ssa_571 = deref_var &r13 (shader_temp vec4) intrinsic store_deref (ssa_571, ssa_570) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_572 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_573 = intrinsic load_deref (ssa_572) (0) /* access=0 */ vec1 32 ssa_574 = imov ssa_573.y vec1 32 ssa_575 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_576 = intrinsic load_deref (ssa_575) (0) /* access=0 */ vec4 32 ssa_577 = vec4 ssa_576.x, ssa_576.y, ssa_574, ssa_576.w vec1 32 ssa_578 = deref_var &r13 (shader_temp vec4) intrinsic store_deref (ssa_578, ssa_577) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_579 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_580 = intrinsic load_deref (ssa_579) (0) /* access=0 */ vec3 32 ssa_581 = vec3 ssa_580.x, ssa_580.y, ssa_580.z vec1 32 ssa_582 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_583 = intrinsic vulkan_resource_index (ssa_582) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_584 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_585 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_586 = iadd ssa_584, ssa_585 vec1 32 ssa_587 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_588 = iadd ssa_586, ssa_587 vec1 32 ssa_589 = intrinsic load_ubo (ssa_583, ssa_588) (4, 0) /* align_mul=4 */

intrinsic store_deref (ssa_430, ssa_429) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_431 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_432 = intrinsic load_deref (ssa_431) (0) /* access=0 */ vec1 32 ssa_433 = imov ssa_432.w vec1 32 ssa_434 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_435 = intrinsic load_deref (ssa_434) (0) /* access=0 */ vec4 32 ssa_436 = vec4 ssa_435.x, ssa_433, ssa_435.z, ssa_435.w vec1 32 ssa_437 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_437, ssa_436) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_438 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_439 = intrinsic load_deref (ssa_438) (0) /* access=0 */ vec4 32 ssa_440 = vec4 ssa_439.y, ssa_439.y, ssa_439.y, ssa_439.y vec1 32 ssa_441 = imov ssa_440.x /* succs: block_22 block_23 */ if ssa_79 { block
block_22: /* preds: block_21 */ vec1 32 ssa_442 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_443 = txf ssa_442 (texture_deref), ssa_441 (coord), 0 (sampler), vec1 32 ssa_444 = deref_var &phi@6 (function_temp vec4) intrinsic store_deref (ssa_444, ssa_443) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_24 */ } else { block block_23: /* preds: block_21 */ vec1 32 ssa_445 = deref_var &phi@6 (function_temp vec4) intrinsic store_deref (ssa_445, ssa_19) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_24 */ } block block_24: /* preds: block_22 block_23 */ vec1 32 ssa_446 = deref_var &phi@6 (function_temp vec4) vec4 32 ssa_447 = intrinsic load_deref (ssa_446) (0) /* access=0 */ vec1 32 ssa_448 = deref_var &r11 (shader_temp vec4) intrinsic store_deref (ssa_448, ssa_447) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_449 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_450 = intrinsic load_deref (ssa_449) (0) /* access=0 */ vec1 32 ssa_451 = imov ssa_450.w vec1 32 ssa_452 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_453 = intrinsic load_deref (ssa_452) (0) /* access=0 */ vec4 32 ssa_454 = vec4 ssa_453.x, ssa_453.y, ssa_451, ssa_453.w vec1 32 ssa_455 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_455, ssa_454) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_456 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_457 = intrinsic load_deref (ssa_456) (0) /* access=0 */ vec3 32 ssa_458 = vec3 ssa_457.x, ssa_457.y, ssa_457.z vec1 32 ssa_459 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_460 = intrinsic vulkan_resource_index (ssa_459) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_461 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_462 = load_const (0x00000310 /* 0.000000 */) vec1 32 ssa_463 = iadd ssa_461, ssa_462 vec1 32 ssa_464 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_465 = iadd ssa_463, ssa_464 vec1 32 ssa_466 = intrinsic load_ubo (ssa_460, ssa_465) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_467 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_468 = iadd ssa_463, ssa_467 vec1 32 ssa_469 = intrinsic load_ubo (ssa_460, ssa_468) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_470 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_471 = iadd ssa_463, ssa_470 vec1 32 ssa_472 = intrinsic load_ubo (ssa_460, ssa_471) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_473 = vec3 ssa_466, ssa_469, ssa_472 vec3 32 ssa_474 = fneg ssa_473 vec3 32 ssa_475 = fadd ssa_458, ssa_474 vec1 32 ssa_476 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_477 = intrinsic load_deref (ssa_476) (0) /* access=0 */ vec4 32 ssa_478 = vec4 ssa_475.x, ssa_475.y, ssa_475.z, ssa_477.w vec1 32 ssa_479 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_479, ssa_478) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_480 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_481 = intrinsic load_deref (ssa_480) (0) /* access=0 */ vec4 32 ssa_482 = vec4 ssa_481.x, ssa_481.y, ssa_481.z, ssa_78 vec1 32 ssa_483 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_483, ssa_482) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_484 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_485 = intrinsic load_deref (ssa_484) (0) /* access=0 */ vec1 32 ssa_486 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_487 = intrinsic vulkan_resource_index (ssa_486) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_488 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_489 = load_const 
(0x00000210 /* 0.000000 */) vec1 32 ssa_490 = iadd ssa_488, ssa_489 vec1 32 ssa_491 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_492 = iadd ssa_490, ssa_491 vec1 32 ssa_493 = intrinsic load_ubo (ssa_487, ssa_492) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_494 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_495 = iadd ssa_490, ssa_494 vec1 32 ssa_496 = intrinsic load_ubo (ssa_487, ssa_495) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_497 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_498 = iadd ssa_490, ssa_497 vec1 32 ssa_499 = intrinsic load_ubo (ssa_487, ssa_498) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_500 = load_const (0x0000000c /* 0.000000 */)

vec1 32 ssa_799 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_800 = load_const (0x00000004 /* 0.000000 */)

vec4 32 ssa_651 = intrinsic load_deref (ssa_650) (0) /* access=0 */ vec4 32 ssa_652 = vec4 ssa_651.x, ssa_651.y, ssa_651.z, ssa_86 vec1 32 ssa_653 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_653, ssa_652) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_654 = deref_var &r3 (function_temp vec4) vec4 32 ssa_655 = intrinsic load_deref (ssa_654) (0) /* access=0 */

/* align_offset=0 */ vec1 32 ssa_590 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_591 = iadd ssa_586, ssa_590 vec1 32 ssa_592 = intrinsic load_ubo (ssa_583, ssa_591) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_593 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_594 = iadd ssa_586, ssa_593 vec1 32 ssa_595 = intrinsic load_ubo (ssa_583, ssa_594) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_596 = vec3 ssa_589, ssa_592, ssa_595 vec1 32 ssa_597 = fdot3 ssa_581, ssa_596 vec1 32 ssa_598 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_599 = intrinsic load_deref (ssa_598) (0) /* access=0 */ vec4 32 ssa_600 = vec4 ssa_599.x, ssa_597, ssa_599.z, ssa_599.w vec1 32 ssa_601 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_601, ssa_600) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_602 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_603 = intrinsic load_deref (ssa_602) (0) /* access=0 */ vec1 32 ssa_604 = imov ssa_603.z vec1 32 ssa_605 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_606 = intrinsic load_deref (ssa_605) (0) /* access=0 */ vec4 32 ssa_607 = vec4 ssa_604, ssa_606.y, ssa_606.z, ssa_606.w vec1 32 ssa_608 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_608, ssa_607) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_609 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_610 = intrinsic load_deref (ssa_609) (0) /* access=0 */ vec1 32 ssa_611 = imov ssa_610.z vec1 32 ssa_612 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_613 = intrinsic load_deref (ssa_612) (0) /* access=0 */ vec4 32 ssa_614 = vec4 ssa_613.x, ssa_611, ssa_613.z, ssa_613.w vec1 32 ssa_615 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_615, ssa_614) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_616 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_617 = intrinsic load_deref (ssa_616) (0) /* access=0 */ vec1 32 ssa_618 = imov ssa_617.z vec1 32 ssa_619 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_620 = intrinsic load_deref (ssa_619) (0) /* access=0 */ vec4 32 ssa_621 = vec4 ssa_620.x, ssa_620.y, ssa_618, ssa_620.w vec1 32 ssa_622 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_622, ssa_621) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_623 = deref_var &r14 (shader_temp vec4) vec4 32
ssa_624 = intrinsic load_deref (ssa_623) (0) /* access=0 */ vec3 32 ssa_625 = vec3 ssa_624.x, ssa_624.y, ssa_624.z vec1 32 ssa_626 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_627 = intrinsic vulkan_resource_index (ssa_626) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_628 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_629 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_630 = iadd ssa_628, ssa_629 vec1 32 ssa_631 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_632 = iadd ssa_630, ssa_631 vec1 32 ssa_633 = intrinsic load_ubo (ssa_627, ssa_632) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_634 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_635 = iadd ssa_630, ssa_634 vec1 32 ssa_636 = intrinsic load_ubo (ssa_627, ssa_635) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_637 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_638 = iadd ssa_630, ssa_637 vec1 32 ssa_639 = intrinsic load_ubo (ssa_627, ssa_638) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_640 = vec3 ssa_633, ssa_636, ssa_639 vec1 32 ssa_641 = fdot3 ssa_625, ssa_640 vec1 32 ssa_642 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_643 = intrinsic load_deref (ssa_642) (0) /* access=0 */ vec4 32 ssa_644 = vec4 ssa_643.x, ssa_643.y, ssa_641, ssa_643.w vec1 32 ssa_645 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_645, ssa_644) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_646 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_647 = intrinsic load_deref (ssa_646) (0) /* access=0 */ vec4 32 ssa_648 = vec4 ssa_647.x, ssa_647.y, ssa_647.z, ssa_71 vec1 32 ssa_649 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_649, ssa_648) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_650 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_651 = intrinsic load_deref (ssa_650) (0) /* access=0 */ vec1 32 ssa_652 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_653 = intrinsic load_deref (ssa_652) (0) /* access=0 */ vec1 32 ssa_654 = fdot4 ssa_651, ssa_653 vec1 32 ssa_655 = deref_var &o0 (shader_out vec4) vec4 32 ssa_656 = intrinsic load_deref (ssa_655) (0) /* access=0 */ vec4 32 ssa_657 = vec4 ssa_654, ssa_656.y, ssa_656.z, ssa_656.w vec1 32 ssa_658 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_658, ssa_657) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_659 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_660 = intrinsic load_deref (ssa_659) (0) /* access=0 */ vec1 32 ssa_661 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_662 = intrinsic vulkan_resource_index (ssa_661) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_663 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_664 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_665 = iadd ssa_663, ssa_664 vec1 32 ssa_666 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_667 = iadd ssa_665, ssa_666 vec1 32 ssa_668 = intrinsic load_ubo (ssa_662, ssa_667) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_669 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_670 = iadd ssa_665, ssa_669 vec1 32 ssa_671 = intrinsic load_ubo (ssa_662, ssa_670) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_672 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_673 = iadd ssa_665, ssa_672 vec1 32 ssa_674 = intrinsic load_ubo (ssa_662, ssa_673) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_675 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_676 = iadd ssa_665, ssa_675 vec1 32 ssa_677 = intrinsic load_ubo 
(ssa_662, ssa_676) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_678 = vec4 ssa_668, ssa_671, ssa_674, ssa_677 vec1 32 ssa_679 = fdot4 ssa_660, ssa_678 vec1 32 ssa_680 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_681 = intrinsic load_deref (ssa_680) (0) /* access=0 */ vec4 32 ssa_682 = vec4 ssa_681.x, ssa_681.y, ssa_681.z, ssa_679 vec1 32 ssa_683 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_683, ssa_682) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_684 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_685 = intrinsic load_deref (ssa_684) (0) /* access=0 */ vec3 32 ssa_686 = vec3 ssa_685.x, ssa_685.y, ssa_685.z vec1 32 ssa_687 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_688 = intrinsic vulkan_resource_index (ssa_687) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_689 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_690 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_691 = iadd ssa_689, ssa_690 vec1 32 ssa_692 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_693 = iadd ssa_691, ssa_692 vec1 32 ssa_694 = intrinsic load_ubo (ssa_688, ssa_693) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_695 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_696 = iadd ssa_691, ssa_695 vec1 32 ssa_697 = intrinsic load_ubo (ssa_688, ssa_696) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_698 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_699 = iadd ssa_691, ssa_698 vec1 32 ssa_700 = intrinsic load_ubo (ssa_688, ssa_699) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_701 = vec3 ssa_694, ssa_697, ssa_700 vec1 32 ssa_702 = fdot3 ssa_686, ssa_701 vec1 32 ssa_703 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_704 = intrinsic load_deref (ssa_703) (0) /* access=0 */ vec4 32 ssa_705 = vec4 ssa_702, ssa_704.y, ssa_704.z, ssa_704.w vec1 32 ssa_706 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_706, ssa_705) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_707 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_708 = intrinsic load_deref (ssa_707) (0) /* access=0 */ vec3 32 ssa_709 = vec3 ssa_708.x, ssa_708.y, ssa_708.z vec1 32 ssa_710 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_711 = intrinsic vulkan_resource_index (ssa_710) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_712 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_713 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_714 = iadd ssa_712, ssa_713 vec1 32 ssa_715 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_716 = iadd ssa_714, ssa_715 vec1 32 ssa_717 = intrinsic load_ubo (ssa_711, ssa_716) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_718 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_719 = iadd ssa_714, ssa_718 vec1 32 ssa_720 = intrinsic load_ubo (ssa_711, ssa_719) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_721 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_722 = iadd ssa_714, ssa_721 vec1 32 ssa_723 = intrinsic load_ubo (ssa_711, ssa_722) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_724 = vec3 ssa_717, ssa_720, ssa_723 vec1 32 ssa_725 = fdot3 ssa_709, ssa_724 vec1 32 ssa_726 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_727 = intrinsic load_deref (ssa_726) (0) /* access=0 */ vec4 32 ssa_728 = vec4 ssa_727.x, ssa_725, ssa_727.z, ssa_727.w vec1 32 ssa_729 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_729, ssa_728) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_730 = deref_var &r14 (shader_temp 
vec4) vec4 32 ssa_731 = intrinsic load_deref (ssa_730) (0) /* access=0 */ vec3 32 ssa_732 = vec3 ssa_731.x, ssa_731.y, ssa_731.z vec1 32 ssa_733 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_734 = intrinsic vulkan_resource_index (ssa_733) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_735 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_736 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_737 = iadd ssa_735, ssa_736 vec1 32 ssa_738 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_739 = iadd ssa_737, ssa_738 vec1 32 ssa_740 = intrinsic load_ubo (ssa_734, ssa_739) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_741 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_742 = iadd ssa_737, ssa_741 vec1 32 ssa_743 = intrinsic load_ubo (ssa_734, ssa_742) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_744 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_745 = iadd ssa_737, ssa_744 vec1 32 ssa_746 = intrinsic load_ubo (ssa_734, ssa_745) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_747 = vec3 ssa_740, ssa_743, ssa_746 vec1 32 ssa_748 = fdot3 ssa_732, ssa_747 vec1 32 ssa_749 = deref_var &r12 (shader_temp vec4) [interleaved fragments from a concurrent shader dump omitted] block block_2: /* preds: block_0 */ /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ vec2 32 ssa_1885 = phi block_1: ssa_148, block_2: ssa_1947 vec2 32 ssa_1877 = imov ssa_1885 vec1 32 ssa_153 = deref_var &r0 (function_temp vec4) vec4 32 ssa_154 = intrinsic load_deref (ssa_153) (0) /* access=0 */ vec4 32 ssa_155 = vec4 ssa_1877.x, ssa_154.y, ssa_1877.y, ssa_154.w vec1 32 ssa_156 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_156, ssa_155) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_157 = deref_var &r0 (function_temp vec4) vec4 32 ssa_158 = intrinsic load_deref (ssa_157) (0) /* access=0 */ vec4 32 ssa_159 = vec4 ssa_158.y, ssa_158.y, ssa_158.y, ssa_158.y vec1 32 ssa_160 = imov ssa_159.x /* succs: block_4 block_5 */ if ssa_95 { block block_4: /* preds: block_3 */ vec1 32 ssa_161 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_1870 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_162 = txf ssa_161 (texture_deref), ssa_160 (coord), ssa_1870 (lod), 0 (sampler), vec1 32 ssa_163 = imov ssa_162.z /* succs: block_6 */ } else { block block_5: /* preds: block_3 */ /* succs: block_6 */ } block block_6: /* preds: block_4 block_5 */ vec1 32 ssa_1886 = phi block_4: ssa_163, block_5: ssa_23 vec1 32 ssa_1878 = imov ssa_1886 vec1 32 ssa_168 = deref_var &r0 (function_temp vec4) vec4 32 ssa_169 = intrinsic load_deref (ssa_168) (0) /* access=0 */ vec4 32 ssa_170 = vec4 ssa_169.x, ssa_1878, ssa_169.z, ssa_169.w vec1 32 ssa_171 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_171, ssa_170) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_172 = deref_var &shader_in (function_temp Z) vec1 32 ssa_173 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_174 = deref_array &(*ssa_172)[1] (function_temp vec4) /* &shader_in[1] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_175 = intrinsic load_deref (ssa_174) (0) /* access=0 */ vec1 32 ssa_176 = imov ssa_175.x vec1 32 ssa_177 = deref_var &r0 (function_temp vec4) vec4 32 ssa_178 = intrinsic load_deref (ssa_177) (0) /* access=0 */ vec1 32 ssa_179 = imov ssa_178.y vec1 32 ssa_180 = imul ssa_176, ssa_94 vec1 32 ssa_181 = iadd ssa_180, ssa_179 vec1 32 ssa_182 = deref_var &r0 (function_temp vec4) vec4 32 ssa_183 = intrinsic load_deref (ssa_182) (0) /* access=0 */ vec4 32 ssa_184
= vec4 ssa_183.x, ssa_181, ssa_183.z, ssa_183.w vec1 32 ssa_185 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_185, ssa_184) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_186 = deref_var &r0 (function_temp vec4) vec4 32 ssa_187 = intrinsic load_deref (ssa_186) (0) /* access=0 */ vec1 32 ssa_188 = imov ssa_187.y vec1 32 ssa_189 = ushr ssa_93, ssa_92 vec1 32 ssa_190 = imul ssa_188, ssa_91 vec1 32 ssa_191 = iadd ssa_190, ssa_189 vec1 32 ssa_192 = iadd ssa_191, ssa_90 vec1 32 ssa_193 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_194 = intrinsic vulkan_resource_index (ssa_193) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_195 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_196 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_197 = ishl ssa_192, ssa_196 vec1 32 ssa_198 = iadd ssa_195, ssa_197 vec1 32 ssa_199 = intrinsic load_ssbo (ssa_194, ssa_198) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_200 = deref_var &r1 (function_temp vec4) vec4 32 ssa_201 = intrinsic load_deref (ssa_200) (0) /* access=0 */ vec4 32 ssa_202 = vec4 ssa_201.x, ssa_199, ssa_201.z, ssa_201.w vec1 32 ssa_203 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_203, ssa_202) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_204 = deref_var &r0 (function_temp vec4) vec4 32 ssa_205 = intrinsic load_deref (ssa_204) (0) /* access=0 */ vec4 32 ssa_206 = vec4 ssa_205.y, ssa_205.y, ssa_205.y, ssa_205.y vec4 32 ssa_207 = iadd ssa_206, ssa_2031 vec1 32 ssa_208 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_208, ssa_207) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_209 = deref_var &r0 (function_temp vec4) vec4 32 ssa_210 = intrinsic load_deref (ssa_209) (0) /* access=0 */ vec2 32 ssa_211 = vec2 ssa_210.y, ssa_210.y vec2 32 ssa_212 = iadd ssa_211, ssa_2026 vec1 32 ssa_213 = deref_var &r0 (function_temp vec4) vec4 32 ssa_214 = intrinsic load_deref (ssa_213) (0) /* access=0 */ vec4 32 ssa_215 = vec4 ssa_214.x, ssa_212.x, ssa_214.z, ssa_212.y vec1 32 ssa_216 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_216, ssa_215) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_217 = deref_var &r2 (function_temp vec4) vec4 32 ssa_218 = intrinsic load_deref (ssa_217) (0) /* access=0 */ vec1 32 ssa_219 = imov ssa_218.x vec1 32 ssa_220 = ushr ssa_87, ssa_86 vec1 32 ssa_221 = imul ssa_219, ssa_85 vec1 32 ssa_222 = iadd ssa_221, ssa_220 vec1 32 ssa_223 = iadd ssa_222, ssa_84 vec1 32 ssa_224 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_225 = intrinsic vulkan_resource_index (ssa_224) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_226 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_227 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_228 = ishl ssa_223, ssa_227 vec1 32 ssa_229 = iadd ssa_226, ssa_228 vec1 32 ssa_230 = intrinsic load_ssbo (ssa_225, ssa_229) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_231 = deref_var &r1 (function_temp vec4) vec4 32 ssa_232 = intrinsic load_deref (ssa_231) (0) /* access=0 */ vec4 32 ssa_233 = vec4 ssa_232.x, ssa_232.y, ssa_232.z, ssa_230 vec1 32 ssa_234 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_234, ssa_233) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_235 = deref_var &r1 (function_temp vec4) vec4 32 ssa_236 = intrinsic load_deref (ssa_235) (0) /* access=0 */ vec2 32 ssa_237 = vec2 ssa_236.y, ssa_236.w vec2 32 ssa_238 = ishl ssa_237, ssa_2023 vec1 32 ssa_239 = 
deref_var &r1 (function_temp vec4) vec4 32 ssa_240 = intrinsic load_deref (ssa_239) (0) /* access=0 */ vec4 32 ssa_241 = vec4 ssa_238.x, ssa_240.y, ssa_238.y, ssa_240.w vec1 32 ssa_242 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_242, ssa_241) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_243 = deref_var &r1 (function_temp vec4) vec4 32 ssa_244 = intrinsic load_deref (ssa_243) (0) /* access=0 */ vec4 32 ssa_245 = ishr ssa_244, ssa_2020 vec1 32 ssa_246 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_246, ssa_245) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_247 = deref_var &r1 (function_temp vec4) vec4 32 ssa_248 = intrinsic load_deref (ssa_247) (0) /* access=0 */ vec3 32 ssa_249 = vec3 ssa_248.w, ssa_248.w, ssa_248.w vec1 32 ssa_250 = deref_var &r1 (function_temp vec4) vec4 32 ssa_251 = intrinsic load_deref (ssa_250) (0) /* access=0 */ vec3 32 ssa_252 = vec3 ssa_251.x, ssa_251.y, ssa_251.z vec3 32 ssa_253 = imul ssa_249, ssa_252 vec1 32 ssa_254 = deref_var &r1 (function_temp vec4) vec4 32 ssa_255 = intrinsic load_deref (ssa_254) (0) /* access=0 */ vec4 32 ssa_256 = vec4 ssa_253.x, ssa_253.y, ssa_253.z, ssa_255.w vec1 32 ssa_257 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_257, ssa_256) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_258 = deref_var &r1 (function_temp vec4) vec4 32 ssa_259 = intrinsic load_deref (ssa_258) (0) /* access=0 */ vec3 32 ssa_260 = vec3 ssa_259.x, ssa_259.y, ssa_259.z vec3 32 ssa_261 = i2f32 ssa_260 vec1 32 ssa_262 = deref_var &r1 (function_temp vec4) vec4 32 ssa_263 = intrinsic load_deref (ssa_262) (0) /* access=0 */ vec4 32 ssa_264 = vec4 ssa_261.x, ssa_261.y, ssa_261.z, ssa_263.w vec1 32 ssa_265 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_265, ssa_264) (15, 0) /* wrmask=xyzw */ /* access=0 */ [interleaved fragments from a concurrent shader dump omitted] vec1 32 ssa_501 = iadd ssa_490, ssa_500 vec1 32 ssa_502 = intrinsic load_ubo (ssa_487, ssa_501) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_503 = vec4 ssa_493, ssa_496, ssa_499, ssa_502 vec1 32 ssa_504 = fdot4 ssa_485, ssa_503 vec1 32 ssa_505 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_506 = intrinsic load_deref (ssa_505) (0) /* access=0 */ vec4 32 ssa_507 = vec4 ssa_506.x, ssa_506.y, ssa_506.z, ssa_504 vec1 32 ssa_508 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_508, ssa_507) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_509 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_510 = intrinsic load_deref (ssa_509) (0) /* access=0 */ vec1 32 ssa_511 = imov ssa_510.x vec1 32 ssa_512 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_513 = intrinsic load_deref (ssa_512) (0) /* access=0 */ vec4 32 ssa_514 = vec4 ssa_511, ssa_513.y, ssa_513.z, ssa_513.w vec1 32 ssa_515 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_515, ssa_514) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_516 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_517 = intrinsic load_deref (ssa_516) (0) /* access=0 */ vec1 32 ssa_518 = imov ssa_517.x vec1 32 ssa_519 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_520 = intrinsic load_deref (ssa_519) (0) /* access=0 */ vec4 32 ssa_521 = vec4 ssa_520.x, ssa_518, ssa_520.z, ssa_520.w vec1 32 ssa_522 = deref_var
&r0 (shader_temp vec4) intrinsic store_deref (ssa_522, ssa_521) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_523 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_524 = intrinsic load_deref (ssa_523) (0) /* access=0 */ vec1 32 ssa_525 = imov ssa_524.x vec1 32 ssa_526 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_527 = intrinsic load_deref (ssa_526) (0) /* access=0 */ vec4 32 ssa_528 = vec4 ssa_527.x, ssa_527.y, ssa_525, ssa_527.w vec1 32 ssa_529 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_529, ssa_528) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_530 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_531 = intrinsic load_deref (ssa_530) (0) /* access=0 */ vec3 32 ssa_532 = vec3 ssa_531.x, ssa_531.y, ssa_531.z vec1 32 ssa_533 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_534 = intrinsic vulkan_resource_index (ssa_533) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_535 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_536 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_537 = iadd ssa_535, ssa_536 vec1 32 ssa_538 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_539 = iadd ssa_537, ssa_538 vec1 32 ssa_540 = intrinsic load_ubo (ssa_534, ssa_539) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_541 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_542 = iadd ssa_537, ssa_541 vec1 32 ssa_543 = intrinsic load_ubo (ssa_534, ssa_542) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_544 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_545 = iadd ssa_537, ssa_544 vec1 32 ssa_546 = intrinsic load_ubo (ssa_534, ssa_545) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_547 = vec3 ssa_540, ssa_543, ssa_546 vec1 32 ssa_548 = fdot3 ssa_532, ssa_547 vec1 32 ssa_549 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_550 = intrinsic load_deref (ssa_549) (0) /* access=0 */ vec4 32 ssa_551 = vec4 ssa_548, ssa_550.y, ssa_550.z, ssa_550.w vec1 32 ssa_552 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_552, ssa_551) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_553 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_554 = intrinsic load_deref (ssa_553) (0) /* access=0 */ vec1 32 ssa_555 = imov ssa_554.y vec1 32 ssa_556 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_557 = intrinsic load_deref (ssa_556) (0) /* access=0 */ vec4 32 ssa_558 = vec4 ssa_555, ssa_557.y, ssa_557.z, ssa_557.w vec1 32 ssa_559 = deref_var &r13 (shader_temp vec4) intrinsic store_deref (ssa_559, ssa_558) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_560 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_561 = intrinsic load_deref (ssa_560) (0) /* access=0 */ vec1 32 ssa_562 = imov ssa_561.y vec1 32 ssa_563 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_564 = intrinsic load_deref (ssa_563) (0) /* access=0 */ vec4 32 ssa_565 = vec4 ssa_564.x, ssa_562, ssa_564.z, ssa_564.w vec1 32 ssa_566 = deref_var &r13 (shader_temp vec4) intrinsic store_deref (ssa_566, ssa_565) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_567 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_568 = intrinsic load_deref (ssa_567) (0) /* access=0 */ vec1 32 ssa_569 = imov ssa_568.y vec1 32 ssa_570 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_571 = intrinsic load_deref (ssa_570) (0) /* access=0 */ vec4 32 ssa_572 = vec4 ssa_571.x, ssa_571.y, ssa_569, ssa_571.w vec1 32 ssa_573 = deref_var &r13 (shader_temp vec4) intrinsic store_deref (ssa_573, ssa_572) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_574 = deref_var &r13 (shader_temp vec4) 
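The pattern that repeats all through this dump — vulkan_resource_index for set=0/binding=0, three scalar 4-byte load_ubo reads at base, base+4, base+8 (bases 0x210, 0x220, 0x230, 0x240), a vec3 rebuild, then an fdot3 or fdot4 whose scalar result is inserted into one component of r12 — evaluates a matrix-vector product one dot product per component. The C below is a minimal sketch of one such row (the ssa_574..ssa_596 sequence just after this point, which writes r12.y); the float3-row-at-0x210 layout and every name in it are assumptions read off the dump, not taken from the DXVK or Mesa sources.

#include <stdio.h>
#include <string.h>

typedef struct { float x, y, z; } vec3;

/* ssa_584/ssa_587/ssa_590 style: three scalar 4-byte UBO loads at
 * base+0, base+4, base+8, recombined into a vec3 (ssa_591 style). */
static vec3 load_ubo_vec3(const unsigned char *ubo, unsigned base)
{
    vec3 v;
    memcpy(&v.x, ubo + base + 0, 4);
    memcpy(&v.y, ubo + base + 4, 4);
    memcpy(&v.z, ubo + base + 8, 4);
    return v;
}

/* ssa_592 style: the scalarized dot product. */
static float fdot3(vec3 a, vec3 b)
{
    return a.x * b.x + a.y * b.y + a.z * b.z;
}

int main(void)
{
    unsigned char ubo[0x400];          /* stand-in for the set=0 binding=0 UBO */
    memset(ubo, 0, sizeof(ubo));
    float one = 1.0f;
    memcpy(ubo + 0x210, &one, 4);      /* fake constant row so the demo prints something */

    vec3 r13 = { 2.0f, 3.0f, 4.0f };   /* stand-in for the r13.xyz swizzle (ssa_576) */
    float y = fdot3(r13, load_ubo_vec3(ubo, 0x210));
    printf("r12.y = %f\n", y);         /* the value ssa_595/ssa_596 write back */
    return 0;
}

Once all four components of r12 are filled this way, r12 is itself fdot4'd against r3 into o0, which is the usual shape DXBC dp3/dp4 position math takes after translation.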
vec4 32 ssa_575 = intrinsic load_deref (ssa_574) (0) /* access=0 */ vec3 32 ssa_576 = vec3 ssa_575.x, ssa_575.y, ssa_575.z vec1 32 ssa_577 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_578 = intrinsic vulkan_resource_index (ssa_577) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_579 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_580 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_581 = iadd ssa_579, ssa_580 vec1 32 ssa_582 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_583 = iadd ssa_581, ssa_582 vec1 32 ssa_584 = intrinsic load_ubo (ssa_578, ssa_583) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_585 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_586 = iadd ssa_581, ssa_585 vec1 32 ssa_587 = intrinsic load_ubo (ssa_578, ssa_586) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_588 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_589 = iadd ssa_581, ssa_588 vec1 32 ssa_590 = intrinsic load_ubo (ssa_578, ssa_589) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_591 = vec3 ssa_584, ssa_587, ssa_590 vec1 32 ssa_592 = fdot3 ssa_576, ssa_591 vec1 32 ssa_593 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_594 = intrinsic load_deref (ssa_593) (0) /* access=0 */ vec4 32 ssa_595 = vec4 ssa_594.x, ssa_592, ssa_594.z, ssa_594.w vec1 32 ssa_596 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_596, ssa_595) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_597 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_598 = intrinsic load_deref (ssa_597) (0) /* access=0 */ vec1 32 ssa_599 = imov ssa_598.z vec1 32 ssa_600 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_601 = intrinsic load_deref (ssa_600) (0) /* access=0 */ vec4 32 ssa_602 = vec4 ssa_599, ssa_601.y, ssa_601.z, ssa_601.w vec1 32 ssa_603 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_603, ssa_602) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_604 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_605 = intrinsic load_deref (ssa_604) (0) /* access=0 */ vec1 32 ssa_606 = imov ssa_605.z vec1 32 ssa_607 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_608 = intrinsic load_deref (ssa_607) (0) /* access=0 */ vec4 32 ssa_609 = vec4 ssa_608.x, ssa_606, ssa_608.z, ssa_608.w vec1 32 ssa_610 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_610, ssa_609) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_611 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_612 = intrinsic load_deref (ssa_611) (0) /* access=0 */ vec1 32 ssa_613 = imov ssa_612.z vec1 32 ssa_614 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_615 = intrinsic load_deref (ssa_614) (0) /* access=0 */ vec4 32 ssa_616 = vec4 ssa_615.x, ssa_615.y, ssa_613, ssa_615.w vec1 32 ssa_617 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_617, ssa_616) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_618 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_619 = intrinsic load_deref (ssa_618) (0) /* access=0 */ vec3 32 ssa_620 = vec3 ssa_619.x, ssa_619.y, ssa_619.z vec1 32 ssa_621 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_622 = intrinsic vulkan_resource_index (ssa_621) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_623 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_624 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_625 = iadd ssa_623, ssa_624 vec1 32 ssa_626 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_627 = iadd ssa_625, ssa_626 vec1 32 ssa_628 = intrinsic load_ubo (ssa_622, ssa_627) (4, 0) /* 
align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_629 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_630 = iadd ssa_625, ssa_629 vec1 32 ssa_631 = intrinsic load_ubo (ssa_622, ssa_630) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_632 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_633 = iadd ssa_625, ssa_632 vec1 32 ssa_634 = intrinsic load_ubo (ssa_622, ssa_633) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_635 = vec3 ssa_628, ssa_631, ssa_634 vec1 32 ssa_636 = fdot3 ssa_620, ssa_635 vec1 32 ssa_637 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_638 = intrinsic load_deref (ssa_637) (0) /* access=0 */ vec4 32 ssa_639 = vec4 ssa_638.x, ssa_638.y, ssa_636, ssa_638.w vec1 32 ssa_640 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_640, ssa_639) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_641 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_642 = intrinsic load_deref (ssa_641) (0) /* access=0 */ vec4 32 ssa_643 = vec4 ssa_642.x, ssa_642.y, ssa_642.z, ssa_77 vec1 32 ssa_644 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_644, ssa_643) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_645 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_646 = intrinsic load_deref (ssa_645) (0) /* access=0 */ vec1 32 ssa_647 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_648 = intrinsic load_deref (ssa_647) (0) /* access=0 */ vec1 32 ssa_649 = fdot4 ssa_646, ssa_648 vec1 32 ssa_650 = deref_var &o0 (shader_out vec4) vec4 32 ssa_651 = intrinsic load_deref (ssa_650) (0) /* access=0 */ vec4 32 ssa_652 = vec4 ssa_649, ssa_651.y, ssa_651.z, ssa_651.w vec1 32 ssa_653 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_653, ssa_652) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_654 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_655 = intrinsic load_deref (ssa_654) (0) /* access=0 */ vec1 32 ssa_656 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_657 = intrinsic vulkan_resource_index (ssa_656) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_658 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_659 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_660 = iadd ssa_658, ssa_659 vec1 32 ssa_661 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_662 = iadd ssa_660, ssa_661 vec1 32 ssa_663 = intrinsic load_ubo (ssa_657, ssa_662) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_664 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_665 = iadd ssa_660, ssa_664 vec1 32 ssa_666 = intrinsic load_ubo (ssa_657, ssa_665) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_667 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_668 = iadd ssa_660, ssa_667 vec1 32 ssa_669 = intrinsic load_ubo (ssa_657, ssa_668) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_670 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_671 = iadd ssa_660, ssa_670 vec1 32 ssa_672 = intrinsic load_ubo (ssa_657, ssa_671) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_673 = vec4 ssa_663, ssa_666, ssa_669, ssa_672 vec1 32 ssa_674 = fdot4 ssa_655, ssa_673 vec1 32 ssa_675 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_676 = intrinsic load_deref (ssa_675) (0) /* access=0 */ vec4 32 ssa_677 = vec4 ssa_676.x, ssa_676.y, ssa_676.z, ssa_674 vec1 32 ssa_678 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_678, ssa_677) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_679 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_680 = intrinsic load_deref (ssa_679) (0) /* access=0 */ vec3 32 ssa_681 = 
vec3 ssa_680.x, ssa_680.y, ssa_680.z vec1 32 ssa_682 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_683 = intrinsic vulkan_resource_index (ssa_682) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_684 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_685 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_686 = iadd ssa_684, ssa_685 vec1 32 ssa_687 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_688 = iadd ssa_686, ssa_687 vec1 32 ssa_689 = intrinsic load_ubo (ssa_683, ssa_688) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_690 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_691 = iadd ssa_686, ssa_690 vec1 32 ssa_692 = intrinsic load_ubo (ssa_683, ssa_691) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_693 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_694 = iadd ssa_686, ssa_693 vec1 32 ssa_695 = intrinsic load_ubo (ssa_683, ssa_694) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_696 = vec3 ssa_689, ssa_692, ssa_695 vec1 32 ssa_697 = fdot3 ssa_681, ssa_696 vec1 32 ssa_698 = deref_var &r12 (shader_temp vec4) [interleaved fragments from a concurrent shader dump omitted] vec4 32 ssa_675 = vec4 ssa_674.x, ssa_674.y, ssa_672, ssa_674.w vec1 32 ssa_676 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_676, ssa_675) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_677 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_678 = intrinsic load_deref (ssa_677) (0) /* access=0 */ vec4 32 ssa_679 = vec4 ssa_678.x, ssa_678.y, ssa_678.z, ssa_67 vec1 32 ssa_680 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_680, ssa_679) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_681 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_682 = intrinsic load_deref (ssa_681) (0) /* access=0 */ vec1 32 ssa_683 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_684 = intrinsic load_deref (ssa_683) (0) /* access=0 */ vec1 32 ssa_685 = fdot4 ssa_682, ssa_684 vec1 32 ssa_686 = deref_var &o0 (shader_out vec4) vec4 32 ssa_687 = intrinsic load_deref (ssa_686) (0) /* access=0 */ vec4 32 ssa_688 = vec4 ssa_685, ssa_687.y, ssa_687.z, ssa_687.w vec1 32 ssa_689 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_689, ssa_688) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_690 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_691 = intrinsic load_deref (ssa_690) (0) /* access=0 */ vec1 32 ssa_692 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_693 = intrinsic vulkan_resource_index (ssa_692) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_694 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_695 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_696 = iadd ssa_694, ssa_695 vec1 32 ssa_697 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_698 = iadd ssa_696, ssa_697 vec1 32 ssa_699 = intrinsic load_ubo (ssa_693, ssa_698) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_700 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_701 = iadd ssa_696, ssa_700 vec1 32 ssa_702 = intrinsic load_ubo (ssa_693, ssa_701) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_703 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_704 = iadd ssa_696, ssa_703 vec1 32 ssa_705 = intrinsic load_ubo (ssa_693, ssa_704) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_706 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_707 = iadd ssa_696, ssa_706 vec1 32 ssa_708 = intrinsic
load_ubo (ssa_693, ssa_707) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_709 = vec4 ssa_699, ssa_702, ssa_705, ssa_708 vec1 32 ssa_710 = fdot4 ssa_691, ssa_709 vec1 32 ssa_711 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_712 = intrinsic load_deref (ssa_711) (0) /* access=0 */ vec4 32 ssa_713 = vec4 ssa_712.x, ssa_712.y, ssa_712.z, ssa_710 vec1 32 ssa_714 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_714, ssa_713) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_715 = deref_var &r15 (shader_temp vec4) vec4 32 ssa_716 = intrinsic load_deref (ssa_715) (0) /* access=0 */ vec3 32 ssa_717 = vec3 ssa_716.x, ssa_716.y, ssa_716.z vec1 32 ssa_718 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_719 = intrinsic vulkan_resource_index (ssa_718) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_720 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_721 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_722 = iadd ssa_720, ssa_721 vec1 32 ssa_723 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_724 = iadd ssa_722, ssa_723 vec1 32 ssa_725 = intrinsic load_ubo (ssa_719, ssa_724) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_726 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_727 = iadd ssa_722, ssa_726 vec1 32 ssa_728 = intrinsic load_ubo (ssa_719, ssa_727) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_729 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_730 = iadd ssa_722, ssa_729 vec1 32 ssa_731 = intrinsic load_ubo (ssa_719, ssa_730) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_732 = vec3 ssa_725, ssa_728, ssa_731 vec1 32 ssa_733 = fdot3 ssa_717, ssa_732 vec1 32 ssa_734 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_735 = intrinsic load_deref (ssa_734) (0) /* access=0 */ vec4 32 ssa_736 = vec4 ssa_733, ssa_735.y, ssa_735.z, ssa_735.w vec1 32 ssa_737 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_737, ssa_736) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_738 = deref_var &r16 (shader_temp vec4) vec4 32 ssa_739 = intrinsic load_deref (ssa_738) (0) /* access=0 */ vec3 32 ssa_740 = vec3 ssa_739.x, ssa_739.y, ssa_739.z vec1 32 ssa_741 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_742 = intrinsic vulkan_resource_index (ssa_741) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_743 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_744 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_745 = iadd ssa_743, ssa_744 vec1 32 ssa_746 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_747 = iadd ssa_745, ssa_746 vec1 32 ssa_748 = intrinsic load_ubo (ssa_742, ssa_747) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_749 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_750 = iadd ssa_745, ssa_749 vec1 32 ssa_751 = intrinsic load_ubo (ssa_742, ssa_750) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_752 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_753 = iadd ssa_745, ssa_752 vec1 32 ssa_754 = intrinsic load_ubo (ssa_742, ssa_753) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_755 = vec3 ssa_748, ssa_751, ssa_754 vec1 32 ssa_756 = fdot3 ssa_740, ssa_755 vec1 32 ssa_757 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_758 = intrinsic load_deref (ssa_757) (0) /* access=0 */ vec4 32 ssa_759 = vec4 ssa_758.x, ssa_756, ssa_758.z, ssa_758.w vec1 32 ssa_760 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_760, ssa_759) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_761 = deref_var &r17 
(shader_temp vec4) vec4 32 ssa_762 = intrinsic load_deref (ssa_761) (0) /* access=0 */ vec3 32 ssa_763 = vec3 ssa_762.x, ssa_762.y, ssa_762.z vec1 32 ssa_764 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_765 = intrinsic vulkan_resource_index (ssa_764) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_766 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_767 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_768 = iadd ssa_766, ssa_767 vec1 32 ssa_769 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_770 = iadd ssa_768, ssa_769 vec1 32 ssa_771 = intrinsic load_ubo (ssa_765, ssa_770) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_772 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_773 = iadd ssa_768, ssa_772 vec1 32 ssa_774 = intrinsic load_ubo (ssa_765, ssa_773) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_775 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_776 = iadd ssa_768, ssa_775 vec1 32 ssa_777 = intrinsic load_ubo (ssa_765, ssa_776) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_778 = vec3 ssa_771, ssa_774, ssa_777 vec1 32 ssa_779 = fdot3 ssa_763, ssa_778 vec1 32 ssa_780 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_781 = intrinsic load_deref (ssa_780) (0) /* access=0 */ vec4 32 ssa_782 = vec4 ssa_781.x, ssa_781.y, ssa_779, ssa_781.w [interleaved fragments from a concurrent shader dump omitted] vec1 32 ssa_799 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_800 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_801 = deref_array &(*ssa_799)[4] (shader_temp vec4) /* &shader_in[4] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_802 = intrinsic load_deref (ssa_801) (0) /* access=0 */ vec2 32 ssa_803 = vec2 ssa_802.x, ssa_802.y vec2 32 ssa_804 = fmul ssa_803, ssa_69 vec1 32 ssa_805 = deref_var &o1 (shader_out vec4) vec4 32 ssa_806 = intrinsic load_deref (ssa_805) (0) /* access=0 */ vec4 32 ssa_807 = vec4 ssa_804.x, ssa_804.y, ssa_806.z, ssa_806.w vec1 32 ssa_808 = deref_var &o1 (shader_out vec4) intrinsic store_deref (ssa_808, ssa_807) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_809 = deref_var &o1 (shader_out vec4) vec4 32 ssa_810 = intrinsic load_deref (ssa_809) (0) /* access=0 */ vec4 32 ssa_811 = vec4 ssa_810.x, ssa_810.y, ssa_68.x, ssa_68.y vec1 32 ssa_812 = deref_var &o1 (shader_out vec4) intrinsic store_deref (ssa_812, ssa_811) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_813 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_814 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_815 = deref_array &(*ssa_813)[3] (shader_temp vec4) /* &shader_in[3] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_816 = intrinsic load_deref (ssa_815) (0) /* access=0 */ vec1 32 ssa_817 = deref_var &o2 (shader_out vec4) intrinsic store_deref (ssa_817, ssa_816) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_818 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_819 = intrinsic load_deref (ssa_818) (0) /* access=0 */ vec1 32 ssa_820 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_821 = intrinsic load_deref (ssa_820) (0) /* access=0 */ vec1 32 ssa_822 = fdot4 ssa_819, ssa_821 vec1 32 ssa_823 = deref_var &o3 (shader_out vec4) vec4 32 ssa_824 = intrinsic load_deref (ssa_823) (0) /* access=0 */ vec4 32 ssa_825 = vec4 ssa_822, ssa_824.y, ssa_824.z, ssa_824.w vec1 32 ssa_826 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_826, ssa_825) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_827 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_828 = intrinsic load_deref (ssa_827) (0) /* access=0 */ vec1 32 ssa_829 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_830 = intrinsic load_deref (ssa_829) (0) /* access=0 */ vec1 32 ssa_831 = fdot4 ssa_828, ssa_830
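Every deref_array on shader_in above is immediately flagged by the NIR validator: error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466). The quoted assertion is the invariant that an array-element deref must carry exactly the element type of its parent deref's array type; here the parent &shader_in is typed as the opaque aggregate "Z" while each shader_in[n] deref claims vec4, so the check fails on every access. The C below is a toy paraphrase of that invariant under those assumptions, written to show why this combination trips it; it is an illustration, not the Mesa source.

#include <stdio.h>

/* Toy model of the nir_validate invariant quoted in the dump
 * (paraphrased, not Mesa code): a deref_array must be typed as the
 * element type of its parent deref's (array) type. */
typedef enum { TYPE_VEC4, TYPE_STRUCT_Z, TYPE_ARRAY_OF_VEC4 } type_t;

typedef struct deref {
    const struct deref *parent;   /* NULL for a deref_var */
    type_t type;                  /* type this deref claims to produce */
} deref_t;

static type_t array_element(type_t t)
{
    /* glsl_get_array_element stand-in: only arrays of vec4 exist here */
    return t == TYPE_ARRAY_OF_VEC4 ? TYPE_VEC4 : TYPE_STRUCT_Z;
}

static int validate_deref_array(const deref_t *d)
{
    /* instr->type == glsl_get_array_element(parent->type) */
    return d->type == array_element(d->parent->type);
}

int main(void)
{
    /* &shader_in is typed "Z", not vec4[n], yet shader_in[3] claims vec4 */
    deref_t shader_in = { NULL, TYPE_STRUCT_Z };
    deref_t elem      = { &shader_in, TYPE_VEC4 };
    printf("deref valid: %d\n", validate_deref_array(&elem)); /* prints 0 */
    return 0;
}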
vec1 32 ssa_832 = deref_var &o3 (shader_out vec4) vec4 32 ssa_833 = intrinsic load_deref (ssa_832) (0) /* access=0 */ vec4 32 ssa_834 = vec4 ssa_833.x, ssa_831, ssa_833.z, ssa_833.w vec1 32 ssa_835 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_835, ssa_834) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_836 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_837 = intrinsic load_deref (ssa_836) (0) /* access=0 */ vec1 32 ssa_838 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_839 = intrinsic load_deref (ssa_838) (0) /* access=0 */ vec1 32 ssa_840 = fdot4 ssa_837, ssa_839 vec1 32 ssa_841 = deref_var &o3 (shader_out vec4) vec4 32 ssa_842 = intrinsic load_deref (ssa_841) (0) /* access=0 */ vec4 32 ssa_843 = vec4 ssa_842.x, ssa_842.y, ssa_840, ssa_842.w vec1 32 ssa_844 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_844, ssa_843) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_845 = deref_var &o3 (shader_out vec4) vec4 32 ssa_846 = intrinsic load_deref (ssa_845) (0) /* access=0 */ vec4 32 ssa_847 = vec4 ssa_846.x, ssa_846.y, ssa_846.z, ssa_67 vec1 32 ssa_848 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_848, ssa_847) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_849 = deref_var &o4 (shader_out vec4) vec4 32 ssa_850 = intrinsic load_deref (ssa_849) (0) /* access=0 */ vec4 32 ssa_851 = vec4 ssa_850.x, ssa_850.y, ssa_850.z, ssa_66 vec1 32 ssa_852 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_852, ssa_851) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_853 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_854 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_855 = deref_array &(*ssa_853)[2] (shader_temp vec4) /* &shader_in[2] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_856 = intrinsic load_deref (ssa_855) (0) /* access=0 */ vec4 32 ssa_857 = u2f32 ssa_856 vec1 32 ssa_858 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_858, ssa_857) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_266 = deref_var &r1 (function_temp vec4) vec4 32 ssa_267 = intrinsic load_deref (ssa_266) (0) /* access=0 */ vec3 32 ssa_268 = vec3 ssa_267.x, ssa_267.y, ssa_267.z vec3 32 ssa_269 = fmul ssa_268, ssa_2015 vec1 32 ssa_270 = deref_var &r3 (function_temp vec4) vec4 32 ssa_271 = intrinsic load_deref (ssa_270) (0) /* access=0 */ vec4 32 ssa_272 = vec4 ssa_269.x, ssa_269.y, ssa_269.z, ssa_271.w vec1 32 ssa_273 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_273, ssa_272) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_274 = deref_var &r0 (function_temp vec4) vec4 32 ssa_275 = intrinsic load_deref (ssa_274) (0) /* access=0 */ [interleaved fragments from a concurrent shader dump omitted] vec1 32 ssa_859 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_860 = intrinsic load_deref (ssa_859) (0) /* access=0 */ vec4 32 ssa_861 = ffma ssa_860, ssa_65, ssa_64 vec1 32 ssa_862 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_862, ssa_861) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_863 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_864 = intrinsic load_deref (ssa_863) (0) /* access=0 */ vec4 32 ssa_865 = fmin ssa_864, ssa_63 vec1 32 ssa_866 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_866, ssa_865) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_867 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_868 = intrinsic load_deref (ssa_867) (0) /* access=0 */ vec2 32 ssa_869 = vec2 ssa_868.y, ssa_868.w [interleaved fragments from a concurrent shader dump omitted]
vec1 32 ssa_870 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_871 = intrinsic load_deref (ssa_870) (0) /* access=0 */ vec2 32 ssa_872 = vec2 ssa_871.x, ssa_871.z vec2 32 ssa_873 = fadd ssa_869, ssa_872 vec1 32 ssa_874 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_875 = intrinsic load_deref (ssa_874) (0) /* access=0 */ vec4 32 ssa_876 = vec4 ssa_873.x, ssa_873.y, ssa_875.z, ssa_875.w vec1 32 ssa_877 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_877, ssa_876) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_878 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_879 = intrinsic load_deref (ssa_878) (0) /* access=0 */ vec2 32 ssa_880 = vec2 ssa_879.y, ssa_879.w vec2 32 ssa_881 = fneg ssa_880 vec1 32 ssa_882 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_883 = intrinsic load_deref (ssa_882) (0) /* access=0 */ vec2 32 ssa_884 = vec2 ssa_883.x, ssa_883.z vec2 32 ssa_885 = fadd ssa_881, ssa_884 vec1 32 ssa_886 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_887 = intrinsic load_deref (ssa_886) (0) /* access=0 */ vec4 32 ssa_888 = vec4 ssa_887.x, ssa_887.y, ssa_885.x, ssa_885.y vec1 32 ssa_889 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_889, ssa_888) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_890 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_891 = intrinsic load_deref (ssa_890) (0) /* access=0 */ vec2 32 ssa_892 = vec2 ssa_891.y, ssa_891.w vec2 32 ssa_893 = fmul ssa_892, ssa_62 vec1 32 ssa_894 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_895 = intrinsic load_deref (ssa_894) (0) /* access=0 */ vec4 32 ssa_896 = vec4 ssa_895.x, ssa_893.x, ssa_893.y, ssa_895.w vec1 32 ssa_897 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_897, ssa_896) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_898 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_899 = intrinsic load_deref (ssa_898) (0) /* access=0 */ vec2 32 ssa_900 = vec2 ssa_899.x, ssa_899.z vec2 32 ssa_901 = fmul ssa_900, ssa_61 vec1 32 ssa_902 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_903 = intrinsic load_deref (ssa_902) (0) /* access=0 */ vec4 32 ssa_904 = vec4 ssa_901.x, ssa_901.y, ssa_903.z, ssa_903.w vec1 32 ssa_905 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_905, ssa_904) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_906 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_907 = intrinsic load_deref (ssa_906) (0) /* access=0 */ vec1 32 ssa_908 = imov ssa_907.y vec1 32 ssa_909 = fabs ssa_908 vec1 32 ssa_910 = fneg ssa_909 vec1 32 ssa_911 = fadd ssa_910, ssa_60 vec1 32 ssa_912 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_913 = intrinsic load_deref (ssa_912) (0) /* access=0 */ vec4 32 ssa_914 = vec4 ssa_911, ssa_913.y, ssa_913.z, ssa_913.w vec1 32 ssa_915 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_915, ssa_914) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_916 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_917 = intrinsic load_deref (ssa_916) (0) /* access=0 */ vec1 32 ssa_918 = imov ssa_917.z vec1 32 ssa_919 = fabs ssa_918 vec1 32 ssa_658 = fdot4 ssa_655, ssa_657 vec1 32 ssa_659 = deref_var &out@o0-temp (function_temp vec4) vec4 32 ssa_660 = intrinsic load_deref (ssa_659) (0) /* access=0 */ vec4 32 ssa_661 = vec4 ssa_658, ssa_660.y, ssa_660.z, ssa_660.w vec1 32 ssa_662 = deref_var &out@o0-temp (function_temp vec4) intrinsic store_deref (ssa_662, ssa_661) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_663 = deref_var &r7 (function_temp vec4) vec4 32 ssa_664 = intrinsic load_deref (ssa_663) (0) /* access=0
*/ vec1 32 ssa_665 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_666 = intrinsic vulkan_resource_index (ssa_665) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_667 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_668 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_669 = iadd ssa_667, ssa_668 vec1 32 ssa_670 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_671 = iadd ssa_669, ssa_670 vec1 32 ssa_672 = intrinsic load_ubo (ssa_666, ssa_671) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_673 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_674 = iadd ssa_669, ssa_673 vec1 32 ssa_675 = intrinsic load_ubo (ssa_666, ssa_674) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_676 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_677 = iadd ssa_669, ssa_676 vec1 32 ssa_678 = intrinsic load_ubo (ssa_666, ssa_677) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_679 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_680 = iadd ssa_669, ssa_679 vec1 32 ssa_681 = intrinsic load_ubo (ssa_666, ssa_680) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_682 = vec4 ssa_672, ssa_675, ssa_678, ssa_681 vec1 32 ssa_683 = fdot4 ssa_664, ssa_682 vec1 32 ssa_684 = deref_var &r12 (function_temp vec4) vec4 32 ssa_685 = intrinsic load_deref (ssa_684) (0) /* access=0 */ vec4 32 ssa_686 = vec4 ssa_685.x, ssa_685.y, ssa_685.z, ssa_683 vec1 32 ssa_687 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_687, ssa_686) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_688 = deref_var &r0 (function_temp vec4) vec4 32 ssa_689 = intrinsic load_deref (ssa_688) (0) /* access=0 */ vec3 32 ssa_690 = vec3 ssa_689.x, ssa_689.y, ssa_689.z vec1 32 ssa_691 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_692 = intrinsic vulkan_resource_index (ssa_691) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_693 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_694 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_695 = iadd ssa_693, ssa_694 vec1 32 ssa_696 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_697 = iadd ssa_695, ssa_696 vec1 32 ssa_698 = intrinsic load_ubo (ssa_692, ssa_697) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_699 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_700 = iadd ssa_695, ssa_699 vec1 32 ssa_701 = intrinsic load_ubo (ssa_692, ssa_700) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_702 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_703 = iadd ssa_695, ssa_702 vec1 32 ssa_704 = intrinsic load_ubo (ssa_692, ssa_703) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_705 = vec3 ssa_698, ssa_701, ssa_704 vec1 32 ssa_706 = fdot3 ssa_690, ssa_705 vec1 32 ssa_707 = deref_var &r12 (function_temp vec4) vec4 32 ssa_708 = intrinsic load_deref (ssa_707) (0) /* access=0 */ vec4 32 ssa_709 = vec4 ssa_706, ssa_708.y, ssa_708.z, ssa_708.w vec1 32 ssa_710 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_710, ssa_709) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_711 = deref_var &r13 (function_temp vec4) vec4 32 ssa_712 = intrinsic load_deref (ssa_711) (0) /* access=0 */ vec3 32 ssa_713 = vec3 ssa_712.x, ssa_712.y, ssa_712.z vec1 32 ssa_714 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_715 = intrinsic vulkan_resource_index (ssa_714) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_716 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_717 = load_const (0x00000220 /* 0.000000 */) vec1 32 
ssa_718 = iadd ssa_716, ssa_717 vec1 32 ssa_719 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_720 = iadd ssa_718, ssa_719 vec1 32 ssa_721 = intrinsic load_ubo (ssa_715, ssa_720) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_722 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_723 = iadd ssa_718, ssa_722 vec1 32 ssa_724 = intrinsic load_ubo (ssa_715, ssa_723) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_725 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_726 = iadd ssa_718, ssa_725 vec1 32 ssa_727 = intrinsic load_ubo (ssa_715, ssa_726) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_728 = vec3 ssa_721, ssa_724, ssa_727 vec1 32 ssa_729 = fdot3 ssa_713, ssa_728 vec1 32 ssa_730 = deref_var &r12 (function_temp vec4) vec4 32 ssa_731 = intrinsic load_deref (ssa_730) (0) /* access=0 */ vec4 32 ssa_732 = vec4 ssa_731.x, ssa_729, ssa_731.z, ssa_731.w vec1 32 ssa_733 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_733, ssa_732) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_734 = deref_var &r14 (function_temp vec4) vec4 32 ssa_735 = intrinsic load_deref (ssa_734) (0) /* access=0 */ vec3 32 ssa_736 = vec3 ssa_735.x, ssa_735.y, ssa_735.z vec1 32 ssa_737 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_738 = intrinsic vulkan_resource_index (ssa_737) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_739 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_740 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_741 = iadd ssa_739, ssa_740 vec1 32 ssa_742 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_743 = iadd ssa_741, ssa_742 vec1 32 ssa_744 = intrinsic load_ubo (ssa_738, ssa_743) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_745 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_746 = iadd ssa_741, ssa_745 vec1 32 ssa_747 = intrinsic load_ubo (ssa_738, ssa_746) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_748 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_749 = iadd ssa_741, ssa_748 vec1 32 ssa_750 = intrinsic load_ubo (ssa_738, ssa_749) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_751 = vec3 ssa_744, ssa_747, ssa_750 vec1 32 ssa_752 = fdot3 ssa_736, ssa_751 vec1 32 ssa_753 = deref_var &r12 (function_temp vec4) vec4 32 ssa_754 = intrinsic load_deref (ssa_753) (0) /* access=0 */ vec4 32 ssa_755 = vec4 ssa_754.x, ssa_754.y, ssa_752, ssa_754.w vec1 32 ssa_756 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_756, ssa_755) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_757 = deref_var &r3 (function_temp vec4) vec4 32 ssa_758 = intrinsic load_deref (ssa_757) (0) /* access=0 */ vec1 32 ssa_759 = deref_var &r12 (function_temp vec4) vec4 32 ssa_760 = intrinsic load_deref (ssa_759) (0) /* access=0 */ vec1 32 ssa_761 = fdot4 ssa_758, ssa_760 vec1 32 ssa_762 = deref_var &out@o0-temp (function_temp vec4) vec4 32 ssa_763 = intrinsic load_deref (ssa_762) (0) /* access=0 */ vec4 32 ssa_764 = vec4 ssa_763.x, ssa_761, ssa_763.z, ssa_763.w vec1 32 ssa_765 = deref_var &out@o0-temp (function_temp vec4) intrinsic store_deref (ssa_765, ssa_764) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_766 = deref_var &r7 (function_temp vec4) vec4 32 ssa_767 = intrinsic load_deref (ssa_766) (0) /* access=0 */ vec1 32 ssa_768 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_769 = intrinsic vulkan_resource_index (ssa_768) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_770 = load_const (0x00000000 /* 0.000000 */) vec1 32 
ssa_771 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_772 = iadd ssa_770, ssa_771 vec1 32 ssa_773 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_774 = iadd ssa_772, ssa_773 vec1 32 ssa_775 = intrinsic load_ubo (ssa_769, ssa_774) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_776 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_777 = iadd ssa_772, ssa_776 vec1 32 ssa_778 = intrinsic load_ubo (ssa_769, ssa_777) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_779 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_780 = iadd ssa_772, ssa_779 vec1 32 ssa_781 = intrinsic load_ubo (ssa_769, ssa_780) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_782 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_783 = iadd ssa_772, ssa_782 vec1 32 ssa_784 = intrinsic load_ubo (ssa_769, ssa_783) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_785 = vec4 ssa_775, ssa_778, ssa_781, ssa_784 vec1 32 ssa_786 = fdot4 ssa_767, ssa_785 vec1 32 ssa_787 = deref_var &r12 (function_temp vec4) vec4 32 ssa_788 = intrinsic load_deref (ssa_787) (0) /* access=0 */ vec4 32 ssa_789 = vec4 ssa_788.x, ssa_788.y, ssa_788.z, ssa_786 vec1 32 ssa_790 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_790, ssa_789) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_791 = deref_var &r7 (function_temp vec4) vec4 32 ssa_792 = intrinsic load_deref (ssa_791) (0) /* access=0 */ vec1 32 ssa_793 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_794 = intrinsic vulkan_resource_index (ssa_793) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_795 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_796 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_797 = iadd ssa_795, ssa_796 vec1 32 ssa_798 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_799 = iadd ssa_797, ssa_798 vec1 32 ssa_800 = intrinsic load_ubo (ssa_794, ssa_799) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_801 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_802 = iadd ssa_797, ssa_801 vec1 32 ssa_803 = intrinsic load_ubo (ssa_794, ssa_802) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_804 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_805 = iadd ssa_797, ssa_804 vec1 32 ssa_806 = intrinsic load_ubo (ssa_794, ssa_805) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_807 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_808 = iadd ssa_797, ssa_807 vec1 32 ssa_809 = intrinsic load_ubo (ssa_794, ssa_808) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_810 = vec4 ssa_800, ssa_803, ssa_806, ssa_809 vec1 32 ssa_811 = fdot4 ssa_792, ssa_810 vec1 32 ssa_812 = deref_var &r7 (function_temp vec4) vec4 32 ssa_813 = intrinsic load_deref (ssa_812) (0) /* access=0 */ vec4 32 ssa_814 = vec4 ssa_813.x, ssa_813.y, ssa_813.z, ssa_811 vec1 32 ssa_815 = deref_var &r7 (function_temp vec4) intrinsic store_deref (ssa_815, ssa_814) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_816 = deref_var &r0 (function_temp vec4) vec4 32 ssa_817 = intrinsic load_deref (ssa_816) (0) /* access=0 */ vec3 32 ssa_818 = vec3 ssa_817.x, ssa_817.y, ssa_817.z vec1 32 ssa_819 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_820 = intrinsic vulkan_resource_index (ssa_819) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_821 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_822 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_823 = iadd ssa_821, ssa_822 vec1 32 ssa_824 = load_const (0x00000000 /* 0.000000 */) vec1 32 
ssa_825 = iadd ssa_823, ssa_824 vec1 32 ssa_826 = intrinsic load_ubo (ssa_820, ssa_825) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_827 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_828 = iadd ssa_823, ssa_827 vec1 32 ssa_829 = intrinsic load_ubo (ssa_820, ssa_828) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_830 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_831 = iadd ssa_823, ssa_830 vec1 32 ssa_832 = intrinsic load_ubo (ssa_820, ssa_831) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_833 = vec3 ssa_826, ssa_829, ssa_832 vec1 32 ssa_834 = fdot3 ssa_818, ssa_833 vec1 32 ssa_835 = deref_var &r12 (function_temp vec4) vec4 32 ssa_836 = intrinsic load_deref (ssa_835) (0) /* access=0 */ vec4 32 ssa_837 = vec4 ssa_834, ssa_836.y, ssa_836.z, ssa_836.w vec1 32 ssa_838 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_838, ssa_837) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_839 = deref_var &r0 (function_temp vec4) vec4 32 ssa_840 = intrinsic load_deref (ssa_839) (0) /* access=0 */ vec3 32 ssa_841 = vec3 ssa_840.x, ssa_840.y, ssa_840.z vec1 32 ssa_842 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_843 = intrinsic vulkan_resource_index (ssa_842) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_844 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_845 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_846 = iadd ssa_844, ssa_845 vec1 32 ssa_847 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_848 = iadd ssa_846, ssa_847 vec1 32 ssa_849 = intrinsic load_ubo (ssa_843, ssa_848) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_850 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_851 = iadd ssa_846, ssa_850 vec1 32 ssa_852 = intrinsic load_ubo (ssa_843, ssa_851) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_853 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_854 = iadd ssa_846, ssa_853 vec1 32 ssa_855 = intrinsic load_ubo (ssa_843, ssa_854) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_856 = vec3 ssa_849, ssa_852, ssa_855 vec1 32 ssa_857 = fdot3 ssa_841, ssa_856 vec1 32 ssa_858 = deref_var &r7 (function_temp vec4) vec4 32 ssa_859 = intrinsic load_deref (ssa_858) (0) /* access=0 */ vec4 32 ssa_860 = vec4 ssa_857, ssa_859.y, ssa_859.z, ssa_859.w vec1 32 ssa_861 = deref_var &r7 (function_temp vec4) intrinsic store_deref (ssa_861, ssa_860) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_862 = deref_var &r13 (function_temp vec4) vec4 32 ssa_863 = intrinsic load_deref (ssa_862) (0) /* access=0 */ vec3 32 ssa_864 = vec3 ssa_863.x, ssa_863.y, ssa_863.z vec1 32 ssa_865 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_866 = intrinsic vulkan_resource_index (ssa_865) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_867 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_868 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_869 = iadd ssa_867, ssa_868 vec1 32 ssa_870 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_871 = iadd ssa_869, ssa_870 vec1 32 ssa_872 = intrinsic load_ubo (ssa_866, ssa_871) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_873 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_874 = iadd ssa_869, ssa_873 vec1 32 ssa_875 = intrinsic load_ubo (ssa_866, ssa_874) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_876 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_877 = iadd ssa_869, ssa_876 vec1 32 ssa_878 = intrinsic load_ubo (ssa_866, ssa_877) (4, 0) /* align_mul=4 */ /* 
align_offset=0 */ vec3 32 ssa_879 = vec3 ssa_872, ssa_875, ssa_878 vec1 32 ssa_880 = fdot3 ssa_864, ssa_879 vec1 32 ssa_881 = deref_var &r12 (function_temp vec4) vec4 32 ssa_882 = intrinsic load_deref (ssa_881) (0) /* access=0 */ vec4 32 ssa_883 = vec4 ssa_882.x, ssa_880, ssa_882.z, ssa_882.w vec1 32 ssa_884 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_884, ssa_883) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_885 = deref_var &r13 (function_temp vec4) vec4 32 ssa_886 = intrinsic load_deref (ssa_885) (0) /* access=0 */ vec3 32 ssa_887 = vec3 ssa_886.x, ssa_886.y, ssa_886.z vec1 32 ssa_888 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_889 = intrinsic vulkan_resource_index (ssa_888) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_890 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_891 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_892 = iadd ssa_890, ssa_891 vec1 32 ssa_893 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_894 = iadd ssa_892, ssa_893 vec1 32 ssa_895 = intrinsic load_ubo (ssa_889, ssa_894) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_896 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_897 = iadd ssa_892, ssa_896 vec1 32 ssa_898 = intrinsic load_ubo (ssa_889, ssa_897) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_899 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_900 = iadd ssa_892, ssa_899 vec1 32 ssa_901 = intrinsic load_ubo (ssa_889, ssa_900) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_902 = vec3 ssa_895, ssa_898, ssa_901 vec1 32 ssa_903 = fdot3 ssa_887, ssa_902 vec1 32 ssa_904 = deref_var &r7 (function_temp vec4) vec4 32 ssa_905 = intrinsic load_deref (ssa_904) (0) /* access=0 */ vec4 32 ssa_906 = vec4 ssa_905.x, ssa_903, ssa_905.z, ssa_905.w vec1 32 ssa_907 = deref_var &r7 (function_temp vec4) intrinsic store_deref (ssa_907, ssa_906) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_908 = deref_var &r14 (function_temp vec4) vec4 32 ssa_909 = intrinsic load_deref (ssa_908) (0) /* access=0 */ vec3 32 ssa_910 = vec3 ssa_909.x, ssa_909.y, ssa_909.z vec1 32 ssa_911 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_912 = intrinsic vulkan_resource_index (ssa_911) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_913 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_914 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_915 = iadd ssa_913, ssa_914 vec1 32 ssa_916 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_917 = iadd ssa_915, ssa_916 vec1 32 ssa_918 = intrinsic load_ubo (ssa_912, ssa_917) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_919 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_920 = iadd ssa_915, ssa_919 vec1 32 ssa_921 = intrinsic load_ubo (ssa_912, ssa_920) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_922 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_923 = iadd ssa_915, ssa_922 vec1 32 ssa_924 = intrinsic load_ubo (ssa_912, ssa_923) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_925 = vec3 ssa_918, ssa_921, ssa_924 vec1 32 ssa_926 = fdot3 ssa_910, ssa_925 vec1 32 ssa_927 = deref_var &r12 (function_temp vec4) vec4 32 ssa_928 = intrinsic load_deref (ssa_927) (0) /* access=0 */ vec4 32 ssa_929 = vec4 ssa_928.x, ssa_928.y, ssa_926, ssa_928.w vec1 32 ssa_930 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_930, ssa_929) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_931 = deref_var &r14 (function_temp vec4) vec4 32 ssa_932 = intrinsic 
load_deref (ssa_931) (0) /* access=0 */ vec3 32 ssa_933 = vec3 ssa_932.x, ssa_932.y, ssa_932.z vec1 32 ssa_934 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_935 = intrinsic vulkan_resource_index (ssa_934) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_936 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_937 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_938 = iadd ssa_936, ssa_937 vec1 32 ssa_939 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_940 = iadd ssa_938, ssa_939 vec1 32 ssa_941 = intrinsic load_ubo (ssa_935, ssa_940) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_942 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_943 = iadd ssa_938, ssa_942 vec1 32 ssa_944 = intrinsic load_ubo (ssa_935, ssa_943) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_945 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_946 = iadd ssa_938, ssa_945 vec1 32 ssa_947 = intrinsic load_ubo (ssa_935, ssa_946) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_948 = vec3 ssa_941, ssa_944, ssa_947 vec1 32 ssa_949 = fdot3 ssa_933, ssa_948 vec1 32 ssa_950 = deref_var &r7 (function_temp vec4) vec4 32 ssa_951 = intrinsic load_deref (ssa_950) (0) /* access=0 */ vec4 32 ssa_952 = vec4 ssa_951.x, ssa_951.y, ssa_949, ssa_951.w vec1 32 ssa_953 = deref_var &r7 (function_temp vec4) intrinsic store_deref (ssa_953, ssa_952) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_954 = deref_var &r3 (function_temp vec4) vec4 32 ssa_955 = intrinsic load_deref (ssa_954) (0) /* access=0 */ vec1 32 ssa_956 = deref_var &r7 (function_temp vec4) vec4 32 ssa_957 = intrinsic load_deref (ssa_956) (0) /* access=0 */ vec1 32 ssa_958 = fdot4 ssa_955, ssa_957 vec1 32 ssa_959 = deref_var &out@o0-temp (function_temp vec4) vec4 32 ssa_960 = intrinsic load_deref (ssa_959) (0) /* access=0 */ vec4 32 ssa_961 = vec4 ssa_960.x, ssa_960.y, ssa_960.z, ssa_958 vec1 32 ssa_962 = deref_var &out@o0-temp (function_temp vec4) intrinsic store_deref (ssa_962, ssa_961) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_963 = deref_var &r3 (function_temp vec4) vec4 32 ssa_964 = intrinsic load_deref (ssa_963) (0) /* access=0 */ vec1 32 ssa_965 = deref_var &r12 (function_temp vec4) vec4 32 ssa_966 = intrinsic load_deref (ssa_965) (0) /* access=0 */ vec1 32 ssa_967 = fdot4 ssa_964, ssa_966 vec1 32 ssa_968 = deref_var &out@o0-temp (function_temp vec4) vec4 32 ssa_969 = intrinsic load_deref (ssa_968) (0) /* access=0 */ vec4 32 ssa_970 = vec4 ssa_969.x, ssa_969.y, ssa_967, ssa_969.w vec1 32 ssa_971 = deref_var &out@o0-temp (function_temp vec4) intrinsic store_deref (ssa_971, ssa_970) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_972 = deref_var &r4 (function_temp vec4) vec4 32 ssa_973 = intrinsic load_deref (ssa_972) (0) /* access=0 */ vec1 32 ssa_974 = imov ssa_973.z vec1 32 ssa_975 = ushr ssa_85, ssa_84 vec1 32 ssa_976 = imul ssa_974, ssa_83 vec1 32 ssa_977 = iadd ssa_976, ssa_975 vec1 32 ssa_978 = iadd ssa_977, ssa_82 vec1 32 ssa_979 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_980 = intrinsic vulkan_resource_index (ssa_979) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_981 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_982 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_983 = ishl ssa_978, ssa_982 vec1 32 ssa_984 = iadd ssa_981, ssa_983 vec1 32 ssa_985 = intrinsic load_ssbo (ssa_980, ssa_984) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_986 = deref_var &r0 (function_temp vec4) vec4 32 ssa_987 = intrinsic load_deref (ssa_986) (0) /* access=0 */ vec4 32 ssa_988 = vec4 ssa_987.x, ssa_985, ssa_987.z, ssa_987.w vec1 32 ssa_989 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_989, ssa_988) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_990 = deref_var &r0 (function_temp vec4) vec4 32 ssa_991 = intrinsic load_deref (ssa_990) (0) /* access=0 */ vec1 32 ssa_992 = imov ssa_991.y vec1 32 ssa_993 = ishl ssa_992, ssa_81 vec1 32 ssa_994 = deref_var &r0 (function_temp vec4) vec4 32 ssa_995 = intrinsic load_deref (ssa_994) (0) /* access=0 */ vec4 32 ssa_996 = vec4 ssa_993, ssa_995.y, ssa_995.z, ssa_995.w vec1 32 ssa_997 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_997, ssa_996) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_998 = deref_var &r0 (function_temp vec4) vec4 32 ssa_999 = intrinsic load_deref (ssa_998) (0) /* access=0 */ vec2 32 ssa_1000 = vec2 ssa_999.x, ssa_999.y vec2 32 ssa_1001 = ishr ssa_1000, ssa_2352 vec1 32 ssa_1002 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1003 = intrinsic load_deref (ssa_1002) (0) /* access=0 */ vec4 32 ssa_1004 = vec4 ssa_1001.x, ssa_1001.y, ssa_1003.z, ssa_1003.w vec1 32 ssa_1005 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1005, ssa_1004) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1006 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1007 = intrinsic load_deref (ssa_1006) (0) /* access=0 */ vec2 32 ssa_1008 = vec2 ssa_1007.x, ssa_1007.y vec2 32 ssa_1009 = i2f32 ssa_1008 vec1 32 ssa_1010 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1011 = intrinsic load_deref (ssa_1010) (0) /* access=0 */ vec4 32 ssa_1012 = vec4 ssa_1009.x, ssa_1009.y, ssa_1011.z, ssa_1011.w vec1 32 ssa_1013 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1013, ssa_1012) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1014 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1015 = intrinsic load_deref (ssa_1014) (0) /* access=0 */ vec2 32 ssa_1016 = vec2 ssa_1015.x, ssa_1015.y vec2 32 ssa_1017 = fmul ssa_1016, ssa_2349 vec1 32 ssa_1018 = deref_var &out@o1-temp (function_temp vec4) vec4 32 ssa_1019 = intrinsic load_deref (ssa_1018) (0) /* access=0 */ vec4 32 ssa_1020 = vec4 ssa_1017.x, ssa_1017.y, ssa_1019.z, ssa_1019.w vec1 32 ssa_1021 = deref_var &out@o1-temp (function_temp vec4) intrinsic store_deref (ssa_1021, ssa_1020) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1022 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1023 = intrinsic load_deref (ssa_1022) (0) /* access=0 */ vec1 32 ssa_1024 = imov ssa_1023.w
ssa_781.y, ssa_779, ssa_781,
vec1 32 ssa_920 = fneg ssa_919 vec1 32 ssa_921 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_922 = intrinsic load_deref (ssa_921) (0) /* access=0 */ vec1 32 ssa_923 = imov ssa_922.x vec1 32 ssa_924 = fadd ssa_920, ssa_923 vec1 32 ssa_925 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_926 = intrinsic load_deref (ssa_925) (0) /* access=0 */ vec4 32 ssa_927 = vec4 ssa_926.x, ssa_926.y, ssa_926.z, ssa_924 vec1 32 ssa_928 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_928, ssa_927) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_929 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_930 = intrinsic load_deref (ssa_929) (0) /* access=0 */ vec3 32 ssa_931 = vec3 ssa_930.y, ssa_930.z, ssa_930.w vec1 32 ssa_932 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_933 = intrinsic load_deref (ssa_932) (0) /* access=0 */ vec3 32 ssa_934 = vec3 ssa_933.y, ssa_933.z, ssa_933.w vec1 32 ssa_935 = fdot3 ssa_931, ssa_934 vec1 32 ssa_936 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_937 = intrinsic load_deref (ssa_936) (0) /* access=0 */ vec4 32 ssa_938 = vec4 ssa_935, ssa_937.y, ssa_937.z, ssa_937.w vec1 32 ssa_939 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_939, ssa_938) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_940 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_941 = intrinsic load_deref (ssa_940) (0) /* access=0 */ vec1 32 ssa_942 = imov ssa_941.x vec1 32 ssa_943 = frsq ssa_942 vec1 32 ssa_944 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_945 = intrinsic load_deref (ssa_944) (0) /* access=0 */ vec4 32 ssa_946 = vec4 ssa_943, ssa_945.y, ssa_945.z, ssa_945.w vec1 32 ssa_947 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_947, ssa_946) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_948 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_949 = intrinsic load_deref (ssa_948) (0) /* access=0 */ vec3 32 ssa_950 = vec3 ssa_949.x, ssa_949.x, ssa_949.x vec1 32 ssa_951 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_952 = intrinsic load_deref (ssa_951) (0) /* access=0 */ vec3 32 ssa_953 = vec3 ssa_952.y, ssa_952.z, ssa_952.w vec3 32 ssa_954 = fmul ssa_950, ssa_953 vec1 32 ssa_955 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_956 = intrinsic load_deref (ssa_955) (0) /* access=0 */ vec4 32 ssa_957 = vec4 ssa_956.x, ssa_954.x, ssa_954.y, ssa_954.z vec1 32 ssa_958 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_958, ssa_957) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_959 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_960 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_961 = deref_array &(*ssa_959)[0] (shader_temp vec4) /* &shader_in[0] */
error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466)
vec4 32 ssa_962 = intrinsic load_deref (ssa_961) (0) /* access=0 */ vec1 32 ssa_963 = imov ssa_962.w vec1 32 ssa_964 = fmul ssa_963, ssa_59 vec1 32 ssa_965 = deref_var &r3 (shader_temp vec4)
0107:fixme:d3d:wined3d_adapter_init_gl_caps A set of 4 devices is not supported.
vec4 32 ssa_966 = intrinsic load_deref (ssa_965) (0) /* access=0 */ vec4 32 ssa_967 = vec4 ssa_966.x, ssa_966.y, ssa_966.z, ssa_964 vec1 32 ssa_968 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_968, ssa_967) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_969 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_970 = intrinsic load_deref (ssa_969) (0) /* access=0 */ vec1 32 ssa_971 = imov ssa_970.w vec1 32 ssa_972 = f2u32 ssa_971 vec1 32 ssa_973 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_974 = intrinsic load_deref (ssa_973) (0) /* access=0 */ vec4 32 ssa_975 = vec4 ssa_974.x, ssa_974.y, ssa_974.z, ssa_972 vec1 32 ssa_976 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_976, ssa_975) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_977 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_978 = intrinsic load_deref (ssa_977) (0) /* access=0 */ vec3 32 ssa_979 = vec3 ssa_978.w, ssa_978.w, ssa_978.w vec3 32 ssa_980 = iand ssa_979, ssa_58 vec1 32 ssa_981 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_982 = intrinsic load_deref (ssa_981) (0) /* access=0 */ vec4 32 ssa_983 = vec4 ssa_980.x, ssa_980.y, ssa_980.z, ssa_982.w vec1 32 ssa_984 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_984, ssa_983) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_985 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_986 = intrinsic load_deref (ssa_985) (0) /* access=0 */ vec3 32 ssa_987 = vec3 ssa_986.x, ssa_986.y, ssa_986.z vec3 1 ssa_988 = ult ssa_57, ssa_987 vec3 32 ssa_989 = bcsel ssa_988, ssa_55, ssa_56 vec1 32 ssa_990 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_991 = intrinsic load_deref (ssa_990) (0) /* access=0 */ vec4 32 ssa_992 = vec4 ssa_989.x, ssa_989.y, ssa_989.z, ssa_991.w vec1 32 ssa_993 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_993, ssa_992) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_994 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_995 = intrinsic load_deref (ssa_994) (0) /* access=0 */ vec3 32 ssa_996 = vec3 ssa_995.x, ssa_995.y, ssa_995.z vec3 1 ssa_997 = ine ssa_996, ssa_52 vec3 32 ssa_998 = bcsel ssa_997, ssa_54, ssa_53 vec1 32 ssa_999 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1000 = intrinsic load_deref (ssa_999) (0) /* access=0 */ vec4 32 ssa_1001 = vec4 ssa_998.x, ssa_998.y, ssa_998.z, ssa_1000.w vec1 32 ssa_1002 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_1002, ssa_1001) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1003 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1004 = intrinsic load_deref (ssa_1003) (0) /* access=0 */ vec1 32 ssa_1005 = imov ssa_1004.w vec1 32 ssa_1006 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1007 = intrinsic load_deref (ssa_1006) (0) /* access=0 */ vec1 32 ssa_1008 = imov ssa_1007.y vec1 32 ssa_1009 = fmul ssa_1005, ssa_1008 vec1 32 ssa_1010 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1011 = intrinsic load_deref (ssa_1010) (0) /* access=0 */ vec4 32 ssa_1012 = vec4 ssa_1009, ssa_1011.y, ssa_1011.z, ssa_1011.w vec1 32 ssa_1013 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1013, ssa_1012) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1014 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1015 = intrinsic load_deref (ssa_1014) (0) /* access=0 */ vec3 32 ssa_1016 = vec3 ssa_1015.y, ssa_1015.z, ssa_1015.x vec1 32 ssa_1017 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1018 = intrinsic load_deref (ssa_1017) (0) /* access=0 */ vec3 32 ssa_1019 = vec3 ssa_1018.x, ssa_1018.y, ssa_1018.z
vec4 32 ssa_276 = vec4 ssa_275.x, ssa_275.x, ssa_275.x, ssa_275.x vec1 32 ssa_277 = imov ssa_276.x /* succs: block_7 block_8 */ if ssa_80 { block block_7: /* preds: block_6 */ vec1 32 ssa_278 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_1871 = load_const (0x00000000 /* 0.000000 */)
vec1 32 ssa_701 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_701, ssa_700) (15, 0) /* wrmask=xyzw */ /* access=0 */
ssa_750.x.w = vec4 ssa_697, ssa_699.y, ssa_699.z, ssa_699.w ssa_750.y if ssa_748, ssa_750.w vec1 32 ssa_752 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_752, vec1 32 ssa_783 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_783, ssa_751 ssa_782) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_784 = deref_var & ) (15 xyzw , 0) /* wrmask=xyzw */ /* access=0 */
vec1 32 ssa_753 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_754 = intrinsic load_deref (ssa_753) (0) /* access=0 */ vec1 32 ssa_755 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_756 = intrinsic load_deref (ssa_755) (0) /* access=0 */ vec1 32 ssa_757 = fdot4 ssa_754, ssa_756 vec1 32 ssa_758 = deref_var &o0 (shader_out vec4) vec4 32 ssa_759 = intrinsic load_deref (ssa_758) (0) /* access=0 */ vec4 32 ssa_760 = vec4 ssa_759.x, ssa_757, ssa_759.z, ssa_759.w vec1 32 ssa_761 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_761, ssa_760) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_762 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_763 = intrinsic load_deref (ssa_762) (0) /* access=0 */ vec1 32 ssa_764
= load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_765 = intrinsic vulkan_resource_index (ssa_764) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_766 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_767 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_768 = iadd ssa_766, ssa_767 vec1 32 ssa_769 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_770 = iadd ssa_768, ssa_769 vec1 32 ssa_771 = intrinsic load_ubo (ssa_765, ssa_770) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_772 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_773 = iadd ssa_768, ssa_772 vec1 32 ssa_774 = intrinsic load_ubo (ssa_765, ssa_773) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_775 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_776 = iadd ssa_768, ssa_775 vec1 32 ssa_777 = intrinsic load_ubo (ssa_765, ssa_776) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_778 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_779 = iadd ssa_768, ssa_778 vec1 32 ssa_780 = intrinsic load_ubo (ssa_765, ssa_779) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_781 = vec4 ssa_771, ssa_774, ssa_777, ssa_780 vec1 32 ssa_782 = fdot4 ssa_763, ssa_781 vec1 32 ssa_783 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_784 = intrinsic load_deref (ssa_783) (0) /* access=0 */ vec4 32 ssa_785 = vec4 ssa_784.x, ssa_784.y, ssa_784.z, ssa_782 vec1 32 ssa_786 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_786, ssa_785) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_787 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_788 = intrinsic load_deref (ssa_787) (0) /* access=0 */ vec1 32 ssa_789 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_790 = intrinsic vulkan_resource_index (ssa_789) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_791 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_792 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_793 = iadd ssa_791, ssa_792 vec1 32 ssa_794 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_795 = iadd ssa_793, ssa_794 vec1 32 ssa_796 = intrinsic load_ubo (ssa_790, ssa_795) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_797 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_798 = iadd ssa_793, ssa_797 vec1 32 ssa_799 = intrinsic load_ubo (ssa_790, ssa_798) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_800 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_801 = iadd ssa_793, ssa_800 vec1 32 ssa_802 = intrinsic load_ubo (ssa_790, ssa_801) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_803 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_804 = iadd ssa_793, ssa_803 vec1 32 ssa_805 = intrinsic load_ubo (ssa_790, ssa_804) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_806 = vec4 ssa_796, ssa_799, ssa_802, ssa_805 vec1 32 ssa_807 = fdot4 ssa_788, ssa_806 vec1 32 ssa_808 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_809 = intrinsic load_deref (ssa_808) (0) /* access=0 */ vec4 32 ssa_810 = vec4 ssa_809.x, ssa_809.y, ssa_809.z, ssa_807 vec1 32 ssa_811 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_811, ssa_810) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_812 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_813 = intrinsic load_deref (ssa_812) (0) /* access=0 */ vec3 32 ssa_814 = vec3 ssa_813.x, ssa_813.y, ssa_813.z vec1 32 ssa_815 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_816 = intrinsic vulkan_resource_index (ssa_815) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 
32 ssa_817 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_818 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_819 = iadd ssa_817, ssa_818 vec1 32 ssa_820 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_821 = iadd ssa_819, ssa_820 vec1 32 ssa_822 = intrinsic load_ubo (ssa_816, ssa_821) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_823 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_824 = iadd ssa_819, ssa_823 vec1 32 ssa_825 = intrinsic load_ubo (ssa_816, ssa_824) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_826 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_827 = iadd ssa_819, ssa_826 vec1 32 ssa_828 = intrinsic load_ubo (ssa_816, ssa_827) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_829 = vec3 ssa_822, ssa_825, ssa_828 vec1 32 ssa_830 = fdot3 ssa_814, ssa_829 vec1 32 ssa_831 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_832 = intrinsic load_deref (ssa_831) (0) /* access=0 */ vec4 32 ssa_833 = vec4 ssa_830, ssa_832.y, ssa_832.z, ssa_832.w vec1 32 ssa_834 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_834, ssa_833) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_835 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_836 = intrinsic load_deref (ssa_835) (0) /* access=0 */ vec3 32 ssa_837 = vec3 ssa_836.x, ssa_836.y, ssa_836.z vec1 32 ssa_838 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_839 = intrinsic vulkan_resource_index (ssa_838) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_840 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_841 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_842 = iadd ssa_840, ssa_841 vec1 32 ssa_843 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_844 = iadd ssa_842, ssa_843 vec1 32 ssa_845 = intrinsic load_ubo (ssa_839, ssa_844) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_846 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_847 = iadd ssa_842, ssa_846 vec1 32 ssa_848 = intrinsic load_ubo (ssa_839, ssa_847) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_849 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_850 = iadd ssa_842, ssa_849 vec1 32 ssa_851 = intrinsic load_ubo (ssa_839, ssa_850) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_852 = vec3 ssa_845, ssa_848, ssa_851 vec1 32 ssa_853 = fdot3 ssa_837, ssa_852 vec1 32 ssa_854 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_855 = intrinsic load_deref (ssa_854) (0) /* access=0 */ vec4 32 ssa_856 = vec4 ssa_853, ssa_855.y, ssa_855.z, ssa_855.w vec1 32 ssa_857 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_857, ssa_856) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_858 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_859 = intrinsic load_deref (ssa_858) (0) /* access=0 */ vec3 32 ssa_860 = vec3 ssa_859.x, ssa_859.y, ssa_859.z vec1 32 ssa_861 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_862 = intrinsic vulkan_resource_index (ssa_861) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_863 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_864 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_865 = iadd ssa_863, ssa_864 vec1 32 ssa_866 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_867 = iadd ssa_865, ssa_866 vec1 32 ssa_868 = intrinsic load_ubo (ssa_862, ssa_867) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_869 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_870 = iadd ssa_865, ssa_869 vec1 32 ssa_871 = intrinsic load_ubo (ssa_862, ssa_870) (4, 0) /* align_mul=4 */ /* 
align_offset=0 */ vec1 32 ssa_872 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_873 = iadd ssa_865, ssa_872 vec1 32 ssa_874 = intrinsic load_ubo (ssa_862, ssa_873) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_875 = vec3 ssa_868, ssa_871, ssa_874 vec1 32 ssa_876 = fdot3 ssa_860, ssa_875 vec1 32 ssa_877 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_878 = intrinsic load_deref (ssa_877) (0) /* access=0 */ vec4 32 ssa_879 = vec4 ssa_878.x, ssa_876, ssa_878.z, ssa_878.w vec1 32 ssa_880 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_880, ssa_879) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_881 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_882 = intrinsic load_deref (ssa_881) (0) /* access=0 */ vec3 32 ssa_883 = vec3 ssa_882.x, ssa_882.y, ssa_882.z vec1 32 ssa_884 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_885 = intrinsic vulkan_resource_index (ssa_884) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_886 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_887 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_888 = iadd ssa_886, ssa_887 vec1 32 ssa_889 = load_const (0x00000000 /* 0.000000 */)
vec1 32 ssa_1025 = ushr ssa_78, ssa_77 vec1 32 ssa_1026 = imul ssa_1024, ssa_76 vec1 32 ssa_1027 = iadd ssa_1026, ssa_1025 vec1 32 ssa_1028 = iadd ssa_1027, ssa_75 vec1 32 ssa_1029 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1030 = intrinsic vulkan_resource_index (ssa_1029) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1031 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1032
vec1 32 ssa_702 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_703 = intrinsic load_deref (ssa_702) (0) /* access=0 */ vec3 32 ssa_704 = vec3 ssa_703.x, ssa_703.y, ssa_703.z vec1 32 ssa_705 = load_const (0x00000000 /* 0.000000 */)
vec4 32 ssa_785 = intrinsic load_deref (ssa_784) (0) /* access=0 */ vec1 32 ssa_786 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_787 = intrinsic load_deref (ssa_786) (0) /* access=0 */ vec1 32 ssa_788 = fdot4 ssa_785, ssa_787 vec1 32 ssa_789 = deref_var &o0 (shader_out vec4) vec4 32 ssa_790 = intrinsic load_deref (ssa_789) (0) /* access=0 */ vec4 32 ssa_791 = vec4
vec1 32 ssa_1020 = fdot3 ssa_1016, ssa_1019 vec1 32 ssa_1021 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1022 = intrinsic load_deref (ssa_1021) (0) /* access=0 */ vec4 32 ssa_1023 = vec4 ssa_1020, ssa_1022.y, ssa_1022.z, ssa_1022.w vec1 32 ssa_1024
r6 (shader_temp vec4)
vec4 32 ssa_279 = txf ssa_278 (texture_deref), ssa_277 (coord), ssa_1871 (lod), 0 (sampler), /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ vec4 32 ssa_1887 = phi block_7: ssa_279, block_8: ssa_1944 vec4 32 ssa_1879 = imov ssa_1887 vec1 32 ssa_284 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_284, ssa_1879) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_285 = deref_var &r4 (function_temp vec4) vec4 32 ssa_286 = intrinsic load_deref (ssa_285) (0) /* access=0 */ vec1 32 ssa_287 = imov ssa_286.w vec1 32 ssa_288 = deref_var &r1 (function_temp vec4) vec4 32 ssa_289 = intrinsic load_deref (ssa_288) (0) /* access=0 */ vec4 32 ssa_290 = vec4 ssa_287, ssa_289.y, ssa_289.z, ssa_289.w vec1 32 ssa_291 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_291, ssa_290) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_292 = deref_var &r0 (function_temp vec4) vec4 32 ssa_293 = intrinsic load_deref (ssa_292) (0) /* access=0 */ vec4 32 ssa_294 = vec4 ssa_293.x, ssa_293.x, ssa_293.z, ssa_293.z vec4 32 ssa_295 = iadd ssa_294, ssa_2011 vec1 32 ssa_296 = deref_var &r5 (function_temp vec4) intrinsic store_deref (ssa_296, ssa_295) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_297 = deref_var &r0 (function_temp vec4) vec4 32 ssa_298 = intrinsic load_deref (ssa_297) (0) /* access=0 */ vec4 32 ssa_299 = vec4 ssa_298.z, ssa_298.z, ssa_298.z, ssa_298.z vec1 32 ssa_300 = imov ssa_299.x /* succs: block_10 block_11 */ if ssa_78 { block block_10: /* preds: block_9 */ vec1 32 ssa_301 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_1872 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_302 = txf ssa_301 (texture_deref), ssa_300 (coord), ssa_1872 (lod), 0 (sampler), /* succs: block_12 */ } else { block block_11: /* preds: block_9 */ /* succs: block_12 */ } block block_12: /* preds: block_10 block_11 */ vec4 32 ssa_1888 = phi block_10: ssa_302, block_11: ssa_1939 vec4 32 ssa_1880 = imov ssa_1888 vec1 32 ssa_307 = deref_var &r6 (function_temp vec4) intrinsic store_deref (ssa_307, ssa_1880) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_308 = deref_var &r5 (function_temp vec4) vec4 32 ssa_309 = intrinsic load_deref (ssa_308) (0) /* access=0 */ vec4 32 ssa_310 = vec4 ssa_309.x, ssa_309.x, ssa_309.x, ssa_309.x vec1 32 ssa_311 = imov ssa_310.x /* succs: block_13 block_14 */ if ssa_77 { block block_13: /* preds: block_12 */ vec1 32 ssa_312 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_1873 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_313 = txf ssa_312 (texture_deref), ssa_311 (coord), ssa_1873 (lod), 0 (sampler), /* succs: block_15 */ } else { block block_14: /* preds: block_12 */ /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ vec4 32 ssa_1889 = phi block_13: ssa_313, block_14: ssa_1934 vec4 32 ssa_1881 = imov ssa_1889 vec1 32 ssa_318 = deref_var &r7 (function_temp vec4) intrinsic store_deref (ssa_318, ssa_1881) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_319 = deref_var &r7 (function_temp vec4) vec4 32 ssa_320 = intrinsic load_deref (ssa_319) (0) /* access=0 */ vec1 32 ssa_321 = imov ssa_320.w vec1 32 ssa_322 = deref_var &r1 (function_temp vec4) vec4 32 ssa_323 = intrinsic load_deref (ssa_322) (0) /* access=0 */ vec4 32 ssa_324 = vec4 ssa_323.x, ssa_321, ssa_323.z, ssa_323.w vec1 32 ssa_325 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_325, ssa_324) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_326 = deref_var &r5 (function_temp vec4) vec4 32 ssa_327 = intrinsic load_deref (ssa_326) (0) /* access=0 */ vec4 32 ssa_328 = vec4 ssa_327.y, ssa_327.y, ssa_327.y, ssa_327.y vec1 32 ssa_329 = imov ssa_328.x /* succs: block_16 block_17 */ if ssa_76 { block block_16: /* preds: block_15 */ vec1 32 ssa_330 = deref_var &t80 (uniform samplerBuffer)
vec1 32 ssa_890 = iadd ssa_888, ssa_889 vec1 32 ssa_891 = intrinsic load_ubo (ssa_885, ssa_890) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_892 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_893 = iadd ssa_888, ssa_892 vec1 32 ssa_894 = intrinsic load_ubo (ssa_885, ssa_893) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_895 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_896 = iadd ssa_888, ssa_895 vec1 32 ssa_897 = intrinsic load_ubo (ssa_885, ssa_896) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_898 = vec3 ssa_891, ssa_894, ssa_897 vec1 32 ssa_899 = fdot3 ssa_883, ssa_898 vec1 32 ssa_900 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_901 = intrinsic load_deref (ssa_900) (0) /*
access=0 */ vec4 32 ssa_902 = vec4 ssa_901.x, ssa_899, ssa_901.z, ssa_901.w vec1 32 ssa_903 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_903, ssa_902) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_904 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_905 = intrinsic load_deref (ssa_904) (0) /* access=0 */ vec3 32 ssa_906 = vec3 ssa_905.x, ssa_905.y, ssa_905.z vec1 32 ssa_907 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_908 = intrinsic vulkan_resource_index (ssa_907) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_909 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_910 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_911 = iadd ssa_909, ssa_910 vec1 32 ssa_912 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_913 = iadd ssa_911, ssa_912 vec1 32 ssa_914 = intrinsic load_ubo (ssa_908, ssa_913) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_915 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_916 = iadd ssa_911, ssa_915 vec1 32 ssa_917 = intrinsic load_ubo (ssa_908, ssa_916) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_918 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_919 = iadd ssa_911, ssa_918 vec1 32 ssa_920 = intrinsic load_ubo (ssa_908, ssa_919) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_921 = vec3 ssa_914, ssa_917, ssa_920 vec1 32 ssa_922 = fdot3 ssa_906, ssa_921 vec1 32 ssa_923 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_924 = intrinsic load_deref (ssa_923) (0) /* access=0 */ vec4 32 ssa_925 = vec4 ssa_924.x, ssa_924.y, ssa_922, ssa_924.w vec1 32 ssa_926 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_926, ssa_925) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_927 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_928 = intrinsic load_deref (ssa_927) (0) /* access=0 */ vec3 32 ssa_929 = vec3 ssa_928.x, ssa_928.y, ssa_928.z vec1 32 ssa_930 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_931 = intrinsic vulkan_resource_index (ssa_930) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_932 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_933 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_934 = iadd ssa_932, ssa_933 vec1 32 ssa_935 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_936 = iadd ssa_934, ssa_935 vec1 32 ssa_937 = intrinsic load_ubo (ssa_931, ssa_936) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_938 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_939 = iadd ssa_934, ssa_938 vec1 32 ssa_940 = intrinsic load_ubo (ssa_931, ssa_939) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_941 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_942 = iadd ssa_934, ssa_941 vec1 32 ssa_943 = intrinsic load_ubo (ssa_931, ssa_942) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_944 = vec3 ssa_937, ssa_940, ssa_943 vec1 32 ssa_945 = fdot3 ssa_929, ssa_944 vec1 32 ssa_946 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_947 = intrinsic load_deref (ssa_946) (0) /* access=0 */ vec4 32 ssa_948 = vec4 ssa_947.x, ssa_947.y, ssa_945, ssa_947.w vec1 32 ssa_949 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_949, ssa_948) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_950 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_951 = intrinsic load_deref (ssa_950) (0) /* access=0 */ vec1 32 ssa_952 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_953 = intrinsic load_deref (ssa_952) (0) /* access=0 */ vec1 32 ssa_954 = fdot4 ssa_951, ssa_953 vec1 32 ssa_955 = deref_var &o0 
(shader_out vec4) vec4 32 ssa_956 = intrinsic load_deref (ssa_955) (0) /* access=0 */ vec4 32 ssa_957 = vec4 ssa_956.x, ssa_956.y, ssa_956.z, ssa_954 vec1 32 ssa_958 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_958, ssa_957) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_959 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_960 = intrinsic load_deref (ssa_959) (0) /* access=0 */ vec1 32 ssa_961 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_962 = intrinsic load_deref (ssa_961) (0) /* access=0 */ vec1 32 ssa_963 = fdot4 ssa_960, ssa_962 vec1 32 ssa_964 = deref_var &o0 (shader_out vec4) vec4 32 ssa_965 = intrinsic load_deref (ssa_964) (0) /* access=0 */ vec4 32 ssa_966 = vec4 ssa_965.x, ssa_965.y, ssa_963, ssa_965.w vec1 32 ssa_967 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_967, ssa_966) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_968 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_969 = intrinsic load_deref (ssa_968) (0) /* access=0 */ vec1 32 ssa_970 = imov ssa_969.z vec1 32 ssa_971 = ushr ssa_70, ssa_69 vec1 32 ssa_972 = imul ssa_970, ssa_68 vec1 32 ssa_973 = iadd ssa_972, ssa_971 vec1 32 ssa_974 = iadd ssa_973, ssa_67 vec1 32 ssa_975 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_976 = intrinsic vulkan_resource_index (ssa_975) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_977 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_978 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_979 = ishl ssa_974, ssa_978 vec1 32 ssa_980 = iadd ssa_977, ssa_979 vec1 32 ssa_981 = intrinsic load_ssbo (ssa_976, ssa_980) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_982 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_983 = intrinsic load_deref (ssa_982) (0) /* access=0 */ vec4 32 ssa_984 = vec4 ssa_983.x, ssa_981, ssa_983.z, ssa_983.w vec1 32 ssa_985 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_985, ssa_984) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_986 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_987 = intrinsic load_deref (ssa_986) (0) /* access=0 */ vec1 32 ssa_988 = imov ssa_987.y vec1 32 ssa_989 = ushr ssa_66, ssa_65 vec1 32 ssa_990 = imul ssa_988, ssa_64 vec1 32 ssa_991 = iadd ssa_990, ssa_989 vec1 32 ssa_992 = iadd ssa_991, ssa_63 vec1 32 ssa_993 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_994 = intrinsic vulkan_resource_index (ssa_993) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_995 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_996 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_997 = ishl ssa_992, ssa_996 vec1 32 ssa_998 = iadd ssa_995, ssa_997 vec1 32 ssa_999 = intrinsic load_ssbo (ssa_994, ssa_998) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1000 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1001 = intrinsic load_deref (ssa_1000) (0) /* access=0 */ vec4 32 ssa_1002 = vec4 ssa_1001.x, ssa_1001.y, ssa_999, ssa_1001.w vec1 32 ssa_1003 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1003, ssa_1002) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1004 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1005 = intrinsic load_deref (ssa_1004) (0) /* access=0 */ vec1 32 ssa_1006 = imov ssa_1005.z vec1 32 ssa_1007 = ishl ssa_1006, ssa_62 vec1 32 ssa_1008 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1009 = intrinsic load_deref (ssa_1008) (0) /* access=0 */ vec4 32 ssa_1010 = vec4 ssa_1009.x, ssa_1009.y, ssa_1007, ssa_1009.w vec1 32 ssa_1011 = deref_var &r4 
(shader_temp vec4) intrinsic store_deref (ssa_1011, ssa_1010) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1012 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1013 = intrinsic load_deref (ssa_1012) (0) /* access=0 */ vec1 32 ssa_1014 = imov ssa_1013.y vec1 32 ssa_1015 = ishl ssa_1014, ssa_61 vec1 32 ssa_1016 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1017 = intrinsic load_deref (ssa_1016) (0) /* access=0 */ vec4 32 ssa_1018 = vec4 ssa_1015, ssa_1017.y, ssa_1017.z, ssa_1017.w vec1 32 ssa_1019 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1019, ssa_1018) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1020 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1021 = intrinsic load_deref (ssa_1020) (0) /* access=0 */ vec2 32 ssa_1022 = vec2 ssa_1021.x, ssa_1021.y vec2 32 ssa_1023 = ishr ssa_1022, ssa_60 vec1 32 ssa_1024 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1025 = intrinsic load_deref (ssa_1024) (0) /* access=0 */ vec4 32 ssa_1026 = vec4 ssa_1023.x, ssa_1023.y, ssa_1025.z, ssa_1025.w vec1 32 ssa_1027 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1027, ssa_1026) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1028 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1029 = intrinsic load_deref (ssa_1028) (0) /* access=0 */ vec2 32 ssa_1030 = vec2 ssa_1029.x, ssa_1029.y vec2 32 ssa_1031 = i2f32 ssa_1030 vec1 32 ssa_1032 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1033 = intrinsic load_deref (ssa_1032) (0) /* access=0 */ vec4 32 ssa_1034 = vec4 ssa_1031.x, ssa_1031.y, ssa_1033.z, ssa_1033.w vec1 32 ssa_1035 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1035, ssa_1034) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1036 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1037 = intrinsic load_deref (ssa_1036) (0) /* access=0 */ vec2 32 ssa_1038 = vec2 ssa_1037.x, ssa_1037.y vec2 32 ssa_1039 = fmul ssa_1038, ssa_59 vec1 32 ssa_1040 = deref_var &o1 (shader_out vec4) vec4 32 ssa_1041 = intrinsic load_deref (ssa_1040) (0) /* access=0 */ vec4 32 ssa_1042 = vec4 ssa_1039.x, ssa_1039.y, ssa_1041.z, ssa_1041.w vec1 32 ssa_1043 = deref_var &o1 (shader_out vec4) intrinsic store_deref (ssa_1043, ssa_1042) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1044 = deref_var &o1 (shader_out vec4) vec4 32 ssa_1045 = intrinsic load_deref (ssa_1044) (0) /* access=0 */ vec4 32 ssa_1046 = vec4 ssa_1045.x, ssa_1045.y, ssa_58.x, ssa_58.y vec1 32 ssa_1047 = deref_var &o1 (shader_out vec4) intrinsic store_deref (ssa_1047, ssa_1046) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1048 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1049 = intrinsic load_deref (ssa_1048) (0) /* access=0 */ vec1 32 ssa_1050 = imov ssa_1049.w vec1 32 ssa_1051 = ushr ssa_57, ssa_56 vec1 32 ssa_1052 = imul ssa_1050, ssa_55 vec1 32 ssa_1053 = iadd ssa_1052, ssa_1051 vec1 32 ssa_1054 = iadd ssa_1053, ssa_54 vec1 32 ssa_1055 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1056 = intrinsic vulkan_resource_index (ssa_1055) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1057 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1058 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1059 = ishl ssa_1054, ssa_1058 vec1 32 ssa_1060 = iadd ssa_1057, ssa_1059 vec1 32 ssa_1061 = intrinsic load_ssbo (ssa_1056, ssa_1060) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1062 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1063 = intrinsic load_deref (ssa_1062) (0) /* access=0 */ vec4 32 ssa_1064 = vec4 ssa_1061, 
ssa_1063.y, ssa_1063.z, ssa_1063.w vec1 32 ssa_1065 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_1065, ssa_1064) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1066 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1067 = intrinsic load_deref (ssa_1066) (0) /* access=0 */ vec1 32 ssa_1068 = imov ssa_1067.z vec1 32 ssa_1069 = ushr ssa_53, ssa_52 vec1 32 ssa_1070 = imul ssa_1068, ssa_51 vec1 32 ssa_1071 = iadd ssa_1070, ssa_1069 vec1 32 ssa_1072 = iadd ssa_1071, ssa_50 vec1 32 ssa_1073 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1074 = intrinsic vulkan_resource_index (ssa_1073) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_1075 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1076 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1077 = ishl ssa_1072, ssa_1076 vec1 32 ssa_1078 = iadd ssa_1075, ssa_1077 vec1 32 ssa_1079 = intrinsic load_ssbo (ssa_1074, ssa_1078) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1080 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1081 = intrinsic load_deref (ssa_1080) (0) /* access=0 */ vec4 32 ssa_1082 = vec4 ssa_1081.x, ssa_1079, ssa_1081.z, ssa_1081.w vec1 32 ssa_1083 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1083, ssa_1082) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1084 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1085 = intrinsic load_deref (ssa_1084) (0) /* access=0 */ vec3 32 ssa_1086 = vec3 ssa_1085.x, ssa_1085.x, ssa_1085.x vec3 32 ssa_1087 = ushr ssa_1086, ssa_49 vec1 32 ssa_1088 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1089 = intrinsic load_deref (ssa_1088) (0) /* access=0 */ vec4 32 ssa_1090 = vec4 ssa_1089.x, ssa_1087.x, ssa_1087.y, ssa_1087.z vec1 32 ssa_1091 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_1091, ssa_1090) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1092 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1093 = intrinsic load_deref (ssa_1092) (0) /* access=0 */ vec4 32 ssa_1094 = iand ssa_1093, ssa_48 vec1 32 ssa_1095 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1095, ssa_1094) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1096 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1097 = intrinsic load_deref (ssa_1096) (0) /* access=0 */ vec4 32 ssa_1098 = u2f32 ssa_1097 vec1 32 ssa_1099 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1099, ssa_1098) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1100 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1101 = intrinsic load_deref (ssa_1100) (0) /* access=0 */ vec4 32 ssa_1102 = fmul ssa_1101, ssa_47 vec1 32 ssa_1103 = deref_var &o2 (shader_out vec4) intrinsic store_deref (ssa_1103, ssa_1102) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1104 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1105 = intrinsic load_deref (ssa_1104) (0) /* access=0 */ vec1 32 ssa_1106 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1107 = intrinsic load_deref (ssa_1106) (0) /* access=0 */ vec1 32 ssa_1108 = fdot4 ssa_1105, ssa_1107 vec1 32 ssa_1109 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1110 = intrinsic load_deref (ssa_1109) (0) /* access=0 */ vec4 32 ssa_1111 = vec4 ssa_1108, ssa_1110.y, ssa_1110.z, ssa_1110.w vec1 32 ssa_1112 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1112, ssa_1111) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1113 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1114 = intrinsic load_deref (ssa_1113) (0) /* access=0 */ vec1 32 ssa_1115 = deref_var &r10 (shader_temp 
vec4) vec4 32 ssa_1116 = intrinsic load_deref (ssa_1115) (0) /* access=0 */ vec1 32 ssa_1117 = fdot4 ssa_1114, ssa_1116 vec1 32 ssa_1118 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1119 = intrinsic load_deref (ssa_1118) (0) /* access=0 */ vec4 32 ssa_1120 = vec4 ssa_1119.x, ssa_1117, ssa_1119.z, ssa_1119.w vec1 32 ssa_1121 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1121, ssa_1120) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1122 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1123 = intrinsic load_deref (ssa_1122) (0) /* access=0 */ vec1 32 ssa_1124 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_1125 = intrinsic load_deref (ssa_1124) (0) /* access=0 */ vec1 32 ssa_1126 = fdot4 ssa_1123, ssa_1125 vec1 32 ssa_1127 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1128 = intrinsic load_deref (ssa_1127) (0) /* access=0 */ vec4 32 ssa_1129 = vec4 ssa_1128.x, ssa_1128.y, ssa_1126, ssa_1128.w vec1 32 ssa_1130 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1130, ssa_1129) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1131 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1132 = intrinsic load_deref (ssa_1131) (0) /* access=0 */ vec4 32 ssa_1133 = vec4 ssa_1132.x, ssa_1132.y, ssa_1132.z, ssa_46 vec1 32 ssa_1134 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1134, ssa_1133) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1135 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1136 = intrinsic load_deref (ssa_1135) (0) /* access=0 */ vec3 32 ssa_1137 = vec3 ssa_1136.x, ssa_1136.x, ssa_1136.x vec3 32 ssa_1138 = ushr ssa_1137, ssa_45 vec1 32 ssa_1139 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1140 = intrinsic load_deref (ssa_1139) (0) /* access=0 */ vec4 32 ssa_1141 = vec4 ssa_1140.x, ssa_1138.x, ssa_1138.y, ssa_1138.z vec1 32 ssa_1142 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1142, ssa_1141) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1143 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1144 = intrinsic load_deref (ssa_1143) (0) /* access=0 */ vec4 32 ssa_1145 = iand ssa_1144, ssa_44 vec1 32 ssa_1146 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1146, ssa_1145) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1147 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1148 = intrinsic load_deref (ssa_1147) (0) /* access=0 */ vec4 32 ssa_1149 = u2f32 ssa_1148 vec1 32 ssa_1150 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1150, ssa_1149) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1151 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1152 = intrinsic load_deref (ssa_1151) (0) /* access=0 */ vec4 32 ssa_1153 = ffma ssa_1152, ssa_43, ssa_42 vec1 32 ssa_1154 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1154, ssa_1153) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1155 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1156 = intrinsic load_deref (ssa_1155) (0) /* access=0 */ vec4 32 ssa_1157 = fmin ssa_1156, ssa_41 vec1 32 ssa_1158 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1158, ssa_1157) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1159 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1160 = intrinsic load_deref (ssa_1159) (0) /* access=0 */ vec2 32 ssa_1161 = vec2 ssa_1160.y, ssa_1160.w vec1 32 ssa_1162 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1163 = intrinsic load_deref (ssa_1162) (0) /* access=0 */ vec2 32 ssa_1164 = vec2 ssa_1163.x, ssa_1163.z vec2 32 ssa_1165 = fadd ssa_1161, ssa_1164 vec1 32 ssa_1166 = deref_var &r2 
(shader_temp vec4) vec4 32 ssa_1167 = intrinsic load_deref (ssa_1166) (0) /* access=0 */ vec4 32 ssa_1168 = vec4 ssa_1165.x, ssa_1165.y, ssa_1167.z, ssa_1167.w vec1 32 ssa_1169 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1169, ssa_1168) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1170 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1171 = intrinsic load_deref (ssa_1170) (0) /* access=0 */ vec2 32 ssa_1172 = vec2 ssa_1171.y, ssa_1171.w vec2 32 ssa_1173 = fneg ssa_1172 vec1 32 ssa_1174 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1175 = intrinsic load_deref (ssa_1174) (0) /* access=0 */ vec2 32 ssa_1176 = vec2 ssa_1175.x, ssa_1175.z vec2 32 ssa_1177 = fadd ssa_1173, ssa_1176 vec1 32 ssa_1178 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1179 = intrinsic load_deref (ssa_1178) (0) /* access=0 */ vec4 32 ssa_1180 = vec4 ssa_1179.x, ssa_1179.y, ssa_1177.x, ssa_1177.y vec1 32 ssa_1181 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1181, ssa_1180) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1182 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1183 = intrinsic load_deref (ssa_1182) (0) /* access=0 */ vec2 32 ssa_1184 = vec2 ssa_1183.y, ssa_1183.w vec2 32 ssa_1185 = fmul ssa_1184, ssa_40 vec1 32 ssa_1186 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1187 = intrinsic load_deref (ssa_1186) (0) /* access=0 */ vec4 32 ssa_1188 = vec4 ssa_1187.x, ssa_1185.x, ssa_1185.y, ssa_1187.w vec1 32 ssa_1189 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1189, ssa_1188) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1190 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1191 = intrinsic load_deref (ssa_1190) (0) /* access=0 */ vec2 32 ssa_1192 = vec2 ssa_1191.x, ssa_1191.z vec2 32 ssa_1193 = fmul ssa_1192, ssa_39 vec1 32 ssa_1194 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1195 = intrinsic load_deref (ssa_1194) (0) /* access=0 */ vec4 32 ssa_1196 = vec4 ssa_1193.x, ssa_1193.y, ssa_1195.z, ssa_1195.w vec1 32 ssa_1197 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1197, ssa_1196) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1198 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1199 = intrinsic load_deref (ssa_1198) (0) /* access=0 */ vec1 32 ssa_1200 = imov ssa_1199.y vec1 32 ssa_1201 = fabs ssa_1200 vec1 32 ssa_1202 = fneg ssa_1201 vec1 32 ssa_1203 = fadd ssa_1202, ssa_38 vec1 32 ssa_1204 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1205 = intrinsic load_deref (ssa_1204) (0) /* access=0 */ vec4 32 ssa_1206 = vec4 ssa_1203, ssa_1205.y, ssa_1205.z, ssa_1205.w vec1 32 ssa_1207 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1207, ssa_1206) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1208 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1209 = intrinsic load_deref (ssa_1208) (0) /* access=0 */ vec1 32 ssa_1210 = imov ssa_1209.z vec1 32 ssa_1211 = fabs ssa_1210 vec1 32 ssa_1212 = fneg ssa_1211 vec1 32 ssa_1213 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1214 = intrinsic load_deref (ssa_1213) (0) /* access=0 */ vec1 32 ssa_1215 = imov ssa_1214.x vec1 32 ssa_1216 = fadd ssa_1212, ssa_1215 vec1 32 ssa_1217 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1218 = intrinsic load_deref (ssa_1217) (0) /* access=0 */ vec4 32 ssa_1219 = vec4 ssa_1218.x, ssa_1218.y, ssa_1218.z, ssa_1216 vec1 32 ssa_1220 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1220, ssa_1219) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1221 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1222 = intrinsic load_deref 
(ssa_1221) (0) /* access=0 */ vec3 32 ssa_1223 = vec3 ssa_1222.y, ssa_1222.z, ssa_1222.w vec1 32 ssa_1224 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1225 = intrinsic load_deref (ssa_1224) (0) /* access=0 */ vec3 32 ssa_1226 = vec3 ssa_1225.y, ssa_1225.z, ssa_1225.w vec1 32 ssa_1227 = fdot3 ssa_1223, ssa_1226 vec1 32 ssa_1228 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1229 = intrinsic load_deref (ssa_1228) (0) /* access=0 */ vec4 32 ssa_1230 = vec4 ssa_1227, ssa_1229.y, ssa_1229.z, ssa_1229.w vec1 32 ssa_1231 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1231, ssa_1230) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1232 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1233 = intrinsic load_deref (ssa_1232) (0) /* access=0 */ vec1 32 ssa_1234 = imov ssa_1233.x vec1 32 ssa_1235 = frsq ssa_1234 vec1 32 ssa_1236 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1237 = intrinsic load_deref (ssa_1236) (0) /* access=0 */ vec4 32 ssa_1238 = vec4 ssa_1235, ssa_1237.y, ssa_1237.z, ssa_1237.w vec1 32 ssa_1239 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1239, ssa_1238) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1240 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1241 = intrinsic load_deref (ssa_1240) (0) /* access=0 */ vec3 32 ssa_1242 = vec3 ssa_1241.x, ssa_1241.x, ssa_1241.x
= deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1024, ssa_1023) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1025 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1026 = intrinsic load_deref (ssa_1025) (0) /* access=0 */ vec3 32 ssa_1027 = vec3 ssa_1026.y, ssa_1026.z, ssa_1026.x vec1 32 ssa_1028 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1029 = intrinsic load_deref (ssa_1028) (0) /* access=0 */ vec3 32 ssa_1030 = vec3 ssa_1029.x, ssa_1029.y, ssa_1029.z vec1 32 ssa_1031 = fdot3 ssa_1027, ssa_1030 vec1 32 ssa_1032 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1033 = intrinsic load_deref (ssa_1032) (0) /* access=0 */ vec4 32 ssa_1034 = vec4 ssa_1033.x, ssa_1031, ssa_1033.z, ssa_1033.w vec1 32 ssa_1035 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1035, ssa_1034) (15, 0) /* wrmask=xyzw */ /* access=0 */
= load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1033 = ishl ssa_1028, ssa_1032 vec1 32 ssa_1034 = iadd ssa_1031, ssa_1033 vec1 32 ssa_1035 = intrinsic load_ssbo (ssa_1030, ssa_1034) (0, 4, 0) /* access=0 */ vec1 32 ssa_1036 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1037 = intrinsic load_deref (ssa_1036) (0) /* access=0 */ vec4 32 ssa_1038 = vec4 ssa_1037.x, ssa_1037.y, ssa_1037.z,
vec1 32 ssa_1036 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1037 = intrinsic load_deref (ssa_1036) (0) /* access=0 */
vec1 32 ssa_1874 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_331 = txf ssa_330 (texture_deref), ssa_329 (coord), ssa_1874 (lod), 0 (sampler), /* succs: block_18 */ } else { block block_17: /* preds: block_15 */ /* succs: block_18 */ } block block_18: /* preds: block_16 block_17 */ vec4 32 ssa_1890 = phi block_16: ssa_331, block_17:
ssa_790.x, ssa_788, ssa_790 ssa_790.w
/* z) , .
vec1 32 ssa_706 = intrinsic vulkan_resource_index (ssa_705) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_707 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_708 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_709 = iadd ssa_707, ssa_708 vec1 32 ssa_710 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_711 = iadd ssa_709, ssa_710 vec1 32 ssa_712 = intrinsic load_ubo (ssa_706, ssa_711) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_713 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_714 = iadd ssa_709, ssa_713 vec1 32 ssa_715 = intrinsic load_ubo (ssa_706, ssa_714) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_716 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_717 = iadd ssa_709, ssa_716 vec1 32 ssa_718 = intrinsic load_ubo (ssa_706, ssa_717) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_719 = vec3 ssa_712, ssa_715, ssa_718 vec1 32 ssa_720 = fdot3 ssa_704, ssa_719 vec1 32 ssa_721 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_722 = intrinsic load_deref (ssa_721) (0) /* access=0 */ vec4 32 ssa_723 = vec4 ssa_722.x, ssa_720, ssa_722.z, ssa_722.w vec1 32 ssa_724 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_724, ssa_723) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_725 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_726 = intrinsic load_deref (ssa_725) (0) /* access=0 */ vec3 32 ssa_727 = vec3 ssa_726.x, ssa_726.y, ssa_726.z vec1 32 ssa_728 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_729 = intrinsic vulkan_resource_index (ssa_728) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_730 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_731 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_732 = iadd ssa_730, ssa_731 vec1 32 ssa_733 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_734 = iadd ssa_732, ssa_733 vec1 32 ssa_735 = intrinsic load_ubo (ssa_729, ssa_734) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_736 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_737 = iadd ssa_732, ssa_736 vec1 32 ssa_738 = intrinsic load_ubo (ssa_729, ssa_737) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_739 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_740 = iadd ssa_732, ssa_739 vec1 32 ssa_741 = intrinsic load_ubo (ssa_729, ssa_740) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_742 = vec3 ssa_735, ssa_738, ssa_741 vec1 32 ssa_743 = fdot3 ssa_727, ssa_742 vec1 32 ssa_744 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_745 = intrinsic load_deref (ssa_744) (0) /* access=0 */ vec4 32 ssa_746 = vec4 ssa_745.x, ssa_745.y, ssa_743, ssa_745.w vec1 32 ssa_747 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_747, ssa_746) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_748 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_749 = intrinsic load_deref (ssa_748) (0) /* access=0 */ vec1 32 ssa_750 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_751 = intrinsic load_deref (ssa_750) (0) /* access=0 */ vec1 32 ssa_752 = fdot4 ssa_749, ssa_751 vec1 32 ssa_753 = deref_var &o0 (shader_out vec4) vec4 32 ssa_754 = intrinsic load_deref (ssa_753) (0) /* access=0 */ vec4 32 ssa_755 = vec4 ssa_754.x, ssa_752, ssa_754.z, ssa_754.w vec1 32 ssa_756 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_756, ssa_755) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_757 = deref_var &r7
(shader_temp vec4) vec4 32 ssa_758 = intrinsic load_deref (ssa_757) (0) /* access=0 */ vec1 32 ssa_759 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_760 = intrinsic vulkan_resource_index (ssa_759) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_761 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_762 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_763 = iadd ssa_761, ssa_762 vec1 32 ssa_764 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_765 = iadd ssa_763, ssa_764 vec1 32 ssa_766 = intrinsic load_ubo (ssa_760, ssa_765) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_767 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_768 = iadd ssa_763, ssa_767 vec1 32 ssa_769 = intrinsic load_ubo (ssa_760, ssa_768) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_770 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_771 = iadd ssa_763, ssa_770 vec1 32 ssa_772 = intrinsic load_ubo (ssa_760, ssa_771) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_773 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_774 = iadd ssa_763, ssa_773 vec1 32 ssa_775 = intrinsic load_ubo (ssa_760, ssa_774) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_776 = vec4 ssa_766, ssa_769, ssa_772, ssa_775 vec1 32 ssa_777 = fdot4 ssa_758, ssa_776 vec1 32 ssa_778 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_779 = intrinsic load_deref (ssa_778) (0) /* access=0 */ vec4 32 ssa_780 = vec4 ssa_779.x, ssa_779.y, ssa_779.z, ssa_777 vec1 32 ssa_781 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_781, ssa_780) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_782 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_783 = intrinsic load_deref (ssa_782) (0) /* access=0 */ vec1 32 ssa_784 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_785 = intrinsic vulkan_resource_index (ssa_784) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_786 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_787 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_788 = iadd ssa_786, ssa_787 vec1 32 ssa_789 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_790 = iadd ssa_788, ssa_789 vec1 32 ssa_791 = intrinsic load_ubo (ssa_785, ssa_790) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_792 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_793 = iadd ssa_788, ssa_792 vec1 32 ssa_794 = intrinsic load_ubo (ssa_785, ssa_793) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_795 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_796 = iadd ssa_788, ssa_795 vec1 32 ssa_797 = intrinsic load_ubo (ssa_785, ssa_796) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_798 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_799 = iadd ssa_788, ssa_798 vec1 32 ssa_800 = intrinsic load_ubo ( ssa_785, ssa_799ssa_1929ssa_1035 ) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1882 = imov ssa_1890 access=0 */vec1 32 ssa_792vec1 32 ssa_1243 = deref_var vec3 32 ssa_1038 = vec3 ssa_1037.y, ssa_1037.z, ssa_1037. 
= deref_var &r1 (shader_temp vec4) vec4 32 ssa_1244 = intrinsic load_deref (ssa_1243) (0 vec1 32 ssa_336 = deref_var &r8) vec4 32 ssa_801 = vec4 ssa_791, ssa_794, ssa_797x vec1 32 ssa_1039 = deref_var vec1 32 ssa_1039 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1039, ssa_1038) (15, 0) /* (function_temp vec4) intrinsic store_deref (ssa_336, ssa_1882) (15, 0 /*&r7 (shader_temp vec4) &o0 wrmask=) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_337, ssa_800 (shader_out vec4) x access=0 = deref_var vec4 32 ssa_1040 yzw */&r8 = intrinsic load_deref (ssa_1039) (0) /* access=0 */ vec3 32 ssa_1041 = vec3 ssa_1040.x, ssa_1040.y, ssa_1040.z vec1 32 ssa_1042 = fdot3 ssa_1038, ssa_1041 vec1 32 ssa_1043 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1044 = intrinsic load_deref (ssa_1043) (0) /* access=0 */ vec4 32 ssa_1045 = vec4 ssa_1044.x, ssa_1044.y, ssa_1042, ssa_1044.w vec1 32 ssa_1046 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1046, ssa_1045) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1047 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1048 = intrinsic load_deref (ssa_1047) (0) /* access=0 */ vec1 32 ssa_1049 = imov ssa_1048.x vec1 32 ssa_1050 = fabs ssa_1049 vec1 32 ssa_1051 = fneg ssa_1050 vec1 32 ssa_1052 = fadd ssa_1051, ssa_51 vec1 32 ssa_1053 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1054 = intrinsic load_deref (ssa_1053) (0) /* access=0 */ vec4 32 ssa_1055 = vec4 ssa_1054.x, ssa_1054.y, ssa_1054.z, ssa_1052 vec1 32 ssa_1056 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1056, ssa_1055) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1057 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1058 = intrinsic load_deref (ssa_1057) (0) /* access=0 */ vec1 32 ssa_1059 = imov ssa_1058.y vec1 32 ssa_1060 = fabs ssa_1059 vec1 32 ssa_1061 = fneg ssa_1060 vec1 32 ssa_1062 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1063 = intrinsic load_deref (ssa_1062) (0) /* access=0 */ vec1 32 ssa_1064 = imov ssa_1063.w vec1 32 ssa_1065 = fadd ssa_1061, ssa_1064 vec1 32 ssa_1066 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1067 = intrinsic load_deref (ssa_1066) (0) /* access=0 */ vec4 32 ssa_1068 = vec4 ssa_1067.x, ssa_1067.y, ssa_1065, ssa_1067.w vec1 32 ssa_1069 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1069, ssa_1068) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1070 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1071 = intrinsic load_deref (ssa_1070) (0) /* access=0 */ vec3 32 ssa_1072 = vec3 ssa_1071.x, ssa_1071.y, ssa_1071.z vec1 32 ssa_1073 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1074 = intrinsic load_deref (ssa_1073) (0) /* access=0 */ vec3 32 ssa_1075 = vec3 ssa_1074.x, ssa_1074.y, ssa_1074.z vec1 32 ssa_1076 = fdot3 ssa_1072, ssa_1075 vec1 32 ssa_1077 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1078 = intrinsic load_deref (ssa_1077) (0) /* access=0 */ vec4 32 ssa_1079 = vec4 ssa_1078.x, ssa_1078.y, ssa_1078.z, ssa_1076 vec1 32 ssa_1080 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1080, ssa_1079) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1081 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1082 = intrinsic load_deref (ssa_1081) (0) /* access=0 */ vec1 32 ssa_1083 = imov ssa_1082.w vec1 32 ssa_1084 = frsq ssa_1083 vec1 32 ssa_1085 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1086 = intrinsic load_deref (ssa_1085) (0) /* access=0 */ vec4 32 ssa_1087 = vec4 ssa_1086.x, ssa_1086.y, ssa_1086.z, ssa_1084 vec1 32 ssa_1088 = deref_var &r1 (shader_temp vec4) intrinsic store_deref 
(ssa_1088, ssa_1087) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1089 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1090 = intrinsic load_deref (ssa_1089) (0) /* access=0 */ vec3 32 ssa_1091 = vec3 ssa_1090.w, ssa_1090.w, ssa_1090.w vec1 32 ssa_1092 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1093 = intrinsic load_deref (ssa_1092) (0) /* access=0 */ vec3 32 ssa_1094 = vec3 ssa_1093.x, ssa_1093.y, ssa_1093.z vec3 32 ssa_1095 = fmul ssa_1091, ssa_1094 vec1 32 ssa_1096 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1097 = intrinsic load_deref (ssa_1096) (0) /* access=0 */ vec4 32 ssa_1098 = vec4 ssa_1095.x, ssa_1095.y, ssa_1095.z, ssa_1097.w vec1 32 ssa_1099 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1099, ssa_1098) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1100 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1101 = intrinsic load_deref (ssa_1100) (0) /* access=0 */ vec1 32 ssa_1102 = imov ssa_1101.x vec1 32 ssa_1103 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1104 = intrinsic load_deref (ssa_1103) (0) /* access=0 */ vec1 32 ssa_1105 = imov ssa_1104.z vec1 32 ssa_1106 = fmul ssa_1102, ssa_1105 vec1 32 ssa_1107 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1108 = intrinsic load_deref (ssa_1107) (0) /* access=0 */ vec4 32 ssa_1109 = vec4 ssa_1108.x, ssa_1108.y, ssa_1108.z, ssa_1106 vec1 32 ssa_1110 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1110, ssa_1109) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1111 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1112 = intrinsic load_deref (ssa_1111) (0) /* access=0 */ vec3 32 ssa_1113 = vec3 ssa_1112.z, ssa_1112.x, ssa_1112.y vec1 32 ssa_1114 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1115 = intrinsic load_deref (ssa_1114) (0) /* access=0 */ vec3 32 ssa_1116 = vec3 ssa_1115.w, ssa_1115.x, ssa_1115.y vec3 32 ssa_1117 = fmul ssa_1113, ssa_1116 vec1 32 ssa_1118 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1119 = intrinsic load_deref (ssa_1118) (0) /* access=0 */ vec4 32 ssa_1120 = vec4 ssa_1117.x, ssa_1117.y, ssa_1119.z, ssa_1117.z vec1 32 ssa_1121 = deref_var &r8 (shader_temp vec4) intrinsic store_deref (ssa_1121, ssa_1120) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1122 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1123 = intrinsic load_deref (ssa_1122) (0) /* access=0 */ vec3 32 ssa_1124 = vec3 ssa_1123.y, ssa_1123.w, ssa_1123.x vec1 32 ssa_1125 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1126 = intrinsic load_deref (ssa_1125) (0) /* access=0 */ vec3 32 ssa_1127 = vec3 ssa_1126.x, ssa_1126.y, ssa_1126.z vec1 32 ssa_1128 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1129 = intrinsic load_deref (ssa_1128) (0) /* access=0 */ vec3 32 ssa_1130 = vec3 ssa_1129.x, ssa_1129.y, ssa_1129.w vec3 32 ssa_1131 = fneg ssa_1130 vec3 32 ssa_1132 = ffma ssa_1124, ssa_1127, ssa_1131 vec1 32 ssa_1133 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1134 = intrinsic load_deref (ssa_1133) (0) /* access=0 */ vec4 32 ssa_1135 = vec4 ssa_1132.x, ssa_1132.y, ssa_1132.z, ssa_1134.w vec1 32 ssa_1136 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1136, ssa_1135) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1137 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1138 = intrinsic load_deref (ssa_1137) (0) /* access=0 */ vec3 32 ssa_1139 = vec3 ssa_1138.z, ssa_1138.z, ssa_1138.z vec1 32 ssa_1140 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1141 = intrinsic load_deref (ssa_1140) (0) /* access=0 */ vec3 32 ssa_1142 = vec3 ssa_1141.x, ssa_1141.y, ssa_1141.z vec3 32 ssa_1143 = 
fmul ssa_1139, ssa_1142 vec1 32 ssa_1144 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1145 = intrinsic load_deref (ssa_1144) (0) /* access=0 */ vec4 32 ssa_1146 = vec4 ssa_1143.x, ssa_1143.y, ssa_1143.z, ssa_1145.w vec1 32 ssa_1147 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1147, ssa_1146) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1148 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1149 = intrinsic load_deref (ssa_1148) (0) /* access=0 */ vec3 32 ssa_1150 = vec3 ssa_1149.x, ssa_1149.y, ssa_1149.z vec1 32 ssa_1151 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1152 = intrinsic load_deref (ssa_1151) (0) /* access=0 */ vec3 32 ssa_1153 = vec3 ssa_1152.x, ssa_1152.y, ssa_1152.z vec1 32 ssa_1154 = fdot3 ssa_1150, ssa_1153
vec4 32 ssa_338 = intrinsic load_deref (ssa_337) (0) /* access=0 */ vec1 32 ssa_339 = imov ssa_338.w vec1 32 ssa_340 = deref_var &r1 (function_temp vec4) vec4 32 ssa_341 = intrinsic load_deref (ssa_340) (0) /* access=0 */ vec4 32 ssa_342 = vec4 ssa_341.x, ssa_341.y, ssa_339, ssa_341.w vec1 32 ssa_343 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_343, ssa_342) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_344 = deref_var &r1 (function_temp vec4) vec4 32 ssa_345 = intrinsic load_deref (ssa_344) (0) /* access=0 */ vec3 32 ssa_346 = vec3 ssa_345.x, ssa_345.y, ssa_345.z vec1 32 ssa_347 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_348 = intrinsic vulkan_resource_index (ssa_347) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_349 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_350 = load_const (0x00000310 /* 0.000000 */) vec1 32 ssa_351 = iadd ssa_349, ssa_350 vec1 32 ssa_352 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_353 = iadd ssa_351, ssa_352 vec1 32 ssa_354 = intrinsic load_ubo (ssa_348, ssa_353) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_355 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_356 = iadd ssa_351, ssa_355 vec1 32 ssa_357 = intrinsic load_ubo (ssa_348, ssa_356) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_358 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_359 = iadd ssa_351, ssa_358 vec1 32 ssa_360 = intrinsic load_ubo (ssa_348, ssa_359) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_361 = vec3 ssa_354, ssa_357, ssa_360 vec3 32 ssa_362 = fneg ssa_361 vec3 32 ssa_363 = fadd ssa_346, ssa_362 vec1 32 ssa_364 = deref_var &r9 (function_temp vec4) vec4 32 ssa_365 = intrinsic load_deref (ssa_364) (0) /* access=0 */ vec4 32 ssa_366 = vec4 ssa_363.x, ssa_363.y, ssa_363.z, ssa_365.w vec1 32 ssa_367 = deref_var &r9 (function_temp vec4) intrinsic store_deref (ssa_367, ssa_366) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_368 = deref_var &r9 (function_temp vec4) vec4 32 ssa_369 = intrinsic load_deref (ssa_368) (0) /* access=0 */ vec4 32 ssa_370 = vec4 ssa_369.x, ssa_369.y, ssa_369.z, ssa_75 vec1 32 ssa_371 = deref_var &r9 (function_temp vec4) intrinsic store_deref (ssa_371, ssa_370) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_372 = deref_var &r9 (function_temp vec4) vec4 32 ssa_373 = intrinsic load_deref (ssa_372) (0) /* access=0 */ vec1 32 ssa_374 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_375 = intrinsic vulkan_resource_index (ssa_374) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_376 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_377 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_378 = iadd ssa_376, ssa_377 vec1 32 ssa_379 = load_const (0x00000000 /*
0.000000 */) vec1 32 ssa_380 = iadd ssa_378, ssa_379 vec1 32 ssa_381 = intrinsic load_ubo (ssa_375, ssa_380) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_382 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_383 = iadd ssa_378, ssa_382 vec1 32 ssa_384 = intrinsic load_ubo (ssa_375, ssa_383) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_385 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_386 = iadd ssa_378, ssa_385 vec1 32 ssa_387 = intrinsic load_ubo (ssa_375, ssa_386) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_388 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_389 = iadd ssa_378, ssa_388 vec1 32 ssa_390 = intrinsic load_ubo (ssa_375, ssa_389) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_391 = vec4 ssa_381, ssa_384, ssa_387, ssa_390 vec1 32 ssa_392 = fdot4 ssa_373, ssa_391 vec1 32 ssa_393 = deref_var &r10 (function_temp vec4) vec4 32 ssa_394 = intrinsic load_deref (ssa_393) (0) /* access=0 */ vec4 32 ssa_395 = vec4 ssa_394.x, ssa_394.y, ssa_394.z, ssa_392 vec1 32 ssa_396 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_396, ssa_395) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_397 = deref_var &r4 (function_temp vec4) vec4 32 ssa_398 = intrinsic load_deref (ssa_397) (0) /* access=0 */ vec1 32 ssa_399 = imov ssa_398.x vec1 32 ssa_400 = deref_var &r1 (function_temp vec4) vec4 32 ssa_401 = intrinsic load_deref (ssa_400) (0) /* access=0 */ vec4 32 ssa_402 = vec4 ssa_399, ssa_401.y, ssa_401.z, ssa_401.w vec1 32 ssa_403 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_403, ssa_402) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_404 = deref_var &r7 (function_temp vec4) vec4 32 ssa_405 = intrinsic load_deref (ssa_404) (0) /* access=0 */ vec1 32 ssa_406 = imov ssa_405.x vec1 32 ssa_407 = deref_var &r1 (function_temp vec4) vec4 32 ssa_408 = intrinsic load_deref (ssa_407) (0) /* access=0 */ vec4 32 ssa_409 = vec4 ssa_408.x, ssa_406, ssa_408.z, ssa_408.w vec1 32 ssa_410 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_410, ssa_409) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_411 = deref_var &r8 (function_temp vec4) vec4 32 ssa_412 = intrinsic load_deref (ssa_411) (0) /* access=0 */ vec1 32 ssa_413 = imov ssa_412.x vec1 32 ssa_414 = deref_var &r1 (function_temp vec4) vec4 32 ssa_415 = intrinsic load_deref (ssa_414) (0) /* access=0 */ vec4 32 ssa_416 = vec4 ssa_415.x, ssa_415.y, ssa_413, ssa_415.w vec1 32 ssa_417 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_417, ssa_416) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_418 = deref_var &r1 (function_temp vec4) vec4 32 ssa_419 = intrinsic load_deref (ssa_418) (0) /* access=0 */ vec3 32 ssa_420 = vec3 ssa_419.x, ssa_419.y, ssa_419.z vec1 32 ssa_421 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_422 = intrinsic vulkan_resource_index (ssa_421) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_423 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_424 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_425 = iadd ssa_423, ssa_424 vec1 32 ssa_426 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_427 = iadd ssa_425, ssa_426 vec1 32 ssa_428 = intrinsic load_ubo (ssa_422, ssa_427) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_429 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_430 = iadd ssa_425, ssa_429 vec1 32 ssa_431 = intrinsic load_ubo (ssa_422, ssa_430) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_432 = load_const (0x00000008 /* 
0.000000 */) vec1 32 ssa_433 = iadd ssa_425, ssa_432 vec1 32 ssa_434 = intrinsic load_ubo (ssa_422, ssa_433) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_435 = vec3 ssa_428, ssa_431, ssa_434 vec1 32 ssa_436 = fdot3 ssa_420, ssa_435 vec1 32 ssa_437 = deref_var &r10 (function_temp vec4) vec4 32 ssa_438 = intrinsic load_deref (ssa_437) (0) /* access=0 */ vec4 32 ssa_439 = vec4 ssa_436, ssa_438.y, ssa_438.z, ssa_438.w vec1 32 ssa_440 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_440, ssa_439) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_441 = deref_var &r4 (function_temp vec4) vec4 32 ssa_442 = intrinsic load_deref (ssa_441) (0) /* access=0 */ vec1 32 ssa_443 = imov ssa_442.y vec1 32 ssa_444 = deref_var &r11 (function_temp vec4) vec4 32 ssa_445 = intrinsic load_deref (ssa_444) (0) /* access=0 */ vec4 32 ssa_446 = vec4 ssa_443, ssa_445.y, ssa_445.z, ssa_445.w vec1 32 ssa_447 = deref_var &r11 (function_temp vec4) intrinsic store_deref (ssa_447, ssa_446) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_448 = deref_var &r7 (function_temp vec4)
vec1 32 ssa_1155 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1156 = intrinsic load_deref (ssa_1155) (0) /* access=0 */ vec4 32 ssa_1157 = vec4 ssa_1154, ssa_1156.y, ssa_1156.z, ssa_1156.w
vec3 32 ssa_1245 = vec3 ssa_1244.y, ssa_1244.z, ssa_1244.w
intrinsic store_deref (ssa_792, ssa_791) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_793 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_794 = intrinsic load_deref (ssa_793) (0) /* access=0 */ vec1 32 ssa_795 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_796 = intrinsic vulkan_resource_index (ssa_795) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_797 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_798 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_799 = iadd ssa_797, ssa_798 vec1 32 ssa_800 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_801 = iadd ssa_799, ssa_800 vec1 32 ssa_802 = intrinsic load_ubo (ssa_796, ssa_801) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_803 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_804 = iadd ssa_799, ssa_803 vec1 32 ssa_805 = intrinsic load_ubo (ssa_796, ssa_804) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_806 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_807 = iadd ssa_799, ssa_806 vec1 32 ssa_808 = intrinsic load_ubo (ssa_796, ssa_807) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_809 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_810 = iadd ssa_799, ssa_809 vec1 32 ssa_811 = intrinsic load_ubo (ssa_796, ssa_810) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_812 = vec4 ssa_802, ssa_805, ssa_808, ssa_811 vec1 32 ssa_813 = fdot4 ssa_794, ssa_812 vec1 32 ssa_814 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_815 = intrinsic load_deref (ssa_814) (0) /* access=0 */ vec4 32 ssa_816 = vec4 ssa_815.x, ssa_815.y, ssa_815.z, ssa_813 vec1 32 ssa_817 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_817, ssa_816) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_818 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_819 = intrinsic load_deref (ssa_818) (0) /* access=0 */ vec1 32 ssa_820 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_821 = intrinsic vulkan_resource_index (ssa_820) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_822 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_823 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_824 = iadd ssa_822,
ssa_823 vec1 32 ssa_825 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_826 = iadd ssa_824, ssa_825 vec1 32 ssa_827 = intrinsic load_ubo (ssa_821, ssa_826) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_828 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_829 = iadd ssa_824, ssa_828 vec1 32 ssa_830 = intrinsic load_ubo (ssa_821, ssa_829) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_831 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_832 = iadd ssa_824, ssa_831 vec1 32 ssa_833 = intrinsic load_ubo (ssa_821, ssa_832) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_834 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_835 = iadd ssa_824, ssa_834 vec1 32 ssa_836 = intrinsic load_ubo (ssa_821, ssa_835) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_837 = vec4 ssa_827, ssa_830, ssa_833, ssa_836 vec1 32 ssa_838 = fdot4 ssa_819, ssa_837 vec1 32 ssa_839 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_840 = intrinsic load_deref (ssa_839) (0) /* access=0 */ vec4 32 ssa_841 = vec4 ssa_840.x, ssa_840.y, ssa_840.z, ssa_838 vec1 32 ssa_842 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_842, ssa_841) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_843 = deref_var &r15 (shader_temp vec4) vec4 32 ssa_844 = intrinsic load_deref (ssa_843) (0) /* access=0 */ vec3 32 ssa_845 = vec3 ssa_844.x, ssa_844.y, ssa_844.z vec1 32 ssa_846 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_847 = intrinsic vulkan_resource_index (ssa_846) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_848 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_849 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_850 = iadd ssa_848, ssa_849 vec1 32 ssa_851 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_852 = iadd ssa_850, ssa_851 vec1 32 ssa_853 = intrinsic load_ubo (ssa_847, ssa_852) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_854 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_855 = iadd ssa_850, ssa_854 vec1 32 ssa_856 = intrinsic load_ubo (ssa_847, ssa_855) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_857 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_858 = iadd ssa_850, ssa_857 vec1 32 ssa_859 = intrinsic load_ubo (ssa_847, ssa_858) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_860 = vec3 ssa_853, ssa_856, ssa_859 vec1 32 ssa_861 = fdot3 ssa_845, ssa_860 vec1 32 ssa_862 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_863 = intrinsic load_deref (ssa_862) (0) /* access=0 */ vec4 32 ssa_864 = vec4 ssa_861, ssa_863.y, ssa_863.z, ssa_863.w vec1 32 ssa_865 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_865, ssa_864) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_866 = deref_var &r15 (shader_temp vec4) vec4 32 ssa_867 = intrinsic load_deref (ssa_866) (0) /* access=0 */ vec3 32 ssa_868 = vec3 ssa_867.x, ssa_867.y, ssa_867.z vec1 32 ssa_869 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_870 = intrinsic vulkan_resource_index (ssa_869) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_871 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_872 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_873 = iadd ssa_871, ssa_872 vec1 32 ssa_874 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_875 = iadd ssa_873, ssa_874 vec1 32 ssa_876 = intrinsic load_ubo (ssa_870, ssa_875) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_877 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_878 = iadd ssa_873, ssa_877 vec1 32 ssa_879 
= intrinsic load_ubo (ssa_870, ssa_878) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_880 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_881 = iadd ssa_873, ssa_880 vec1 32 ssa_882 = intrinsic load_ubo (ssa_870, ssa_881) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_883 = vec3 ssa_876, ssa_879, ssa_882 vec1 32 ssa_884 = fdot3 ssa_868, ssa_883 vec1 32 ssa_885 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_886 = intrinsic load_deref (ssa_885) (0) /* access=0 */ vec4 32 ssa_887 = vec4 ssa_884, ssa_886.y, ssa_886.z, ssa_886.w vec1 32 ssa_888 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_888, ssa_887) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_889 = deref_var &r16 (shader_temp vec4) vec4 32 ssa_890 = intrinsic load_deref (ssa_889) (0) /* access=0 */ vec3 32 ssa_891 = vec3 ssa_890.x, ssa_890.y, ssa_890.z vec1 32 ssa_892 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_893 = intrinsic vulkan_resource_index (ssa_892) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_894 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_895 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_896 = iadd ssa_894, ssa_895 vec1 32 ssa_897 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_898 = iadd ssa_896, ssa_897 vec1 32 ssa_899 = intrinsic load_ubo (ssa_893, ssa_898) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_900 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_901 = iadd ssa_896, ssa_900 vec1 32 ssa_902 = intrinsic load_ubo (ssa_893, ssa_901) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_903 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_904 = iadd ssa_896, ssa_903 vec1 32 ssa_905 = intrinsic load_ubo (ssa_893, ssa_904) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_906 = vec3 ssa_899, ssa_902, ssa_905 vec1 32 ssa_907 = fdot3 ssa_891, ssa_906
vec4 32 ssa_449 = intrinsic load_deref (ssa_448) (0) /* access=0 */ vec1 32 ssa_450 = imov ssa_449.y vec1 32 ssa_451 = deref_var
vec1 32 ssa_1158 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1158, ssa_1157) (15, 0) /* wrmask=xyzw */ /* access=0 */
vec1 32 ssa_802 = fdot4 ssa_783, ssa_801 vec1 32 ssa_803 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_804 = intrinsic load_deref (ssa_803) (0) /* access=0 */
vec3 32 ssa_1246 = fmul ssa_1242, ssa_1245
vec1 32 ssa_1040 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1041 = intrinsic load_deref (ssa_1040) (0) /* access=0 */
vec1 32 ssa_1247 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1248 = intrinsic load_deref (ssa_1247) (0) /* access=0 */ vec4 32 ssa_1249 = vec4 ssa_1248.x, ssa_1246.x, ssa_1246.y, ssa_1246.z vec1 32 ssa_1250 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1250, ssa_1249) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1251 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1252 = intrinsic load_deref (ssa_1251) (0) /* access=0 */ vec1 32 ssa_1253 = imov ssa_1252.y vec1 32 ssa_1254 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1255 = intrinsic load_deref (ssa_1254) (0) /* access=0 */ vec1 32 ssa_1256 = imov ssa_1255.w vec1 32 ssa_1257 = fmul ssa_1253, ssa_1256 vec1 32 ssa_1258 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1259 = intrinsic load_deref (ssa_1258) (0) /* access=0 */ vec4 32 ssa_1260 = vec4 ssa_1257, ssa_1259.y, ssa_1259.z, ssa_1259.w vec1 32 ssa_1261 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1261, ssa_1260) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1262 = deref_var &r1 (shader_temp vec4)
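The run above is this dump's most common idiom: vulkan_resource_index selects the uniform buffer at desc-set 0 / binding 0, four scalar load_ubo reads pull consecutive words at a 16-byte-aligned base (+0x0/+0x4/+0x8/+0xc), and fdot4 dots them against a temp register — one row of a matrix-by-vector transform lowered to scalar loads. A minimal C sketch of what one such run computes (function names, the row offsets used, and the constant-buffer contents are illustrative, not taken from the log):

#include <stdio.h>

typedef struct { float x, y, z, w; } vec4;

/* cb models the UBO at desc-set 0 / binding 0 as raw floats. */
static vec4 load_row(const float *cb, unsigned byte_offset)
{
    const float *p = cb + byte_offset / 4;   /* load_ubo is scalar, 4-byte aligned */
    return (vec4){ p[0], p[1], p[2], p[3] }; /* words at +0x0, +0x4, +0x8, +0xc    */
}

static float fdot4(vec4 a, vec4 b)
{
    return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
}

int main(void)
{
    float cb[0x400 / 4];                     /* stand-in constant buffer */
    for (unsigned i = 0; i < sizeof(cb) / sizeof(*cb); i++) cb[i] = (float)i;

    vec4 r7 = { 1, 2, 3, 1 };                /* stands in for the r7 temp */
    /* four consecutive rows, e.g. 0x210..0x240, behave like a 4x4 matrix */
    float o_x = fdot4(r7, load_row(cb, 0x210));
    float o_y = fdot4(r7, load_row(cb, 0x220));
    float o_z = fdot4(r7, load_row(cb, 0x230));
    float o_w = fdot4(r7, load_row(cb, 0x240));
    printf("%f %f %f %f\n", o_x, o_y, o_z, o_w);
    return 0;
}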
vec1 32 ssa_1159 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1160 = intrinsic load_deref (ssa_1159) (0) /* access=0 */ vec3 32 ssa_1161
vec4 32 ssa_805 = vec4 ssa_804.x, ssa_804.y, ssa_74, ssa_73
vec1 32 ssa_908 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_909 = intrinsic load_deref (ssa_908) (0) /* access=0 */ vec4 32 ssa_910 = vec4 ssa_909.x, ssa_907, ssa_909.z, ssa_909.w vec1 32 ssa_911 = deref_var &r14 (shader_temp vec4)
vec1 32 ssa_1042 = imov ssa_1041.y vec1 32 ssa_1043 = ushr vec1 32 ssa_1044 = imul ssa_1042, ssa_72 vec1 32 ssa_1045 = iadd ssa_1044, ssa_1043
vec4 32 ssa_1263 = intrinsic load_deref (ssa_1262) (0) /* access=0 */ vec3 32 ssa_1264 = vec3 ssa_1263.y, ssa_1263.z, ssa_1263.x vec1 32 ssa_1265 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1266 = intrinsic load_deref (ssa_1265) (0) /* access=0 */ vec3 32 ssa_1267 = vec3 ssa_1266.x, ssa_1266.y, ssa_1266.z vec1 32 ssa_1268 = fdot3 ssa_1264, ssa_1267 vec1 32 ssa_1269 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1270 = intrinsic load_deref (ssa_1269) (0) /* access=0 */ vec4 32 ssa_1271 = vec4 ssa_1268, ssa_1270.y, ssa_1270.z, ssa_1270.w vec1 32 ssa_1272 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1272, ssa_1271) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1273 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1274 = intrinsic load_deref (ssa_1273) (0) /* access=0 */ vec3 32 ssa_1275 = vec3 ssa_1274.y, ssa_1274.z, ssa_1274.x vec1 32 ssa_1276 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_1277 = intrinsic load_deref (ssa_1276) (0) /* access=0 */ vec3 32 ssa_1278 = vec3 ssa_1277.x, ssa_1277.y, ssa_1277.z vec1 32 ssa_1279 = fdot3 ssa_1275, ssa_1278 vec1 32 ssa_1280 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1281 = intrinsic load_deref (ssa_1280) (0) /* access=0 */ vec4 32 ssa_1282 = vec4 ssa_1281.x, ssa_1279, ssa_1281.z, ssa_1281.w vec1 32 ssa_1283 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1283, ssa_1282) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1284 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1285 = intrinsic load_deref (ssa_1284) (0) /* access=0 */ vec3 32 ssa_1286 = vec3 ssa_1285.y, ssa_1285.z, ssa_1285.x vec1 32 ssa_1287 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_1288 = intrinsic load_deref (ssa_1287) (0) /* access=0 */ vec3 32 ssa_1289 = vec3 ssa_1288.x, ssa_1288.y, ssa_1288.z vec1 32 ssa_1290 = fdot3 ssa_1286, ssa_1289 vec1 32 ssa_1291 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1292 = intrinsic load_deref (ssa_1291) (0) /* access=0 */ vec4 32 ssa_1293 = vec4 ssa_1292.x, ssa_1292.y, ssa_1290, ssa_1292.w vec1 32 ssa_1294 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1294, ssa_1293) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1295 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1296 = intrinsic load_deref (ssa_1295) (0) /* access=0 */ vec4 32 ssa_1297 = vec4 ssa_1296.x, ssa_1296.y, ssa_1296.z, ssa_37 vec1 32 ssa_1298 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1298, ssa_1297) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1299 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1300 = intrinsic load_deref (ssa_1299) (0) /* access=0 */ vec1 32 ssa_1301 = imov ssa_1300.x vec1 32 ssa_1302 = fabs ssa_1301 vec1 32 ssa_1303 = fneg ssa_1302 vec1 32 ssa_1304 = fadd ssa_1303, ssa_36 vec1 32 ssa_1305 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1306 = intrinsic load_deref (ssa_1305) (0) /* access=0 */ vec4 32 ssa_1307 = vec4 ssa_1306.x, ssa_1306.y, ssa_1306.z, ssa_1304 vec1 32
ssa_1308 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1308, ssa_1307) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1309 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1310 = intrinsic load_deref (ssa_1309) (0) /* access=0 */ vec1 32 ssa_1311 = imov ssa_1310.y vec1 32 ssa_1312 = fabs ssa_1311 vec1 32 ssa_1313 = fneg ssa_1312 vec1 32 ssa_1314 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1315 = intrinsic load_deref (ssa_1314) (0) /* access=0 */ vec1 32 ssa_1316 = imov ssa_1315.w vec1 32 ssa_1317 = fadd ssa_1313, ssa_1316 vec1 32 ssa_1318 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1319 = intrinsic load_deref (ssa_1318) (0) /* access=0 */ vec4 32 ssa_1320 = vec4 ssa_1319.x, ssa_1319.y, ssa_1317, ssa_1319.w vec1 32 ssa_1321 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1321, ssa_1320) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1322 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1323 = intrinsic load_deref (ssa_1322) (0) /* access=0 */ vec3 32 ssa_1324 = vec3 ssa_1323.x, ssa_1323.y, ssa_1323.z vec1 32 ssa_1325 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1326 = intrinsic load_deref (ssa_1325) (0) /* access=0 */ vec3 32 ssa_1327 = vec3 ssa_1326.x, ssa_1326.y, ssa_1326.z vec1 32 ssa_1328 = fdot3 ssa_1324, ssa_1327 vec1 32 ssa_1329 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1330 = intrinsic load_deref (ssa_1329) (0) /* access=0 */ vec4 32 ssa_1331 = vec4 ssa_1330.x, ssa_1330.y, ssa_1330.z, ssa_1328 vec1 32 ssa_1332 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1332, ssa_1331) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1333 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1334 = intrinsic load_deref (ssa_1333) (0) /* access=0 */ vec1 32 ssa_1335 = imov ssa_1334.w vec1 32 ssa_1336 = frsq ssa_1335 vec1 32 ssa_1337 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1338 = intrinsic load_deref (ssa_1337) (0) /* access=0 */ vec4 32 ssa_1339 = vec4 ssa_1338.x, ssa_1338.y, ssa_1338.z, ssa_1336 vec1 32 ssa_1340 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1340, ssa_1339) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1341 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1342 = intrinsic load_deref (ssa_1341) (0) /* access=0 */ vec3 32 ssa_1343 = vec3 ssa_1342.x, ssa_1342.y, ssa_1342.z vec1 32 ssa_1344 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1345 = intrinsic load_deref (ssa_1344) (0) /* access=0 */ vec3 32 ssa_1346 = vec3 ssa_1345.w, ssa_1345.w, ssa_1345.w vec3 32 ssa_1347 = fmul ssa_1343, ssa_1346 vec1 32 ssa_1348 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1349 = intrinsic load_deref (ssa_1348) (0) /* access=0 */ vec4 32 ssa_1350 = vec4 ssa_1347.x, ssa_1347.y, ssa_1347.z, ssa_1349.w vec1 32 ssa_1351 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1351, ssa_1350) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1352 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1353 = intrinsic load_deref (ssa_1352) (0) /* access=0 */ vec1 32 ssa_1354 = imov ssa_1353.x vec1 32 ssa_1355 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1356 = intrinsic load_deref (ssa_1355) (0) /* access=0 */ vec1 32 ssa_1357 = imov ssa_1356.z vec1 32 ssa_1358 = fmul ssa_1354, ssa_1357 vec1 32 ssa_1359 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1360 = intrinsic load_deref (ssa_1359) (0) /* access=0 */ vec4 32 ssa_1361 = vec4 ssa_1360.x, ssa_1360.y, ssa_1360.z, ssa_1358 vec1 32 ssa_1362 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1362, ssa_1361) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 
ssa_1363 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1364 = intrinsic load_deref (ssa_1363) (0) /* access=0 */ vec3 32 ssa_1365 = vec3 ssa_1364.z, ssa_1364.x, ssa_1364.y vec1 32 ssa_1366 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1367 = intrinsic load_deref (ssa_1366) (0) /* access=0 */ vec3 32 ssa_1368 = vec3 ssa_1367.w, ssa_1367.x, ssa_1367.y vec3 32 ssa_1369 = fmul ssa_1365, ssa_1368 vec1 32 ssa_1370 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1371 = intrinsic load_deref (ssa_1370) (0) /* access=0 */ vec4 32 ssa_1372 = vec4 ssa_1369.x, ssa_1369.y, ssa_1369.z, ssa_1371.w vec1 32 ssa_1373 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1373, ssa_1372) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1374 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1375 = intrinsic load_deref (ssa_1374) (0) /* access=0 */ vec3 32 ssa_1376 = vec3 ssa_1375.y, ssa_1375.w, ssa_1375.x vec1 32 ssa_1377 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1378 = intrinsic load_deref (ssa_1377) (0) /* access=0 */ vec3 32 ssa_1379 = vec3 ssa_1378.x, ssa_1378.y, ssa_1378.z vec1 32 ssa_1380 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1381 = intrinsic load_deref (ssa_1380) (0) /* access=0 */ vec3 32 ssa_1382 = vec3 ssa_1381.x, ssa_1381.y, ssa_1381.z vec3 32 ssa_1383 = fneg ssa_1382 vec3 32 ssa_1384 = ffma ssa_1376, ssa_1379, ssa_1383 vec1 32 ssa_1385 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1386 = intrinsic load_deref (ssa_1385) (0) /* access=0 */ vec4 32 ssa_1387 = vec4 ssa_1384.x, ssa_1384.y, ssa_1384.z, ssa_1386.w vec1 32 ssa_1388 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1388, ssa_1387) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1389 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1390 = intrinsic load_deref (ssa_1389) (0) /* access=0 */ vec3 32 ssa_1391 = vec3 ssa_1390.z, ssa_1390.z, ssa_1390.z vec1 32 ssa_1392 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1393 = intrinsic load_deref (ssa_1392) (0) /* access=0 */ vec3 32 ssa_1394 = vec3 ssa_1393.x, ssa_1393.y, ssa_1393.z vec3 32 ssa_1395 = fmul ssa_1391, ssa_1394 vec1 32 ssa_1396 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1397 = intrinsic load_deref (ssa_1396) (0) /* access=0 */ vec4 32 ssa_1398 = vec4 ssa_1395.x, ssa_1395.y, ssa_1395.z, ssa_1397.w vec1 32 ssa_1399 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1399, ssa_1398) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1400 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1401 = intrinsic load_deref (ssa_1400) (0) /* access=0 */ vec3 32 ssa_1402 = vec3 ssa_1401.x, ssa_1401.y, ssa_1401.z vec1 32 ssa_1403 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1404 = intrinsic load_deref (ssa_1403) (0) /* access=0 */ vec3 32 ssa_1405 = vec3 ssa_1404.x, ssa_1404.y, ssa_1404.z vec1 32 ssa_1406 = fdot3 ssa_1402, ssa_1405 vec1 32 ssa_1407 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1408 = intrinsic load_deref (ssa_1407) (0) /* access=0 */ vec4 32 ssa_1409 = vec4 ssa_1406, ssa_1408.y, ssa_1408.z, ssa_1408.w vec1 32 ssa_1410 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1410, ssa_1409) (15, 0) /* wrmask=xyzw */ /* access=0 */
0107:fixme:ddraw:ddraw7_Initialize Ignoring guid {aeb2cdd4-6e41-43ea-941c-8361cc760781}.
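The ssa_1365–1384 sequence above (two swizzled vec3 builds, an fmul, then ffma against the fneg of the first product) is the usual two-instruction lowering of a cross product, with one operand's third component carried in .w. A sketch assuming the standard component order (the dump's store order may be permuted):

/* Cross product as the IR computes it: t = a.zxy * b.yzx, then
 * result = a.yzx * b.zxy - t.  Names are illustrative only. */
typedef struct { float x, y, z; } vec3;

static vec3 cross3(vec3 a, vec3 b)
{
    vec3 t = { a.z * b.y, a.x * b.z, a.y * b.x };  /* first fmul        */
    return (vec3){ a.y * b.z - t.x,                /* ffma(.., fneg t)  */
                   a.z * b.x - t.y,
                   a.x * b.y - t.z };
}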
vec1 32 ssa_1411 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1412 = intrinsic load_deref (ssa_1411) (0) /* access=0 */ vec3 32 ssa_1413 = vec3 ssa_1412.x, ssa_1412.y, ssa_1412.w vec1 32 ssa_1414 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1415 = intrinsic load_deref (ssa_1414) (0) /* access=0 */ vec3 32 ssa_1416 = vec3 ssa_1415.x, ssa_1415.y, ssa_1415.z vec1 32 ssa_1417 = fdot3 ssa_1413, ssa_1416 vec1 32 ssa_1418 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1419 = intrinsic load_deref (ssa_1418) (0) /* access=0 */ vec4 32 ssa_1420 = vec4 ssa_1417, ssa_1419.y, ssa_1419.z, ssa_1419.w vec1 32 ssa_1421 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1421, ssa_1420) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1422 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1423 = intrinsic load_deref (ssa_1422) (0) /* access=0 */ vec3 32 ssa_1424 = vec3 ssa_1423.x, ssa_1423.y, ssa_1423.z vec1 32 ssa_1425 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_1426 = intrinsic load_deref (ssa_1425) (0) /* access=0 */ vec3 32 ssa_1427 = vec3 ssa_1426.x, ssa_1426.y, ssa_1426.z vec1 32 ssa_1428 = fdot3 ssa_1424, ssa_1427 vec1 32 ssa_1429 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1430 = intrinsic load_deref (ssa_1429) (0) /* access=0 */ vec4 32 ssa_1431 = vec4 ssa_1430.x, ssa_1428, ssa_1430.z, ssa_1430.w vec1 32 ssa_1432 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1432, ssa_1431) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1433 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1434 = intrinsic load_deref (ssa_1433) (0) /* access=0 */ vec3 32 ssa_1435 = vec3 ssa_1434.x, ssa_1434.y, ssa_1434.w vec1 32 ssa_1436 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_1437 = intrinsic load_deref (ssa_1436) (0) /* access=0 */ vec3 32 ssa_1438 = vec3 ssa_1437.x, ssa_1437.y, ssa_1437.z vec1 32 ssa_1439 = fdot3 ssa_1435, ssa_1438 vec1 32 ssa_1440 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1441 = intrinsic load_deref (ssa_1440) (0) /* access=0 */ vec4 32 ssa_1442 = vec4 ssa_1441.x, ssa_1439, ssa_1441.z, ssa_1441.w vec1 32 ssa_1443 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1443, ssa_1442) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1444 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1445 = intrinsic load_deref (ssa_1444) (0) /* access=0 */ vec3 32 ssa_1446 = vec3 ssa_1445.x, ssa_1445.y, ssa_1445.w vec1 32 ssa_1447 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_1448 = intrinsic load_deref (ssa_1447) (0) /* access=0 */ vec3 32 ssa_1449 = vec3 ssa_1448.x, ssa_1448.y, ssa_1448.z vec1 32 ssa_1450 = fdot3 ssa_1446, ssa_1449 vec1 32 ssa_1451 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1452 = intrinsic load_deref (ssa_1451) (0) /* access=0 */ vec4 32 ssa_1453 = vec4 ssa_1452.x, ssa_1452.y, ssa_1450, ssa_1452.w vec1 32 ssa_1454 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1454, ssa_1453) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1455 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1456 = intrinsic load_deref (ssa_1455) (0) /* access=0 */ vec3 32 ssa_1457 = vec3 ssa_1456.x, ssa_1456.y, ssa_1456.z vec1 32 ssa_1458 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_1459 = intrinsic load_deref (ssa_1458) (0) /* access=0 */ vec3 32 ssa_1460 = vec3 ssa_1459.x, ssa_1459.y, ssa_1459.z vec1 32 ssa_1461 = fdot3 ssa_1457, ssa_1460 vec1 32 ssa_1462 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1463 = intrinsic load_deref (ssa_1462) (0) /* access=0 */ vec4 32 ssa_1464 = vec4 ssa_1463.x, ssa_1463.y, ssa_1461, ssa_1463.w vec1 32
ssa_1465 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1465, ssa_1464) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1466 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1467 = intrinsic load_deref (ssa_1466) (0) /* access=0 */ vec4 32 ssa_1468 = vec4 ssa_1467.x, ssa_1467.y, ssa_1467.z, ssa_35 vec1 32 ssa_1469 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1469, ssa_1468) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1470 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1471 = intrinsic load_deref (ssa_1470) (0) /* access=0 */ vec4 32 ssa_1472 = vec4 ssa_1471.x, ssa_1471.y, ssa_1471.z, ssa_34 vec1 32 ssa_1473 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1473, ssa_1472) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1474 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1475 = intrinsic load_deref (ssa_1474) (0) /* access=0 */ vec1 32 ssa_1476 = imov ssa_1475.y vec1 32 ssa_1477 = ishl ssa_1476, ssa_33 vec1 32 ssa_1478 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1479 = intrinsic load_deref (ssa_1478) (0) /* access=0 */ vec4 32 ssa_1480 = vec4 ssa_1477, ssa_1479.y, ssa_1479.z, ssa_1479.w vec1 32 ssa_1481 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1481, ssa_1480) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1482 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1483 = intrinsic load_deref (ssa_1482) (0) /* access=0 */ vec3 32 ssa_1484 = vec3 ssa_1483.x, ssa_1483.y, ssa_1483.z vec3 32 ssa_1485 = ishr ssa_1484, ssa_32 vec1 32 ssa_1486 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1487 = intrinsic load_deref (ssa_1486) (0) /* access=0 */ vec4 32 ssa_1488 = vec4 ssa_1485.x, ssa_1485.y, ssa_1485.z, ssa_1487.w vec1 32 ssa_1489 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1489, ssa_1488) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1490 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1491 = intrinsic load_deref (ssa_1490) (0) /* access=0 */ vec3 32 ssa_1492 = vec3 ssa_1491.x, ssa_1491.y, ssa_1491.z vec3 32 ssa_1493 = i2f32 ssa_1492 vec1 32 ssa_1494 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1495 = intrinsic load_deref (ssa_1494) (0) /* access=0 */ vec4 32 ssa_1496 = vec4 ssa_1493.x, ssa_1493.y, ssa_1493.z, ssa_1495.w vec1 32 ssa_1497 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1497, ssa_1496) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1498 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1499 = intrinsic load_deref (ssa_1498) (0) /* access=0 */ vec3 32 ssa_1500 = vec3 ssa_1499.x, ssa_1499.y, ssa_1499.z vec3 32 ssa_1501 = fmul ssa_1500, ssa_31 vec1 32 ssa_1502 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1503 = intrinsic load_deref (ssa_1502) (0) /* access=0 */ vec4 32 ssa_1504 = vec4 ssa_1501.x, ssa_1501.y, ssa_1501.z, ssa_1503.w vec1 32 ssa_1505 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1505, ssa_1504) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1506 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1507 = intrinsic load_deref (ssa_1506) (0) /* access=0 */ vec3 32 ssa_1508 = vec3 ssa_1507.w, ssa_1507.w, ssa_1507.w vec1 32 ssa_1509 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1510 = intrinsic load_deref (ssa_1509) (0) /* access=0 */ vec3 32 ssa_1511 = vec3 ssa_1510.x, ssa_1510.y, ssa_1510.z vec3 32 ssa_1512 = fmul ssa_1508, ssa_1511 vec1 32 ssa_1513 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1514 = intrinsic load_deref (ssa_1513) (0) /* access=0 */ vec4 32 ssa_1515 = vec4 ssa_1512.x, ssa_1512.y, ssa_1512.z, ssa_1514.w vec1 32 
ssa_1516 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1516, ssa_1515) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1517 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_1518 = intrinsic load_deref (ssa_1517) (0) /* access=0 */ vec1 32 ssa_1519 = imov ssa_1518.w vec1 32 ssa_1520 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1521 = intrinsic load_deref (ssa_1520) (0) /* access=0 */ vec4 32 ssa_1522 = vec4 ssa_1519, ssa_1521.y, ssa_1521.z, ssa_1521.w vec1 32 ssa_1523 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1523, ssa_1522) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1524 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1525 = intrinsic load_deref (ssa_1524) (0) /* access=0 */ vec4 32 ssa_1526 = vec4 ssa_1525.z, ssa_1525.z, ssa_1525.z, ssa_1525.z vec1 32 ssa_1527 = imov ssa_1526.x /* succs: block_25 block_26 */ if ssa_30 { block block_25: /* preds: block_24 */ vec1 32 ssa_1528 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_1529 = txf ssa_1528 (texture_deref), ssa_1527 (coord), 0 (sampler), vec4 32 ssa_1530 = vec4 ssa_1529.x, ssa_1529.z, ssa_1529.y, ssa_1529.w vec1 32 ssa_1531 = deref_var &phi@7 (function_temp vec4) intrinsic store_deref (ssa_1531, ssa_1530) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_27 */ } else { block block_26: /* preds: block_24 */ vec1 32 ssa_1532 = deref_var &phi@7 (function_temp vec4) intrinsic store_deref (ssa_1532, ssa_18) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_27 */ } block block_27: /* preds: block_25 block_26 */ vec1 32 ssa_1533 = deref_var &phi@7 (function_temp vec4) vec4 32 ssa_1534 = intrinsic load_deref (ssa_1533) (0) /* access=0 */ vec1 32 ssa_1535 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1535, ssa_1534) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1536 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1537 = intrinsic load_deref (ssa_1536) (0) /* access=0 */ vec4 32 ssa_1538 = vec4 ssa_1537.w, ssa_1537.w, ssa_1537.w, ssa_1537.w vec1 32 ssa_1539 = imov ssa_1538.x /* succs: block_28 block_29 */ if ssa_29 { block block_28: /* preds: block_27 */ vec1 32 ssa_1540 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_1541 = txf ssa_1540 (texture_deref), ssa_1539 (coord), 0 (sampler), vec1 32 ssa_1542 = deref_var &phi@8 (function_temp vec4) intrinsic store_deref (ssa_1542, ssa_1541) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_30 */ } else { block block_29: /* preds: block_27 */ vec1 32 ssa_1543 = deref_var &phi@8 (function_temp vec4) intrinsic store_deref (ssa_1543, ssa_17) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_30 */ } block block_30: /* preds: block_28 block_29 */ vec1 32 ssa_1544 = deref_var &phi@8 (function_temp vec4) vec4 32 ssa_1545 = intrinsic load_deref (ssa_1544) (0) /* access=0 */ vec1 32 ssa_1546 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1546, ssa_1545) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1547 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1548 = intrinsic load_deref (ssa_1547) (0) /* access=0 */ vec1 32 ssa_1549 = imov ssa_1548.w vec1 32 ssa_1550 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1551 = intrinsic load_deref (ssa_1550) (0) /* access=0 */ vec4 32 ssa_1552 = vec4 ssa_1551.x, ssa_1549, ssa_1551.z, ssa_1551.w vec1 32 ssa_1553 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1553, ssa_1552) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1554 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1555 = intrinsic load_deref (ssa_1554) (0) /* access=0 */ vec1 
32 ssa_1556 = imov ssa_1555.w vec1 32 ssa_1557 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1558 = intrinsic load_deref (ssa_1557) (0) /* access=0 */ vec4 32 ssa_1559 = vec4 ssa_1558.x, ssa_1558.y, ssa_1556, ssa_1558.w vec1 32 ssa_1560 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1560, ssa_1559) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1561 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1562 = intrinsic load_deref (ssa_1561) (0) /* access=0 */ vec3 32 ssa_1563 = vec3 ssa_1562.x, ssa_1562.y, ssa_1562.z vec1 32 ssa_1564 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1565 = intrinsic vulkan_resource_index (ssa_1564) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1566 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1567 = load_const (0x00000320 /* 0.000000 */) vec1 32 ssa_1568 = iadd ssa_1566, ssa_1567 vec1 32 ssa_1569 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1570 = iadd ssa_1568, ssa_1569 vec1 32 ssa_1571 = intrinsic load_ubo (ssa_1565, ssa_1570) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1572 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1573 = iadd ssa_1568, ssa_1572 vec1 32 ssa_1574 = intrinsic load_ubo (ssa_1565, ssa_1573) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1575 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1576 = iadd ssa_1568, ssa_1575 vec1 32 ssa_1577 = intrinsic load_ubo (ssa_1565, ssa_1576) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1578 = vec3 ssa_1571, ssa_1574, ssa_1577 vec3 32 ssa_1579 = fneg ssa_1578 vec3 32 ssa_1580 = fadd ssa_1563, ssa_1579 vec1 32 ssa_1581 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1582 = intrinsic load_deref (ssa_1581) (0) /* access=0 */ vec4 32 ssa_1583 = vec4 ssa_1580.x, ssa_1580.y, ssa_1580.z, ssa_1582.w vec1 32 ssa_1584 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1584, ssa_1583) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1585 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1586 = intrinsic load_deref (ssa_1585) (0) /* access=0 */ vec4 32 ssa_1587 = vec4 ssa_1586.x, ssa_1586.y, ssa_1586.z, ssa_28 vec1 32 ssa_1588 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1588, ssa_1587) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1589 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1590 = intrinsic load_deref (ssa_1589) (0) /* access=0 */ vec1 32 ssa_1591 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1592 = intrinsic vulkan_resource_index (ssa_1591) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1593 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1594 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1595 = iadd ssa_1593, ssa_1594 vec1 32 ssa_1596 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1597 = iadd ssa_1595, ssa_1596 vec1 32 ssa_1598 = intrinsic load_ubo (ssa_1592, ssa_1597) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1599 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1600 = iadd ssa_1595, ssa_1599 vec1 32 ssa_1601 = intrinsic load_ubo (ssa_1592, ssa_1600) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1602 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1603 = iadd ssa_1595, ssa_1602 vec1 32 ssa_1604 = intrinsic load_ubo (ssa_1592, ssa_1603) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1605 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1606 = iadd ssa_1595, ssa_1605 vec1 32 ssa_1607 = intrinsic load_ubo (ssa_1592, ssa_1606) (4, 0) /* align_mul=4 */ /* 
align_offset=0 */ vec4 32 ssa_1608 = vec4 ssa_1598, ssa_1601, ssa_1604, ssa_1607 vec1 32 ssa_1609 = fdot4 ssa_1590, ssa_1608 vec1 32 ssa_1610 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1611 = intrinsic load_deref (ssa_1610) (0) /* access=0 */ vec4 32 ssa_1612 = vec4 ssa_1611.x, ssa_1611.y, ssa_1611.z, ssa_1609 vec1 32 ssa_1613 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1613, ssa_1612) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1614 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1615 = intrinsic load_deref (ssa_1614) (0) /* access=0 */ vec1 32 ssa_1616 = imov ssa_1615.x vec1 32 ssa_1617 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1618 = intrinsic load_deref (ssa_1617) (0) /* access=0 */ vec4 32 ssa_1619 = vec4 ssa_1618.x, ssa_1616, ssa_1618.z, ssa_1618.w vec1 32 ssa_1620 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1620, ssa_1619) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1621 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1622 = intrinsic load_deref (ssa_1621) (0) /* access=0 */ vec1 32 ssa_1623 = imov ssa_1622.x vec1 32 ssa_1624 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1625 = intrinsic load_deref (ssa_1624) (0) /* access=0 */ vec4 32 ssa_1626 = vec4 ssa_1625.x, ssa_1625.y, ssa_1623, ssa_1625.w vec1 32 ssa_1627 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1627, ssa_1626) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1628 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_1629 = intrinsic load_deref (ssa_1628) (0) /* access=0 */ vec1 32 ssa_1630 = imov ssa_1629.x vec1 32 ssa_1631 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1632 = intrinsic load_deref (ssa_1631) (0) /* access=0 */ vec4 32 ssa_1633 = vec4 ssa_1630, ssa_1632.y, ssa_1632.z, ssa_1632.w vec1 32 ssa_1634 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1634, ssa_1633) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1635 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1636 = intrinsic load_deref (ssa_1635) (0) /* access=0 */ vec3 32 ssa_1637 = vec3 ssa_1636.x, ssa_1636.y, ssa_1636.z vec1 32 ssa_1638 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1639 = intrinsic vulkan_resource_index (ssa_1638) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1640 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1641 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1642 = iadd ssa_1640, ssa_1641 vec1 32 ssa_1643 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1644 = iadd ssa_1642, ssa_1643 vec1 32 ssa_1645 = intrinsic load_ubo (ssa_1639, ssa_1644) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1646 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1647 = iadd ssa_1642, ssa_1646 vec1 32 ssa_1648 = intrinsic load_ubo (ssa_1639, ssa_1647) (
= vec3 ssa_1160.x, ssa_1160.y, ssa_1160.w
vec4 32 ssa_452 = intrinsic load_deref (ssa_451) (0) /* access=0 */
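The ishl/ishr/i2f32/fmul run back at ssa_1477–1501 is a signed bitfield unpack: shift the packed field up to the top bits, arithmetic-shift back down to sign-extend it, convert to float, and scale to a normalized range. The shift counts and scale factor are load_consts (ssa_31–ssa_33) whose values are not visible in this capture, so the 10-bit width and 512.0 divisor in this C sketch are assumptions for illustration only:

#include <stdint.h>

/* Hypothetical unpack of one signed 10-bit field from a packed word. */
static float unpack_snorm_field(uint32_t word, unsigned shift_up)
{
    int32_t v = (int32_t)(word << shift_up); /* ishl: field's top bit to bit 31 */
    v >>= 22;                                /* ishr: sign-extend the 10-bit field */
    return (float)v * (1.0f / 512.0f);       /* i2f32 + fmul by a scale constant */
}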
vec1 32 ssa_1162 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1163 = intrinsic load_deref (ssa_1162) (0)
vec1 32 ssa_806 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_806,
vec4 32 ssa_453 = vec4 ssa_452.x, ssa_450, ssa_452.z, ssa_452.w vec1 32 ssa_454 = deref_var &r11 (function_temp vec4) intrinsic store_deref (ssa_454, ssa_453) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_455 = deref_var &r8 (function_temp vec4) vec4 32 ssa_456 = intrinsic load_deref (ssa_455) (0) /* access=0 */ vec1 32 ssa_457 = imov ssa_456.y vec1 32 ssa_458 = deref_var &r11 (function_temp vec4) vec4 32 ssa_459 = intrinsic load_deref (ssa_458) (0) /* access=0 */ vec4 32 ssa_460 = vec4 ssa_459.x, ssa_459.y, ssa_457, ssa_459.w vec1 32 ssa_461 = deref_var &r11 (function_temp vec4) intrinsic store_deref (ssa_461, ssa_460) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_462 = deref_var &r11 (function_temp vec4) vec4 32 ssa_463 = intrinsic load_deref (ssa_462) (0) /* access=0 */ vec3 32 ssa_464 = vec3 ssa_463.x, ssa_463.y, ssa_463.z vec1 32 ssa_465 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_466 = intrinsic vulkan_resource_index (ssa_465) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_467 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_468 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_469 = iadd ssa_467, ssa_468 vec1 32 ssa_470 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_471 = iadd ssa_469, ssa_470 vec1 32 ssa_472 = intrinsic load_ubo (ssa_466, ssa_471) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_473 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_474 = iadd ssa_469, ssa_473 vec1 32 ssa_475 = intrinsic load_ubo (ssa_466, ssa_474) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_476 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_477 = iadd ssa_469, ssa_476 vec1 32 ssa_478 = intrinsic load_ubo (ssa_466, ssa_477) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_479 = vec3 ssa_472, ssa_475, ssa_478 vec1 32 ssa_480 = fdot3 ssa_464, ssa_479 vec1 32 ssa_481 = deref_var &r10 (function_temp vec4) vec4 32 ssa_482 = intrinsic load_deref (ssa_481) (0) /* access=0 */ vec4 32 ssa_483 = vec4 ssa_482.x, ssa_480, ssa_482.z, ssa_482.w vec1 32 ssa_484 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_484, ssa_483) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_485 = deref_var &r4 (function_temp vec4) vec4 32 ssa_486 = intrinsic load_deref (ssa_485) (0) /* access=0 */ vec1 32 ssa_487 = imov ssa_486.z vec1 32 ssa_488 = deref_var &r12 (function_temp vec4) vec4 32 ssa_489 = intrinsic load_deref (ssa_488) (0) /* access=0 */ vec4 32 ssa_490 = vec4 ssa_487, ssa_489.y, ssa_489.z, ssa_489.w vec1 32 ssa_491 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_491, ssa_490) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_492 = deref_var &r7 (function_temp vec4) vec4 32 ssa_493 = intrinsic load_deref (ssa_492) (0) /* access=0 */ vec1 32 ssa_494 = imov ssa_493.z vec1 32 ssa_495 = deref_var &r12 (function_temp vec4) vec4 32 ssa_496 = intrinsic load_deref (ssa_495) (0) /* access=0 */ vec4 32 ssa_497 = vec4 ssa_496.x, ssa_494, ssa_496.z, ssa_496.w vec1 32 ssa_498 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_498, ssa_497) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_499 = deref_var &r8 (function_temp vec4) vec4 32 ssa_500 = intrinsic load_deref (ssa_499) (0) /* access=0 */ vec1 32 ssa_501 = imov ssa_500.z vec1 32 ssa_502 = deref_var &r12
(function_temp vec4) vec4 32 ssa_503 = intrinsic load_deref (ssa_502) (0) /* access=0 */ vec4 32 ssa_504 = vec4 ssa_503.x, ssa_503.y, ssa_501, ssa_503.w vec1 32 ssa_505 = deref_var &r12 (function_temp vec4) intrinsic store_deref (ssa_505, ssa_504) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_506 = deref_var &r12 (function_temp vec4) vec4 32 ssa_507 = intrinsic load_deref (ssa_506) (0) /* access=0 */ vec3 32 ssa_508 = vec3 ssa_507.x, ssa_507.y, ssa_507.z vec1 32 ssa_509 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_510 = intrinsic vulkan_resource_index (ssa_509) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_511 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_512 = load_const (0x00000210 /* 0.000000 */) vec1 32 ssa_513 = iadd ssa_511, ssa_512 vec1 32 ssa_514 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_515 = iadd ssa_513, ssa_514 vec1 32 ssa_516 = intrinsic load_ubo (ssa_510, ssa_515) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_517 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_518 = iadd ssa_513, ssa_517 vec1 32 ssa_519 = intrinsic load_ubo (ssa_510, ssa_518) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_520 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_521 = iadd ssa_513, ssa_520 vec1 32 ssa_522 = intrinsic load_ubo (ssa_510, ssa_521) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_523 = vec3 ssa_516, ssa_519, ssa_522 vec1 32 ssa_524 = fdot3 ssa_508, ssa_523 vec1 32 ssa_525 = deref_var &r10 (function_temp vec4) vec4 32 ssa_526 = intrinsic load_deref (ssa_525) (0) /* access=0 */ vec4 32 ssa_527 = vec4 ssa_526.x, ssa_526.y, ssa_524, ssa_526.w vec1 32 ssa_528 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_528, ssa_527) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_529 = deref_var &r3 (function_temp vec4) vec4 32 ssa_530 = intrinsic load_deref (ssa_529) (0) /* access=0 */ vec4 32 ssa_531 = vec4 ssa_530.x, ssa_530.y, ssa_530.z, ssa_74 vec1 32 ssa_532 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_532, ssa_531) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_533 = deref_var &r3 (function_temp vec4) vec4 32 ssa_534 = intrinsic load_deref (ssa_533) (0) /* access=0 */ vec1 32 ssa_535 = deref_var &r10 (function_temp vec4) vec4 32 ssa_536 = intrinsic load_deref (ssa_535) (0) /* access=0 */ vec1 32 ssa_537 = fdot4 ssa_534, ssa_536 vec1 32 ssa_538 = deref_var &out@o0-temp (function_temp vec4) vec4 32 ssa_539 = intrinsic load_deref (ssa_538) (0) /* access=0 */ vec4 32 ssa_540 = vec4 ssa_537, ssa_539.y, ssa_539.z, ssa_539.w vec1 32 ssa_541 = deref_var &out@o0-temp (function_temp vec4) intrinsic store_deref (ssa_541, ssa_540) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_542 = deref_var &r9 (function_temp vec4) vec4 32 ssa_543 = intrinsic load_deref (ssa_542) (0) /* access=0 */ vec1 32 ssa_544 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_545 = intrinsic vulkan_resource_index (ssa_544) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_546 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_547 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_548 = iadd ssa_546, ssa_547 vec1 32 ssa_549 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_550 = iadd ssa_548, ssa_549 vec1 32 ssa_551 = intrinsic load_ubo (ssa_545, ssa_550) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_552 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_553 = iadd ssa_548, ssa_552 vec1 32 ssa_554 = intrinsic load_ubo (ssa_545, 
ssa_553) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_555 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_556 = iadd ssa_548, ssa_555 vec1 32 ssa_557 = intrinsic load_ubo (ssa_545, ssa_556) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_558 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_559 = iadd ssa_548, ssa_558 vec1 32 ssa_560 = intrinsic load_ubo (ssa_545, ssa_559) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_561 = vec4 ssa_551, ssa_554, ssa_557, ssa_560 vec1 32 ssa_562 = fdot4 ssa_543, ssa_561 vec1 32 ssa_563 = deref_var &r10 (function_temp vec4) vec4 32 ssa_564 = intrinsic load_deref (ssa_563) (0) /* access=0 */ vec4 32 ssa_565 = vec4 ssa_564.x, ssa_564.y, ssa_564.z, ssa_562 vec1 32 ssa_566 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_566, ssa_565) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_567 = deref_var &r1 (function_temp vec4) vec4 32 ssa_568 = intrinsic load_deref (ssa_567) (0) /* access=0 */ vec3 32 ssa_569 = vec3 ssa_568.x, ssa_568.y, ssa_568.z vec1 32 ssa_570 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_571 = intrinsic vulkan_resource_index (ssa_570) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_572 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_573 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_574 = iadd ssa_572, ssa_573 vec1 32 ssa_575 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_576 = iadd ssa_574, ssa_575 vec1 32 ssa_577 = intrinsic load_ubo (ssa_571, ssa_576) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_578 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_579 = iadd ssa_574, ssa_578 vec1 32 ssa_580 = intrinsic load_ubo (ssa_571, ssa_579) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_581 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_582 = iadd ssa_574, ssa_581 vec1 32 ssa_583 = intrinsic load_ubo (ssa_571, ssa_582) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_584 = vec3 ssa_577, ssa_580, ssa_583 vec1 32 ssa_585 = fdot3 ssa_569, ssa_584 vec1 32 ssa_586 = deref_var &r10 (function_temp vec4) vec4 32 ssa_587 = intrinsic load_deref (ssa_586) (0) /* access=0 */ vec4 32 ssa_588 = vec4 ssa_585, ssa_587.y, ssa_587.z, ssa_587.w vec1 32 ssa_589 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_589, ssa_588) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_590 = deref_var &r11 (function_temp vec4) vec4 32 ssa_591 = intrinsic load_deref (ssa_590) (0) /* access=0 */ vec3 32 ssa_592 = vec3 ssa_591.x, ssa_591.y, ssa_591.z vec1 32 ssa_593 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_594 = intrinsic vulkan_resource_index (ssa_593) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_595 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_596 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_597 = iadd ssa_595, ssa_596 vec1 32 ssa_598 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_599 = iadd ssa_597, ssa_598 vec1 32 ssa_600 = intrinsic load_ubo (ssa_594, ssa_599) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_601 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_602 = iadd ssa_597, ssa_601 vec1 32 ssa_603 = intrinsic load_ubo (ssa_594, ssa_602) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_604 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_605 = iadd ssa_597, ssa_604 vec1 32 ssa_606 = intrinsic load_ubo (ssa_594, ssa_605) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_607 = vec3 ssa_600, ssa_603, ssa_606 vec1 
32 ssa_608 = fdot3 ssa_592, ssa_607 vec1 32 ssa_609 = deref_var &r10 (function_temp vec4) vec4 32 ssa_610 = intrinsic load_deref (ssa_609) (0) /* access=0 */ vec4 32 ssa_611 = vec4 ssa_610.x, ssa_608, ssa_610.z, ssa_610.w vec1 32 ssa_612 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_612, ssa_611) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_613 = deref_var &r12 (function_temp vec4) vec4 32 ssa_614 = intrinsic load_deref (ssa_613) (0) /* access=0 */ vec3 32 ssa_615 = vec3 ssa_614.x, ssa_614.y, ssa_614.z vec1 32 ssa_616 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_617 = intrinsic vulkan_resource_index (ssa_616) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_618 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_619 = load_const (0x00000220 /* 0.000000 */) vec1 32 ssa_620 = iadd ssa_618, ssa_619 vec1 32 ssa_621 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_622 = iadd ssa_620, ssa_621 vec1 32 ssa_623 = intrinsic load_ubo (ssa_617, ssa_622) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_624 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_625 = iadd ssa_620, ssa_624 vec1 32 ssa_626 = intrinsic load_ubo (ssa_617, ssa_625) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_627 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_628 = iadd ssa_620, ssa_627 vec1 32 ssa_629 = intrinsic load_ubo (ssa_617, ssa_628) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_630 = vec3 ssa_623, ssa_626, ssa_629 vec1 32 ssa_631 = fdot3 ssa_615, ssa_630 vec1 32 ssa_632 = deref_var &r10 (function_temp vec4) vec4 32 ssa_633 = intrinsic load_deref (ssa_632) (0) /* access=0 */ vec4 32 ssa_634 = vec4 ssa_633.x, ssa_633.y, ssa_631, ssa_633.w vec1 32 ssa_635 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_635, ssa_634) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_636 = deref_var &r3 (function_temp vec4) vec4 32 ssa_637 = intrinsic load_deref (ssa_636) (0) /* access=0 */ vec1 32 ssa_638 = deref_var &r10 (function_temp vec4) vec4 32 ssa_639 = intrinsic load_deref (ssa_638) (0) /* access=0 */ vec1 32 ssa_640 = fdot4 ssa_637, ssa_639 vec1 32 ssa_641 = deref_var &out@o0-temp (function_temp vec4) vec4 32 ssa_642 = intrinsic load_deref (ssa_641) (0) /* access=0 */ vec4 32 ssa_643 = vec4 ssa_642.x, ssa_640, ssa_642.z, ssa_642.w vec1 32 ssa_644 = deref_var &out@o0-temp (function_temp vec4) intrinsic store_deref (ssa_644, ssa_643) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_645 = deref_var &r9 (function_temp vec4) vec4 32 ssa_646 = intrinsic load_deref (ssa_645) (0) /* access=0 */ vec1 32 ssa_647 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_648 = intrinsic vulkan_resource_index (ssa_647) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_649 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_650 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_651 = iadd ssa_649, ssa_650 vec1 32 ssa_652 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_653 = iadd ssa_651, ssa_652 vec1 32 ssa_654 = intrinsic load_ubo (ssa_648, ssa_653) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_655 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_656 = iadd ssa_651, ssa_655 vec1 32 ssa_657 = intrinsic load_ubo (ssa_648, ssa_656) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_658 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_659 = iadd ssa_651, ssa_658 vec1 32 ssa_660 = intrinsic load_ubo (ssa_648, ssa_659) (4, 0) /* align_mul=4 */ /* align_offset=0 */ 
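
[annotation: the recurring load_ubo-then-fdot3/fdot4 runs above and below all have one shape: three or four 32-bit scalars are fetched at base+0/+4/+8(/+0xc) of the set-0/binding-0 UBO (bases 0x210, 0x220, 0x230, 0x240, ...), packed into a vector, and dotted against a register, i.e. one row of a matrix-vector multiply per run. A minimal C sketch of that addressing, assuming the UBO is just a raw byte array; all names below are illustrative, not from the dump:]

    #include <stdio.h>
    #include <string.h>

    /* load_ubo (ssa_X, offset) (4, 0): one 32-bit scalar at a byte offset. */
    static float load_ubo_f32(const unsigned char *ubo, unsigned off)
    {
        float f;
        memcpy(&f, ubo + off, sizeof f);
        return f;
    }

    /* fdot4 against four scalars fetched at base+0/+4/+8/+0xc, i.e. one
     * 16-byte row of the constant buffer (base = 0x210, 0x220, ...). */
    static float fdot4_ubo_row(const float v[4], const unsigned char *ubo,
                               unsigned base)
    {
        float d = 0.0f;
        for (unsigned i = 0; i < 4; i++)
            d += v[i] * load_ubo_f32(ubo, base + 4 * i);
        return d;
    }

    int main(void)
    {
        unsigned char ubo[0x300] = {0};
        float row[4] = {1.0f, 0.0f, 0.0f, 0.0f};
        memcpy(ubo + 0x220, row, sizeof row);     /* one matrix row */
        float v[4] = {2.0f, 3.0f, 4.0f, 1.0f};
        printf("%f\n", fdot4_ubo_row(v, ubo, 0x220)); /* 2.000000 */
        return 0;
    }
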
vec1 32 ssa_661 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_662 = iadd ssa_651, ssa_661 vec1 32 ssa_663 = intrinsic load_ubo (ssa_648, ssa_662) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_664 = vec4 ssa_654, ssa_657, ssa_660, ssa_663 vec1 32 ssa_665 = fdot4 ssa_646, ssa_664 vec1 32 ssa_666 = deref_var &r10 (function_temp vec4) vec4 32 ssa_667 = intrinsic load_deref (ssa_666) (0) /* access=0 */ vec4 32 ssa_668 = vec4 ssa_667.x, ssa_667.y, ssa_667.z, ssa_665 vec1 32 ssa_669 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_669, ssa_668) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_670 = deref_var &r9 (function_temp vec4) vec4 32 ssa_671 = intrinsic load_deref (ssa_670) (0) /* access=0 */ vec1 32 ssa_672 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_673 = intrinsic vulkan_resource_index (ssa_672) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_674 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_675 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_676 = iadd ssa_674, ssa_675 vec1 32 ssa_677 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_678 = iadd ssa_676, ssa_677 vec1 32 ssa_679 = intrinsic load_ubo (ssa_673, ssa_678) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_680 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_681 = iadd ssa_676, ssa_680 vec1 32 ssa_682 = intrinsic load_ubo (ssa_673, ssa_681) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_683 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_684 = iadd ssa_676, ssa_683 vec1 32 ssa_685 = intrinsic load_ubo (ssa_673, ssa_684) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_686 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_687 = iadd ssa_676, ssa_686 vec1 32 ssa_688 = intrinsic load_ubo (ssa_673, ssa_687) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_689 = vec4 ssa_679, ssa_682, ssa_685, ssa_688 vec1 32 ssa_690 = fdot4 ssa_671, ssa_689 vec1 32 ssa_691 = deref_var &r9 (function_temp vec4) vec4 32 ssa_692 = intrinsic load_deref (ssa_691) (0) /* access=0 */ vec4 32 ssa_693 = vec4 ssa_692.x, ssa_692.y, ssa_692.z, ssa_690 vec1 32 ssa_694 = deref_var &r9 (function_temp vec4) intrinsic store_deref (ssa_694, ssa_693) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_695 = deref_var &r1 (function_temp vec4) vec4 32 ssa_696 = intrinsic load_deref (ssa_695) (0) /* access=0 */ vec3 32 ssa_697 = vec3 ssa_696.x, ssa_696.y, ssa_696.z vec1 32 ssa_698 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_699 = intrinsic vulkan_resource_index (ssa_698) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_700 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_701 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_702 = iadd ssa_700, ssa_701 vec1 32 ssa_703 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_704 = iadd ssa_702, ssa_703 vec1 32 ssa_705 = intrinsic load_ubo (ssa_699, ssa_704) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_706 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_707 = iadd ssa_702, ssa_706 vec1 32 ssa_708 = intrinsic load_ubo (ssa_699, ssa_707) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_709 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_710 = iadd ssa_702, ssa_709 vec1 32 ssa_711 = intrinsic load_ubo (ssa_699, ssa_710) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_712 = vec3 ssa_705, ssa_708, ssa_711 vec1 32 ssa_713 = fdot3 ssa_697, ssa_712 vec1 32 ssa_714 = deref_var &r10 (function_temp vec4) vec4 32 
ssa_715 = intrinsic load_deref (ssa_714) (0) /* access=0 */ vec4 32 ssa_716 = vec4 ssa_713, ssa_715.y, ssa_715.z, ssa_715.w vec1 32 ssa_717 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_717, ssa_716) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_718 = deref_var &r1 (function_temp vec4) vec4 32 ssa_719 = intrinsic load_deref (ssa_718) (0) /* access=0 */ vec3 32 ssa_720 = vec3 ssa_719.x, ssa_719.y, ssa_719.z vec1 32 ssa_721 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_722 = intrinsic vulkan_resource_index (ssa_721) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_723 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_724 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_725 = iadd ssa_723, ssa_724 vec1 32 ssa_726 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_727 = iadd ssa_725, ssa_726 vec1 32 ssa_728 = intrinsic load_ubo (ssa_722, ssa_727) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_729 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_730 = iadd ssa_725, ssa_729 vec1 32 ssa_731 = intrinsic load_ubo (ssa_722, ssa_730) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_732 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_733 = iadd ssa_725, ssa_732 vec1 32 ssa_734 = intrinsic load_ubo (ssa_722, ssa_733) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_735 = vec3 ssa_728, ssa_731, ssa_734 vec1 32 ssa_736 = fdot3 ssa_720, ssa_735 vec1 32 ssa_737 = deref_var &r9 (function_temp vec4) vec4 32 ssa_738 = intrinsic load_deref (ssa_737) (0) /* access=0 */ vec4 32 ssa_739 = vec4 ssa_736, ssa_738.y, ssa_738.z, ssa_738.w vec1 32 ssa_740 = deref_var &r9 (function_temp vec4) intrinsic store_deref (ssa_740, ssa_739) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_741 = deref_var &r11 (function_temp vec4) vec4 32 ssa_742 = intrinsic load_deref (ssa_741) (0) /* access=0 */ vec3 32 ssa_743 = vec3 ssa_742.x, ssa_742.y, ssa_742.z vec1 32 ssa_744 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_745 = intrinsic vulkan_resource_index (ssa_744) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_746 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_747 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_748 = iadd ssa_746, ssa_747 vec1 32 ssa_749 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_750 = iadd ssa_748, ssa_749 vec1 32 ssa_751 = intrinsic load_ubo (ssa_745, ssa_750) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_752 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_753 = iadd ssa_748, ssa_752 vec1 32 ssa_754 = intrinsic load_ubo (ssa_745, ssa_753) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_755 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_756 = iadd ssa_748, ssa_755 vec1 32 ssa_757 = intrinsic load_ubo (ssa_745, ssa_756) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_758 = vec3 ssa_751, ssa_754, ssa_757 vec1 32 ssa_759 = fdot3 ssa_743, ssa_758 vec1 32 ssa_760 = deref_var &r10 (function_temp vec4) vec4 32 ssa_761 = intrinsic load_deref (ssa_760) (0) /* access=0 */ vec4 32 ssa_762 = vec4 ssa_761.x, ssa_759, ssa_761.z, ssa_761.w vec1 32 ssa_763 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_763, ssa_762) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_764 = deref_var &r11 (function_temp vec4) vec4 32 ssa_765 = intrinsic load_deref (ssa_764) (0) /* access=0 */ vec3 32 ssa_766 = vec3 ssa_765.x, ssa_765.y, ssa_765.z vec1 32 ssa_767 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_768 = 
intrinsic vulkan_resource_index (ssa_767) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_769 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_770 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_771 = iadd ssa_769, ssa_770 vec1 32 ssa_772 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_773 = iadd ssa_771, ssa_772 vec1 32 ssa_774 = intrinsic load_ubo (ssa_768, ssa_773) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_775 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_776 = iadd ssa_771, ssa_775 vec1 32 ssa_777 = intrinsic load_ubo (ssa_768, ssa_776) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_778 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_779 = iadd ssa_771, ssa_778 vec1 32 ssa_780 = intrinsic load_ubo (ssa_768, ssa_779) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_781 = vec3 ssa_774, ssa_777, ssa_780 vec1 32 ssa_782 = fdot3 ssa_766, ssa_781 vec1 32 ssa_783 = deref_var &r9 (function_temp vec4) vec4 32 ssa_784 = intrinsic load_deref (ssa_783) (0) /* access=0 */ vec4 32 ssa_785 = vec4 ssa_784.x, ssa_782, ssa_784.z, ssa_784.w vec1 32 ssa_786 = deref_var &r9 (function_temp vec4) intrinsic store_deref (ssa_786, ssa_785) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_787 = deref_var &r12 (function_temp vec4) vec4 32 ssa_788 = intrinsic load_deref (ssa_787) (0) /* access=0 */ vec3 32 ssa_789 = vec3 ssa_788.x, ssa_788.y, ssa_788.z vec1 32 ssa_790 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_791 = intrinsic vulkan_resource_index (ssa_790) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_792 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_793 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_794 = iadd ssa_792, ssa_793 vec1 32 ssa_795 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_796 = iadd ssa_794, ssa_795 vec1 32 ssa_797 = intrinsic load_ubo (ssa_791, ssa_796) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_798 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_799 = iadd ssa_794, ssa_798 vec1 32 ssa_800 = intrinsic load_ubo (ssa_791, ssa_799) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_801 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_802 = iadd ssa_794, ssa_801 vec1 32 ssa_803 = intrinsic load_ubo (ssa_791, ssa_802) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_804 = vec3 ssa_797, ssa_800, ssa_803 vec1 32 ssa_805 = fdot3 ssa_789, ssa_804 vec1 32 ssa_806 = deref_var &r10 (function_temp vec4) vec4 32 ssa_807 = intrinsic load_deref (ssa_806) (0) /* access=0 */ vec4 32 ssa_808 = vec4 ssa_807.x, ssa_807.y, ssa_805, ssa_807.w vec1 32 ssa_809 = deref_var &r10 (function_temp vec4) intrinsic store_deref (ssa_809, ssa_808) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_810 = deref_var &r12 (function_temp vec4) vec4 32 ssa_811 = intrinsic load_deref (ssa_810) (0) /* access=0 */ vec3 32 ssa_812 = vec3 ssa_811.x, ssa_811.y, ssa_811.z vec1 32 ssa_813 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_814 = intrinsic vulkan_resource_index (ssa_813) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_815 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_816 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_817 = iadd ssa_815, ssa_816 vec1 32 ssa_818 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_819 = iadd ssa_817, ssa_818 vec1 32 ssa_820 = intrinsic load_ubo (ssa_814, ssa_819) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_821 = load_const (0x00000004 /* 0.000000 */) 
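
[annotation: the vulkan_resource_index immediates printed as (set, binding, type) pair up with the comments in the dump itself: (0, 0, 6) is annotated desc_type=UBO, and (0, 3, 7) / (0, 4, 7) are annotated desc_type=SSBO. A tiny decoder sketch under that assumption; the enum names are invented here:]

    #include <stdio.h>

    enum desc_type { DESC_UBO = 6, DESC_SSBO = 7 };

    static void print_resource_index(unsigned set, unsigned binding,
                                     unsigned type)
    {
        printf("set=%u binding=%u %s\n", set, binding,
               type == DESC_UBO ? "UBO" : type == DESC_SSBO ? "SSBO" : "?");
    }

    int main(void)
    {
        print_resource_index(0, 0, 6); /* the constant buffer used above  */
        print_resource_index(0, 3, 7); /* the SSBOs indexed further below */
        print_resource_index(0, 4, 7);
        return 0;
    }
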
vec1 32 ssa_822 = iadd ssa_817, ssa_821 vec1 32 ssa_823 = intrinsic load_ubo (ssa_814, ssa_822) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_824 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_825 = iadd ssa_817, ssa_824 vec1 32 ssa_826 = intrinsic load_ubo (ssa_814, ssa_825) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_827 = vec3 ssa_820, ssa_823, ssa_826 vec1 32 ssa_828 = fdot3 ssa_812, ssa_827 vec1 32 ssa_829 = deref_var &r9 (function_temp vec4) vec4 32 ssa_830 = intrinsic load_deref (ssa_829) (0) /* access=0 */

[recovered run from a concurrently-dumped shader: vec1 32 ssa_1046 = iadd ssa_1045, ssa_71 vec1 32 ssa_1047 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1048 = intrinsic vulkan_resource_index (ssa_1047) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_1049 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1050 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1051 = ishl ssa_1046, ssa_1050 vec1 32 ssa_1052 = iadd ssa_1049, ssa_1051 vec1 32 ssa_1053 = intrinsic load_ssbo (ssa_1048, ssa_1052) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1054]

[concurrent shader dumps interleave here; unrecoverable fragments elided]

vec1 32 ssa_806 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_806, ssa_805) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_807 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_808 = intrinsic load_deref (ssa_807) (0) /* access=0 */ vec3 32 ssa_809 = vec3 ssa_808.x, ssa_808.y, ssa_808.z vec1 32 ssa_810 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_811 = intrinsic vulkan_resource_index (ssa_810) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_812 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_813 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_814 = iadd ssa_812, ssa_813 vec1 32 ssa_815 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_816 = iadd ssa_814, ssa_815 vec1 32 ssa_817 = intrinsic load_ubo (ssa_811, ssa_816) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_818 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_819 = iadd ssa_814, ssa_818 vec1 32 ssa_820 = intrinsic load_ubo (ssa_811, ssa_819) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_821 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_822 = iadd ssa_814, ssa_821 vec1 32 ssa_823 = intrinsic load_ubo (ssa_811, ssa_822) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_824 = vec3 ssa_817, ssa_820, ssa_823 vec1 32 ssa_825 = fdot3 ssa_809, ssa_824 vec1 32 ssa_826 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_827 = intrinsic load_deref (ssa_826) (0) /* access=0 */ vec4 32 ssa_828 = vec4 ssa_825, ssa_827.y, ssa_827.z, ssa_827.w vec1 32 ssa_829 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_829, ssa_828) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_830 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_831 = intrinsic load_deref (ssa_830) (0) /* access=0 */ vec3 32 ssa_832 = vec3 ssa_831.x, ssa_831.y, ssa_831.z vec1 32 ssa_833 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_834 = intrinsic vulkan_resource_index (ssa_833) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_835 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_836 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_837 = iadd ssa_835, ssa_836 vec1 32 ssa_838 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_839 = iadd ssa_837, ssa_838 vec1 32 ssa_840 = intrinsic load_ubo (ssa_834, ssa_839) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_841 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_842 = iadd ssa_837, ssa_841 vec1 32 ssa_843 = intrinsic load_ubo (ssa_834, ssa_842) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_844 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_845 = iadd ssa_837, ssa_844 vec1 32 ssa_846 = intrinsic load_ubo (ssa_834, ssa_845) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_847 = vec3 ssa_840, ssa_843, ssa_846 vec1 32 ssa_848 = fdot3 ssa_832, ssa_847 vec1 32 ssa_849 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_850 = intrinsic load_deref (ssa_849) (0) /* access=0 */ vec4 32 ssa_851 = vec4 ssa_848, ssa_850.y, ssa_850.z, ssa_850.w vec1 32 ssa_852 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_852, ssa_851) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_853 = deref_var &r13
(shader_temp vec4) vec4 32 ssa_854 = intrinsic load_deref (ssa_853) (0) /* access=0 */ vec3 32 ssa_855 = vec3 ssa_854.x, ssa_854.y, ssa_854.z vec1 32 ssa_856 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_857 = intrinsic vulkan_resource_index (ssa_856) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_858 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_859 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_860 = iadd ssa_858, ssa_859 vec1 32 ssa_861 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_862 = iadd ssa_860, ssa_861 vec1 32 ssa_863 = intrinsic load_ubo (ssa_857, ssa_862) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_864 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_865 = iadd ssa_860, ssa_864 vec1 32 ssa_866 = intrinsic load_ubo (ssa_857, ssa_865) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_867 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_868 = iadd ssa_860, ssa_867 vec1 32 ssa_869 = intrinsic load_ubo (ssa_857, ssa_868) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_870 = vec3 ssa_863, ssa_866, ssa_869 vec1 32 ssa_871 = fdot3 ssa_855, ssa_870 vec1 32 ssa_872 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_873 = intrinsic load_deref (ssa_872) (0) /* access=0 */ vec4 32 ssa_874 = vec4 ssa_873.x, ssa_871, ssa_873.z, ssa_873.w vec1 32 ssa_875 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_875, ssa_874) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_876 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_877 = intrinsic load_deref (ssa_876) (0) /* access=0 */ vec3 32 ssa_878 = vec3 ssa_877.x, ssa_877.y, ssa_877.z vec1 32 ssa_879 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_880 = intrinsic vulkan_resource_index (ssa_879) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_881 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_882 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_883 = iadd ssa_881, ssa_882 vec1 32 ssa_884 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_885 = iadd ssa_883, ssa_884 vec1 32 ssa_886 = intrinsic load_ubo (ssa_880, ssa_885) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_887 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_888 = iadd ssa_883, ssa_887 vec1 32 ssa_889 = intrinsic load_ubo (ssa_880, ssa_888) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_890 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_891 = iadd ssa_883, ssa_890 vec1 32 ssa_892 = intrinsic load_ubo (ssa_880, ssa_891) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_893 = vec3 ssa_886, ssa_889, ssa_892 vec1 32 ssa_894 = fdot3 ssa_878, ssa_893 vec1 32 ssa_895 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_896 = intrinsic load_deref (ssa_895) (0) /* access=0 */ vec4 32 ssa_897 = vec4 ssa_896.x, ssa_894, ssa_896.z, ssa_896.w vec1 32 ssa_898 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_898, ssa_897) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_899 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_900 = intrinsic load_deref (ssa_899) (0) /* access=0 */ vec3 32 ssa_901 = vec3 ssa_900.x, ssa_900.y, ssa_900.z vec1 32 ssa_902 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_903 = intrinsic vulkan_resource_index (ssa_902) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_904 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_905 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_906 = iadd ssa_904, ssa_905 vec1 32 ssa_907 = load_const (0x00000000 /* 0.000000 
*/) vec1 32 ssa_908 = iadd ssa_906, ssa_907 vec1 32 ssa_909 = intrinsic load_ubo (ssa_903, ssa_908) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_910 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_911 = iadd ssa_906, ssa_910 vec1 32 ssa_912 = intrinsic load_ubo (ssa_903, ssa_911) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_913 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_914 = iadd ssa_906, ssa_913 vec1 32 ssa_915 = intrinsic load_ubo (ssa_903, ssa_914) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_916 = vec3 ssa_909, ssa_912, ssa_915 vec1 32 ssa_917 = fdot3 ssa_901, ssa_916 vec1 32 ssa_918 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_919 = intrinsic load_deref (ssa_918) (0) /* access=0 */ vec4 32 ssa_920 = vec4 ssa_919.x, ssa_919.y, ssa_917, ssa_919.w vec1 32 ssa_921 = deref_var &r12 (shader_temp vec4) intrinsic store_deref (ssa_921, ssa_920) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_922 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_923 = intrinsic load_deref (ssa_922) (0) /* access=0 */ vec3 32 ssa_924 = vec3 ssa_923.x, ssa_923.y, ssa_923.z vec1 32 ssa_925 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_926 = intrinsic vulkan_resource_index (ssa_925) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_927 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_928 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_929 = iadd ssa_927, ssa_928 vec1 32 ssa_930 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_931 = iadd ssa_929, ssa_930 vec1 32 ssa_932 = intrinsic load_ubo (ssa_926, ssa_931) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_933 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_934 = iadd ssa_929, ssa_933 vec1 32 ssa_935 = intrinsic load_ubo (ssa_926, ssa_934) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_936 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_937 = iadd ssa_929, ssa_936 vec1 32 ssa_938 = intrinsic load_ubo (ssa_926, ssa_937) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_939 = vec3 ssa_932, ssa_935, ssa_938 vec1 32 ssa_940 = fdot3 ssa_924, ssa_939 vec1 32 ssa_941 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_942 = intrinsic load_deref (ssa_941) (0) /* access=0 */ vec4 32 ssa_943 = vec4 ssa_942.x, ssa_942.y, ssa_940, ssa_942.w vec1 32 ssa_944 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_944, ssa_943) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_945 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_946 = intrinsic load_deref (ssa_945) (0) /* access=0 */ vec1 32 ssa_947 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_948 = intrinsic load_deref (ssa_947) (0) /* access=0 */ vec1 32 ssa_949 = fdot4 ssa_946, ssa_948 vec1 32 ssa_950 = deref_var &o0 (shader_out vec4) vec4 32 ssa_951 = intrinsic load_deref (ssa_950) (0) /* access=0 */ vec4 32 ssa_952 = vec4 ssa_951.x, ssa_951.y, ssa_951.z, ssa_949 vec1 32 ssa_953 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_953, ssa_952) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_954 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_955 = intrinsic load_deref (ssa_954) (0) /* access=0 */ vec1 32 ssa_956 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_957 = intrinsic load_deref (ssa_956) (0) /* access=0 */ vec1 32 ssa_958 = fdot4 ssa_955, ssa_957 vec1 32 ssa_959 = deref_var &o0 (shader_out vec4) vec4 32 ssa_960 = intrinsic load_deref (ssa_959) (0) /* access=0 */ vec4 32 ssa_961 = vec4 ssa_960.x, ssa_960.y, ssa_958, ssa_960.w vec1 32 ssa_962 = deref_var &o0 
(shader_out vec4) intrinsic store_deref (ssa_962, ssa_961) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_963 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_964 = intrinsic load_deref (ssa_963) (0) /* access=0 */ vec1 32 ssa_965 = imov ssa_964.z vec1 32 ssa_966 = ushr ssa_76, ssa_75 vec1 32 ssa_967 = imul ssa_965, ssa_74 vec1 32 ssa_968 = iadd ssa_967, ssa_966 vec1 32 ssa_969 = iadd ssa_968, ssa_73 vec1 32 ssa_970 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_971 = intrinsic vulkan_resource_index (ssa_970) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_972 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_973 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_974 = ishl ssa_969, ssa_973 vec1 32 ssa_975 = iadd ssa_972, ssa_974 vec1 32 ssa_976 = intrinsic load_ssbo (ssa_971, ssa_975) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_977 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_978 = intrinsic load_deref (ssa_977) (0) /* access=0 */ vec4 32 ssa_979 = vec4 ssa_978.x, ssa_976, ssa_978.z, ssa_978.w vec1 32 ssa_980 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_980, ssa_979) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_981 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_982 = intrinsic load_deref (ssa_981) (0) /* access=0 */ vec1 32 ssa_983 = imov ssa_982.y vec1 32 ssa_984 = ishl ssa_983, ssa_72 vec1 32 ssa_985 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_986 = intrinsic load_deref (ssa_985) (0) /* access=0 */ vec4 32 ssa_987 = vec4 ssa_984, ssa_986.y, ssa_986.z, ssa_986.w vec1 32 ssa_988 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_988, ssa_987) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_989 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_990 = intrinsic load_deref (ssa_989) (0) /* access=0 */ vec2 32 ssa_991 = vec2 ssa_990.x, ssa_990.y vec2 32 ssa_992 = ishr ssa_991, ssa_71 vec1 32 ssa_993 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_994 = intrinsic load_deref (ssa_993) (0) /* access=0 */ vec4 32 ssa_995 = vec4 ssa_992.x, ssa_992.y, ssa_994.z, ssa_994.w vec1 32 ssa_996 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_996, ssa_995) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_997 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_998 = intrinsic load_deref (ssa_997) (0) /* access=0 */ vec2 32 ssa_999 = vec2 ssa_998.x, ssa_998.y vec2 32 ssa_1000 = i2f32 ssa_999 vec1 32 ssa_1001 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1002 = intrinsic load_deref (ssa_1001) (0) /* access=0 */ vec4 32 ssa_1003 = vec4 ssa_1000.x, ssa_1000.y, ssa_1002.z, ssa_1002.w vec1 32 ssa_1004 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1004, ssa_1003) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1005 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1006 = intrinsic load_deref (ssa_1005) (0) /* access=0 */ vec2 32 ssa_1007 = vec2 ssa_1006.x, ssa_1006.y vec2 32 ssa_1008 = fmul ssa_1007, ssa_70 vec1 32 ssa_1009 = deref_var &o1 (shader_out vec4) vec4 32 ssa_1010 = intrinsic load_deref (ssa_1009) (0) /* access=0 */ vec4 32 ssa_1011 = vec4 ssa_1008.x, ssa_1008.y, ssa_1010.z, ssa_1010.w vec1 32 ssa_1012 = deref_var &o1 (shader_out vec4) intrinsic store_deref (ssa_1012, ssa_1011) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1013 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1014 = intrinsic load_deref (ssa_1013) (0) /* access=0 */ vec1 32 ssa_1015 =
imov ssa_1014.w vec1 32 ssa_1016 = ushr ssa_69, ssa_68 vec1 32 ssa_1017 = imul ssa_1015, ssa_67 vec1 32 ssa_1018 = iadd ssa_1017, ssa_1016 vec1 32 ssa_1019 = iadd ssa_1018, ssa_66 vec1 32 ssa_1020 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1021 = intrinsic vulkan_resource_index (ssa_1020) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1022 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1023 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1024 = ishl ssa_1019, ssa_1023 vec1 32 ssa_1025 = iadd ssa_1022, ssa_1024 vec1 32 ssa_1026 = intrinsic load_ssbo (ssa_1021, ssa_1025) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1027 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1028 = intrinsic load_deref (ssa_1027) (0) /* access=0 */ vec4 32 ssa_1029 = vec4 ssa_1028.x, ssa_1028.y, ssa_1028.z, ssa_1026 vec1 32 ssa_1030 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1030, ssa_1029) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1031 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1032 = intrinsic load_deref (ssa_1031) (0) /* access=0 */ vec1 32 ssa_1033 = imov ssa_1032.y vec1 32 ssa_1034 = ushr ssa_65, ssa_64 vec1 32 ssa_1035 = imul ssa_1033, ssa_63 vec1 32 ssa_1036 = iadd ssa_1035, ssa_1034 vec1 32 ssa_1037 = iadd ssa_1036, ssa_62 vec1 32 ssa_1038 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1039 = intrinsic vulkan_resource_index (ssa_1038) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_1040 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1041 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1042 = ishl ssa_1037, ssa_1041 vec1 32 ssa_1043 = iadd ssa_1040, ssa_1042 vec1 32 ssa_1044 = intrinsic load_ssbo (ssa_1039, ssa_1043) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1045 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1046 = intrinsic load_deref (ssa_1045) (0) /* access=0 */ vec4 32 ssa_1047 = vec4 ssa_1044, ssa_1046.y, ssa_1046.z, ssa_1046.w vec1 32 ssa_1048 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1048, ssa_1047) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1049 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1050 = intrinsic load_deref (ssa_1049) (0) /* access=0 */ vec1 32 ssa_1051 = imov ssa_1050.x vec1 32 ssa_1052 = ishl ssa_1051, ssa_61 vec1 32 ssa_1053 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1054 = intrinsic load_deref (ssa_1053) (0) /* access=0 */ vec4 32 ssa_1055 = vec4 ssa_1054.x, ssa_1054.y, ssa_1052, ssa_1054.w vec1 32 ssa_1056 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1056, ssa_1055) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1057 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1058 = intrinsic load_deref (ssa_1057) (0) /* access=0 */ vec1 32 ssa_1059 = imov ssa_1058.w vec1 32 ssa_1060 = ishl ssa_1059, ssa_60 vec1 32 ssa_1061 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1062 = intrinsic load_deref (ssa_1061) (0) /* access=0 */ vec4 32 ssa_1063 = vec4 ssa_1062.x, ssa_1062.y, ssa_1060, ssa_1062.w vec1 32 ssa_1064 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1064, ssa_1063) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1065 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1066 = intrinsic load_deref (ssa_1065) (0) /* access=0 */ vec2 32 ssa_1067 = vec2 ssa_1066.z, ssa_1066.w vec2 32 ssa_1068 = ishr ssa_1067, ssa_59 vec1 32 ssa_1069 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1070 = intrinsic load_deref (ssa_1069) (0) /* access=0 
*/ vec4 32 ssa_1071 = vec4 ssa_1068.x, ssa_1070.y, ssa_1068.y, ssa_1070.w vec1 32 ssa_1072 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1072, ssa_1071) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1073 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1074 = intrinsic load_deref (ssa_1073) (0) /* access=0 */ vec2 32 ssa_1075 = vec2 ssa_1074.x, ssa_1074.z vec2 32 ssa_1076 = i2f32 ssa_1075 vec1 32 ssa_1077 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1078 = intrinsic load_deref (ssa_1077) (0) /* access=0 */ vec4 32 ssa_1079 = vec4 ssa_1076.x, ssa_1078.y, ssa_1076.y, ssa_1078.w vec1 32 ssa_1080 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1080, ssa_1079) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1081 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1082 = intrinsic load_deref (ssa_1081) (0) /* access=0 */ vec2 32 ssa_1083 = vec2 ssa_1082.x, ssa_1082.z vec2 32 ssa_1084 = fmul ssa_1083, ssa_58 vec1 32 ssa_1085 = deref_var &o1 (shader_out vec4) vec4 32 ssa_1086 = intrinsic load_deref (ssa_1085) (0) /* access=0 */ vec4 32 ssa_1087 = vec4 ssa_1086.x, ssa_1086.y, ssa_1084.x, ssa_1084.y vec1 32 ssa_1088 = deref_var &o1 (shader_out vec4) intrinsic store_deref (ssa_1088, ssa_1087) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1089 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1090 = intrinsic load_deref (ssa_1089) (0) /* access=0 */ vec1 32 ssa_1091 = imov ssa_1090.w vec1 32 ssa_1092 = ushr ssa_57, ssa_56 vec1 32 ssa_1093 = imul ssa_1091, ssa_55 vec1 32 ssa_1094 = iadd ssa_1093, ssa_1092 vec1 32 ssa_1095 = iadd ssa_1094, ssa_54 vec1 32 ssa_1096 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1097 = intrinsic vulkan_resource_index (ssa_1096) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1098 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1099 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1100 = ishl ssa_1095, ssa_1099 vec1 32 ssa_1101 = iadd ssa_1098, ssa_1100 vec1 32 ssa_1102 = intrinsic load_ssbo (ssa_1097, ssa_1101) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1103 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1104 = intrinsic load_deref (ssa_1103) (0) /* access=0 */ vec4 32 ssa_1105 = vec4 ssa_1102, ssa_1104.y, ssa_1104.z, ssa_1104.w vec1 32 ssa_1106 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1106, ssa_1105) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1107 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1108 = intrinsic load_deref (ssa_1107) (0) /* access=0 */ vec1 32 ssa_1109 = imov ssa_1108.y vec1 32 ssa_1110 = ushr ssa_53, ssa_52 vec1 32 ssa_1111 = imul ssa_1109, ssa_51 vec1 32 ssa_1112 = iadd ssa_1111, ssa_1110 vec1 32 ssa_1113 = iadd ssa_1112, ssa_50 vec1 32 ssa_1114 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1115 = intrinsic vulkan_resource_index (ssa_1114) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_1116 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1117 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1118 = ishl ssa_1113, ssa_1117 vec1 32 ssa_1119 = iadd ssa_1116, ssa_1118 vec1 32 ssa_1120 = intrinsic load_ssbo (ssa_1115, ssa_1119) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1121 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1122 = intrinsic load_deref (ssa_1121) (0) /* access=0 */ vec4 32 ssa_1123 = vec4 ssa_1122.x, ssa_1120, ssa_1122.z, ssa_1122.w vec1 32 ssa_1124 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1124, ssa_1123) 
(15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1125 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1126 = intrinsic load_deref (ssa_1125) (0) /* access=0 */ vec3 32 ssa_1127 = vec3 ssa_1126.x, ssa_1126.x, ssa_1126.x vec3 32 ssa_1128 = ushr ssa_1127, ssa_49 vec1 32 ssa_1129 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1130 = intrinsic load_deref (ssa_1129) (0) /* access=0 */ vec4 32 ssa_1131 = vec4 ssa_1130.x, ssa_1128.x, ssa_1128.y, ssa_1128.z vec1 32 ssa_1132 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1132, ssa_1131) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1133 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1134 = intrinsic load_deref (ssa_1133) (0) /* access=0 */ vec4 32 ssa_1135 = iand ssa_1134, ssa_48 vec1 32 ssa_1136 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1136, ssa_1135) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1137 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1138 = intrinsic load_deref (ssa_1137) (0) /* access=0 */ vec4 32 ssa_1139 = u2f32 ssa_1138 vec1 32 ssa_1140 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1140, ssa_1139) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1141 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1142 = intrinsic load_deref (ssa_1141) (0) /* access=0 */ vec4 32 ssa_1143 = fmul ssa_1142, ssa_47 vec1 32 ssa_1144 = deref_var &o2 (shader_out vec4) intrinsic store_deref (ssa_1144, ssa_1143) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1145 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1146 = intrinsic load_deref (ssa_1145) (0) /* access=0 */ vec1 32 ssa_1147 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1148 = intrinsic load_deref (ssa_1147) (0) /* access=0 */ vec1 32 ssa_1149 = fdot4 ssa_1146, ssa_1148 vec1 32 ssa_1150 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1151 = intrinsic load_deref (ssa_1150) (0) /* access=0 */ vec4 32 ssa_1152 = vec4 ssa_1149, ssa_1151.y, ssa_1151.z, ssa_1151.w vec1 32 ssa_1153 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1153, ssa_1152) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1154 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1155 = intrinsic load_deref (ssa_1154) (0) /* access=0 */ vec1 32 ssa_1156 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_1157 = intrinsic load_deref (ssa_1156) (0) /* access=0 */ vec1 32 ssa_1158 = fdot4 ssa_1155, ssa_1157 vec1 32 ssa_1159 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1160 = intrinsic load_deref (ssa_1159) (0) /* access=0 */ vec4 32 ssa_1161 = vec4 ssa_1160.x, ssa_1158, ssa_1160.z, ssa_1160.w vec1 32 ssa_1162 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1162, ssa_1161) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1163 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1164 = intrinsic load_deref (ssa_1163) (0) /* access=0 */ vec1 32 ssa_1165 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_1166 = intrinsic load_deref (ssa_1165) (0) /* access=0 */ vec1 32 ssa_1167 = fdot4 ssa_1164, ssa_1166 vec1 32 ssa_1168 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1169 = intrinsic load_deref (ssa_1168) (0) /* access=0 */ vec4 32 ssa_1170 = vec4 ssa_1169.x, ssa_1169.y, ssa_1167, ssa_1169.w vec1 32 ssa_1171 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1171, ssa_1170) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1172 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1173 = intrinsic load_deref (ssa_1172) (0) /* access=0 */ vec4 32 ssa_1174 = vec4 ssa_1173.x, ssa_1173.y, ssa_1173.z, ssa_46 vec1 32 ssa_1175 = deref_var &o3 (shader_out 
vec4) intrinsic store_deref (ssa_1175, ssa_1174) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1176 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1177 = intrinsic load_deref (ssa_1176) (0) /* access=0 */ vec3 32 ssa_1178 = vec3 ssa_1177.x, ssa_1177.x, ssa_1177.x vec3 32 ssa_1179 = ushr ssa_1178, ssa_45 vec1 32 ssa_1180 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1181 = intrinsic load_deref (ssa_1180) (0) /* access=0 */ vec4 32 ssa_1182 = vec4 ssa_1181.x, ssa_1179.x, ssa_1179.y, ssa_1179.z vec1 32 ssa_1183 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1183, ssa_1182) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1184 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1185 = intrinsic load_deref (ssa_1184) (0) /* access=0 */ vec4 32 ssa_1186 = iand ssa_1185, ssa_44 vec1 32 ssa_1187 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1187, ssa_1186) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1188 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1189 = intrinsic load_deref (ssa_1188) (0) /* access=0 */ vec4 32 ssa_1190 = u2f32 ssa_1189 vec1 32 ssa_1191 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1191, ssa_1190) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1192 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1193 = intrinsic load_deref (ssa_1192) (0) /* access=0 */ vec4 32 ssa_1194 = ffma ssa_1193, ssa_43, ssa_42 vec1 32 ssa_1195 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1195, ssa_1194) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1196 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1197 = intrinsic load_deref (ssa_1196) (0) /* access=0 */ vec4 32 ssa_1198 = fmin ssa_1197, ssa_41 vec1 32 ssa_1199 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1199, ssa_1198) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1200 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1201 = intrinsic load_deref (ssa_1200) (0) /* access=0 */ vec2 32 ssa_1202 = vec2 ssa_1201.y, ssa_1201.w vec1 32 ssa_1203 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1204 = intrinsic load_deref (ssa_1203) (0) /* access=0 */ vec2 32 ssa_1205 = vec2 ssa_1204.x, ssa_1204.z vec2 32 ssa_1206 = fadd ssa_1202, ssa_1205 vec1 32 ssa_1207 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1208 = intrinsic load_deref (ssa_1207) (0) /* access=0 */ vec4 32 ssa_1209 = vec4 ssa_1206.x, ssa_1206.y, ssa_1208.z, ssa_1208.w vec1 32 ssa_1210 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1210, ssa_1209) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1211 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1212 = intrinsic load_deref (ssa_1211) (0) /* access=0 */ vec2 32 ssa_1213 = vec2 ssa_1212.y, ssa_1212.w vec2 32 ssa_1214 = fneg ssa_1213 vec1 32 ssa_1215 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1216 = intrinsic load_deref (ssa_1215) (0) /* access=0 */ vec2 32 ssa_1217 = vec2 ssa_1216.x, ssa_1216.z vec2 32 ssa_1218 = fadd ssa_1214, ssa_1217 vec1 32 ssa_1219 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1220 = intrinsic load_deref (ssa_1219) (0) /* access=0 */ vec4 32 ssa_1221 = vec4 ssa_1220.x, ssa_1220.y, ssa_1218.x, ssa_1218.y vec1 32 ssa_1222 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1222, ssa_1221) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1223 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1224 = intrinsic load_deref (ssa_1223) (0) /* access=0 */ vec2 32 ssa_1225 = vec2 ssa_1224.y, ssa_1224.w vec2 32 ssa_1226 = fmul ssa_1225, ssa_40 vec1 32 ssa_1227 = deref_var &r1 (shader_temp vec4) 
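
[annotation: the ushr-of-.xxx / iand / u2f32 / ffma / fmin run just above is the standard unpack of bit-packed vertex data into floats. The shift, mask, scale, bias and clamp constants (ssa_45, ssa_44, ssa_43, ssa_42, ssa_41) are defined earlier in the dump and not visible in this excerpt, so the 10-bit fields in this C sketch are an assumption:]

    #include <stdint.h>
    #include <stdio.h>

    static void unpack3(uint32_t packed, float out[3])
    {
        const int shift[3] = {0, 10, 20};   /* ushr ssa_1178, ssa_45 */
        for (int i = 0; i < 3; i++) {
            uint32_t bits = (packed >> shift[i]) & 0x3ffu; /* iand ssa_44 */
            float f = (float)bits;                         /* u2f32       */
            f = f * (2.0f / 1023.0f) + -1.0f;              /* ffma        */
            out[i] = f < 1.0f ? f : 1.0f;                  /* fmin ssa_41 */
        }
    }

    int main(void)
    {
        float n[3];
        unpack3(0x200ffa00u, n);
        printf("%f %f %f\n", n[0], n[1], n[2]);
        return 0;
    }
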
vec4 32 ssa_1228 = intrinsic load_deref (ssa_1227) (0) /* access=0 */ vec4 32 ssa_1229 = vec4 ssa_1228.x, ssa_1226.x, ssa_1226.y, ssa_1228.w vec1 32 ssa_1230 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1230, ssa_1229) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1231 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1232 = intrinsic load_deref (ssa_1231) (0) /* access=0 */ vec2 32 ssa_1233 = vec2 ssa_1232.x, ssa_1232.z vec2 32 ssa_1234 = fmul ssa_1233, ssa_39 vec1 32 ssa_1235 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1236 = intrinsic load_deref (ssa_1235) (0) /* access=0 */ vec4 32 ssa_1237 = vec4 ssa_1234.x, ssa_1234.y, ssa_1236.z, ssa_1236.w vec1 32 ssa_1238 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1238, ssa_1237) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1239 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1240 = intrinsic load_deref (ssa_1239) (0) /* access=0 */ vec1 32 ssa_1241 = imov ssa_1240.y vec1 32 ssa_1242 = fabs ssa_1241 vec1 32 ssa_1243 = fneg ssa_1242 vec1 32 ssa_1244 = fadd ssa_1243, ssa_38 vec1 32 ssa_1245 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1246 = intrinsic load_deref (ssa_1245) (0) /* access=0 */ vec4 32 ssa_1247 = vec4 ssa_1244, ssa_1246.y, ssa_1246.z, ssa_1246.w vec1 32 ssa_1248 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1248, ssa_1247) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1249 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1250 = intrinsic load_deref (ssa_1249) (0) /* access=0 */ vec1 32 ssa_1251 = imov ssa_1250.z vec1 32 ssa_1252 = fabs ssa_1251 vec1 32 ssa_1253 = fneg ssa_1252 vec1 32 ssa_1254 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1255 = intrinsic load_deref (ssa_1254) (0) /* access=0 */ vec1 32 ssa_1256 = imov ssa_1255.x vec1 32 ssa_1257 = fadd ssa_1253, ssa_1256 vec1 32 ssa_1258 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1259 = intrinsic load_deref (ssa_1258) (0) /* access=0 */ vec4 32 ssa_1260 = vec4 ssa_1259.x, ssa_1259.y, ssa_1259.z, ssa_1257 vec1 32 ssa_1261 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1261, ssa_1260) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1262 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1263 = intrinsic load_deref (ssa_1262) (0) /* access=0 */ vec3 32 ssa_1264 = vec3 ssa_1263.y, ssa_1263.z, ssa_1263.w vec1 32 ssa_1265 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1266 = intrinsic load_deref (ssa_1265) (0) /* access=0 */ vec3 32 ssa_1267 = vec3 ssa_1266.y, ssa_1266.z, ssa_1266.w vec1 32 ssa_1268 = fdot3 ssa_1264, ssa_1267 vec1 32 ssa_1269 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1270 = intrinsic load_deref (ssa_1269) (0) /* access=0 */ vec4 32 ssa_1271 = vec4 ssa_1268, ssa_1270.y, ssa_1270.z, ssa_1270.w vec1 32 ssa_1272 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1272, ssa_1271) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1273 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1274 = intrinsic load_deref (ssa_1273) (0) /* access=0 */ vec1 32 ssa_1275 = imov ssa_1274.x vec1 32 ssa_1276 = frsq ssa_1275 vec1 32 ssa_1277 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1278 = intrinsic load_deref (ssa_1277) (0) /* access=0 */ vec4 32 ssa_1279 = vec4 ssa_1276, ssa_1278.y, ssa_1278.z, ssa_1278.w vec1 32 ssa_1280 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1280, ssa_1279) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1281 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1282 = intrinsic load_deref (ssa_1281) (0) /* access=0 */ vec3 32 ssa_1283 = vec3 
ssa_1282.x, ssa_1282.x, ssa_1282.x vec1 32 ssa_1284 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1285 = intrinsic load_deref (ssa_1284) (0) /* access=0 */ vec3 32 ssa_1286 = vec3 ssa_1285.y, ssa_1285.z, ssa_1285.w vec3 32 ssa_1287 = fmul ssa_1283, ssa_1286 vec1 32 ssa_1288 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1289 = intrinsic load_deref (ssa_1288) (0) /* access=0 */ vec4 32 ssa_1290 = vec4 ssa_1289.x, ssa_1287.x, ssa_1287.y, ssa_1287.z vec1 32 ssa_1291 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1291, ssa_1290) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1292 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1293 = intrinsic load_deref (ssa_1292) (0) /* access=0 */ vec1 32 ssa_1294 = imov ssa_1293.y vec1 32 ssa_1295 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1296 = intrinsic load_deref (ssa_1295) (0) /* access=0 */ vec1 32 ssa_1297 = imov ssa_1296

[recovered run from a concurrently-dumped shader: vec1 32 ssa_1162 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1163 = intrinsic load_deref (ssa_1162) (0) /* access=0 */ vec3 32 ssa_1164 = vec3 ssa_1163.x, ssa_1163.y, ssa_1163.z vec1 32 ssa_1165 = fdot3 ssa_1161, ssa_1164 vec1 32 ssa_1166 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1167 = intrinsic load_deref (ssa_1166) (0) /* access=0 */ vec4 32 ssa_1168 = vec4]

[concurrent shader dumps interleave here; unrecoverable fragments elided]

vec1 32 ssa_912 = deref_var &r16 (shader_temp vec4) vec4 32 ssa_913 = intrinsic load_deref (ssa_912) (0) /* access=0 */ vec3 32 ssa_914 = vec3 ssa_913.x, ssa_913.y, ssa_913.z vec1 32 ssa_915 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_916 = intrinsic vulkan_resource_index (ssa_915) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_917 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_918 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_919 = iadd ssa_917, ssa_918 vec1 32 ssa_920 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_921 = iadd ssa_919, ssa_920 vec1 32 ssa_922 = intrinsic load_ubo (ssa_916, ssa_921) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_923 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_924 = iadd ssa_919, ssa_923 vec1 32 ssa_925 = intrinsic load_ubo (ssa_916, ssa_924) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_926 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_927 = iadd ssa_919, ssa_926 vec1 32 ssa_928 = intrinsic load_ubo (ssa_916, ssa_927) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_929 = vec3 ssa_922, ssa_925, ssa_928 vec1 32 ssa_930 = fdot3 ssa_914, ssa_929 vec1 32 ssa_931 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_932 = intrinsic load_deref (ssa_931) (0) /* access=0 */ vec4 32 ssa_933 = vec4 ssa_932.x, ssa_930, ssa_932.z, ssa_932.w vec1 32 ssa_934
= deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_934, ssa_933) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_935 = deref_var &r17 (shader_temp vec4) vec4 32 ssa_936 = intrinsic load_deref (ssa_935) (0) /* access=0 */ vec3 32 ssa_937 = vec3 ssa_936.x, ssa_936.y, ssa_936.z vec1 32 ssa_938 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_939 = intrinsic vulkan_resource_index (ssa_938) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_940 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_941 = load_const (0x00000230 /* 0.000000 */) vec1 32 ssa_942 = iadd ssa_940, ssa_941 vec1 32 ssa_943 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_944 = iadd ssa_942, ssa_943 vec1 32 ssa_945 = intrinsic load_ubo (ssa_939, ssa_944) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_946 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_947 = iadd ssa_942, ssa_946 vec1 32 ssa_948 = intrinsic load_ubo (ssa_939, ssa_947) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_949 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_950 = iadd ssa_942, ssa_949 vec1 32 ssa_951 = intrinsic load_ubo (ssa_939, ssa_950) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_952 = vec3 ssa_945, ssa_948, ssa_951 vec1 32 ssa_953 = fdot3 ssa_937, ssa_952 vec1 32 ssa_954 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_955 = intrinsic load_deref (ssa_954) (0) /* access=0 */ vec4 32 ssa_956 = vec4 ssa_955.x, ssa_955.y, ssa_953, ssa_955.w vec1 32 ssa_957 = deref_var &r14 (shader_temp vec4) intrinsic store_deref (ssa_957, ssa_956) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_958 = deref_var &r17 (shader_temp vec4) vec4 32 ssa_959 = intrinsic load_deref (ssa_958) (0) /* access=0 */ vec3 32 ssa_960 = vec3 ssa_959.x, ssa_959.y, ssa_959.z vec1 32 ssa_961 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_962 = intrinsic vulkan_resource_index (ssa_961) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_963 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_964 = load_const (0x00000240 /* 0.000000 */) vec1 32 ssa_965 = iadd ssa_963, ssa_964 vec1 32 ssa_966 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_967 = iadd ssa_965, ssa_966 vec1 32 ssa_968 = intrinsic load_ubo (ssa_962, ssa_967) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_969 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_970 = iadd ssa_965, ssa_969 vec1 32 ssa_971 = intrinsic load_ubo (ssa_962, ssa_970) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_972 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_973 = iadd ssa_965, ssa_972 vec1 32 ssa_974 = intrinsic load_ubo (ssa_962, ssa_973) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_975 = vec3 ssa_968, ssa_971, ssa_974 vec1 32 ssa_976 = fdot3 ssa_960, ssa_975 vec1 32 ssa_977 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_978 = intrinsic load_deref (ssa_977) (0) /* access=0 */ vec4 32 ssa_979 = vec4 ssa_978.x, ssa_978.y, ssa_976, ssa_978.w vec1 32 ssa_980 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_980, ssa_979) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_981 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_982 = intrinsic load_deref (ssa_981) (0) /* access=0 */ vec1 32 ssa_983 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_984 = intrinsic load_deref (ssa_983) (0) /* access=0 */ vec1 32 ssa_985 = fdot4 ssa_982, ssa_984 vec1 32 ssa_986 = deref_var &o0 (shader_out vec4) vec4 32 ssa_987 = intrinsic load_deref (ssa_986) (0) /* access=0 */ vec4 32 
ssa_988 = vec4 ssa_987.x, ssa_987.y, ssa_987.z, ssa_985 vec1 32 ssa_989 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_989, ssa_988) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_990 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_991 = intrinsic load_deref (ssa_990) (0) /* access=0 */ vec1 32 ssa_992 = deref_var &r14 (shader_temp vec4) vec4 32 ssa_993 = intrinsic load_deref (ssa_992) (0) /* access=0 */ vec1 32 ssa_994 = fdot4 ssa_991, ssa_993 vec1 32 ssa_995 = deref_var &o0 (shader_out vec4) vec4 32 ssa_996 = intrinsic load_deref (ssa_995) (0) /* access=0 */ vec4 32 ssa_997 = vec4 ssa_996.x, ssa_996.y, ssa_994, ssa_996.w vec1 32 ssa_998 = deref_var &o0 (shader_out vec4) intrinsic store_deref (ssa_998, ssa_997) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_999 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1000 = intrinsic load_deref (ssa_999) (0) /* access=0 */ vec1 32 ssa_1001 = imov ssa_1000.z vec1 32 ssa_1002 = ushr ssa_66, ssa_65 vec1 32 ssa_1003 = imul ssa_1001, ssa_64 vec1 32 ssa_1004 = iadd ssa_1003, ssa_1002 vec1 32 ssa_1005 = iadd ssa_1004, ssa_63 vec1 32 ssa_1006 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1007 = intrinsic vulkan_resource_index (ssa_1006) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1008 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1009 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1010 = ishl ssa_1005, ssa_1009 vec1 32 ssa_1011 = iadd ssa_1008, ssa_1010 vec1 32 ssa_1012 = intrinsic load_ssbo (ssa_1007, ssa_1011) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1013 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1014 = intrinsic load_deref (ssa_1013) (0) /* access=0 */ vec4 32 ssa_1015 = vec4 ssa_1014.x, ssa_1012, ssa_1014.z, ssa_1014.w vec1 32 ssa_1016 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_1016, ssa_1015) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1017 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1018 = intrinsic load_deref (ssa_1017) (0) /* access=0 */ vec1 32 ssa_1019 = imov ssa_1018.y vec1 32 ssa_1020 = ishl ssa_1019, ssa_62 vec1 32 ssa_1021 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1022 = intrinsic load_deref (ssa_1021) (0) /* access=0 */ vec4 32 ssa_1023 = vec4 ssa_1020, ssa_1022.y, ssa_1022.z, ssa_1022.w vec1 32 ssa_1024 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_1024, ssa_1023) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1025 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1026 = intrinsic load_deref (ssa_1025) (0) /* access=0 */ vec2 32 ssa_1027 = vec2 ssa_1026.x, ssa_1026.y vec2 32 ssa_1028 = ishr ssa_1027, ssa_61 vec1 32 ssa_1029 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1030 = intrinsic load_deref (ssa_1029) (0) /* access=0 */ vec4 32 ssa_1031 = vec4 ssa_1028.x, ssa_1030.y, ssa_1028.y, ssa_1030.w vec1 32 ssa_1032 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1032, ssa_1031) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1033 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1034 = intrinsic load_deref (ssa_1033) (0) /* access=0 */ vec2 32 ssa_1035 = vec2 ssa_1034.x, ssa_1034.z vec2 32 ssa_1036 = i2f32 ssa_1035 vec1 32 ssa_1037 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1038 = intrinsic load_deref (ssa_1037) (0) /* access=0 */ vec4 32 ssa_1039 = vec4 ssa_1036.x, ssa_1038.y, ssa_1036.y, ssa_1038.w vec1 32 ssa_1040 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1040, ssa_1039) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1041 = 
deref_var &r0 (shader_temp vec4) vec4 32 ssa_1042 = intrinsic load_deref (ssa_1041) (0) /* access=0 */ vec2 32 ssa_1043 = vec2 ssa_1042.x, ssa_1042.z vec2 32 ssa_1044 = fmul ssa_1043, ssa_60 vec1 32 ssa_1045 = deref_var &o1 (shader_out vec4) vec4 32 ssa_1046 = intrinsic load_deref (ssa_1045) (0) /* access=0 */ vec4 32 ssa_1047 = vec4 ssa_1044.x, ssa_1044.y, ssa_1046.z, ssa_1046.w vec1 32 ssa_1048 = deref_var &o1 (shader_out vec4) intrinsic store_deref (ssa_1048, ssa_1047) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1049 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1050 = intrinsic load_deref (ssa_1049) (0) /* access=0 */ vec1 32 ssa_1051 = imov ssa_1050.w vec1 32 ssa_1052 = ushr ssa_59, ssa_58 vec1 32 ssa_1053 = imul ssa_1051, ssa_57 vec1 32 ssa_1054 = iadd ssa_1053, ssa_1052 vec1 32 ssa_1055 = iadd ssa_1054, ssa_56 vec1 32 ssa_1056 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1057 = intrinsic vulkan_resource_index (ssa_1056) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1058 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1059 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1060 = ishl ssa_1055, ssa_1059 vec1 32 ssa_1061 = iadd ssa_1058, ssa_1060 vec1 32 ssa_1062 = intrinsic load_ssbo (ssa_1057, ssa_1061) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1063 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1064 = intrinsic load_deref (ssa_1063) (0) /* access=0 */ vec4 32 ssa_1065 = vec4 ssa_1064.x, ssa_1064.y, ssa_1064.z, ssa_1062 vec1 32 ssa_1066 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1066, ssa_1065) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1067 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1068 = intrinsic load_deref (ssa_1067) (0) /* access=0 */ vec1 32 ssa_1069 = imov ssa_1068.y vec1 32 ssa_1070 = ushr ssa_55, ssa_54 vec1 32 ssa_1071 = imul ssa_1069, ssa_53 vec1 32 ssa_1072 = iadd ssa_1071, ssa_1070 vec1 32 ssa_1073 = iadd ssa_1072, ssa_52 vec1 32 ssa_1074 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1075 = intrinsic vulkan_resource_index (ssa_1074) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_1076 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1077 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1078 = ishl ssa_1073, ssa_1077 vec1 32 ssa_1079 = iadd ssa_1076, ssa_1078 vec1 32 ssa_1080 = intrinsic load_ssbo (ssa_1075, ssa_1079) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1081 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1082 = intrinsic load_deref (ssa_1081) (0) /* access=0 */ vec4 32 ssa_1083 = vec4 ssa_1080, ssa_1082.y, ssa_1082.z, ssa_1082.w vec1 32 ssa_1084 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1084, ssa_1083) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1085 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1086 = intrinsic load_deref (ssa_1085) (0) /* access=0 */ vec1 32 ssa_1087 = imov ssa_1086.x vec1 32 ssa_1088 = ishl ssa_1087, ssa_51 vec1 32 ssa_1089 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1090 = intrinsic load_deref (ssa_1089) (0) /* access=0 */ vec4 32 ssa_1091 = vec4 ssa_1090.x, ssa_1090.y, ssa_1088, ssa_1090.w vec1 32 ssa_1092 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1092, ssa_1091) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1093 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1094 = intrinsic load_deref (ssa_1093) (0) /* access=0 */ vec1 32 ssa_1095 = imov ssa_1094.w vec1 32 ssa_1096 = ishl ssa_1095, 
ssa_50 vec1 32 ssa_1097 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1098 = intrinsic load_deref (ssa_1097) (0) /* access=0 */ vec4 32 ssa_1099 = vec4 ssa_1098.x, ssa_1098.y, ssa_1096, ssa_1098.w vec1 32 ssa_1100 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1100, ssa_1099) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1101 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1102 = intrinsic load_deref (ssa_1101) (0) /* access=0 */ vec2 32 ssa_1103 = vec2 ssa_1102.z, ssa_1102.w vec2 32 ssa_1104 = ishr ssa_1103, ssa_49 vec1 32 ssa_1105 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1106 = intrinsic load_deref (ssa_1105) (0) /* access=0 */ vec4 32 ssa_1107 = vec4 ssa_1104.x, ssa_1106.y, ssa_1104.y, ssa_1106.w vec1 32 ssa_1108 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1108, ssa_1107) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1109 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1110 = intrinsic load_deref (ssa_1109) (0) /* access=0 */ vec2 32 ssa_1111 = vec2 ssa_1110.x, ssa_1110.z vec2 32 ssa_1112 = i2f32 ssa_1111 vec1 32 ssa_1113 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1114 = intrinsic load_deref (ssa_1113) (0) /* access=0 */ vec4 32 ssa_1115 = vec4 ssa_1112.x, ssa_1114.y, ssa_1112.y, ssa_1114.w vec1 32 ssa_1116 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1116, ssa_1115) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1117 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1118 = intrinsic load_deref (ssa_1117) (0) /* access=0 */ vec2 32 ssa_1119 = vec2 ssa_1118.x, ssa_1118.z vec2 32 ssa_1120 = fmul ssa_1119, ssa_48 vec1 32 ssa_1121 = deref_var &o1 (shader_out vec4) vec4 32 ssa_1122 = intrinsic load_deref (ssa_1121) (0) /* access=0 */ vec4 32 ssa_1123 = vec4 ssa_1122.x, ssa_1122.y, ssa_1120.x, ssa_1120.y vec1 32 ssa_1124 = deref_var &o1 (shader_out vec4) intrinsic store_deref (ssa_1124, ssa_1123) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1125 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1126 = intrinsic load_deref (ssa_1125) (0) /* access=0 */ vec1 32 ssa_1127 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1128 = intrinsic load_deref (ssa_1127) (0) /* access=0 */ vec1 32 ssa_1129 = fdot4 ssa_1126, ssa_1128 vec1 32 ssa_1130 = deref_var &o2 (shader_out vec4) vec4 32 ssa_1131 = intrinsic load_deref (ssa_1130) (0) /* access=0 */ vec4 32 ssa_1132 = vec4 ssa_1129, ssa_1131.y, ssa_1131.z, ssa_1131.w vec1 32 ssa_1133 = deref_var &o2 (shader_out vec4) intrinsic store_deref (ssa_1133, ssa_1132) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1134 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1135 = intrinsic load_deref (ssa_1134) (0) /* access=0 */ vec1 32 ssa_1136 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_1137 = intrinsic load_deref (ssa_1136) (0) /* access=0 */ vec1 32 ssa_1654 = deref_var &r4 (shader_temp vec4) vec1 32 ssa_1054 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1055 = intrinsic load_deref (ssa_1054) (0) /* access=0 */ vec4 32 ssa_1056 = vec4 ssa_1053, ssa_1055.y, ssa_1055.z, ssa_1055.w vec1 32 ssa_1057 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1057, ssa_1056) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1058 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1059 = intrinsic load_deref (ssa_1058) (0) /* access=0 */ vec1 32 ssa_1060 = imov ssa_1059.x vec1 32 ssa_1061 = ishl ssa_1060, ssa_70 vec1 32 ssa_1062 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1063 = intrinsic load_deref (ssa_1062) (0) /* access=0 */ vec4 32 ssa_1064 = vec4 ssa_1063.x, ssa_1063.y, ssa_1061, ssa_1063.w vec1 32 ssa_1065 =
deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1065, ssa_1064) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1066 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1067 = intrinsic load_deref (ssa_1066) (0) /* access=0 */ vec1 32 ssa_1068 = imov ssa_1067.w vec1 32 ssa_1069 = ishl ssa_1068, ssa_69 vec1 32 ssa_1070 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1071 = intrinsic load_deref (ssa_1070) (0) /* access=0 */ vec4 32 ssa_1072 = vec4 ssa_1071.x, ssa_1071.y, ssa_1069, ssa_1071.w vec1 32 ssa_1073 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1073, ssa_1072) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1074 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1075 = intrinsic load_deref (ssa_1074) (0) /* access=0 */ vec2 32 ssa_1076 = vec2 ssa_1075.z, ssa_1075.w vec2 32 ssa_1077 = ishr ssa_1076, ssa_2346 vec1 32 ssa_1078 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1079 = intrinsic load_deref (ssa_1078) (0) /* access=0 */ vec4 32 ssa_1080 = vec4 ssa_1077.x, ssa_1079.y, ssa_1077.y, ssa_1079.w vec1 32 ssa_1081 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1081, ssa_1080) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1082 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1083 = intrinsic load_deref (ssa_1082) (0) /* access=0 */ vec2 32 ssa_1084 = vec2 ssa_1083.x, ssa_1083.z vec2 32 ssa_1085 = i2f32 ssa_1084 vec1 32 ssa_1086 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1087 = intrinsic load_deref (ssa_1086) (0) /* access=0 */ vec4 32 ssa_1088 = vec4 ssa_1085.x, ssa_1087.y, ssa_1085.y, ssa_1087.w vec1 32 ssa_1089 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1089, ssa_1088) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1090 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1091 = intrinsic load_deref (ssa_1090) (0) /* access=0 */ vec2 32 ssa_1092 = vec2 ssa_1091.x, ssa_1091.z vec2 32 ssa_1093 = fmul ssa_1092, ssa_2343 vec1 32 ssa_1094 = deref_var &out@o1-temp (function_temp vec4) vec4 32 ssa_1095 = intrinsic load_deref (ssa_1094) (0) /* access=0 */ vec4 32 ssa_1096 = vec4 ssa_1095.x, ssa_1095.y, ssa_1093.x, ssa_1093.y vec1 32 ssa_1097 = deref_var &out@o1-temp (function_temp vec4) intrinsic store_deref (ssa_1097, ssa_1096) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1098 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1099 = intrinsic load_deref (ssa_1098) (0) /* access=0 */ vec1 32 ssa_1100 = imov ssa_1099.w vec1 32 ssa_1101 = ushr ssa_66, ssa_65 vec1 32 ssa_1102 = imul ssa_1100, ssa_64 vec1 32 ssa_1103 = iadd ssa_1102, ssa_1101 vec1 32 ssa_1104 = iadd ssa_1103, ssa_63 vec1 32 ssa_1105 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1106 = intrinsic vulkan_resource_index (ssa_1105) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1107 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1108 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1109 = ishl ssa_1104, ssa_1108 vec1 32 ssa_1110 = iadd ssa_1107, ssa_1109 vec1 32 ssa_1111 = intrinsic load_ssbo (ssa_1106, ssa_1110) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1112 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1113 = intrinsic load_deref (ssa_1112) (0) /* access=0 */ vec4 32 ssa_1114 = vec4 ssa_1111, ssa_1113.y, ssa_1113.z, ssa_1113.w vec1 32 ssa_1115 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1115, ssa_1114) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1116 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1117 = intrinsic 
load_deref (ssa_1116) (0) /* access=0 */ vec3 32 ssa_1118 = vec3 ssa_1117.x, ssa_1117.x, ssa_1117.x vec3 32 ssa_1119 = ushr ssa_1118, ssa_2340 vec1 32 ssa_1120 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1121 = intrinsic load_deref (ssa_1120) (0) /* access=0 */ vec4 32 ssa_1122 = vec4 ssa_1121.x, ssa_1119.x, ssa_1119.y, ssa_1119.z vec1 32 ssa_1123 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1123, ssa_1122) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1124 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1125 = intrinsic load_deref (ssa_1124) (0) /* access=0 */ vec4 32 ssa_1126 = iand ssa_1125, ssa_2336 vec1 32 ssa_1127 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1127, ssa_1126) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1128 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1129 = intrinsic load_deref (ssa_1128) (0) /* access=0 */ vec4 32 ssa_1130 = u2f32 ssa_1129 vec1 32 ssa_1131 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1131, ssa_1130) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1132 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1133 = intrinsic load_deref (ssa_1132) (0) /* access=0 */ vec4 32 ssa_1134 = fmul ssa_1133, ssa_2331 vec1 32 ssa_1135 = deref_var &out@o2-temp (function_temp vec4) intrinsic store_deref (ssa_1135, ssa_1134) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1136 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1137 = intrinsic load_deref (ssa_1136) (0) /* access=0 */ vec1 32 ssa_1138 = imov ssa_1137.w vec1 32 ssa_1139 = iadd ssa_1138, ssa_59 vec1 32 ssa_1140 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1141 = intrinsic load_deref (ssa_1140) (0) /* access=0 */ vec4 32 ssa_1142 = vec4 ssa_1139, ssa_1141.y, ssa_1141.z, ssa_1141.w vec1 32 ssa_1143 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1143, ssa_1142) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1144 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1145 = intrinsic load_deref (ssa_1144) (0) /* access=0 */ vec1 32 ssa_1146 = imov ssa_1145.y vec1 32 ssa_1147 = ushr ssa_58, ssa_57 vec1 32 ssa_1148 = imul ssa_1146, ssa_56 vec1 32 ssa_1149 = iadd ssa_1148, ssa_1147 vec1 32 ssa_1150 = iadd ssa_1149, ssa_55 vec1 32 ssa_1151 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1152 = intrinsic vulkan_resource_index (ssa_1151) (0, 4, 7) /* desc-set=0 */ /* binding=4 */ /* desc_type=SSBO */ vec1 32 ssa_1153 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1154 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1155 = ishl ssa_1150, ssa_1154 vec1 32 ssa_1156 = iadd ssa_1153, ssa_1155 vec1 32 ssa_1157 = intrinsic load_ssbo (ssa_1152, ssa_1156) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1158 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1159 = intrinsic load_deref (ssa_1158) (0) /* access=0 */ vec4 32 ssa_1160 = vec4 ssa_1159.x, ssa_1157, ssa_1159.z, ssa_1159.w vec1 32 ssa_1161 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1161, ssa_1160) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1162 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1163 = intrinsic load_deref (ssa_1162) (0) /* access=0 */ vec1 32 ssa_1164 = imov ssa_1163.x vec1 32 ssa_1165 = ushr ssa_54, ssa_53 vec1 32 ssa_1166 = imul ssa_1164, ssa_52 vec1 32 ssa_1167 = iadd ssa_1166, ssa_1165 vec1 32 ssa_1168 = iadd ssa_1167, ssa_51 vec1 32 ssa_1169 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1170 = intrinsic vulkan_resource_index (ssa_1169) (0, 3, 7) /* desc-set=0 
*/ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1171 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1172 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1173 = ishl ssa_1168, ssa_1172 vec1 32 ssa_1174 = iadd ssa_1171, ssa_1173 vec1 32 ssa_1175 = intrinsic load_ssbo (ssa_1170, ssa_1174) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1176 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1177 = intrinsic load_deref (ssa_1176) (0) /* access=0 */ vec4 32 ssa_1178 = vec4 ssa_1175, ssa_1177.y, ssa_1177.z, ssa_1177.w vec1 32 ssa_1179 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1179, ssa_1178) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1180 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1181 = intrinsic load_deref (ssa_1180) (0) /* access=0 */ vec3 32 ssa_1182 = vec3 ssa_1181.x, ssa_1181.x, ssa_1181.x vec3 32 ssa_1183 = ushr ssa_1182, ssa_2326 vec1 32 ssa_1184 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1185 = intrinsic load_deref (ssa_1184) (0) /* access=0 */ vec4 32 ssa_1186 = vec4 ssa_1185.x, ssa_1183.x, ssa_1183.y, ssa_1183.z vec1 32 ssa_1187 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1187, ssa_1186) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1188 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1189 = intrinsic load_deref (ssa_1188) (0) /* access=0 */ vec4 32 ssa_1190 = iand ssa_1189, ssa_2322 vec1 32 ssa_1191 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1191, ssa_1190) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1192 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1193 = intrinsic load_deref (ssa_1192) (0) /* access=0 */ vec4 32 ssa_1194 = u2f32 ssa_1193 vec1 32 ssa_1195 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1195, ssa_1194) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1196 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1197 = intrinsic load_deref (ssa_1196) (0) /* access=0 */ vec4 32 ssa_1198 = fmul ssa_1197, ssa_2317 vec1 32 ssa_1199 = deref_var &out@o3-temp (function_temp vec4) intrinsic store_deref (ssa_1199, ssa_1198) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1200 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1201 = intrinsic load_deref (ssa_1200) (0) /* access=0 */ vec1 32 ssa_1202 = deref_var &r6 (function_temp vec4) vec4 32 ssa_1203 = intrinsic load_deref (ssa_1202) (0) /* access=0 */ vec1 32 ssa_1204 = fdot4 ssa_1201, ssa_1203 vec1 32 ssa_1205 = deref_var &out@o4-temp (function_temp vec4) vec4 32 ssa_1206 = intrinsic load_deref (ssa_1205) (0) /* access=0 */ vec4 32 ssa_1207 = vec4 ssa_1204, ssa_1206.y, ssa_1206.z, ssa_1206.w vec1 32 ssa_1208 = deref_var &out@o4-temp (function_temp vec4) intrinsic store_deref (ssa_1208, ssa_1207) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1209 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1210 = intrinsic load_deref (ssa_1209) (0) /* access=0 */ vec1 32 ssa_1211 = deref_var &r10 (function_temp vec4) vec4 32 ssa_1212 = intrinsic load_deref (ssa_1211) (0) /* access=0 */ vec1 32 ssa_1213 = fdot4 ssa_1210, ssa_1212 vec1 32 ssa_1214 = deref_var &out@o4-temp (function_temp vec4) vec4 32 ssa_1215 = intrinsic load_deref (ssa_1214) (0) /* access=0 */ vec4 32 ssa_1216 = vec4 ssa_1215.x, ssa_1213, ssa_1215.z, ssa_1215.w vec1 32 ssa_1217 = deref_var &out@o4-temp (function_temp vec4) intrinsic store_deref (ssa_1217, ssa_1216) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1218 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1219 = intrinsic load_deref 
(ssa_1218) (0) /* access=0 */ vec1 32 ssa_1220 = deref_var &r11 (function_temp vec4) vec4 32 ssa_1221 = intrinsic load_deref (ssa_1220) (0) /* access=0 */ vec1 32 ssa_1222 = fdot4 ssa_1219, ssa_1221 vec1 32 ssa_1223 = deref_var &out@o4-temp (function_temp vec4) vec4 32 ssa_1224 = intrinsic load_deref (ssa_1223) (0) /* access=0 */ vec4 32 ssa_1225 = vec4 ssa_1224.x, ssa_1224.y, ssa_1222, ssa_1224.w vec1 32 ssa_1226 = deref_var &out@o4-temp (function_temp vec4) intrinsic store_deref (ssa_1226, ssa_1225) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1227 = deref_var &out@o4-temp (function_temp vec4) vec4 32 ssa_1228 = intrinsic load_deref (ssa_1227) (0) /* access=0 */ vec4 32 ssa_1229 = vec4 ssa_1228.x, ssa_1228.y, ssa_1228.z, ssa_47 vec1 32 ssa_1230 = deref_var &out@o4-temp (function_temp vec4) intrinsic store_deref (ssa_1230, ssa_1229) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1231 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1232 = intrinsic load_deref (ssa_1231) (0) /* access=0 */ vec3 32 ssa_1233 = vec3 ssa_1232.x, ssa_1232.x, ssa_1232.x vec3 32 ssa_1234 = ushr ssa_1233, ssa_2312 vec1 32 ssa_1235 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1236 = intrinsic load_deref (ssa_1235) (0) /* access=0 */ vec4 32 ssa_1237 = vec4 ssa_1236.x, ssa_1234.x, ssa_1234.y, ssa_1234.z vec1 32 ssa_1238 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1238, ssa_1237) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1239 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1240 = intrinsic load_deref (ssa_1239) (0) /* access=0 */ vec4 32 ssa_1241 = iand ssa_1240, ssa_2308 vec1 32 ssa_1242 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1242, ssa_1241) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1243 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1244 = intrinsic load_deref (ssa_1243) (0) /* access=0 */ vec4 32 ssa_1245 = u2f32 ssa_1244 vec1 32 ssa_1246 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1246, ssa_1245) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1247 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1248 = intrinsic load_deref (ssa_1247) (0) /* access=0 */ vec4 32 ssa_1249 = ffma ssa_1248, ssa_2303, ssa_2298 vec1 32 ssa_1250 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1250, ssa_1249) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1251 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1252 = intrinsic load_deref (ssa_1251) (0) /* access=0 */ vec4 32 ssa_1253 = fmin ssa_1252, ssa_2293 vec1 32 ssa_1254 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1254, ssa_1253) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1255 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1256 = intrinsic load_deref (ssa_1255) (0) /* access=0 */ vec2 32 ssa_1257 = vec2 ssa_1256.y, ssa_1256.w vec1 32 ssa_1258 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1259 = intrinsic load_deref (ssa_1258) (0) /* access=0 */ vec2 32 ssa_1260 = vec2 ssa_1259.x, ssa_1259.z vec2 32 ssa_1261 = fadd ssa_1257, ssa_1260 vec1 32 ssa_1262 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1263 = intrinsic load_deref (ssa_1262) (0) /* access=0 */ vec4 32 ssa_1264 = vec4 ssa_1261.x, ssa_1261.y, ssa_1263.z, ssa_1263.w vec1 32 ssa_1265 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1265, ssa_1264) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1266 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1267 = intrinsic load_deref (ssa_1266) (0) /* access=0 */ vec2 32 ssa_1268 = vec2 ssa_1267.y, 
ssa_1267.w vec2 32 ssa_1269 = fneg ssa_1268 vec1 32 ssa_1270 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1271 = intrinsic load_deref (ssa_1270) (0) /* access=0 */ vec2 32 ssa_1272 = vec2 ssa_1271.x, ssa_1271.z vec2 32 ssa_1273 = fadd ssa_1269, ssa_1272 vec1 32 ssa_1274 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1275 = intrinsic load_deref (ssa_1274) (0) /* access=0 */ vec4 32 ssa_1276 = vec4 ssa_1275.x, ssa_1275.y, ssa_1273.x, ssa_1273.y vec1 32 ssa_1277 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1277, ssa_1276) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1278 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1279 = intrinsic load_deref (ssa_1278) (0) /* access=0 */ vec2 32 ssa_1280 = vec2 ssa_1279.y, ssa_1279.w vec2 32 ssa_1281 = fmul ssa_1280, ssa_2288 vec1 32 ssa_1282 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1283 = intrinsic load_deref (ssa_1282) (0) /* access=0 */ vec4 32 ssa_1284 = vec4 ssa_1283.x, ssa_1281.x, ssa_1281.y, ssa_1283.w vec1 32 ssa_1285 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1285, ssa_1284) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1286 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1287 = intrinsic load_deref (ssa_1286) (0) /* access=0 */ vec2 32 ssa_1288 = vec2 ssa_1287.x, ssa_1287.z vec2 32 ssa_1289 = fmul ssa_1288, ssa_2285 vec1 32 ssa_1290 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1291 = intrinsic load_deref (ssa_1290) (0) /* access=0 */ vec4 32 ssa_1292 = vec4 ssa_1289.x, ssa_1289.y, ssa_1291.z, ssa_1291.w vec1 32 ssa_1293 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1293, ssa_1292) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1294 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1295 = intrinsic load_deref (ssa_1294) (0) /* access=0 */ vec1 32 ssa_1296 = imov ssa_1295.y vec1 32 ssa_1297 = fabs ssa_1296 vec1 32 ssa_1298 = fneg ssa_1297 vec1 32 ssa_1299 = fadd ssa_1298, ssa_39 vec1 32 ssa_1300 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1301 = intrinsic load_deref (ssa_1300) (0) /* access=0 */ vec4 32 ssa_1302 = vec4 ssa_1299, ssa_1301.y, ssa_1301.z, ssa_1301.w vec1 32 ssa_1303 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1303, ssa_1302) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1304 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1305 = intrinsic load_deref (ssa_1304) (0) /* access=0 */ vec1 32 ssa_1306 = imov ssa_1305.z vec1 32 ssa_1307 = fabs ssa_1306 vec1 32 ssa_1308 = fneg ssa_1307 vec1 32 ssa_1309 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1310 = intrinsic load_deref (ssa_1309) (0) /* access=0 */ vec1 32 ssa_1311 = imov ssa_1310.x vec1 32 ssa_1312 = fadd ssa_1308, ssa_1311 vec1 32 ssa_1313 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1314 = intrinsic load_deref (ssa_1313) (0) /* access=0 */ vec4 32 ssa_1315 = vec4 ssa_1314.x, ssa_1314.y, ssa_1314.z, ssa_1312 vec1 32 ssa_1316 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1316, ssa_1315) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1317 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1318 = intrinsic load_deref (ssa_1317) (0) /* access=0 */ vec3 32 ssa_1319 = vec3 ssa_1318.y, ssa_1318.z, ssa_1318.w vec1 32 ssa_1320 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1321 = intrinsic load_deref (ssa_1320) (0) /* access=0 */ vec3 32 ssa_1322 = vec3 ssa_1321.y, ssa_1321.z, ssa_1321.w vec1 32 ssa_1323 = fdot3 ssa_1319, ssa_1322 vec1 32 ssa_1324 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1325 = intrinsic load_deref (ssa_1324) 
(0) /* access=0 */ vec4 32 ssa_1326 = vec4 ssa_1323, ssa_1325.y, ssa_1325.z, ssa_1325.w vec1 32 ssa_1327 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1327, ssa_1326) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1328 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1329 = intrinsic load_deref (ssa_1328) (0) /* access=0 */ vec1 32 ssa_1330 = imov ssa_1329.x vec1 32 ssa_1331 = frsq ssa_1330 vec1 32 ssa_1332 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1333 = intrinsic load_deref (ssa_1332) (0) /* access=0 */ vec4 32 ssa_1334 = vec4 ssa_1331, ssa_1333.y, ssa_1333.z, ssa_1333.w vec1 32 ssa_1335 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1335, ssa_1334) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1336 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1337 = intrinsic load_deref (ssa_1336) (0) /* access=0 */ vec3 32 ssa_1338 = vec3 ssa_1337.x, ssa_1337.x, ssa_1337.x vec1 32 ssa_1339 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1340 = intrinsic load_deref (ssa_1339) (0) /* access=0 */ vec3 32 ssa_1341 = vec3 ssa_1340.y, ssa_1340.z, ssa_1340.w vec3 32 ssa_1342 = fmul ssa_1338, ssa_1341 vec1 32 ssa_1343 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1344 = intrinsic load_deref (ssa_1343) (0) /* access=0 */ vec4 32 ssa_1345 = vec4 ssa_1344.x, ssa_1342.x, ssa_1342.y, ssa_1342.z vec1 32 ssa_1346 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1346, ssa_1345) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1347 = deref_var &r5 (function_temp vec4) vec4 32 ssa_1348 = intrinsic load_deref (ssa_1347) (0) /* access=0 */ vec1 32 ssa_1349 = imov ssa_1348.y vec1 32 ssa_1350 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1351 = intrinsic load_deref (ssa_1350) (0) /* access=0 */ vec1 32 ssa_1352 = imov ssa_1351.w vec1 32 ssa_1353 = fmul ssa_1349, ssa_1352 vec1 32 ssa_1354 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1355 = intrinsic load_deref (ssa_1354) (0) /* access=0 */ vec4 32 ssa_1356 = vec4 ssa_1353, ssa_1355.y, ssa_1355.z, ssa_1355.w vec1 32 ssa_1357 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1357, ssa_1356) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1358 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1359 = intrinsic load_deref (ssa_1358) (0) /* access=0 */ vec3 32 ssa_1360 = vec3 ssa_1359.y, ssa_1359.z, ssa_1359.x vec1 32 ssa_1361 = deref_var &r6 (function_temp vec4) vec4 32 ssa_1362 = intrinsic load_deref (ssa_1361) (0) /* access=0 */ vec3 32 ssa_1363 = vec3 ssa_1362.x, ssa_1362.y, ssa_1362.z vec1 32 ssa_1364 = fdot3 ssa_1360, ssa_1363 vec1 32 ssa_1365 = deref_var &out@o5-temp (function_temp vec4) vec1 32 ssa_1138 = fdot4 ssa_1135, ssa_1137 vec1 32 ssa_1139 = deref_var &o2 (shader_out vec4) vec4 32 ssa_1140 = intrinsic load_deref (ssa_1139) (0) /* access=0 */ vec4 32 ssa_1141 = vec4 ssa_1140.x, ssa_1138, ssa_1140.z, ssa_1140.w vec1 32 ssa_1142 = deref_var &o2 (shader_out vec4) intrinsic store_deref (ssa_1142, ssa_1141) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1143 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1144 = intrinsic load_deref (ssa_1143) (0) /* access=0 */ vec1 32 ssa_1145 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_1146 = intrinsic load_deref (ssa_1145) (0) /* access=0 */ vec1 32 ssa_1147 = fdot4 ssa_1144, ssa_1146 vec1 32 ssa_1148 = deref_var &o2 (shader_out vec4) vec4 32 ssa_1149 = intrinsic load_deref (ssa_1148) (0) /* access=0 */ vec4 32 ssa_1150 = vec4 ssa_1149.x, ssa_1149.y, ssa_1147, ssa_1149.w vec1 32 ssa_1151 =
deref_var &o2 (shader_out vec4) intrinsic store_deref (ssa_1151, ssa_1150) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1152 = deref_var &o2 (shader_out vec4) vec4 32 ssa_1153 = intrinsic load_deref (ssa_1152) (0) /* access=0 */ vec4 32 ssa_1154 = vec4 ssa_1153.x, ssa_1153.y, ssa_1153.z, ssa_47 vec1 32 ssa_1155 = deref_var &o2 (shader_out vec4) intrinsic store_deref (ssa_1155, ssa_1154) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1156 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1157 = intrinsic load_deref (ssa_1156) (0) /* access=0 */ vec3 32 ssa_1158 = vec3 ssa_1157.x, ssa_1157.x, ssa_1157.x vec3 32 ssa_1159 = ushr ssa_1158, ssa_46 vec1 32 ssa_1160 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1161 = intrinsic load_deref (ssa_1160) (0) /* access=0 */ vec4 32 ssa_1162 = vec4 ssa_1161.x, ssa_1159.x, ssa_1159.y, ssa_1159.z vec1 32 ssa_1163 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1163, ssa_1162) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1164 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1165 = intrinsic load_deref (ssa_1164) (0) /* access=0 */ vec4 32 ssa_1166 = iand ssa_1165, ssa_45 vec1 32 ssa_1167 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1167, ssa_1166) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1168 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1169 = intrinsic load_deref (ssa_1168) (0) /* access=0 */ vec4 32 ssa_1170 = u2f32 ssa_1169 vec1 32 ssa_1171 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1171, ssa_1170) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1172 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1173 = intrinsic load_deref (ssa_1172) (0) /* access=0 */ vec4 32 ssa_1174 = ffma ssa_1173, ssa_44, ssa_43 vec1 32 ssa_1175 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1175, ssa_1174) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1176 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1177 = intrinsic load_deref (ssa_1176) (0) /* access=0 */ vec4 32 ssa_1178 = fmin ssa_1177, ssa_42 vec1 32 ssa_1179 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1179, ssa_1178) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1180 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1181 = intrinsic load_deref (ssa_1180) (0) /* access=0 */ vec2 32 ssa_1182 = vec2 ssa_1181.y, ssa_1181.w vec1 32 ssa_1183 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1184 = intrinsic load_deref (ssa_1183) (0) /* access=0 */ vec2 32 ssa_1185 = vec2 ssa_1184.x, ssa_1184.z vec2 32 ssa_1186 = fadd ssa_1182, ssa_1185 vec1 32 ssa_1187 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1188 = intrinsic load_deref (ssa_1187) (0) /* access=0 */ vec4 32 ssa_1189 = vec4 ssa_1186.x, ssa_1186.y, ssa_1188.z, ssa_1188.w vec1 32 ssa_1190 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1190, ssa_1189) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1191 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1192 = intrinsic load_deref (ssa_1191) (0) /* access=0 */ vec2 32 ssa_1193 = vec2 ssa_1192.y, ssa_1192.w vec2 32 ssa_1194 = fneg ssa_1193 vec1 32 ssa_1195 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1196 = intrinsic load_deref (ssa_1195) (0) /* access=0 */ vec2 32 ssa_1197 = vec2 ssa_1196.x, ssa_1196.z vec2 32 ssa_1198 = fadd ssa_1194, ssa_1197 vec1 32 ssa_1199 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1200 = intrinsic load_deref (ssa_1199) (0) /* access=0 */ vec4 32 ssa_1201 = vec4 ssa_1200.x, ssa_1200.y, ssa_1198.x, ssa_1198.y vec1 32 ssa_1202 = deref_var &r4 (shader_temp 
vec4) intrinsic store_deref (ssa_1202, ssa_1201) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1203 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1204 = intrinsic load_deref (ssa_1203) (0) /* access=0 */ vec2 32 ssa_1205 = vec2 ssa_1204.y, ssa_1204.w vec2 32 ssa_1206 = fmul ssa_1205, ssa_41 vec1 32 ssa_1207 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1208 = intrinsic load_deref (ssa_1207) (0) /* access=0 */ vec4 32 ssa_1209 = vec4 ssa_1208.x, ssa_1206.x, ssa_1206.y, ssa_1208.w vec1 32 ssa_1210 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1210, ssa_1209) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1211 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1212 = intrinsic load_deref (ssa_1211) (0) /* access=0 */ vec2 32 ssa_1213 = vec2 ssa_1212.x, ssa_1212.z vec2 32 ssa_1214 = fmul ssa_1213, ssa_40 vec1 32 ssa_1215 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1216 = intrinsic load_deref (ssa_1215) (0) /* access=0 */ vec4 32 ssa_1217 = vec4 ssa_1214.x, ssa_1214.y, ssa_1216.z, ssa_1216.w vec1 32 ssa_1218 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1218, ssa_1217) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1219 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1220 = intrinsic load_deref (ssa_1219) (0) /* access=0 */ vec1 32 ssa_1221 = imov ssa_1220.y vec1 32 ssa_1222 = fabs ssa_1221 vec1 32 ssa_1223 = fneg ssa_1222 vec1 32 ssa_1224 = fadd ssa_1223, ssa_39 vec1 32 ssa_1225 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1226 = intrinsic load_deref (ssa_1225) (0) /* access=0 */ vec4 32 ssa_1227 = vec4 ssa_1224, ssa_1226.y, ssa_1226.z, ssa_1226.w vec1 32 ssa_1228 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1228, ssa_1227) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1229 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1230 = intrinsic load_deref (ssa_1229) (0) /* access=0 */ vec1 32 ssa_1231 = imov ssa_1230.z vec1 32 ssa_1232 = fabs ssa_1231 vec1 32 ssa_1233 = fneg ssa_1232 vec1 32 ssa_1234 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1235 = intrinsic load_deref (ssa_1234) (0) /* access=0 */ vec1 32 ssa_1236 = imov ssa_1235.x vec1 32 ssa_1237 = fadd ssa_1233, ssa_1236 vec1 32 ssa_1238 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1239 = intrinsic load_deref (ssa_1238) (0) /* access=0 */ vec4 32 ssa_1240 = vec4 ssa_1239.x, ssa_1239.y, ssa_1239.z, ssa_1237 vec1 32 ssa_1241 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1241, ssa_1240) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1242 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1243 = intrinsic load_deref (ssa_1242) (0) /* access=0 */ vec3 32 ssa_1244 = vec3 ssa_1243.y, ssa_1243.z, ssa_1243.w vec1 32 ssa_1245 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1246 = intrinsic load_deref (ssa_1245) (0) /* access=0 */ vec3 32 ssa_1247 = vec3 ssa_1246.y, ssa_1246.z, ssa_1246.w vec1 32 ssa_1248 = fdot3 ssa_1244, ssa_1247 vec1 32 ssa_1249 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1250 = intrinsic load_deref (ssa_1249) (0) /* access=0 */ vec4 32 ssa_1251 = vec4 ssa_1248, ssa_1250.y, ssa_1250.z, ssa_1250.w vec1 32 ssa_1252 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1252, ssa_1251) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1253 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1254 = intrinsic load_deref (ssa_1253) (0) /* access=0 */ vec1 32 ssa_1255 = imov ssa_1254.x vec1 32 ssa_1256 = frsq ssa_1255 vec1 32 ssa_1257 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1258 = intrinsic load_deref (ssa_1257) (0) /* access=0 */ 
vec4 32 ssa_1259 = vec4 ssa_1256, ssa_1258.y, ssa_1258.z, ssa_1258.w vec1 32 ssa_1260 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1260, ssa_1259) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1261 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1262 = intrinsic load_deref (ssa_1261) (0) /* access=0 */ vec3 32 ssa_1263 = vec3 ssa_1262.x, ssa_1262.x, ssa_1262.x vec1 32 ssa_1264 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1265 = intrinsic load_deref (ssa_1264) (0) /* access=0 */ vec3 32 ssa_1266 = vec3 ssa_1265.y, ssa_1265.z, ssa_1265.w vec3 32 ssa_1267 = fmul ssa_1263, ssa_1266 vec1 32 ssa_1268 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1366 = intrinsic load_deref (ssa_1365) (0) /* access=0 */ vec4 32 ssa_1367 = vec4 ssa_1364, ssa_1366.y, ssa_1366.z, ssa_1366.w vec1 32 ssa_1368 = deref_var &out@o5-temp (function_temp vec4) intrinsic store_deref (ssa_1368, ssa_1367) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1369 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1370 = intrinsic load_deref (ssa_1369) (0) /* access=0 */ vec3 32 ssa_1371 = vec3 ssa_1370.y, ssa_1370.z, ssa_1370.x vec1 32 ssa_1372 = deref_var &r10 (function_temp vec4) vec4 32 ssa_1373 = intrinsic load_deref (ssa_1372) (0) /* access=0 */ vec3 32 ssa_1374 = vec3 ssa_1373.x, ssa_1373.y, ssa_1373.z vec1 32 ssa_1375 = fdot3 ssa_1371, ssa_1374 vec1 32 ssa_1376 = deref_var &out@o5-temp (function_temp vec4) vec4 32 ssa_1377 = intrinsic load_deref (ssa_1376) (0) /* access=0 */ vec4 32 ssa_1378 = vec4 ssa_1377.x, ssa_1375, ssa_1377.z, ssa_1377.w vec1 32 ssa_1379 = deref_var &out@o5-temp (function_temp vec4) intrinsic store_deref (ssa_1379, ssa_1378) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1380 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1381 = intrinsic load_deref (ssa_1380) (0) /* access=0 */ vec3 32 ssa_1382 = vec3 ssa_1381.y, ssa_1381.z, ssa_1381.x vec1 32 ssa_1383 = deref_var &r11 (function_temp vec4) vec4 32 ssa_1384 = intrinsic load_deref (ssa_1383) (0) /* access=0 */ vec3 32 ssa_1385 = vec3 ssa_1384.x, ssa_1384.y, ssa_1384.z vec1 32 ssa_1386 = fdot3 ssa_1382, ssa_1385 vec1 32 ssa_1387 = deref_var &out@o5-temp (function_temp vec4) vec4 32 ssa_1388 = intrinsic load_deref (ssa_1387) (0) /* access=0 */ vec4 32 ssa_1389 = vec4 ssa_1388.x, ssa_1388.y, ssa_1386, ssa_1388.w vec1 32 ssa_1390 = deref_var &out@o5-temp (function_temp vec4) intrinsic store_deref (ssa_1390, ssa_1389) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1391 = deref_var &out@o5-temp (function_temp vec4) vec4 32 ssa_1392 = intrinsic load_deref (ssa_1391) (0) /* access=0 */ vec4 32 ssa_1393 = vec4 ssa_1392.x, ssa_1392.y, ssa_1392.z, ssa_38 vec1 32 ssa_1394 = deref_var &out@o5-temp (function_temp vec4) intrinsic store_deref (ssa_1394, ssa_1393) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1395 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1396 = intrinsic load_deref (ssa_1395) (0) /* access=0 */ vec1 32 ssa_1397 = imov ssa_1396.x vec1 32 ssa_1398 = fabs ssa_1397 vec1 32 ssa_1399 = fneg ssa_1398 vec1 32 ssa_1298 = fmul ssa_1294, ssa_1297 vec1 32 ssa_1299 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1300 = intrinsic load_deref (ssa_1299) (0) /* access=0 */ vec4 32 ssa_1301 = vec4 ssa_1298, ssa_1300.y, ssa_1300.z, ssa_1300.w vec1 32 ssa_1302 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1302, ssa_1301) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1303 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1304 = intrinsic load_deref (ssa_1303) (0) /* access=0 */ vec3 32 ssa_1305 = vec3 ssa_1304.y, ssa_1304.z, ssa_1304.x vec1 32 ssa_1306 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1307 = intrinsic load_deref (ssa_1306) (0) /* access=0 */ vec3 32 ssa_1308 = vec3 ssa_1307.x, ssa_1307.y, ssa_1307.z vec4 32 ssa_1269 = intrinsic load_deref (ssa_1268) (0) /* access=0 */ vec4 32 ssa_1270 = vec4 ssa_1269.x, ssa_1267.x, ssa_1267.y, ssa_1267.z vec1 32 ssa_1271 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1271, ssa_1270) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1169 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1169, ssa_1168) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1170 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1171 = intrinsic load_deref (ssa_1170) (0) /* access=0 */ vec3 32 ssa_1172 = vec3 ssa_1171.x, ssa_1171.y, ssa_1171.z vec1 32 ssa_1173 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1174 = intrinsic load_deref (ssa_1173) (0) /* access=0 */ vec4 32 ssa_831 = vec4 ssa_830.x, ssa_830.y, ssa_828, ssa_830.w vec1 32 ssa_832 = deref_var &r9 (function_temp vec4) intrinsic store_deref (ssa_832, ssa_831) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec4 32 ssa_1655 = intrinsic load_deref (ssa_1654) (0) /* access=0 */ vec4 32 ssa_1656 = vec4 ssa_1655.x, ssa_1653, ssa_1655.z, ssa_1655.w vec1 32 ssa_1657 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1657, ssa_1656) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1400 = fadd ssa_1399, ssa_37 vec1 32 ssa_1401 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1402 = intrinsic load_deref (ssa_1401) (0) /* access=0 */ vec4 32 ssa_1403 = vec4 ssa_1402.x, ssa_1402.y, ssa_1402.z, ssa_1400 vec1 32 ssa_1404 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1404, ssa_1403) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1405 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1406 = intrinsic load_deref (ssa_1405) (0) /* access=0 */ vec1 32 ssa_1407 = imov ssa_1406.y vec1 32 ssa_1408 = fabs ssa_1407 vec1 32 ssa_1409 = fneg ssa_1408 vec1 32 ssa_1410 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1411 = intrinsic load_deref (ssa_1410) (0) /* access=0 */ vec1 32 ssa_1412 = imov ssa_1411.w vec1 32 ssa_1413 = fadd ssa_1409, ssa_1412 vec1 32 ssa_1414 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1415 = intrinsic load_deref (ssa_1414) (0) /* access=0 */ vec4 32 ssa_1416 = vec4 ssa_1415.x, ssa_1415.y, ssa_1413, ssa_1415.w vec1 32 ssa_1417 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1417, ssa_1416) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1418 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1419 = intrinsic load_deref (ssa_1418) (0) /* access=0 */ vec3 32 ssa_1420 = vec3 ssa_1419.x, ssa_1419.y, ssa_1419.z vec1 32 ssa_1421 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1422 = intrinsic load_deref (ssa_1421) (0) /* access=0 */ vec3 32 ssa_1423 = vec3 ssa_1422.x, ssa_1422.y, ssa_1422.z vec1 32 ssa_1424 = fdot3 ssa_1420, ssa_1423 vec1 32 ssa_1425 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1426 = intrinsic load_deref (ssa_1425) (0) /* access=0 */ vec4 32 ssa_1427 = vec4 ssa_1426.x, ssa_1426.y, ssa_1426.z, ssa_1424 vec1 32 ssa_1428 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1428, ssa_1427) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1429 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1430 = intrinsic load_deref (ssa_1429) (0) /* access=0 */ vec1 32 ssa_1431 = imov ssa_1430.w vec1 32 ssa_1432 = frsq ssa_1431 vec1 32 ssa_1433 = deref_var &r1
(function_temp vec4) vec4 32 ssa_1434 = intrinsic load_deref (ssa_1433) (0) /* access=0 */ vec4 32 ssa_1435 = vec4 ssa_1434.x, ssa_1434.y, ssa_1434.z, ssa_1432 vec1 32 ssa_1436 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1436, ssa_1435) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1437 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1438 = intrinsic load_deref (ssa_1437) (0) /* access=0 */ vec3 32 ssa_1439 = vec3 ssa_1438.w, ssa_1438.w, ssa_1438.w vec1 32 ssa_1440 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1441 = intrinsic load_deref (ssa_1440) (0) /* access=0 */ vec3 32 ssa_1442 = vec3 ssa_1441.x, ssa_1441.y, ssa_1441.z vec3 32 ssa_1443 = fmul ssa_1439, ssa_1442 vec1 32 ssa_1444 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1445 = intrinsic load_deref (ssa_1444) (0) /* access=0 */ vec4 32 ssa_1446 = vec4 ssa_1443.x, ssa_1443.y, ssa_1443.z, ssa_1445.w vec1 32 ssa_1447 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1447, ssa_1446) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1448 = deref_var &r5 (function_temp vec4) vec4 32 ssa_1449 = intrinsic load_deref (ssa_1448) (0) /* access=0 */ vec1 32 ssa_1450 = imov ssa_1449.x vec1 32 ssa_1451 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1452 = intrinsic load_deref (ssa_1451) (0) /* access=0 */ vec1 32 ssa_1453 = imov ssa_1452.z vec1 32 ssa_1454 = fmul ssa_1450, ssa_1453 vec1 32 ssa_1455 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1456 = intrinsic load_deref (ssa_1455) (0) /* access=0 */ vec4 32 ssa_1457 = vec4 ssa_1456.x, ssa_1456.y, ssa_1456.z, ssa_1454 vec1 32 ssa_1458 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1458, ssa_1457) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1459 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1460 = intrinsic load_deref (ssa_1459) (0) /* access=0 */ vec3 32 ssa_1461 = vec3 ssa_1460.z, ssa_1460.x, ssa_1460.y vec1 32 ssa_1462 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1463 = intrinsic load_deref (ssa_1462) (0) /* access=0 */ vec3 32 ssa_1464 = vec3 ssa_1463.w, ssa_1463.x, ssa_1463.y vec3 32 ssa_1465 = fmul ssa_1461, ssa_1464 vec1 32 ssa_1466 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1467 = intrinsic load_deref (ssa_1466) (0) /* access=0 */ vec4 32 ssa_1468 = vec4 ssa_1465.x, ssa_1465.y, ssa_1465.z, ssa_1467.w vec1 32 ssa_1469 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_1469, ssa_1468) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1470 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1471 = intrinsic load_deref (ssa_1470) (0) /* access=0 */ vec3 32 ssa_1472 = vec3 ssa_1471.y, ssa_1471.w, ssa_1471.x vec1 32 ssa_1473 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1474 = intrinsic load_deref (ssa_1473) (0) /* access=0 */ vec3 32 ssa_1475 = vec3 ssa_1474.x, ssa_1474.y, ssa_1474.z vec1 32 ssa_1476 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1477 = intrinsic load_deref (ssa_1476) (0) /* access=0 */ vec3 32 ssa_1478 = vec3 ssa_1477.x, ssa_1477.y, ssa_1477.z vec3 32 ssa_1479 = fneg ssa_1478 vec3 32 ssa_1480 = ffma ssa_1472, ssa_1475, ssa_1479 vec1 32 ssa_1481 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1482 = intrinsic load_deref (ssa_1481) (0) /* access=0 */ vec4 32 ssa_1483 = vec4 ssa_1480.x, ssa_1480.y, ssa_1480.z, ssa_1482.w vec1 32 ssa_1484 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1484, ssa_1483) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1485 = deref_var &r5 (function_temp vec4) vec4 32 ssa_1486 = intrinsic load_deref (ssa_1485) (0) /* 
access=0 */ vec3 32 ssa_1487 = vec3 ssa_1486.z, ssa_1486.z, ssa_1486.z vec1 32 ssa_1488 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1489 = intrinsic load_deref (ssa_1488) (0) /* access=0 */ vec3 32 ssa_1490 = vec3 ssa_1489.x, ssa_1489.y, ssa_1489.z vec3 32 ssa_1491 = fmul ssa_1487, ssa_1490 vec1 32 ssa_1492 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1493 = intrinsic load_deref (ssa_1492) (0) /* access=0 */ vec4 32 ssa_1494 = vec4 ssa_1491.x, ssa_1491.y, ssa_1491.z, ssa_1493.w vec1 32 ssa_1495 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1495, ssa_1494) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1496 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1497 = intrinsic load_deref (ssa_1496) (0) /* access=0 */ vec3 32 ssa_1498 = vec3 ssa_1497.x, ssa_1497.y, ssa_1497.z vec1 32 ssa_1499 = deref_var &r6 (function_temp vec4) vec4 32 ssa_1500 = intrinsic load_deref (ssa_1499) (0) /* access=0 */ vec3 32 ssa_1501 = vec3 ssa_1500.x, ssa_1500.y, ssa_1500.z vec1 32 ssa_1502 = fdot3 ssa_1498, ssa_1501 vec1 32 ssa_1503 = deref_var &out@o6-temp (function_temp vec4) vec4 32 ssa_1504 = intrinsic load_deref (ssa_1503) (0) /* access=0 */ vec4 32 ssa_1505 = vec4 ssa_1502, ssa_1504.y, ssa_1504.z, ssa_1504.w vec1 32 ssa_1506 = deref_var &out@o6-temp (function_temp vec4) intrinsic store_deref (ssa_1506, ssa_1505) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1507 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1508 = intrinsic load_deref (ssa_1507) (0) /* access=0 */ vec3 32 ssa_1509 = vec3 ssa_1508.x, ssa_1508.y, ssa_1508.w vec1 32 ssa_1510 = deref_var &r6 (function_temp vec4) vec4 32 ssa_1511 = intrinsic load_deref (ssa_1510) (0) /* access=0 */ vec3 32 ssa_1512 = vec3 ssa_1511.x, ssa_1511.y, ssa_1511.z vec1 32 ssa_1513 = fdot3 ssa_1509, ssa_1512 vec1 32 ssa_1514 = deref_var &out@o7-temp (function_temp vec4) vec4 32 ssa_1515 = intrinsic load_deref (ssa_1514) (0) /* access=0 */ vec4 32 ssa_1516 = vec4 ssa_1513, ssa_1515.y, ssa_1515.z, ssa_1515.w vec1 32 ssa_1517 = deref_var &out@o7-temp (function_temp vec4) intrinsic store_deref (ssa_1517, ssa_1516) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1518 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1519 = intrinsic load_deref (ssa_1518) (0) /* access=0 */ vec3 32 ssa_1520 = vec3 ssa_1519.x, ssa_1519.y, ssa_1519.z vec1 32 ssa_1521 = deref_var &r10 (function_temp vec4) vec4 32 ssa_1522 = intrinsic load_deref (ssa_1521) (0) /* access=0 */ vec3 32 ssa_1523 = vec3 ssa_1522.x, ssa_1522.y, ssa_1522.z vec1 32 ssa_1524 = fdot3 ssa_1520, ssa_1523 vec1 32 ssa_1525 = deref_var &out@o6-temp (function_temp vec4) vec4 32 ssa_1526 = intrinsic load_deref (ssa_1525) (0) /* access=0 */ vec4 32 ssa_1527 = vec4 ssa_1526.x, ssa_1524, ssa_1526.z, ssa_1526.w vec1 32 ssa_1528 = deref_var &out@o6-temp (function_temp vec4) intrinsic store_deref (ssa_1528, ssa_1527) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1529 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1530 = intrinsic load_deref (ssa_1529) (0) /* access=0 */ vec3 32 ssa_1531 = vec3 ssa_1530.x, ssa_1530.y, ssa_1530.w vec1 32 ssa_1532 = deref_var &r10 (function_temp vec4) vec4 32 ssa_1533 = intrinsic load_deref (ssa_1532) (0) /* access=0 */ vec3 32 ssa_1534 = vec3 ssa_1533.x, ssa_1533.y, ssa_1533.z vec1 32 ssa_1535 = fdot3 ssa_1531, ssa_1534 vec1 32 ssa_1536 = deref_var &out@o7-temp (function_temp vec4) vec4 32 ssa_1537 = intrinsic load_deref (ssa_1536) (0) /* access=0 */ vec4 32 ssa_1538 = vec4 ssa_1537.x, ssa_1535, ssa_1537.z, ssa_1537.w vec1 32 ssa_1539 = deref_var 
&out@o7-temp (function_temp vec4) intrinsic store_deref (ssa_1539, ssa_1538) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1540 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1541 = intrinsic load_deref (ssa_1540) (0) /* access=0 */ vec3 32 ssa_1542 = vec3 ssa_1541.x, ssa_1541.y, ssa_1541.w vec1 32 ssa_1543 = deref_var &r11 (function_temp vec4) vec4 32 ssa_1544 = intrinsic load_deref (ssa_1543) (0) /* access=0 */ vec3 32 ssa_1545 = vec3 ssa_1544.x, ssa_1544.y, ssa_1544.z vec1 32 ssa_1546 = fdot3 ssa_1542, ssa_1545 vec1 32 ssa_1547 = deref_var &out@o7-temp (function_temp vec4) vec4 32 ssa_1548 = intrinsic load_deref (ssa_1547) (0) /* access=0 */ vec4 32 ssa_1549 = vec4 ssa_1548.x, ssa_1548.y, ssa_1546, ssa_1548.w vec1 32 ssa_1550 = deref_var &out@o7-temp (function_temp vec4) intrinsic store_deref (ssa_1550, ssa_1549) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1551 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1552 = intrinsic load_deref (ssa_1551) (0) /* access=0 */ vec3 32 ssa_1553 = vec3 ssa_1552.x, ssa_1552.y, ssa_1552.z vec1 32 ssa_1554 = deref_var &r11 (function_temp vec4) vec4 32 ssa_1555 = intrinsic load_deref (ssa_1554) (0) /* access=0 */ vec3 32 ssa_1556 = vec3 ssa_1555.x, ssa_1555.y, ssa_1555.z vec1 32 ssa_1557 = fdot3 ssa_1553, ssa_1556 vec1 32 ssa_1558 = deref_var &out@o6-temp (function_temp vec4) vec4 32 ssa_1559 = intrinsic load_deref (ssa_1558) (0) /* access=0 */ vec4 32 ssa_1560 = vec4 ssa_1559.x, ssa_1559.y, ssa_1557, ssa_1559.w vec1 32 ssa_1561 = deref_var &out@o6-temp (function_temp vec4) intrinsic store_deref (ssa_1561, ssa_1560) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1562 = deref_var &out@o6-temp (function_temp vec4) vec4 32 ssa_1563 = intrinsic load_deref (ssa_1562) (0) /* access=0 */ vec4 32 ssa_1564 = vec4 ssa_1563.x, ssa_1563.y, ssa_1563.z, ssa_36 vec1 32 ssa_1565 = deref_var &out@o6-temp (function_temp vec4) intrinsic store_deref (ssa_1565, ssa_1564) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1566 = deref_var &out@o7-temp (function_temp vec4) vec4 32 ssa_1567 = intrinsic load_deref (ssa_1566) (0) /* access=0 */ vec4 32 ssa_1568 = vec4 ssa_1567.x, ssa_1567.y, ssa_1567.z, ssa_35 vec1 32 ssa_1569 = deref_var &out@o7-temp (function_temp vec4) intrinsic store_deref (ssa_1569, ssa_1568) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1570 = deref_var &out@o8-temp (function_temp vec4) intrinsic store_deref (ssa_1570, ssa_2282) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1571 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1572 = intrinsic load_deref (ssa_1571) (0) /* access=0 */ vec1 32 ssa_1573 = imov ssa_1572.y vec1 32 ssa_1574 = ishl ssa_1573, ssa_33 vec1 32 ssa_1575 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1576 = intrinsic load_deref (ssa_1575) (0) /* access=0 */ vec4 32 ssa_1577 = vec4 ssa_1574, ssa_1576.y, ssa_1576.z, ssa_1576.w vec1 32 ssa_1578 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1578, ssa_1577) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1579 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1580 = intrinsic load_deref (ssa_1579) (0) /* access=0 */ vec3 32 ssa_1581 = vec3 ssa_1580.x, ssa_1580.y, ssa_1580.z vec3 32 ssa_1582 = ishr ssa_1581, ssa_2277 vec1 32 ssa_1583 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1584 = intrinsic load_deref (ssa_1583) (0) /* access=0 */ vec4 32 ssa_1585 = vec4 ssa_1582.x, ssa_1582.y, ssa_1582.z, ssa_1584.w vec1 32 ssa_1586 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1586, ssa_1585) (15, 0) /* wrmask=xyzw */ 
/* access=0 */ vec1 32 ssa_1587 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1588 = intrinsic load_deref (ssa_1587) (0) /* access=0 */ vec3 32 ssa_1589 = vec3 ssa_1588.x, ssa_1588.y, ssa_1588.z vec3 32 ssa_1590 = i2f32 ssa_1589 vec1 32 ssa_1591 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1592 = intrinsic load_deref (ssa_1591) (0) /* access=0 */ vec4 32 ssa_1593 = vec4 ssa_1590.x, ssa_1590.y, ssa_1590.z, ssa_1592.w vec1 32 ssa_1594 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1594, ssa_1593) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1595 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1596 = intrinsic load_deref (ssa_1595) (0) /* access=0 */ vec3 32 ssa_1597 = vec3 ssa_1596.x, ssa_1596.y, ssa_1596.z vec3 32 ssa_1598 = fmul ssa_1597, ssa_2273 vec1 32 ssa_1599 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1600 = intrinsic load_deref (ssa_1599) (0) /* access=0 */ vec4 32 ssa_1601 = vec4 ssa_1598.x, ssa_1598.y, ssa_1598.z, ssa_1600.w vec1 32 ssa_1602 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1602, ssa_1601) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1603 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1604 = intrinsic load_deref (ssa_1603) (0) /* access=0 */ vec3 32 ssa_1605 = vec3 ssa_1604.w, ssa_1604.w, ssa_1604.w vec1 32 ssa_1606 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1607 = intrinsic load_deref (ssa_1606) (0) /* access=0 */ vec3 32 ssa_1608 = vec3 ssa_1607.x, ssa_1607.y, ssa_1607.z vec3 32 ssa_1609 = fmul ssa_1605, ssa_1608 vec1 32 ssa_1610 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1611 = intrinsic load_deref (ssa_1610) (0) /* access=0 */ vec1 32 ssa_1309 = fdot3 ssa_1305, ssa_1308 vec3 32 ssa_1175 = vec3 ssa_1174.x, ssa_1174.y, ssa_1174.z vec1 32 ssa_1176 = fdot3 ssa_1172, ssa_1175 vec1 32 ssa_1177 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1178 = intrinsic load_deref (ssa_1177) (0) /* access=0 */ vec4 32 ssa_1179 = vec4 ssa_1178.x, ssa_1176, ssa_1178.z, ssa_1178.w vec1 32 ssa_1180 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1180, ssa_1179) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1181 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1182 = intrinsic load_deref (ssa_1181) (0) /* access=0 */ vec3 32 ssa_1183 = vec3 ssa_1182.x, ssa_1182.y, ssa_1182.w vec1 32 ssa_1184 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1185 = intrinsic load_deref (ssa_1184) (0) /* access=0 */ vec3 32 ssa_1186 = vec3 ssa_1185.x, ssa_1185.y, ssa_1185.z vec1 32 ssa_1187 = fdot3 ssa_1183, ssa_1186 vec1 32 ssa_1188 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1189 = intrinsic load_deref (ssa_1188) (0) /* access=0 */ vec4 32 ssa_1190 = vec4 ssa_1189.x, ssa_1187, ssa_1189.z, ssa_1189.w vec1 32 ssa_1191 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1191, ssa_1190) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1192 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1193 = intrinsic load_deref (ssa_1192) (0) /* access=0 */ vec3 32 ssa_1194 = vec3 ssa_1193.x, ssa_1193.y, ssa_1193.w vec1 32 ssa_1195 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1196 = intrinsic load_deref (ssa_1195) (0) /* access=0 */ vec3 32 ssa_1197 = vec3 ssa_1196.x, ssa_1196.y, ssa_1196.z vec1 32 ssa_1198 = fdot3 ssa_1194, ssa_1197 vec1 32 ssa_1199 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1200 = intrinsic load_deref (ssa_1199) (0) /* access=0 */ vec4 32 ssa_1201 = vec4 ssa_1200.x, ssa_1200.y, ssa_1198, ssa_1200.w vec1 32 ssa_1202 = deref_var &o6 (shader_out vec4) intrinsic
store_deref (ssa_1202, ssa_1201) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1203 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1204 = intrinsic load_deref (ssa_1203) (0) /* access=0 */ vec3 32 ssa_1205 = vec3 ssa_1204.x, ssa_1204.y, ssa_1204.z vec1 32 ssa_1206 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1207 = intrinsic load_deref (ssa_1206) (0) /* access=0 */ vec3 32 ssa_1208 = vec3 ssa_1207.x, ssa_1207.y, ssa_1207.z vec1 32 ssa_1209 = fdot3 ssa_1205, ssa_1208 vec1 32 ssa_1210 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1211 = intrinsic load_deref (ssa_1210) (0) /* access=0 */ vec4 32 ssa_1212 = vec4 ssa_1211.x, ssa_1211.y, ssa_1209, ssa_1211.w vec1 32 ssa_1213 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1213, ssa_1212) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1214 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1215 = intrinsic load_deref (ssa_1214) (0) /* access=0 */ vec4 32 ssa_1216 = vec4 ssa_1215.x, ssa_1215.y, ssa_1215.z, ssa_50 vec1 32 ssa_1217 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1217, ssa_1216) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1218 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1219 = intrinsic load_deref (ssa_1218) (0) /* access=0 */ vec4 32 ssa_1220 = vec4 ssa_1219.x, ssa_1219.y, ssa_1219.z, ssa_49 vec1 32 ssa_1221 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1221, ssa_1220) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1222 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1223 = intrinsic load_deref (ssa_1222) (0) /* access=0 */ vec1 32 ssa_1224 = imov ssa_1223.w vec1 32 ssa_1225 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1226 = intrinsic load_deref (ssa_1225) (0) /* access=0 */ vec4 32 ssa_1227 = vec4 ssa_1224, ssa_1226.y, ssa_1226.z, ssa_1226.w vec1 32 ssa_1228 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1228, ssa_1227) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1229 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1230 = intrinsic load_deref (ssa_1229) (0) /* access=0 */ vec4 32 ssa_1231 = vec4 ssa_1230.z, ssa_1230.z, ssa_1230.z, ssa_1230.z vec1 32 ssa_1232 = imov ssa_1231.x /* succs: block_19 block_20 */ if ssa_48 { block block_19: /* preds: block_18 */ vec1 32 ssa_1233 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_1234 = txf ssa_1233 (texture_deref), ssa_1232 (coord), 0 (sampler), vec4 32 ssa_1235 = vec4 ssa_1234.x, ssa_1234.z, ssa_1234.y, ssa_1234.w vec1 32 ssa_1236 = deref_var &phi@5 (function_temp vec4) intrinsic store_deref (ssa_1236, ssa_1235) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_21 */ } else { block block_20: /* preds: block_18 */ vec1 32 ssa_1237 = deref_var &phi@5 (function_temp vec4) intrinsic store_deref (ssa_1237, ssa_38) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_21 */ } block block_21: /* preds: block_19 block_20 */ vec1 32 ssa_1238 = deref_var &phi@5 (function_temp vec4) vec4 32 ssa_1239 = intrinsic load_deref (ssa_1238) (0) /* access=0 */ vec1 32 ssa_1240 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1240, ssa_1239) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1241 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1242 = intrinsic load_deref (ssa_1241) (0) /* access=0 */ vec4 32 ssa_1243 = vec4 ssa_1242.w, ssa_1242.w, ssa_1242.w, ssa_1242.w vec1 32 ssa_1244 = imov ssa_1243.x /* succs: block_22 block_23 */ if ssa_47 { block block_22: /* preds: block_21 */ vec1 32 ssa_1245 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_1246 = txf ssa_1245 (texture_deref), 
ssa_1244 (coord), 0 (sampler), vec1 32 ssa_1247 = deref_var &phi@6 (function_temp vec4) intrinsic store_deref (ssa_1247, ssa_1246) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_24 */ } else { block block_23: /* preds: block_21 */ vec1 32 ssa_1248 = deref_var &phi@6 (function_temp vec4) intrinsic store_deref (ssa_1248, ssa_37) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_24 */ } block block_24: /* preds: block_22 block_23 */ vec1 32 ssa_1249 = deref_var &phi@6 (function_temp vec4) vec4 32 ssa_1250 = intrinsic load_deref (ssa_1249) (0) /* access=0 */ vec1 32 ssa_1251 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1251, ssa_1250) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1252 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1253 = intrinsic load_deref (ssa_1252) (0) /* access=0 */ vec1 32 ssa_1254 = imov ssa_1253.w vec1 32 ssa_1255 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1256 = intrinsic load_deref (ssa_1255) (0) /* access=0 */ vec4 32 ssa_1257 = vec4 ssa_1256.x, ssa_1254, ssa_1256.z, ssa_1256.w vec1 32 ssa_1258 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1258, ssa_1257) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1259 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1260 = intrinsic load_deref (ssa_1259) (0) /* access=0 */ vec1 32 ssa_1261 = imov ssa_1260.w vec1 32 ssa_1262 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1263 = intrinsic load_deref (ssa_1262) (0) /* access=0 */ vec4 32 ssa_1264 = vec4 ssa_1263.x, ssa_1263.y, ssa_1261, ssa_1263.w vec1 32 ssa_1265 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1265, ssa_1264) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1266 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1267 = intrinsic load_deref (ssa_1266) (0) /* access=0 */ vec3 32 ssa_1268 = vec3 ssa_1267.x, ssa_1267.y, ssa_1267.z vec1 32 ssa_1269 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1270 = intrinsic vulkan_resource_index (ssa_1269) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1271 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1272 = load_const (0x00000320 /* 0.000000 */) vec1 32 ssa_1273 = iadd ssa_1271, ssa_1272 vec1 32 ssa_1274 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1275 = iadd ssa_1273, ssa_1274 vec1 32 ssa_1276 = intrinsic load_ubo (ssa_1270, ssa_1275) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1277 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1278 = iadd ssa_1273, ssa_1277 vec1 32 ssa_1279 = intrinsic load_ubo (ssa_1270, ssa_1278) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1280 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1281 = iadd ssa_1273, ssa_1280 vec1 32 ssa_1282 = intrinsic load_ubo (ssa_1270, ssa_1281) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1283 = vec3 ssa_1276, ssa_1279, ssa_1282 vec3 32 ssa_1284 = fneg ssa_1283 vec3 32 ssa_1285 = fadd ssa_1268, ssa_1284 vec1 32 ssa_1286 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1287 = intrinsic load_deref (ssa_1286) (0) /* access=0 */ vec4 32 ssa_1288 = vec4 ssa_1285.x, ssa_1285.y, ssa_1285.z, ssa_1287.w vec1 32 ssa_1289 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1289, ssa_1288) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1290 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1291 = intrinsic load_deref (ssa_1290) (0) /* access=0 */ vec4 32 ssa_1292 = vec4 ssa_1291.x, ssa_1291.y, ssa_1291.z, ssa_46 vec1 32 ssa_1293 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1293, 
ssa_1292) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1294 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1295 = intrinsic load_deref (ssa_1294) (0) /* access=0 */ vec1 32 ssa_1296 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1297 = intrinsic vulkan_resource_index (ssa_1296) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1298 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1299 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1300 = iadd ssa_1298, ssa_1299 vec1 32 ssa_1301 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1302 = iadd ssa_1300, ssa_1301 vec1 32 ssa_1303 = intrinsic load_ubo (ssa_1297, ssa_1302) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1304 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1305 = iadd ssa_1300, ssa_1304 vec1 32 ssa_1306 = intrinsic load_ubo (ssa_1297, ssa_1305) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1307 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1308 = iadd ssa_1300, ssa_1307 vec1 32 ssa_1309 = intrinsic load_ubo (ssa_1297, ssa_1308) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1310 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1311 = iadd ssa_1300, ssa_1310 vec1 32 ssa_1312 = intrinsic load_ubo (ssa_1297, ssa_1311) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1313 = vec4 ssa_1303, ssa_1306, ssa_1309, ssa_1312 vec1 32 ssa_1314 = fdot4 ssa_1295, ssa_1313 vec1 32 ssa_1315 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1316 = intrinsic load_deref (ssa_1315) (0) /* access=0 */ vec4 32 ssa_1317 = vec4 ssa_1316.x, ssa_1316.y, ssa_1316.z, ssa_1314 vec1 32 ssa_1318 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1318, ssa_1317) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1319 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1320 = intrinsic load_deref (ssa_1319) (0) /* access=0 */ vec1 32 ssa_1321 = imov ssa_1320.x vec1 32 ssa_1322 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1323 = intrinsic load_deref (ssa_1322) (0) /* access=0 */ vec4 32 ssa_1324 = vec4 ssa_1323.x, ssa_1321, ssa_1323.z, ssa_1323.w vec1 32 ssa_1325 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_1325, ssa_1324) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1326 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1327 = intrinsic load_deref (ssa_1326) (0) /* access=0 */ vec1 32 ssa_1328 = imov ssa_1327.x vec1 32 ssa_1329 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1330 = intrinsic load_deref (ssa_1329) (0) /* access=0 */ vec4 32 ssa_1331 = vec4 ssa_1330.x, ssa_1330.y, ssa_1328, ssa_1330.w vec1 32 ssa_1332 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_1332, ssa_1331) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1333 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1334 = intrinsic load_deref (ssa_1333) (0) /* access=0 */ vec1 32 ssa_1335 = imov ssa_1334.x vec1 32 ssa_1336 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1337 = intrinsic load_deref (ssa_1336) (0) /* access=0 */ vec4 32 ssa_1338 = vec4 ssa_1335, ssa_1337.y, ssa_1337.z, ssa_1337.w vec1 32 ssa_1339 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_1339, ssa_1338) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1340 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1341 = intrinsic load_deref (ssa_1340) (0) /* access=0 */ vec3 32 ssa_1342 = vec3 ssa_1341.x, ssa_1341.y, ssa_1341.z vec1 32 ssa_1343 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1344 = intrinsic vulkan_resource_index (ssa_1343) (0, 0, 6) /* desc-set=0 */ 
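Editor's note: in the chain above (ssa_1269..ssa_1285), vulkan_resource_index (0, 0, 6) selects descriptor set 0, binding 0 as a UBO, the load_const 0x320 is a cbuffer byte offset, and three scalar load_ubo at +0/+4/+8 are reassembled into a float3 that is then negated and added, i.e. a subtraction. A C model of the same address arithmetic; the raw-byte UBO representation and all names are assumptions for illustration:

    #include <stdint.h>
    #include <string.h>

    typedef struct { float x, y, z; } float3;

    /* Three 4-byte loads at byte_off+{0,4,8}, as ssa_1276/ssa_1279/ssa_1282 do */
    static float3 ubo_load_float3(const uint8_t *ubo, uint32_t byte_off) {
        float3 v;
        memcpy(&v, ubo + byte_off, sizeof v);   /* load_ubo, align_mul=4 */
        return v;
    }

    /* fadd(r1.xyz, fneg(cb)) == r1.xyz - cb[0x320..0x32b] */
    static float3 sub_cb_float3(float3 r1, const uint8_t *ubo) {
        float3 c = ubo_load_float3(ubo, 0x320);
        float3 r = { r1.x - c.x, r1.y - c.y, r1.z - c.z };
        return r;
    }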
/* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1345 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1346 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1347 = iadd ssa_1345, ssa_1346 vec1 32 ssa_1348 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1349 = iadd ssa_1347, ssa_1348 vec1 32 ssa_1350 = intrinsic load_ubo (ssa_1344, ssa_1349) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1351 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1352 = iadd ssa_1347, ssa_1351 vec1 32 ssa_1353 = intrinsic load_ubo (ssa_1344, ssa_1352) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1354 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1355 = iadd ssa_1347, ssa_1354 vec1 32 ssa_1356 = intrinsic load_ubo (ssa_1344, ssa_1355) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1357 = vec3 ssa_1350, ssa_1353, ssa_1356 vec1 32 ssa_1358 = fdot3 ssa_1342, ssa_1357 vec1 32 ssa_1359 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1360 = intrinsic load_deref (ssa_1359) (0) /* access=0 */ vec4 32 ssa_1361 = vec4 ssa_1358, ssa_1360.y, ssa_1360.z, ssa_1360.w vec1 32 ssa_1362 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1362, ssa_1361) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1363 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1364 = intrinsic load_deref (ssa_1363) (0) /* access=0 */ vec1 32 ssa_1365 = imov ssa_1364.z vec1 32 ssa_1366 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1367 = intrinsic load_deref (ssa_1366) (0) /* access=0 */ vec4 32 ssa_1368 = vec4 ssa_1367.x, ssa_1365, ssa_1367.z, ssa_1367.w vec1 32 ssa_1369 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_1369, ssa_1368) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1370 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1371 = intrinsic load_deref (ssa_1370) (0) /* access=0 */ vec1 32 ssa_1372 = imov ssa_1371.y vec1 32 ssa_1373 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1374 = intrinsic load_deref (ssa_1373) (0) /* access=0 */ vec4 32 ssa_1375 = vec4 ssa_1374.x, ssa_1374.y, ssa_1372, ssa_1374.w vec1 32 ssa_1376 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_1376, ssa_1375) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1377 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1378 = intrinsic load_deref (ssa_1377) (0) /* access=0 */ vec1 32 ssa_1379 = imov ssa_1378.z vec1 32 ssa_1380 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1381 = intrinsic load_deref (ssa_1380) (0) /* access=0 */ vec4 32 ssa_1382 = vec4 ssa_1381.x, ssa_1381.y, ssa_1379, ssa_1381.w vec1 32 ssa_1383 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1383, ssa_1382) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1384 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1385 = intrinsic load_deref (ssa_1384) (0) /* access=0 */ vec1 32 ssa_1386 = imov ssa_1385.y vec1 32 ssa_1387 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1388 = intrinsic load_deref (ssa_1387) (0) /* access=0 */ vec4 32 ssa_1389 = vec4 ssa_1386, ssa_1388.y, ssa_1388.z, ssa_1388.w vec1 32 ssa_1390 = deref_var &r7 (shader_temp vec4) intrinsic store_deref (ssa_1390, ssa_1389) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1391 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1392 = intrinsic load_deref (ssa_1391) (0) /* access=0 */ vec1 32 ssa_1393 = imov ssa_1392.z vec1 32 ssa_1394 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1395 = intrinsic load_deref (ssa_1394) (0) /* access=0 */ vec4 32 ssa_1396 = vec4 ssa_1393, ssa_1395.y, ssa_1395.z, ssa_1395.w vec1 32 ssa_1397 = deref_var &r3 
(shader_temp vec4) intrinsic store_deref (ssa_1397, ssa_1396) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec4 32 ssa_1659 = intrinsic load_deref (ssa_1658) (0) /* access=0 */ vec1 32 ssa_1660 = imov ssa_1659.z [interleaved dump fragments elided: ssa_834, ssa_1305-ssa_1309, ssa_1661] vec1 32 ssa_1272 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1273 = intrinsic load_deref (ssa_1272) (0) /* access=0 */ vec1 32 ssa_1274 = imov ssa_1273.y vec1 32 ssa_1275 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1276 = intrinsic load_deref (ssa_1275) (0) /* access=0 */ vec1 32 ssa_1277 = imov ssa_1276.w vec1 32 ssa_1278 = fmul ssa_1274, ssa_1277 vec1 32 ssa_1310 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1311 = intrinsic load_deref (ssa_1310) (0) /* access=0 */ vec4 32 ssa_1312 = vec4 ssa_1309, ssa_1311.y, ssa_1311.z, ssa_1311.w vec1 32 ssa_1313 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1313, ssa_1312) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1314 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1315 = intrinsic load_deref (ssa_1314) (0) /* access=0 */ vec3 32 ssa_1316 = vec3 ssa_1315.y, ssa_1315.z, ssa_1315.x vec1 32 ssa_1317 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_1318 = intrinsic load_deref (ssa_1317) (0) /* access=0 */ vec3 32 ssa_1319 = vec3 ssa_1318.x, ssa_1318.y, ssa_1318.z vec1 32 ssa_1320 = fdot3 ssa_1316, ssa_1319 vec1 32 ssa_1321 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1322 = intrinsic load_deref (ssa_1321) (0) /* access=0 */ vec4 32 ssa_1323 = vec4 ssa_1322.x, ssa_1320, ssa_1322.z, ssa_1322.w vec1 32 ssa_1324 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1324, ssa_1323) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1325 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1326 = intrinsic load_deref (ssa_1325) (0) /* access=0 */ vec3 32 ssa_1327 = vec3 ssa_1326.y, ssa_1326.z, ssa_1326.x vec1 32 ssa_1328 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_1329 = intrinsic load_deref (ssa_1328) (0) /* access=0 */ vec3 32 ssa_1330 = vec3 ssa_1329.x, ssa_1329.y, ssa_1329.z vec1 32 ssa_1331 = fdot3 ssa_1327, ssa_1330 vec1 32 ssa_1332 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1333 = intrinsic load_deref (ssa_1332) (0) /* access=0 */ vec4 32 ssa_1334 = vec4 ssa_1333.x, ssa_1333.y, ssa_1331, ssa_1333.w vec1 32 ssa_1335 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1335, ssa_1334) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1336 = deref_var &o4 (shader_out vec4) vec4 32 ssa_1337 = intrinsic load_deref (ssa_1336) (0) /* access=0 */ vec4 32 ssa_1338 = vec4 ssa_1337.x, ssa_1337.y, ssa_1337.z, ssa_37 vec1 32 ssa_1339 = deref_var &o4 (shader_out vec4) intrinsic store_deref (ssa_1339, ssa_1338) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1340 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1341 = intrinsic load_deref (ssa_1340) (0) /* access=0 */ vec1 32 ssa_1342 = imov ssa_1341.x vec1 32 ssa_1343 = fabs ssa_1342 vec1 32 ssa_1344 = fneg ssa_1343 vec1 32 ssa_1345 = fadd ssa_1344, ssa_36 vec1 32 ssa_1346 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1347 = intrinsic load_deref (ssa_1346) (0) /* access=0 */ vec4 32 ssa_1348 = vec4 ssa_1347.x, ssa_1347.y, ssa_1347.z, ssa_1345 vec1 32 ssa_1349 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1349, ssa_1348) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1350 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1351 = intrinsic load_deref (ssa_1350) (0) /* access=0 */ vec1 32 ssa_1352 = imov ssa_1351.y
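Editor's note: just ahead, the dump computes fdot3 of r2 with itself (ssa_1369), takes frsq of the result (ssa_1377), and multiplies r2.xyz by the broadcast value (ssa_1388): the standard normalize-via-reciprocal-square-root idiom. A minimal C sketch of that run, assuming plain sqrtf in place of the hardware rsq approximation (names are illustrative):

    #include <math.h>

    typedef struct { float x, y, z; } float3;

    static float3 normalize3(float3 v) {
        float d   = v.x*v.x + v.y*v.y + v.z*v.z;  /* fdot3 ssa_1365, ssa_1368 */
        float inv = 1.0f / sqrtf(d);              /* frsq ssa_1377            */
        v.x *= inv; v.y *= inv; v.z *= inv;       /* fmul ssa_1384, ssa_1387  */
        return v;
    }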
vec1 32 ssa_1353 = fabs ssa_1352 vec1 32 ssa_1354 = fneg ssa_1353 vec1 32 ssa_1355 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1356 = intrinsic load_deref (ssa_1355) (0) /* access=0 */ vec1 32 ssa_1357 = imov ssa_1356.w vec1 32 ssa_1358 = fadd ssa_1354, ssa_1357 vec1 32 ssa_1359 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1360 = intrinsic load_deref (ssa_1359) (0) /* access=0 */ vec4 32 ssa_1361 = vec4 ssa_1360.x, ssa_1360.y, ssa_1358, ssa_1360.w vec1 32 ssa_1362 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1362, ssa_1361) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1363 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1364 = intrinsic load_deref (ssa_1363) (0) /* access=0 */ vec3 32 ssa_1365 = vec3 ssa_1364.x, ssa_1364.y, ssa_1364.z vec1 32 ssa_1366 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1367 = intrinsic load_deref (ssa_1366) (0) /* access=0 */ vec3 32 ssa_1368 = vec3 ssa_1367.x, ssa_1367.y, ssa_1367.z vec1 32 ssa_1369 = fdot3 ssa_1365, ssa_1368 vec1 32 ssa_1370 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1371 = intrinsic load_deref (ssa_1370) (0) /* access=0 */ vec4 32 ssa_1372 = vec4 ssa_1371.x, ssa_1371.y, ssa_1371.z, ssa_1369 vec1 32 ssa_1373 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1373, ssa_1372) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1374 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1375 = intrinsic load_deref (ssa_1374) (0) /* access=0 */ vec1 32 ssa_1376 = imov ssa_1375.w vec1 32 ssa_1377 = frsq ssa_1376 vec1 32 ssa_1378 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1379 = intrinsic load_deref (ssa_1378) (0) /* access=0 */ vec4 32 ssa_1380 = vec4 ssa_1379.x, ssa_1379.y, ssa_1379.z, ssa_1377 vec1 32 ssa_1381 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1381, ssa_1380) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1382 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1383 = intrinsic load_deref (ssa_1382) (0) /* access=0 */ vec3 32 ssa_1384 = vec3 ssa_1383.w, ssa_1383.w, ssa_1383.w vec1 32 ssa_1385 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1386 = intrinsic load_deref (ssa_1385) (0) /* access=0 */ vec3 32 ssa_1387 = vec3 ssa_1386.x, ssa_1386.y, ssa_1386.z vec3 32 ssa_1388 = fmul ssa_1384, ssa_1387 vec1 32 ssa_1389 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1390 = intrinsic load_deref (ssa_1389) (0) /* access=0 */ vec4 32 ssa_1391 = vec4 ssa_1388.x, ssa_1388.y, ssa_1388.z, ssa_1390.w vec1 32 ssa_1392 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1392, ssa_1391) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1393 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1394 = intrinsic load_deref (ssa_1393) (0) /* access=0 */ vec1 32 ssa_1395 = imov ssa_1394.x vec1 32 ssa_1396 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1397 = intrinsic load_deref (ssa_1396) (0) /* access=0 */ vec1 32 ssa_1398 = imov ssa_1397.z vec1 32 ssa_1399 = fmul ssa_1395, ssa_1398 vec1 32 ssa_1400 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1401 = intrinsic load_deref (ssa_1400) (0) /* access=0 */ vec4 32 ssa_1402 = vec4 ssa_1401.x, ssa_1401.y, ssa_1401.z, ssa_1399 vec1 32 ssa_1403 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1403, ssa_1402) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1404 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1405 = intrinsic load_deref (ssa_1404) (0) /* access=0 */ vec3 32 ssa_1406 = vec3 ssa_1405.z, ssa_1405.x, ssa_1405.y vec1 32 ssa_1407 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1408 = intrinsic load_deref (ssa_1407) (0) /* 
access=0 */ vec3 32 ssa_1409 = vec3 ssa_1408.w, ssa_1408.x, ssa_1408.y vec3 32 ssa_1410 = fmul ssa_1406, ssa_1409 vec1 32 ssa_1411 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1412 = intrinsic load_deref (ssa_1411) (0) /* access=0 */ vec4 32 ssa_1413 = vec4 ssa_1410.x, ssa_1410.y, ssa_1410.z, ssa_1412.w vec1 32 ssa_1414 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1414, ssa_1413) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1415 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1416 = intrinsic load_deref (ssa_1415) (0) /* access=0 */ vec3 32 ssa_1417 = vec3 ssa_1416.y, ssa_1416.w, ssa_1416.x vec1 32 ssa_1418 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1419 = intrinsic load_deref (ssa_1418) (0) /* access=0 */ vec3 32 ssa_1420 = vec3 ssa_1419.x, ssa_1419.y, ssa_1419.z vec1 32 ssa_1421 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1422 = intrinsic load_deref (ssa_1421) (0) /* access=0 */ vec3 32 ssa_1423 = vec3 ssa_1422.x, ssa_1422.y, ssa_1422.z vec3 32 ssa_1424 = fneg ssa_1423 vec3 32 ssa_1425 = ffma ssa_1417, ssa_1420, ssa_1424 vec1 32 ssa_1426 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1427 = intrinsic load_deref (ssa_1426) (0) /* access=0 */ vec4 32 ssa_1428 = vec4 ssa_1425.x, ssa_1425.y, ssa_1425.z, ssa_1427.w vec1 32 ssa_1429 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1429, ssa_1428) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1430 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1431 = intrinsic load_deref (ssa_1430) (0) /* access=0 */ vec3 32 ssa_1432 = vec3 ssa_1431.z, ssa_1431.z, ssa_1431.z vec1 32 ssa_1433 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1434 = intrinsic load_deref (ssa_1433) (0) /* access=0 */ vec3 32 ssa_1435 = vec3 ssa_1434.x, ssa_1434.y, ssa_1434.z vec3 32 ssa_1436 = fmul ssa_1432, ssa_1435 vec1 32 ssa_1437 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1438 = intrinsic load_deref (ssa_1437) (0) /* access=0 */ vec4 32 ssa_1439 = vec4 ssa_1436.x, ssa_1436.y, ssa_1436.z, ssa_1438.w vec1 32 ssa_1440 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1440, ssa_1439) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1441 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1442 = intrinsic load_deref (ssa_1441) (0) /* access=0 */ vec3 32 ssa_1443 = vec3 ssa_1442.x, ssa_1442.y, ssa_1442.z vec1 32 ssa_1444 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1445 = intrinsic load_deref (ssa_1444) (0) /* access=0 */ vec3 32 ssa_1446 = vec3 ssa_1445.x, ssa_1445.y, ssa_1445.z vec1 32 ssa_1447 = fdot3 ssa_1443, ssa_1446 vec1 32 ssa_1448 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1449 = intrinsic load_deref (ssa_1448) (0) /* access=0 */ vec4 32 ssa_1450 = vec4 ssa_1447, ssa_1449.y, ssa_1449.z, ssa_1449.w vec1 32 ssa_1451 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1451, ssa_1450) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1452 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1453 = intrinsic load_deref (ssa_1452) (0) /* access=0 */ vec3 32 ssa_1454 = vec3 ssa_1453.x, ssa_1453.y, ssa_1453.w vec1 32 ssa_1455 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1456 = intrinsic load_deref (ssa_1455) (0) /* access=0 */ vec3 32 ssa_1457 = vec3 ssa_1456.x, ssa_1456.y, ssa_1456.z vec1 32 ssa_1458 = fdot3 ssa_1454, ssa_1457 vec1 32 ssa_1459 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1460 = intrinsic load_deref (ssa_1459) (0) /* access=0 */ vec4 32 ssa_1461 = vec4 ssa_1458, ssa_1460.y, ssa_1460.z, ssa_1460.w vec1 32 ssa_1462 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1462, 
ssa_1461) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1463 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1464 = intrinsic load_deref (ssa_1463) (0) /* access=0 */ vec3 32 ssa_1465 = vec3 ssa_1464.x, ssa_1464.y, ssa_1464.z vec1 32 ssa_1466 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_1467 = intrinsic load_deref (ssa_1466) (0) /* access=0 */ vec3 32 ssa_1468 = vec3 ssa_1467.x, ssa_1467.y, ssa_1467.z vec1 32 ssa_1469 = fdot3 ssa_1465, ssa_1468 vec1 32 ssa_1470 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1471 = intrinsic load_deref (ssa_1470) (0) /* access=0 */ vec4 32 ssa_1472 = vec4 ssa_1471.x, ssa_1469, ssa_1471.z, ssa_1471.w vec1 32 ssa_1473 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1473, ssa_1472) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1474 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1475 = intrinsic load_deref (ssa_1474) (0) /* access=0 */ vec3 32 ssa_1476 = vec3 ssa_1475.x, ssa_1475.y, ssa_1475.w vec1 32 ssa_1477 = deref_var &r10 (shader_temp vec4) vec4 32 ssa_1478 = intrinsic load_deref (ssa_1477) (0) /* access=0 */ vec3 32 ssa_1479 = vec3 ssa_1478.x, ssa_1478.y, ssa_1478.z vec1 32 ssa_1480 = fdot3 ssa_1476, ssa_1479 vec1 32 ssa_1481 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1482 = intrinsic load_deref (ssa_1481) (0) /* access=0 */ vec4 32 ssa_1483 = vec4 ssa_1482.x, ssa_1480, ssa_1482.z, ssa_1482.w vec1 32 ssa_1484 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1484, ssa_1483) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1485 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1486 = intrinsic load_deref (ssa_1485) (0) /* access=0 */ vec3 32 ssa_1487 = vec3 ssa_1486.x, ssa_1486.y, ssa_1486.w vec1 32 ssa_1488 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_1489 = intrinsic load_deref (ssa_1488) (0) /* access=0 */ vec3 32 ssa_1490 = vec3 ssa_1489.x, ssa_1489.y, ssa_1489.z vec1 32 ssa_1491 = fdot3 ssa_1487, ssa_1490 vec1 32 ssa_1492 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1493 = intrinsic load_deref (ssa_1492) (0) /* access=0 */ vec4 32 ssa_1494 = vec4 ssa_1493.x, ssa_1493.y, ssa_1491, ssa_1493.w vec1 32 ssa_1495 = deref_var &o6 (shader_out vec4) intrinsic store_deref (ssa_1495, ssa_1494) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1496 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1497 = intrinsic load_deref (ssa_1496) (0) /* access=0 */ vec3 32 ssa_1498 = vec3 ssa_1497.x, ssa_1497.y, ssa_1497.z vec1 32 ssa_1499 = deref_var &r11 (shader_temp vec4) vec4 32 ssa_1500 = intrinsic load_deref (ssa_1499) (0) /* access=0 */ vec3 32 ssa_1501 = vec3 ssa_1500.x, ssa_1500.y, ssa_1500.z vec1 32 ssa_1502 = fdot3 ssa_1498, ssa_1501 vec1 32 ssa_1503 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1504 = intrinsic load_deref (ssa_1503) (0) /* access=0 */ vec4 32 ssa_1505 = vec4 ssa_1504.x, ssa_1504.y, ssa_1502, ssa_1504.w vec1 32 ssa_1506 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1506, ssa_1505) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1507 = deref_var &o5 (shader_out vec4) vec4 32 ssa_1508 = intrinsic load_deref (ssa_1507) (0) /* access=0 */ vec4 32 ssa_1509 = vec4 ssa_1508.x, ssa_1508.y, ssa_1508.z, ssa_35 vec1 32 ssa_1510 = deref_var &o5 (shader_out vec4) intrinsic store_deref (ssa_1510, ssa_1509) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1511 = deref_var &o6 (shader_out vec4) vec4 32 ssa_1512 = intrinsic load_deref (ssa_1511) (0) /* access=0 */ vec4 32 ssa_1513 = vec4 ssa_1512.x, ssa_1512.y, ssa_1512.z, ssa_34 vec1 32 ssa_1514 = deref_var &o6 (shader_out vec4) intrinsic 
store_deref (ssa_1514, ssa_1513) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1515 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1516 = intrinsic load_deref (ssa_1515) (0) /* access=0 */ vec1 32 ssa_1517 = imov ssa_1516.y vec1 32 ssa_1518 = ishl ssa_1517, ssa_33 vec1 32 ssa_1519 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1520 = intrinsic load_deref (ssa_1519) (0) /* access=0 */ vec4 32 ssa_1521 = vec4 ssa_1518, ssa_1520.y, ssa_1520.z, ssa_1520.w vec1 32 ssa_1522 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1522, ssa_1521) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1523 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1524 = intrinsic load_deref (ssa_1523) (0) /* access=0 */ vec3 32 ssa_1525 = vec3 ssa_1524.x, ssa_1524.y, ssa_1524.z vec3 32 ssa_1526 = ishr ssa_1525, ssa_32 vec1 32 ssa_1527 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1528 = intrinsic load_deref (ssa_1527) (0) /* access=0 */ vec4 32 ssa_1529 = vec4 ssa_1526.x, ssa_1526.y, ssa_1526.z, ssa_1528.w vec1 32 ssa_1530 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1530, ssa_1529) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1531 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1532 = intrinsic load_deref (ssa_1531) (0) /* access=0 */ vec3 32 ssa_1533 = vec3 ssa_1532.x, ssa_1532.y, ssa_1532.z vec3 32 ssa_1534 = i2f32 ssa_1533 vec1 32 ssa_1535 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1536 = intrinsic load_deref (ssa_1535) (0) /* access=0 */ vec4 32 ssa_1537 = vec4 ssa_1534.x, ssa_1534.y, ssa_1534.z, ssa_1536.w vec1 32 ssa_1538 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1538, ssa_1537) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1539 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1540 = intrinsic load_deref (ssa_1539) (0) /* access=0 */ vec3 32 ssa_1541 = vec3 ssa_1540.x, ssa_1540.y, ssa_1540.z vec3 32 ssa_1542 = fmul ssa_1541, ssa_31 vec1 32 ssa_1543 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1544 = intrinsic load_deref (ssa_1543) (0) /* access=0 */ vec4 32 ssa_1545 = vec4 ssa_1542.x, ssa_1542.y, ssa_1542.z, ssa_1544.w vec1 32 ssa_1546 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1546, ssa_1545) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1547 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1548 = intrinsic load_deref (ssa_1547) (0) /* access=0 */ vec3 32 ssa_1549 = vec3 ssa_1548.w, ssa_1548.w, ssa_1548.w vec1 32 ssa_1550 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1551 = intrinsic load_deref (ssa_1550) (0) /* access=0 */ vec3 32 ssa_1552 = vec3 ssa_1551.x, ssa_1551.y, ssa_1551.z vec3 32 ssa_1553 = fmul ssa_1549, ssa_1552 vec1 32 ssa_1554 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1555 = intrinsic load_deref (ssa_1554) (0) /* access=0 */ vec4 32 ssa_1556 = vec4 ssa_1553.x, ssa_1553.y, ssa_1553.z, ssa_1555.w vec1 32 ssa_1557 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1557, ssa_1556) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1558 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_1559 = intrinsic load_deref (ssa_1558) (0) /* access=0 */ vec1 32 ssa_1560 = imov ssa_1559.w vec1 32 ssa_1561 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1562 = intrinsic load_deref (ssa_1561) (0) /* access=0 */ vec4 32 ssa_1563 = vec4 ssa_1560, ssa_1562.y, ssa_1562.z, ssa_1562.w vec1 32 ssa_1564 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1564, ssa_1563) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1565 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1566 = intrinsic load_deref 
(ssa_1565) (0) /* access=0 */ vec4 32 ssa_1567 = vec4 ssa_1566.z, ssa_1566.z, ssa_1566.z, ssa_1566.z vec1 32 ssa_1568 = imov ssa_1567.x /* succs: block_25 block_26 */ if ssa_30 { block block_25: /* preds: block_24 */ vec1 32 ssa_1569 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_1570 = txf ssa_1569 (texture_deref), ssa_1568 (coord), 0 (sampler), vec4 32 ssa_1571 = vec4 ssa_1570.x, ssa_1570.z, ssa_1570.y, ssa_1570.w vec1 32 ssa_1572 = deref_var &phi@7 (function_temp vec4) intrinsic store_deref (ssa_1572, ssa_1571) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_27 */ } else { block block_26: /* preds: block_24 */ vec1 32 ssa_1573 = deref_var &phi@7 (function_temp vec4) intrinsic store_deref (ssa_1573, ssa_18) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_27 */ } block block_27: /* preds: block_25 block_26 */ vec1 32 ssa_1574 = deref_var &phi@7 (function_temp vec4) vec4 32 ssa_1575 = intrinsic load_deref (ssa_1574) (0) /* access=0 */ [interleaved dump fragments elided: ssa_833-ssa_838, ssa_1279-ssa_1280, ssa_1398-ssa_1399, ssa_1609-ssa_1613, ssa_1660-ssa_1664] vec1 32 ssa_1576 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1576, ssa_1575) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1577 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1578 = intrinsic load_deref (ssa_1577) (0) /* access=0 */ vec4 32 ssa_1579 = vec4 ssa_1578.w, ssa_1578.w, ssa_1578.w, ssa_1578.w vec1 32 ssa_1580 = imov ssa_1579.x /* succs: block_28 block_29 */ if ssa_29 { block block_28: /* preds: block_27 */ vec1 32 ssa_1581 = deref_var &t80 (uniform samplerBuffer) vec4 32 ssa_1582 = txf ssa_1581 (texture_deref), ssa_1580 (coord), 0 (sampler), vec1 32 ssa_1583 = deref_var &phi@8 (function_temp vec4) intrinsic store_deref (ssa_1583, ssa_1582) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_30 */ } else { block block_29: /* preds: block_27 */ vec1 32 ssa_1584 = deref_var &phi@8 (function_temp vec4) intrinsic store_deref (ssa_1584, ssa_17) (15, 0) /* wrmask=xyzw */ /* access=0 */ /* succs: block_30 */ } block block_30: /* preds: block_28 block_29 */ vec1 32 ssa_1585 = deref_var &phi@8 (function_temp vec4) vec4 32 ssa_1586 = intrinsic load_deref (ssa_1585) (0) /* access=0 */ vec1 32 ssa_1587 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1587, ssa_1586) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1588 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1589 = intrinsic load_deref (ssa_1588) (0) /* access=0 */ vec1 32 ssa_1590 = imov ssa_1589.w vec1 32 ssa_1591 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1592 = intrinsic load_deref (ssa_1591) (0) /* access=0 */ vec4 32 ssa_1593 = vec4 ssa_1592.x, ssa_1590, ssa_1592.z, ssa_1592.w vec1 32 ssa_1594 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1594, ssa_1593) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1595 = deref_var &r3 (shader_temp vec4) vec4 32
ssa_1596 = intrinsic load_deref (ssa_1595) (0) /* access=0 */ vec1 32 ssa_1597 = imov ssa_1596.w vec1 32 ssa_1598 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1599 = intrinsic load_deref (ssa_1598) (0) /* access=0 */ vec4 32 ssa_1600 = vec4 ssa_1599.x, ssa_1599.y, ssa_1597, ssa_1599.w vec1 32 ssa_1601 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1601, ssa_1600) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1602 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1603 = intrinsic load_deref (ssa_1602) (0) /* access=0 */ vec3 32 ssa_1604 = vec3 ssa_1603.x, ssa_1603.y, ssa_1603.z vec1 32 ssa_1605 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1606 = intrinsic vulkan_resource_index (ssa_1605) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1607 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1608 = load_const (0x00000320 /* 0.000000 */) vec1 32 ssa_1609 = iadd ssa_1607, ssa_1608 vec1 32 ssa_1610 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1611 = iadd ssa_1609, ssa_1610 vec1 32 ssa_1612 = intrinsic load_ubo (ssa_1606, ssa_1611) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1613 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1614 = iadd ssa_1609, ssa_1613 vec1 32 ssa_1615 = intrinsic load_ubo (ssa_1606, ssa_1614) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1616 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1617 = iadd ssa_1609, ssa_1616 vec1 32 ssa_1618 = intrinsic load_ubo (ssa_1606, ssa_1617) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1619 = vec3 ssa_1612, ssa_1615, ssa_1618 vec3 32 ssa_1620 = fneg ssa_1619 vec3 32 ssa_1621 = fadd ssa_1604, ssa_1620 vec1 32 ssa_1622 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1623 = intrinsic load_deref (ssa_1622) (0) /* access=0 */ vec4 32 ssa_1624 = vec4 ssa_1621.x, ssa_1621.y, ssa_1621.z, ssa_1623.w vec1 32 ssa_1625 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1625, ssa_1624) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1626 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1627 = intrinsic load_deref (ssa_1626) (0) /* access=0 */ vec4 32 ssa_1628 = vec4 ssa_1627.x, ssa_1627.y, ssa_1627.z, ssa_28 vec1 32 ssa_1629 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1629, ssa_1628) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1630 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1631 = intrinsic load_deref (ssa_1630) (0) /* access=0 */ vec1 32 ssa_1632 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1633 = intrinsic vulkan_resource_index (ssa_1632) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1634 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1635 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1636 = iadd ssa_1634, ssa_1635 vec1 32 ssa_1637 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1638 = iadd ssa_1636, ssa_1637 vec1 32 ssa_1639 = intrinsic load_ubo (ssa_1633, ssa_1638) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1640 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1641 = iadd ssa_1636, ssa_1640 vec1 32 ssa_1642 = intrinsic load_ubo (ssa_1633, ssa_1641) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1643 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1644 = iadd ssa_1636, ssa_1643 vec1 32 ssa_1645 = intrinsic load_ubo (ssa_1633, ssa_1644) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1646 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1647 = iadd ssa_1636, ssa_1646 vec1 32 ssa_1648 = 
intrinsic load_ubo (ssa_1633, ssa_1647) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1649 = vec4 ssa_1639, ssa_1642, ssa_1645, ssa_1648 vec1 32 ssa_1650 = fdot4 ssa_1631, ssa_1649 vec1 32 ssa_1651 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1652 = intrinsic load_deref (ssa_1651) (0) /* access=0 */ vec4 32 ssa_1653 = vec4 ssa_1652.x, ssa_1652.y, ssa_1652.z, ssa_1650 vec1 32 ssa_1654 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1654, ssa_1653) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1655 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1656 = intrinsic load_deref (ssa_1655) (0) /* access=0 */ vec1 32 ssa_1657 = imov ssa_1656.x vec1 32 ssa_1658 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1659 = intrinsic load_deref (ssa_1658) (0) /* access=0 */ vec4 32 ssa_1660 = vec4 ssa_1659.x, ssa_1657, ssa_1659.z, ssa_1659.w vec1 32 ssa_1661 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1661, ssa_1660) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1662 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1663 = intrinsic load_deref (ssa_1662) (0) /* access=0 */ vec1 32 ssa_1664 = imov ssa_1663.x vec1 32 ssa_1665 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1666 = intrinsic load_deref (ssa_1665) (0) /* access=0 */ vec4 32 ssa_1667 = vec4 ssa_1666.x, ssa_1666.y, ssa_1664, ssa_1666.w vec1 32 ssa_1668 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1668, ssa_1667) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1669 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_1670 = intrinsic load_deref (ssa_1669) (0) /* access=0 */ vec1 32 ssa_1671 = imov ssa_1670.x vec1 32 ssa_1672 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1673 = intrinsic load_deref (ssa_1672) (0) /* access=0 */ vec4 32 ssa_1674 = vec4 ssa_1671, ssa_1673.y, ssa_1673.z, ssa_1673.w vec1 32 ssa_1675 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1675, ssa_1674) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1676 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1677 = intrinsic load_deref (ssa_1676) (0) /* access=0 */ vec3 32 ssa_1678 = vec3 ssa_1677.x, ssa_1677.y, ssa_1677.z vec1 32 ssa_1679 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1680 = intrinsic vulkan_resource_index (ssa_1679) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1681 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1682 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1683 = iadd ssa_1681, ssa_1682 vec1 32 ssa_1684 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1685 = iadd ssa_1683, ssa_1684 vec1 32 ssa_1686 = intrinsic load_ubo (ssa_1680, ssa_1685) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1687 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1688 = iadd ssa_1683, ssa_1687 vec1 32 ssa_1689 = intrinsic load_ubo (ssa_1680, ssa_1688) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1690 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1691 = iadd ssa_1683, ssa_1690 vec1 32 ssa_1692 = intrinsic load_ubo (ssa_1680, ssa_1691) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1693 = vec3 ssa_1686, ssa_1689, ssa_1692 vec1 32 ssa_1694 = fdot3 ssa_1678, ssa_1693 vec1 32 ssa_1695 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1696 = intrinsic load_deref (ssa_1695) (0) /* access=0 */ vec4 32 ssa_1697 = vec4 ssa_1694, ssa_1696.y, ssa_1696.z, ssa_1696.w vec1 32 ssa_1698 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1698, ssa_1697) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1699 
= deref_var &r2 (shader_temp vec4) vec4 32 ssa_1700 = intrinsic load_deref (ssa_1699) (0) /* access=0 */ vec1 32 ssa_1701 = imov ssa_1700.z vec1 32 ssa_1702 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1703 = intrinsic load_deref (ssa_1702) (0) /* access=0 */ vec4 32 ssa_1704 = vec4 ssa_1703.x, ssa_1701, ssa_1703.z, ssa_1703.w vec1 32 ssa_1705 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_1705, ssa_1704) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1706 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1707 = intrinsic load_deref (ssa_1706) (0) /* access=0 */ vec1 32 ssa_1708 = imov ssa_1707.y vec1 32 ssa_1709 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1710 = intrinsic load_deref (ssa_1709) (0) /* access=0 */ vec4 32 ssa_1711 = vec4 ssa_1710.x, ssa_1710.y, ssa_1708, ssa_1710.w vec1 32 ssa_1712 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_1712, ssa_1711) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1713 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1714 = intrinsic load_deref (ssa_1713) (0) /* access=0 */ vec1 32 ssa_1715 = imov ssa_1714.z vec1 32 ssa_1716 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1717 = intrinsic load_deref (ssa_1716) (0) /* access=0 */ vec4 32 ssa_1718 = vec4 ssa_1717.x, ssa_1717.y, ssa_1715, ssa_1717.w vec1 32 ssa_1719 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1719, ssa_1718) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1720 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_1721 = intrinsic load_deref (ssa_1720) (0) /* access=0 */ vec1 32 ssa_1722 = imov ssa_1721.y vec1 32 ssa_1723 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1724 = intrinsic load_deref (ssa_1723) (0) /* access=0 */ vec4 32 ssa_1725 = vec4 ssa_1722, ssa_1724.y, ssa_1724.z, ssa_1724.w vec1 32 ssa_1726 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_1726, ssa_1725) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1727 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_1728 = intrinsic load_deref (ssa_1727) (0) /* access=0 */ vec1 32 ssa_1729 = imov ssa_1728.z vec1 32 ssa_1730 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1731 = intrinsic load_deref (ssa_1730) (0) /* access=0 */ vec4 32 ssa_1732 = vec4 ssa_1729, ssa_1731.y, ssa_1731.z, ssa_1731.w vec1 32 ssa_1733 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1733, ssa_1732) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1734 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1735 = intrinsic load_deref (ssa_1734) (0) /* access=0 */ vec3 32 ssa_1736 = vec3 ssa_1735.x, ssa_1735.y, ssa_1735.z vec1 32 ssa_1737 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1738 = intrinsic vulkan_resource_index (ssa_1737) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1739 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1740 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1741 = iadd ssa_1739, ssa_1740 vec1 32 ssa_1742 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1743 = iadd ssa_1741, ssa_1742 vec1 32 ssa_1744 = intrinsic load_ubo (ssa_1738, ssa_1743) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1745 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1746 = iadd ssa_1741, ssa_1745 vec1 32 ssa_1747 = intrinsic load_ubo (ssa_1738, ssa_1746) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1748 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1749 = iadd ssa_1741, ssa_1748 vec1 32 ssa_1750 = intrinsic load_ubo (ssa_1738, ssa_1749) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 
ssa_1751 = vec3 ssa_1744, ssa_1747, ssa_1750 vec1 32 ssa_1752 = fdot3 ssa_1736, ssa_1751 vec1 32 ssa_1753 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1754 = intrinsic load_deref (ssa_1753) (0) /* access=0 */ vec4 32 ssa_1755 = vec4 ssa_1754.x, ssa_1752, ssa_1754.z, ssa_1754.w vec1 32 ssa_1756 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1756, ssa_1755) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1757 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1758 = intrinsic load_deref (ssa_1757) (0) /* access=0 */ vec3 32 ssa_1759 = vec3 ssa_1758.x, ssa_1758.y, ssa_1758.z vec1 32 ssa_1760 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1761 = intrinsic vulkan_resource_index (ssa_1760) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1762 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1763 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1764 = iadd ssa_1762, ssa_1763 vec1 32 ssa_1765 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1766 = iadd ssa_1764, ssa_1765 vec1 32 ssa_1767 = intrinsic load_ubo (ssa_1761, ssa_1766) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1768 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1769 = iadd ssa_1764, ssa_1768 vec1 32 ssa_1770 = intrinsic load_ubo (ssa_1761, ssa_1769) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1771 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1772 = iadd ssa_1764, ssa_1771 vec1 32 ssa_1773 = intrinsic load_ubo (ssa_1761, ssa_1772) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1774 = vec3 ssa_1767, ssa_1770, ssa_1773 vec1 32 ssa_1775 = fdot3 ssa_1759, ssa_1774 vec1 32 ssa_1776 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1777 = intrinsic load_deref (ssa_1776) (0) /* access=0 */ vec4 32 ssa_1778 = vec4 ssa_1777.x, ssa_1777.y, ssa_1775, ssa_1777.w vec1 32 ssa_1779 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1779, ssa_1778) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1780 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1781 = intrinsic load_deref (ssa_1780) (0) /* access=0 */ vec4 32 ssa_1782 = vec4 ssa_1781.x, ssa_1781.y, ssa_1781.z, ssa_27 vec1 32 ssa_1783 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1783, ssa_1782) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1784 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1785 = intrinsic load_deref (ssa_1784) (0) /* access=0 */ vec1 32 ssa_1786 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1787 = intrinsic load_deref (ssa_1786) (0) /* access=0 */ vec1 32 ssa_1788 = fdot4 ssa_1785, ssa_1787 vec1 32 ssa_1789 = deref_var &o7 (shader_out vec4) vec4 32 ssa_1790 = intrinsic load_deref (ssa_1789) (0) /* access=0 */ vec4 32 ssa_1791 = vec4 ssa_1788, ssa_1790.y, ssa_1790.z, ssa_1790.w vec1 32 ssa_1792 = deref_var &o7 (shader_out vec4) intrinsic store_deref (ssa_1792, ssa_1791) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1793 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1794 = intrinsic load_deref (ssa_1793) (0) /* access=0 */ vec1 32 ssa_1795 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1796 = intrinsic vulkan_resource_index (ssa_1795) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1797 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1798 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1799 = iadd ssa_1797, ssa_1798 vec1 32 ssa_1800 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1801 = iadd ssa_1799, ssa_1800 vec1 32 ssa_1802 = intrinsic load_ubo (ssa_1796, ssa_1801) (4, 0) /* 
align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1803 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1804 = iadd ssa_1799, ssa_1803 vec1 32 ssa_1805 = intrinsic load_ubo (ssa_1796, ssa_1804) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1806 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1807 = iadd ssa_1799, ssa_1806 vec1 32 ssa_1808 = intrinsic load_ubo (ssa_1796, ssa_1807) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1809 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1810 = iadd ssa_1799, ssa_1809 vec1 32 ssa_1811 = intrinsic load_ubo (ssa_1796, ssa_1810) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1812 = vec4 ssa_1802, ssa_1805, ssa_1808, ssa_1811 vec1 32 ssa_1813 = fdot4 ssa_1794, ssa_1812 vec1 32 ssa_1814 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1815 = intrinsic load_deref (ssa_1814) (0) /* access=0 */ vec4 32 ssa_1816 = vec4 ssa_1815.x, ssa_1815.y, ssa_1815.z, ssa_1813 vec1 32 ssa_1817 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1817, ssa_1816) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1818 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1819 = intrinsic load_deref (ssa_1818) (0) /* access=0 */ vec3 32 ssa_1820 = vec3 ssa_1819.x, ssa_1819.y, ssa_1819.z vec1 32 ssa_1821 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1822 = intrinsic vulkan_resource_index (ssa_1821) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1823 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1824 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1825 = iadd ssa_1823, ssa_1824 vec1 32 ssa_1826 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1827 = iadd ssa_1825, ssa_1826 vec1 32 ssa_1828 = intrinsic load_ubo (ssa_1822, ssa_1827) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1829 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1830 = iadd ssa_1825, ssa_1829 vec1 32 ssa_1831 = intrinsic load_ubo (ssa_1822, ssa_1830) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1832 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1833 = iadd ssa_1825, ssa_1832 vec1 32 ssa_1834 = intrinsic load_ubo (ssa_1822, ssa_1833) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1835 = vec3 ssa_1828, ssa_1831, ssa_1834 vec1 32 ssa_1836 = fdot3 ssa_1820, ssa_1835 vec1 32 ssa_1837 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1838 = intrinsic load_deref (ssa_1837) (0) /* access=0 */ vec4 32 ssa_1839 = vec4 ssa_1836, ssa_1838.y, ssa_1838.z, ssa_1838.w vec1 32 ssa_1840 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1840, ssa_1839) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1841 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1842 = intrinsic load_deref (ssa_1841) (0) /* access=0 */ vec3 32 ssa_1843 = vec3 ssa_1842.x, ssa_1842.y, ssa_1842.z vec1 32 ssa_1844 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1845 = intrinsic vulkan_resource_index (ssa_1844) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1846 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1847 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1848 = iadd ssa_1846, ssa_1847 vec1 32 ssa_1849 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1850 = iadd ssa_1848, ssa_1849 vec1 32 ssa_1851 = intrinsic load_ubo (ssa_1845, ssa_1850) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1852 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1853 = iadd ssa_1848, ssa_1852 vec1 32 ssa_1854 = intrinsic load_ubo (ssa_1845, ssa_1853) (4, 0) /* 
align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1855 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1856 = iadd ssa_1848, ssa_1855 vec1 32 ssa_1857 = intrinsic load_ubo (ssa_1845, ssa_1856) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1858 = vec3 ssa_1851, ssa_1854, ssa_1857 vec1 32 ssa_1859 = fdot3 ssa_1843, ssa_1858 vec1 32 ssa_1860 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1861 = intrinsic load_deref (ssa_1860) (0) /* access=0 */ vec4 32 ssa_1862 = vec4 ssa_1861.x, ssa_1859, ssa_1861.z, ssa_1861.w vec1 32 ssa_1863 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1863, ssa_1862) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1864 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1865 = intrinsic load_deref (ssa_1864) (0) /* access=0 */ vec3 32 ssa_1866 = vec3 ssa_1865.x, ssa_1865.y, ssa_1865.z vec1 32 ssa_1867 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1868 = intrinsic vulkan_resource_index (ssa_1867) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1869 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1870 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1871 = iadd ssa_1869, ssa_1870 vec1 32 ssa_1872 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1873 = iadd ssa_1871, ssa_1872 vec1 32 ssa_1874 = intrinsic load_ubo (ssa_1868, ssa_1873) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1875 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1876 = iadd ssa_1871, ssa_1875 vec1 32 ssa_1877 = intrinsic load_ubo (ssa_1868, ssa_1876) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1878 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1879 = iadd ssa_1871, ssa_1878 vec1 32 ssa_1880 = intrinsic load_ubo (ssa_1868, ssa_1879) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1881 = vec3 ssa_1874, ssa_1877, ssa_1880 vec1 32 ssa_1882 = fdot3 ssa_1866, ssa_1881 vec1 32 ssa_1883 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1884 = intrinsic load_deref (ssa_1883) (0) /* access=0 */ vec4 32 ssa_1885 = vec4 ssa_1884.x, ssa_1884.y, ssa_1882, ssa_1884.w vec1 32 ssa_1886 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1886, ssa_1885) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1887 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1888 = intrinsic load_deref (ssa_1887) (0) /* access=0 */ vec1 32 ssa_1889 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1890 = intrinsic load_deref (ssa_1889) (0) /* access=0 */ vec1 32 ssa_1891 = fdot4 ssa_1888, ssa_1890 vec1 32 ssa_1892 = deref_var &o7 (shader_out vec4) vec4 32 ssa_1893 = intrinsic load_deref (ssa_1892) (0) /* access=0 */ vec4 32 ssa_1894 = vec4 ssa_1893.x, ssa_1891, ssa_1893.z, ssa_1893.w vec1 32 ssa_1895 = deref_var &o7 (shader_out vec4) intrinsic store_deref (ssa_1895, ssa_1894) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1896 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1897 = intrinsic load_deref (ssa_1896) (0) /* access=0 */ vec1 32 ssa_1898 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1899 = intrinsic vulkan_resource_index (ssa_1898) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1900 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1901 = load_const (0x000002b0 /* 0.000000 */) vec1 32 ssa_1902 = iadd ssa_1900, ssa_1901 vec1 32 ssa_1903 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1904 = iadd ssa_1902, ssa_1903 vec1 32 ssa_1905 = intrinsic load_ubo (ssa_1899, ssa_1904) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1906 = load_const 
(0x00000004 /* 0.000000 */) vec1 32 ssa_1907 = iadd ssa_1902, ssa_1906 vec1 32 ssa_1908 = intrinsic load_ubo (ssa_1899, ssa_1907) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1909 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1910 = iadd ssa_1902, ssa_1909 vec1 32 ssa_1911 = intrinsic load_ubo (ssa_1899, ssa_1910) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1912 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1913 = iadd ssa_1902, ssa_1912 vec1 32 ssa_1914 = intrinsic load_ubo (ssa_1899, ssa_1913) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1915 = vec4 ssa_1905, ssa_1908, ssa_1911, ssa_1914 vec1 32 ssa_1916 = fdot4 ssa_1897, ssa_1915 vec1 32 ssa_1917 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1918 = intrinsic load_deref (ssa_1917) (0) /* access=0 */ vec4 32 ssa_1919 = vec4 ssa_1918.x, ssa_1918.y, ssa_1918.z, ssa_1916 vec1 32 ssa_1920 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1920, ssa_1919) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1921 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1922 = intrinsic load_deref (ssa_1921) (0) /* access=0 */ vec1 32 ssa_1923 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1924 = intrinsic vulkan_resource_index (ssa_1923) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1925 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1926 = load_const (0x000002c0 /* 0.000000 */) vec1 32 ssa_1927 = iadd ssa_1925, ssa_1926 vec1 32 ssa_1928 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1929 = iadd ssa_1927, ssa_1928 vec1 32 ssa_1930 = intrinsic load_ubo (ssa_1924, ssa_1929) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1931 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1932 = iadd ssa_1927, ssa_1931 vec1 32 ssa_1933 = intrinsic load_ubo (ssa_1924, ssa_1932) (4, 0) /* align_mul=4 */ /* align_offset=0 */ [interleaved dump fragment elided: ssa_1613] /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1665 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1666 = intrinsic load_deref (ssa_1665) (0) /* access=0 */ vec1 32 ssa_1667 = imov ssa_1666.y vec1 32 ssa_1668 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1669 = intrinsic load_deref (ssa_1668) (0) /* access=0 */ vec4 32 ssa_1670 = vec4 ssa_1669.x, ssa_1669.y, ssa_1667, ssa_1669.w vec1 32 ssa_1671 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_1671, ssa_1670) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1672 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1673 = intrinsic load_deref (ssa_1672) (0) /* access=0 */ vec1 32 ssa_1674 = imov ssa_1673.z vec1 32 ssa_1675 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1676 = intrinsic load_deref (ssa_1675) (0) /* access=0 */ vec4 32 ssa_1677 = vec4 ssa_1676.x, ssa_1676.y, ssa_1674, ssa_1676.w vec1 32 ssa_1678 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1678, ssa_1677) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1679 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_1680 = intrinsic load_deref (ssa_1679) (0) /* access=0 */ vec1 32 ssa_1681 = imov ssa_1680.y vec1 32 ssa_1682 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1683 = intrinsic load_deref (ssa_1682) (0) /* access=0 */ vec4 32 ssa_1684 = vec4 ssa_1681, ssa_1683.y, ssa_1683.z, ssa_1683.w vec1 32 ssa_1685 = deref_var &r6 (shader_temp vec4) intrinsic store_deref (ssa_1685, ssa_1684) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1686 = deref_var &r9 (shader_temp vec4) vec4 32 ssa_1687 = intrinsic load_deref (ssa_1686) (0) /* access=0 */ vec1 32 ssa_1688 = imov ssa_1687.z vec1 32 ssa_1689
vec1 32 ssa_1689 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1690 = intrinsic load_deref (ssa_1689) (0) /* access=0 */ vec4 32 ssa_1691 = vec4 ssa_1688, ssa_1690.y, ssa_1690.z, ssa_1690.w vec1 32 ssa_1692 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1692, ssa_1691) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1693 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1694 = intrinsic load_deref (ssa_1693) (0) /* access=0 */ vec3 32 ssa_1695 = vec3 ssa_1694.x, ssa_1694.y, ssa_1694.z vec1 32 ssa_1696 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1697 = intrinsic vulkan_resource_index (ssa_1696) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1698 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1699 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1700 = iadd ssa_1698, ssa_1699 vec1 32 ssa_1701 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1702 = iadd ssa_1700, ssa_1701 vec1 32 ssa_1703 = intrinsic load_ubo (ssa_1697, ssa_1702) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1704 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1705 = iadd ssa_1700, ssa_1704 vec1 32 ssa_1706 = intrinsic load_ubo (ssa_1697, ssa_1705) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1707 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1708 = iadd ssa_1700, ssa_1707 vec1 32 ssa_1709 = intrinsic load_ubo (ssa_1697, ssa_1708) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1710 = vec3 ssa_1703, ssa_1706, ssa_1709 [...] vec1 32 ssa_1934 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1935 = iadd ssa_1927, ssa_1934 vec1 32 ssa_1936 = intrinsic load_ubo (ssa_1924, ssa_1935) (4, 0) /* align_mul=4 */ /* align_offset=0 */ [...] vec1 32 ssa_838 = deref_var &out@o0-temp (function_temp vec4) vec4 32 ssa_839 = intrinsic load_deref (ssa_838) (0) /* access=0 */ [...] vec4 32 ssa_1399 = intrinsic load_deref (ssa_1398) (0) /* access=0 */ vec3 32 ssa_1400 = vec3 ssa_1399.x, ssa_1399.y, ssa_1399.z vec1 32 ssa_1401 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1402 = intrinsic vulkan_resource_index (ssa_1401) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1403 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1404 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1405 = iadd ssa_1403, ssa_1404 vec1 32 ssa_1406 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1407 = iadd ssa_1405, ssa_1406 vec1 32 ssa_1408 = intrinsic load_ubo (ssa_1402, ssa_1407) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1409 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1410 = iadd ssa_1405, ssa_1409 vec1 32 ssa_1411 = intrinsic load_ubo (ssa_1402, ssa_1410) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1412 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1413 = iadd ssa_1405, ssa_1412 vec1 32 ssa_1414 = intrinsic load_ubo (ssa_1402, ssa_1413) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1415 = vec3 ssa_1408, ssa_1411, ssa_1414 vec1 32 ssa_1416 = fdot3 ssa_1400, ssa_1415 vec1 32 ssa_1417 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1418 = intrinsic load_deref (ssa_1417) (0) /* access=0 */ vec4 32 ssa_1419 = vec4 ssa_1418.x, ssa_1416, ssa_1418.z, ssa_1418.w vec1 32 ssa_1420 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1420, ssa_1419) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1421 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1422 = intrinsic load_deref (ssa_1421) (0) /* access=0 */ vec3 32 ssa_1423 = vec3 ssa_1422.x, ssa_1422.y,
ssa_1422.z vec1 32 ssa_1424 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1425 = intrinsic vulkan_resource_index (ssa_1424) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1426 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1427 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1428 = iadd ssa_1426, ssa_1427 vec1 32 ssa_1429 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1430 = iadd ssa_1428, ssa_1429 vec1 32 ssa_1431 = intrinsic load_ubo (ssa_1425, ssa_1430) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1432 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1433 = iadd ssa_1428, ssa_1432 vec1 32 ssa_1434 = intrinsic load_ubo (ssa_1425, ssa_1433) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1435 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1436 = iadd ssa_1428, ssa_1435 vec1 32 ssa_1437 = intrinsic load_ubo (ssa_1425, ssa_1436) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1438 = vec3 ssa_1431, ssa_1434, ssa_1437 vec1 32 ssa_1439 = fdot3 ssa_1423, ssa_1438 vec1 32 ssa_1440 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1441 = intrinsic load_deref (ssa_1440) (0) /* access=0 */ vec4 32 ssa_1442 = vec4 ssa_1441.x, ssa_1441.y, ssa_1439, ssa_1441.w vec1 32 ssa_1443 = deref_var &r5 (shader_temp vec4) intrinsic store_deref (ssa_1443, ssa_1442) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1444 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1445 = intrinsic load_deref (ssa_1444) (0) /* access=0 */ vec4 32 ssa_1446 = vec4 ssa_1445.x, ssa_1445.y, ssa_1445.z, ssa_45 vec1 32 ssa_1447 = deref_var &r2 (shader_temp vec4) intrinsic store_deref (ssa_1447, ssa_1446) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1448 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1449 = intrinsic load_deref (ssa_1448) (0) /* access=0 */ vec1 32 ssa_1450 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1451 = intrinsic load_deref (ssa_1450) (0) /* access=0 */ vec1 32 ssa_1452 = fdot4 ssa_1449, ssa_1451 vec1 32 ssa_1453 = deref_var &o7 (shader_out vec4) vec4 32 ssa_1454 = intrinsic load_deref (ssa_1453) (0) /* access=0 */ vec4 32 ssa_1455 = vec4 ssa_1452, ssa_1454.y, ssa_1454.z, ssa_1454.w vec1 32 ssa_1456 = deref_var &o7 (shader_out vec4) intrinsic store_deref (ssa_1456, ssa_1455) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1457 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1458 = intrinsic load_deref (ssa_1457) (0) /* access=0 */ vec1 32 ssa_1459 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1460 = intrinsic vulkan_resource_index (ssa_1459) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1461 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1462 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1463 = iadd ssa_1461, ssa_1462 vec1 32 ssa_1464 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1465 = iadd ssa_1463, ssa_1464 vec1 32 ssa_1466 = intrinsic load_ubo (ssa_1460, ssa_1465) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1467 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1468 = iadd ssa_1463, ssa_1467 vec1 32 ssa_1469 = intrinsic load_ubo (ssa_1460, ssa_1468) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1470 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1471 = iadd ssa_1463, ssa_1470 vec1 32 ssa_1472 = intrinsic load_ubo (ssa_1460, ssa_1471) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1473 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1474 = iadd ssa_1463, ssa_1473 vec1 32 ssa_1475 = intrinsic load_ubo 
(ssa_1460, ssa_1474) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1476 = vec4 ssa_1466, ssa_1469, ssa_1472, ssa_1475 vec1 32 ssa_1477 = fdot4 ssa_1458, ssa_1476 vec1 32 ssa_1478 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1479 = intrinsic load_deref (ssa_1478) (0) /* access=0 */ vec4 32 ssa_1480 = vec4 ssa_1479.x, ssa_1479.y, ssa_1479.z, ssa_1477 vec1 32 ssa_1481 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1481, ssa_1480) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1482 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1483 = intrinsic load_deref (ssa_1482) (0) /* access=0 */ vec3 32 ssa_1484 = vec3 ssa_1483.x, ssa_1483.y, ssa_1483.z vec1 32 ssa_1485 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1486 = intrinsic vulkan_resource_index (ssa_1485) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1487 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1488 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1489 = iadd ssa_1487, ssa_1488 vec1 32 ssa_1490 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1491 = iadd ssa_1489, ssa_1490 vec1 32 ssa_1492 = intrinsic load_ubo (ssa_1486, ssa_1491) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1493 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1494 = iadd ssa_1489, ssa_1493 vec1 32 ssa_1495 = intrinsic load_ubo (ssa_1486, ssa_1494) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1496 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1497 = iadd ssa_1489, ssa_1496 vec1 32 ssa_1498 = intrinsic load_ubo (ssa_1486, ssa_1497) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1499 = vec3 ssa_1492, ssa_1495, ssa_1498 vec1 32 ssa_1500 = fdot3 ssa_1484, ssa_1499 vec1 32 ssa_1501 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1502 = intrinsic load_deref (ssa_1501) (0) /* access=0 */ vec4 32 ssa_1503 = vec4 ssa_1500, ssa_1502.y, ssa_1502.z, ssa_1502.w vec1 32 ssa_1504 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1504, ssa_1503) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1505 = deref_var &r7 (shader_temp vec4) vec4 32 ssa_1506 = intrinsic load_deref (ssa_1505) (0) /* access=0 */ vec3 32 ssa_1507 = vec3 ssa_1506.x, ssa_1506.y, ssa_1506.z vec1 32 ssa_1508 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1509 = intrinsic vulkan_resource_index (ssa_1508) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1510 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1511 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1512 = iadd ssa_1510, ssa_1511 vec1 32 ssa_1513 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1514 = iadd ssa_1512, ssa_1513 vec1 32 ssa_1515 = intrinsic load_ubo (ssa_1509, ssa_1514) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1516 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1517 = iadd ssa_1512, ssa_1516 vec1 32 ssa_1518 = intrinsic load_ubo (ssa_1509, ssa_1517) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1519 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1520 = iadd ssa_1512, ssa_1519 vec1 32 ssa_1521 = intrinsic load_ubo (ssa_1509, ssa_1520) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1522 = vec3 ssa_1515, ssa_1518, ssa_1521 vec1 32 ssa_1523 = fdot3 ssa_1507, ssa_1522 vec1 32 ssa_1524 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1525 = intrinsic load_deref (ssa_1524) (0) /* access=0 */ vec4 32 ssa_1526 = vec4 ssa_1525.x, ssa_1523, ssa_1525.z, ssa_1525.w vec1 32 ssa_1527 = deref_var &r0 (shader_temp vec4) 
intrinsic store_deref (ssa_1527, ssa_1526) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1528 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1529 = intrinsic load_deref (ssa_1528) (0) /* access=0 */ vec3 32 ssa_1530 = vec3 ssa_1529.x, ssa_1529.y, ssa_1529.z [...] vec1 32 ssa_1711 = fdot3 ssa_1695, ssa_1710 vec1 32 ssa_1712 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1713 = intrinsic load_deref (ssa_1712) (0) /* access=0 */ vec4 32 ssa_1714 = vec4 ssa_1713.x, ssa_1711, ssa_1713.z, ssa_1713.w vec1 32 ssa_1715 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1715, ssa_1714) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1716 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1717 = intrinsic load_deref (ssa_1716) (0) /* access=0 */ vec3 32 ssa_1718 = vec3 ssa_1717.x, ssa_1717.y, ssa_1717.z vec1 32 ssa_1719 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1720 = intrinsic vulkan_resource_index (ssa_1719) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1721 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1722 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1723 = iadd ssa_1721, ssa_1722 vec1 32 ssa_1724 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1725 = iadd ssa_1723, ssa_1724 vec1 32 ssa_1726 = intrinsic load_ubo (ssa_1720, ssa_1725) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1727 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1728 = iadd ssa_1723, ssa_1727 vec1 32 ssa_1729 = intrinsic load_ubo (ssa_1720, ssa_1728) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1730 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1731 = iadd ssa_1723, ssa_1730 vec1 32 ssa_1732 = intrinsic load_ubo (ssa_1720, ssa_1731) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1733 = vec3 ssa_1726, ssa_1729, ssa_1732 vec1 32 ssa_1734 = fdot3 ssa_1718, ssa_1733 vec1 32 ssa_1735 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1736 = intrinsic load_deref (ssa_1735) (0) /* access=0 */ vec4 32 ssa_1737 = vec4 ssa_1736.x, ssa_1736.y, ssa_1734, ssa_1736.w vec1 32 ssa_1738 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1738, ssa_1737) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1739 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1740 = intrinsic load_deref (ssa_1739) (0) /* access=0 */ vec4 32 ssa_1741 = vec4 ssa_1740.x, ssa_1740.y, ssa_1740.z, ssa_27 vec1 32 ssa_1742 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1742, ssa_1741) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1743 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1744 = intrinsic load_deref (ssa_1743) (0) /* access=0 */ vec1 32 ssa_1745 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1746 = intrinsic load_deref (ssa_1745) (0) /* access=0 */ vec1 32 ssa_1747 = fdot4 ssa_1744, ssa_1746 vec1 32 ssa_1748 = deref_var &o7 (shader_out vec4) vec4 32 ssa_1749 = intrinsic load_deref (ssa_1748) (0) /* access=0 */ vec4 32 ssa_1750 = vec4 ssa_1747, ssa_1749.y, ssa_1749.z, ssa_1749.w vec1 32 ssa_1751 = deref_var &o7 (shader_out vec4) intrinsic store_deref (ssa_1751, ssa_1750) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1752 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1753 = intrinsic load_deref (ssa_1752) (0) /* access=0 */ vec1 32 ssa_1754 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1755 = intrinsic vulkan_resource_index (ssa_1754) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1756 = load_const (0x00000000 /* 0.000000 */)
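Between the dot products, nearly every write in the dump has the same read-modify-write shape: load_deref the whole vec4 temp, rebuild it with one lane swapped (e.g. vec4 ssa_1736.x, ssa_1736.y, ssa_1734, ssa_1736.w just above), then store_deref with wrmask=xyzw. A single-component register write is being spelled as a whole-register copy; copy propagation and dead-write elimination would normally collapse these in later passes. A short C equivalent; the vec4 struct and write_lane_y are illustrative only:

    typedef struct { float x, y, z, w; } vec4;

    /* load_deref &rN; vec4 with one lane replaced; store_deref wrmask=xyzw:
     * a one-lane update written as a whole-register copy. */
    static void write_lane_y(vec4 *reg, float value)
    {
        vec4 tmp = *reg;  /* load_deref: read the whole register */
        tmp.y = value;    /* rebuild the vec4 with one lane replaced */
        *reg = tmp;       /* store_deref with wrmask=xyzw */
    }

This is why the listing is so repetitive: every dp3/dp4 result costs a load_deref, a vec4 rebuild, a deref_var and a store_deref before the next instruction can run.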
vec1 32 ssa_1757 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1758 = iadd ssa_1756, ssa_1757 vec1 32 ssa_1759 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1760 = iadd ssa_1758, ssa_1759 vec1 32 ssa_1761 = intrinsic load_ubo (ssa_1755, ssa_1760) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1762 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1763 = iadd ssa_1758, ssa_1762 vec1 32 ssa_1764 = intrinsic load_ubo (ssa_1755, ssa_1763) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1765 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1766 = iadd ssa_1758, ssa_1765 vec1 32 ssa_1767 = intrinsic load_ubo (ssa_1755, ssa_1766) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1768 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1769 = iadd ssa_1758, ssa_1768 vec1 32 ssa_1770 = intrinsic load_ubo (ssa_1755, ssa_1769) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1771 = vec4 ssa_1761, ssa_1764, ssa_1767, ssa_1770 vec1 32 ssa_1772 = fdot4 ssa_1753, ssa_1771 vec1 32 ssa_1773 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1774 = intrinsic load_deref (ssa_1773) (0) /* access=0 */ vec4 32 ssa_1775 = vec4 ssa_1774.x, ssa_1774.y, ssa_1774.z, ssa_1772 vec1 32 ssa_1776 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1776, ssa_1775) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1777 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1778 = intrinsic load_deref (ssa_1777) (0) /* access=0 */ vec3 32 ssa_1779 = vec3 ssa_1778.x, ssa_1778.y, ssa_1778.z vec1 32 ssa_1780 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1781 = intrinsic vulkan_resource_index (ssa_1780) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1782 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1783 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1784 = iadd ssa_1782, ssa_1783 vec1 32 ssa_1785 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1786 = iadd ssa_1784, ssa_1785 vec1 32 ssa_1787 = intrinsic load_ubo (ssa_1781, ssa_1786) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1788 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1789 = iadd ssa_1784, ssa_1788 vec1 32 ssa_1790 = intrinsic load_ubo (ssa_1781, ssa_1789) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1791 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1792 = iadd ssa_1784, ssa_1791 vec1 32 ssa_1793 = intrinsic load_ubo (ssa_1781, ssa_1792) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1794 = vec3 ssa_1787, ssa_1790, ssa_1793 vec1 32 ssa_1795 = fdot3 ssa_1779, ssa_1794 vec1 32 ssa_1796 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1797 = intrinsic load_deref (ssa_1796) (0) /* access=0 */ vec4 32 ssa_1798 = vec4 ssa_1795, ssa_1797.y, ssa_1797.z, ssa_1797.w vec1 32 ssa_1799 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1799, ssa_1798) (15, 0) /* wrmask=xyzw */ /* access=0 */ [...] vec4 32 ssa_840 = vec4 ssa_839.x, ssa_839.y, ssa_839.z, ssa_837 vec1 32 ssa_841 = deref_var &out@o0-temp (function_temp vec4) [...] intrinsic store_deref (ssa_1282, ssa_1281) (15, 0) /* wrmask=xyzw */ /* access=0 */ [...] vec1 32 ssa_1531 = load_const (0x00000000 /* 0.000000 */) [...] intrinsic store_deref (ssa_1613, ssa_1612) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1614 = deref_var &r9 (function_temp vec4) vec4 32 ssa_1615 = intrinsic load_deref (ssa_1614) (0) /* access=0 */ vec1 32 ssa_1616 = imov ssa_1615.w vec1 32 ssa_1617 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1618 = intrinsic load_deref (ssa_1617) (0) /*
access=0 */ vec4 32 ssa_1619 = vec4 ssa_1616, ssa_1618.y, ssa_1618.z, ssa_1618.w vec1 32 ssa_1620 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1620, ssa_1619) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1621 = deref_var &r8 (function_temp vec4) vec4 32 ssa_1622 = intrinsic load_deref (ssa_1621) (0) /* access=0 */ vec4 32 ssa_1623 = vec4 ssa_1622.z, ssa_1622.z, ssa_1622.z, ssa_1622.z vec1 32 ssa_1624 = imov ssa_1623.x /* succs: block_25 block_26 */ if ssa_30 { block block_25: /* preds: block_24 */ vec1 32 ssa_1625 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_2181 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_1626 = txf ssa_1625 (texture_deref), ssa_1624 (coord), ssa_2181 (lod), 0 (sampler), vec4 32 ssa_1627 = vec4 ssa_1626.x, ssa_1626.z, ssa_1626.y, ssa_1626.w /* succs: block_27 */ } else { block block_26: /* preds: block_24 */ /* succs: block_27 */ } block block_27: /* preds: block_25 block_26 */ vec4 32 ssa_2201 = phi block_25: ssa_1627, block_26: ssa_2238 vec4 32 ssa_2191 = imov ssa_2201 vec1 32 ssa_1632 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1632, ssa_2191) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1633 = deref_var &r8 (function_temp vec4) vec4 32 ssa_1634 = intrinsic load_deref (ssa_1633) (0) /* access=0 */ vec4 32 ssa_1635 = vec4 ssa_1634.w, ssa_1634.w, ssa_1634.w, ssa_1634.w vec1 32 ssa_1636 = imov ssa_1635.x /* succs: block_28 block_29 */ if ssa_29 { block block_28: /* preds: block_27 */ vec1 32 ssa_1637 = deref_var &t80 (uniform samplerBuffer) vec1 32 ssa_2182 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_1638 = txf ssa_1637 (texture_deref), ssa_1636 (coord), ssa_2182 (lod), 0 (sampler), /* succs: block_30 */ } else { block block_29: /* preds: block_27 */ /* succs: block_30 */ } block block_30: /* preds: block_28 block_29 */ vec4 32 ssa_2202 = phi block_28: ssa_1638, block_29: ssa_2233 vec4 32 ssa_2192 = imov ssa_2202 vec1 32 ssa_1643 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_1643, ssa_2192) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1644 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1645 = intrinsic load_deref (ssa_1644) (0) /* access=0 */ vec1 32 ssa_1646 = imov ssa_1645.w vec1 32 ssa_1647 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1648 = intrinsic load_deref (ssa_1647) (0) /* access=0 */ vec4 32 ssa_1649 = vec4 ssa_1648.x, ssa_1646, ssa_1648.z, ssa_1648.w vec1 32 ssa_1650 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1650, ssa_1649) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1651 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1652 = intrinsic load_deref (ssa_1651) (0) /* access=0 */ vec1 32 ssa_1653 = imov ssa_1652.w vec1 32 ssa_1654 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1655 = intrinsic load_deref (ssa_1654) (0) /* access=0 */ vec4 32 ssa_1656 = vec4 ssa_1655.x, ssa_1655.y, ssa_1653, ssa_1655.w vec1 32 ssa_1657 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1657, ssa_1656) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1658 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1659 = intrinsic load_deref (ssa_1658) (0) /* access=0 */ vec3 32 ssa_1660 = vec3 ssa_1659.x, ssa_1659.y, ssa_1659.z vec1 32 ssa_1661 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1662 = intrinsic vulkan_resource_index (ssa_1661) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1663 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1664 = load_const (0x00000320 /* 0.000000 */) vec1 32 
ssa_1665 = iadd ssa_1663, ssa_1664 vec1 32 ssa_1666 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1667 = iadd ssa_1665, ssa_1666 vec1 32 ssa_1668 = intrinsic load_ubo (ssa_1662, ssa_1667) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1669 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1670 = iadd ssa_1665, ssa_1669 vec1 32 ssa_1671 = intrinsic load_ubo (ssa_1662, ssa_1670) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1672 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1673 = iadd ssa_1665, ssa_1672 vec1 32 ssa_1674 = intrinsic load_ubo (ssa_1662, ssa_1673) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1675 = vec3 ssa_1668, ssa_1671, ssa_1674 vec3 32 ssa_1676 = fneg ssa_1675 vec3 32 ssa_1677 = fadd ssa_1660, ssa_1676 vec1 32 ssa_1678 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1679 = intrinsic load_deref (ssa_1678) (0) /* access=0 */ vec4 32 ssa_1680 = vec4 ssa_1677.x, ssa_1677.y, ssa_1677.z, ssa_1679.w vec1 32 ssa_1681 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1681, ssa_1680) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1682 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1683 = intrinsic load_deref (ssa_1682) (0) /* access=0 */ vec4 32 ssa_1684 = vec4 ssa_1683.x, ssa_1683.y, ssa_1683.z, ssa_28 vec1 32 ssa_1685 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1685, ssa_1684) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1686 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1687 = intrinsic load_deref (ssa_1686) (0) /* access=0 */ vec1 32 ssa_1688 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1689 = intrinsic vulkan_resource_index (ssa_1688) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1690 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1691 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1692 = iadd ssa_1690, ssa_1691 vec1 32 ssa_1693 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1694 = iadd ssa_1692, ssa_1693 vec1 32 ssa_1695 = intrinsic load_ubo (ssa_1689, ssa_1694) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1696 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1697 = iadd ssa_1692, ssa_1696 vec1 32 ssa_1698 = intrinsic load_ubo (ssa_1689, ssa_1697) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1699 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1700 = iadd ssa_1692, ssa_1699 vec1 32 ssa_1701 = intrinsic load_ubo (ssa_1689, ssa_1700) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1702 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1703 = iadd ssa_1692, ssa_1702 vec1 32 ssa_1704 = intrinsic load_ubo (ssa_1689, ssa_1703) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1705 = vec4 ssa_1695, ssa_1698, ssa_1701, ssa_1704 vec1 32 ssa_1706 = fdot4 ssa_1687, ssa_1705 vec1 32 ssa_1707 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1708 = intrinsic load_deref (ssa_1707) (0) /* access=0 */ vec4 32 ssa_1709 = vec4 ssa_1708.x, ssa_1708.y, ssa_1708.z, ssa_1706 vec1 32 ssa_1710 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1710, ssa_1709) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1711 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1712 = intrinsic load_deref (ssa_1711) (0) /* access=0 */ vec1 32 ssa_1713 = imov ssa_1712.x vec1 32 ssa_1714 = deref_var &r5 (function_temp vec4) vec4 32 ssa_1715 = intrinsic load_deref (ssa_1714) (0) /* access=0 */ vec4 32 ssa_1716 = vec4 ssa_1715.x, ssa_1713, ssa_1715.z, ssa_1715.w vec1 32 ssa_1717 = deref_var 
&r5 (function_temp vec4) intrinsic store_deref (ssa_1717, ssa_1716) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1718 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1719 = intrinsic load_deref (ssa_1718) (0) /* access=0 */ vec1 32 ssa_1720 = imov ssa_1719.x vec1 32 ssa_1721 = deref_var &r5 (function_temp vec4) vec4 32 ssa_1722 = intrinsic load_deref (ssa_1721) (0) /* access=0 */ vec4 32 ssa_1723 = vec4 ssa_1722.x, ssa_1722.y, ssa_1720, ssa_1722.w vec1 32 ssa_1724 = deref_var &r5 (function_temp vec4) intrinsic store_deref (ssa_1724, ssa_1723) (15, 0) /* wrmask=xyzw */ /* access=0 */ [...] vec1 32 ssa_1532 = intrinsic vulkan_resource_index (ssa_1531) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ [...] vec1 32 ssa_1937 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1938 = iadd ssa_1927, ssa_1937 vec1 32 ssa_1939 = intrinsic load_ubo (ssa_1924, ssa_1938) (4, 0) /* align_mul=4 */ /* align_offset=0 */ [...] vec1 32 ssa_1800 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1801 = intrinsic load_deref (ssa_1800) (0) /* access=0 */ vec3 32 ssa_1802 = [...] intrinsic store_deref (ssa_841, ssa_840) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_842 = deref_var &r3 (function_temp vec4) vec4 32 ssa_843 = intrinsic load_deref (ssa_842) (0) /* access=0 */ vec1 32 ssa_844 = deref_var &r10 (function_temp vec4) vec4 32 ssa_845 = intrinsic load_deref (ssa_844) (0) /* access=0 */ vec1 32 ssa_846 = fdot4 ssa_843, ssa_845 vec1 32 ssa_847 = deref_var &out@o0-temp (function_temp vec4) vec4 32 ssa_848 = intrinsic load_deref (ssa_847) (0) /* access=0 */ vec4 32 ssa_849 = vec4 ssa_848.x, ssa_848.y, ssa_846, ssa_848.w vec1 32 ssa_850 = deref_var &out@o0-temp (function_temp vec4) intrinsic store_deref (ssa_850, ssa_849) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_851 = deref_var &r0 (function_temp vec4) vec4 32 ssa_852 = intrinsic load_deref (ssa_851) (0) /* access=0 */ vec1 32 ssa_853 = imov ssa_852.y vec1 32 ssa_854 = ushr ssa_73, ssa_72 vec1 32 ssa_855 = imul ssa_853, ssa_71 vec1 32 ssa_856 = iadd ssa_855, ssa_854 vec1 32 ssa_857 = iadd ssa_856, ssa_70 vec1 32 ssa_858 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_859 = intrinsic vulkan_resource_index (ssa_858) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_860 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_861 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_862 = ishl ssa_857, ssa_861 vec1 32 ssa_863 = iadd ssa_860, ssa_862 vec1 32 ssa_864 = intrinsic load_ssbo (ssa_859, ssa_863) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_865 = deref_var &r0 (function_temp vec4) vec4 32 ssa_866 = intrinsic load_deref (ssa_865) (0) /* access=0 */ vec4 32 ssa_867 = vec4 ssa_866.x, ssa_864, ssa_866.z, ssa_866.w vec1 32 ssa_868 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_868, ssa_867) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_869 = deref_var &r0 (function_temp vec4) vec4 32 ssa_870 = intrinsic load_deref (ssa_869) (0) /* access=0 */ vec1 32 ssa_871 = imov ssa_870.w vec1 32 ssa_872 = ushr ssa_69, ssa_68 vec1 32 ssa_873 = imul ssa_871, ssa_67 vec1 32 ssa_874 = iadd ssa_873, ssa_872 vec1 32 ssa_875 = iadd ssa_874, ssa_66 vec1 32 ssa_876 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_877 = intrinsic vulkan_resource_index (ssa_876) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_878 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_879 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_880 = ishl ssa_875, ssa_879 vec1 32 ssa_881 = iadd ssa_878,
ssa_880 vec1 32 ssa_882 = intrinsic load_ssbo (ssa_877, ssa_881) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_883 = deref_var &r0 (function_temp vec4) vec4 32 ssa_884 = intrinsic load_deref (ssa_883) (0) /* access=0 */ vec4 32 ssa_885 = vec4 ssa_884.x, ssa_884.y, ssa_884.z, ssa_882 vec1 32 ssa_886 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_886, ssa_885) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_887 = deref_var &r0 (function_temp vec4) vec4 32 ssa_888 = intrinsic load_deref (ssa_887) (0) /* access=0 */ vec2 32 ssa_889 = vec2 ssa_888.y, ssa_888.w vec2 32 ssa_890 = ishl ssa_889, ssa_2006 vec1 32 ssa_891 = deref_var &r0 (function_temp vec4) vec4 32 ssa_892 = intrinsic load_deref (ssa_891) (0) /* access=0 */ vec4 32 ssa_893 = vec4 ssa_890.x, ssa_892.y, ssa_890.y, ssa_892.w vec1 32 ssa_894 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_894, ssa_893) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_895 = deref_var &r0 (function_temp vec4) vec4 32 ssa_896 = intrinsic load_deref (ssa_895) (0) /* access=0 */ vec2 32 ssa_897 = vec2 ssa_896.x, ssa_896.y vec2 32 ssa_898 = ishr ssa_897, ssa_2003 vec1 32 ssa_899 = deref_var &r0 (function_temp vec4) vec4 32 ssa_900 = intrinsic load_deref (ssa_899) (0) /* access=0 */ vec4 32 ssa_901 = vec4 ssa_898.x, ssa_898.y, ssa_900.z, ssa_900.w vec1 32 ssa_902 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_902, ssa_901) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_903 = deref_var &r0 (function_temp vec4) vec4 32 ssa_904 = intrinsic load_deref (ssa_903) (0) /* access=0 */ vec2 32 ssa_905 = vec2 ssa_904.x, ssa_904.y vec2 32 ssa_906 = i2f32 ssa_905 vec1 32 ssa_907 = deref_var &r0 (function_temp vec4) vec4 32 ssa_908 = intrinsic load_deref (ssa_907) (0) /* access=0 */ vec4 32 ssa_909 = vec4 ssa_906.x, ssa_906.y, ssa_908.z, ssa_908.w vec1 32 ssa_910 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_910, ssa_909) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_911 = deref_var &r0 (function_temp vec4) vec4 32 ssa_912 = intrinsic load_deref (ssa_911) (0) /* access=0 */ vec2 32 ssa_913 = vec2 ssa_912.x, ssa_912.y vec2 32 ssa_914 = fmul ssa_913, ssa_2000 vec1 32 ssa_915 = deref_var &out@o1-temp (function_temp vec4) vec4 32 ssa_916 = intrinsic load_deref (ssa_915) (0) /* access=0 */ vec4 32 ssa_917 = vec4 ssa_914.x, ssa_914.y, ssa_916.z, ssa_916.w vec1 32 ssa_918 = deref_var &out@o1-temp (function_temp vec4) intrinsic store_deref (ssa_918, ssa_917) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_919 = deref_var &r0 (function_temp vec4) vec4 32 ssa_920 = intrinsic load_deref (ssa_919) (0) /* access=0 */ vec2 32 ssa_921 = vec2 ssa_920.z, ssa_920.w vec2 32 ssa_922 = ishr ssa_921, ssa_1997 vec1 32 ssa_923 = deref_var &r0 (function_temp vec4) vec4 32 ssa_924 = intrinsic load_deref (ssa_923) (0) /* access=0 */ vec4 32 ssa_925 = vec4 ssa_922.x, ssa_922.y, ssa_924.z, ssa_924.w vec1 32 ssa_926 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_926, ssa_925) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_927 = deref_var &r0 (function_temp vec4) vec4 32 ssa_928 = intrinsic load_deref (ssa_927) (0) /* access=0 */ vec2 32 ssa_929 = vec2 ssa_928.x, ssa_928.y vec2 32 ssa_930 = i2f32 ssa_929 vec1 32 ssa_931 = deref_var &r0 (function_temp vec4) vec4 32 ssa_932 = intrinsic load_deref (ssa_931) (0) /* access=0 */ vec4 32 ssa_933 = vec4 ssa_930.x, ssa_930.y, ssa_932.z, ssa_932.w vec1 32 ssa_934 = deref_var &r0 (function_temp vec4) intrinsic 
store_deref (ssa_934, ssa_933) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_935 = deref_var &r0 (function_temp vec4) vec4 32 ssa_936 = intrinsic load_deref (ssa_935) (0) /* access=0 */ vec2 32 ssa_937 = vec2 ssa_936.x, ssa_936.y vec2 32 ssa_938 = fmul ssa_937, ssa_1994 vec1 32 ssa_939 = deref_var &out@o1-temp (function_temp vec4) vec4 32 ssa_940 = intrinsic load_deref (ssa_939) (0) /* access=0 */ vec4 32 ssa_941 = vec4 ssa_940.x, ssa_940.y, ssa_938.x, ssa_938.y vec1 32 ssa_942 = deref_var &out@o1-temp (function_temp vec4) intrinsic store_deref (ssa_942, ssa_941) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_943 = deref_var &r2 (function_temp vec4) vec4 32 ssa_944 = intrinsic load_deref (ssa_943) (0) /* access=0 */ vec1 32 ssa_945 = imov ssa_944.w vec1 32 ssa_946 = ushr ssa_60, ssa_59 vec1 32 ssa_947 = imul ssa_945, ssa_58 vec1 32 ssa_948 = iadd ssa_947, ssa_946 vec1 32 ssa_949 = iadd ssa_948, ssa_57 vec1 32 ssa_950 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_951 = intrinsic vulkan_resource_index (ssa_950) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_952 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_953 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_954 = ishl ssa_949, ssa_953 vec1 32 ssa_955 = iadd ssa_952, ssa_954 vec1 32 ssa_956 = intrinsic load_ssbo (ssa_951, ssa_955) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_957 = deref_var &r0 (function_temp vec4) vec4 32 ssa_958 = intrinsic load_deref (ssa_957) (0) /* access=0 */ vec4 32 ssa_959 = vec4 ssa_956, ssa_958.y, ssa_958.z, ssa_958.w vec1 32 ssa_960 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_960, ssa_959) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_961 = deref_var &r0 (function_temp vec4) vec4 32 ssa_962 = intrinsic load_deref (ssa_961) (0) /* access=0 */ vec3 32 ssa_963 = vec3 ssa_962.x, ssa_962.x, ssa_962.x vec3 32 ssa_964 = ushr ssa_963, ssa_1991 vec1 32 ssa_965 = deref_var &r0 (function_temp vec4) vec4 32 ssa_966 = intrinsic load_deref (ssa_965) (0) /* access=0 */ vec4 32 ssa_967 = vec4 ssa_966.x, ssa_964.x, ssa_964.y, ssa_964.z vec1 32 ssa_968 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_968, ssa_967) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_969 = deref_var &r0 (function_temp vec4) vec4 32 ssa_970 = intrinsic load_deref (ssa_969) (0) /* access=0 */ vec4 32 ssa_971 = iand ssa_970, ssa_1987 vec1 32 ssa_972 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_972, ssa_971) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_973 = deref_var &r0 (function_temp vec4) vec4 32 ssa_974 = intrinsic load_deref (ssa_973) (0) /* access=0 */ vec4 32 ssa_975 = u2f32 ssa_974 vec1 32 ssa_976 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_976, ssa_975) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_977 = deref_var &r0 (function_temp vec4) vec4 32 ssa_978 = intrinsic load_deref (ssa_977) (0) /* access=0 */ vec4 32 ssa_979 = fmul ssa_978, ssa_1982 vec1 32 ssa_980 = deref_var &out@o2-temp (function_temp vec4) intrinsic store_deref (ssa_980, ssa_979) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_981 = deref_var &out@o3-temp (function_temp vec4) vec4 32 ssa_982 = intrinsic load_deref (ssa_981) (0) /* access=0 */ vec4 32 ssa_983 = vec4 ssa_982.x, ssa_982.y, ssa_982.z, ssa_53 vec1 32 ssa_984 = deref_var &out@o3-temp (function_temp vec4) intrinsic store_deref (ssa_984, ssa_983) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_985 = deref_var &r3 
(function_temp vec4) vec4 32 ssa_986 = intrinsic load_deref (ssa_985) (0) /* access=0 */ vec1 32 ssa_987 = deref_var &r4 (function_temp vec4) vec4 32 ssa_988 = intrinsic load_deref (ssa_987) (0) /* access=0 */ vec1 32 ssa_989 = fdot4 ssa_986, ssa_988 vec1 32 ssa_990 = deref_var &out@o3-temp (function_temp vec4) vec4 32 ssa_991 = intrinsic load_deref (ssa_990) (0) /* access=0 */ vec4 32 ssa_992 = vec4 ssa_989, ssa_991.y, ssa_991.z, ssa_991.w vec1 32 ssa_993 = deref_var &out@o3-temp (function_temp vec4) intrinsic store_deref (ssa_993, ssa_992) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_994 = deref_var &r3 (function_temp vec4) vec4 32 ssa_995 = intrinsic load_deref (ssa_994) (0) /* access=0 */ vec1 32 ssa_996 = deref_var &r7 (function_temp vec4) vec4 32 ssa_997 = intrinsic load_deref (ssa_996) (0) /* access=0 */ vec1 32 ssa_998 = fdot4 ssa_995, ssa_997 vec1 32 ssa_999 = deref_var &out@o3-temp (function_temp vec4) vec4 32 ssa_1000 = intrinsic load_deref (ssa_999) (0) /* access=0 */ vec4 32 ssa_1001 = vec4 ssa_1000.x, ssa_998, ssa_1000.z, ssa_1000.w vec1 32 ssa_1002 = deref_var &out@o3-temp (function_temp vec4) intrinsic store_deref (ssa_1002, ssa_1001) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1003 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1004 = intrinsic load_deref (ssa_1003) (0) /* access=0 */ vec1 32 ssa_1005 = deref_var &r8 (function_temp vec4) vec4 32 ssa_1006 = intrinsic load_deref (ssa_1005) (0) /* access=0 */ vec1 32 ssa_1007 = fdot4 ssa_1004, ssa_1006 vec1 32 ssa_1008 = deref_var &out@o3-temp (function_temp vec4) vec4 32 ssa_1009 = intrinsic load_deref (ssa_1008) (0) /* access=0 */ vec4 32 ssa_1010 = vec4 ssa_1009.x, ssa_1009.y, ssa_1007, ssa_1009.w vec1 32 ssa_1011 = deref_var &out@o3-temp (function_temp vec4) intrinsic store_deref (ssa_1011, ssa_1010) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1012 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1013 = intrinsic load_deref (ssa_1012) (0) /* access=0 */ vec1 32 ssa_1014 = imov ssa_1013.z vec1 32 ssa_1015 = ushr ssa_52, ssa_51 vec1 32 ssa_1016 = imul ssa_1014, ssa_50 vec1 32 ssa_1017 = iadd ssa_1016, ssa_1015 vec1 32 ssa_1018 = iadd ssa_1017, ssa_49 vec1 32 ssa_1019 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1020 = intrinsic vulkan_resource_index (ssa_1019) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1021 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1022 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1023 = ishl ssa_1018, ssa_1022 vec1 32 ssa_1024 = iadd ssa_1021, ssa_1023 vec1 32 ssa_1025 = intrinsic load_ssbo (ssa_1020, ssa_1024) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1026 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1027 = intrinsic load_deref (ssa_1026) (0) /* access=0 */ vec4 32 ssa_1028 = vec4 ssa_1025, ssa_1027.y, ssa_1027.z, ssa_1027.w vec1 32 ssa_1029 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1029, ssa_1028) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1030 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1031 = intrinsic load_deref (ssa_1030) (0) /* access=0 */ vec1 32 ssa_1032 = imov ssa_1031.y vec1 32 ssa_1033 = ushr ssa_48, ssa_47 vec1 32 ssa_1034 = imul ssa_1032, ssa_46 vec1 32 ssa_1035 = iadd ssa_1034, ssa_1033 vec1 32 ssa_1036 = iadd ssa_1035, ssa_45 vec1 32 ssa_1037 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1038 = intrinsic vulkan_resource_index (ssa_1037) (0, 3, 7) /* desc-set=0 */ /* binding=3 */ /* desc_type=SSBO */ vec1 32 ssa_1039 = 
load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1040 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_1041 = ishl ssa_1036, ssa_1040 vec1 32 ssa_1042 = iadd ssa_1039, ssa_1041 vec1 32 ssa_1043 = intrinsic load_ssbo (ssa_1038, ssa_1042) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1044 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1045 = intrinsic load_deref (ssa_1044) (0) /* access=0 */ vec4 32 ssa_1046 = vec4 ssa_1043, ssa_1045.y, ssa_1045.z, ssa_1045.w vec1 32 ssa_1047 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1047, ssa_1046) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1048 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1049 = intrinsic load_deref (ssa_1048) (0) /* access=0 */ vec2 32 ssa_1050 = vec2 ssa_1049.x, ssa_1049.x vec2 32 ssa_1051 = ushr ssa_1050, ssa_1977 vec1 32 ssa_1052 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1053 = intrinsic load_deref (ssa_1052) (0) /* access=0 */ vec4 32 ssa_1054 = vec4 ssa_1053.x, ssa_1051.x, ssa_1051.y, ssa_1053.w vec1 32 ssa_1055 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1055, ssa_1054) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1056 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1057 = intrinsic load_deref (ssa_1056) (0) /* access=0 */ vec3 32 ssa_1058 = vec3 ssa_1057.x, ssa_1057.y, ssa_1057.z vec3 32 ssa_1059 = iand ssa_1058, ssa_1974 vec1 32 ssa_1060 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1061 = intrinsic load_deref (ssa_1060) (0) /* access=0 */ vec4 32 ssa_1062 = vec4 ssa_1059.x, ssa_1059.y, ssa_1059.z, ssa_1061.w vec1 32 ssa_1063 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1063, ssa_1062) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1064 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1065 = intrinsic load_deref (ssa_1064) (0) /* access=0 */ vec3 32 ssa_1066 = vec3 ssa_1065.x, ssa_1065.y, ssa_1065.z vec3 32 ssa_1067 = u2f32 ssa_1066 vec1 32 ssa_1068 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1069 = intrinsic load_deref (ssa_1068) (0) /* access=0 */ vec4 32 ssa_1070 = vec4 ssa_1067.x, ssa_1067.y, ssa_1067.z, ssa_1069.w vec1 32 ssa_1071 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1071, ssa_1070) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1072 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1073 = intrinsic load_deref (ssa_1072) (0) /* access=0 */ vec3 32 ssa_1074 = vec3 ssa_1073.x, ssa_1073.y, ssa_1073.z vec3 32 ssa_1075 = fadd ssa_1074, ssa_1970 vec1 32 ssa_1076 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1077 = intrinsic load_deref (ssa_1076) (0) /* access=0 */ vec4 32 ssa_1078 = vec4 ssa_1075.x, ssa_1075.y, ssa_1075.z, ssa_1077.w vec1 32 ssa_1079 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1079, ssa_1078) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1080 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1081 = intrinsic load_deref (ssa_1080) (0) /* access=0 */ vec3 32 ssa_1082 = vec3 ssa_1081.x, ssa_1081.y, ssa_1081.z vec3 32 ssa_1083 = fmul ssa_1082, ssa_1966 vec1 32 ssa_1084 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1085 = intrinsic load_deref (ssa_1084) (0) /* access=0 */ vec4 32 ssa_1086 = vec4 ssa_1083.x, ssa_1083.y, ssa_1083.z, ssa_1085.w vec1 32 ssa_1087 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1087, ssa_1086) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1088 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1089 = intrinsic load_deref (ssa_1088) (0) /* access=0 */ vec3 32 ssa_1090 = 
vec3 ssa_1089.x, ssa_1089.y, ssa_1089.z vec1 32 ssa_1091 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1092 = intrinsic load_deref (ssa_1091) (0) /* access=0 */ vec3 32 ssa_1093 = vec3 ssa_1092.x, ssa_1092.y, ssa_1092.z vec1 32 ssa_1094 = fdot3 ssa_1090, ssa_1093 vec1 32 ssa_1095 = deref_var &out@o4-temp (function_temp vec4) vec4 32 ssa_1096 = intrinsic load_deref (ssa_1095) (0) /* access=0 */ vec4 32 ssa_1097 = vec4 ssa_1094, ssa_1096.y, ssa_1096.z, ssa_1096.w vec1 32 ssa_1098 = deref_var &out@o4-temp (function_temp vec4) intrinsic store_deref (ssa_1098, ssa_1097) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1099 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1100 = intrinsic load_deref (ssa_1099) (0) /* access=0 */ vec3 32 ssa_1101 = vec3 ssa_1100.x, ssa_1100.y, ssa_1100.z vec1 32 ssa_1102 = deref_var &r7 (function_temp vec4) vec4 32 ssa_1103 = intrinsic load_deref (ssa_1102) (0) /* access=0 */ vec3 32 ssa_1104 = vec3 ssa_1103.x, ssa_1103.y, ssa_1103.z vec1 32 ssa_1105 = fdot3 ssa_1101, ssa_1104 vec1 32 ssa_1106 = deref_var &out@o4-temp (function_temp vec4) vec4 32 ssa_1107 = intrinsic load_deref (ssa_1106) (0) /* access=0 */ vec4 32 ssa_1108 = vec4 ssa_1107.x, ssa_1105, ssa_1107.z, ssa_1107.w vec1 32 ssa_1109 = deref_var &out@o4-temp (function_temp vec4) intrinsic store_deref (ssa_1109, ssa_1108) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1110 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1111 = intrinsic load_deref (ssa_1110) (0) /* access=0 */ vec3 32 ssa_1112 = vec3 ssa_1111.x, ssa_1111.y, ssa_1111.z vec1 32 ssa_1113 = deref_var &r8 (function_temp vec4) vec4 32 ssa_1114 = intrinsic load_deref (ssa_1113) (0) /* access=0 */ vec3 32 ssa_1115 = vec3 ssa_1114.x, ssa_1114.y, ssa_1114.z vec1 32 ssa_1116 = fdot3 ssa_1112, ssa_1115 vec1 32 ssa_1117 = deref_var &out@o4-temp (function_temp vec4) vec4 32 ssa_1118 = intrinsic load_deref (ssa_1117) (0) /* access=0 */ vec4 32 ssa_1119 = vec4 ssa_1118.x, ssa_1118.y, ssa_1116, ssa_1118.w vec1 32 ssa_1120 = deref_var &out@o4-temp (function_temp vec4) intrinsic store_deref (ssa_1120, ssa_1119) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1121 = deref_var &out@o4-temp (function_temp vec4) vec4 32 ssa_1122 = intrinsic load_deref (ssa_1121) (0) /* access=0 */ vec4 32 ssa_1123 = vec4 ssa_1122.x, ssa_1122.y, ssa_1122.z, ssa_40 vec1 32 ssa_1124 = deref_var &out@o4-temp (function_temp vec4) intrinsic store_deref (ssa_1124, ssa_1123) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1125 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1126 = intrinsic load_deref (ssa_1125) (0) /* access=0 */ vec1 32 ssa_1127 = imov ssa_1126.w vec1 1 ssa_1128 = ilt ssa_39, ssa_1127 vec1 32 ssa_1129 = bcsel ssa_1128, ssa_37, ssa_38 vec1 32 ssa_1130 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1131 = intrinsic load_deref (ssa_1130) (0) /* access=0 */ vec4 32 ssa_1132 = vec4 ssa_1131.x, ssa_1131.y, ssa_1131.z, ssa_1129 vec1 32 ssa_1133 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1133, ssa_1132) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1134 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1135 = intrinsic load_deref (ssa_1134) (0) /* access=0 */ vec1 32 ssa_1136 = imov ssa_1135.w vec1 1 ssa_1137 = ilt ssa_1136, ssa_36 vec1 32 ssa_1138 = bcsel ssa_1137, ssa_34, ssa_35 vec1 32 ssa_1139 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1140 = intrinsic load_deref (ssa_1139) (0) /* access=0 */ vec4 32 ssa_1141 = vec4 ssa_1140.x, ssa_1140.y, ssa_1140.z, ssa_1138 [...] vec1 32 ssa_1725 = deref_var &r9 (function_temp vec4) vec4 32 ssa_1726 = intrinsic
load_deref (ssa_1725) (0) /* access=0 */ vec1 32 ssa_1727 = imov ssa_1726.x vec1 32 ssa_1728 = deref_var &r5 (function_temp vec4) vec4 32 ssa_1729 = intrinsic load_deref (ssa_1728) (0) /* access=0 */ vec4 32 ssa_1730 = vec4 ssa_1727, ssa_1729.y, ssa_1729.z, ssa_1729.w vec1 32 ssa_1731 = deref_var &r5 (function_temp vec4) intrinsic store_deref (ssa_1731, ssa_1730) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1732 = deref_var &r5 (function_temp vec4) vec4 32 ssa_1733 = intrinsic load_deref (ssa_1732) (0) /* access=0 */ vec3 32 ssa_1734 = vec3 ssa_1733.x, ssa_1733.y, ssa_1733.z vec1 32 ssa_1735 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1736 = intrinsic vulkan_resource_index (ssa_1735) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1737 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1738 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1739 = iadd ssa_1737, ssa_1738 vec1 32 ssa_1740 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1741 = iadd ssa_1739, ssa_1740 vec1 32 ssa_1742 = intrinsic load_ubo (ssa_1736, ssa_1741) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1743 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1744 = iadd ssa_1739, ssa_1743 vec1 32 ssa_1745 = intrinsic load_ubo (ssa_1736, ssa_1744) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1746 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1747 = iadd ssa_1739, ssa_1746 vec1 32 ssa_1748 = intrinsic load_ubo (ssa_1736, ssa_1747) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1749 = vec3 ssa_1742, ssa_1745, ssa_1748 vec1 32 ssa_1750 = fdot3 ssa_1734, ssa_1749 vec1 32 ssa_1751 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1752 = intrinsic load_deref (ssa_1751) (0) /* access=0 */ vec4 32 ssa_1753 = vec4 ssa_1750, ssa_1752.y, ssa_1752.z, ssa_1752.w vec1 32 ssa_1754 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1754, ssa_1753) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1755 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1756 = intrinsic load_deref (ssa_1755) (0) /* access=0 */ vec1 32 ssa_1757 = imov ssa_1756.z vec1 32 ssa_1758 = deref_var &r6 (function_temp vec4) vec4 32 ssa_1759 = intrinsic load_deref (ssa_1758) (0) /* access=0 */ vec4 32 ssa_1760 = vec4 ssa_1759.x, ssa_1757, ssa_1759.z, ssa_1759.w vec1 32 ssa_1761 = deref_var &r6 (function_temp vec4) intrinsic store_deref (ssa_1761, ssa_1760) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1762 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1763 = intrinsic load_deref (ssa_1762) (0) /* access=0 */ vec1 32 ssa_1764 = imov ssa_1763.y vec1 32 ssa_1765 = deref_var &r6 (function_temp vec4) vec4 32 ssa_1766 = intrinsic load_deref (ssa_1765) (0) /* access=0 */ vec4 32 ssa_1767 = vec4 ssa_1766.x, ssa_1766.y, ssa_1764, ssa_1766.w vec1 32 ssa_1768 = deref_var &r6 (function_temp vec4) intrinsic store_deref (ssa_1768, ssa_1767) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1769 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1770 = intrinsic load_deref (ssa_1769) (0) /* access=0 */ vec1 32 ssa_1771 = imov ssa_1770.z vec1 32 ssa_1772 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1773 = intrinsic load_deref (ssa_1772) (0) /* access=0 */ vec4 32 ssa_1774 = vec4 ssa_1773.x, ssa_1773.y, ssa_1771, ssa_1773.w vec1 32 ssa_1775 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1775, ssa_1774) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1776 = deref_var &r9 (function_temp vec4) vec4 32 ssa_1777 = intrinsic load_deref (ssa_1776) 
(0) /* access=0 */ vec1 32 ssa_1778 = imov ssa_1777.y vec1 32 ssa_1779 = deref_var &r6 (function_temp vec4) vec4 32 ssa_1780 = intrinsic load_deref (ssa_1779) (0) /* access=0 */ vec4 32 ssa_1781 = vec4 ssa_1778, ssa_1780.y, ssa_1780.z, ssa_1780.w vec1 32 ssa_1782 = deref_var &r6 (function_temp vec4) intrinsic store_deref (ssa_1782, ssa_1781) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1783 = deref_var &r9 (function_temp vec4) vec4 32 ssa_1784 = intrinsic load_deref (ssa_1783) (0) /* access=0 */ vec1 32 ssa_1785 = imov ssa_1784.z vec1 32 ssa_1786 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1787 = intrinsic load_deref (ssa_1786) (0) /* access=0 */ vec4 32 ssa_1788 = vec4 ssa_1785, ssa_1787.y, ssa_1787.z, ssa_1787.w vec1 32 ssa_1789 = deref_var &r2 (function_temp vec4) intrinsic store_deref (ssa_1789, ssa_1788) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1790 = deref_var &r6 (function_temp vec4) vec4 32 ssa_1791 = intrinsic load_deref (ssa_1790) (0) /* access=0 */ vec3 32 ssa_1792 = vec3 ssa_1791.x, ssa_1791.y, ssa_1791.z vec1 32 ssa_1793 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1794 = intrinsic vulkan_resource_index (ssa_1793) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1795 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1796 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1797 = iadd ssa_1795, ssa_1796 vec1 32 ssa_1798 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1799 = iadd ssa_1797, ssa_1798 vec1 32 ssa_1800 = intrinsic load_ubo (ssa_1794, ssa_1799) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1801 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1802 = iadd ssa_1797, ssa_1801 vec1 32 ssa_1803 = intrinsic load_ubo (ssa_1794, ssa_1802) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1804 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1805 = iadd ssa_1797, ssa_1804 vec1 32 ssa_1806 = intrinsic load_ubo (ssa_1794, ssa_1805) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1807 = vec3 ssa_1800, ssa_1803, ssa_1806 vec1 32 ssa_1808 = fdot3 ssa_1792, ssa_1807 vec1 32 ssa_1809 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1810 = intrinsic load_deref (ssa_1809) (0) /* access=0 */ vec4 32 ssa_1811 = vec4 ssa_1810.x, ssa_1808, ssa_1810.z, ssa_1810.w vec1 32 ssa_1812 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1812, ssa_1811) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1813 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1814 = intrinsic load_deref (ssa_1813) (0) /* access=0 */ vec3 32 ssa_1815 = vec3 ssa_1814.x, ssa_1814.y, ssa_1814.z vec1 32 ssa_1816 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1817 = intrinsic vulkan_resource_index (ssa_1816) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1818 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1819 = load_const (0x00000290 /* 0.000000 */) vec1 32 ssa_1820 = iadd ssa_1818, ssa_1819 vec1 32 ssa_1821 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1822 = iadd ssa_1820, ssa_1821 vec1 32 ssa_1823 = intrinsic load_ubo (ssa_1817, ssa_1822) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1824 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1825 = iadd ssa_1820, ssa_1824 vec1 32 ssa_1826 = intrinsic load_ubo (ssa_1817, ssa_1825) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1827 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1828 = iadd ssa_1820, ssa_1827 vec1 32 ssa_1829 = intrinsic load_ubo (ssa_1817, ssa_1828) (4, 0) /* 
align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1830 = vec3 ssa_1823, ssa_1826, ssa_1829 vec1 32 ssa_1831 = fdot3 ssa_1815, ssa_1830 vec1 32 ssa_1832 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1833 = intrinsic load_deref (ssa_1832) (0) /* access=0 */ vec4 32 ssa_1834 = vec4 ssa_1833.x, ssa_1833.y, ssa_1831, ssa_1833.w vec1 32 ssa_1835 = deref_var &r4 (function_temp vec4) intrinsic store_deref (ssa_1835, ssa_1834) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1836 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1837 = intrinsic load_deref (ssa_1836) (0) /* access=0 */ vec4 32 ssa_1838 = vec4 ssa_1837.x, ssa_1837.y, ssa_1837.z, ssa_27 vec1 32 ssa_1839 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1839, ssa_1838) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1840 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1841 = intrinsic load_deref (ssa_1840) (0) /* access=0 */ vec1 32 ssa_1842 = deref_var &r4 (function_temp vec4) vec4 32 ssa_1843 = intrinsic load_deref (ssa_1842) (0) /* access=0 */ vec1 32 ssa_1844 = fdot4 ssa_1841, ssa_1843 vec1 32 ssa_1845 = deref_var &out@o9-temp (function_temp vec4) vec4 32 ssa_1846 = intrinsic load_deref (ssa_1845) (0) /* access=0 */ vec4 32 ssa_1847 = vec4 ssa_1844, ssa_1846.y, ssa_1846.z, ssa_1846.w vec1 32 ssa_1848 = deref_var &out@o9-temp (function_temp vec4) intrinsic store_deref (ssa_1848, ssa_1847) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1849 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1850 = intrinsic load_deref (ssa_1849) (0) /* access=0 */ vec1 32 ssa_1851 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1852 = intrinsic vulkan_resource_index (ssa_1851) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1853 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1854 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1855 = iadd ssa_1853, ssa_1854 vec1 32 ssa_1856 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1857 = iadd ssa_1855, ssa_1856 vec1 32 ssa_1858 = intrinsic load_ubo (ssa_1852, ssa_1857) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1859 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1860 = iadd ssa_1855, ssa_1859 vec1 32 ssa_1861 = intrinsic load_ubo (ssa_1852, ssa_1860) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1862 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1863 = iadd ssa_1855, ssa_1862 vec1 32 ssa_1864 = intrinsic load_ubo (ssa_1852, ssa_1863) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1865 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1866 = iadd ssa_1855, ssa_1865 vec1 32 ssa_1867 = intrinsic load_ubo (ssa_1852, ssa_1866) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1868 = vec4 ssa_1858, ssa_1861, ssa_1864, ssa_1867 vec1 32 ssa_1869 = fdot4 ssa_1850, ssa_1868 vec1 32 ssa_1870 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1871 = intrinsic load_deref (ssa_1870) (0) /* access=0 */ vec4 32 ssa_1872 = vec4 ssa_1871.x, ssa_1871.y, ssa_1871.z, ssa_1869 vec1 32 ssa_1873 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_1873, ssa_1872) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1874 = deref_var &r5 (function_temp vec4) vec4 32 ssa_1875 = intrinsic load_deref (ssa_1874) (0) /* access=0 */ vec3 32 ssa_1876 = vec3 ssa_1875.x, ssa_1875.y, ssa_1875.z vec1 32 ssa_1877 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1878 = intrinsic vulkan_resource_index (ssa_1877) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1879 = 
load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1880 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1881 = iadd ssa_1879, ssa_1880 vec1 32 ssa_1882 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1883 = iadd ssa_1881, ssa_1882 vec1 32 ssa_1884 = intrinsic load_ubo (ssa_1878, ssa_1883) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1885 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1886 = iadd ssa_1881, ssa_1885 vec1 32 ssa_1887 = intrinsic load_ubo (ssa_1878, ssa_1886) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1888 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1889 = iadd ssa_1881, ssa_1888 vec1 32 ssa_1890 = intrinsic load_ubo (ssa_1878, ssa_1889) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1891 = vec3 ssa_1884, ssa_1887, ssa_1890 vec1 32 ssa_1892 = fdot3 ssa_1876, ssa_1891 vec1 32 ssa_1893 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1894 = intrinsic load_deref (ssa_1893) (0) /* access=0 */ vec4 32 ssa_1895 = vec4 ssa_1892, ssa_1894.y, ssa_1894.z, ssa_1894.w vec1 32 ssa_1896 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_1896, ssa_1895) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1897 = deref_var &r6 (function_temp vec4) vec4 32 ssa_1898 = intrinsic load_deref (ssa_1897) (0) /* access=0 */ vec3 32 ssa_1899 = vec3 ssa_1898.x, ssa_1898.y, ssa_1898.z vec1 32 ssa_1900 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1901 = intrinsic vulkan_resource_index (ssa_1900) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1902 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1903 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1904 = iadd ssa_1902, ssa_1903 vec1 32 ssa_1905 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1906 = iadd ssa_1904, ssa_1905 vec1 32 ssa_1907 = intrinsic load_ubo (ssa_1901, ssa_1906) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1908 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1909 = iadd ssa_1904, ssa_1908 vec1 32 ssa_1910 = intrinsic load_ubo (ssa_1901, ssa_1909) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1911 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1912 = iadd ssa_1904, ssa_1911 vec1 32 ssa_1913 = intrinsic load_ubo (ssa_1901, ssa_1912) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1914 = vec3 ssa_1907, ssa_1910, ssa_1913 vec1 32 ssa_1915 = fdot3 ssa_1899, ssa_1914 vec1 32 ssa_1916 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1917 = intrinsic load_deref (ssa_1916) (0) /* access=0 */ vec4 32 ssa_1918 = vec4 ssa_1917.x, ssa_1915, ssa_1917.z, ssa_1917.w vec1 32 ssa_1919 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_1919, ssa_1918) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1920 = deref_var &r2 (function_temp vec4) vec4 32 ssa_1921 = intrinsic load_deref (ssa_1920) (0) /* access=0 */ vec3 32 ssa_1922 = vec3 ssa_1921.x, ssa_1921.y, ssa_1921.z vec1 32 ssa_1923 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1924 = intrinsic vulkan_resource_index (ssa_1923) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1925 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1926 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1927 = iadd ssa_1925, ssa_1926 vec1 32 ssa_1928 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1929 = iadd ssa_1927, ssa_1928 vec1 32 ssa_1930 = intrinsic load_ubo (ssa_1924, ssa_1929) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1931 = load_const (0x00000004 /* 0.000000 */) vec1 32 
ssa_1932 = iadd ssa_1927, ssa_1931 vec1 32 ssa_1933 = intrinsic load_ubo (ssa_1924, ssa_1932) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1934 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1935 = iadd ssa_1927, ssa_1934 vec1 32 ssa_1936 = intrinsic load_ubo (ssa_1924, ssa_1935) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1937 = vec3 ssa_1930, ssa_1933, ssa_1936 vec1 32 ssa_1938 = fdot3 ssa_1922, ssa_1937 vec1 32 ssa_1939 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1940 = intrinsic load_deref (ssa_1939) (0) /* access=0 */ vec4 32 ssa_1941 = vec4 ssa_1940.x, ssa_1940.y, ssa_1938, ssa_1940.w vec1 32 ssa_1942 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_1942, ssa_1941) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1943 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1944 = intrinsic load_deref (ssa_1943) (0) /* access=0 */ vec1 32 ssa_1945 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1946 = intrinsic load_deref (ssa_1945) (0) /* access=0 */ vec1 32 ssa_1947 = fdot4 ssa_1944, ssa_1946 vec1 32 ssa_1948 = deref_var &out@o9-temp (function_temp vec4) vec4 32 ssa_1949 = intrinsic load_deref (ssa_1948) (0) /* access=0 */ vec4 32 ssa_1950 = vec4 ssa_1949.x, ssa_1947, ssa_1949.z, ssa_1949.w vec1 32 ssa_1951 = deref_var &out@o9-temp (function_temp vec4) intrinsic store_deref (ssa_1951, ssa_1950) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1952 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1953 = intrinsic load_deref (ssa_1952) (0) /* access=0 */ vec1 32 ssa_1954 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1955 = intrinsic vulkan_resource_index (ssa_1954) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1956 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1957 = load_const (0x000002b0 /* 0.000000 */) vec1 32 ssa_1958 = iadd ssa_1956, ssa_1957 vec1 32 ssa_1959 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1960 = iadd ssa_1958, ssa_1959 vec1 32 ssa_1961 = intrinsic load_ubo (ssa_1955, ssa_1960) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1962 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1963 = iadd ssa_1958, ssa_1962 vec1 32 ssa_1964 = intrinsic load_ubo (ssa_1955, ssa_1963) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1965 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1966 = iadd ssa_1958, ssa_1965 vec1 32 ssa_1967 = intrinsic load_ubo (ssa_1955, ssa_1966) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1968 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1969 = iadd ssa_1958, ssa_1968 vec1 32 ssa_1970 = intrinsic load_ubo (ssa_1955, ssa_1969) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1971 = vec4 ssa_1961, ssa_1964, ssa_1967, ssa_1970 vec1 32 ssa_1972 = fdot4 ssa_1953, ssa_1971 vec1 32 ssa_1973 = deref_var &r3 (function_temp vec4) vec4 32 ssa_1974 = intrinsic load_deref (ssa_1973) (0) /* access=0 */ vec4 32 ssa_1975 = vec4 ssa_1974.x, ssa_1974.y, ssa_1974.z, ssa_1972 vec1 32 ssa_1976 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_1976, ssa_1975) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1977 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1978 = intrinsic load_deref (ssa_1977) (0) /* access=0 */ vec1 32 ssa_1979 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1980 = intrinsic vulkan_resource_index (ssa_1979) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1981 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1982 = load_const (0x000002c0 /* 
0.000000 */) vec1 32 ssa_1983 = iadd ssa_1981, ssa_1982 vec1 32 ssa_1984 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1985 = iadd ssa_1983, ssa_1984 vec1 32 ssa_1986 = intrinsic load_ubo (ssa_1980, ssa_1985) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1987 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1988 = iadd ssa_1983, ssa_1987 vec1 32 ssa_1989 = intrinsic load_ubo (ssa_1980, ssa_1988) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1990 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1991 = iadd ssa_1983, ssa_1990 vec1 32 ssa_1992 = intrinsic load_ubo (ssa_1980, ssa_1991) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1993 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_1994 = iadd ssa_1983, ssa_1993 vec1 32 ssa_1995 = intrinsic load_ubo (ssa_1980, ssa_1994) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec4 32 ssa_1996 = vec4 ssa_1986, ssa_1989, ssa_1992, ssa_1995 vec1 32 ssa_1997 = fdot4 ssa_1978, ssa_1996 vec1 32 ssa_1998 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1999 = intrinsic load_deref (ssa_1998) (0) /* access=0 */ vec4 32 ssa_2000 = vec4 ssa_1999.x, ssa_1999.y, ssa_1999.z, ssa_1997 vec1 32 ssa_2001 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_2001, ssa_2000) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2002 = deref_var &r5 (function_temp vec4) vec4 32 ssa_2003 = intrinsic load_deref (ssa_2002) (0) /* access=0 */ vec3 32 ssa_2004 = vec3 ssa_2003.x, ssa_2003.y, ssa_2003.z vec1 32 ssa_2005 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2006 = intrinsic vulkan_resource_index (ssa_2005) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_2007 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2008 = load_const (0x000002b0 /* 0.000000 */) vec1 32 ssa_2009 = iadd ssa_2007, ssa_2008 vec1 32 ssa_2010 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2011 = iadd ssa_2009, ssa_2010 vec1 32 ssa_2012 = intrinsic load_ubo (ssa_2006, ssa_2011) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2013 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_2014 = iadd ssa_2009, ssa_2013 vec1 32 ssa_2015 = intrinsic load_ubo (ssa_2006, ssa_2014) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2016 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2017 = iadd ssa_2009, ssa_2016 vec1 32 ssa_2018 = intrinsic load_ubo (ssa_2006, ssa_2017) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_2019 = vec3 ssa_2012, ssa_2015, ssa_2018 vec1 32 ssa_2020 = fdot3 ssa_2004, ssa_2019 vec1 32 ssa_2021 = deref_var &r3 (function_temp vec4) vec4 32 ssa_2022 = intrinsic load_deref (ssa_2021) (0) /* access=0 */ vec4 32 ssa_2023 = vec4 ssa_2020, ssa_2022.y, ssa_2022.z, ssa_2022.w vec1 32 ssa_2024 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_2024, ssa_2023) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2025 = deref_var &r5 (function_temp vec4) vec4 32 ssa_2026 = intrinsic load_deref (ssa_2025) (0) /* access=0 */ vec3 32 ssa_2027 = vec3 ssa_2026.x, ssa_2026.y, ssa_2026.z vec1 32 ssa_2028 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2029 = intrinsic vulkan_resource_index (ssa_2028) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_2030 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2031 = load_const (0x000002c0 /* 0.000000 */) vec1 32 ssa_2032 = iadd ssa_2030, ssa_2031 vec1 32 ssa_2033 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2034 = iadd ssa_2032, ssa_2033 vec1 32 ssa_2035 = intrinsic load_ubo (ssa_2029, ssa_2034) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2036 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_2037 = iadd ssa_2032, ssa_2036 vec1 32 ssa_2038 = intrinsic load_ubo (ssa_2029, ssa_2037) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2039 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2040 = iadd ssa_2032, ssa_2039 vec1 32 ssa_2041 = intrinsic load_ubo (ssa_2029, ssa_2040) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_2042 = vec3 ssa_2035, ssa_2038, ssa_2041 vec1 32 ssa_2043 = fdot3 ssa_2027, ssa_2042 vec1 32 ssa_2044 = deref_var &r1 (function_temp vec4) vec4 32 ssa_2045 = intrinsic load_deref (ssa_2044) (0) /* access=0 */ vec4 32 ssa_2046 = vec4 ssa_2043, ssa_2045.y, ssa_2045.z, ssa_2045.w vec1 32 ssa_2047 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_2047, ssa_2046) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2048 = deref_var &r6 (function_temp vec4) vec4 32 ssa_2049 = intrinsic load_deref (ssa_2048) (0) /* access=0 */
[log corrupted here: several shader-translation threads wrote their NIR dumps concurrently and the output is interleaved mid-instruction. The recoverable run of each dump is reassembled below; bracketed markers stand for text lost to the interleaving.]
vec4 32 ssa_1141 = vec4 ssa_1140.x, ssa_1140.y, ssa_1140.z, ssa_1138 vec1 32 ssa_1142 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1142, ssa_1141) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1143 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1144 = intrinsic load_deref (ssa_1143) (0) /* access=0 */ vec1 32 ssa_1145 = imov ssa_1144.w vec1 32 ssa_1146 = ineg ssa_1145 vec1 32 ssa_1147 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1148 = intrinsic load_deref (ssa_1147) (0) /* access=0 */ vec1 32 ssa_1149 = imov ssa_1148.w vec1 32 ssa_1150 = iadd ssa_1146, ssa_1149 vec1 32 ssa_1151 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1152 = intrinsic load_deref (ssa_1151) (0) /* access=0 */ vec4 32 ssa_1153 = vec4 ssa_1152.x, ssa_1152.y, ssa_1152.z, ssa_1150 vec1 32 ssa_1154 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1154, ssa_1153) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1155 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1156 = intrinsic load_deref (ssa_1155) (0) /* access=0 */ vec1 32 ssa_1157 = imov ssa_1156.w vec1 32 ssa_1158 = i2f32 ssa_1157 vec1 32 ssa_1159 = deref_var &r0 (function_temp vec4) vec4 32 ssa_1160 = intrinsic load_deref (ssa_1159) (0) /* access=0 */ vec4 32 ssa_1161 = vec4 ssa_1160.x, ssa_1160.y, ssa_1160.z, ssa_1158 vec1 32 ssa_1162 = deref_var &r0 (function_temp vec4) intrinsic store_deref (ssa_1162, ssa_1161) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1163 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1164 = intrinsic load_deref (ssa_1163) (0) /* access=0 */ vec2 32 ssa_1165 = vec2 ssa_1164.x, ssa_1164.x vec2 32 ssa_1166 = ushr ssa_1165, [operand lost] vec1 32 ssa_1167 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1168 = intrinsic load_deref (ssa_1167) (0) /* access=0 */ vec4 32 ssa_1169 = vec4 ssa_1168.x, ssa_1166.x, ssa_1166.y, ssa_1168.w vec1 32 ssa_1170 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1170, ssa_1169) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1171 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1172 = intrinsic load_deref (ssa_1171) (0) /* access=0 */ vec3 32 ssa_1173 = vec3 ssa_1172.x, ssa_1172.y, ssa_1172.z vec3 32 ssa_1174 = iand ssa_1173, [operand lost] vec1 32 ssa_1175 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1176 = intrinsic load_deref (ssa_1175) (0) /* access=0 */ vec4 32 ssa_1177 = vec4 ssa_1174.x, ssa_1174.y, ssa_1174.z, ssa_1176.w vec1 32 ssa_1178 = deref_var &r1 (function_temp vec4) intrinsic store_deref (ssa_1178, ssa_1177) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1179 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1180 = intrinsic load_deref (ssa_1179) (0) /* access=0 */ vec3 32 ssa_1181 = vec3 ssa_1180.x, ssa_1180.y, ssa_1180.z vec3 32 ssa_1182 = u2f32 ssa_1181 vec1 32 ssa_1183 = deref_var &r1 (function_temp vec4) vec4 32 ssa_1184 = intrinsic load_deref (ssa_1183) (0) /* access=0 */ vec4 32 ssa_1185 = vec4 ssa_1182.x, ssa_1182.y, ssa_1182.z, [rest of this dump lost]
vec1 32 ssa_1283 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1284 = intrinsic load_deref (ssa_1283) (0) /* access=0 */ vec3 32 ssa_1285 = vec3 ssa_1284.y, ssa_1284.z, ssa_1284.x vec1 32 ssa_1286 = deref_var &r8 (shader_temp vec4) vec4 32 ssa_1287 = intrinsic load_deref (ssa_1286) (0) /* access=0 */ vec3 32 ssa_1288 = vec3 ssa_1287.x, ssa_1287.y, ssa_1287.z vec1 32 ssa_1289 = fdot3 ssa_1285, ssa_1288 vec1 32 ssa_1290 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1291 = intrinsic load_deref (ssa_1290) (0) /* access=0 */ vec4 32 ssa_1292 = vec4 ssa_1289, ssa_1291.y, ssa_1291.z, ssa_1291.w vec1 32 ssa_1293 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1293, ssa_1292) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1294 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1295 = intrinsic load_deref (ssa_1294) (0) /* access=0 */ vec3 32 ssa_1296 = vec3 ssa_1295.y, ssa_1295.z, ssa_1295.x vec1 32 ssa_1297 = deref_var &r12 (shader_temp vec4) vec4 32 ssa_1298 = intrinsic load_deref (ssa_1297) (0) /* access=0 */ vec3 32 ssa_1299 = vec3 ssa_1298.x, ssa_1298.y, ssa_1298.z vec1 32 ssa_1300 = fdot3 ssa_1296, ssa_1299 vec1 32 ssa_1301 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1302 = intrinsic load_deref (ssa_1301) (0) /* access=0 */ vec4 32 ssa_1303 = vec4 ssa_1302.x, ssa_1300, ssa_1302.z, ssa_1302.w vec1 32 ssa_1304 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1304, ssa_1303) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1305 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1306 = intrinsic load_deref (ssa_1305) (0) /* access=0 */ vec3 32 ssa_1307 = vec3 ssa_1306.y, ssa_1306.z, ssa_1306.x vec1 32 ssa_1308 = deref_var &r13 (shader_temp vec4) vec4 32 ssa_1309 = intrinsic load_deref (ssa_1308) (0) /* access=0 */ vec3 32 ssa_1310 = vec3 ssa_1309.x, ssa_1309.y, ssa_1309.z vec1 32 ssa_1311 = fdot3 ssa_1307, ssa_1310 vec1 32 ssa_1312 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1313 = intrinsic load_deref (ssa_1312) (0) /* access=0 */ vec4 32 ssa_1314 = vec4 ssa_1313.x, ssa_1313.y, ssa_1311, ssa_1313.w vec1 32 ssa_1315 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1315, ssa_1314) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1316 = deref_var &o3 (shader_out vec4) vec4 32 ssa_1317 = intrinsic load_deref (ssa_1316) (0) /* access=0 */ vec4 32 ssa_1318 = vec4 ssa_1317.x, ssa_1317.y, ssa_1317.z, ssa_38 vec1 32 ssa_1319 = deref_var &o3 (shader_out vec4) intrinsic store_deref (ssa_1319, ssa_1318) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1320 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1321 = intrinsic load_deref (ssa_1320) (0) /* access=0 */ vec1 32 ssa_1322 = imov ssa_1321.x vec1 32 ssa_1323 = fabs ssa_1322 vec1 32 ssa_1324 = fneg ssa_1323 vec1 32 ssa_1325 = fadd ssa_1324, ssa_37 vec1 32 ssa_1326 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1327 = intrinsic load_deref (ssa_1326) (0) /* access=0 */ vec4 32 ssa_1328 = vec4 ssa_1325, ssa_1327.y, ssa_1327.z, ssa_1327.w vec1 32 ssa_1329 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1329, ssa_1328) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1330 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1331 = intrinsic load_deref (ssa_1330) (0) /* access=0 */ vec1 32 ssa_1332 = imov ssa_1331.y vec1 32 ssa_1333 = fabs ssa_1332 vec1 32 ssa_1334 = fneg ssa_1333 vec1 32 ssa_1335 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1336 = intrinsic load_deref (ssa_1335) (0) /* access=0 */ vec1 32 ssa_1337 = imov ssa_1336.x vec1 32 ssa_1338 = fadd ssa_1334, ssa_1337 vec1 32 ssa_1339 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1340 = intrinsic load_deref (ssa_1339) (0) /* access=0 */ vec4 32 ssa_1341 = vec4 ssa_1340.x, ssa_1340.y, ssa_1338, ssa_1340.w vec1 32 ssa_1342 = deref_var &r4 (shader_temp vec4) intrinsic store_deref (ssa_1342, ssa_1341) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1343 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1344 = intrinsic load_deref (ssa_1343) (0) /* access=0 */ vec3 32 ssa_1345 = vec3 ssa_1344.x, ssa_1344.y, ssa_1344.z vec1 32 ssa_1346 = deref_var &r4 (shader_temp vec4) vec4 32 ssa_1347 = intrinsic load_deref (ssa_1346) (0) /* access=0 */ vec3 32 ssa_1348 = vec3 ssa_1347.x, ssa_1347.y, ssa_1347.z vec1 32 ssa_1349 = fdot3 ssa_1345, ssa_1348 vec1 32 ssa_1350 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1351 = intrinsic load_deref (ssa_1350) (0) /* access=0 */ vec4 32 ssa_1352 = vec4 ssa_1349, ssa_1351.y, ssa_1351.z, ssa_1351.w vec1 32 ssa_1353 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1353, ssa_1352) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1354 = deref_var & [rest of this dump lost]
vec1 32 ssa_1535 = iadd ssa_1533, ssa_1534 vec1 32 ssa_1536 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1537 = iadd ssa_1535, ssa_1536 vec1 32 ssa_1538 = intrinsic load_ubo (ssa_1532, ssa_1537) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1539 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1540 = iadd ssa_1535, ssa_1539 vec1 32 ssa_1541 = intrinsic load_ubo (ssa_1532, ssa_1540) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1542 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1543 = iadd ssa_1535, ssa_1542 vec1 32 ssa_1544 = intrinsic load_ubo (ssa_1532, ssa_1543) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1545 = vec3 ssa_1538, ssa_1541, ssa_1544 vec1 32 ssa_1546 = fdot3 ssa_1530, ssa_1545 vec1 32 ssa_1547 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1548 = intrinsic load_deref (ssa_1547) (0) /* access=0 */ vec4 32 ssa_1549 = vec4 ssa_1548.x, ssa_1548.y, ssa_1546, ssa_1548.w vec1 32 ssa_1550 = deref_var &r0 (shader_temp vec4) intrinsic store_deref (ssa_1550, ssa_1549) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1551 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1552 = intrinsic load_deref (ssa_1551) (0) /* access=0 */ vec1 32 ssa_1553 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_1554 = intrinsic load_deref (ssa_1553) (0) /* access=0 */ vec1 32 ssa_1555 = fdot4 ssa_1552, ssa_1554 vec1 32 ssa_1556 = deref_var [rest of this dump lost]
vec3 32 ssa_1802 = vec3 ssa_1801.x, ssa_1801.y, ssa_1801.z vec1 32 ssa_1803 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1804 = intrinsic vulkan_resource_index (ssa_1803) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1805 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1806 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1807 = iadd ssa_1805, ssa_1806 vec1 32 ssa_1808 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1809 = iadd ssa_1807, ssa_1808 vec1 32 ssa_1810 = intrinsic load_ubo (ssa_1804, ssa_1809) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1811 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1812 = iadd ssa_1807, ssa_1811 vec1 32 ssa_1813 = intrinsic load_ubo (ssa_1804, ssa_1812) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1814 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1815 = iadd ssa_1807, ssa_1814 vec1 32 ssa_1816 = intrinsic load_ubo (ssa_1804, ssa_1815) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1817 = vec3 ssa_1810, ssa_1813, ssa_1816 vec1 32 ssa_1818 = fdot3 ssa_1802, ssa_1817 vec1 32 ssa_1819 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1820 = intrinsic load_deref (ssa_1819) (0) /* access=0 */ vec4 32 ssa_1821 = vec4 ssa_1820.x, ssa_1818, ssa_1820.z, ssa_1820.w vec1 32 ssa_1822 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1822, ssa_1821) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1823 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_1824 = intrinsic load_deref (ssa_1823) (0) /* access=0 */ vec3 32 ssa_1825 = vec3 ssa_1824.x, ssa_1824.y, ssa_1824.z vec1 32 ssa_1826 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1827 = intrinsic vulkan_resource_index (ssa_1826) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1828 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1829 = load_const (0x000002a0 /* 0.000000 */) vec1 32 ssa_1830 = iadd ssa_1828, ssa_1829 vec1 32 ssa_1831 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1832 = iadd ssa_1830, ssa_1831 vec1 32 ssa_1833 = intrinsic load_ubo (ssa_1827, ssa_1832) (4, 0) /* align_mul=4 */ [rest of this dump lost]
vec4 32 ssa_1940 = vec4 ssa_1930, ssa_1933, ssa_1936, ssa_1939 vec1 32 ssa_1941 = fdot4 ssa_1922, ssa_1940 vec1 32 ssa_1942 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1943 = intrinsic load_deref (ssa_1942) (0) /* access=0 */ vec4 32 ssa_1944 = vec4 ssa_1943.x, ssa_1943.y, ssa_1943.z, ssa_1941 vec1 32 ssa_1945 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1945, ssa_1944) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1946 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1947 = intrinsic load_deref (ssa_1946) (0) /* access=0 */ vec3 32 ssa_1948 = vec3 ssa_1947.x, ssa_1947.y, ssa_1947.z vec1 32 ssa_1949 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1950 = intrinsic vulkan_resource_index (ssa_1949) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1951 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1952 = load_const (0x000002b0 /* 0.000000 */) vec1 32 ssa_1953 = iadd ssa_1951, ssa_1952 vec1 32 ssa_1954 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1955 = iadd ssa_1953, ssa_1954 vec1 32 ssa_1956 = intrinsic load_ubo (ssa_1950, ssa_1955) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1957 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1958 = iadd ssa_1953, ssa_1957 vec1 32 ssa_1959 = intrinsic load_ubo (ssa_1950, ssa_1958) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1960 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1961 = iadd ssa_1953, ssa_1960 vec1 32 ssa_1962 = intrinsic load_ubo (ssa_1950, ssa_1961) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1963 = vec3 ssa_1956, ssa_1959, ssa_1962 vec1 32 ssa_1964 = fdot3 ssa_1948, ssa_1963 vec1 32 ssa_1965 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_1966 = intrinsic load_deref (ssa_1965) (0) /* access=0 */ vec4 32 ssa_1967 = vec4 ssa_1964, ssa_1966.y, ssa_1966.z, ssa_1966.w vec1 32 ssa_1968 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_1968, ssa_1967) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1969 = deref_var &r5 (shader_temp vec4) vec4 32 ssa_1970 = intrinsic load_deref (ssa_1969) (0) /* access=0 */ vec3 32 ssa_1971 = vec3 ssa_1970.x, ssa_1970.y, ssa_1970.z vec1 32 ssa_1972 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1973 = intrinsic vulkan_resource_index (ssa_1972) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1974 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1975 = load_const (0x000002c0 /* 0.000000 */) vec1 32 ssa_1976 = iadd ssa_1974, ssa_1975 vec1 32 ssa_1977 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1978 = iadd ssa_1976, ssa_1977 vec1 32 ssa_1979 = intrinsic load_ubo (ssa_1973, ssa_1978) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1980 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_1981 = iadd ssa_1976, ssa_1980 vec1 32 ssa_1982 = intrinsic load_ubo (ssa_1973, ssa_1981) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_1983 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_1984 = iadd ssa_1976, ssa_1983 vec1 32 ssa_1985 = intrinsic load_ubo (ssa_1973, ssa_1984) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_1986 = vec3 ssa_1979, ssa_1982, ssa_1985 vec1 32 ssa_1987 = fdot3 ssa_1971, ssa_1986 vec1 32 ssa_1988 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_1989 = intrinsic load_deref (ssa_1988) (0) /* access=0 */ vec4 32 ssa_1990 = vec4 ssa_1987, ssa_1989.y, ssa_1989.z, ssa_1989.w vec1 32 ssa_1991 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_1991, ssa_1990) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_1992 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_1993 = intrinsic load_deref (ssa_1992) (0) /* access=0 */ vec3 32 ssa_1994 = vec3 ssa_1993.x, ssa_1993.y, ssa_1993.z vec1 32 ssa_1995 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1996 = intrinsic vulkan_resource_index (ssa_1995) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_1997 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_1998 = load_const (0x000002b0 /* 0.000000 */) vec1 32 ssa_1999 = iadd ssa_1997, ssa_1998 vec1 32 ssa_2000 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2001 = iadd ssa_1999, ssa_2000 vec1 32 ssa_2002 = intrinsic load_ubo (ssa_1996, ssa_2001) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2003 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_2004 = iadd ssa_1999, ssa_2003 vec1 32 ssa_2005 = intrinsic load_ubo (ssa_1996, ssa_2004) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2006 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2007 = iadd ssa_1999, ssa_2006 vec1 32 ssa_2008 = intrinsic load_ubo (ssa_1996, ssa_2007) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_2009 = vec3 ssa_2002, ssa_2005, ssa_2008 vec1 32 ssa_2010 = fdot3 ssa_1994, ssa_2009 vec1 32 ssa_2011 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_2012 = intrinsic load_deref (ssa_2011) (0) /* access=0 */ vec4 32 ssa_2013 = vec4 ssa_2012.x, ssa_2010, ssa_2012.z, ssa_2012.w vec1 32 ssa_2014 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_2014, ssa_2013) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2015 = deref_var &r6 (shader_temp vec4) vec4 32 ssa_2016 = intrinsic load_deref (ssa_2015) (0) /* access=0 */ vec3 32 ssa_2017 = vec3 ssa_2016.x, ssa_2016.y, ssa_2016.z vec1 32 ssa_2018 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2019 = intrinsic vulkan_resource_index (ssa_2018) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_2020 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2021 = load_const (0x000002c0 /* 0.000000 */) vec1 32 ssa_2022 = iadd ssa_2020, ssa_2021 vec1 32 ssa_2023 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2024 = iadd ssa_2022, ssa_2023 vec1 32 ssa_2025 = intrinsic load_ubo (ssa_2019, ssa_2024) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2026 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_2027 = iadd ssa_2022, ssa_2026 vec1 32 ssa_2028 = intrinsic load_ubo (ssa_2019, ssa_2027) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2029 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2030 = iadd ssa_2022, ssa_2029 vec1 32 ssa_2031 = intrinsic load_ubo (ssa_2019, ssa_2030) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_2032 = vec3 ssa_2025, ssa_2028, ssa_2031 vec1 32 ssa_2033 = fdot3 ssa_2017, ssa_2032 vec1 32 ssa_2034 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_2035 = intrinsic load_deref (ssa_2034) (0) /* access=0 */ vec4 32 ssa_2036 = vec4 ssa_2035.x, ssa_2033, ssa_2035.z, ssa_2035.w vec1 32 ssa_2037 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_2037, ssa_2036) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2038 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_2039 = intrinsic load_deref (ssa_2038) (0) /* access=0 */ vec3 32 ssa_2040 = vec3 ssa_2039.x, ssa_2039.y, ssa_2039.z vec1 32 ssa_2041 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2042 = intrinsic vulkan_resource_index (ssa_2041) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_2043 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2044 = load_const (0x000002b0 /* 0.000000 */) vec1 32 ssa_2045 = iadd
ssa_2043, ssa_2044 vec1 32 ssa_2046 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2047 = iadd ssa_2045, ssa_2046 vec1 32 ssa_2048 = intrinsic load_ubo (ssa_2042, ssa_2047) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2049 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_2050 = iadd ssa_2045, ssa_2049 vec1 32 ssa_2051 = intrinsic load_ubo (ssa_2042, ssa_2050) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2052 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2053 = iadd ssa_2045, ssa_2052 vec1 32 ssa_2054 = intrinsic load_ubo (ssa_2042, ssa_2053) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_2055 = vec3 ssa_2048, ssa_2051, ssa_2054 vec1 32 ssa_2056 = fdot3 ssa_2040, ssa_2055 vec1 32 ssa_2057 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_2058 = intrinsic load_deref (ssa_2057) (0) /* access=0 */ vec4 32 ssa_2059 = vec4 ssa_2058.x, ssa_2058.y, ssa_2056, ssa_2058.w vec1 32 ssa_2060 = deref_var &r3 (shader_temp vec4) intrinsic store_deref (ssa_2060, ssa_2059) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2061 = deref_var &r2 (shader_temp vec4) vec4 32 ssa_2062 = intrinsic load_deref (ssa_2061) (0) /* access=0 */ vec3 32 ssa_2063 = vec3 ssa_2062.x, ssa_2062.y, ssa_2062.z vec1 32 ssa_2064 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2065 = intrinsic vulkan_resource_index (ssa_2064) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_2066 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2067 = load_const (0x000002c0 /* 0.000000 */) vec1 32 ssa_2068 = iadd ssa_2066, ssa_2067 vec1 32 ssa_2069 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2070 = iadd ssa_2068, ssa_2069 vec1 32 ssa_2071 = intrinsic load_ubo (ssa_2065, ssa_2070) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2072 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_2073 = iadd ssa_2068, ssa_2072 vec1 32 ssa_2074 = intrinsic load_ubo (ssa_2065, ssa_2073) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2075 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2076 = iadd ssa_2068, ssa_2075 vec1 32 ssa_2077 = intrinsic load_ubo (ssa_2065, ssa_2076) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_2078 = vec3 ssa_2071, ssa_2074, ssa_2077 vec1 32 ssa_2079 = fdot3 ssa_2063, ssa_2078 vec1 32 ssa_2080 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_2081 = intrinsic load_deref (ssa_2080) (0) /* access=0 */ vec4 32 ssa_2082 = vec4 ssa_2081.x, ssa_2081.y, ssa_2079, ssa_2081.w vec1 32 ssa_2083 = deref_var &r1 (shader_temp vec4) intrinsic store_deref (ssa_2083, ssa_2082) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2084 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_2085 = intrinsic load_deref (ssa_2084) (0) /* access=0 */ vec1 32 ssa_2086 = deref_var &r1 (shader_temp vec4) vec4 32 ssa_2087 = intrinsic load_deref (ssa_2086) (0) /* access=0 */ vec1 32 ssa_2088 = fdot4 ssa_2085, ssa_2087 vec1 32 ssa_2089 = deref_var &o7 (shader_out vec4) vec4 32 ssa_2090 = intrinsic load_deref (ssa_2089) (0) /* access=0 */ vec4 32 ssa_2091 = vec4 ssa_2090.x, ssa_2090.y, ssa_2090.z, ssa_2088 vec1 32 ssa_2092 = deref_var &o7 (shader_out vec4) intrinsic store_deref (ssa_2092, ssa_2091) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2093 = deref_var &r0 (shader_temp vec4) vec4 32 ssa_2094 = intrinsic load_deref (ssa_2093) (0) /* access=0 */ vec1 32 ssa_2095 = deref_var &r3 (shader_temp vec4) vec4 32 ssa_2096 = intrinsic load_deref (ssa_2095) (0) /* access=0 */ vec1 32 ssa_2097 = fdot4 ssa_2094, ssa_2096 vec1 32 ssa_2098 = 
deref_var &o7 (shader_out vec4) vec4 32 ssa_2099 = intrinsic load_deref (ssa_2098) (0) /* access=0 */ vec4 32 ssa_2100 = vec4 ssa_2099.x, ssa_2099.y, ssa_2097, ssa_2099.w vec1 32 ssa_2101 = deref_var &o7 (shader_out vec4) intrinsic store_deref (ssa_2101, ssa_2100) (15, 0) /* wrmask=xyzw */ /* access=0 */ vec1 32 ssa_2102 = deref_var &shader_in (shader_temp Z) vec1 32 ssa_2103 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2104 = deref_array &(*ssa_2102)[0] (shader_temp vec4) /* &shader_in[0] */ error: instr->type == glsl_get_array_element(parent->type) (../src/compiler/nir/nir_validate.c:466) vec4 32 ssa_2105 = intrinsic load_deref (ssa_2104) (0) /* access=0 */ vec1 32 ssa_2106 = imov ssa_2105.x vec1 32 ssa_2107 = deref_var &o8 (shader_out float) intrinsic store_deref (ssa_2107, ssa_2106) (1, 0) /* wrmask=x */ /* access=0 */ vec1 32 ssa_2108 = deref_var &o0 (shader_out vec4) vec4 32 ssa_2109 = intrinsic load_deref (ssa_2108) (0) /* access=0 */ vec1 32 ssa_2110 = deref_var &vs_vertex_out (shader_out Z) vec1 32 ssa_2111 = deref_struct &ssa_2110 [rest of this dump lost to interleaving]
vec3 32 ssa_2050 = vec3 ssa_2049.x, ssa_2049.y, ssa_2049.z vec1 32 ssa_2051 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2052 = intrinsic vulkan_resource_index (ssa_2051) (0, 0, 6) /* desc-set=0 */ /* binding=0 */ /* desc_type=UBO */ vec1 32 ssa_2053 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2054 = load_const (0x000002b0 /* 0.000000 */) vec1 32 ssa_2055 = iadd ssa_2053, ssa_2054 vec1 32 ssa_2056 = load_const (0x00000000 /* 0.000000 */) vec1 32 ssa_2057 = iadd ssa_2055, ssa_2056 vec1 32 ssa_2058 = intrinsic load_ubo (ssa_2052, ssa_2057) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2059 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_2060 = iadd ssa_2055, ssa_2059 vec1 32 ssa_2061 = intrinsic load_ubo (ssa_2052, ssa_2060) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_2062 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_2063 = iadd ssa_2055, ssa_2062 vec1 32 ssa_2064 = intrinsic load_ubo (ssa_2052, ssa_2063) (4, 0) /* align_mul=4 */ /* align_offset=0 */ vec3 32 ssa_2065 = vec3 ssa_2058, ssa_2061, ssa_2064 vec1 32 ssa_2066 = fdot3 ssa_2050, ssa_2065 vec1 32 ssa_2067 = deref_var &r3 (function_temp vec4) vec4 32 ssa_2068 = intrinsic load_deref (ssa_2067) (0) /* access=0 */ vec4 32 ssa_2069 = vec4 ssa_2068.x, ssa_2066, ssa_2068.z, ssa_2068.w vec1 32 ssa_2070 = deref_var &r3 (function_temp vec4) intrinsic store_deref (ssa_2070, ssa_2069) (15, 0) /* wrmask=xyzw */ /* access=0 */
wine: Unhandled page fault on read access to 0x00000008 at address 0x7f60e86ea26e (thread 00fa), starting debugger...