diff --git a/.gitignore b/.gitignore index 1ef8fa6..190cbae 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,9 @@ q2pro q2proded game*.so +*.swp +build/ +/baseq2 +shader_vkpt +./vkpt +./vkptded diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..740cbf5 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,64 @@ +cmake_minimum_required (VERSION 3.8) + +include("cmake/HunterGate.cmake") +# ---------- Startup hunter ------------------------- +HunterGate( + URL "https://github.com/ruslo/hunter/archive/v0.20.7.tar.gz" + SHA1 "a376088a679fbfbe3a2c330746801f1d47e51a37" +) + +PROJECT(quake2-pt) + +OPTION(CONFIG_GL_RENDERER "Enable GL renderer") +OPTION(CONFIG_VKPT_RENDERER "Enable VKPT renderer" ON) +OPTION(CONFIG_USE_OPTIX "Use optix for tracing rays") +SET(CONFIG_OPTIX_DIR "" CACHE PATH "Optix SDK directory") + +# ---------- Setup output Directories ------------------------- +SET (CMAKE_LIBRARY_OUTPUT_DIRECTORY + ${PROJECT_BINARY_DIR}/Bin + CACHE PATH + "Single Directory for all Libraries" + ) + +# --------- Setup the Executable output Directory ------------- +SET (CMAKE_RUNTIME_OUTPUT_DIRECTORY + ${PROJECT_BINARY_DIR}/Bin + CACHE PATH + "Single Directory for all Executables." + ) + +# --------- Dependencies ------------------------------------- +if (CMAKE_SIZEOF_VOID_P EQUAL 8) + set( IS_64_BIT 1 ) +else () +endif () + +# --------- Dependencies ------------------------------------- +hunter_add_package(zlib) +hunter_add_package(png) +hunter_add_package(jpeg) +hunter_add_package(sdl2) +#hunter_add_package(curl) + +find_package(ZLIB CONFIG REQUIRED) +find_package(PNG CONFIG REQUIRED) +find_package(JPEG CONFIG REQUIRED) +find_package(SDL2 CONFIG REQUIRED) +#find_package(Vulkan) +#find_library(Vulkan_LIBRARY NAMES vulkan-1 vulkan PATHS "C:/VulkanSDK/1.1.85.0") + +link_directories(.) + +# Compatibility mode +find_package(ZLIB REQUIRED) +string(COMPARE EQUAL "${ZLIB_INCLUDE_DIRS}" "" is_empty) +if(is_empty) + message(FATAL_ERROR "Expected non-empty") +endif() + + +SET(CMAKE_INSTALL_PREFIX ${PROJECT_BINARY_DIR}/install) + + +ADD_SUBDIRECTORY(src) diff --git a/Makefile b/Makefile index 431bdc0..5af71f0 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ -include .config + ifdef CONFIG_WINDOWS CPU ?= x86 SYS ?= Win32 @@ -45,6 +46,17 @@ LDFLAGS_s := LDFLAGS_c := LDFLAGS_g := -shared +ifdef CONFIG_SANITIZE +CFLAGS_c += -O0 -fsanitize=address -fno-omit-frame-pointer +LDFLAGS_c += -fsanitize=address +endif + +ifdef CONFIG_VKPT_ENABLE_VALIDATION +ifneq ($(CONFIG_VKPT_ENABLE_VALIDATION),0) +CFLAGS_c += -DVKPT_ENABLE_VALIDATION +endif +endif + ifdef CONFIG_WINDOWS # Force i?86-netware calling convention on x86 Windows ifeq ($(CPU),x86) @@ -117,7 +129,7 @@ ifndef CONFIG_WINDOWS PATH_DEFS += -DHOMEDIR='"$(CONFIG_PATH_HOME)"' endif -CFLAGS_s += $(BUILD_DEFS) $(VER_DEFS) $(PATH_DEFS) -DUSE_SERVER=1 +CFLAGS_s += $(BUILD_DEFS) $(VER_DEFS) $(PATH_DEFS) -DUSE_SERVER=1 -DUSE_CLIENT=0 CFLAGS_c += $(BUILD_DEFS) $(VER_DEFS) $(PATH_DEFS) -DUSE_SERVER=1 -DUSE_CLIENT=1 # windres needs special quoting... 
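The CONFIG_VKPT_ENABLE_VALIDATION switch above only adds -DVKPT_ENABLE_VALIDATION to CFLAGS_c; the renderer code that reacts to the define is not part of this hunk. As a rough sketch of how such a switch is commonly consumed, assuming the usual pattern of requesting a Vulkan instance validation layer (the create_instance helper and the layer name below are illustrative, not taken from this patch):

#include <stddef.h>
#include <vulkan/vulkan.h>

/* Sketch: request the era-typical LunarG validation layer only when the
 * build defines VKPT_ENABLE_VALIDATION, mirroring the Makefile switch. */
VkInstance create_instance(void)
{
    VkApplicationInfo app = {
        .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
        .pApplicationName = "q2vkpt",
        .apiVersion = VK_API_VERSION_1_1,
    };

    VkInstanceCreateInfo info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pApplicationInfo = &app,
    };

#ifdef VKPT_ENABLE_VALIDATION
    /* Layer name is an assumption (standard LunarG SDK layer of the time). */
    static const char *layers[] = { "VK_LAYER_LUNARG_standard_validation" };
    info.enabledLayerCount = 1;
    info.ppEnabledLayerNames = layers;
#endif

    VkInstance instance = VK_NULL_HANDLE;
    if (vkCreateInstance(&info, NULL, &instance) != VK_SUCCESS)
        return VK_NULL_HANDLE;
    return instance;
}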
@@ -252,6 +264,13 @@ OBJS_g := \ ### Configuration Options ### +# cd tracks via ogg +ifdef CONFIG_OGG + OBJS_c += src/client/sound/ogg.o + CFLAGS_c += -DOGG + LIBS_c += -lvorbisfile -lvorbis -logg +endif + ifdef CONFIG_HTTP CURL_CFLAGS ?= $(shell pkg-config libcurl --cflags) CURL_LIBS ?= $(shell pkg-config libcurl --libs) @@ -331,7 +350,8 @@ ifdef CONFIG_SOFTWARE_RENDERER OBJS_c += src/refresh/sw/surf.o OBJS_c += src/refresh/sw/sird.o OBJS_c += src/refresh/sw/sky.o -else +endif +ifdef CONFIG_GL_RENDERER CFLAGS_c += -DREF_GL=1 -DUSE_REF=1 -DVID_REF='"gl"' OBJS_c += src/refresh/gl/draw.o OBJS_c += src/refresh/gl/hq2x.o @@ -354,6 +374,30 @@ else OBJS_c += src/refresh/gl/qgl/dynamic.o endif endif +ifdef CONFIG_VKPT_RENDERER + VKPT_SHADER_DIR=shader_vkpt + CFLAGS_c += -DREF_VKPT=1 -DUSE_REF=1 -DVID_REF='"vkpt"' -Isrc/refresh/vkpt/ -DVKPT_SHADER_DIR='"$(VKPT_SHADER_DIR)"' + LDFLAGS +=-lvulkan + OBJS_c += src/refresh/vkpt/main.o + OBJS_c += src/refresh/vkpt/textures.o + OBJS_c += src/refresh/vkpt/draw.o + OBJS_c += src/refresh/vkpt/matrix.o + OBJS_c += src/refresh/vkpt/models.o + OBJS_c += src/refresh/vkpt/path_tracer.o + OBJS_c += src/refresh/vkpt/vk_util.o + OBJS_c += src/refresh/vkpt/bsp_mesh.o + OBJS_c += src/refresh/vkpt/uniform_buffer.o + OBJS_c += src/refresh/vkpt/vertex_buffer.o + OBJS_c += src/refresh/vkpt/light_hierarchy.o + OBJS_c += src/refresh/vkpt/asvgf.o + OBJS_c += src/refresh/vkpt/stb.o + OBJS_c += src/refresh/vkpt/profiler.o + + VKPT_SHADER_SRC = $(shell find src/refresh/vkpt/shader -type f | egrep '\.(vert|frag|geom|rchit|rgen|rmiss|rcall|comp)$$' | sed s!.*/!!) + VKPT_SHADER_HDR = $(shell find src/refresh/vkpt/shader -type f | egrep '\.(h|glsl)$$') + VKPT_SHADER_SPV = $(VKPT_SHADER_SRC:%=$(VKPT_SHADER_DIR)/%.spv) + +endif CONFIG_DEFAULT_MODELIST ?= 640x480 800x600 1024x768 CONFIG_DEFAULT_GEOMETRY ?= 640x480 @@ -366,6 +410,14 @@ ifndef CONFIG_SOFTWARE_RENDERER endif endif +ifdef CONFIG_NO_BINDLESS_TEXTURES + CFLAGS_c += -DNO_BINDLESS_TEXTURES=1 +endif + +ifdef CONFIG_SMALL_GPU + CFLAGS_c += -DUSE_SMALL_GPU=1 +endif + ifndef CONFIG_NO_TGA CFLAGS_c += -DUSE_TGA=1 endif @@ -454,7 +506,8 @@ ifdef CONFIG_WINDOWS ifdef CONFIG_SOFTWARE_RENDERER OBJS_c += src/windows/swimp.o - else + endif + ifdef CONFIG_GL_RENDERER OBJS_c += src/windows/glimp.o OBJS_c += src/windows/wgl.o endif @@ -498,7 +551,8 @@ else ifdef CONFIG_SOFTWARE_RENDERER OBJS_c += src/unix/sdl/swimp.o - else + endif + ifdef CONFIG_GL_RENDERER OBJS_c += src/unix/sdl/glimp.o endif @@ -507,7 +561,7 @@ else X11_LIBS ?= -lX11 CFLAGS_c += -DUSE_X11=1 $(X11_CFLAGS) LIBS_c += $(X11_LIBS) - ifndef CONFIG_SOFTWARE_RENDERER + ifdef CONFIG_GL_RENDERER OBJS_c += src/unix/sdl/glx.o endif endif @@ -575,6 +629,10 @@ ifdef CONFIG_DEBUG CFLAGS_s += -D_DEBUG endif +ifdef CONFIG_GL_DEBUG + CFLAGS_c += -D_GL_DEBUG +endif + ifeq ($(CPU),x86) OBJS_c += src/common/x86/fpu.o OBJS_s += src/common/x86/fpu.o @@ -591,10 +649,12 @@ ifdef CONFIG_WINDOWS TARG_s := q2proded.exe TARG_c := q2pro.exe TARG_g := game$(CPU).dll + TARG_t := else - TARG_s := q2proded - TARG_c := q2pro + TARG_s := vkptded + TARG_c := q2vkpt TARG_g := game$(CPU).so + TARG_t := endif all: $(TARG_s) $(TARG_c) $(TARG_g) @@ -626,6 +686,24 @@ DEPS_s := $(OBJS_s:.o=.d) DEPS_c := $(OBJS_c:.o=.d) DEPS_g := $(OBJS_g:.o=.d) + +ifdef CONFIG_VKPT_RENDERER +$(TARG_c): $(VKPT_SHADER_SPV) +compile_shaders: $(VKPT_SHADER_SPV) + +CONFIG_GLSLANGVALIDATOR ?= glslangValidator + +$(VKPT_SHADER_DIR)/%.spv: src/refresh/vkpt/shader/% $(VKPT_SHADER_HDR) + $(E) [GLSL] $@ + $(Q)$(MKDIR) $(@D) + 
$(Q)$(CONFIG_GLSLANGVALIDATOR) \ + --target-env vulkan1.1 \ + -DVKPT_SHADER \ + -DSHADER_STAGE_$$(echo $< | sed 's/.*\.//' | tr 'a-z' 'A-Z') \ + -V $< -o $@ | sed 1d + +endif + -include $(DEPS_s) -include $(DEPS_c) -include $(DEPS_g) @@ -633,6 +711,7 @@ DEPS_g := $(OBJS_g:.o=.d) clean: $(E) [CLEAN] $(Q)$(RM) $(TARG_s) $(TARG_c) $(TARG_g) + $(Q)$(RM) $(VKPT_SHADER_SPV) $(Q)$(RMDIR) $(BUILD_s) $(BUILD_c) $(BUILD_g) strip: $(TARG_s) $(TARG_c) $(TARG_g) diff --git a/README b/README index 026f26b..c92951e 100644 --- a/README +++ b/README @@ -1,8 +1,13 @@ -Welcome to Q2PRO, an enhanced, multiplayer oriented Quake 2 client and server. +Welcome to Q2VKPT, a Quake II engine with real-time path tracing. This client +implements fully dynamic illumination without precomputation supporting area +light sources, reflections, soft shadows, and indirect illumination. This +client is a port of our real-time path tracer vkpt and is based on the Quake II +engine Q2PRO. -For building Q2PRO, please consult the INSTALL file. +This client requires a high-end GPU supporting the Vulkan extension +VK_NV_ray_tracing. -For information on using and configuring Q2PRO, please refer to client and -server manuals available in doc/ subdirectory. +Please refer to our project page for more details. + +Project homepage: http://brechpunkt.de/q2vkpt -Project homepage: http://skuller.net/q2pro/ diff --git a/VC/inc/AL/al.h b/VC/inc/AL/al.h new file mode 100644 index 0000000..413b383 --- /dev/null +++ b/VC/inc/AL/al.h @@ -0,0 +1,656 @@ +#ifndef AL_AL_H +#define AL_AL_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#ifndef AL_API + #if defined(AL_LIBTYPE_STATIC) + #define AL_API + #elif defined(_WIN32) + #define AL_API __declspec(dllimport) + #else + #define AL_API extern + #endif +#endif + +#if defined(_WIN32) + #define AL_APIENTRY __cdecl +#else + #define AL_APIENTRY +#endif + + +/** Deprecated macro. */ +#define OPENAL +#define ALAPI AL_API +#define ALAPIENTRY AL_APIENTRY +#define AL_INVALID (-1) +#define AL_ILLEGAL_ENUM AL_INVALID_ENUM +#define AL_ILLEGAL_COMMAND AL_INVALID_OPERATION + +/** Supported AL version. */ +#define AL_VERSION_1_0 +#define AL_VERSION_1_1 + +/** 8-bit boolean */ +typedef char ALboolean; + +/** character */ +typedef char ALchar; + +/** signed 8-bit 2's complement integer */ +typedef signed char ALbyte; + +/** unsigned 8-bit integer */ +typedef unsigned char ALubyte; + +/** signed 16-bit 2's complement integer */ +typedef short ALshort; + +/** unsigned 16-bit integer */ +typedef unsigned short ALushort; + +/** signed 32-bit 2's complement integer */ +typedef int ALint; + +/** unsigned 32-bit integer */ +typedef unsigned int ALuint; + +/** non-negative 32-bit binary integer size */ +typedef int ALsizei; + +/** enumerated 32-bit value */ +typedef int ALenum; + +/** 32-bit IEEE754 floating-point */ +typedef float ALfloat; + +/** 64-bit IEEE754 floating-point */ +typedef double ALdouble; + +/** void type (for opaque pointers only) */ +typedef void ALvoid; + + +/* Enumerant values begin at column 50. No tabs. */ + +/** "no distance model" or "no buffer" */ +#define AL_NONE 0 + +/** Boolean False. */ +#define AL_FALSE 0 + +/** Boolean True. */ +#define AL_TRUE 1 + + +/** + * Relative source. + * Type: ALboolean + * Range: [AL_TRUE, AL_FALSE] + * Default: AL_FALSE + * + * Specifies if the Source has relative coordinates. + */ +#define AL_SOURCE_RELATIVE 0x202 + + +/** + * Inner cone angle, in degrees. 
+ * Type: ALint, ALfloat + * Range: [0 - 360] + * Default: 360 + * + * The angle covered by the inner cone, where the source will not attenuate. + */ +#define AL_CONE_INNER_ANGLE 0x1001 + +/** + * Outer cone angle, in degrees. + * Range: [0 - 360] + * Default: 360 + * + * The angle covered by the outer cone, where the source will be fully + * attenuated. + */ +#define AL_CONE_OUTER_ANGLE 0x1002 + +/** + * Source pitch. + * Type: ALfloat + * Range: [0.5 - 2.0] + * Default: 1.0 + * + * A multiplier for the frequency (sample rate) of the source's buffer. + */ +#define AL_PITCH 0x1003 + +/** + * Source or listener position. + * Type: ALfloat[3], ALint[3] + * Default: {0, 0, 0} + * + * The source or listener location in three dimensional space. + * + * OpenAL, like OpenGL, uses a right handed coordinate system, where in a + * frontal default view X (thumb) points right, Y points up (index finger), and + * Z points towards the viewer/camera (middle finger). + * + * To switch from a left handed coordinate system, flip the sign on the Z + * coordinate. + */ +#define AL_POSITION 0x1004 + +/** + * Source direction. + * Type: ALfloat[3], ALint[3] + * Default: {0, 0, 0} + * + * Specifies the current direction in local space. + * A zero-length vector specifies an omni-directional source (cone is ignored). + */ +#define AL_DIRECTION 0x1005 + +/** + * Source or listener velocity. + * Type: ALfloat[3], ALint[3] + * Default: {0, 0, 0} + * + * Specifies the current velocity in local space. + */ +#define AL_VELOCITY 0x1006 + +/** + * Source looping. + * Type: ALboolean + * Range: [AL_TRUE, AL_FALSE] + * Default: AL_FALSE + * + * Specifies whether source is looping. + */ +#define AL_LOOPING 0x1007 + +/** + * Source buffer. + * Type: ALuint + * Range: any valid Buffer. + * + * Specifies the buffer to provide sound samples. + */ +#define AL_BUFFER 0x1009 + +/** + * Source or listener gain. + * Type: ALfloat + * Range: [0.0 - ] + * + * A value of 1.0 means unattenuated. Each division by 2 equals an attenuation + * of about -6dB. Each multiplicaton by 2 equals an amplification of about + * +6dB. + * + * A value of 0.0 is meaningless with respect to a logarithmic scale; it is + * silent. + */ +#define AL_GAIN 0x100A + +/** + * Minimum source gain. + * Type: ALfloat + * Range: [0.0 - 1.0] + * + * The minimum gain allowed for a source, after distance and cone attenation is + * applied (if applicable). + */ +#define AL_MIN_GAIN 0x100D + +/** + * Maximum source gain. + * Type: ALfloat + * Range: [0.0 - 1.0] + * + * The maximum gain allowed for a source, after distance and cone attenation is + * applied (if applicable). + */ +#define AL_MAX_GAIN 0x100E + +/** + * Listener orientation. + * Type: ALfloat[6] + * Default: {0.0, 0.0, -1.0, 0.0, 1.0, 0.0} + * + * Effectively two three dimensional vectors. The first vector is the front (or + * "at") and the second is the top (or "up"). + * + * Both vectors are in local space. + */ +#define AL_ORIENTATION 0x100F + +/** + * Source state (query only). + * Type: ALint + * Range: [AL_INITIAL, AL_PLAYING, AL_PAUSED, AL_STOPPED] + */ +#define AL_SOURCE_STATE 0x1010 + +/** Source state value. */ +#define AL_INITIAL 0x1011 +#define AL_PLAYING 0x1012 +#define AL_PAUSED 0x1013 +#define AL_STOPPED 0x1014 + +/** + * Source Buffer Queue size (query only). + * Type: ALint + * + * The number of buffers queued using alSourceQueueBuffers, minus the buffers + * removed with alSourceUnqueueBuffers. 
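 *
 * For illustration, a streaming loop typically drains processed buffers and
 * refills them roughly like this (sketch only; refill_and_queue is a
 * hypothetical helper, not an OpenAL function):
 *
 *     ALint done = 0;
 *     alGetSourcei(source, AL_BUFFERS_PROCESSED, &done);
 *     while (done-- > 0) {
 *         ALuint buf;
 *         alSourceUnqueueBuffers(source, 1, &buf);
 *         refill_and_queue(source, buf);  // decode more audio, then
 *                                         // alSourceQueueBuffers(source, 1, &buf)
 *     }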
+ */ +#define AL_BUFFERS_QUEUED 0x1015 + +/** + * Source Buffer Queue processed count (query only). + * Type: ALint + * + * The number of queued buffers that have been fully processed, and can be + * removed with alSourceUnqueueBuffers. + * + * Looping sources will never fully process buffers because they will be set to + * play again for when the source loops. + */ +#define AL_BUFFERS_PROCESSED 0x1016 + +/** + * Source reference distance. + * Type: ALfloat + * Range: [0.0 - ] + * Default: 1.0 + * + * The distance in units that no attenuation occurs. + * + * At 0.0, no distance attenuation ever occurs on non-linear attenuation models. + */ +#define AL_REFERENCE_DISTANCE 0x1020 + +/** + * Source rolloff factor. + * Type: ALfloat + * Range: [0.0 - ] + * Default: 1.0 + * + * Multiplier to exaggerate or diminish distance attenuation. + * + * At 0.0, no distance attenuation ever occurs. + */ +#define AL_ROLLOFF_FACTOR 0x1021 + +/** + * Outer cone gain. + * Type: ALfloat + * Range: [0.0 - 1.0] + * Default: 0.0 + * + * The gain attenuation applied when the listener is outside of the source's + * outer cone. + */ +#define AL_CONE_OUTER_GAIN 0x1022 + +/** + * Source maximum distance. + * Type: ALfloat + * Range: [0.0 - ] + * Default: +inf + * + * The distance above which the source is not attenuated any further with a + * clamped distance model, or where attenuation reaches 0.0 gain for linear + * distance models with a default rolloff factor. + */ +#define AL_MAX_DISTANCE 0x1023 + +/** Source buffer position, in seconds */ +#define AL_SEC_OFFSET 0x1024 +/** Source buffer position, in sample frames */ +#define AL_SAMPLE_OFFSET 0x1025 +/** Source buffer position, in bytes */ +#define AL_BYTE_OFFSET 0x1026 + +/** + * Source type (query only). + * Type: ALint + * Range: [AL_STATIC, AL_STREAMING, AL_UNDETERMINED] + * + * A Source is Static if a Buffer has been attached using AL_BUFFER. + * + * A Source is Streaming if one or more Buffers have been attached using + * alSourceQueueBuffers. + * + * A Source is Undetermined when it has the NULL buffer attached using + * AL_BUFFER. + */ +#define AL_SOURCE_TYPE 0x1027 + +/** Source type value. */ +#define AL_STATIC 0x1028 +#define AL_STREAMING 0x1029 +#define AL_UNDETERMINED 0x1030 + +/** Buffer format specifier. */ +#define AL_FORMAT_MONO8 0x1100 +#define AL_FORMAT_MONO16 0x1101 +#define AL_FORMAT_STEREO8 0x1102 +#define AL_FORMAT_STEREO16 0x1103 + +/** Buffer frequency (query only). */ +#define AL_FREQUENCY 0x2001 +/** Buffer bits per sample (query only). */ +#define AL_BITS 0x2002 +/** Buffer channel count (query only). */ +#define AL_CHANNELS 0x2003 +/** Buffer data size (query only). */ +#define AL_SIZE 0x2004 + +/** + * Buffer state. + * + * Not for public use. + */ +#define AL_UNUSED 0x2010 +#define AL_PENDING 0x2011 +#define AL_PROCESSED 0x2012 + + +/** No error. */ +#define AL_NO_ERROR 0 + +/** Invalid name paramater passed to AL call. */ +#define AL_INVALID_NAME 0xA001 + +/** Invalid enum parameter passed to AL call. */ +#define AL_INVALID_ENUM 0xA002 + +/** Invalid value parameter passed to AL call. */ +#define AL_INVALID_VALUE 0xA003 + +/** Illegal AL call. */ +#define AL_INVALID_OPERATION 0xA004 + +/** Not enough memory. */ +#define AL_OUT_OF_MEMORY 0xA005 + + +/** Context string: Vendor ID. */ +#define AL_VENDOR 0xB001 +/** Context string: Version. */ +#define AL_VERSION 0xB002 +/** Context string: Renderer ID. */ +#define AL_RENDERER 0xB003 +/** Context string: Space-separated extension list. 
*/ +#define AL_EXTENSIONS 0xB004 + + +/** + * Doppler scale. + * Type: ALfloat + * Range: [0.0 - ] + * Default: 1.0 + * + * Scale for source and listener velocities. + */ +#define AL_DOPPLER_FACTOR 0xC000 +AL_API void AL_APIENTRY alDopplerFactor(ALfloat value); + +/** + * Doppler velocity (deprecated). + * + * A multiplier applied to the Speed of Sound. + */ +#define AL_DOPPLER_VELOCITY 0xC001 +AL_API void AL_APIENTRY alDopplerVelocity(ALfloat value); + +/** + * Speed of Sound, in units per second. + * Type: ALfloat + * Range: [0.0001 - ] + * Default: 343.3 + * + * The speed at which sound waves are assumed to travel, when calculating the + * doppler effect. + */ +#define AL_SPEED_OF_SOUND 0xC003 +AL_API void AL_APIENTRY alSpeedOfSound(ALfloat value); + +/** + * Distance attenuation model. + * Type: ALint + * Range: [AL_NONE, AL_INVERSE_DISTANCE, AL_INVERSE_DISTANCE_CLAMPED, + * AL_LINEAR_DISTANCE, AL_LINEAR_DISTANCE_CLAMPED, + * AL_EXPONENT_DISTANCE, AL_EXPONENT_DISTANCE_CLAMPED] + * Default: AL_INVERSE_DISTANCE_CLAMPED + * + * The model by which sources attenuate with distance. + * + * None - No distance attenuation. + * Inverse - Doubling the distance halves the source gain. + * Linear - Linear gain scaling between the reference and max distances. + * Exponent - Exponential gain dropoff. + * + * Clamped variations work like the non-clamped counterparts, except the + * distance calculated is clamped between the reference and max distances. + */ +#define AL_DISTANCE_MODEL 0xD000 +AL_API void AL_APIENTRY alDistanceModel(ALenum distanceModel); + +/** Distance model value. */ +#define AL_INVERSE_DISTANCE 0xD001 +#define AL_INVERSE_DISTANCE_CLAMPED 0xD002 +#define AL_LINEAR_DISTANCE 0xD003 +#define AL_LINEAR_DISTANCE_CLAMPED 0xD004 +#define AL_EXPONENT_DISTANCE 0xD005 +#define AL_EXPONENT_DISTANCE_CLAMPED 0xD006 + +/** Renderer State management. */ +AL_API void AL_APIENTRY alEnable(ALenum capability); +AL_API void AL_APIENTRY alDisable(ALenum capability); +AL_API ALboolean AL_APIENTRY alIsEnabled(ALenum capability); + +/** State retrieval. */ +AL_API const ALchar* AL_APIENTRY alGetString(ALenum param); +AL_API void AL_APIENTRY alGetBooleanv(ALenum param, ALboolean *values); +AL_API void AL_APIENTRY alGetIntegerv(ALenum param, ALint *values); +AL_API void AL_APIENTRY alGetFloatv(ALenum param, ALfloat *values); +AL_API void AL_APIENTRY alGetDoublev(ALenum param, ALdouble *values); +AL_API ALboolean AL_APIENTRY alGetBoolean(ALenum param); +AL_API ALint AL_APIENTRY alGetInteger(ALenum param); +AL_API ALfloat AL_APIENTRY alGetFloat(ALenum param); +AL_API ALdouble AL_APIENTRY alGetDouble(ALenum param); + +/** + * Error retrieval. + * + * Obtain the first error generated in the AL context since the last check. + */ +AL_API ALenum AL_APIENTRY alGetError(void); + +/** + * Extension support. + * + * Query for the presence of an extension, and obtain any appropriate function + * pointers and enum values. 
+ */ +AL_API ALboolean AL_APIENTRY alIsExtensionPresent(const ALchar *extname); +AL_API void* AL_APIENTRY alGetProcAddress(const ALchar *fname); +AL_API ALenum AL_APIENTRY alGetEnumValue(const ALchar *ename); + + +/** Set Listener parameters */ +AL_API void AL_APIENTRY alListenerf(ALenum param, ALfloat value); +AL_API void AL_APIENTRY alListener3f(ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +AL_API void AL_APIENTRY alListenerfv(ALenum param, const ALfloat *values); +AL_API void AL_APIENTRY alListeneri(ALenum param, ALint value); +AL_API void AL_APIENTRY alListener3i(ALenum param, ALint value1, ALint value2, ALint value3); +AL_API void AL_APIENTRY alListeneriv(ALenum param, const ALint *values); + +/** Get Listener parameters */ +AL_API void AL_APIENTRY alGetListenerf(ALenum param, ALfloat *value); +AL_API void AL_APIENTRY alGetListener3f(ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +AL_API void AL_APIENTRY alGetListenerfv(ALenum param, ALfloat *values); +AL_API void AL_APIENTRY alGetListeneri(ALenum param, ALint *value); +AL_API void AL_APIENTRY alGetListener3i(ALenum param, ALint *value1, ALint *value2, ALint *value3); +AL_API void AL_APIENTRY alGetListeneriv(ALenum param, ALint *values); + + +/** Create Source objects. */ +AL_API void AL_APIENTRY alGenSources(ALsizei n, ALuint *sources); +/** Delete Source objects. */ +AL_API void AL_APIENTRY alDeleteSources(ALsizei n, const ALuint *sources); +/** Verify a handle is a valid Source. */ +AL_API ALboolean AL_APIENTRY alIsSource(ALuint source); + +/** Set Source parameters. */ +AL_API void AL_APIENTRY alSourcef(ALuint source, ALenum param, ALfloat value); +AL_API void AL_APIENTRY alSource3f(ALuint source, ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +AL_API void AL_APIENTRY alSourcefv(ALuint source, ALenum param, const ALfloat *values); +AL_API void AL_APIENTRY alSourcei(ALuint source, ALenum param, ALint value); +AL_API void AL_APIENTRY alSource3i(ALuint source, ALenum param, ALint value1, ALint value2, ALint value3); +AL_API void AL_APIENTRY alSourceiv(ALuint source, ALenum param, const ALint *values); + +/** Get Source parameters. 
*/ +AL_API void AL_APIENTRY alGetSourcef(ALuint source, ALenum param, ALfloat *value); +AL_API void AL_APIENTRY alGetSource3f(ALuint source, ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +AL_API void AL_APIENTRY alGetSourcefv(ALuint source, ALenum param, ALfloat *values); +AL_API void AL_APIENTRY alGetSourcei(ALuint source, ALenum param, ALint *value); +AL_API void AL_APIENTRY alGetSource3i(ALuint source, ALenum param, ALint *value1, ALint *value2, ALint *value3); +AL_API void AL_APIENTRY alGetSourceiv(ALuint source, ALenum param, ALint *values); + + +/** Play, replay, or resume (if paused) a list of Sources */ +AL_API void AL_APIENTRY alSourcePlayv(ALsizei n, const ALuint *sources); +/** Stop a list of Sources */ +AL_API void AL_APIENTRY alSourceStopv(ALsizei n, const ALuint *sources); +/** Rewind a list of Sources */ +AL_API void AL_APIENTRY alSourceRewindv(ALsizei n, const ALuint *sources); +/** Pause a list of Sources */ +AL_API void AL_APIENTRY alSourcePausev(ALsizei n, const ALuint *sources); + +/** Play, replay, or resume a Source */ +AL_API void AL_APIENTRY alSourcePlay(ALuint source); +/** Stop a Source */ +AL_API void AL_APIENTRY alSourceStop(ALuint source); +/** Rewind a Source (set playback postiton to beginning) */ +AL_API void AL_APIENTRY alSourceRewind(ALuint source); +/** Pause a Source */ +AL_API void AL_APIENTRY alSourcePause(ALuint source); + +/** Queue buffers onto a source */ +AL_API void AL_APIENTRY alSourceQueueBuffers(ALuint source, ALsizei nb, const ALuint *buffers); +/** Unqueue processed buffers from a source */ +AL_API void AL_APIENTRY alSourceUnqueueBuffers(ALuint source, ALsizei nb, ALuint *buffers); + + +/** Create Buffer objects */ +AL_API void AL_APIENTRY alGenBuffers(ALsizei n, ALuint *buffers); +/** Delete Buffer objects */ +AL_API void AL_APIENTRY alDeleteBuffers(ALsizei n, const ALuint *buffers); +/** Verify a handle is a valid Buffer */ +AL_API ALboolean AL_APIENTRY alIsBuffer(ALuint buffer); + +/** Specifies the data to be copied into a buffer */ +AL_API void AL_APIENTRY alBufferData(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq); + +/** Set Buffer parameters, */ +AL_API void AL_APIENTRY alBufferf(ALuint buffer, ALenum param, ALfloat value); +AL_API void AL_APIENTRY alBuffer3f(ALuint buffer, ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +AL_API void AL_APIENTRY alBufferfv(ALuint buffer, ALenum param, const ALfloat *values); +AL_API void AL_APIENTRY alBufferi(ALuint buffer, ALenum param, ALint value); +AL_API void AL_APIENTRY alBuffer3i(ALuint buffer, ALenum param, ALint value1, ALint value2, ALint value3); +AL_API void AL_APIENTRY alBufferiv(ALuint buffer, ALenum param, const ALint *values); + +/** Get Buffer parameters. */ +AL_API void AL_APIENTRY alGetBufferf(ALuint buffer, ALenum param, ALfloat *value); +AL_API void AL_APIENTRY alGetBuffer3f(ALuint buffer, ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +AL_API void AL_APIENTRY alGetBufferfv(ALuint buffer, ALenum param, ALfloat *values); +AL_API void AL_APIENTRY alGetBufferi(ALuint buffer, ALenum param, ALint *value); +AL_API void AL_APIENTRY alGetBuffer3i(ALuint buffer, ALenum param, ALint *value1, ALint *value2, ALint *value3); +AL_API void AL_APIENTRY alGetBufferiv(ALuint buffer, ALenum param, ALint *values); + +/** Pointer-to-function type, useful for dynamically getting AL entry points. 
*/ +typedef void (AL_APIENTRY *LPALENABLE)(ALenum capability); +typedef void (AL_APIENTRY *LPALDISABLE)(ALenum capability); +typedef ALboolean (AL_APIENTRY *LPALISENABLED)(ALenum capability); +typedef const ALchar* (AL_APIENTRY *LPALGETSTRING)(ALenum param); +typedef void (AL_APIENTRY *LPALGETBOOLEANV)(ALenum param, ALboolean *values); +typedef void (AL_APIENTRY *LPALGETINTEGERV)(ALenum param, ALint *values); +typedef void (AL_APIENTRY *LPALGETFLOATV)(ALenum param, ALfloat *values); +typedef void (AL_APIENTRY *LPALGETDOUBLEV)(ALenum param, ALdouble *values); +typedef ALboolean (AL_APIENTRY *LPALGETBOOLEAN)(ALenum param); +typedef ALint (AL_APIENTRY *LPALGETINTEGER)(ALenum param); +typedef ALfloat (AL_APIENTRY *LPALGETFLOAT)(ALenum param); +typedef ALdouble (AL_APIENTRY *LPALGETDOUBLE)(ALenum param); +typedef ALenum (AL_APIENTRY *LPALGETERROR)(void); +typedef ALboolean (AL_APIENTRY *LPALISEXTENSIONPRESENT)(const ALchar *extname); +typedef void* (AL_APIENTRY *LPALGETPROCADDRESS)(const ALchar *fname); +typedef ALenum (AL_APIENTRY *LPALGETENUMVALUE)(const ALchar *ename); +typedef void (AL_APIENTRY *LPALLISTENERF)(ALenum param, ALfloat value); +typedef void (AL_APIENTRY *LPALLISTENER3F)(ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +typedef void (AL_APIENTRY *LPALLISTENERFV)(ALenum param, const ALfloat *values); +typedef void (AL_APIENTRY *LPALLISTENERI)(ALenum param, ALint value); +typedef void (AL_APIENTRY *LPALLISTENER3I)(ALenum param, ALint value1, ALint value2, ALint value3); +typedef void (AL_APIENTRY *LPALLISTENERIV)(ALenum param, const ALint *values); +typedef void (AL_APIENTRY *LPALGETLISTENERF)(ALenum param, ALfloat *value); +typedef void (AL_APIENTRY *LPALGETLISTENER3F)(ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +typedef void (AL_APIENTRY *LPALGETLISTENERFV)(ALenum param, ALfloat *values); +typedef void (AL_APIENTRY *LPALGETLISTENERI)(ALenum param, ALint *value); +typedef void (AL_APIENTRY *LPALGETLISTENER3I)(ALenum param, ALint *value1, ALint *value2, ALint *value3); +typedef void (AL_APIENTRY *LPALGETLISTENERIV)(ALenum param, ALint *values); +typedef void (AL_APIENTRY *LPALGENSOURCES)(ALsizei n, ALuint *sources); +typedef void (AL_APIENTRY *LPALDELETESOURCES)(ALsizei n, const ALuint *sources); +typedef ALboolean (AL_APIENTRY *LPALISSOURCE)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCEF)(ALuint source, ALenum param, ALfloat value); +typedef void (AL_APIENTRY *LPALSOURCE3F)(ALuint source, ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +typedef void (AL_APIENTRY *LPALSOURCEFV)(ALuint source, ALenum param, const ALfloat *values); +typedef void (AL_APIENTRY *LPALSOURCEI)(ALuint source, ALenum param, ALint value); +typedef void (AL_APIENTRY *LPALSOURCE3I)(ALuint source, ALenum param, ALint value1, ALint value2, ALint value3); +typedef void (AL_APIENTRY *LPALSOURCEIV)(ALuint source, ALenum param, const ALint *values); +typedef void (AL_APIENTRY *LPALGETSOURCEF)(ALuint source, ALenum param, ALfloat *value); +typedef void (AL_APIENTRY *LPALGETSOURCE3F)(ALuint source, ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +typedef void (AL_APIENTRY *LPALGETSOURCEFV)(ALuint source, ALenum param, ALfloat *values); +typedef void (AL_APIENTRY *LPALGETSOURCEI)(ALuint source, ALenum param, ALint *value); +typedef void (AL_APIENTRY *LPALGETSOURCE3I)(ALuint source, ALenum param, ALint *value1, ALint *value2, ALint *value3); +typedef void (AL_APIENTRY *LPALGETSOURCEIV)(ALuint source, ALenum param, ALint *values); +typedef 
void (AL_APIENTRY *LPALSOURCEPLAYV)(ALsizei n, const ALuint *sources); +typedef void (AL_APIENTRY *LPALSOURCESTOPV)(ALsizei n, const ALuint *sources); +typedef void (AL_APIENTRY *LPALSOURCEREWINDV)(ALsizei n, const ALuint *sources); +typedef void (AL_APIENTRY *LPALSOURCEPAUSEV)(ALsizei n, const ALuint *sources); +typedef void (AL_APIENTRY *LPALSOURCEPLAY)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCESTOP)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCEREWIND)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCEPAUSE)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCEQUEUEBUFFERS)(ALuint source, ALsizei nb, const ALuint *buffers); +typedef void (AL_APIENTRY *LPALSOURCEUNQUEUEBUFFERS)(ALuint source, ALsizei nb, ALuint *buffers); +typedef void (AL_APIENTRY *LPALGENBUFFERS)(ALsizei n, ALuint *buffers); +typedef void (AL_APIENTRY *LPALDELETEBUFFERS)(ALsizei n, const ALuint *buffers); +typedef ALboolean (AL_APIENTRY *LPALISBUFFER)(ALuint buffer); +typedef void (AL_APIENTRY *LPALBUFFERDATA)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq); +typedef void (AL_APIENTRY *LPALBUFFERF)(ALuint buffer, ALenum param, ALfloat value); +typedef void (AL_APIENTRY *LPALBUFFER3F)(ALuint buffer, ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +typedef void (AL_APIENTRY *LPALBUFFERFV)(ALuint buffer, ALenum param, const ALfloat *values); +typedef void (AL_APIENTRY *LPALBUFFERI)(ALuint buffer, ALenum param, ALint value); +typedef void (AL_APIENTRY *LPALBUFFER3I)(ALuint buffer, ALenum param, ALint value1, ALint value2, ALint value3); +typedef void (AL_APIENTRY *LPALBUFFERIV)(ALuint buffer, ALenum param, const ALint *values); +typedef void (AL_APIENTRY *LPALGETBUFFERF)(ALuint buffer, ALenum param, ALfloat *value); +typedef void (AL_APIENTRY *LPALGETBUFFER3F)(ALuint buffer, ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +typedef void (AL_APIENTRY *LPALGETBUFFERFV)(ALuint buffer, ALenum param, ALfloat *values); +typedef void (AL_APIENTRY *LPALGETBUFFERI)(ALuint buffer, ALenum param, ALint *value); +typedef void (AL_APIENTRY *LPALGETBUFFER3I)(ALuint buffer, ALenum param, ALint *value1, ALint *value2, ALint *value3); +typedef void (AL_APIENTRY *LPALGETBUFFERIV)(ALuint buffer, ALenum param, ALint *values); +typedef void (AL_APIENTRY *LPALDOPPLERFACTOR)(ALfloat value); +typedef void (AL_APIENTRY *LPALDOPPLERVELOCITY)(ALfloat value); +typedef void (AL_APIENTRY *LPALSPEEDOFSOUND)(ALfloat value); +typedef void (AL_APIENTRY *LPALDISTANCEMODEL)(ALenum distanceModel); + +#if defined(__cplusplus) +} /* extern "C" */ +#endif + +#endif /* AL_AL_H */ diff --git a/VC/inc/AL/alc.h b/VC/inc/AL/alc.h new file mode 100644 index 0000000..294e8b3 --- /dev/null +++ b/VC/inc/AL/alc.h @@ -0,0 +1,237 @@ +#ifndef AL_ALC_H +#define AL_ALC_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#ifndef ALC_API + #if defined(AL_LIBTYPE_STATIC) + #define ALC_API + #elif defined(_WIN32) + #define ALC_API __declspec(dllimport) + #else + #define ALC_API extern + #endif +#endif + +#if defined(_WIN32) + #define ALC_APIENTRY __cdecl +#else + #define ALC_APIENTRY +#endif + + +/** Deprecated macro. */ +#define ALCAPI ALC_API +#define ALCAPIENTRY ALC_APIENTRY +#define ALC_INVALID 0 + +/** Supported ALC version? 
*/ +#define ALC_VERSION_0_1 1 + +/** Opaque device handle */ +typedef struct ALCdevice_struct ALCdevice; +/** Opaque context handle */ +typedef struct ALCcontext_struct ALCcontext; + +/** 8-bit boolean */ +typedef char ALCboolean; + +/** character */ +typedef char ALCchar; + +/** signed 8-bit 2's complement integer */ +typedef signed char ALCbyte; + +/** unsigned 8-bit integer */ +typedef unsigned char ALCubyte; + +/** signed 16-bit 2's complement integer */ +typedef short ALCshort; + +/** unsigned 16-bit integer */ +typedef unsigned short ALCushort; + +/** signed 32-bit 2's complement integer */ +typedef int ALCint; + +/** unsigned 32-bit integer */ +typedef unsigned int ALCuint; + +/** non-negative 32-bit binary integer size */ +typedef int ALCsizei; + +/** enumerated 32-bit value */ +typedef int ALCenum; + +/** 32-bit IEEE754 floating-point */ +typedef float ALCfloat; + +/** 64-bit IEEE754 floating-point */ +typedef double ALCdouble; + +/** void type (for opaque pointers only) */ +typedef void ALCvoid; + + +/* Enumerant values begin at column 50. No tabs. */ + +/** Boolean False. */ +#define ALC_FALSE 0 + +/** Boolean True. */ +#define ALC_TRUE 1 + +/** Context attribute: Hz. */ +#define ALC_FREQUENCY 0x1007 + +/** Context attribute: Hz. */ +#define ALC_REFRESH 0x1008 + +/** Context attribute: AL_TRUE or AL_FALSE. */ +#define ALC_SYNC 0x1009 + +/** Context attribute: requested Mono (3D) Sources. */ +#define ALC_MONO_SOURCES 0x1010 + +/** Context attribute: requested Stereo Sources. */ +#define ALC_STEREO_SOURCES 0x1011 + +/** No error. */ +#define ALC_NO_ERROR 0 + +/** Invalid device handle. */ +#define ALC_INVALID_DEVICE 0xA001 + +/** Invalid context handle. */ +#define ALC_INVALID_CONTEXT 0xA002 + +/** Invalid enum parameter passed to an ALC call. */ +#define ALC_INVALID_ENUM 0xA003 + +/** Invalid value parameter passed to an ALC call. */ +#define ALC_INVALID_VALUE 0xA004 + +/** Out of memory. */ +#define ALC_OUT_OF_MEMORY 0xA005 + + +/** Runtime ALC version. */ +#define ALC_MAJOR_VERSION 0x1000 +#define ALC_MINOR_VERSION 0x1001 + +/** Context attribute list properties. */ +#define ALC_ATTRIBUTES_SIZE 0x1002 +#define ALC_ALL_ATTRIBUTES 0x1003 + +/** String for the default device specifier. */ +#define ALC_DEFAULT_DEVICE_SPECIFIER 0x1004 +/** + * String for the given device's specifier. + * + * If device handle is NULL, it is instead a null-char separated list of + * strings of known device specifiers (list ends with an empty string). + */ +#define ALC_DEVICE_SPECIFIER 0x1005 +/** String for space-separated list of ALC extensions. */ +#define ALC_EXTENSIONS 0x1006 + + +/** Capture extension */ +#define ALC_EXT_CAPTURE 1 +/** + * String for the given capture device's specifier. + * + * If device handle is NULL, it is instead a null-char separated list of + * strings of known capture device specifiers (list ends with an empty string). + */ +#define ALC_CAPTURE_DEVICE_SPECIFIER 0x310 +/** String for the default capture device specifier. */ +#define ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER 0x311 +/** Number of sample frames available for capture. */ +#define ALC_CAPTURE_SAMPLES 0x312 + + +/** Enumerate All extension */ +#define ALC_ENUMERATE_ALL_EXT 1 +/** String for the default extended device specifier. */ +#define ALC_DEFAULT_ALL_DEVICES_SPECIFIER 0x1012 +/** + * String for the given extended device's specifier. + * + * If device handle is NULL, it is instead a null-char separated list of + * strings of known extended device specifiers (list ends with an empty string). 
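 *
 * For illustration, a caller can walk such a null-char separated list like
 * this (sketch only, not part of the ALC API itself):
 *
 *     const ALCchar *s = alcGetString(NULL, ALC_ALL_DEVICES_SPECIFIER);
 *     while (s && *s) {
 *         printf("device: %s\n", s);  // needs <stdio.h>
 *         s += strlen(s) + 1;         // needs <string.h>; skip the embedded '\0'
 *     }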
+ */ +#define ALC_ALL_DEVICES_SPECIFIER 0x1013 + + +/** Context management. */ +ALC_API ALCcontext* ALC_APIENTRY alcCreateContext(ALCdevice *device, const ALCint* attrlist); +ALC_API ALCboolean ALC_APIENTRY alcMakeContextCurrent(ALCcontext *context); +ALC_API void ALC_APIENTRY alcProcessContext(ALCcontext *context); +ALC_API void ALC_APIENTRY alcSuspendContext(ALCcontext *context); +ALC_API void ALC_APIENTRY alcDestroyContext(ALCcontext *context); +ALC_API ALCcontext* ALC_APIENTRY alcGetCurrentContext(void); +ALC_API ALCdevice* ALC_APIENTRY alcGetContextsDevice(ALCcontext *context); + +/** Device management. */ +ALC_API ALCdevice* ALC_APIENTRY alcOpenDevice(const ALCchar *devicename); +ALC_API ALCboolean ALC_APIENTRY alcCloseDevice(ALCdevice *device); + + +/** + * Error support. + * + * Obtain the most recent Device error. + */ +ALC_API ALCenum ALC_APIENTRY alcGetError(ALCdevice *device); + +/** + * Extension support. + * + * Query for the presence of an extension, and obtain any appropriate + * function pointers and enum values. + */ +ALC_API ALCboolean ALC_APIENTRY alcIsExtensionPresent(ALCdevice *device, const ALCchar *extname); +ALC_API void* ALC_APIENTRY alcGetProcAddress(ALCdevice *device, const ALCchar *funcname); +ALC_API ALCenum ALC_APIENTRY alcGetEnumValue(ALCdevice *device, const ALCchar *enumname); + +/** Query function. */ +ALC_API const ALCchar* ALC_APIENTRY alcGetString(ALCdevice *device, ALCenum param); +ALC_API void ALC_APIENTRY alcGetIntegerv(ALCdevice *device, ALCenum param, ALCsizei size, ALCint *values); + +/** Capture function. */ +ALC_API ALCdevice* ALC_APIENTRY alcCaptureOpenDevice(const ALCchar *devicename, ALCuint frequency, ALCenum format, ALCsizei buffersize); +ALC_API ALCboolean ALC_APIENTRY alcCaptureCloseDevice(ALCdevice *device); +ALC_API void ALC_APIENTRY alcCaptureStart(ALCdevice *device); +ALC_API void ALC_APIENTRY alcCaptureStop(ALCdevice *device); +ALC_API void ALC_APIENTRY alcCaptureSamples(ALCdevice *device, ALCvoid *buffer, ALCsizei samples); + +/** Pointer-to-function type, useful for dynamically getting ALC entry points. 
*/ +typedef ALCcontext* (ALC_APIENTRY *LPALCCREATECONTEXT)(ALCdevice *device, const ALCint *attrlist); +typedef ALCboolean (ALC_APIENTRY *LPALCMAKECONTEXTCURRENT)(ALCcontext *context); +typedef void (ALC_APIENTRY *LPALCPROCESSCONTEXT)(ALCcontext *context); +typedef void (ALC_APIENTRY *LPALCSUSPENDCONTEXT)(ALCcontext *context); +typedef void (ALC_APIENTRY *LPALCDESTROYCONTEXT)(ALCcontext *context); +typedef ALCcontext* (ALC_APIENTRY *LPALCGETCURRENTCONTEXT)(void); +typedef ALCdevice* (ALC_APIENTRY *LPALCGETCONTEXTSDEVICE)(ALCcontext *context); +typedef ALCdevice* (ALC_APIENTRY *LPALCOPENDEVICE)(const ALCchar *devicename); +typedef ALCboolean (ALC_APIENTRY *LPALCCLOSEDEVICE)(ALCdevice *device); +typedef ALCenum (ALC_APIENTRY *LPALCGETERROR)(ALCdevice *device); +typedef ALCboolean (ALC_APIENTRY *LPALCISEXTENSIONPRESENT)(ALCdevice *device, const ALCchar *extname); +typedef void* (ALC_APIENTRY *LPALCGETPROCADDRESS)(ALCdevice *device, const ALCchar *funcname); +typedef ALCenum (ALC_APIENTRY *LPALCGETENUMVALUE)(ALCdevice *device, const ALCchar *enumname); +typedef const ALCchar* (ALC_APIENTRY *LPALCGETSTRING)(ALCdevice *device, ALCenum param); +typedef void (ALC_APIENTRY *LPALCGETINTEGERV)(ALCdevice *device, ALCenum param, ALCsizei size, ALCint *values); +typedef ALCdevice* (ALC_APIENTRY *LPALCCAPTUREOPENDEVICE)(const ALCchar *devicename, ALCuint frequency, ALCenum format, ALCsizei buffersize); +typedef ALCboolean (ALC_APIENTRY *LPALCCAPTURECLOSEDEVICE)(ALCdevice *device); +typedef void (ALC_APIENTRY *LPALCCAPTURESTART)(ALCdevice *device); +typedef void (ALC_APIENTRY *LPALCCAPTURESTOP)(ALCdevice *device); +typedef void (ALC_APIENTRY *LPALCCAPTURESAMPLES)(ALCdevice *device, ALCvoid *buffer, ALCsizei samples); + +#if defined(__cplusplus) +} +#endif + +#endif /* AL_ALC_H */ diff --git a/VC/inc/GL/glext.h b/VC/inc/GL/glext.h new file mode 100644 index 0000000..44ab7c6 --- /dev/null +++ b/VC/inc/GL/glext.h @@ -0,0 +1,12740 @@ +#ifndef __glext_h_ +#define __glext_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2007-2012 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
+*/ + +/* Header file version number, required by OpenGL ABI for Linux */ +/* glext.h last updated $Date: 2012-09-19 19:02:24 -0700 (Wed, 19 Sep 2012) $ */ +/* Current version at http://www.opengl.org/registry/ */ +#define GL_GLEXT_VERSION 85 +/* Function declaration macros - to move into glplatform.h */ + +#if defined(_WIN32) && !defined(APIENTRY) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__) +#define WIN32_LEAN_AND_MEAN 1 +#include +#endif + +#ifndef APIENTRY +#define APIENTRY +#endif +#ifndef APIENTRYP +#define APIENTRYP APIENTRY * +#endif +#ifndef GLAPI +#define GLAPI extern +#endif + +/*************************************************************/ + +#ifndef GL_VERSION_1_2 +#define GL_UNSIGNED_BYTE_3_3_2 0x8032 +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_INT_8_8_8_8 0x8035 +#define GL_UNSIGNED_INT_10_10_10_2 0x8036 +#define GL_TEXTURE_BINDING_3D 0x806A +#define GL_PACK_SKIP_IMAGES 0x806B +#define GL_PACK_IMAGE_HEIGHT 0x806C +#define GL_UNPACK_SKIP_IMAGES 0x806D +#define GL_UNPACK_IMAGE_HEIGHT 0x806E +#define GL_TEXTURE_3D 0x806F +#define GL_PROXY_TEXTURE_3D 0x8070 +#define GL_TEXTURE_DEPTH 0x8071 +#define GL_TEXTURE_WRAP_R 0x8072 +#define GL_MAX_3D_TEXTURE_SIZE 0x8073 +#define GL_UNSIGNED_BYTE_2_3_3_REV 0x8362 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_UNSIGNED_SHORT_5_6_5_REV 0x8364 +#define GL_UNSIGNED_SHORT_4_4_4_4_REV 0x8365 +#define GL_UNSIGNED_SHORT_1_5_5_5_REV 0x8366 +#define GL_UNSIGNED_INT_8_8_8_8_REV 0x8367 +#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 +#define GL_BGR 0x80E0 +#define GL_BGRA 0x80E1 +#define GL_MAX_ELEMENTS_VERTICES 0x80E8 +#define GL_MAX_ELEMENTS_INDICES 0x80E9 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_TEXTURE_MIN_LOD 0x813A +#define GL_TEXTURE_MAX_LOD 0x813B +#define GL_TEXTURE_BASE_LEVEL 0x813C +#define GL_TEXTURE_MAX_LEVEL 0x813D +#define GL_SMOOTH_POINT_SIZE_RANGE 0x0B12 +#define GL_SMOOTH_POINT_SIZE_GRANULARITY 0x0B13 +#define GL_SMOOTH_LINE_WIDTH_RANGE 0x0B22 +#define GL_SMOOTH_LINE_WIDTH_GRANULARITY 0x0B23 +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_RESCALE_NORMAL 0x803A +#define GL_LIGHT_MODEL_COLOR_CONTROL 0x81F8 +#define GL_SINGLE_COLOR 0x81F9 +#define GL_SEPARATE_SPECULAR_COLOR 0x81FA +#define GL_ALIASED_POINT_SIZE_RANGE 0x846D +#endif + +#ifndef GL_ARB_imaging +#define GL_CONSTANT_COLOR 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_BLEND_COLOR 0x8005 +#define GL_FUNC_ADD 0x8006 +#define GL_MIN 0x8007 +#define GL_MAX 0x8008 +#define GL_BLEND_EQUATION 0x8009 +#define GL_FUNC_SUBTRACT 0x800A +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_CONVOLUTION_1D 0x8010 +#define GL_CONVOLUTION_2D 0x8011 +#define GL_SEPARABLE_2D 0x8012 +#define GL_CONVOLUTION_BORDER_MODE 0x8013 +#define GL_CONVOLUTION_FILTER_SCALE 0x8014 +#define GL_CONVOLUTION_FILTER_BIAS 0x8015 +#define GL_REDUCE 0x8016 +#define GL_CONVOLUTION_FORMAT 0x8017 +#define GL_CONVOLUTION_WIDTH 0x8018 +#define GL_CONVOLUTION_HEIGHT 0x8019 +#define GL_MAX_CONVOLUTION_WIDTH 0x801A +#define GL_MAX_CONVOLUTION_HEIGHT 0x801B +#define GL_POST_CONVOLUTION_RED_SCALE 0x801C +#define GL_POST_CONVOLUTION_GREEN_SCALE 0x801D +#define GL_POST_CONVOLUTION_BLUE_SCALE 0x801E +#define GL_POST_CONVOLUTION_ALPHA_SCALE 0x801F +#define GL_POST_CONVOLUTION_RED_BIAS 0x8020 +#define GL_POST_CONVOLUTION_GREEN_BIAS 0x8021 +#define GL_POST_CONVOLUTION_BLUE_BIAS 0x8022 +#define GL_POST_CONVOLUTION_ALPHA_BIAS 0x8023 +#define GL_HISTOGRAM 
0x8024 +#define GL_PROXY_HISTOGRAM 0x8025 +#define GL_HISTOGRAM_WIDTH 0x8026 +#define GL_HISTOGRAM_FORMAT 0x8027 +#define GL_HISTOGRAM_RED_SIZE 0x8028 +#define GL_HISTOGRAM_GREEN_SIZE 0x8029 +#define GL_HISTOGRAM_BLUE_SIZE 0x802A +#define GL_HISTOGRAM_ALPHA_SIZE 0x802B +#define GL_HISTOGRAM_LUMINANCE_SIZE 0x802C +#define GL_HISTOGRAM_SINK 0x802D +#define GL_MINMAX 0x802E +#define GL_MINMAX_FORMAT 0x802F +#define GL_MINMAX_SINK 0x8030 +#define GL_TABLE_TOO_LARGE 0x8031 +#define GL_COLOR_MATRIX 0x80B1 +#define GL_COLOR_MATRIX_STACK_DEPTH 0x80B2 +#define GL_MAX_COLOR_MATRIX_STACK_DEPTH 0x80B3 +#define GL_POST_COLOR_MATRIX_RED_SCALE 0x80B4 +#define GL_POST_COLOR_MATRIX_GREEN_SCALE 0x80B5 +#define GL_POST_COLOR_MATRIX_BLUE_SCALE 0x80B6 +#define GL_POST_COLOR_MATRIX_ALPHA_SCALE 0x80B7 +#define GL_POST_COLOR_MATRIX_RED_BIAS 0x80B8 +#define GL_POST_COLOR_MATRIX_GREEN_BIAS 0x80B9 +#define GL_POST_COLOR_MATRIX_BLUE_BIAS 0x80BA +#define GL_POST_COLOR_MATRIX_ALPHA_BIAS 0x80BB +#define GL_COLOR_TABLE 0x80D0 +#define GL_POST_CONVOLUTION_COLOR_TABLE 0x80D1 +#define GL_POST_COLOR_MATRIX_COLOR_TABLE 0x80D2 +#define GL_PROXY_COLOR_TABLE 0x80D3 +#define GL_PROXY_POST_CONVOLUTION_COLOR_TABLE 0x80D4 +#define GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE 0x80D5 +#define GL_COLOR_TABLE_SCALE 0x80D6 +#define GL_COLOR_TABLE_BIAS 0x80D7 +#define GL_COLOR_TABLE_FORMAT 0x80D8 +#define GL_COLOR_TABLE_WIDTH 0x80D9 +#define GL_COLOR_TABLE_RED_SIZE 0x80DA +#define GL_COLOR_TABLE_GREEN_SIZE 0x80DB +#define GL_COLOR_TABLE_BLUE_SIZE 0x80DC +#define GL_COLOR_TABLE_ALPHA_SIZE 0x80DD +#define GL_COLOR_TABLE_LUMINANCE_SIZE 0x80DE +#define GL_COLOR_TABLE_INTENSITY_SIZE 0x80DF +#define GL_CONSTANT_BORDER 0x8151 +#define GL_REPLICATE_BORDER 0x8153 +#define GL_CONVOLUTION_BORDER_COLOR 0x8154 +#endif + +#ifndef GL_VERSION_1_3 +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_MULTISAMPLE 0x809D +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_ALPHA_TO_ONE 0x809F +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_PROXY_TEXTURE_CUBE_MAP 0x851B +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_COMPRESSED_RGB 0x84ED +#define 
GL_COMPRESSED_RGBA 0x84EE +#define GL_TEXTURE_COMPRESSION_HINT 0x84EF +#define GL_TEXTURE_COMPRESSED_IMAGE_SIZE 0x86A0 +#define GL_TEXTURE_COMPRESSED 0x86A1 +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_CLAMP_TO_BORDER 0x812D +#define GL_CLIENT_ACTIVE_TEXTURE 0x84E1 +#define GL_MAX_TEXTURE_UNITS 0x84E2 +#define GL_TRANSPOSE_MODELVIEW_MATRIX 0x84E3 +#define GL_TRANSPOSE_PROJECTION_MATRIX 0x84E4 +#define GL_TRANSPOSE_TEXTURE_MATRIX 0x84E5 +#define GL_TRANSPOSE_COLOR_MATRIX 0x84E6 +#define GL_MULTISAMPLE_BIT 0x20000000 +#define GL_NORMAL_MAP 0x8511 +#define GL_REFLECTION_MAP 0x8512 +#define GL_COMPRESSED_ALPHA 0x84E9 +#define GL_COMPRESSED_LUMINANCE 0x84EA +#define GL_COMPRESSED_LUMINANCE_ALPHA 0x84EB +#define GL_COMPRESSED_INTENSITY 0x84EC +#define GL_COMBINE 0x8570 +#define GL_COMBINE_RGB 0x8571 +#define GL_COMBINE_ALPHA 0x8572 +#define GL_SOURCE0_RGB 0x8580 +#define GL_SOURCE1_RGB 0x8581 +#define GL_SOURCE2_RGB 0x8582 +#define GL_SOURCE0_ALPHA 0x8588 +#define GL_SOURCE1_ALPHA 0x8589 +#define GL_SOURCE2_ALPHA 0x858A +#define GL_OPERAND0_RGB 0x8590 +#define GL_OPERAND1_RGB 0x8591 +#define GL_OPERAND2_RGB 0x8592 +#define GL_OPERAND0_ALPHA 0x8598 +#define GL_OPERAND1_ALPHA 0x8599 +#define GL_OPERAND2_ALPHA 0x859A +#define GL_RGB_SCALE 0x8573 +#define GL_ADD_SIGNED 0x8574 +#define GL_INTERPOLATE 0x8575 +#define GL_SUBTRACT 0x84E7 +#define GL_CONSTANT 0x8576 +#define GL_PRIMARY_COLOR 0x8577 +#define GL_PREVIOUS 0x8578 +#define GL_DOT3_RGB 0x86AE +#define GL_DOT3_RGBA 0x86AF +#endif + +#ifndef GL_VERSION_1_4 +#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLEND_DST_ALPHA 0x80CA +#define GL_BLEND_SRC_ALPHA 0x80CB +#define GL_POINT_FADE_THRESHOLD_SIZE 0x8128 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_DEPTH_COMPONENT24 0x81A6 +#define GL_DEPTH_COMPONENT32 0x81A7 +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD +#define GL_TEXTURE_LOD_BIAS 0x8501 +#define GL_INCR_WRAP 0x8507 +#define GL_DECR_WRAP 0x8508 +#define GL_TEXTURE_DEPTH_SIZE 0x884A +#define GL_TEXTURE_COMPARE_MODE 0x884C +#define GL_TEXTURE_COMPARE_FUNC 0x884D +#define GL_POINT_SIZE_MIN 0x8126 +#define GL_POINT_SIZE_MAX 0x8127 +#define GL_POINT_DISTANCE_ATTENUATION 0x8129 +#define GL_GENERATE_MIPMAP 0x8191 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_FOG_COORDINATE_SOURCE 0x8450 +#define GL_FOG_COORDINATE 0x8451 +#define GL_FRAGMENT_DEPTH 0x8452 +#define GL_CURRENT_FOG_COORDINATE 0x8453 +#define GL_FOG_COORDINATE_ARRAY_TYPE 0x8454 +#define GL_FOG_COORDINATE_ARRAY_STRIDE 0x8455 +#define GL_FOG_COORDINATE_ARRAY_POINTER 0x8456 +#define GL_FOG_COORDINATE_ARRAY 0x8457 +#define GL_COLOR_SUM 0x8458 +#define GL_CURRENT_SECONDARY_COLOR 0x8459 +#define GL_SECONDARY_COLOR_ARRAY_SIZE 0x845A +#define GL_SECONDARY_COLOR_ARRAY_TYPE 0x845B +#define GL_SECONDARY_COLOR_ARRAY_STRIDE 0x845C +#define GL_SECONDARY_COLOR_ARRAY_POINTER 0x845D +#define GL_SECONDARY_COLOR_ARRAY 0x845E +#define GL_TEXTURE_FILTER_CONTROL 0x8500 +#define GL_DEPTH_TEXTURE_MODE 0x884B +#define GL_COMPARE_R_TO_TEXTURE 0x884E +#endif + +#ifndef GL_VERSION_1_5 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_QUERY_COUNTER_BITS 0x8864 +#define GL_CURRENT_QUERY 0x8865 +#define GL_QUERY_RESULT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE 0x8867 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define 
GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_READ_ONLY 0x88B8 +#define GL_WRITE_ONLY 0x88B9 +#define GL_READ_WRITE 0x88BA +#define GL_BUFFER_ACCESS 0x88BB +#define GL_BUFFER_MAPPED 0x88BC +#define GL_BUFFER_MAP_POINTER 0x88BD +#define GL_STREAM_DRAW 0x88E0 +#define GL_STREAM_READ 0x88E1 +#define GL_STREAM_COPY 0x88E2 +#define GL_STATIC_DRAW 0x88E4 +#define GL_STATIC_READ 0x88E5 +#define GL_STATIC_COPY 0x88E6 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_DYNAMIC_READ 0x88E9 +#define GL_DYNAMIC_COPY 0x88EA +#define GL_SAMPLES_PASSED 0x8914 +#define GL_SRC1_ALPHA 0x8589 +#define GL_VERTEX_ARRAY_BUFFER_BINDING 0x8896 +#define GL_NORMAL_ARRAY_BUFFER_BINDING 0x8897 +#define GL_COLOR_ARRAY_BUFFER_BINDING 0x8898 +#define GL_INDEX_ARRAY_BUFFER_BINDING 0x8899 +#define GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING 0x889A +#define GL_EDGE_FLAG_ARRAY_BUFFER_BINDING 0x889B +#define GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING 0x889C +#define GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING 0x889D +#define GL_WEIGHT_ARRAY_BUFFER_BINDING 0x889E +#define GL_FOG_COORD_SRC 0x8450 +#define GL_FOG_COORD 0x8451 +#define GL_CURRENT_FOG_COORD 0x8453 +#define GL_FOG_COORD_ARRAY_TYPE 0x8454 +#define GL_FOG_COORD_ARRAY_STRIDE 0x8455 +#define GL_FOG_COORD_ARRAY_POINTER 0x8456 +#define GL_FOG_COORD_ARRAY 0x8457 +#define GL_FOG_COORD_ARRAY_BUFFER_BINDING 0x889D +#define GL_SRC0_RGB 0x8580 +#define GL_SRC1_RGB 0x8581 +#define GL_SRC2_RGB 0x8582 +#define GL_SRC0_ALPHA 0x8588 +#define GL_SRC2_ALPHA 0x858A +#endif + +#ifndef GL_VERSION_2_0 +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_VERTEX_PROGRAM_POINT_SIZE 0x8642 +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_MAX_DRAW_BUFFERS 0x8824 +#define GL_DRAW_BUFFER0 0x8825 +#define GL_DRAW_BUFFER1 0x8826 +#define GL_DRAW_BUFFER2 0x8827 +#define GL_DRAW_BUFFER3 0x8828 +#define GL_DRAW_BUFFER4 0x8829 +#define GL_DRAW_BUFFER5 0x882A +#define GL_DRAW_BUFFER6 0x882B +#define GL_DRAW_BUFFER7 0x882C +#define GL_DRAW_BUFFER8 0x882D +#define GL_DRAW_BUFFER9 0x882E +#define GL_DRAW_BUFFER10 0x882F +#define GL_DRAW_BUFFER11 0x8830 +#define GL_DRAW_BUFFER12 0x8831 +#define GL_DRAW_BUFFER13 0x8832 +#define GL_DRAW_BUFFER14 0x8833 +#define GL_DRAW_BUFFER15 0x8834 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_FRAGMENT_SHADER 0x8B30 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49 +#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A +#define GL_MAX_VARYING_FLOATS 0x8B4B +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_SHADER_TYPE 0x8B4F +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_SAMPLER_1D 0x8B5D +#define GL_SAMPLER_2D 0x8B5E +#define 
GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_SAMPLER_1D_SHADOW 0x8B61 +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_DELETE_STATUS 0x8B80 +#define GL_COMPILE_STATUS 0x8B81 +#define GL_LINK_STATUS 0x8B82 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_ACTIVE_UNIFORMS 0x8B86 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_POINT_SPRITE_COORD_ORIGIN 0x8CA0 +#define GL_LOWER_LEFT 0x8CA1 +#define GL_UPPER_LEFT 0x8CA2 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_VERTEX_PROGRAM_TWO_SIDE 0x8643 +#define GL_POINT_SPRITE 0x8861 +#define GL_COORD_REPLACE 0x8862 +#define GL_MAX_TEXTURE_COORDS 0x8871 +#endif + +#ifndef GL_VERSION_2_1 +#define GL_PIXEL_PACK_BUFFER 0x88EB +#define GL_PIXEL_UNPACK_BUFFER 0x88EC +#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED +#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A +#define GL_SRGB 0x8C40 +#define GL_SRGB8 0x8C41 +#define GL_SRGB_ALPHA 0x8C42 +#define GL_SRGB8_ALPHA8 0x8C43 +#define GL_COMPRESSED_SRGB 0x8C48 +#define GL_COMPRESSED_SRGB_ALPHA 0x8C49 +#define GL_CURRENT_RASTER_SECONDARY_COLOR 0x845F +#define GL_SLUMINANCE_ALPHA 0x8C44 +#define GL_SLUMINANCE8_ALPHA8 0x8C45 +#define GL_SLUMINANCE 0x8C46 +#define GL_SLUMINANCE8 0x8C47 +#define GL_COMPRESSED_SLUMINANCE 0x8C4A +#define GL_COMPRESSED_SLUMINANCE_ALPHA 0x8C4B +#endif + +#ifndef GL_VERSION_3_0 +#define GL_COMPARE_REF_TO_TEXTURE 0x884E +#define GL_CLIP_DISTANCE0 0x3000 +#define GL_CLIP_DISTANCE1 0x3001 +#define GL_CLIP_DISTANCE2 0x3002 +#define GL_CLIP_DISTANCE3 0x3003 +#define GL_CLIP_DISTANCE4 0x3004 +#define GL_CLIP_DISTANCE5 0x3005 +#define GL_CLIP_DISTANCE6 0x3006 +#define GL_CLIP_DISTANCE7 0x3007 +#define GL_MAX_CLIP_DISTANCES 0x0D32 +#define GL_MAJOR_VERSION 0x821B +#define GL_MINOR_VERSION 0x821C +#define GL_NUM_EXTENSIONS 0x821D +#define GL_CONTEXT_FLAGS 0x821E +#define GL_COMPRESSED_RED 0x8225 +#define GL_COMPRESSED_RG 0x8226 +#define GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT 0x0001 +#define GL_RGBA32F 0x8814 +#define GL_RGB32F 0x8815 +#define GL_RGBA16F 0x881A +#define GL_RGB16F 0x881B +#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD +#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF +#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904 +#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905 +#define GL_CLAMP_READ_COLOR 0x891C +#define GL_FIXED_ONLY 0x891D +#define GL_MAX_VARYING_COMPONENTS 0x8B4B +#define GL_TEXTURE_1D_ARRAY 0x8C18 +#define GL_PROXY_TEXTURE_1D_ARRAY 0x8C19 +#define GL_TEXTURE_2D_ARRAY 0x8C1A +#define GL_PROXY_TEXTURE_2D_ARRAY 0x8C1B +#define GL_TEXTURE_BINDING_1D_ARRAY 0x8C1C +#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D +#define GL_R11F_G11F_B10F 0x8C3A +#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B +#define GL_RGB9_E5 0x8C3D +#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E +#define GL_TEXTURE_SHARED_SIZE 0x8C3F +#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76 +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80 +#define 
GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83 +#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85 +#define GL_PRIMITIVES_GENERATED 0x8C87 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88 +#define GL_RASTERIZER_DISCARD 0x8C89 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B +#define GL_INTERLEAVED_ATTRIBS 0x8C8C +#define GL_SEPARATE_ATTRIBS 0x8C8D +#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F +#define GL_RGBA32UI 0x8D70 +#define GL_RGB32UI 0x8D71 +#define GL_RGBA16UI 0x8D76 +#define GL_RGB16UI 0x8D77 +#define GL_RGBA8UI 0x8D7C +#define GL_RGB8UI 0x8D7D +#define GL_RGBA32I 0x8D82 +#define GL_RGB32I 0x8D83 +#define GL_RGBA16I 0x8D88 +#define GL_RGB16I 0x8D89 +#define GL_RGBA8I 0x8D8E +#define GL_RGB8I 0x8D8F +#define GL_RED_INTEGER 0x8D94 +#define GL_GREEN_INTEGER 0x8D95 +#define GL_BLUE_INTEGER 0x8D96 +#define GL_RGB_INTEGER 0x8D98 +#define GL_RGBA_INTEGER 0x8D99 +#define GL_BGR_INTEGER 0x8D9A +#define GL_BGRA_INTEGER 0x8D9B +#define GL_SAMPLER_1D_ARRAY 0x8DC0 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 +#define GL_INT_SAMPLER_1D 0x8DC9 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1 +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_QUERY_WAIT 0x8E13 +#define GL_QUERY_NO_WAIT 0x8E14 +#define GL_QUERY_BY_REGION_WAIT 0x8E15 +#define GL_QUERY_BY_REGION_NO_WAIT 0x8E16 +#define GL_BUFFER_ACCESS_FLAGS 0x911F +#define GL_BUFFER_MAP_LENGTH 0x9120 +#define GL_BUFFER_MAP_OFFSET 0x9121 +/* Reuse tokens from ARB_depth_buffer_float */ +/* reuse GL_DEPTH_COMPONENT32F */ +/* reuse GL_DEPTH32F_STENCIL8 */ +/* reuse GL_FLOAT_32_UNSIGNED_INT_24_8_REV */ +/* Reuse tokens from ARB_framebuffer_object */ +/* reuse GL_INVALID_FRAMEBUFFER_OPERATION */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE */ +/* reuse GL_FRAMEBUFFER_DEFAULT */ +/* reuse GL_FRAMEBUFFER_UNDEFINED */ +/* reuse GL_DEPTH_STENCIL_ATTACHMENT */ +/* reuse GL_INDEX */ +/* reuse GL_MAX_RENDERBUFFER_SIZE */ +/* reuse GL_DEPTH_STENCIL */ +/* reuse GL_UNSIGNED_INT_24_8 */ +/* reuse GL_DEPTH24_STENCIL8 */ +/* reuse GL_TEXTURE_STENCIL_SIZE */ +/* reuse GL_TEXTURE_RED_TYPE */ +/* reuse GL_TEXTURE_GREEN_TYPE */ +/* reuse GL_TEXTURE_BLUE_TYPE */ +/* reuse GL_TEXTURE_ALPHA_TYPE */ +/* reuse GL_TEXTURE_DEPTH_TYPE */ +/* reuse GL_UNSIGNED_NORMALIZED */ +/* reuse GL_FRAMEBUFFER_BINDING */ +/* reuse GL_DRAW_FRAMEBUFFER_BINDING */ +/* reuse GL_RENDERBUFFER_BINDING */ +/* reuse GL_READ_FRAMEBUFFER */ +/* reuse GL_DRAW_FRAMEBUFFER */ +/* reuse 
GL_READ_FRAMEBUFFER_BINDING */ +/* reuse GL_RENDERBUFFER_SAMPLES */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER */ +/* reuse GL_FRAMEBUFFER_COMPLETE */ +/* reuse GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT */ +/* reuse GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT */ +/* reuse GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER */ +/* reuse GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER */ +/* reuse GL_FRAMEBUFFER_UNSUPPORTED */ +/* reuse GL_MAX_COLOR_ATTACHMENTS */ +/* reuse GL_COLOR_ATTACHMENT0 */ +/* reuse GL_COLOR_ATTACHMENT1 */ +/* reuse GL_COLOR_ATTACHMENT2 */ +/* reuse GL_COLOR_ATTACHMENT3 */ +/* reuse GL_COLOR_ATTACHMENT4 */ +/* reuse GL_COLOR_ATTACHMENT5 */ +/* reuse GL_COLOR_ATTACHMENT6 */ +/* reuse GL_COLOR_ATTACHMENT7 */ +/* reuse GL_COLOR_ATTACHMENT8 */ +/* reuse GL_COLOR_ATTACHMENT9 */ +/* reuse GL_COLOR_ATTACHMENT10 */ +/* reuse GL_COLOR_ATTACHMENT11 */ +/* reuse GL_COLOR_ATTACHMENT12 */ +/* reuse GL_COLOR_ATTACHMENT13 */ +/* reuse GL_COLOR_ATTACHMENT14 */ +/* reuse GL_COLOR_ATTACHMENT15 */ +/* reuse GL_DEPTH_ATTACHMENT */ +/* reuse GL_STENCIL_ATTACHMENT */ +/* reuse GL_FRAMEBUFFER */ +/* reuse GL_RENDERBUFFER */ +/* reuse GL_RENDERBUFFER_WIDTH */ +/* reuse GL_RENDERBUFFER_HEIGHT */ +/* reuse GL_RENDERBUFFER_INTERNAL_FORMAT */ +/* reuse GL_STENCIL_INDEX1 */ +/* reuse GL_STENCIL_INDEX4 */ +/* reuse GL_STENCIL_INDEX8 */ +/* reuse GL_STENCIL_INDEX16 */ +/* reuse GL_RENDERBUFFER_RED_SIZE */ +/* reuse GL_RENDERBUFFER_GREEN_SIZE */ +/* reuse GL_RENDERBUFFER_BLUE_SIZE */ +/* reuse GL_RENDERBUFFER_ALPHA_SIZE */ +/* reuse GL_RENDERBUFFER_DEPTH_SIZE */ +/* reuse GL_RENDERBUFFER_STENCIL_SIZE */ +/* reuse GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE */ +/* reuse GL_MAX_SAMPLES */ +/* Reuse tokens from ARB_framebuffer_sRGB */ +/* reuse GL_FRAMEBUFFER_SRGB */ +/* Reuse tokens from ARB_half_float_vertex */ +/* reuse GL_HALF_FLOAT */ +/* Reuse tokens from ARB_map_buffer_range */ +/* reuse GL_MAP_READ_BIT */ +/* reuse GL_MAP_WRITE_BIT */ +/* reuse GL_MAP_INVALIDATE_RANGE_BIT */ +/* reuse GL_MAP_INVALIDATE_BUFFER_BIT */ +/* reuse GL_MAP_FLUSH_EXPLICIT_BIT */ +/* reuse GL_MAP_UNSYNCHRONIZED_BIT */ +/* Reuse tokens from ARB_texture_compression_rgtc */ +/* reuse GL_COMPRESSED_RED_RGTC1 */ +/* reuse GL_COMPRESSED_SIGNED_RED_RGTC1 */ +/* reuse GL_COMPRESSED_RG_RGTC2 */ +/* reuse GL_COMPRESSED_SIGNED_RG_RGTC2 */ +/* Reuse tokens from ARB_texture_rg */ +/* reuse GL_RG */ +/* reuse GL_RG_INTEGER */ +/* reuse GL_R8 */ +/* reuse GL_R16 */ +/* reuse GL_RG8 */ +/* reuse GL_RG16 */ +/* reuse GL_R16F */ +/* reuse GL_R32F */ +/* reuse GL_RG16F */ +/* reuse GL_RG32F */ +/* reuse GL_R8I */ +/* reuse GL_R8UI */ +/* reuse GL_R16I */ +/* reuse GL_R16UI */ +/* reuse GL_R32I */ +/* reuse GL_R32UI */ +/* reuse GL_RG8I */ +/* reuse GL_RG8UI */ +/* reuse GL_RG16I */ +/* reuse GL_RG16UI */ +/* reuse GL_RG32I */ +/* reuse GL_RG32UI */ +/* Reuse tokens from ARB_vertex_array_object */ +/* reuse GL_VERTEX_ARRAY_BINDING */ +#define GL_CLAMP_VERTEX_COLOR 0x891A +#define GL_CLAMP_FRAGMENT_COLOR 0x891B +#define GL_ALPHA_INTEGER 0x8D97 +/* Reuse tokens from ARB_framebuffer_object */ +/* reuse GL_TEXTURE_LUMINANCE_TYPE */ +/* reuse GL_TEXTURE_INTENSITY_TYPE */ +#endif + +#ifndef GL_VERSION_3_1 +#define GL_SAMPLER_2D_RECT 0x8B63 +#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64 +#define GL_SAMPLER_BUFFER 0x8DC2 +#define GL_INT_SAMPLER_2D_RECT 0x8DCD 
+#define GL_INT_SAMPLER_BUFFER 0x8DD0 +#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8 +#define GL_TEXTURE_BUFFER 0x8C2A +#define GL_MAX_TEXTURE_BUFFER_SIZE 0x8C2B +#define GL_TEXTURE_BINDING_BUFFER 0x8C2C +#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING 0x8C2D +#define GL_TEXTURE_RECTANGLE 0x84F5 +#define GL_TEXTURE_BINDING_RECTANGLE 0x84F6 +#define GL_PROXY_TEXTURE_RECTANGLE 0x84F7 +#define GL_MAX_RECTANGLE_TEXTURE_SIZE 0x84F8 +#define GL_RED_SNORM 0x8F90 +#define GL_RG_SNORM 0x8F91 +#define GL_RGB_SNORM 0x8F92 +#define GL_RGBA_SNORM 0x8F93 +#define GL_R8_SNORM 0x8F94 +#define GL_RG8_SNORM 0x8F95 +#define GL_RGB8_SNORM 0x8F96 +#define GL_RGBA8_SNORM 0x8F97 +#define GL_R16_SNORM 0x8F98 +#define GL_RG16_SNORM 0x8F99 +#define GL_RGB16_SNORM 0x8F9A +#define GL_RGBA16_SNORM 0x8F9B +#define GL_SIGNED_NORMALIZED 0x8F9C +#define GL_PRIMITIVE_RESTART 0x8F9D +#define GL_PRIMITIVE_RESTART_INDEX 0x8F9E +/* Reuse tokens from ARB_copy_buffer */ +/* reuse GL_COPY_READ_BUFFER */ +/* reuse GL_COPY_WRITE_BUFFER */ +/* Reuse tokens from ARB_draw_instanced (none) */ +/* Reuse tokens from ARB_uniform_buffer_object */ +/* reuse GL_UNIFORM_BUFFER */ +/* reuse GL_UNIFORM_BUFFER_BINDING */ +/* reuse GL_UNIFORM_BUFFER_START */ +/* reuse GL_UNIFORM_BUFFER_SIZE */ +/* reuse GL_MAX_VERTEX_UNIFORM_BLOCKS */ +/* reuse GL_MAX_FRAGMENT_UNIFORM_BLOCKS */ +/* reuse GL_MAX_COMBINED_UNIFORM_BLOCKS */ +/* reuse GL_MAX_UNIFORM_BUFFER_BINDINGS */ +/* reuse GL_MAX_UNIFORM_BLOCK_SIZE */ +/* reuse GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS */ +/* reuse GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS */ +/* reuse GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT */ +/* reuse GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH */ +/* reuse GL_ACTIVE_UNIFORM_BLOCKS */ +/* reuse GL_UNIFORM_TYPE */ +/* reuse GL_UNIFORM_SIZE */ +/* reuse GL_UNIFORM_NAME_LENGTH */ +/* reuse GL_UNIFORM_BLOCK_INDEX */ +/* reuse GL_UNIFORM_OFFSET */ +/* reuse GL_UNIFORM_ARRAY_STRIDE */ +/* reuse GL_UNIFORM_MATRIX_STRIDE */ +/* reuse GL_UNIFORM_IS_ROW_MAJOR */ +/* reuse GL_UNIFORM_BLOCK_BINDING */ +/* reuse GL_UNIFORM_BLOCK_DATA_SIZE */ +/* reuse GL_UNIFORM_BLOCK_NAME_LENGTH */ +/* reuse GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS */ +/* reuse GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES */ +/* reuse GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER */ +/* reuse GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER */ +/* reuse GL_INVALID_INDEX */ +#endif + +#ifndef GL_VERSION_3_2 +#define GL_CONTEXT_CORE_PROFILE_BIT 0x00000001 +#define GL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002 +#define GL_LINES_ADJACENCY 0x000A +#define GL_LINE_STRIP_ADJACENCY 0x000B +#define GL_TRIANGLES_ADJACENCY 0x000C +#define GL_TRIANGLE_STRIP_ADJACENCY 0x000D +#define GL_PROGRAM_POINT_SIZE 0x8642 +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS 0x8C29 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED 0x8DA7 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS 0x8DA8 +#define GL_GEOMETRY_SHADER 0x8DD9 +#define GL_GEOMETRY_VERTICES_OUT 0x8916 +#define GL_GEOMETRY_INPUT_TYPE 0x8917 +#define GL_GEOMETRY_OUTPUT_TYPE 0x8918 +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS 0x8DDF +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES 0x8DE0 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS 0x8DE1 +#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122 +#define GL_MAX_GEOMETRY_INPUT_COMPONENTS 0x9123 +#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS 0x9124 +#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125 +#define GL_CONTEXT_PROFILE_MASK 0x9126 +/* reuse GL_MAX_VARYING_COMPONENTS */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER */ +/* Reuse tokens 
from ARB_depth_clamp */ +/* reuse GL_DEPTH_CLAMP */ +/* Reuse tokens from ARB_draw_elements_base_vertex (none) */ +/* Reuse tokens from ARB_fragment_coord_conventions (none) */ +/* Reuse tokens from ARB_provoking_vertex */ +/* reuse GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION */ +/* reuse GL_FIRST_VERTEX_CONVENTION */ +/* reuse GL_LAST_VERTEX_CONVENTION */ +/* reuse GL_PROVOKING_VERTEX */ +/* Reuse tokens from ARB_seamless_cube_map */ +/* reuse GL_TEXTURE_CUBE_MAP_SEAMLESS */ +/* Reuse tokens from ARB_sync */ +/* reuse GL_MAX_SERVER_WAIT_TIMEOUT */ +/* reuse GL_OBJECT_TYPE */ +/* reuse GL_SYNC_CONDITION */ +/* reuse GL_SYNC_STATUS */ +/* reuse GL_SYNC_FLAGS */ +/* reuse GL_SYNC_FENCE */ +/* reuse GL_SYNC_GPU_COMMANDS_COMPLETE */ +/* reuse GL_UNSIGNALED */ +/* reuse GL_SIGNALED */ +/* reuse GL_ALREADY_SIGNALED */ +/* reuse GL_TIMEOUT_EXPIRED */ +/* reuse GL_CONDITION_SATISFIED */ +/* reuse GL_WAIT_FAILED */ +/* reuse GL_TIMEOUT_IGNORED */ +/* reuse GL_SYNC_FLUSH_COMMANDS_BIT */ +/* reuse GL_TIMEOUT_IGNORED */ +/* Reuse tokens from ARB_texture_multisample */ +/* reuse GL_SAMPLE_POSITION */ +/* reuse GL_SAMPLE_MASK */ +/* reuse GL_SAMPLE_MASK_VALUE */ +/* reuse GL_MAX_SAMPLE_MASK_WORDS */ +/* reuse GL_TEXTURE_2D_MULTISAMPLE */ +/* reuse GL_PROXY_TEXTURE_2D_MULTISAMPLE */ +/* reuse GL_TEXTURE_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_TEXTURE_BINDING_2D_MULTISAMPLE */ +/* reuse GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_TEXTURE_SAMPLES */ +/* reuse GL_TEXTURE_FIXED_SAMPLE_LOCATIONS */ +/* reuse GL_SAMPLER_2D_MULTISAMPLE */ +/* reuse GL_INT_SAMPLER_2D_MULTISAMPLE */ +/* reuse GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE */ +/* reuse GL_SAMPLER_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_MAX_COLOR_TEXTURE_SAMPLES */ +/* reuse GL_MAX_DEPTH_TEXTURE_SAMPLES */ +/* reuse GL_MAX_INTEGER_SAMPLES */ +/* Don't need to reuse tokens from ARB_vertex_array_bgra since they're already in 1.2 core */ +#endif + +#ifndef GL_VERSION_3_3 +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE +/* Reuse tokens from ARB_blend_func_extended */ +/* reuse GL_SRC1_COLOR */ +/* reuse GL_ONE_MINUS_SRC1_COLOR */ +/* reuse GL_ONE_MINUS_SRC1_ALPHA */ +/* reuse GL_MAX_DUAL_SOURCE_DRAW_BUFFERS */ +/* Reuse tokens from ARB_explicit_attrib_location (none) */ +/* Reuse tokens from ARB_occlusion_query2 */ +/* reuse GL_ANY_SAMPLES_PASSED */ +/* Reuse tokens from ARB_sampler_objects */ +/* reuse GL_SAMPLER_BINDING */ +/* Reuse tokens from ARB_shader_bit_encoding (none) */ +/* Reuse tokens from ARB_texture_rgb10_a2ui */ +/* reuse GL_RGB10_A2UI */ +/* Reuse tokens from ARB_texture_swizzle */ +/* reuse GL_TEXTURE_SWIZZLE_R */ +/* reuse GL_TEXTURE_SWIZZLE_G */ +/* reuse GL_TEXTURE_SWIZZLE_B */ +/* reuse GL_TEXTURE_SWIZZLE_A */ +/* reuse GL_TEXTURE_SWIZZLE_RGBA */ +/* Reuse tokens from ARB_timer_query */ +/* reuse GL_TIME_ELAPSED */ +/* reuse GL_TIMESTAMP */ +/* Reuse tokens from ARB_vertex_type_2_10_10_10_rev */ +/* reuse GL_INT_2_10_10_10_REV */ +#endif + +#ifndef GL_VERSION_4_0 +#define GL_SAMPLE_SHADING 0x8C36 +#define GL_MIN_SAMPLE_SHADING_VALUE 0x8C37 +#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5E +#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5F +#define GL_TEXTURE_CUBE_MAP_ARRAY 0x9009 +#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY 0x900A +#define GL_PROXY_TEXTURE_CUBE_MAP_ARRAY 0x900B +#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D +#define 
GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F +/* Reuse tokens from ARB_texture_query_lod (none) */ +/* Reuse tokens from ARB_draw_buffers_blend (none) */ +/* Reuse tokens from ARB_draw_indirect */ +/* reuse GL_DRAW_INDIRECT_BUFFER */ +/* reuse GL_DRAW_INDIRECT_BUFFER_BINDING */ +/* Reuse tokens from ARB_gpu_shader5 */ +/* reuse GL_GEOMETRY_SHADER_INVOCATIONS */ +/* reuse GL_MAX_GEOMETRY_SHADER_INVOCATIONS */ +/* reuse GL_MIN_FRAGMENT_INTERPOLATION_OFFSET */ +/* reuse GL_MAX_FRAGMENT_INTERPOLATION_OFFSET */ +/* reuse GL_FRAGMENT_INTERPOLATION_OFFSET_BITS */ +/* reuse GL_MAX_VERTEX_STREAMS */ +/* Reuse tokens from ARB_gpu_shader_fp64 */ +/* reuse GL_DOUBLE_VEC2 */ +/* reuse GL_DOUBLE_VEC3 */ +/* reuse GL_DOUBLE_VEC4 */ +/* reuse GL_DOUBLE_MAT2 */ +/* reuse GL_DOUBLE_MAT3 */ +/* reuse GL_DOUBLE_MAT4 */ +/* reuse GL_DOUBLE_MAT2x3 */ +/* reuse GL_DOUBLE_MAT2x4 */ +/* reuse GL_DOUBLE_MAT3x2 */ +/* reuse GL_DOUBLE_MAT3x4 */ +/* reuse GL_DOUBLE_MAT4x2 */ +/* reuse GL_DOUBLE_MAT4x3 */ +/* Reuse tokens from ARB_shader_subroutine */ +/* reuse GL_ACTIVE_SUBROUTINES */ +/* reuse GL_ACTIVE_SUBROUTINE_UNIFORMS */ +/* reuse GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS */ +/* reuse GL_ACTIVE_SUBROUTINE_MAX_LENGTH */ +/* reuse GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH */ +/* reuse GL_MAX_SUBROUTINES */ +/* reuse GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS */ +/* reuse GL_NUM_COMPATIBLE_SUBROUTINES */ +/* reuse GL_COMPATIBLE_SUBROUTINES */ +/* Reuse tokens from ARB_tessellation_shader */ +/* reuse GL_PATCHES */ +/* reuse GL_PATCH_VERTICES */ +/* reuse GL_PATCH_DEFAULT_INNER_LEVEL */ +/* reuse GL_PATCH_DEFAULT_OUTER_LEVEL */ +/* reuse GL_TESS_CONTROL_OUTPUT_VERTICES */ +/* reuse GL_TESS_GEN_MODE */ +/* reuse GL_TESS_GEN_SPACING */ +/* reuse GL_TESS_GEN_VERTEX_ORDER */ +/* reuse GL_TESS_GEN_POINT_MODE */ +/* reuse GL_ISOLINES */ +/* reuse GL_FRACTIONAL_ODD */ +/* reuse GL_FRACTIONAL_EVEN */ +/* reuse GL_MAX_PATCH_VERTICES */ +/* reuse GL_MAX_TESS_GEN_LEVEL */ +/* reuse GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS */ +/* reuse GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS */ +/* reuse GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS */ +/* reuse GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS */ +/* reuse GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS */ +/* reuse GL_MAX_TESS_PATCH_COMPONENTS */ +/* reuse GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS */ +/* reuse GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS */ +/* reuse GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS */ +/* reuse GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS */ +/* reuse GL_MAX_TESS_CONTROL_INPUT_COMPONENTS */ +/* reuse GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS */ +/* reuse GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS */ +/* reuse GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS */ +/* reuse GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER */ +/* reuse GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER */ +/* reuse GL_TESS_EVALUATION_SHADER */ +/* reuse GL_TESS_CONTROL_SHADER */ +/* Reuse tokens from ARB_texture_buffer_object_rgb32 (none) */ +/* Reuse tokens from ARB_transform_feedback2 */ +/* reuse GL_TRANSFORM_FEEDBACK */ +/* reuse GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED */ +/* reuse GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE */ +/* reuse GL_TRANSFORM_FEEDBACK_BINDING */ +/* Reuse tokens from ARB_transform_feedback3 */ +/* reuse GL_MAX_TRANSFORM_FEEDBACK_BUFFERS */ +/* reuse GL_MAX_VERTEX_STREAMS */ +#endif + +#ifndef GL_VERSION_4_1 +/* Reuse tokens from ARB_ES2_compatibility */ +/* reuse GL_FIXED */ +/* reuse GL_IMPLEMENTATION_COLOR_READ_TYPE */ +/* reuse 
GL_IMPLEMENTATION_COLOR_READ_FORMAT */ +/* reuse GL_LOW_FLOAT */ +/* reuse GL_MEDIUM_FLOAT */ +/* reuse GL_HIGH_FLOAT */ +/* reuse GL_LOW_INT */ +/* reuse GL_MEDIUM_INT */ +/* reuse GL_HIGH_INT */ +/* reuse GL_SHADER_COMPILER */ +/* reuse GL_SHADER_BINARY_FORMATS */ +/* reuse GL_NUM_SHADER_BINARY_FORMATS */ +/* reuse GL_MAX_VERTEX_UNIFORM_VECTORS */ +/* reuse GL_MAX_VARYING_VECTORS */ +/* reuse GL_MAX_FRAGMENT_UNIFORM_VECTORS */ +/* reuse GL_RGB565 */ +/* Reuse tokens from ARB_get_program_binary */ +/* reuse GL_PROGRAM_BINARY_RETRIEVABLE_HINT */ +/* reuse GL_PROGRAM_BINARY_LENGTH */ +/* reuse GL_NUM_PROGRAM_BINARY_FORMATS */ +/* reuse GL_PROGRAM_BINARY_FORMATS */ +/* Reuse tokens from ARB_separate_shader_objects */ +/* reuse GL_VERTEX_SHADER_BIT */ +/* reuse GL_FRAGMENT_SHADER_BIT */ +/* reuse GL_GEOMETRY_SHADER_BIT */ +/* reuse GL_TESS_CONTROL_SHADER_BIT */ +/* reuse GL_TESS_EVALUATION_SHADER_BIT */ +/* reuse GL_ALL_SHADER_BITS */ +/* reuse GL_PROGRAM_SEPARABLE */ +/* reuse GL_ACTIVE_PROGRAM */ +/* reuse GL_PROGRAM_PIPELINE_BINDING */ +/* Reuse tokens from ARB_shader_precision (none) */ +/* Reuse tokens from ARB_vertex_attrib_64bit - all are in GL 3.0 and 4.0 already */ +/* Reuse tokens from ARB_viewport_array - some are in GL 1.1 and ARB_provoking_vertex already */ +/* reuse GL_MAX_VIEWPORTS */ +/* reuse GL_VIEWPORT_SUBPIXEL_BITS */ +/* reuse GL_VIEWPORT_BOUNDS_RANGE */ +/* reuse GL_LAYER_PROVOKING_VERTEX */ +/* reuse GL_VIEWPORT_INDEX_PROVOKING_VERTEX */ +/* reuse GL_UNDEFINED_VERTEX */ +#endif + +#ifndef GL_VERSION_4_2 +/* Reuse tokens from ARB_base_instance (none) */ +/* Reuse tokens from ARB_shading_language_420pack (none) */ +/* Reuse tokens from ARB_transform_feedback_instanced (none) */ +/* Reuse tokens from ARB_compressed_texture_pixel_storage */ +/* reuse GL_UNPACK_COMPRESSED_BLOCK_WIDTH */ +/* reuse GL_UNPACK_COMPRESSED_BLOCK_HEIGHT */ +/* reuse GL_UNPACK_COMPRESSED_BLOCK_DEPTH */ +/* reuse GL_UNPACK_COMPRESSED_BLOCK_SIZE */ +/* reuse GL_PACK_COMPRESSED_BLOCK_WIDTH */ +/* reuse GL_PACK_COMPRESSED_BLOCK_HEIGHT */ +/* reuse GL_PACK_COMPRESSED_BLOCK_DEPTH */ +/* reuse GL_PACK_COMPRESSED_BLOCK_SIZE */ +/* Reuse tokens from ARB_conservative_depth (none) */ +/* Reuse tokens from ARB_internalformat_query */ +/* reuse GL_NUM_SAMPLE_COUNTS */ +/* Reuse tokens from ARB_map_buffer_alignment */ +/* reuse GL_MIN_MAP_BUFFER_ALIGNMENT */ +/* Reuse tokens from ARB_shader_atomic_counters */ +/* reuse GL_ATOMIC_COUNTER_BUFFER */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_BINDING */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_START */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_SIZE */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER */ +/* reuse GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS */ +/* reuse GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS */ +/* reuse GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS */ +/* reuse GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS */ +/* reuse GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS */ +/* reuse GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS */ +/* reuse GL_MAX_VERTEX_ATOMIC_COUNTERS */ +/* reuse GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS */ +/* reuse 
GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS */ +/* reuse GL_MAX_GEOMETRY_ATOMIC_COUNTERS */ +/* reuse GL_MAX_FRAGMENT_ATOMIC_COUNTERS */ +/* reuse GL_MAX_COMBINED_ATOMIC_COUNTERS */ +/* reuse GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE */ +/* reuse GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS */ +/* reuse GL_ACTIVE_ATOMIC_COUNTER_BUFFERS */ +/* reuse GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX */ +/* reuse GL_UNSIGNED_INT_ATOMIC_COUNTER */ +/* Reuse tokens from ARB_shader_image_load_store */ +/* reuse GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT */ +/* reuse GL_ELEMENT_ARRAY_BARRIER_BIT */ +/* reuse GL_UNIFORM_BARRIER_BIT */ +/* reuse GL_TEXTURE_FETCH_BARRIER_BIT */ +/* reuse GL_SHADER_IMAGE_ACCESS_BARRIER_BIT */ +/* reuse GL_COMMAND_BARRIER_BIT */ +/* reuse GL_PIXEL_BUFFER_BARRIER_BIT */ +/* reuse GL_TEXTURE_UPDATE_BARRIER_BIT */ +/* reuse GL_BUFFER_UPDATE_BARRIER_BIT */ +/* reuse GL_FRAMEBUFFER_BARRIER_BIT */ +/* reuse GL_TRANSFORM_FEEDBACK_BARRIER_BIT */ +/* reuse GL_ATOMIC_COUNTER_BARRIER_BIT */ +/* reuse GL_ALL_BARRIER_BITS */ +/* reuse GL_MAX_IMAGE_UNITS */ +/* reuse GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS */ +/* reuse GL_IMAGE_BINDING_NAME */ +/* reuse GL_IMAGE_BINDING_LEVEL */ +/* reuse GL_IMAGE_BINDING_LAYERED */ +/* reuse GL_IMAGE_BINDING_LAYER */ +/* reuse GL_IMAGE_BINDING_ACCESS */ +/* reuse GL_IMAGE_1D */ +/* reuse GL_IMAGE_2D */ +/* reuse GL_IMAGE_3D */ +/* reuse GL_IMAGE_2D_RECT */ +/* reuse GL_IMAGE_CUBE */ +/* reuse GL_IMAGE_BUFFER */ +/* reuse GL_IMAGE_1D_ARRAY */ +/* reuse GL_IMAGE_2D_ARRAY */ +/* reuse GL_IMAGE_CUBE_MAP_ARRAY */ +/* reuse GL_IMAGE_2D_MULTISAMPLE */ +/* reuse GL_IMAGE_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_INT_IMAGE_1D */ +/* reuse GL_INT_IMAGE_2D */ +/* reuse GL_INT_IMAGE_3D */ +/* reuse GL_INT_IMAGE_2D_RECT */ +/* reuse GL_INT_IMAGE_CUBE */ +/* reuse GL_INT_IMAGE_BUFFER */ +/* reuse GL_INT_IMAGE_1D_ARRAY */ +/* reuse GL_INT_IMAGE_2D_ARRAY */ +/* reuse GL_INT_IMAGE_CUBE_MAP_ARRAY */ +/* reuse GL_INT_IMAGE_2D_MULTISAMPLE */ +/* reuse GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_UNSIGNED_INT_IMAGE_1D */ +/* reuse GL_UNSIGNED_INT_IMAGE_2D */ +/* reuse GL_UNSIGNED_INT_IMAGE_3D */ +/* reuse GL_UNSIGNED_INT_IMAGE_2D_RECT */ +/* reuse GL_UNSIGNED_INT_IMAGE_CUBE */ +/* reuse GL_UNSIGNED_INT_IMAGE_BUFFER */ +/* reuse GL_UNSIGNED_INT_IMAGE_1D_ARRAY */ +/* reuse GL_UNSIGNED_INT_IMAGE_2D_ARRAY */ +/* reuse GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY */ +/* reuse GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE */ +/* reuse GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_MAX_IMAGE_SAMPLES */ +/* reuse GL_IMAGE_BINDING_FORMAT */ +/* reuse GL_IMAGE_FORMAT_COMPATIBILITY_TYPE */ +/* reuse GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE */ +/* reuse GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS */ +/* reuse GL_MAX_VERTEX_IMAGE_UNIFORMS */ +/* reuse GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS */ +/* reuse GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS */ +/* reuse GL_MAX_GEOMETRY_IMAGE_UNIFORMS */ +/* reuse GL_MAX_FRAGMENT_IMAGE_UNIFORMS */ +/* reuse GL_MAX_COMBINED_IMAGE_UNIFORMS */ +/* Reuse tokens from ARB_shading_language_packing (none) */ +/* Reuse tokens from ARB_texture_storage */ +/* reuse GL_TEXTURE_IMMUTABLE_FORMAT */ +#endif + +#ifndef GL_VERSION_4_3 +#define GL_NUM_SHADING_LANGUAGE_VERSIONS 0x82E9 +#define GL_VERTEX_ATTRIB_ARRAY_LONG 0x874E +/* Reuse tokens from ARB_arrays_of_arrays (none, GLSL only) */ +/* Reuse tokens from ARB_fragment_layer_viewport (none, GLSL only) */ +/* Reuse tokens from ARB_shader_image_size (none, GLSL only) */ +/* Reuse tokens from ARB_ES3_compatibility */ +/* reuse GL_COMPRESSED_RGB8_ETC2 */ +/* 
reuse GL_COMPRESSED_SRGB8_ETC2 */ +/* reuse GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 */ +/* reuse GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 */ +/* reuse GL_COMPRESSED_RGBA8_ETC2_EAC */ +/* reuse GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC */ +/* reuse GL_COMPRESSED_R11_EAC */ +/* reuse GL_COMPRESSED_SIGNED_R11_EAC */ +/* reuse GL_COMPRESSED_RG11_EAC */ +/* reuse GL_COMPRESSED_SIGNED_RG11_EAC */ +/* reuse GL_PRIMITIVE_RESTART_FIXED_INDEX */ +/* reuse GL_ANY_SAMPLES_PASSED_CONSERVATIVE */ +/* reuse GL_MAX_ELEMENT_INDEX */ +/* Reuse tokens from ARB_clear_buffer_object (none) */ +/* Reuse tokens from ARB_compute_shader */ +/* reuse GL_COMPUTE_SHADER */ +/* reuse GL_MAX_COMPUTE_UNIFORM_BLOCKS */ +/* reuse GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS */ +/* reuse GL_MAX_COMPUTE_IMAGE_UNIFORMS */ +/* reuse GL_MAX_COMPUTE_SHARED_MEMORY_SIZE */ +/* reuse GL_MAX_COMPUTE_UNIFORM_COMPONENTS */ +/* reuse GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS */ +/* reuse GL_MAX_COMPUTE_ATOMIC_COUNTERS */ +/* reuse GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS */ +/* reuse GL_MAX_COMPUTE_LOCAL_INVOCATIONS */ +/* reuse GL_MAX_COMPUTE_WORK_GROUP_COUNT */ +/* reuse GL_MAX_COMPUTE_WORK_GROUP_SIZE */ +/* reuse GL_COMPUTE_LOCAL_WORK_SIZE */ +/* reuse GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER */ +/* reuse GL_DISPATCH_INDIRECT_BUFFER */ +/* reuse GL_DISPATCH_INDIRECT_BUFFER_BINDING */ +/* Reuse tokens from ARB_copy_image (none) */ +/* Reuse tokens from KHR_debug */ +/* reuse GL_DEBUG_OUTPUT_SYNCHRONOUS */ +/* reuse GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH */ +/* reuse GL_DEBUG_CALLBACK_FUNCTION */ +/* reuse GL_DEBUG_CALLBACK_USER_PARAM */ +/* reuse GL_DEBUG_SOURCE_API */ +/* reuse GL_DEBUG_SOURCE_WINDOW_SYSTEM */ +/* reuse GL_DEBUG_SOURCE_SHADER_COMPILER */ +/* reuse GL_DEBUG_SOURCE_THIRD_PARTY */ +/* reuse GL_DEBUG_SOURCE_APPLICATION */ +/* reuse GL_DEBUG_SOURCE_OTHER */ +/* reuse GL_DEBUG_TYPE_ERROR */ +/* reuse GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR */ +/* reuse GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR */ +/* reuse GL_DEBUG_TYPE_PORTABILITY */ +/* reuse GL_DEBUG_TYPE_PERFORMANCE */ +/* reuse GL_DEBUG_TYPE_OTHER */ +/* reuse GL_MAX_DEBUG_MESSAGE_LENGTH */ +/* reuse GL_MAX_DEBUG_LOGGED_MESSAGES */ +/* reuse GL_DEBUG_LOGGED_MESSAGES */ +/* reuse GL_DEBUG_SEVERITY_HIGH */ +/* reuse GL_DEBUG_SEVERITY_MEDIUM */ +/* reuse GL_DEBUG_SEVERITY_LOW */ +/* reuse GL_DEBUG_TYPE_MARKER */ +/* reuse GL_DEBUG_TYPE_PUSH_GROUP */ +/* reuse GL_DEBUG_TYPE_POP_GROUP */ +/* reuse GL_DEBUG_SEVERITY_NOTIFICATION */ +/* reuse GL_MAX_DEBUG_GROUP_STACK_DEPTH */ +/* reuse GL_DEBUG_GROUP_STACK_DEPTH */ +/* reuse GL_BUFFER */ +/* reuse GL_SHADER */ +/* reuse GL_PROGRAM */ +/* reuse GL_QUERY */ +/* reuse GL_PROGRAM_PIPELINE */ +/* reuse GL_SAMPLER */ +/* reuse GL_DISPLAY_LIST */ +/* reuse GL_MAX_LABEL_LENGTH */ +/* reuse GL_DEBUG_OUTPUT */ +/* reuse GL_CONTEXT_FLAG_DEBUG_BIT */ +/* reuse GL_STACK_UNDERFLOW */ +/* reuse GL_STACK_OVERFLOW */ +/* Reuse tokens from ARB_explicit_uniform_location */ +/* reuse GL_MAX_UNIFORM_LOCATIONS */ +/* Reuse tokens from ARB_framebuffer_no_attachments */ +/* reuse GL_FRAMEBUFFER_DEFAULT_WIDTH */ +/* reuse GL_FRAMEBUFFER_DEFAULT_HEIGHT */ +/* reuse GL_FRAMEBUFFER_DEFAULT_LAYERS */ +/* reuse GL_FRAMEBUFFER_DEFAULT_SAMPLES */ +/* reuse GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS */ +/* reuse GL_MAX_FRAMEBUFFER_WIDTH */ +/* reuse GL_MAX_FRAMEBUFFER_HEIGHT */ +/* reuse GL_MAX_FRAMEBUFFER_LAYERS */ +/* reuse GL_MAX_FRAMEBUFFER_SAMPLES */ +/* Reuse tokens from ARB_internalformat_query2 */ +/* 
reuse GL_INTERNALFORMAT_SUPPORTED */ +/* reuse GL_INTERNALFORMAT_PREFERRED */ +/* reuse GL_INTERNALFORMAT_RED_SIZE */ +/* reuse GL_INTERNALFORMAT_GREEN_SIZE */ +/* reuse GL_INTERNALFORMAT_BLUE_SIZE */ +/* reuse GL_INTERNALFORMAT_ALPHA_SIZE */ +/* reuse GL_INTERNALFORMAT_DEPTH_SIZE */ +/* reuse GL_INTERNALFORMAT_STENCIL_SIZE */ +/* reuse GL_INTERNALFORMAT_SHARED_SIZE */ +/* reuse GL_INTERNALFORMAT_RED_TYPE */ +/* reuse GL_INTERNALFORMAT_GREEN_TYPE */ +/* reuse GL_INTERNALFORMAT_BLUE_TYPE */ +/* reuse GL_INTERNALFORMAT_ALPHA_TYPE */ +/* reuse GL_INTERNALFORMAT_DEPTH_TYPE */ +/* reuse GL_INTERNALFORMAT_STENCIL_TYPE */ +/* reuse GL_MAX_WIDTH */ +/* reuse GL_MAX_HEIGHT */ +/* reuse GL_MAX_DEPTH */ +/* reuse GL_MAX_LAYERS */ +/* reuse GL_MAX_COMBINED_DIMENSIONS */ +/* reuse GL_COLOR_COMPONENTS */ +/* reuse GL_DEPTH_COMPONENTS */ +/* reuse GL_STENCIL_COMPONENTS */ +/* reuse GL_COLOR_RENDERABLE */ +/* reuse GL_DEPTH_RENDERABLE */ +/* reuse GL_STENCIL_RENDERABLE */ +/* reuse GL_FRAMEBUFFER_RENDERABLE */ +/* reuse GL_FRAMEBUFFER_RENDERABLE_LAYERED */ +/* reuse GL_FRAMEBUFFER_BLEND */ +/* reuse GL_READ_PIXELS */ +/* reuse GL_READ_PIXELS_FORMAT */ +/* reuse GL_READ_PIXELS_TYPE */ +/* reuse GL_TEXTURE_IMAGE_FORMAT */ +/* reuse GL_TEXTURE_IMAGE_TYPE */ +/* reuse GL_GET_TEXTURE_IMAGE_FORMAT */ +/* reuse GL_GET_TEXTURE_IMAGE_TYPE */ +/* reuse GL_MIPMAP */ +/* reuse GL_MANUAL_GENERATE_MIPMAP */ +/* reuse GL_AUTO_GENERATE_MIPMAP */ +/* reuse GL_COLOR_ENCODING */ +/* reuse GL_SRGB_READ */ +/* reuse GL_SRGB_WRITE */ +/* reuse GL_FILTER */ +/* reuse GL_VERTEX_TEXTURE */ +/* reuse GL_TESS_CONTROL_TEXTURE */ +/* reuse GL_TESS_EVALUATION_TEXTURE */ +/* reuse GL_GEOMETRY_TEXTURE */ +/* reuse GL_FRAGMENT_TEXTURE */ +/* reuse GL_COMPUTE_TEXTURE */ +/* reuse GL_TEXTURE_SHADOW */ +/* reuse GL_TEXTURE_GATHER */ +/* reuse GL_TEXTURE_GATHER_SHADOW */ +/* reuse GL_SHADER_IMAGE_LOAD */ +/* reuse GL_SHADER_IMAGE_STORE */ +/* reuse GL_SHADER_IMAGE_ATOMIC */ +/* reuse GL_IMAGE_TEXEL_SIZE */ +/* reuse GL_IMAGE_COMPATIBILITY_CLASS */ +/* reuse GL_IMAGE_PIXEL_FORMAT */ +/* reuse GL_IMAGE_PIXEL_TYPE */ +/* reuse GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST */ +/* reuse GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST */ +/* reuse GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE */ +/* reuse GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE */ +/* reuse GL_TEXTURE_COMPRESSED_BLOCK_WIDTH */ +/* reuse GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT */ +/* reuse GL_TEXTURE_COMPRESSED_BLOCK_SIZE */ +/* reuse GL_CLEAR_BUFFER */ +/* reuse GL_TEXTURE_VIEW */ +/* reuse GL_VIEW_COMPATIBILITY_CLASS */ +/* reuse GL_FULL_SUPPORT */ +/* reuse GL_CAVEAT_SUPPORT */ +/* reuse GL_IMAGE_CLASS_4_X_32 */ +/* reuse GL_IMAGE_CLASS_2_X_32 */ +/* reuse GL_IMAGE_CLASS_1_X_32 */ +/* reuse GL_IMAGE_CLASS_4_X_16 */ +/* reuse GL_IMAGE_CLASS_2_X_16 */ +/* reuse GL_IMAGE_CLASS_1_X_16 */ +/* reuse GL_IMAGE_CLASS_4_X_8 */ +/* reuse GL_IMAGE_CLASS_2_X_8 */ +/* reuse GL_IMAGE_CLASS_1_X_8 */ +/* reuse GL_IMAGE_CLASS_11_11_10 */ +/* reuse GL_IMAGE_CLASS_10_10_10_2 */ +/* reuse GL_VIEW_CLASS_128_BITS */ +/* reuse GL_VIEW_CLASS_96_BITS */ +/* reuse GL_VIEW_CLASS_64_BITS */ +/* reuse GL_VIEW_CLASS_48_BITS */ +/* reuse GL_VIEW_CLASS_32_BITS */ +/* reuse GL_VIEW_CLASS_24_BITS */ +/* reuse GL_VIEW_CLASS_16_BITS */ +/* reuse GL_VIEW_CLASS_8_BITS */ +/* reuse GL_VIEW_CLASS_S3TC_DXT1_RGB */ +/* reuse GL_VIEW_CLASS_S3TC_DXT1_RGBA */ +/* reuse GL_VIEW_CLASS_S3TC_DXT3_RGBA */ +/* reuse GL_VIEW_CLASS_S3TC_DXT5_RGBA */ +/* reuse GL_VIEW_CLASS_RGTC1_RED */ +/* reuse GL_VIEW_CLASS_RGTC2_RG */ +/* reuse 
GL_VIEW_CLASS_BPTC_UNORM */ +/* reuse GL_VIEW_CLASS_BPTC_FLOAT */ +/* Reuse tokens from ARB_invalidate_subdata (none) */ +/* Reuse tokens from ARB_multi_draw_indirect (none) */ +/* Reuse tokens from ARB_program_interface_query */ +/* reuse GL_UNIFORM */ +/* reuse GL_UNIFORM_BLOCK */ +/* reuse GL_PROGRAM_INPUT */ +/* reuse GL_PROGRAM_OUTPUT */ +/* reuse GL_BUFFER_VARIABLE */ +/* reuse GL_SHADER_STORAGE_BLOCK */ +/* reuse GL_VERTEX_SUBROUTINE */ +/* reuse GL_TESS_CONTROL_SUBROUTINE */ +/* reuse GL_TESS_EVALUATION_SUBROUTINE */ +/* reuse GL_GEOMETRY_SUBROUTINE */ +/* reuse GL_FRAGMENT_SUBROUTINE */ +/* reuse GL_COMPUTE_SUBROUTINE */ +/* reuse GL_VERTEX_SUBROUTINE_UNIFORM */ +/* reuse GL_TESS_CONTROL_SUBROUTINE_UNIFORM */ +/* reuse GL_TESS_EVALUATION_SUBROUTINE_UNIFORM */ +/* reuse GL_GEOMETRY_SUBROUTINE_UNIFORM */ +/* reuse GL_FRAGMENT_SUBROUTINE_UNIFORM */ +/* reuse GL_COMPUTE_SUBROUTINE_UNIFORM */ +/* reuse GL_TRANSFORM_FEEDBACK_VARYING */ +/* reuse GL_ACTIVE_RESOURCES */ +/* reuse GL_MAX_NAME_LENGTH */ +/* reuse GL_MAX_NUM_ACTIVE_VARIABLES */ +/* reuse GL_MAX_NUM_COMPATIBLE_SUBROUTINES */ +/* reuse GL_NAME_LENGTH */ +/* reuse GL_TYPE */ +/* reuse GL_ARRAY_SIZE */ +/* reuse GL_OFFSET */ +/* reuse GL_BLOCK_INDEX */ +/* reuse GL_ARRAY_STRIDE */ +/* reuse GL_MATRIX_STRIDE */ +/* reuse GL_IS_ROW_MAJOR */ +/* reuse GL_ATOMIC_COUNTER_BUFFER_INDEX */ +/* reuse GL_BUFFER_BINDING */ +/* reuse GL_BUFFER_DATA_SIZE */ +/* reuse GL_NUM_ACTIVE_VARIABLES */ +/* reuse GL_ACTIVE_VARIABLES */ +/* reuse GL_REFERENCED_BY_VERTEX_SHADER */ +/* reuse GL_REFERENCED_BY_TESS_CONTROL_SHADER */ +/* reuse GL_REFERENCED_BY_TESS_EVALUATION_SHADER */ +/* reuse GL_REFERENCED_BY_GEOMETRY_SHADER */ +/* reuse GL_REFERENCED_BY_FRAGMENT_SHADER */ +/* reuse GL_REFERENCED_BY_COMPUTE_SHADER */ +/* reuse GL_TOP_LEVEL_ARRAY_SIZE */ +/* reuse GL_TOP_LEVEL_ARRAY_STRIDE */ +/* reuse GL_LOCATION */ +/* reuse GL_LOCATION_INDEX */ +/* reuse GL_IS_PER_PATCH */ +/* Reuse tokens from ARB_robust_buffer_access_behavior (none) */ +/* Reuse tokens from ARB_shader_storage_buffer_object */ +/* reuse GL_SHADER_STORAGE_BUFFER */ +/* reuse GL_SHADER_STORAGE_BUFFER_BINDING */ +/* reuse GL_SHADER_STORAGE_BUFFER_START */ +/* reuse GL_SHADER_STORAGE_BUFFER_SIZE */ +/* reuse GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS */ +/* reuse GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS */ +/* reuse GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS */ +/* reuse GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS */ +/* reuse GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS */ +/* reuse GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS */ +/* reuse GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS */ +/* reuse GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS */ +/* reuse GL_MAX_SHADER_STORAGE_BLOCK_SIZE */ +/* reuse GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT */ +/* reuse GL_SHADER_STORAGE_BARRIER_BIT */ +/* reuse GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES */ +/* Reuse tokens from ARB_stencil_texturing */ +/* reuse GL_DEPTH_STENCIL_TEXTURE_MODE */ +/* Reuse tokens from ARB_texture_buffer_range */ +/* reuse GL_TEXTURE_BUFFER_OFFSET */ +/* reuse GL_TEXTURE_BUFFER_SIZE */ +/* reuse GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT */ +/* Reuse tokens from ARB_texture_query_levels (none) */ +/* Reuse tokens from ARB_texture_storage_multisample (none) */ +/* Reuse tokens from ARB_texture_view */ +/* reuse GL_TEXTURE_VIEW_MIN_LEVEL */ +/* reuse GL_TEXTURE_VIEW_NUM_LEVELS */ +/* reuse GL_TEXTURE_VIEW_MIN_LAYER */ +/* reuse GL_TEXTURE_VIEW_NUM_LAYERS */ +/* reuse GL_TEXTURE_IMMUTABLE_LEVELS */ +/* Reuse tokens from ARB_vertex_attrib_binding */ +/* reuse 
GL_VERTEX_ATTRIB_BINDING */ +/* reuse GL_VERTEX_ATTRIB_RELATIVE_OFFSET */ +/* reuse GL_VERTEX_BINDING_DIVISOR */ +/* reuse GL_VERTEX_BINDING_OFFSET */ +/* reuse GL_VERTEX_BINDING_STRIDE */ +/* reuse GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET */ +/* reuse GL_MAX_VERTEX_ATTRIB_BINDINGS */ +#endif + +#ifndef GL_ARB_multitexture +#define GL_TEXTURE0_ARB 0x84C0 +#define GL_TEXTURE1_ARB 0x84C1 +#define GL_TEXTURE2_ARB 0x84C2 +#define GL_TEXTURE3_ARB 0x84C3 +#define GL_TEXTURE4_ARB 0x84C4 +#define GL_TEXTURE5_ARB 0x84C5 +#define GL_TEXTURE6_ARB 0x84C6 +#define GL_TEXTURE7_ARB 0x84C7 +#define GL_TEXTURE8_ARB 0x84C8 +#define GL_TEXTURE9_ARB 0x84C9 +#define GL_TEXTURE10_ARB 0x84CA +#define GL_TEXTURE11_ARB 0x84CB +#define GL_TEXTURE12_ARB 0x84CC +#define GL_TEXTURE13_ARB 0x84CD +#define GL_TEXTURE14_ARB 0x84CE +#define GL_TEXTURE15_ARB 0x84CF +#define GL_TEXTURE16_ARB 0x84D0 +#define GL_TEXTURE17_ARB 0x84D1 +#define GL_TEXTURE18_ARB 0x84D2 +#define GL_TEXTURE19_ARB 0x84D3 +#define GL_TEXTURE20_ARB 0x84D4 +#define GL_TEXTURE21_ARB 0x84D5 +#define GL_TEXTURE22_ARB 0x84D6 +#define GL_TEXTURE23_ARB 0x84D7 +#define GL_TEXTURE24_ARB 0x84D8 +#define GL_TEXTURE25_ARB 0x84D9 +#define GL_TEXTURE26_ARB 0x84DA +#define GL_TEXTURE27_ARB 0x84DB +#define GL_TEXTURE28_ARB 0x84DC +#define GL_TEXTURE29_ARB 0x84DD +#define GL_TEXTURE30_ARB 0x84DE +#define GL_TEXTURE31_ARB 0x84DF +#define GL_ACTIVE_TEXTURE_ARB 0x84E0 +#define GL_CLIENT_ACTIVE_TEXTURE_ARB 0x84E1 +#define GL_MAX_TEXTURE_UNITS_ARB 0x84E2 +#endif + +#ifndef GL_ARB_transpose_matrix +#define GL_TRANSPOSE_MODELVIEW_MATRIX_ARB 0x84E3 +#define GL_TRANSPOSE_PROJECTION_MATRIX_ARB 0x84E4 +#define GL_TRANSPOSE_TEXTURE_MATRIX_ARB 0x84E5 +#define GL_TRANSPOSE_COLOR_MATRIX_ARB 0x84E6 +#endif + +#ifndef GL_ARB_multisample +#define GL_MULTISAMPLE_ARB 0x809D +#define GL_SAMPLE_ALPHA_TO_COVERAGE_ARB 0x809E +#define GL_SAMPLE_ALPHA_TO_ONE_ARB 0x809F +#define GL_SAMPLE_COVERAGE_ARB 0x80A0 +#define GL_SAMPLE_BUFFERS_ARB 0x80A8 +#define GL_SAMPLES_ARB 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE_ARB 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT_ARB 0x80AB +#define GL_MULTISAMPLE_BIT_ARB 0x20000000 +#endif + +#ifndef GL_ARB_texture_env_add +#endif + +#ifndef GL_ARB_texture_cube_map +#define GL_NORMAL_MAP_ARB 0x8511 +#define GL_REFLECTION_MAP_ARB 0x8512 +#define GL_TEXTURE_CUBE_MAP_ARB 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP_ARB 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB 0x851A +#define GL_PROXY_TEXTURE_CUBE_MAP_ARB 0x851B +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB 0x851C +#endif + +#ifndef GL_ARB_texture_compression +#define GL_COMPRESSED_ALPHA_ARB 0x84E9 +#define GL_COMPRESSED_LUMINANCE_ARB 0x84EA +#define GL_COMPRESSED_LUMINANCE_ALPHA_ARB 0x84EB +#define GL_COMPRESSED_INTENSITY_ARB 0x84EC +#define GL_COMPRESSED_RGB_ARB 0x84ED +#define GL_COMPRESSED_RGBA_ARB 0x84EE +#define GL_TEXTURE_COMPRESSION_HINT_ARB 0x84EF +#define GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB 0x86A0 +#define GL_TEXTURE_COMPRESSED_ARB 0x86A1 +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS_ARB 0x86A3 +#endif + +#ifndef GL_ARB_texture_border_clamp +#define GL_CLAMP_TO_BORDER_ARB 0x812D +#endif + +#ifndef GL_ARB_point_parameters +#define GL_POINT_SIZE_MIN_ARB 0x8126 +#define GL_POINT_SIZE_MAX_ARB 0x8127 +#define 
GL_POINT_FADE_THRESHOLD_SIZE_ARB 0x8128 +#define GL_POINT_DISTANCE_ATTENUATION_ARB 0x8129 +#endif + +#ifndef GL_ARB_vertex_blend +#define GL_MAX_VERTEX_UNITS_ARB 0x86A4 +#define GL_ACTIVE_VERTEX_UNITS_ARB 0x86A5 +#define GL_WEIGHT_SUM_UNITY_ARB 0x86A6 +#define GL_VERTEX_BLEND_ARB 0x86A7 +#define GL_CURRENT_WEIGHT_ARB 0x86A8 +#define GL_WEIGHT_ARRAY_TYPE_ARB 0x86A9 +#define GL_WEIGHT_ARRAY_STRIDE_ARB 0x86AA +#define GL_WEIGHT_ARRAY_SIZE_ARB 0x86AB +#define GL_WEIGHT_ARRAY_POINTER_ARB 0x86AC +#define GL_WEIGHT_ARRAY_ARB 0x86AD +#define GL_MODELVIEW0_ARB 0x1700 +#define GL_MODELVIEW1_ARB 0x850A +#define GL_MODELVIEW2_ARB 0x8722 +#define GL_MODELVIEW3_ARB 0x8723 +#define GL_MODELVIEW4_ARB 0x8724 +#define GL_MODELVIEW5_ARB 0x8725 +#define GL_MODELVIEW6_ARB 0x8726 +#define GL_MODELVIEW7_ARB 0x8727 +#define GL_MODELVIEW8_ARB 0x8728 +#define GL_MODELVIEW9_ARB 0x8729 +#define GL_MODELVIEW10_ARB 0x872A +#define GL_MODELVIEW11_ARB 0x872B +#define GL_MODELVIEW12_ARB 0x872C +#define GL_MODELVIEW13_ARB 0x872D +#define GL_MODELVIEW14_ARB 0x872E +#define GL_MODELVIEW15_ARB 0x872F +#define GL_MODELVIEW16_ARB 0x8730 +#define GL_MODELVIEW17_ARB 0x8731 +#define GL_MODELVIEW18_ARB 0x8732 +#define GL_MODELVIEW19_ARB 0x8733 +#define GL_MODELVIEW20_ARB 0x8734 +#define GL_MODELVIEW21_ARB 0x8735 +#define GL_MODELVIEW22_ARB 0x8736 +#define GL_MODELVIEW23_ARB 0x8737 +#define GL_MODELVIEW24_ARB 0x8738 +#define GL_MODELVIEW25_ARB 0x8739 +#define GL_MODELVIEW26_ARB 0x873A +#define GL_MODELVIEW27_ARB 0x873B +#define GL_MODELVIEW28_ARB 0x873C +#define GL_MODELVIEW29_ARB 0x873D +#define GL_MODELVIEW30_ARB 0x873E +#define GL_MODELVIEW31_ARB 0x873F +#endif + +#ifndef GL_ARB_matrix_palette +#define GL_MATRIX_PALETTE_ARB 0x8840 +#define GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB 0x8841 +#define GL_MAX_PALETTE_MATRICES_ARB 0x8842 +#define GL_CURRENT_PALETTE_MATRIX_ARB 0x8843 +#define GL_MATRIX_INDEX_ARRAY_ARB 0x8844 +#define GL_CURRENT_MATRIX_INDEX_ARB 0x8845 +#define GL_MATRIX_INDEX_ARRAY_SIZE_ARB 0x8846 +#define GL_MATRIX_INDEX_ARRAY_TYPE_ARB 0x8847 +#define GL_MATRIX_INDEX_ARRAY_STRIDE_ARB 0x8848 +#define GL_MATRIX_INDEX_ARRAY_POINTER_ARB 0x8849 +#endif + +#ifndef GL_ARB_texture_env_combine +#define GL_COMBINE_ARB 0x8570 +#define GL_COMBINE_RGB_ARB 0x8571 +#define GL_COMBINE_ALPHA_ARB 0x8572 +#define GL_SOURCE0_RGB_ARB 0x8580 +#define GL_SOURCE1_RGB_ARB 0x8581 +#define GL_SOURCE2_RGB_ARB 0x8582 +#define GL_SOURCE0_ALPHA_ARB 0x8588 +#define GL_SOURCE1_ALPHA_ARB 0x8589 +#define GL_SOURCE2_ALPHA_ARB 0x858A +#define GL_OPERAND0_RGB_ARB 0x8590 +#define GL_OPERAND1_RGB_ARB 0x8591 +#define GL_OPERAND2_RGB_ARB 0x8592 +#define GL_OPERAND0_ALPHA_ARB 0x8598 +#define GL_OPERAND1_ALPHA_ARB 0x8599 +#define GL_OPERAND2_ALPHA_ARB 0x859A +#define GL_RGB_SCALE_ARB 0x8573 +#define GL_ADD_SIGNED_ARB 0x8574 +#define GL_INTERPOLATE_ARB 0x8575 +#define GL_SUBTRACT_ARB 0x84E7 +#define GL_CONSTANT_ARB 0x8576 +#define GL_PRIMARY_COLOR_ARB 0x8577 +#define GL_PREVIOUS_ARB 0x8578 +#endif + +#ifndef GL_ARB_texture_env_crossbar +#endif + +#ifndef GL_ARB_texture_env_dot3 +#define GL_DOT3_RGB_ARB 0x86AE +#define GL_DOT3_RGBA_ARB 0x86AF +#endif + +#ifndef GL_ARB_texture_mirrored_repeat +#define GL_MIRRORED_REPEAT_ARB 0x8370 +#endif + +#ifndef GL_ARB_depth_texture +#define GL_DEPTH_COMPONENT16_ARB 0x81A5 +#define GL_DEPTH_COMPONENT24_ARB 0x81A6 +#define GL_DEPTH_COMPONENT32_ARB 0x81A7 +#define GL_TEXTURE_DEPTH_SIZE_ARB 0x884A +#define GL_DEPTH_TEXTURE_MODE_ARB 0x884B +#endif + +#ifndef GL_ARB_shadow +#define GL_TEXTURE_COMPARE_MODE_ARB 0x884C +#define 
GL_TEXTURE_COMPARE_FUNC_ARB 0x884D +#define GL_COMPARE_R_TO_TEXTURE_ARB 0x884E +#endif + +#ifndef GL_ARB_shadow_ambient +#define GL_TEXTURE_COMPARE_FAIL_VALUE_ARB 0x80BF +#endif + +#ifndef GL_ARB_window_pos +#endif + +#ifndef GL_ARB_vertex_program +#define GL_COLOR_SUM_ARB 0x8458 +#define GL_VERTEX_PROGRAM_ARB 0x8620 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED_ARB 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE_ARB 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE_ARB 0x8625 +#define GL_CURRENT_VERTEX_ATTRIB_ARB 0x8626 +#define GL_PROGRAM_LENGTH_ARB 0x8627 +#define GL_PROGRAM_STRING_ARB 0x8628 +#define GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB 0x862E +#define GL_MAX_PROGRAM_MATRICES_ARB 0x862F +#define GL_CURRENT_MATRIX_STACK_DEPTH_ARB 0x8640 +#define GL_CURRENT_MATRIX_ARB 0x8641 +#define GL_VERTEX_PROGRAM_POINT_SIZE_ARB 0x8642 +#define GL_VERTEX_PROGRAM_TWO_SIDE_ARB 0x8643 +#define GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB 0x8645 +#define GL_PROGRAM_ERROR_POSITION_ARB 0x864B +#define GL_PROGRAM_BINDING_ARB 0x8677 +#define GL_MAX_VERTEX_ATTRIBS_ARB 0x8869 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED_ARB 0x886A +#define GL_PROGRAM_ERROR_STRING_ARB 0x8874 +#define GL_PROGRAM_FORMAT_ASCII_ARB 0x8875 +#define GL_PROGRAM_FORMAT_ARB 0x8876 +#define GL_PROGRAM_INSTRUCTIONS_ARB 0x88A0 +#define GL_MAX_PROGRAM_INSTRUCTIONS_ARB 0x88A1 +#define GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB 0x88A2 +#define GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB 0x88A3 +#define GL_PROGRAM_TEMPORARIES_ARB 0x88A4 +#define GL_MAX_PROGRAM_TEMPORARIES_ARB 0x88A5 +#define GL_PROGRAM_NATIVE_TEMPORARIES_ARB 0x88A6 +#define GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB 0x88A7 +#define GL_PROGRAM_PARAMETERS_ARB 0x88A8 +#define GL_MAX_PROGRAM_PARAMETERS_ARB 0x88A9 +#define GL_PROGRAM_NATIVE_PARAMETERS_ARB 0x88AA +#define GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB 0x88AB +#define GL_PROGRAM_ATTRIBS_ARB 0x88AC +#define GL_MAX_PROGRAM_ATTRIBS_ARB 0x88AD +#define GL_PROGRAM_NATIVE_ATTRIBS_ARB 0x88AE +#define GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB 0x88AF +#define GL_PROGRAM_ADDRESS_REGISTERS_ARB 0x88B0 +#define GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB 0x88B1 +#define GL_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB 0x88B2 +#define GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB 0x88B3 +#define GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB 0x88B4 +#define GL_MAX_PROGRAM_ENV_PARAMETERS_ARB 0x88B5 +#define GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB 0x88B6 +#define GL_TRANSPOSE_CURRENT_MATRIX_ARB 0x88B7 +#define GL_MATRIX0_ARB 0x88C0 +#define GL_MATRIX1_ARB 0x88C1 +#define GL_MATRIX2_ARB 0x88C2 +#define GL_MATRIX3_ARB 0x88C3 +#define GL_MATRIX4_ARB 0x88C4 +#define GL_MATRIX5_ARB 0x88C5 +#define GL_MATRIX6_ARB 0x88C6 +#define GL_MATRIX7_ARB 0x88C7 +#define GL_MATRIX8_ARB 0x88C8 +#define GL_MATRIX9_ARB 0x88C9 +#define GL_MATRIX10_ARB 0x88CA +#define GL_MATRIX11_ARB 0x88CB +#define GL_MATRIX12_ARB 0x88CC +#define GL_MATRIX13_ARB 0x88CD +#define GL_MATRIX14_ARB 0x88CE +#define GL_MATRIX15_ARB 0x88CF +#define GL_MATRIX16_ARB 0x88D0 +#define GL_MATRIX17_ARB 0x88D1 +#define GL_MATRIX18_ARB 0x88D2 +#define GL_MATRIX19_ARB 0x88D3 +#define GL_MATRIX20_ARB 0x88D4 +#define GL_MATRIX21_ARB 0x88D5 +#define GL_MATRIX22_ARB 0x88D6 +#define GL_MATRIX23_ARB 0x88D7 +#define GL_MATRIX24_ARB 0x88D8 +#define GL_MATRIX25_ARB 0x88D9 +#define GL_MATRIX26_ARB 0x88DA +#define GL_MATRIX27_ARB 0x88DB +#define GL_MATRIX28_ARB 0x88DC +#define GL_MATRIX29_ARB 0x88DD +#define GL_MATRIX30_ARB 0x88DE +#define GL_MATRIX31_ARB 0x88DF +#endif + +#ifndef GL_ARB_fragment_program +#define GL_FRAGMENT_PROGRAM_ARB 0x8804 
+#define GL_PROGRAM_ALU_INSTRUCTIONS_ARB 0x8805 +#define GL_PROGRAM_TEX_INSTRUCTIONS_ARB 0x8806 +#define GL_PROGRAM_TEX_INDIRECTIONS_ARB 0x8807 +#define GL_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB 0x8808 +#define GL_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB 0x8809 +#define GL_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB 0x880A +#define GL_MAX_PROGRAM_ALU_INSTRUCTIONS_ARB 0x880B +#define GL_MAX_PROGRAM_TEX_INSTRUCTIONS_ARB 0x880C +#define GL_MAX_PROGRAM_TEX_INDIRECTIONS_ARB 0x880D +#define GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB 0x880E +#define GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB 0x880F +#define GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB 0x8810 +#define GL_MAX_TEXTURE_COORDS_ARB 0x8871 +#define GL_MAX_TEXTURE_IMAGE_UNITS_ARB 0x8872 +#endif + +#ifndef GL_ARB_vertex_buffer_object +#define GL_BUFFER_SIZE_ARB 0x8764 +#define GL_BUFFER_USAGE_ARB 0x8765 +#define GL_ARRAY_BUFFER_ARB 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER_ARB 0x8893 +#define GL_ARRAY_BUFFER_BINDING_ARB 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB 0x8895 +#define GL_VERTEX_ARRAY_BUFFER_BINDING_ARB 0x8896 +#define GL_NORMAL_ARRAY_BUFFER_BINDING_ARB 0x8897 +#define GL_COLOR_ARRAY_BUFFER_BINDING_ARB 0x8898 +#define GL_INDEX_ARRAY_BUFFER_BINDING_ARB 0x8899 +#define GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB 0x889A +#define GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB 0x889B +#define GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB 0x889C +#define GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB 0x889D +#define GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB 0x889E +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB 0x889F +#define GL_READ_ONLY_ARB 0x88B8 +#define GL_WRITE_ONLY_ARB 0x88B9 +#define GL_READ_WRITE_ARB 0x88BA +#define GL_BUFFER_ACCESS_ARB 0x88BB +#define GL_BUFFER_MAPPED_ARB 0x88BC +#define GL_BUFFER_MAP_POINTER_ARB 0x88BD +#define GL_STREAM_DRAW_ARB 0x88E0 +#define GL_STREAM_READ_ARB 0x88E1 +#define GL_STREAM_COPY_ARB 0x88E2 +#define GL_STATIC_DRAW_ARB 0x88E4 +#define GL_STATIC_READ_ARB 0x88E5 +#define GL_STATIC_COPY_ARB 0x88E6 +#define GL_DYNAMIC_DRAW_ARB 0x88E8 +#define GL_DYNAMIC_READ_ARB 0x88E9 +#define GL_DYNAMIC_COPY_ARB 0x88EA +#endif + +#ifndef GL_ARB_occlusion_query +#define GL_QUERY_COUNTER_BITS_ARB 0x8864 +#define GL_CURRENT_QUERY_ARB 0x8865 +#define GL_QUERY_RESULT_ARB 0x8866 +#define GL_QUERY_RESULT_AVAILABLE_ARB 0x8867 +#define GL_SAMPLES_PASSED_ARB 0x8914 +#endif + +#ifndef GL_ARB_shader_objects +#define GL_PROGRAM_OBJECT_ARB 0x8B40 +#define GL_SHADER_OBJECT_ARB 0x8B48 +#define GL_OBJECT_TYPE_ARB 0x8B4E +#define GL_OBJECT_SUBTYPE_ARB 0x8B4F +#define GL_FLOAT_VEC2_ARB 0x8B50 +#define GL_FLOAT_VEC3_ARB 0x8B51 +#define GL_FLOAT_VEC4_ARB 0x8B52 +#define GL_INT_VEC2_ARB 0x8B53 +#define GL_INT_VEC3_ARB 0x8B54 +#define GL_INT_VEC4_ARB 0x8B55 +#define GL_BOOL_ARB 0x8B56 +#define GL_BOOL_VEC2_ARB 0x8B57 +#define GL_BOOL_VEC3_ARB 0x8B58 +#define GL_BOOL_VEC4_ARB 0x8B59 +#define GL_FLOAT_MAT2_ARB 0x8B5A +#define GL_FLOAT_MAT3_ARB 0x8B5B +#define GL_FLOAT_MAT4_ARB 0x8B5C +#define GL_SAMPLER_1D_ARB 0x8B5D +#define GL_SAMPLER_2D_ARB 0x8B5E +#define GL_SAMPLER_3D_ARB 0x8B5F +#define GL_SAMPLER_CUBE_ARB 0x8B60 +#define GL_SAMPLER_1D_SHADOW_ARB 0x8B61 +#define GL_SAMPLER_2D_SHADOW_ARB 0x8B62 +#define GL_SAMPLER_2D_RECT_ARB 0x8B63 +#define GL_SAMPLER_2D_RECT_SHADOW_ARB 0x8B64 +#define GL_OBJECT_DELETE_STATUS_ARB 0x8B80 +#define GL_OBJECT_COMPILE_STATUS_ARB 0x8B81 +#define GL_OBJECT_LINK_STATUS_ARB 0x8B82 +#define GL_OBJECT_VALIDATE_STATUS_ARB 0x8B83 +#define GL_OBJECT_INFO_LOG_LENGTH_ARB 0x8B84 +#define GL_OBJECT_ATTACHED_OBJECTS_ARB 0x8B85 +#define 
GL_OBJECT_ACTIVE_UNIFORMS_ARB 0x8B86 +#define GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH_ARB 0x8B87 +#define GL_OBJECT_SHADER_SOURCE_LENGTH_ARB 0x8B88 +#endif + +#ifndef GL_ARB_vertex_shader +#define GL_VERTEX_SHADER_ARB 0x8B31 +#define GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB 0x8B4A +#define GL_MAX_VARYING_FLOATS_ARB 0x8B4B +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB 0x8B4C +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB 0x8B4D +#define GL_OBJECT_ACTIVE_ATTRIBUTES_ARB 0x8B89 +#define GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB 0x8B8A +#endif + +#ifndef GL_ARB_fragment_shader +#define GL_FRAGMENT_SHADER_ARB 0x8B30 +#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB 0x8B49 +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB 0x8B8B +#endif + +#ifndef GL_ARB_shading_language_100 +#define GL_SHADING_LANGUAGE_VERSION_ARB 0x8B8C +#endif + +#ifndef GL_ARB_texture_non_power_of_two +#endif + +#ifndef GL_ARB_point_sprite +#define GL_POINT_SPRITE_ARB 0x8861 +#define GL_COORD_REPLACE_ARB 0x8862 +#endif + +#ifndef GL_ARB_fragment_program_shadow +#endif + +#ifndef GL_ARB_draw_buffers +#define GL_MAX_DRAW_BUFFERS_ARB 0x8824 +#define GL_DRAW_BUFFER0_ARB 0x8825 +#define GL_DRAW_BUFFER1_ARB 0x8826 +#define GL_DRAW_BUFFER2_ARB 0x8827 +#define GL_DRAW_BUFFER3_ARB 0x8828 +#define GL_DRAW_BUFFER4_ARB 0x8829 +#define GL_DRAW_BUFFER5_ARB 0x882A +#define GL_DRAW_BUFFER6_ARB 0x882B +#define GL_DRAW_BUFFER7_ARB 0x882C +#define GL_DRAW_BUFFER8_ARB 0x882D +#define GL_DRAW_BUFFER9_ARB 0x882E +#define GL_DRAW_BUFFER10_ARB 0x882F +#define GL_DRAW_BUFFER11_ARB 0x8830 +#define GL_DRAW_BUFFER12_ARB 0x8831 +#define GL_DRAW_BUFFER13_ARB 0x8832 +#define GL_DRAW_BUFFER14_ARB 0x8833 +#define GL_DRAW_BUFFER15_ARB 0x8834 +#endif + +#ifndef GL_ARB_texture_rectangle +#define GL_TEXTURE_RECTANGLE_ARB 0x84F5 +#define GL_TEXTURE_BINDING_RECTANGLE_ARB 0x84F6 +#define GL_PROXY_TEXTURE_RECTANGLE_ARB 0x84F7 +#define GL_MAX_RECTANGLE_TEXTURE_SIZE_ARB 0x84F8 +#endif + +#ifndef GL_ARB_color_buffer_float +#define GL_RGBA_FLOAT_MODE_ARB 0x8820 +#define GL_CLAMP_VERTEX_COLOR_ARB 0x891A +#define GL_CLAMP_FRAGMENT_COLOR_ARB 0x891B +#define GL_CLAMP_READ_COLOR_ARB 0x891C +#define GL_FIXED_ONLY_ARB 0x891D +#endif + +#ifndef GL_ARB_half_float_pixel +#define GL_HALF_FLOAT_ARB 0x140B +#endif + +#ifndef GL_ARB_texture_float +#define GL_TEXTURE_RED_TYPE_ARB 0x8C10 +#define GL_TEXTURE_GREEN_TYPE_ARB 0x8C11 +#define GL_TEXTURE_BLUE_TYPE_ARB 0x8C12 +#define GL_TEXTURE_ALPHA_TYPE_ARB 0x8C13 +#define GL_TEXTURE_LUMINANCE_TYPE_ARB 0x8C14 +#define GL_TEXTURE_INTENSITY_TYPE_ARB 0x8C15 +#define GL_TEXTURE_DEPTH_TYPE_ARB 0x8C16 +#define GL_UNSIGNED_NORMALIZED_ARB 0x8C17 +#define GL_RGBA32F_ARB 0x8814 +#define GL_RGB32F_ARB 0x8815 +#define GL_ALPHA32F_ARB 0x8816 +#define GL_INTENSITY32F_ARB 0x8817 +#define GL_LUMINANCE32F_ARB 0x8818 +#define GL_LUMINANCE_ALPHA32F_ARB 0x8819 +#define GL_RGBA16F_ARB 0x881A +#define GL_RGB16F_ARB 0x881B +#define GL_ALPHA16F_ARB 0x881C +#define GL_INTENSITY16F_ARB 0x881D +#define GL_LUMINANCE16F_ARB 0x881E +#define GL_LUMINANCE_ALPHA16F_ARB 0x881F +#endif + +#ifndef GL_ARB_pixel_buffer_object +#define GL_PIXEL_PACK_BUFFER_ARB 0x88EB +#define GL_PIXEL_UNPACK_BUFFER_ARB 0x88EC +#define GL_PIXEL_PACK_BUFFER_BINDING_ARB 0x88ED +#define GL_PIXEL_UNPACK_BUFFER_BINDING_ARB 0x88EF +#endif + +#ifndef GL_ARB_depth_buffer_float +#define GL_DEPTH_COMPONENT32F 0x8CAC +#define GL_DEPTH32F_STENCIL8 0x8CAD +#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD +#endif + +#ifndef GL_ARB_draw_instanced +#endif + +#ifndef GL_ARB_framebuffer_object +#define 
GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210 +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211 +#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212 +#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213 +#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214 +#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215 +#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216 +#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217 +#define GL_FRAMEBUFFER_DEFAULT 0x8218 +#define GL_FRAMEBUFFER_UNDEFINED 0x8219 +#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_DEPTH_STENCIL 0x84F9 +#define GL_UNSIGNED_INT_24_8 0x84FA +#define GL_DEPTH24_STENCIL8 0x88F0 +#define GL_TEXTURE_STENCIL_SIZE 0x88F1 +#define GL_TEXTURE_RED_TYPE 0x8C10 +#define GL_TEXTURE_GREEN_TYPE 0x8C11 +#define GL_TEXTURE_BLUE_TYPE 0x8C12 +#define GL_TEXTURE_ALPHA_TYPE 0x8C13 +#define GL_TEXTURE_DEPTH_TYPE 0x8C16 +#define GL_UNSIGNED_NORMALIZED 0x8C17 +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_DRAW_FRAMEBUFFER_BINDING GL_FRAMEBUFFER_BINDING +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_READ_FRAMEBUFFER 0x8CA8 +#define GL_DRAW_FRAMEBUFFER 0x8CA9 +#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA +#define GL_RENDERBUFFER_SAMPLES 0x8CAB +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4 +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER 0x8CDB +#define GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER 0x8CDC +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_MAX_COLOR_ATTACHMENTS 0x8CDF +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_COLOR_ATTACHMENT1 0x8CE1 +#define GL_COLOR_ATTACHMENT2 0x8CE2 +#define GL_COLOR_ATTACHMENT3 0x8CE3 +#define GL_COLOR_ATTACHMENT4 0x8CE4 +#define GL_COLOR_ATTACHMENT5 0x8CE5 +#define GL_COLOR_ATTACHMENT6 0x8CE6 +#define GL_COLOR_ATTACHMENT7 0x8CE7 +#define GL_COLOR_ATTACHMENT8 0x8CE8 +#define GL_COLOR_ATTACHMENT9 0x8CE9 +#define GL_COLOR_ATTACHMENT10 0x8CEA +#define GL_COLOR_ATTACHMENT11 0x8CEB +#define GL_COLOR_ATTACHMENT12 0x8CEC +#define GL_COLOR_ATTACHMENT13 0x8CED +#define GL_COLOR_ATTACHMENT14 0x8CEE +#define GL_COLOR_ATTACHMENT15 0x8CEF +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_FRAMEBUFFER 0x8D40 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define GL_STENCIL_INDEX1 0x8D46 +#define GL_STENCIL_INDEX4 0x8D47 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_STENCIL_INDEX16 0x8D49 +#define GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56 +#define GL_MAX_SAMPLES 0x8D57 +#define GL_INDEX 0x8222 +#define GL_TEXTURE_LUMINANCE_TYPE 0x8C14 +#define GL_TEXTURE_INTENSITY_TYPE 0x8C15 +#endif + +#ifndef GL_ARB_framebuffer_sRGB +#define GL_FRAMEBUFFER_SRGB 0x8DB9 +#endif + +#ifndef GL_ARB_geometry_shader4 +#define GL_LINES_ADJACENCY_ARB 0x000A +#define 
GL_LINE_STRIP_ADJACENCY_ARB 0x000B +#define GL_TRIANGLES_ADJACENCY_ARB 0x000C +#define GL_TRIANGLE_STRIP_ADJACENCY_ARB 0x000D +#define GL_PROGRAM_POINT_SIZE_ARB 0x8642 +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_ARB 0x8C29 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_ARB 0x8DA7 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_ARB 0x8DA8 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_ARB 0x8DA9 +#define GL_GEOMETRY_SHADER_ARB 0x8DD9 +#define GL_GEOMETRY_VERTICES_OUT_ARB 0x8DDA +#define GL_GEOMETRY_INPUT_TYPE_ARB 0x8DDB +#define GL_GEOMETRY_OUTPUT_TYPE_ARB 0x8DDC +#define GL_MAX_GEOMETRY_VARYING_COMPONENTS_ARB 0x8DDD +#define GL_MAX_VERTEX_VARYING_COMPONENTS_ARB 0x8DDE +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_ARB 0x8DDF +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_ARB 0x8DE0 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_ARB 0x8DE1 +/* reuse GL_MAX_VARYING_COMPONENTS */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER */ +#endif + +#ifndef GL_ARB_half_float_vertex +#define GL_HALF_FLOAT 0x140B +#endif + +#ifndef GL_ARB_instanced_arrays +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ARB 0x88FE +#endif + +#ifndef GL_ARB_map_buffer_range +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020 +#endif + +#ifndef GL_ARB_texture_buffer_object +#define GL_TEXTURE_BUFFER_ARB 0x8C2A +#define GL_MAX_TEXTURE_BUFFER_SIZE_ARB 0x8C2B +#define GL_TEXTURE_BINDING_BUFFER_ARB 0x8C2C +#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_ARB 0x8C2D +#define GL_TEXTURE_BUFFER_FORMAT_ARB 0x8C2E +#endif + +#ifndef GL_ARB_texture_compression_rgtc +#define GL_COMPRESSED_RED_RGTC1 0x8DBB +#define GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC +#define GL_COMPRESSED_RG_RGTC2 0x8DBD +#define GL_COMPRESSED_SIGNED_RG_RGTC2 0x8DBE +#endif + +#ifndef GL_ARB_texture_rg +#define GL_RG 0x8227 +#define GL_RG_INTEGER 0x8228 +#define GL_R8 0x8229 +#define GL_R16 0x822A +#define GL_RG8 0x822B +#define GL_RG16 0x822C +#define GL_R16F 0x822D +#define GL_R32F 0x822E +#define GL_RG16F 0x822F +#define GL_RG32F 0x8230 +#define GL_R8I 0x8231 +#define GL_R8UI 0x8232 +#define GL_R16I 0x8233 +#define GL_R16UI 0x8234 +#define GL_R32I 0x8235 +#define GL_R32UI 0x8236 +#define GL_RG8I 0x8237 +#define GL_RG8UI 0x8238 +#define GL_RG16I 0x8239 +#define GL_RG16UI 0x823A +#define GL_RG32I 0x823B +#define GL_RG32UI 0x823C +#endif + +#ifndef GL_ARB_vertex_array_object +#define GL_VERTEX_ARRAY_BINDING 0x85B5 +#endif + +#ifndef GL_ARB_uniform_buffer_object +#define GL_UNIFORM_BUFFER 0x8A11 +#define GL_UNIFORM_BUFFER_BINDING 0x8A28 +#define GL_UNIFORM_BUFFER_START 0x8A29 +#define GL_UNIFORM_BUFFER_SIZE 0x8A2A +#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B +#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS 0x8A2C +#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D +#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E +#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F +#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30 +#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31 +#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS 0x8A32 +#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33 +#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34 +#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35 +#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36 +#define GL_UNIFORM_TYPE 0x8A37 +#define GL_UNIFORM_SIZE 0x8A38 +#define GL_UNIFORM_NAME_LENGTH 0x8A39 +#define GL_UNIFORM_BLOCK_INDEX 0x8A3A +#define GL_UNIFORM_OFFSET 0x8A3B +#define 
GL_UNIFORM_ARRAY_STRIDE 0x8A3C +#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D +#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E +#define GL_UNIFORM_BLOCK_BINDING 0x8A3F +#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40 +#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER 0x8A45 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46 +#define GL_INVALID_INDEX 0xFFFFFFFFu +#endif + +#ifndef GL_ARB_compatibility +/* ARB_compatibility just defines tokens from core 3.0 */ +#endif + +#ifndef GL_ARB_copy_buffer +#define GL_COPY_READ_BUFFER_BINDING 0x8F36 +#define GL_COPY_READ_BUFFER GL_COPY_READ_BUFFER_BINDING +#define GL_COPY_WRITE_BUFFER_BINDING 0x8F37 +#define GL_COPY_WRITE_BUFFER GL_COPY_WRITE_BUFFER_BINDING +#endif + +#ifndef GL_ARB_shader_texture_lod +#endif + +#ifndef GL_ARB_depth_clamp +#define GL_DEPTH_CLAMP 0x864F +#endif + +#ifndef GL_ARB_draw_elements_base_vertex +#endif + +#ifndef GL_ARB_fragment_coord_conventions +#endif + +#ifndef GL_ARB_provoking_vertex +#define GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION 0x8E4C +#define GL_FIRST_VERTEX_CONVENTION 0x8E4D +#define GL_LAST_VERTEX_CONVENTION 0x8E4E +#define GL_PROVOKING_VERTEX 0x8E4F +#endif + +#ifndef GL_ARB_seamless_cube_map +#define GL_TEXTURE_CUBE_MAP_SEAMLESS 0x884F +#endif + +#ifndef GL_ARB_sync +#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111 +#define GL_OBJECT_TYPE 0x9112 +#define GL_SYNC_CONDITION 0x9113 +#define GL_SYNC_STATUS 0x9114 +#define GL_SYNC_FLAGS 0x9115 +#define GL_SYNC_FENCE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117 +#define GL_UNSIGNALED 0x9118 +#define GL_SIGNALED 0x9119 +#define GL_ALREADY_SIGNALED 0x911A +#define GL_TIMEOUT_EXPIRED 0x911B +#define GL_CONDITION_SATISFIED 0x911C +#define GL_WAIT_FAILED 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001 +#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull +#endif + +#ifndef GL_ARB_texture_multisample +#define GL_SAMPLE_POSITION 0x8E50 +#define GL_SAMPLE_MASK 0x8E51 +#define GL_SAMPLE_MASK_VALUE 0x8E52 +#define GL_MAX_SAMPLE_MASK_WORDS 0x8E59 +#define GL_TEXTURE_2D_MULTISAMPLE 0x9100 +#define GL_PROXY_TEXTURE_2D_MULTISAMPLE 0x9101 +#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102 +#define GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9103 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE 0x9104 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY 0x9105 +#define GL_TEXTURE_SAMPLES 0x9106 +#define GL_TEXTURE_FIXED_SAMPLE_LOCATIONS 0x9107 +#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D +#define GL_MAX_COLOR_TEXTURE_SAMPLES 0x910E +#define GL_MAX_DEPTH_TEXTURE_SAMPLES 0x910F +#define GL_MAX_INTEGER_SAMPLES 0x9110 +#endif + +#ifndef GL_ARB_vertex_array_bgra +/* reuse GL_BGRA */ +#endif + +#ifndef GL_ARB_draw_buffers_blend +#endif + +#ifndef GL_ARB_sample_shading +#define GL_SAMPLE_SHADING_ARB 0x8C36 +#define GL_MIN_SAMPLE_SHADING_VALUE_ARB 0x8C37 +#endif + +#ifndef GL_ARB_texture_cube_map_array +#define GL_TEXTURE_CUBE_MAP_ARRAY_ARB 0x9009 +#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_ARB 0x900A +#define GL_PROXY_TEXTURE_CUBE_MAP_ARRAY_ARB 0x900B +#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 
0x900D +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F +#endif + +#ifndef GL_ARB_texture_gather +#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET_ARB 0x8E5E +#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_ARB 0x8E5F +#define GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB 0x8F9F +#endif + +#ifndef GL_ARB_texture_query_lod +#endif + +#ifndef GL_ARB_shading_language_include +#define GL_SHADER_INCLUDE_ARB 0x8DAE +#define GL_NAMED_STRING_LENGTH_ARB 0x8DE9 +#define GL_NAMED_STRING_TYPE_ARB 0x8DEA +#endif + +#ifndef GL_ARB_texture_compression_bptc +#define GL_COMPRESSED_RGBA_BPTC_UNORM_ARB 0x8E8C +#define GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB 0x8E8D +#define GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB 0x8E8E +#define GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB 0x8E8F +#endif + +#ifndef GL_ARB_blend_func_extended +#define GL_SRC1_COLOR 0x88F9 +/* reuse GL_SRC1_ALPHA */ +#define GL_ONE_MINUS_SRC1_COLOR 0x88FA +#define GL_ONE_MINUS_SRC1_ALPHA 0x88FB +#define GL_MAX_DUAL_SOURCE_DRAW_BUFFERS 0x88FC +#endif + +#ifndef GL_ARB_explicit_attrib_location +#endif + +#ifndef GL_ARB_occlusion_query2 +#define GL_ANY_SAMPLES_PASSED 0x8C2F +#endif + +#ifndef GL_ARB_sampler_objects +#define GL_SAMPLER_BINDING 0x8919 +#endif + +#ifndef GL_ARB_shader_bit_encoding +#endif + +#ifndef GL_ARB_texture_rgb10_a2ui +#define GL_RGB10_A2UI 0x906F +#endif + +#ifndef GL_ARB_texture_swizzle +#define GL_TEXTURE_SWIZZLE_R 0x8E42 +#define GL_TEXTURE_SWIZZLE_G 0x8E43 +#define GL_TEXTURE_SWIZZLE_B 0x8E44 +#define GL_TEXTURE_SWIZZLE_A 0x8E45 +#define GL_TEXTURE_SWIZZLE_RGBA 0x8E46 +#endif + +#ifndef GL_ARB_timer_query +#define GL_TIME_ELAPSED 0x88BF +#define GL_TIMESTAMP 0x8E28 +#endif + +#ifndef GL_ARB_vertex_type_2_10_10_10_rev +/* reuse GL_UNSIGNED_INT_2_10_10_10_REV */ +#define GL_INT_2_10_10_10_REV 0x8D9F +#endif + +#ifndef GL_ARB_draw_indirect +#define GL_DRAW_INDIRECT_BUFFER 0x8F3F +#define GL_DRAW_INDIRECT_BUFFER_BINDING 0x8F43 +#endif + +#ifndef GL_ARB_gpu_shader5 +#define GL_GEOMETRY_SHADER_INVOCATIONS 0x887F +#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS 0x8E5A +#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET 0x8E5B +#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET 0x8E5C +#define GL_FRAGMENT_INTERPOLATION_OFFSET_BITS 0x8E5D +/* reuse GL_MAX_VERTEX_STREAMS */ +#endif + +#ifndef GL_ARB_gpu_shader_fp64 +/* reuse GL_DOUBLE */ +#define GL_DOUBLE_VEC2 0x8FFC +#define GL_DOUBLE_VEC3 0x8FFD +#define GL_DOUBLE_VEC4 0x8FFE +#define GL_DOUBLE_MAT2 0x8F46 +#define GL_DOUBLE_MAT3 0x8F47 +#define GL_DOUBLE_MAT4 0x8F48 +#define GL_DOUBLE_MAT2x3 0x8F49 +#define GL_DOUBLE_MAT2x4 0x8F4A +#define GL_DOUBLE_MAT3x2 0x8F4B +#define GL_DOUBLE_MAT3x4 0x8F4C +#define GL_DOUBLE_MAT4x2 0x8F4D +#define GL_DOUBLE_MAT4x3 0x8F4E +#endif + +#ifndef GL_ARB_shader_subroutine +#define GL_ACTIVE_SUBROUTINES 0x8DE5 +#define GL_ACTIVE_SUBROUTINE_UNIFORMS 0x8DE6 +#define GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS 0x8E47 +#define GL_ACTIVE_SUBROUTINE_MAX_LENGTH 0x8E48 +#define GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH 0x8E49 +#define GL_MAX_SUBROUTINES 0x8DE7 +#define GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS 0x8DE8 +#define GL_NUM_COMPATIBLE_SUBROUTINES 0x8E4A +#define GL_COMPATIBLE_SUBROUTINES 0x8E4B +/* reuse GL_UNIFORM_SIZE */ +/* reuse GL_UNIFORM_NAME_LENGTH */ +#endif + +#ifndef GL_ARB_tessellation_shader +#define GL_PATCHES 0x000E +#define GL_PATCH_VERTICES 0x8E72 +#define GL_PATCH_DEFAULT_INNER_LEVEL 0x8E73 +#define GL_PATCH_DEFAULT_OUTER_LEVEL 0x8E74 +#define GL_TESS_CONTROL_OUTPUT_VERTICES 0x8E75 +#define GL_TESS_GEN_MODE 
0x8E76 +#define GL_TESS_GEN_SPACING 0x8E77 +#define GL_TESS_GEN_VERTEX_ORDER 0x8E78 +#define GL_TESS_GEN_POINT_MODE 0x8E79 +/* reuse GL_TRIANGLES */ +/* reuse GL_QUADS */ +#define GL_ISOLINES 0x8E7A +/* reuse GL_EQUAL */ +#define GL_FRACTIONAL_ODD 0x8E7B +#define GL_FRACTIONAL_EVEN 0x8E7C +/* reuse GL_CCW */ +/* reuse GL_CW */ +#define GL_MAX_PATCH_VERTICES 0x8E7D +#define GL_MAX_TESS_GEN_LEVEL 0x8E7E +#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E7F +#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E80 +#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS 0x8E81 +#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS 0x8E82 +#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS 0x8E83 +#define GL_MAX_TESS_PATCH_COMPONENTS 0x8E84 +#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS 0x8E85 +#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS 0x8E86 +#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS 0x8E89 +#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS 0x8E8A +#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS 0x886C +#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS 0x886D +#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E1E +#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E1F +#define GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER 0x84F0 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER 0x84F1 +#define GL_TESS_EVALUATION_SHADER 0x8E87 +#define GL_TESS_CONTROL_SHADER 0x8E88 +#endif + +#ifndef GL_ARB_texture_buffer_object_rgb32 +/* reuse GL_RGB32F */ +/* reuse GL_RGB32UI */ +/* reuse GL_RGB32I */ +#endif + +#ifndef GL_ARB_transform_feedback2 +#define GL_TRANSFORM_FEEDBACK 0x8E22 +#define GL_TRANSFORM_FEEDBACK_PAUSED 0x8E23 +#define GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED GL_TRANSFORM_FEEDBACK_PAUSED +#define GL_TRANSFORM_FEEDBACK_ACTIVE 0x8E24 +#define GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE GL_TRANSFORM_FEEDBACK_ACTIVE +#define GL_TRANSFORM_FEEDBACK_BINDING 0x8E25 +#endif + +#ifndef GL_ARB_transform_feedback3 +#define GL_MAX_TRANSFORM_FEEDBACK_BUFFERS 0x8E70 +#define GL_MAX_VERTEX_STREAMS 0x8E71 +#endif + +#ifndef GL_ARB_ES2_compatibility +#define GL_FIXED 0x140C +#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B +#define GL_LOW_FLOAT 0x8DF0 +#define GL_MEDIUM_FLOAT 0x8DF1 +#define GL_HIGH_FLOAT 0x8DF2 +#define GL_LOW_INT 0x8DF3 +#define GL_MEDIUM_INT 0x8DF4 +#define GL_HIGH_INT 0x8DF5 +#define GL_SHADER_COMPILER 0x8DFA +#define GL_SHADER_BINARY_FORMATS 0x8DF8 +#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9 +#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB +#define GL_MAX_VARYING_VECTORS 0x8DFC +#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD +#define GL_RGB565 0x8D62 +#endif + +#ifndef GL_ARB_get_program_binary +#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257 +#define GL_PROGRAM_BINARY_LENGTH 0x8741 +#define GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE +#define GL_PROGRAM_BINARY_FORMATS 0x87FF +#endif + +#ifndef GL_ARB_separate_shader_objects +#define GL_VERTEX_SHADER_BIT 0x00000001 +#define GL_FRAGMENT_SHADER_BIT 0x00000002 +#define GL_GEOMETRY_SHADER_BIT 0x00000004 +#define GL_TESS_CONTROL_SHADER_BIT 0x00000008 +#define GL_TESS_EVALUATION_SHADER_BIT 0x00000010 +#define GL_ALL_SHADER_BITS 0xFFFFFFFF +#define GL_PROGRAM_SEPARABLE 0x8258 +#define GL_ACTIVE_PROGRAM 0x8259 +#define GL_PROGRAM_PIPELINE_BINDING 0x825A +#endif + +#ifndef GL_ARB_shader_precision +#endif + +#ifndef GL_ARB_vertex_attrib_64bit +/* reuse GL_RGB32I */ +/* reuse GL_DOUBLE_VEC2 */ +/* reuse GL_DOUBLE_VEC3 */ +/* reuse GL_DOUBLE_VEC4 */ +/* reuse GL_DOUBLE_MAT2 */ +/* reuse 
GL_DOUBLE_MAT3 */ +/* reuse GL_DOUBLE_MAT4 */ +/* reuse GL_DOUBLE_MAT2x3 */ +/* reuse GL_DOUBLE_MAT2x4 */ +/* reuse GL_DOUBLE_MAT3x2 */ +/* reuse GL_DOUBLE_MAT3x4 */ +/* reuse GL_DOUBLE_MAT4x2 */ +/* reuse GL_DOUBLE_MAT4x3 */ +#endif + +#ifndef GL_ARB_viewport_array +/* reuse GL_SCISSOR_BOX */ +/* reuse GL_VIEWPORT */ +/* reuse GL_DEPTH_RANGE */ +/* reuse GL_SCISSOR_TEST */ +#define GL_MAX_VIEWPORTS 0x825B +#define GL_VIEWPORT_SUBPIXEL_BITS 0x825C +#define GL_VIEWPORT_BOUNDS_RANGE 0x825D +#define GL_LAYER_PROVOKING_VERTEX 0x825E +#define GL_VIEWPORT_INDEX_PROVOKING_VERTEX 0x825F +#define GL_UNDEFINED_VERTEX 0x8260 +/* reuse GL_FIRST_VERTEX_CONVENTION */ +/* reuse GL_LAST_VERTEX_CONVENTION */ +/* reuse GL_PROVOKING_VERTEX */ +#endif + +#ifndef GL_ARB_cl_event +#define GL_SYNC_CL_EVENT_ARB 0x8240 +#define GL_SYNC_CL_EVENT_COMPLETE_ARB 0x8241 +#endif + +#ifndef GL_ARB_debug_output +#define GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB 0x8242 +#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_ARB 0x8243 +#define GL_DEBUG_CALLBACK_FUNCTION_ARB 0x8244 +#define GL_DEBUG_CALLBACK_USER_PARAM_ARB 0x8245 +#define GL_DEBUG_SOURCE_API_ARB 0x8246 +#define GL_DEBUG_SOURCE_WINDOW_SYSTEM_ARB 0x8247 +#define GL_DEBUG_SOURCE_SHADER_COMPILER_ARB 0x8248 +#define GL_DEBUG_SOURCE_THIRD_PARTY_ARB 0x8249 +#define GL_DEBUG_SOURCE_APPLICATION_ARB 0x824A +#define GL_DEBUG_SOURCE_OTHER_ARB 0x824B +#define GL_DEBUG_TYPE_ERROR_ARB 0x824C +#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_ARB 0x824D +#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_ARB 0x824E +#define GL_DEBUG_TYPE_PORTABILITY_ARB 0x824F +#define GL_DEBUG_TYPE_PERFORMANCE_ARB 0x8250 +#define GL_DEBUG_TYPE_OTHER_ARB 0x8251 +#define GL_MAX_DEBUG_MESSAGE_LENGTH_ARB 0x9143 +#define GL_MAX_DEBUG_LOGGED_MESSAGES_ARB 0x9144 +#define GL_DEBUG_LOGGED_MESSAGES_ARB 0x9145 +#define GL_DEBUG_SEVERITY_HIGH_ARB 0x9146 +#define GL_DEBUG_SEVERITY_MEDIUM_ARB 0x9147 +#define GL_DEBUG_SEVERITY_LOW_ARB 0x9148 +#endif + +#ifndef GL_ARB_robustness +/* reuse GL_NO_ERROR */ +#define GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB 0x00000004 +#define GL_LOSE_CONTEXT_ON_RESET_ARB 0x8252 +#define GL_GUILTY_CONTEXT_RESET_ARB 0x8253 +#define GL_INNOCENT_CONTEXT_RESET_ARB 0x8254 +#define GL_UNKNOWN_CONTEXT_RESET_ARB 0x8255 +#define GL_RESET_NOTIFICATION_STRATEGY_ARB 0x8256 +#define GL_NO_RESET_NOTIFICATION_ARB 0x8261 +#endif + +#ifndef GL_ARB_shader_stencil_export +#endif + +#ifndef GL_ARB_base_instance +#endif + +#ifndef GL_ARB_shading_language_420pack +#endif + +#ifndef GL_ARB_transform_feedback_instanced +#endif + +#ifndef GL_ARB_compressed_texture_pixel_storage +#define GL_UNPACK_COMPRESSED_BLOCK_WIDTH 0x9127 +#define GL_UNPACK_COMPRESSED_BLOCK_HEIGHT 0x9128 +#define GL_UNPACK_COMPRESSED_BLOCK_DEPTH 0x9129 +#define GL_UNPACK_COMPRESSED_BLOCK_SIZE 0x912A +#define GL_PACK_COMPRESSED_BLOCK_WIDTH 0x912B +#define GL_PACK_COMPRESSED_BLOCK_HEIGHT 0x912C +#define GL_PACK_COMPRESSED_BLOCK_DEPTH 0x912D +#define GL_PACK_COMPRESSED_BLOCK_SIZE 0x912E +#endif + +#ifndef GL_ARB_conservative_depth +#endif + +#ifndef GL_ARB_internalformat_query +#define GL_NUM_SAMPLE_COUNTS 0x9380 +#endif + +#ifndef GL_ARB_map_buffer_alignment +#define GL_MIN_MAP_BUFFER_ALIGNMENT 0x90BC +#endif + +#ifndef GL_ARB_shader_atomic_counters +#define GL_ATOMIC_COUNTER_BUFFER 0x92C0 +#define GL_ATOMIC_COUNTER_BUFFER_BINDING 0x92C1 +#define GL_ATOMIC_COUNTER_BUFFER_START 0x92C2 +#define GL_ATOMIC_COUNTER_BUFFER_SIZE 0x92C3 +#define GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE 0x92C4 +#define GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS 0x92C5 +#define 
GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES 0x92C6 +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER 0x92C7 +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER 0x92C8 +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER 0x92C9 +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER 0x92CA +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER 0x92CB +#define GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS 0x92CC +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS 0x92CD +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS 0x92CE +#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS 0x92CF +#define GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS 0x92D0 +#define GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS 0x92D1 +#define GL_MAX_VERTEX_ATOMIC_COUNTERS 0x92D2 +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS 0x92D3 +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS 0x92D4 +#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS 0x92D5 +#define GL_MAX_FRAGMENT_ATOMIC_COUNTERS 0x92D6 +#define GL_MAX_COMBINED_ATOMIC_COUNTERS 0x92D7 +#define GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE 0x92D8 +#define GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS 0x92DC +#define GL_ACTIVE_ATOMIC_COUNTER_BUFFERS 0x92D9 +#define GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX 0x92DA +#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB +#endif + +#ifndef GL_ARB_shader_image_load_store +#define GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT 0x00000001 +#define GL_ELEMENT_ARRAY_BARRIER_BIT 0x00000002 +#define GL_UNIFORM_BARRIER_BIT 0x00000004 +#define GL_TEXTURE_FETCH_BARRIER_BIT 0x00000008 +#define GL_SHADER_IMAGE_ACCESS_BARRIER_BIT 0x00000020 +#define GL_COMMAND_BARRIER_BIT 0x00000040 +#define GL_PIXEL_BUFFER_BARRIER_BIT 0x00000080 +#define GL_TEXTURE_UPDATE_BARRIER_BIT 0x00000100 +#define GL_BUFFER_UPDATE_BARRIER_BIT 0x00000200 +#define GL_FRAMEBUFFER_BARRIER_BIT 0x00000400 +#define GL_TRANSFORM_FEEDBACK_BARRIER_BIT 0x00000800 +#define GL_ATOMIC_COUNTER_BARRIER_BIT 0x00001000 +#define GL_ALL_BARRIER_BITS 0xFFFFFFFF +#define GL_MAX_IMAGE_UNITS 0x8F38 +#define GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS 0x8F39 +#define GL_IMAGE_BINDING_NAME 0x8F3A +#define GL_IMAGE_BINDING_LEVEL 0x8F3B +#define GL_IMAGE_BINDING_LAYERED 0x8F3C +#define GL_IMAGE_BINDING_LAYER 0x8F3D +#define GL_IMAGE_BINDING_ACCESS 0x8F3E +#define GL_IMAGE_1D 0x904C +#define GL_IMAGE_2D 0x904D +#define GL_IMAGE_3D 0x904E +#define GL_IMAGE_2D_RECT 0x904F +#define GL_IMAGE_CUBE 0x9050 +#define GL_IMAGE_BUFFER 0x9051 +#define GL_IMAGE_1D_ARRAY 0x9052 +#define GL_IMAGE_2D_ARRAY 0x9053 +#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054 +#define GL_IMAGE_2D_MULTISAMPLE 0x9055 +#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056 +#define GL_INT_IMAGE_1D 0x9057 +#define GL_INT_IMAGE_2D 0x9058 +#define GL_INT_IMAGE_3D 0x9059 +#define GL_INT_IMAGE_2D_RECT 0x905A +#define GL_INT_IMAGE_CUBE 0x905B +#define GL_INT_IMAGE_BUFFER 0x905C +#define GL_INT_IMAGE_1D_ARRAY 0x905D +#define GL_INT_IMAGE_2D_ARRAY 0x905E +#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F +#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060 +#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061 +#define GL_UNSIGNED_INT_IMAGE_1D 0x9062 +#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 +#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 +#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065 +#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 +#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067 +#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B +#define 
GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C +#define GL_MAX_IMAGE_SAMPLES 0x906D +#define GL_IMAGE_BINDING_FORMAT 0x906E +#define GL_IMAGE_FORMAT_COMPATIBILITY_TYPE 0x90C7 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE 0x90C8 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS 0x90C9 +#define GL_MAX_VERTEX_IMAGE_UNIFORMS 0x90CA +#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS 0x90CB +#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS 0x90CC +#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS 0x90CD +#define GL_MAX_FRAGMENT_IMAGE_UNIFORMS 0x90CE +#define GL_MAX_COMBINED_IMAGE_UNIFORMS 0x90CF +#endif + +#ifndef GL_ARB_shading_language_packing +#endif + +#ifndef GL_ARB_texture_storage +#define GL_TEXTURE_IMMUTABLE_FORMAT 0x912F +#endif + +#ifndef GL_KHR_texture_compression_astc_ldr +#define GL_COMPRESSED_RGBA_ASTC_4x4_KHR 0x93B0 +#define GL_COMPRESSED_RGBA_ASTC_5x4_KHR 0x93B1 +#define GL_COMPRESSED_RGBA_ASTC_5x5_KHR 0x93B2 +#define GL_COMPRESSED_RGBA_ASTC_6x5_KHR 0x93B3 +#define GL_COMPRESSED_RGBA_ASTC_6x6_KHR 0x93B4 +#define GL_COMPRESSED_RGBA_ASTC_8x5_KHR 0x93B5 +#define GL_COMPRESSED_RGBA_ASTC_8x6_KHR 0x93B6 +#define GL_COMPRESSED_RGBA_ASTC_8x8_KHR 0x93B7 +#define GL_COMPRESSED_RGBA_ASTC_10x5_KHR 0x93B8 +#define GL_COMPRESSED_RGBA_ASTC_10x6_KHR 0x93B9 +#define GL_COMPRESSED_RGBA_ASTC_10x8_KHR 0x93BA +#define GL_COMPRESSED_RGBA_ASTC_10x10_KHR 0x93BB +#define GL_COMPRESSED_RGBA_ASTC_12x10_KHR 0x93BC +#define GL_COMPRESSED_RGBA_ASTC_12x12_KHR 0x93BD +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR 0x93D0 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR 0x93D1 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR 0x93D2 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR 0x93D3 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR 0x93D4 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR 0x93D5 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR 0x93D6 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR 0x93D7 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR 0x93D8 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR 0x93D9 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR 0x93DA +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR 0x93DB +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR 0x93DC +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR 0x93DD +#endif + +#ifndef GL_KHR_debug +#define GL_DEBUG_OUTPUT_SYNCHRONOUS 0x8242 +#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH 0x8243 +#define GL_DEBUG_CALLBACK_FUNCTION 0x8244 +#define GL_DEBUG_CALLBACK_USER_PARAM 0x8245 +#define GL_DEBUG_SOURCE_API 0x8246 +#define GL_DEBUG_SOURCE_WINDOW_SYSTEM 0x8247 +#define GL_DEBUG_SOURCE_SHADER_COMPILER 0x8248 +#define GL_DEBUG_SOURCE_THIRD_PARTY 0x8249 +#define GL_DEBUG_SOURCE_APPLICATION 0x824A +#define GL_DEBUG_SOURCE_OTHER 0x824B +#define GL_DEBUG_TYPE_ERROR 0x824C +#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR 0x824D +#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR 0x824E +#define GL_DEBUG_TYPE_PORTABILITY 0x824F +#define GL_DEBUG_TYPE_PERFORMANCE 0x8250 +#define GL_DEBUG_TYPE_OTHER 0x8251 +#define GL_DEBUG_TYPE_MARKER 0x8268 +#define GL_DEBUG_TYPE_PUSH_GROUP 0x8269 +#define GL_DEBUG_TYPE_POP_GROUP 0x826A +#define GL_DEBUG_SEVERITY_NOTIFICATION 0x826B +#define GL_MAX_DEBUG_GROUP_STACK_DEPTH 0x826C +#define GL_DEBUG_GROUP_STACK_DEPTH 0x826D +#define GL_BUFFER 0x82E0 +#define GL_SHADER 0x82E1 +#define GL_PROGRAM 0x82E2 +#define GL_QUERY 0x82E3 +#define GL_PROGRAM_PIPELINE 0x82E4 +#define GL_SAMPLER 0x82E6 +#define GL_DISPLAY_LIST 0x82E7 +/* DISPLAY_LIST used in compatibility profile only */ +#define GL_MAX_LABEL_LENGTH 0x82E8 +#define GL_MAX_DEBUG_MESSAGE_LENGTH 
0x9143 +#define GL_MAX_DEBUG_LOGGED_MESSAGES 0x9144 +#define GL_DEBUG_LOGGED_MESSAGES 0x9145 +#define GL_DEBUG_SEVERITY_HIGH 0x9146 +#define GL_DEBUG_SEVERITY_MEDIUM 0x9147 +#define GL_DEBUG_SEVERITY_LOW 0x9148 +#define GL_DEBUG_OUTPUT 0x92E0 +#define GL_CONTEXT_FLAG_DEBUG_BIT 0x00000002 +/* reuse GL_STACK_UNDERFLOW */ +/* reuse GL_STACK_OVERFLOW */ +#endif + +#ifndef GL_ARB_arrays_of_arrays +#endif + +#ifndef GL_ARB_clear_buffer_object +#endif + +#ifndef GL_ARB_compute_shader +#define GL_COMPUTE_SHADER 0x91B9 +#define GL_MAX_COMPUTE_UNIFORM_BLOCKS 0x91BB +#define GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS 0x91BC +#define GL_MAX_COMPUTE_IMAGE_UNIFORMS 0x91BD +#define GL_MAX_COMPUTE_SHARED_MEMORY_SIZE 0x8262 +#define GL_MAX_COMPUTE_UNIFORM_COMPONENTS 0x8263 +#define GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS 0x8264 +#define GL_MAX_COMPUTE_ATOMIC_COUNTERS 0x8265 +#define GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS 0x8266 +#define GL_MAX_COMPUTE_LOCAL_INVOCATIONS 0x90EB +#define GL_MAX_COMPUTE_WORK_GROUP_COUNT 0x91BE +#define GL_MAX_COMPUTE_WORK_GROUP_SIZE 0x91BF +#define GL_COMPUTE_LOCAL_WORK_SIZE 0x8267 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER 0x90EC +#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER 0x90ED +#define GL_DISPATCH_INDIRECT_BUFFER 0x90EE +#define GL_DISPATCH_INDIRECT_BUFFER_BINDING 0x90EF +#define GL_COMPUTE_SHADER_BIT 0x00000020 +#endif + +#ifndef GL_ARB_copy_image +#endif + +#ifndef GL_ARB_texture_view +#define GL_TEXTURE_VIEW_MIN_LEVEL 0x82DB +#define GL_TEXTURE_VIEW_NUM_LEVELS 0x82DC +#define GL_TEXTURE_VIEW_MIN_LAYER 0x82DD +#define GL_TEXTURE_VIEW_NUM_LAYERS 0x82DE +#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF +#endif + +#ifndef GL_ARB_vertex_attrib_binding +#define GL_VERTEX_ATTRIB_BINDING 0x82D4 +#define GL_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D5 +#define GL_VERTEX_BINDING_DIVISOR 0x82D6 +#define GL_VERTEX_BINDING_OFFSET 0x82D7 +#define GL_VERTEX_BINDING_STRIDE 0x82D8 +#define GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D9 +#define GL_MAX_VERTEX_ATTRIB_BINDINGS 0x82DA +#endif + +#ifndef GL_ARB_robustness_isolation +#endif + +#ifndef GL_ARB_ES3_compatibility +#define GL_COMPRESSED_RGB8_ETC2 0x9274 +#define GL_COMPRESSED_SRGB8_ETC2 0x9275 +#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276 +#define GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277 +#define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278 +#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279 +#define GL_COMPRESSED_R11_EAC 0x9270 +#define GL_COMPRESSED_SIGNED_R11_EAC 0x9271 +#define GL_COMPRESSED_RG11_EAC 0x9272 +#define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273 +#define GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69 +#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A +#define GL_MAX_ELEMENT_INDEX 0x8D6B +#endif + +#ifndef GL_ARB_explicit_uniform_location +#define GL_MAX_UNIFORM_LOCATIONS 0x826E +#endif + +#ifndef GL_ARB_fragment_layer_viewport +#endif + +#ifndef GL_ARB_framebuffer_no_attachments +#define GL_FRAMEBUFFER_DEFAULT_WIDTH 0x9310 +#define GL_FRAMEBUFFER_DEFAULT_HEIGHT 0x9311 +#define GL_FRAMEBUFFER_DEFAULT_LAYERS 0x9312 +#define GL_FRAMEBUFFER_DEFAULT_SAMPLES 0x9313 +#define GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS 0x9314 +#define GL_MAX_FRAMEBUFFER_WIDTH 0x9315 +#define GL_MAX_FRAMEBUFFER_HEIGHT 0x9316 +#define GL_MAX_FRAMEBUFFER_LAYERS 0x9317 +#define GL_MAX_FRAMEBUFFER_SAMPLES 0x9318 +#endif + +#ifndef GL_ARB_internalformat_query2 +/* reuse GL_IMAGE_FORMAT_COMPATIBILITY_TYPE */ +/* reuse GL_NUM_SAMPLE_COUNTS */ +/* reuse GL_RENDERBUFFER */ +/* reuse GL_SAMPLES */ +/* reuse GL_TEXTURE_1D */ +/* reuse 
GL_TEXTURE_1D_ARRAY */ +/* reuse GL_TEXTURE_2D */ +/* reuse GL_TEXTURE_2D_ARRAY */ +/* reuse GL_TEXTURE_3D */ +/* reuse GL_TEXTURE_CUBE_MAP */ +/* reuse GL_TEXTURE_CUBE_MAP_ARRAY */ +/* reuse GL_TEXTURE_RECTANGLE */ +/* reuse GL_TEXTURE_BUFFER */ +/* reuse GL_TEXTURE_2D_MULTISAMPLE */ +/* reuse GL_TEXTURE_2D_MULTISAMPLE_ARRAY */ +/* reuse GL_TEXTURE_COMPRESSED */ +#define GL_INTERNALFORMAT_SUPPORTED 0x826F +#define GL_INTERNALFORMAT_PREFERRED 0x8270 +#define GL_INTERNALFORMAT_RED_SIZE 0x8271 +#define GL_INTERNALFORMAT_GREEN_SIZE 0x8272 +#define GL_INTERNALFORMAT_BLUE_SIZE 0x8273 +#define GL_INTERNALFORMAT_ALPHA_SIZE 0x8274 +#define GL_INTERNALFORMAT_DEPTH_SIZE 0x8275 +#define GL_INTERNALFORMAT_STENCIL_SIZE 0x8276 +#define GL_INTERNALFORMAT_SHARED_SIZE 0x8277 +#define GL_INTERNALFORMAT_RED_TYPE 0x8278 +#define GL_INTERNALFORMAT_GREEN_TYPE 0x8279 +#define GL_INTERNALFORMAT_BLUE_TYPE 0x827A +#define GL_INTERNALFORMAT_ALPHA_TYPE 0x827B +#define GL_INTERNALFORMAT_DEPTH_TYPE 0x827C +#define GL_INTERNALFORMAT_STENCIL_TYPE 0x827D +#define GL_MAX_WIDTH 0x827E +#define GL_MAX_HEIGHT 0x827F +#define GL_MAX_DEPTH 0x8280 +#define GL_MAX_LAYERS 0x8281 +#define GL_MAX_COMBINED_DIMENSIONS 0x8282 +#define GL_COLOR_COMPONENTS 0x8283 +#define GL_DEPTH_COMPONENTS 0x8284 +#define GL_STENCIL_COMPONENTS 0x8285 +#define GL_COLOR_RENDERABLE 0x8286 +#define GL_DEPTH_RENDERABLE 0x8287 +#define GL_STENCIL_RENDERABLE 0x8288 +#define GL_FRAMEBUFFER_RENDERABLE 0x8289 +#define GL_FRAMEBUFFER_RENDERABLE_LAYERED 0x828A +#define GL_FRAMEBUFFER_BLEND 0x828B +#define GL_READ_PIXELS 0x828C +#define GL_READ_PIXELS_FORMAT 0x828D +#define GL_READ_PIXELS_TYPE 0x828E +#define GL_TEXTURE_IMAGE_FORMAT 0x828F +#define GL_TEXTURE_IMAGE_TYPE 0x8290 +#define GL_GET_TEXTURE_IMAGE_FORMAT 0x8291 +#define GL_GET_TEXTURE_IMAGE_TYPE 0x8292 +#define GL_MIPMAP 0x8293 +#define GL_MANUAL_GENERATE_MIPMAP 0x8294 +#define GL_AUTO_GENERATE_MIPMAP 0x8295 +#define GL_COLOR_ENCODING 0x8296 +#define GL_SRGB_READ 0x8297 +#define GL_SRGB_WRITE 0x8298 +#define GL_SRGB_DECODE_ARB 0x8299 +#define GL_FILTER 0x829A +#define GL_VERTEX_TEXTURE 0x829B +#define GL_TESS_CONTROL_TEXTURE 0x829C +#define GL_TESS_EVALUATION_TEXTURE 0x829D +#define GL_GEOMETRY_TEXTURE 0x829E +#define GL_FRAGMENT_TEXTURE 0x829F +#define GL_COMPUTE_TEXTURE 0x82A0 +#define GL_TEXTURE_SHADOW 0x82A1 +#define GL_TEXTURE_GATHER 0x82A2 +#define GL_TEXTURE_GATHER_SHADOW 0x82A3 +#define GL_SHADER_IMAGE_LOAD 0x82A4 +#define GL_SHADER_IMAGE_STORE 0x82A5 +#define GL_SHADER_IMAGE_ATOMIC 0x82A6 +#define GL_IMAGE_TEXEL_SIZE 0x82A7 +#define GL_IMAGE_COMPATIBILITY_CLASS 0x82A8 +#define GL_IMAGE_PIXEL_FORMAT 0x82A9 +#define GL_IMAGE_PIXEL_TYPE 0x82AA +#define GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST 0x82AC +#define GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST 0x82AD +#define GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE 0x82AE +#define GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE 0x82AF +#define GL_TEXTURE_COMPRESSED_BLOCK_WIDTH 0x82B1 +#define GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT 0x82B2 +#define GL_TEXTURE_COMPRESSED_BLOCK_SIZE 0x82B3 +#define GL_CLEAR_BUFFER 0x82B4 +#define GL_TEXTURE_VIEW 0x82B5 +#define GL_VIEW_COMPATIBILITY_CLASS 0x82B6 +#define GL_FULL_SUPPORT 0x82B7 +#define GL_CAVEAT_SUPPORT 0x82B8 +#define GL_IMAGE_CLASS_4_X_32 0x82B9 +#define GL_IMAGE_CLASS_2_X_32 0x82BA +#define GL_IMAGE_CLASS_1_X_32 0x82BB +#define GL_IMAGE_CLASS_4_X_16 0x82BC +#define GL_IMAGE_CLASS_2_X_16 0x82BD +#define GL_IMAGE_CLASS_1_X_16 0x82BE +#define GL_IMAGE_CLASS_4_X_8 0x82BF +#define GL_IMAGE_CLASS_2_X_8 0x82C0 +#define 
GL_IMAGE_CLASS_1_X_8 0x82C1 +#define GL_IMAGE_CLASS_11_11_10 0x82C2 +#define GL_IMAGE_CLASS_10_10_10_2 0x82C3 +#define GL_VIEW_CLASS_128_BITS 0x82C4 +#define GL_VIEW_CLASS_96_BITS 0x82C5 +#define GL_VIEW_CLASS_64_BITS 0x82C6 +#define GL_VIEW_CLASS_48_BITS 0x82C7 +#define GL_VIEW_CLASS_32_BITS 0x82C8 +#define GL_VIEW_CLASS_24_BITS 0x82C9 +#define GL_VIEW_CLASS_16_BITS 0x82CA +#define GL_VIEW_CLASS_8_BITS 0x82CB +#define GL_VIEW_CLASS_S3TC_DXT1_RGB 0x82CC +#define GL_VIEW_CLASS_S3TC_DXT1_RGBA 0x82CD +#define GL_VIEW_CLASS_S3TC_DXT3_RGBA 0x82CE +#define GL_VIEW_CLASS_S3TC_DXT5_RGBA 0x82CF +#define GL_VIEW_CLASS_RGTC1_RED 0x82D0 +#define GL_VIEW_CLASS_RGTC2_RG 0x82D1 +#define GL_VIEW_CLASS_BPTC_UNORM 0x82D2 +#define GL_VIEW_CLASS_BPTC_FLOAT 0x82D3 +#endif + +#ifndef GL_ARB_invalidate_subdata +#endif + +#ifndef GL_ARB_multi_draw_indirect +#endif + +#ifndef GL_ARB_program_interface_query +#define GL_UNIFORM 0x92E1 +#define GL_UNIFORM_BLOCK 0x92E2 +#define GL_PROGRAM_INPUT 0x92E3 +#define GL_PROGRAM_OUTPUT 0x92E4 +#define GL_BUFFER_VARIABLE 0x92E5 +#define GL_SHADER_STORAGE_BLOCK 0x92E6 +/* reuse GL_ATOMIC_COUNTER_BUFFER */ +#define GL_VERTEX_SUBROUTINE 0x92E8 +#define GL_TESS_CONTROL_SUBROUTINE 0x92E9 +#define GL_TESS_EVALUATION_SUBROUTINE 0x92EA +#define GL_GEOMETRY_SUBROUTINE 0x92EB +#define GL_FRAGMENT_SUBROUTINE 0x92EC +#define GL_COMPUTE_SUBROUTINE 0x92ED +#define GL_VERTEX_SUBROUTINE_UNIFORM 0x92EE +#define GL_TESS_CONTROL_SUBROUTINE_UNIFORM 0x92EF +#define GL_TESS_EVALUATION_SUBROUTINE_UNIFORM 0x92F0 +#define GL_GEOMETRY_SUBROUTINE_UNIFORM 0x92F1 +#define GL_FRAGMENT_SUBROUTINE_UNIFORM 0x92F2 +#define GL_COMPUTE_SUBROUTINE_UNIFORM 0x92F3 +#define GL_TRANSFORM_FEEDBACK_VARYING 0x92F4 +#define GL_ACTIVE_RESOURCES 0x92F5 +#define GL_MAX_NAME_LENGTH 0x92F6 +#define GL_MAX_NUM_ACTIVE_VARIABLES 0x92F7 +#define GL_MAX_NUM_COMPATIBLE_SUBROUTINES 0x92F8 +#define GL_NAME_LENGTH 0x92F9 +#define GL_TYPE 0x92FA +#define GL_ARRAY_SIZE 0x92FB +#define GL_OFFSET 0x92FC +#define GL_BLOCK_INDEX 0x92FD +#define GL_ARRAY_STRIDE 0x92FE +#define GL_MATRIX_STRIDE 0x92FF +#define GL_IS_ROW_MAJOR 0x9300 +#define GL_ATOMIC_COUNTER_BUFFER_INDEX 0x9301 +#define GL_BUFFER_BINDING 0x9302 +#define GL_BUFFER_DATA_SIZE 0x9303 +#define GL_NUM_ACTIVE_VARIABLES 0x9304 +#define GL_ACTIVE_VARIABLES 0x9305 +#define GL_REFERENCED_BY_VERTEX_SHADER 0x9306 +#define GL_REFERENCED_BY_TESS_CONTROL_SHADER 0x9307 +#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER 0x9308 +#define GL_REFERENCED_BY_GEOMETRY_SHADER 0x9309 +#define GL_REFERENCED_BY_FRAGMENT_SHADER 0x930A +#define GL_REFERENCED_BY_COMPUTE_SHADER 0x930B +#define GL_TOP_LEVEL_ARRAY_SIZE 0x930C +#define GL_TOP_LEVEL_ARRAY_STRIDE 0x930D +#define GL_LOCATION 0x930E +#define GL_LOCATION_INDEX 0x930F +#define GL_IS_PER_PATCH 0x92E7 +/* reuse GL_NUM_COMPATIBLE_SUBROUTINES */ +/* reuse GL_COMPATIBLE_SUBROUTINES */ +#endif + +#ifndef GL_ARB_robust_buffer_access_behavior +#endif + +#ifndef GL_ARB_shader_image_size +#endif + +#ifndef GL_ARB_shader_storage_buffer_object +#define GL_SHADER_STORAGE_BUFFER 0x90D2 +#define GL_SHADER_STORAGE_BUFFER_BINDING 0x90D3 +#define GL_SHADER_STORAGE_BUFFER_START 0x90D4 +#define GL_SHADER_STORAGE_BUFFER_SIZE 0x90D5 +#define GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS 0x90D6 +#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS 0x90D7 +#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS 0x90D8 +#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS 0x90D9 +#define GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS 0x90DA +#define GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS 0x90DB +#define 
GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS 0x90DC +#define GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS 0x90DD +#define GL_MAX_SHADER_STORAGE_BLOCK_SIZE 0x90DE +#define GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT 0x90DF +#define GL_SHADER_STORAGE_BARRIER_BIT 0x2000 +#define GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS +/* reuse GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS */ +#endif + +#ifndef GL_ARB_stencil_texturing +#define GL_DEPTH_STENCIL_TEXTURE_MODE 0x90EA +#endif + +#ifndef GL_ARB_texture_buffer_range +#define GL_TEXTURE_BUFFER_OFFSET 0x919D +#define GL_TEXTURE_BUFFER_SIZE 0x919E +#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT 0x919F +#endif + +#ifndef GL_ARB_texture_query_levels +#endif + +#ifndef GL_ARB_texture_storage_multisample +#endif + +#ifndef GL_EXT_abgr +#define GL_ABGR_EXT 0x8000 +#endif + +#ifndef GL_EXT_blend_color +#define GL_CONSTANT_COLOR_EXT 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR_EXT 0x8002 +#define GL_CONSTANT_ALPHA_EXT 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA_EXT 0x8004 +#define GL_BLEND_COLOR_EXT 0x8005 +#endif + +#ifndef GL_EXT_polygon_offset +#define GL_POLYGON_OFFSET_EXT 0x8037 +#define GL_POLYGON_OFFSET_FACTOR_EXT 0x8038 +#define GL_POLYGON_OFFSET_BIAS_EXT 0x8039 +#endif + +#ifndef GL_EXT_texture +#define GL_ALPHA4_EXT 0x803B +#define GL_ALPHA8_EXT 0x803C +#define GL_ALPHA12_EXT 0x803D +#define GL_ALPHA16_EXT 0x803E +#define GL_LUMINANCE4_EXT 0x803F +#define GL_LUMINANCE8_EXT 0x8040 +#define GL_LUMINANCE12_EXT 0x8041 +#define GL_LUMINANCE16_EXT 0x8042 +#define GL_LUMINANCE4_ALPHA4_EXT 0x8043 +#define GL_LUMINANCE6_ALPHA2_EXT 0x8044 +#define GL_LUMINANCE8_ALPHA8_EXT 0x8045 +#define GL_LUMINANCE12_ALPHA4_EXT 0x8046 +#define GL_LUMINANCE12_ALPHA12_EXT 0x8047 +#define GL_LUMINANCE16_ALPHA16_EXT 0x8048 +#define GL_INTENSITY_EXT 0x8049 +#define GL_INTENSITY4_EXT 0x804A +#define GL_INTENSITY8_EXT 0x804B +#define GL_INTENSITY12_EXT 0x804C +#define GL_INTENSITY16_EXT 0x804D +#define GL_RGB2_EXT 0x804E +#define GL_RGB4_EXT 0x804F +#define GL_RGB5_EXT 0x8050 +#define GL_RGB8_EXT 0x8051 +#define GL_RGB10_EXT 0x8052 +#define GL_RGB12_EXT 0x8053 +#define GL_RGB16_EXT 0x8054 +#define GL_RGBA2_EXT 0x8055 +#define GL_RGBA4_EXT 0x8056 +#define GL_RGB5_A1_EXT 0x8057 +#define GL_RGBA8_EXT 0x8058 +#define GL_RGB10_A2_EXT 0x8059 +#define GL_RGBA12_EXT 0x805A +#define GL_RGBA16_EXT 0x805B +#define GL_TEXTURE_RED_SIZE_EXT 0x805C +#define GL_TEXTURE_GREEN_SIZE_EXT 0x805D +#define GL_TEXTURE_BLUE_SIZE_EXT 0x805E +#define GL_TEXTURE_ALPHA_SIZE_EXT 0x805F +#define GL_TEXTURE_LUMINANCE_SIZE_EXT 0x8060 +#define GL_TEXTURE_INTENSITY_SIZE_EXT 0x8061 +#define GL_REPLACE_EXT 0x8062 +#define GL_PROXY_TEXTURE_1D_EXT 0x8063 +#define GL_PROXY_TEXTURE_2D_EXT 0x8064 +#define GL_TEXTURE_TOO_LARGE_EXT 0x8065 +#endif + +#ifndef GL_EXT_texture3D +#define GL_PACK_SKIP_IMAGES_EXT 0x806B +#define GL_PACK_IMAGE_HEIGHT_EXT 0x806C +#define GL_UNPACK_SKIP_IMAGES_EXT 0x806D +#define GL_UNPACK_IMAGE_HEIGHT_EXT 0x806E +#define GL_TEXTURE_3D_EXT 0x806F +#define GL_PROXY_TEXTURE_3D_EXT 0x8070 +#define GL_TEXTURE_DEPTH_EXT 0x8071 +#define GL_TEXTURE_WRAP_R_EXT 0x8072 +#define GL_MAX_3D_TEXTURE_SIZE_EXT 0x8073 +#endif + +#ifndef GL_SGIS_texture_filter4 +#define GL_FILTER4_SGIS 0x8146 +#define GL_TEXTURE_FILTER4_SIZE_SGIS 0x8147 +#endif + +#ifndef GL_EXT_subtexture +#endif + +#ifndef GL_EXT_copy_texture +#endif + +#ifndef GL_EXT_histogram +#define GL_HISTOGRAM_EXT 0x8024 +#define GL_PROXY_HISTOGRAM_EXT 0x8025 +#define GL_HISTOGRAM_WIDTH_EXT 0x8026 +#define 
GL_HISTOGRAM_FORMAT_EXT 0x8027 +#define GL_HISTOGRAM_RED_SIZE_EXT 0x8028 +#define GL_HISTOGRAM_GREEN_SIZE_EXT 0x8029 +#define GL_HISTOGRAM_BLUE_SIZE_EXT 0x802A +#define GL_HISTOGRAM_ALPHA_SIZE_EXT 0x802B +#define GL_HISTOGRAM_LUMINANCE_SIZE_EXT 0x802C +#define GL_HISTOGRAM_SINK_EXT 0x802D +#define GL_MINMAX_EXT 0x802E +#define GL_MINMAX_FORMAT_EXT 0x802F +#define GL_MINMAX_SINK_EXT 0x8030 +#define GL_TABLE_TOO_LARGE_EXT 0x8031 +#endif + +#ifndef GL_EXT_convolution +#define GL_CONVOLUTION_1D_EXT 0x8010 +#define GL_CONVOLUTION_2D_EXT 0x8011 +#define GL_SEPARABLE_2D_EXT 0x8012 +#define GL_CONVOLUTION_BORDER_MODE_EXT 0x8013 +#define GL_CONVOLUTION_FILTER_SCALE_EXT 0x8014 +#define GL_CONVOLUTION_FILTER_BIAS_EXT 0x8015 +#define GL_REDUCE_EXT 0x8016 +#define GL_CONVOLUTION_FORMAT_EXT 0x8017 +#define GL_CONVOLUTION_WIDTH_EXT 0x8018 +#define GL_CONVOLUTION_HEIGHT_EXT 0x8019 +#define GL_MAX_CONVOLUTION_WIDTH_EXT 0x801A +#define GL_MAX_CONVOLUTION_HEIGHT_EXT 0x801B +#define GL_POST_CONVOLUTION_RED_SCALE_EXT 0x801C +#define GL_POST_CONVOLUTION_GREEN_SCALE_EXT 0x801D +#define GL_POST_CONVOLUTION_BLUE_SCALE_EXT 0x801E +#define GL_POST_CONVOLUTION_ALPHA_SCALE_EXT 0x801F +#define GL_POST_CONVOLUTION_RED_BIAS_EXT 0x8020 +#define GL_POST_CONVOLUTION_GREEN_BIAS_EXT 0x8021 +#define GL_POST_CONVOLUTION_BLUE_BIAS_EXT 0x8022 +#define GL_POST_CONVOLUTION_ALPHA_BIAS_EXT 0x8023 +#endif + +#ifndef GL_SGI_color_matrix +#define GL_COLOR_MATRIX_SGI 0x80B1 +#define GL_COLOR_MATRIX_STACK_DEPTH_SGI 0x80B2 +#define GL_MAX_COLOR_MATRIX_STACK_DEPTH_SGI 0x80B3 +#define GL_POST_COLOR_MATRIX_RED_SCALE_SGI 0x80B4 +#define GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI 0x80B5 +#define GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI 0x80B6 +#define GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI 0x80B7 +#define GL_POST_COLOR_MATRIX_RED_BIAS_SGI 0x80B8 +#define GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI 0x80B9 +#define GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI 0x80BA +#define GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI 0x80BB +#endif + +#ifndef GL_SGI_color_table +#define GL_COLOR_TABLE_SGI 0x80D0 +#define GL_POST_CONVOLUTION_COLOR_TABLE_SGI 0x80D1 +#define GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI 0x80D2 +#define GL_PROXY_COLOR_TABLE_SGI 0x80D3 +#define GL_PROXY_POST_CONVOLUTION_COLOR_TABLE_SGI 0x80D4 +#define GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE_SGI 0x80D5 +#define GL_COLOR_TABLE_SCALE_SGI 0x80D6 +#define GL_COLOR_TABLE_BIAS_SGI 0x80D7 +#define GL_COLOR_TABLE_FORMAT_SGI 0x80D8 +#define GL_COLOR_TABLE_WIDTH_SGI 0x80D9 +#define GL_COLOR_TABLE_RED_SIZE_SGI 0x80DA +#define GL_COLOR_TABLE_GREEN_SIZE_SGI 0x80DB +#define GL_COLOR_TABLE_BLUE_SIZE_SGI 0x80DC +#define GL_COLOR_TABLE_ALPHA_SIZE_SGI 0x80DD +#define GL_COLOR_TABLE_LUMINANCE_SIZE_SGI 0x80DE +#define GL_COLOR_TABLE_INTENSITY_SIZE_SGI 0x80DF +#endif + +#ifndef GL_SGIS_pixel_texture +#define GL_PIXEL_TEXTURE_SGIS 0x8353 +#define GL_PIXEL_FRAGMENT_RGB_SOURCE_SGIS 0x8354 +#define GL_PIXEL_FRAGMENT_ALPHA_SOURCE_SGIS 0x8355 +#define GL_PIXEL_GROUP_COLOR_SGIS 0x8356 +#endif + +#ifndef GL_SGIX_pixel_texture +#define GL_PIXEL_TEX_GEN_SGIX 0x8139 +#define GL_PIXEL_TEX_GEN_MODE_SGIX 0x832B +#endif + +#ifndef GL_SGIS_texture4D +#define GL_PACK_SKIP_VOLUMES_SGIS 0x8130 +#define GL_PACK_IMAGE_DEPTH_SGIS 0x8131 +#define GL_UNPACK_SKIP_VOLUMES_SGIS 0x8132 +#define GL_UNPACK_IMAGE_DEPTH_SGIS 0x8133 +#define GL_TEXTURE_4D_SGIS 0x8134 +#define GL_PROXY_TEXTURE_4D_SGIS 0x8135 +#define GL_TEXTURE_4DSIZE_SGIS 0x8136 +#define GL_TEXTURE_WRAP_Q_SGIS 0x8137 +#define GL_MAX_4D_TEXTURE_SIZE_SGIS 0x8138 +#define GL_TEXTURE_4D_BINDING_SGIS 0x814F +#endif + 
+#ifndef GL_SGI_texture_color_table +#define GL_TEXTURE_COLOR_TABLE_SGI 0x80BC +#define GL_PROXY_TEXTURE_COLOR_TABLE_SGI 0x80BD +#endif + +#ifndef GL_EXT_cmyka +#define GL_CMYK_EXT 0x800C +#define GL_CMYKA_EXT 0x800D +#define GL_PACK_CMYK_HINT_EXT 0x800E +#define GL_UNPACK_CMYK_HINT_EXT 0x800F +#endif + +#ifndef GL_EXT_texture_object +#define GL_TEXTURE_PRIORITY_EXT 0x8066 +#define GL_TEXTURE_RESIDENT_EXT 0x8067 +#define GL_TEXTURE_1D_BINDING_EXT 0x8068 +#define GL_TEXTURE_2D_BINDING_EXT 0x8069 +#define GL_TEXTURE_3D_BINDING_EXT 0x806A +#endif + +#ifndef GL_SGIS_detail_texture +#define GL_DETAIL_TEXTURE_2D_SGIS 0x8095 +#define GL_DETAIL_TEXTURE_2D_BINDING_SGIS 0x8096 +#define GL_LINEAR_DETAIL_SGIS 0x8097 +#define GL_LINEAR_DETAIL_ALPHA_SGIS 0x8098 +#define GL_LINEAR_DETAIL_COLOR_SGIS 0x8099 +#define GL_DETAIL_TEXTURE_LEVEL_SGIS 0x809A +#define GL_DETAIL_TEXTURE_MODE_SGIS 0x809B +#define GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS 0x809C +#endif + +#ifndef GL_SGIS_sharpen_texture +#define GL_LINEAR_SHARPEN_SGIS 0x80AD +#define GL_LINEAR_SHARPEN_ALPHA_SGIS 0x80AE +#define GL_LINEAR_SHARPEN_COLOR_SGIS 0x80AF +#define GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS 0x80B0 +#endif + +#ifndef GL_EXT_packed_pixels +#define GL_UNSIGNED_BYTE_3_3_2_EXT 0x8032 +#define GL_UNSIGNED_SHORT_4_4_4_4_EXT 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1_EXT 0x8034 +#define GL_UNSIGNED_INT_8_8_8_8_EXT 0x8035 +#define GL_UNSIGNED_INT_10_10_10_2_EXT 0x8036 +#endif + +#ifndef GL_SGIS_texture_lod +#define GL_TEXTURE_MIN_LOD_SGIS 0x813A +#define GL_TEXTURE_MAX_LOD_SGIS 0x813B +#define GL_TEXTURE_BASE_LEVEL_SGIS 0x813C +#define GL_TEXTURE_MAX_LEVEL_SGIS 0x813D +#endif + +#ifndef GL_SGIS_multisample +#define GL_MULTISAMPLE_SGIS 0x809D +#define GL_SAMPLE_ALPHA_TO_MASK_SGIS 0x809E +#define GL_SAMPLE_ALPHA_TO_ONE_SGIS 0x809F +#define GL_SAMPLE_MASK_SGIS 0x80A0 +#define GL_1PASS_SGIS 0x80A1 +#define GL_2PASS_0_SGIS 0x80A2 +#define GL_2PASS_1_SGIS 0x80A3 +#define GL_4PASS_0_SGIS 0x80A4 +#define GL_4PASS_1_SGIS 0x80A5 +#define GL_4PASS_2_SGIS 0x80A6 +#define GL_4PASS_3_SGIS 0x80A7 +#define GL_SAMPLE_BUFFERS_SGIS 0x80A8 +#define GL_SAMPLES_SGIS 0x80A9 +#define GL_SAMPLE_MASK_VALUE_SGIS 0x80AA +#define GL_SAMPLE_MASK_INVERT_SGIS 0x80AB +#define GL_SAMPLE_PATTERN_SGIS 0x80AC +#endif + +#ifndef GL_EXT_rescale_normal +#define GL_RESCALE_NORMAL_EXT 0x803A +#endif + +#ifndef GL_EXT_vertex_array +#define GL_VERTEX_ARRAY_EXT 0x8074 +#define GL_NORMAL_ARRAY_EXT 0x8075 +#define GL_COLOR_ARRAY_EXT 0x8076 +#define GL_INDEX_ARRAY_EXT 0x8077 +#define GL_TEXTURE_COORD_ARRAY_EXT 0x8078 +#define GL_EDGE_FLAG_ARRAY_EXT 0x8079 +#define GL_VERTEX_ARRAY_SIZE_EXT 0x807A +#define GL_VERTEX_ARRAY_TYPE_EXT 0x807B +#define GL_VERTEX_ARRAY_STRIDE_EXT 0x807C +#define GL_VERTEX_ARRAY_COUNT_EXT 0x807D +#define GL_NORMAL_ARRAY_TYPE_EXT 0x807E +#define GL_NORMAL_ARRAY_STRIDE_EXT 0x807F +#define GL_NORMAL_ARRAY_COUNT_EXT 0x8080 +#define GL_COLOR_ARRAY_SIZE_EXT 0x8081 +#define GL_COLOR_ARRAY_TYPE_EXT 0x8082 +#define GL_COLOR_ARRAY_STRIDE_EXT 0x8083 +#define GL_COLOR_ARRAY_COUNT_EXT 0x8084 +#define GL_INDEX_ARRAY_TYPE_EXT 0x8085 +#define GL_INDEX_ARRAY_STRIDE_EXT 0x8086 +#define GL_INDEX_ARRAY_COUNT_EXT 0x8087 +#define GL_TEXTURE_COORD_ARRAY_SIZE_EXT 0x8088 +#define GL_TEXTURE_COORD_ARRAY_TYPE_EXT 0x8089 +#define GL_TEXTURE_COORD_ARRAY_STRIDE_EXT 0x808A +#define GL_TEXTURE_COORD_ARRAY_COUNT_EXT 0x808B +#define GL_EDGE_FLAG_ARRAY_STRIDE_EXT 0x808C +#define GL_EDGE_FLAG_ARRAY_COUNT_EXT 0x808D +#define GL_VERTEX_ARRAY_POINTER_EXT 0x808E +#define GL_NORMAL_ARRAY_POINTER_EXT 0x808F 
+#define GL_COLOR_ARRAY_POINTER_EXT 0x8090 +#define GL_INDEX_ARRAY_POINTER_EXT 0x8091 +#define GL_TEXTURE_COORD_ARRAY_POINTER_EXT 0x8092 +#define GL_EDGE_FLAG_ARRAY_POINTER_EXT 0x8093 +#endif + +#ifndef GL_EXT_misc_attribute +#endif + +#ifndef GL_SGIS_generate_mipmap +#define GL_GENERATE_MIPMAP_SGIS 0x8191 +#define GL_GENERATE_MIPMAP_HINT_SGIS 0x8192 +#endif + +#ifndef GL_SGIX_clipmap +#define GL_LINEAR_CLIPMAP_LINEAR_SGIX 0x8170 +#define GL_TEXTURE_CLIPMAP_CENTER_SGIX 0x8171 +#define GL_TEXTURE_CLIPMAP_FRAME_SGIX 0x8172 +#define GL_TEXTURE_CLIPMAP_OFFSET_SGIX 0x8173 +#define GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX 0x8174 +#define GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX 0x8175 +#define GL_TEXTURE_CLIPMAP_DEPTH_SGIX 0x8176 +#define GL_MAX_CLIPMAP_DEPTH_SGIX 0x8177 +#define GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX 0x8178 +#define GL_NEAREST_CLIPMAP_NEAREST_SGIX 0x844D +#define GL_NEAREST_CLIPMAP_LINEAR_SGIX 0x844E +#define GL_LINEAR_CLIPMAP_NEAREST_SGIX 0x844F +#endif + +#ifndef GL_SGIX_shadow +#define GL_TEXTURE_COMPARE_SGIX 0x819A +#define GL_TEXTURE_COMPARE_OPERATOR_SGIX 0x819B +#define GL_TEXTURE_LEQUAL_R_SGIX 0x819C +#define GL_TEXTURE_GEQUAL_R_SGIX 0x819D +#endif + +#ifndef GL_SGIS_texture_edge_clamp +#define GL_CLAMP_TO_EDGE_SGIS 0x812F +#endif + +#ifndef GL_SGIS_texture_border_clamp +#define GL_CLAMP_TO_BORDER_SGIS 0x812D +#endif + +#ifndef GL_EXT_blend_minmax +#define GL_FUNC_ADD_EXT 0x8006 +#define GL_MIN_EXT 0x8007 +#define GL_MAX_EXT 0x8008 +#define GL_BLEND_EQUATION_EXT 0x8009 +#endif + +#ifndef GL_EXT_blend_subtract +#define GL_FUNC_SUBTRACT_EXT 0x800A +#define GL_FUNC_REVERSE_SUBTRACT_EXT 0x800B +#endif + +#ifndef GL_EXT_blend_logic_op +#endif + +#ifndef GL_SGIX_interlace +#define GL_INTERLACE_SGIX 0x8094 +#endif + +#ifndef GL_SGIX_pixel_tiles +#define GL_PIXEL_TILE_BEST_ALIGNMENT_SGIX 0x813E +#define GL_PIXEL_TILE_CACHE_INCREMENT_SGIX 0x813F +#define GL_PIXEL_TILE_WIDTH_SGIX 0x8140 +#define GL_PIXEL_TILE_HEIGHT_SGIX 0x8141 +#define GL_PIXEL_TILE_GRID_WIDTH_SGIX 0x8142 +#define GL_PIXEL_TILE_GRID_HEIGHT_SGIX 0x8143 +#define GL_PIXEL_TILE_GRID_DEPTH_SGIX 0x8144 +#define GL_PIXEL_TILE_CACHE_SIZE_SGIX 0x8145 +#endif + +#ifndef GL_SGIS_texture_select +#define GL_DUAL_ALPHA4_SGIS 0x8110 +#define GL_DUAL_ALPHA8_SGIS 0x8111 +#define GL_DUAL_ALPHA12_SGIS 0x8112 +#define GL_DUAL_ALPHA16_SGIS 0x8113 +#define GL_DUAL_LUMINANCE4_SGIS 0x8114 +#define GL_DUAL_LUMINANCE8_SGIS 0x8115 +#define GL_DUAL_LUMINANCE12_SGIS 0x8116 +#define GL_DUAL_LUMINANCE16_SGIS 0x8117 +#define GL_DUAL_INTENSITY4_SGIS 0x8118 +#define GL_DUAL_INTENSITY8_SGIS 0x8119 +#define GL_DUAL_INTENSITY12_SGIS 0x811A +#define GL_DUAL_INTENSITY16_SGIS 0x811B +#define GL_DUAL_LUMINANCE_ALPHA4_SGIS 0x811C +#define GL_DUAL_LUMINANCE_ALPHA8_SGIS 0x811D +#define GL_QUAD_ALPHA4_SGIS 0x811E +#define GL_QUAD_ALPHA8_SGIS 0x811F +#define GL_QUAD_LUMINANCE4_SGIS 0x8120 +#define GL_QUAD_LUMINANCE8_SGIS 0x8121 +#define GL_QUAD_INTENSITY4_SGIS 0x8122 +#define GL_QUAD_INTENSITY8_SGIS 0x8123 +#define GL_DUAL_TEXTURE_SELECT_SGIS 0x8124 +#define GL_QUAD_TEXTURE_SELECT_SGIS 0x8125 +#endif + +#ifndef GL_SGIX_sprite +#define GL_SPRITE_SGIX 0x8148 +#define GL_SPRITE_MODE_SGIX 0x8149 +#define GL_SPRITE_AXIS_SGIX 0x814A +#define GL_SPRITE_TRANSLATION_SGIX 0x814B +#define GL_SPRITE_AXIAL_SGIX 0x814C +#define GL_SPRITE_OBJECT_ALIGNED_SGIX 0x814D +#define GL_SPRITE_EYE_ALIGNED_SGIX 0x814E +#endif + +#ifndef GL_SGIX_texture_multi_buffer +#define GL_TEXTURE_MULTI_BUFFER_HINT_SGIX 0x812E +#endif + +#ifndef GL_EXT_point_parameters +#define GL_POINT_SIZE_MIN_EXT 0x8126 
+#define GL_POINT_SIZE_MAX_EXT 0x8127 +#define GL_POINT_FADE_THRESHOLD_SIZE_EXT 0x8128 +#define GL_DISTANCE_ATTENUATION_EXT 0x8129 +#endif + +#ifndef GL_SGIS_point_parameters +#define GL_POINT_SIZE_MIN_SGIS 0x8126 +#define GL_POINT_SIZE_MAX_SGIS 0x8127 +#define GL_POINT_FADE_THRESHOLD_SIZE_SGIS 0x8128 +#define GL_DISTANCE_ATTENUATION_SGIS 0x8129 +#endif + +#ifndef GL_SGIX_instruments +#define GL_INSTRUMENT_BUFFER_POINTER_SGIX 0x8180 +#define GL_INSTRUMENT_MEASUREMENTS_SGIX 0x8181 +#endif + +#ifndef GL_SGIX_texture_scale_bias +#define GL_POST_TEXTURE_FILTER_BIAS_SGIX 0x8179 +#define GL_POST_TEXTURE_FILTER_SCALE_SGIX 0x817A +#define GL_POST_TEXTURE_FILTER_BIAS_RANGE_SGIX 0x817B +#define GL_POST_TEXTURE_FILTER_SCALE_RANGE_SGIX 0x817C +#endif + +#ifndef GL_SGIX_framezoom +#define GL_FRAMEZOOM_SGIX 0x818B +#define GL_FRAMEZOOM_FACTOR_SGIX 0x818C +#define GL_MAX_FRAMEZOOM_FACTOR_SGIX 0x818D +#endif + +#ifndef GL_SGIX_tag_sample_buffer +#endif + +#ifndef GL_FfdMaskSGIX +#define GL_TEXTURE_DEFORMATION_BIT_SGIX 0x00000001 +#define GL_GEOMETRY_DEFORMATION_BIT_SGIX 0x00000002 +#endif + +#ifndef GL_SGIX_polynomial_ffd +#define GL_GEOMETRY_DEFORMATION_SGIX 0x8194 +#define GL_TEXTURE_DEFORMATION_SGIX 0x8195 +#define GL_DEFORMATIONS_MASK_SGIX 0x8196 +#define GL_MAX_DEFORMATION_ORDER_SGIX 0x8197 +#endif + +#ifndef GL_SGIX_reference_plane +#define GL_REFERENCE_PLANE_SGIX 0x817D +#define GL_REFERENCE_PLANE_EQUATION_SGIX 0x817E +#endif + +#ifndef GL_SGIX_flush_raster +#endif + +#ifndef GL_SGIX_depth_texture +#define GL_DEPTH_COMPONENT16_SGIX 0x81A5 +#define GL_DEPTH_COMPONENT24_SGIX 0x81A6 +#define GL_DEPTH_COMPONENT32_SGIX 0x81A7 +#endif + +#ifndef GL_SGIS_fog_function +#define GL_FOG_FUNC_SGIS 0x812A +#define GL_FOG_FUNC_POINTS_SGIS 0x812B +#define GL_MAX_FOG_FUNC_POINTS_SGIS 0x812C +#endif + +#ifndef GL_SGIX_fog_offset +#define GL_FOG_OFFSET_SGIX 0x8198 +#define GL_FOG_OFFSET_VALUE_SGIX 0x8199 +#endif + +#ifndef GL_HP_image_transform +#define GL_IMAGE_SCALE_X_HP 0x8155 +#define GL_IMAGE_SCALE_Y_HP 0x8156 +#define GL_IMAGE_TRANSLATE_X_HP 0x8157 +#define GL_IMAGE_TRANSLATE_Y_HP 0x8158 +#define GL_IMAGE_ROTATE_ANGLE_HP 0x8159 +#define GL_IMAGE_ROTATE_ORIGIN_X_HP 0x815A +#define GL_IMAGE_ROTATE_ORIGIN_Y_HP 0x815B +#define GL_IMAGE_MAG_FILTER_HP 0x815C +#define GL_IMAGE_MIN_FILTER_HP 0x815D +#define GL_IMAGE_CUBIC_WEIGHT_HP 0x815E +#define GL_CUBIC_HP 0x815F +#define GL_AVERAGE_HP 0x8160 +#define GL_IMAGE_TRANSFORM_2D_HP 0x8161 +#define GL_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP 0x8162 +#define GL_PROXY_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP 0x8163 +#endif + +#ifndef GL_HP_convolution_border_modes +#define GL_IGNORE_BORDER_HP 0x8150 +#define GL_CONSTANT_BORDER_HP 0x8151 +#define GL_REPLICATE_BORDER_HP 0x8153 +#define GL_CONVOLUTION_BORDER_COLOR_HP 0x8154 +#endif + +#ifndef GL_INGR_palette_buffer +#endif + +#ifndef GL_SGIX_texture_add_env +#define GL_TEXTURE_ENV_BIAS_SGIX 0x80BE +#endif + +#ifndef GL_EXT_color_subtable +#endif + +#ifndef GL_PGI_vertex_hints +#define GL_VERTEX_DATA_HINT_PGI 0x1A22A +#define GL_VERTEX_CONSISTENT_HINT_PGI 0x1A22B +#define GL_MATERIAL_SIDE_HINT_PGI 0x1A22C +#define GL_MAX_VERTEX_HINT_PGI 0x1A22D +#define GL_COLOR3_BIT_PGI 0x00010000 +#define GL_COLOR4_BIT_PGI 0x00020000 +#define GL_EDGEFLAG_BIT_PGI 0x00040000 +#define GL_INDEX_BIT_PGI 0x00080000 +#define GL_MAT_AMBIENT_BIT_PGI 0x00100000 +#define GL_MAT_AMBIENT_AND_DIFFUSE_BIT_PGI 0x00200000 +#define GL_MAT_DIFFUSE_BIT_PGI 0x00400000 +#define GL_MAT_EMISSION_BIT_PGI 0x00800000 +#define GL_MAT_COLOR_INDEXES_BIT_PGI 0x01000000 +#define 
GL_MAT_SHININESS_BIT_PGI 0x02000000 +#define GL_MAT_SPECULAR_BIT_PGI 0x04000000 +#define GL_NORMAL_BIT_PGI 0x08000000 +#define GL_TEXCOORD1_BIT_PGI 0x10000000 +#define GL_TEXCOORD2_BIT_PGI 0x20000000 +#define GL_TEXCOORD3_BIT_PGI 0x40000000 +#define GL_TEXCOORD4_BIT_PGI 0x80000000 +#define GL_VERTEX23_BIT_PGI 0x00000004 +#define GL_VERTEX4_BIT_PGI 0x00000008 +#endif + +#ifndef GL_PGI_misc_hints +#define GL_PREFER_DOUBLEBUFFER_HINT_PGI 0x1A1F8 +#define GL_CONSERVE_MEMORY_HINT_PGI 0x1A1FD +#define GL_RECLAIM_MEMORY_HINT_PGI 0x1A1FE +#define GL_NATIVE_GRAPHICS_HANDLE_PGI 0x1A202 +#define GL_NATIVE_GRAPHICS_BEGIN_HINT_PGI 0x1A203 +#define GL_NATIVE_GRAPHICS_END_HINT_PGI 0x1A204 +#define GL_ALWAYS_FAST_HINT_PGI 0x1A20C +#define GL_ALWAYS_SOFT_HINT_PGI 0x1A20D +#define GL_ALLOW_DRAW_OBJ_HINT_PGI 0x1A20E +#define GL_ALLOW_DRAW_WIN_HINT_PGI 0x1A20F +#define GL_ALLOW_DRAW_FRG_HINT_PGI 0x1A210 +#define GL_ALLOW_DRAW_MEM_HINT_PGI 0x1A211 +#define GL_STRICT_DEPTHFUNC_HINT_PGI 0x1A216 +#define GL_STRICT_LIGHTING_HINT_PGI 0x1A217 +#define GL_STRICT_SCISSOR_HINT_PGI 0x1A218 +#define GL_FULL_STIPPLE_HINT_PGI 0x1A219 +#define GL_CLIP_NEAR_HINT_PGI 0x1A220 +#define GL_CLIP_FAR_HINT_PGI 0x1A221 +#define GL_WIDE_LINE_HINT_PGI 0x1A222 +#define GL_BACK_NORMALS_HINT_PGI 0x1A223 +#endif + +#ifndef GL_EXT_paletted_texture +#define GL_COLOR_INDEX1_EXT 0x80E2 +#define GL_COLOR_INDEX2_EXT 0x80E3 +#define GL_COLOR_INDEX4_EXT 0x80E4 +#define GL_COLOR_INDEX8_EXT 0x80E5 +#define GL_COLOR_INDEX12_EXT 0x80E6 +#define GL_COLOR_INDEX16_EXT 0x80E7 +#define GL_TEXTURE_INDEX_SIZE_EXT 0x80ED +#endif + +#ifndef GL_EXT_clip_volume_hint +#define GL_CLIP_VOLUME_CLIPPING_HINT_EXT 0x80F0 +#endif + +#ifndef GL_SGIX_list_priority +#define GL_LIST_PRIORITY_SGIX 0x8182 +#endif + +#ifndef GL_SGIX_ir_instrument1 +#define GL_IR_INSTRUMENT1_SGIX 0x817F +#endif + +#ifndef GL_SGIX_calligraphic_fragment +#define GL_CALLIGRAPHIC_FRAGMENT_SGIX 0x8183 +#endif + +#ifndef GL_SGIX_texture_lod_bias +#define GL_TEXTURE_LOD_BIAS_S_SGIX 0x818E +#define GL_TEXTURE_LOD_BIAS_T_SGIX 0x818F +#define GL_TEXTURE_LOD_BIAS_R_SGIX 0x8190 +#endif + +#ifndef GL_SGIX_shadow_ambient +#define GL_SHADOW_AMBIENT_SGIX 0x80BF +#endif + +#ifndef GL_EXT_index_texture +#endif + +#ifndef GL_EXT_index_material +#define GL_INDEX_MATERIAL_EXT 0x81B8 +#define GL_INDEX_MATERIAL_PARAMETER_EXT 0x81B9 +#define GL_INDEX_MATERIAL_FACE_EXT 0x81BA +#endif + +#ifndef GL_EXT_index_func +#define GL_INDEX_TEST_EXT 0x81B5 +#define GL_INDEX_TEST_FUNC_EXT 0x81B6 +#define GL_INDEX_TEST_REF_EXT 0x81B7 +#endif + +#ifndef GL_EXT_index_array_formats +#define GL_IUI_V2F_EXT 0x81AD +#define GL_IUI_V3F_EXT 0x81AE +#define GL_IUI_N3F_V2F_EXT 0x81AF +#define GL_IUI_N3F_V3F_EXT 0x81B0 +#define GL_T2F_IUI_V2F_EXT 0x81B1 +#define GL_T2F_IUI_V3F_EXT 0x81B2 +#define GL_T2F_IUI_N3F_V2F_EXT 0x81B3 +#define GL_T2F_IUI_N3F_V3F_EXT 0x81B4 +#endif + +#ifndef GL_EXT_compiled_vertex_array +#define GL_ARRAY_ELEMENT_LOCK_FIRST_EXT 0x81A8 +#define GL_ARRAY_ELEMENT_LOCK_COUNT_EXT 0x81A9 +#endif + +#ifndef GL_EXT_cull_vertex +#define GL_CULL_VERTEX_EXT 0x81AA +#define GL_CULL_VERTEX_EYE_POSITION_EXT 0x81AB +#define GL_CULL_VERTEX_OBJECT_POSITION_EXT 0x81AC +#endif + +#ifndef GL_SGIX_ycrcb +#define GL_YCRCB_422_SGIX 0x81BB +#define GL_YCRCB_444_SGIX 0x81BC +#endif + +#ifndef GL_SGIX_fragment_lighting +#define GL_FRAGMENT_LIGHTING_SGIX 0x8400 +#define GL_FRAGMENT_COLOR_MATERIAL_SGIX 0x8401 +#define GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX 0x8402 +#define GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX 0x8403 +#define 
GL_MAX_FRAGMENT_LIGHTS_SGIX 0x8404 +#define GL_MAX_ACTIVE_LIGHTS_SGIX 0x8405 +#define GL_CURRENT_RASTER_NORMAL_SGIX 0x8406 +#define GL_LIGHT_ENV_MODE_SGIX 0x8407 +#define GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX 0x8408 +#define GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX 0x8409 +#define GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX 0x840A +#define GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX 0x840B +#define GL_FRAGMENT_LIGHT0_SGIX 0x840C +#define GL_FRAGMENT_LIGHT1_SGIX 0x840D +#define GL_FRAGMENT_LIGHT2_SGIX 0x840E +#define GL_FRAGMENT_LIGHT3_SGIX 0x840F +#define GL_FRAGMENT_LIGHT4_SGIX 0x8410 +#define GL_FRAGMENT_LIGHT5_SGIX 0x8411 +#define GL_FRAGMENT_LIGHT6_SGIX 0x8412 +#define GL_FRAGMENT_LIGHT7_SGIX 0x8413 +#endif + +#ifndef GL_IBM_rasterpos_clip +#define GL_RASTER_POSITION_UNCLIPPED_IBM 0x19262 +#endif + +#ifndef GL_HP_texture_lighting +#define GL_TEXTURE_LIGHTING_MODE_HP 0x8167 +#define GL_TEXTURE_POST_SPECULAR_HP 0x8168 +#define GL_TEXTURE_PRE_SPECULAR_HP 0x8169 +#endif + +#ifndef GL_EXT_draw_range_elements +#define GL_MAX_ELEMENTS_VERTICES_EXT 0x80E8 +#define GL_MAX_ELEMENTS_INDICES_EXT 0x80E9 +#endif + +#ifndef GL_WIN_phong_shading +#define GL_PHONG_WIN 0x80EA +#define GL_PHONG_HINT_WIN 0x80EB +#endif + +#ifndef GL_WIN_specular_fog +#define GL_FOG_SPECULAR_TEXTURE_WIN 0x80EC +#endif + +#ifndef GL_EXT_light_texture +#define GL_FRAGMENT_MATERIAL_EXT 0x8349 +#define GL_FRAGMENT_NORMAL_EXT 0x834A +#define GL_FRAGMENT_COLOR_EXT 0x834C +#define GL_ATTENUATION_EXT 0x834D +#define GL_SHADOW_ATTENUATION_EXT 0x834E +#define GL_TEXTURE_APPLICATION_MODE_EXT 0x834F +#define GL_TEXTURE_LIGHT_EXT 0x8350 +#define GL_TEXTURE_MATERIAL_FACE_EXT 0x8351 +#define GL_TEXTURE_MATERIAL_PARAMETER_EXT 0x8352 +/* reuse GL_FRAGMENT_DEPTH_EXT */ +#endif + +#ifndef GL_SGIX_blend_alpha_minmax +#define GL_ALPHA_MIN_SGIX 0x8320 +#define GL_ALPHA_MAX_SGIX 0x8321 +#endif + +#ifndef GL_SGIX_impact_pixel_texture +#define GL_PIXEL_TEX_GEN_Q_CEILING_SGIX 0x8184 +#define GL_PIXEL_TEX_GEN_Q_ROUND_SGIX 0x8185 +#define GL_PIXEL_TEX_GEN_Q_FLOOR_SGIX 0x8186 +#define GL_PIXEL_TEX_GEN_ALPHA_REPLACE_SGIX 0x8187 +#define GL_PIXEL_TEX_GEN_ALPHA_NO_REPLACE_SGIX 0x8188 +#define GL_PIXEL_TEX_GEN_ALPHA_LS_SGIX 0x8189 +#define GL_PIXEL_TEX_GEN_ALPHA_MS_SGIX 0x818A +#endif + +#ifndef GL_EXT_bgra +#define GL_BGR_EXT 0x80E0 +#define GL_BGRA_EXT 0x80E1 +#endif + +#ifndef GL_SGIX_async +#define GL_ASYNC_MARKER_SGIX 0x8329 +#endif + +#ifndef GL_SGIX_async_pixel +#define GL_ASYNC_TEX_IMAGE_SGIX 0x835C +#define GL_ASYNC_DRAW_PIXELS_SGIX 0x835D +#define GL_ASYNC_READ_PIXELS_SGIX 0x835E +#define GL_MAX_ASYNC_TEX_IMAGE_SGIX 0x835F +#define GL_MAX_ASYNC_DRAW_PIXELS_SGIX 0x8360 +#define GL_MAX_ASYNC_READ_PIXELS_SGIX 0x8361 +#endif + +#ifndef GL_SGIX_async_histogram +#define GL_ASYNC_HISTOGRAM_SGIX 0x832C +#define GL_MAX_ASYNC_HISTOGRAM_SGIX 0x832D +#endif + +#ifndef GL_INTEL_texture_scissor +#endif + +#ifndef GL_INTEL_parallel_arrays +#define GL_PARALLEL_ARRAYS_INTEL 0x83F4 +#define GL_VERTEX_ARRAY_PARALLEL_POINTERS_INTEL 0x83F5 +#define GL_NORMAL_ARRAY_PARALLEL_POINTERS_INTEL 0x83F6 +#define GL_COLOR_ARRAY_PARALLEL_POINTERS_INTEL 0x83F7 +#define GL_TEXTURE_COORD_ARRAY_PARALLEL_POINTERS_INTEL 0x83F8 +#endif + +#ifndef GL_HP_occlusion_test +#define GL_OCCLUSION_TEST_HP 0x8165 +#define GL_OCCLUSION_TEST_RESULT_HP 0x8166 +#endif + +#ifndef GL_EXT_pixel_transform +#define GL_PIXEL_TRANSFORM_2D_EXT 0x8330 +#define GL_PIXEL_MAG_FILTER_EXT 0x8331 +#define GL_PIXEL_MIN_FILTER_EXT 0x8332 +#define GL_PIXEL_CUBIC_WEIGHT_EXT 0x8333 +#define GL_CUBIC_EXT 0x8334 
+#define GL_AVERAGE_EXT 0x8335 +#define GL_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT 0x8336 +#define GL_MAX_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT 0x8337 +#define GL_PIXEL_TRANSFORM_2D_MATRIX_EXT 0x8338 +#endif + +#ifndef GL_EXT_pixel_transform_color_table +#endif + +#ifndef GL_EXT_shared_texture_palette +#define GL_SHARED_TEXTURE_PALETTE_EXT 0x81FB +#endif + +#ifndef GL_EXT_separate_specular_color +#define GL_LIGHT_MODEL_COLOR_CONTROL_EXT 0x81F8 +#define GL_SINGLE_COLOR_EXT 0x81F9 +#define GL_SEPARATE_SPECULAR_COLOR_EXT 0x81FA +#endif + +#ifndef GL_EXT_secondary_color +#define GL_COLOR_SUM_EXT 0x8458 +#define GL_CURRENT_SECONDARY_COLOR_EXT 0x8459 +#define GL_SECONDARY_COLOR_ARRAY_SIZE_EXT 0x845A +#define GL_SECONDARY_COLOR_ARRAY_TYPE_EXT 0x845B +#define GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT 0x845C +#define GL_SECONDARY_COLOR_ARRAY_POINTER_EXT 0x845D +#define GL_SECONDARY_COLOR_ARRAY_EXT 0x845E +#endif + +#ifndef GL_EXT_texture_perturb_normal +#define GL_PERTURB_EXT 0x85AE +#define GL_TEXTURE_NORMAL_EXT 0x85AF +#endif + +#ifndef GL_EXT_multi_draw_arrays +#endif + +#ifndef GL_EXT_fog_coord +#define GL_FOG_COORDINATE_SOURCE_EXT 0x8450 +#define GL_FOG_COORDINATE_EXT 0x8451 +#define GL_FRAGMENT_DEPTH_EXT 0x8452 +#define GL_CURRENT_FOG_COORDINATE_EXT 0x8453 +#define GL_FOG_COORDINATE_ARRAY_TYPE_EXT 0x8454 +#define GL_FOG_COORDINATE_ARRAY_STRIDE_EXT 0x8455 +#define GL_FOG_COORDINATE_ARRAY_POINTER_EXT 0x8456 +#define GL_FOG_COORDINATE_ARRAY_EXT 0x8457 +#endif + +#ifndef GL_REND_screen_coordinates +#define GL_SCREEN_COORDINATES_REND 0x8490 +#define GL_INVERTED_SCREEN_W_REND 0x8491 +#endif + +#ifndef GL_EXT_coordinate_frame +#define GL_TANGENT_ARRAY_EXT 0x8439 +#define GL_BINORMAL_ARRAY_EXT 0x843A +#define GL_CURRENT_TANGENT_EXT 0x843B +#define GL_CURRENT_BINORMAL_EXT 0x843C +#define GL_TANGENT_ARRAY_TYPE_EXT 0x843E +#define GL_TANGENT_ARRAY_STRIDE_EXT 0x843F +#define GL_BINORMAL_ARRAY_TYPE_EXT 0x8440 +#define GL_BINORMAL_ARRAY_STRIDE_EXT 0x8441 +#define GL_TANGENT_ARRAY_POINTER_EXT 0x8442 +#define GL_BINORMAL_ARRAY_POINTER_EXT 0x8443 +#define GL_MAP1_TANGENT_EXT 0x8444 +#define GL_MAP2_TANGENT_EXT 0x8445 +#define GL_MAP1_BINORMAL_EXT 0x8446 +#define GL_MAP2_BINORMAL_EXT 0x8447 +#endif + +#ifndef GL_EXT_texture_env_combine +#define GL_COMBINE_EXT 0x8570 +#define GL_COMBINE_RGB_EXT 0x8571 +#define GL_COMBINE_ALPHA_EXT 0x8572 +#define GL_RGB_SCALE_EXT 0x8573 +#define GL_ADD_SIGNED_EXT 0x8574 +#define GL_INTERPOLATE_EXT 0x8575 +#define GL_CONSTANT_EXT 0x8576 +#define GL_PRIMARY_COLOR_EXT 0x8577 +#define GL_PREVIOUS_EXT 0x8578 +#define GL_SOURCE0_RGB_EXT 0x8580 +#define GL_SOURCE1_RGB_EXT 0x8581 +#define GL_SOURCE2_RGB_EXT 0x8582 +#define GL_SOURCE0_ALPHA_EXT 0x8588 +#define GL_SOURCE1_ALPHA_EXT 0x8589 +#define GL_SOURCE2_ALPHA_EXT 0x858A +#define GL_OPERAND0_RGB_EXT 0x8590 +#define GL_OPERAND1_RGB_EXT 0x8591 +#define GL_OPERAND2_RGB_EXT 0x8592 +#define GL_OPERAND0_ALPHA_EXT 0x8598 +#define GL_OPERAND1_ALPHA_EXT 0x8599 +#define GL_OPERAND2_ALPHA_EXT 0x859A +#endif + +#ifndef GL_APPLE_specular_vector +#define GL_LIGHT_MODEL_SPECULAR_VECTOR_APPLE 0x85B0 +#endif + +#ifndef GL_APPLE_transform_hint +#define GL_TRANSFORM_HINT_APPLE 0x85B1 +#endif + +#ifndef GL_SGIX_fog_scale +#define GL_FOG_SCALE_SGIX 0x81FC +#define GL_FOG_SCALE_VALUE_SGIX 0x81FD +#endif + +#ifndef GL_SUNX_constant_data +#define GL_UNPACK_CONSTANT_DATA_SUNX 0x81D5 +#define GL_TEXTURE_CONSTANT_DATA_SUNX 0x81D6 +#endif + +#ifndef GL_SUN_global_alpha +#define GL_GLOBAL_ALPHA_SUN 0x81D9 +#define GL_GLOBAL_ALPHA_FACTOR_SUN 0x81DA +#endif + +#ifndef 
GL_SUN_triangle_list +#define GL_RESTART_SUN 0x0001 +#define GL_REPLACE_MIDDLE_SUN 0x0002 +#define GL_REPLACE_OLDEST_SUN 0x0003 +#define GL_TRIANGLE_LIST_SUN 0x81D7 +#define GL_REPLACEMENT_CODE_SUN 0x81D8 +#define GL_REPLACEMENT_CODE_ARRAY_SUN 0x85C0 +#define GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN 0x85C1 +#define GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN 0x85C2 +#define GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN 0x85C3 +#define GL_R1UI_V3F_SUN 0x85C4 +#define GL_R1UI_C4UB_V3F_SUN 0x85C5 +#define GL_R1UI_C3F_V3F_SUN 0x85C6 +#define GL_R1UI_N3F_V3F_SUN 0x85C7 +#define GL_R1UI_C4F_N3F_V3F_SUN 0x85C8 +#define GL_R1UI_T2F_V3F_SUN 0x85C9 +#define GL_R1UI_T2F_N3F_V3F_SUN 0x85CA +#define GL_R1UI_T2F_C4F_N3F_V3F_SUN 0x85CB +#endif + +#ifndef GL_SUN_vertex +#endif + +#ifndef GL_EXT_blend_func_separate +#define GL_BLEND_DST_RGB_EXT 0x80C8 +#define GL_BLEND_SRC_RGB_EXT 0x80C9 +#define GL_BLEND_DST_ALPHA_EXT 0x80CA +#define GL_BLEND_SRC_ALPHA_EXT 0x80CB +#endif + +#ifndef GL_INGR_color_clamp +#define GL_RED_MIN_CLAMP_INGR 0x8560 +#define GL_GREEN_MIN_CLAMP_INGR 0x8561 +#define GL_BLUE_MIN_CLAMP_INGR 0x8562 +#define GL_ALPHA_MIN_CLAMP_INGR 0x8563 +#define GL_RED_MAX_CLAMP_INGR 0x8564 +#define GL_GREEN_MAX_CLAMP_INGR 0x8565 +#define GL_BLUE_MAX_CLAMP_INGR 0x8566 +#define GL_ALPHA_MAX_CLAMP_INGR 0x8567 +#endif + +#ifndef GL_INGR_interlace_read +#define GL_INTERLACE_READ_INGR 0x8568 +#endif + +#ifndef GL_EXT_stencil_wrap +#define GL_INCR_WRAP_EXT 0x8507 +#define GL_DECR_WRAP_EXT 0x8508 +#endif + +#ifndef GL_EXT_422_pixels +#define GL_422_EXT 0x80CC +#define GL_422_REV_EXT 0x80CD +#define GL_422_AVERAGE_EXT 0x80CE +#define GL_422_REV_AVERAGE_EXT 0x80CF +#endif + +#ifndef GL_NV_texgen_reflection +#define GL_NORMAL_MAP_NV 0x8511 +#define GL_REFLECTION_MAP_NV 0x8512 +#endif + +#ifndef GL_EXT_texture_cube_map +#define GL_NORMAL_MAP_EXT 0x8511 +#define GL_REFLECTION_MAP_EXT 0x8512 +#define GL_TEXTURE_CUBE_MAP_EXT 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP_EXT 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X_EXT 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X_EXT 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y_EXT 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z_EXT 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT 0x851A +#define GL_PROXY_TEXTURE_CUBE_MAP_EXT 0x851B +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE_EXT 0x851C +#endif + +#ifndef GL_SUN_convolution_border_modes +#define GL_WRAP_BORDER_SUN 0x81D4 +#endif + +#ifndef GL_EXT_texture_env_add +#endif + +#ifndef GL_EXT_texture_lod_bias +#define GL_MAX_TEXTURE_LOD_BIAS_EXT 0x84FD +#define GL_TEXTURE_FILTER_CONTROL_EXT 0x8500 +#define GL_TEXTURE_LOD_BIAS_EXT 0x8501 +#endif + +#ifndef GL_EXT_texture_filter_anisotropic +#define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE +#define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF +#endif + +#ifndef GL_EXT_vertex_weighting +#define GL_MODELVIEW0_STACK_DEPTH_EXT GL_MODELVIEW_STACK_DEPTH +#define GL_MODELVIEW1_STACK_DEPTH_EXT 0x8502 +#define GL_MODELVIEW0_MATRIX_EXT GL_MODELVIEW_MATRIX +#define GL_MODELVIEW1_MATRIX_EXT 0x8506 +#define GL_VERTEX_WEIGHTING_EXT 0x8509 +#define GL_MODELVIEW0_EXT GL_MODELVIEW +#define GL_MODELVIEW1_EXT 0x850A +#define GL_CURRENT_VERTEX_WEIGHT_EXT 0x850B +#define GL_VERTEX_WEIGHT_ARRAY_EXT 0x850C +#define GL_VERTEX_WEIGHT_ARRAY_SIZE_EXT 0x850D +#define GL_VERTEX_WEIGHT_ARRAY_TYPE_EXT 0x850E +#define GL_VERTEX_WEIGHT_ARRAY_STRIDE_EXT 0x850F +#define GL_VERTEX_WEIGHT_ARRAY_POINTER_EXT 0x8510 +#endif + +#ifndef GL_NV_light_max_exponent +#define GL_MAX_SHININESS_NV 0x8504 +#define 
GL_MAX_SPOT_EXPONENT_NV 0x8505 +#endif + +#ifndef GL_NV_vertex_array_range +#define GL_VERTEX_ARRAY_RANGE_NV 0x851D +#define GL_VERTEX_ARRAY_RANGE_LENGTH_NV 0x851E +#define GL_VERTEX_ARRAY_RANGE_VALID_NV 0x851F +#define GL_MAX_VERTEX_ARRAY_RANGE_ELEMENT_NV 0x8520 +#define GL_VERTEX_ARRAY_RANGE_POINTER_NV 0x8521 +#endif + +#ifndef GL_NV_register_combiners +#define GL_REGISTER_COMBINERS_NV 0x8522 +#define GL_VARIABLE_A_NV 0x8523 +#define GL_VARIABLE_B_NV 0x8524 +#define GL_VARIABLE_C_NV 0x8525 +#define GL_VARIABLE_D_NV 0x8526 +#define GL_VARIABLE_E_NV 0x8527 +#define GL_VARIABLE_F_NV 0x8528 +#define GL_VARIABLE_G_NV 0x8529 +#define GL_CONSTANT_COLOR0_NV 0x852A +#define GL_CONSTANT_COLOR1_NV 0x852B +#define GL_PRIMARY_COLOR_NV 0x852C +#define GL_SECONDARY_COLOR_NV 0x852D +#define GL_SPARE0_NV 0x852E +#define GL_SPARE1_NV 0x852F +#define GL_DISCARD_NV 0x8530 +#define GL_E_TIMES_F_NV 0x8531 +#define GL_SPARE0_PLUS_SECONDARY_COLOR_NV 0x8532 +#define GL_UNSIGNED_IDENTITY_NV 0x8536 +#define GL_UNSIGNED_INVERT_NV 0x8537 +#define GL_EXPAND_NORMAL_NV 0x8538 +#define GL_EXPAND_NEGATE_NV 0x8539 +#define GL_HALF_BIAS_NORMAL_NV 0x853A +#define GL_HALF_BIAS_NEGATE_NV 0x853B +#define GL_SIGNED_IDENTITY_NV 0x853C +#define GL_SIGNED_NEGATE_NV 0x853D +#define GL_SCALE_BY_TWO_NV 0x853E +#define GL_SCALE_BY_FOUR_NV 0x853F +#define GL_SCALE_BY_ONE_HALF_NV 0x8540 +#define GL_BIAS_BY_NEGATIVE_ONE_HALF_NV 0x8541 +#define GL_COMBINER_INPUT_NV 0x8542 +#define GL_COMBINER_MAPPING_NV 0x8543 +#define GL_COMBINER_COMPONENT_USAGE_NV 0x8544 +#define GL_COMBINER_AB_DOT_PRODUCT_NV 0x8545 +#define GL_COMBINER_CD_DOT_PRODUCT_NV 0x8546 +#define GL_COMBINER_MUX_SUM_NV 0x8547 +#define GL_COMBINER_SCALE_NV 0x8548 +#define GL_COMBINER_BIAS_NV 0x8549 +#define GL_COMBINER_AB_OUTPUT_NV 0x854A +#define GL_COMBINER_CD_OUTPUT_NV 0x854B +#define GL_COMBINER_SUM_OUTPUT_NV 0x854C +#define GL_MAX_GENERAL_COMBINERS_NV 0x854D +#define GL_NUM_GENERAL_COMBINERS_NV 0x854E +#define GL_COLOR_SUM_CLAMP_NV 0x854F +#define GL_COMBINER0_NV 0x8550 +#define GL_COMBINER1_NV 0x8551 +#define GL_COMBINER2_NV 0x8552 +#define GL_COMBINER3_NV 0x8553 +#define GL_COMBINER4_NV 0x8554 +#define GL_COMBINER5_NV 0x8555 +#define GL_COMBINER6_NV 0x8556 +#define GL_COMBINER7_NV 0x8557 +/* reuse GL_TEXTURE0_ARB */ +/* reuse GL_TEXTURE1_ARB */ +/* reuse GL_ZERO */ +/* reuse GL_NONE */ +/* reuse GL_FOG */ +#endif + +#ifndef GL_NV_fog_distance +#define GL_FOG_DISTANCE_MODE_NV 0x855A +#define GL_EYE_RADIAL_NV 0x855B +#define GL_EYE_PLANE_ABSOLUTE_NV 0x855C +/* reuse GL_EYE_PLANE */ +#endif + +#ifndef GL_NV_texgen_emboss +#define GL_EMBOSS_LIGHT_NV 0x855D +#define GL_EMBOSS_CONSTANT_NV 0x855E +#define GL_EMBOSS_MAP_NV 0x855F +#endif + +#ifndef GL_NV_blend_square +#endif + +#ifndef GL_NV_texture_env_combine4 +#define GL_COMBINE4_NV 0x8503 +#define GL_SOURCE3_RGB_NV 0x8583 +#define GL_SOURCE3_ALPHA_NV 0x858B +#define GL_OPERAND3_RGB_NV 0x8593 +#define GL_OPERAND3_ALPHA_NV 0x859B +#endif + +#ifndef GL_MESA_resize_buffers +#endif + +#ifndef GL_MESA_window_pos +#endif + +#ifndef GL_EXT_texture_compression_s3tc +#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0 +#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1 +#define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2 +#define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3 +#endif + +#ifndef GL_IBM_cull_vertex +#define GL_CULL_VERTEX_IBM 103050 +#endif + +#ifndef GL_IBM_multimode_draw_arrays +#endif + +#ifndef GL_IBM_vertex_array_lists +#define GL_VERTEX_ARRAY_LIST_IBM 103070 +#define GL_NORMAL_ARRAY_LIST_IBM 103071 +#define GL_COLOR_ARRAY_LIST_IBM 
103072 +#define GL_INDEX_ARRAY_LIST_IBM 103073 +#define GL_TEXTURE_COORD_ARRAY_LIST_IBM 103074 +#define GL_EDGE_FLAG_ARRAY_LIST_IBM 103075 +#define GL_FOG_COORDINATE_ARRAY_LIST_IBM 103076 +#define GL_SECONDARY_COLOR_ARRAY_LIST_IBM 103077 +#define GL_VERTEX_ARRAY_LIST_STRIDE_IBM 103080 +#define GL_NORMAL_ARRAY_LIST_STRIDE_IBM 103081 +#define GL_COLOR_ARRAY_LIST_STRIDE_IBM 103082 +#define GL_INDEX_ARRAY_LIST_STRIDE_IBM 103083 +#define GL_TEXTURE_COORD_ARRAY_LIST_STRIDE_IBM 103084 +#define GL_EDGE_FLAG_ARRAY_LIST_STRIDE_IBM 103085 +#define GL_FOG_COORDINATE_ARRAY_LIST_STRIDE_IBM 103086 +#define GL_SECONDARY_COLOR_ARRAY_LIST_STRIDE_IBM 103087 +#endif + +#ifndef GL_SGIX_subsample +#define GL_PACK_SUBSAMPLE_RATE_SGIX 0x85A0 +#define GL_UNPACK_SUBSAMPLE_RATE_SGIX 0x85A1 +#define GL_PIXEL_SUBSAMPLE_4444_SGIX 0x85A2 +#define GL_PIXEL_SUBSAMPLE_2424_SGIX 0x85A3 +#define GL_PIXEL_SUBSAMPLE_4242_SGIX 0x85A4 +#endif + +#ifndef GL_SGIX_ycrcb_subsample +#endif + +#ifndef GL_SGIX_ycrcba +#define GL_YCRCB_SGIX 0x8318 +#define GL_YCRCBA_SGIX 0x8319 +#endif + +#ifndef GL_SGI_depth_pass_instrument +#define GL_DEPTH_PASS_INSTRUMENT_SGIX 0x8310 +#define GL_DEPTH_PASS_INSTRUMENT_COUNTERS_SGIX 0x8311 +#define GL_DEPTH_PASS_INSTRUMENT_MAX_SGIX 0x8312 +#endif + +#ifndef GL_3DFX_texture_compression_FXT1 +#define GL_COMPRESSED_RGB_FXT1_3DFX 0x86B0 +#define GL_COMPRESSED_RGBA_FXT1_3DFX 0x86B1 +#endif + +#ifndef GL_3DFX_multisample +#define GL_MULTISAMPLE_3DFX 0x86B2 +#define GL_SAMPLE_BUFFERS_3DFX 0x86B3 +#define GL_SAMPLES_3DFX 0x86B4 +#define GL_MULTISAMPLE_BIT_3DFX 0x20000000 +#endif + +#ifndef GL_3DFX_tbuffer +#endif + +#ifndef GL_EXT_multisample +#define GL_MULTISAMPLE_EXT 0x809D +#define GL_SAMPLE_ALPHA_TO_MASK_EXT 0x809E +#define GL_SAMPLE_ALPHA_TO_ONE_EXT 0x809F +#define GL_SAMPLE_MASK_EXT 0x80A0 +#define GL_1PASS_EXT 0x80A1 +#define GL_2PASS_0_EXT 0x80A2 +#define GL_2PASS_1_EXT 0x80A3 +#define GL_4PASS_0_EXT 0x80A4 +#define GL_4PASS_1_EXT 0x80A5 +#define GL_4PASS_2_EXT 0x80A6 +#define GL_4PASS_3_EXT 0x80A7 +#define GL_SAMPLE_BUFFERS_EXT 0x80A8 +#define GL_SAMPLES_EXT 0x80A9 +#define GL_SAMPLE_MASK_VALUE_EXT 0x80AA +#define GL_SAMPLE_MASK_INVERT_EXT 0x80AB +#define GL_SAMPLE_PATTERN_EXT 0x80AC +#define GL_MULTISAMPLE_BIT_EXT 0x20000000 +#endif + +#ifndef GL_SGIX_vertex_preclip +#define GL_VERTEX_PRECLIP_SGIX 0x83EE +#define GL_VERTEX_PRECLIP_HINT_SGIX 0x83EF +#endif + +#ifndef GL_SGIX_convolution_accuracy +#define GL_CONVOLUTION_HINT_SGIX 0x8316 +#endif + +#ifndef GL_SGIX_resample +#define GL_PACK_RESAMPLE_SGIX 0x842C +#define GL_UNPACK_RESAMPLE_SGIX 0x842D +#define GL_RESAMPLE_REPLICATE_SGIX 0x842E +#define GL_RESAMPLE_ZERO_FILL_SGIX 0x842F +#define GL_RESAMPLE_DECIMATE_SGIX 0x8430 +#endif + +#ifndef GL_SGIS_point_line_texgen +#define GL_EYE_DISTANCE_TO_POINT_SGIS 0x81F0 +#define GL_OBJECT_DISTANCE_TO_POINT_SGIS 0x81F1 +#define GL_EYE_DISTANCE_TO_LINE_SGIS 0x81F2 +#define GL_OBJECT_DISTANCE_TO_LINE_SGIS 0x81F3 +#define GL_EYE_POINT_SGIS 0x81F4 +#define GL_OBJECT_POINT_SGIS 0x81F5 +#define GL_EYE_LINE_SGIS 0x81F6 +#define GL_OBJECT_LINE_SGIS 0x81F7 +#endif + +#ifndef GL_SGIS_texture_color_mask +#define GL_TEXTURE_COLOR_WRITEMASK_SGIS 0x81EF +#endif + +#ifndef GL_EXT_texture_env_dot3 +#define GL_DOT3_RGB_EXT 0x8740 +#define GL_DOT3_RGBA_EXT 0x8741 +#endif + +#ifndef GL_ATI_texture_mirror_once +#define GL_MIRROR_CLAMP_ATI 0x8742 +#define GL_MIRROR_CLAMP_TO_EDGE_ATI 0x8743 +#endif + +#ifndef GL_NV_fence +#define GL_ALL_COMPLETED_NV 0x84F2 +#define GL_FENCE_STATUS_NV 0x84F3 +#define GL_FENCE_CONDITION_NV 0x84F4 
+#endif + +#ifndef GL_IBM_texture_mirrored_repeat +#define GL_MIRRORED_REPEAT_IBM 0x8370 +#endif + +#ifndef GL_NV_evaluators +#define GL_EVAL_2D_NV 0x86C0 +#define GL_EVAL_TRIANGULAR_2D_NV 0x86C1 +#define GL_MAP_TESSELLATION_NV 0x86C2 +#define GL_MAP_ATTRIB_U_ORDER_NV 0x86C3 +#define GL_MAP_ATTRIB_V_ORDER_NV 0x86C4 +#define GL_EVAL_FRACTIONAL_TESSELLATION_NV 0x86C5 +#define GL_EVAL_VERTEX_ATTRIB0_NV 0x86C6 +#define GL_EVAL_VERTEX_ATTRIB1_NV 0x86C7 +#define GL_EVAL_VERTEX_ATTRIB2_NV 0x86C8 +#define GL_EVAL_VERTEX_ATTRIB3_NV 0x86C9 +#define GL_EVAL_VERTEX_ATTRIB4_NV 0x86CA +#define GL_EVAL_VERTEX_ATTRIB5_NV 0x86CB +#define GL_EVAL_VERTEX_ATTRIB6_NV 0x86CC +#define GL_EVAL_VERTEX_ATTRIB7_NV 0x86CD +#define GL_EVAL_VERTEX_ATTRIB8_NV 0x86CE +#define GL_EVAL_VERTEX_ATTRIB9_NV 0x86CF +#define GL_EVAL_VERTEX_ATTRIB10_NV 0x86D0 +#define GL_EVAL_VERTEX_ATTRIB11_NV 0x86D1 +#define GL_EVAL_VERTEX_ATTRIB12_NV 0x86D2 +#define GL_EVAL_VERTEX_ATTRIB13_NV 0x86D3 +#define GL_EVAL_VERTEX_ATTRIB14_NV 0x86D4 +#define GL_EVAL_VERTEX_ATTRIB15_NV 0x86D5 +#define GL_MAX_MAP_TESSELLATION_NV 0x86D6 +#define GL_MAX_RATIONAL_EVAL_ORDER_NV 0x86D7 +#endif + +#ifndef GL_NV_packed_depth_stencil +#define GL_DEPTH_STENCIL_NV 0x84F9 +#define GL_UNSIGNED_INT_24_8_NV 0x84FA +#endif + +#ifndef GL_NV_register_combiners2 +#define GL_PER_STAGE_CONSTANTS_NV 0x8535 +#endif + +#ifndef GL_NV_texture_compression_vtc +#endif + +#ifndef GL_NV_texture_rectangle +#define GL_TEXTURE_RECTANGLE_NV 0x84F5 +#define GL_TEXTURE_BINDING_RECTANGLE_NV 0x84F6 +#define GL_PROXY_TEXTURE_RECTANGLE_NV 0x84F7 +#define GL_MAX_RECTANGLE_TEXTURE_SIZE_NV 0x84F8 +#endif + +#ifndef GL_NV_texture_shader +#define GL_OFFSET_TEXTURE_RECTANGLE_NV 0x864C +#define GL_OFFSET_TEXTURE_RECTANGLE_SCALE_NV 0x864D +#define GL_DOT_PRODUCT_TEXTURE_RECTANGLE_NV 0x864E +#define GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV 0x86D9 +#define GL_UNSIGNED_INT_S8_S8_8_8_NV 0x86DA +#define GL_UNSIGNED_INT_8_8_S8_S8_REV_NV 0x86DB +#define GL_DSDT_MAG_INTENSITY_NV 0x86DC +#define GL_SHADER_CONSISTENT_NV 0x86DD +#define GL_TEXTURE_SHADER_NV 0x86DE +#define GL_SHADER_OPERATION_NV 0x86DF +#define GL_CULL_MODES_NV 0x86E0 +#define GL_OFFSET_TEXTURE_MATRIX_NV 0x86E1 +#define GL_OFFSET_TEXTURE_SCALE_NV 0x86E2 +#define GL_OFFSET_TEXTURE_BIAS_NV 0x86E3 +#define GL_OFFSET_TEXTURE_2D_MATRIX_NV GL_OFFSET_TEXTURE_MATRIX_NV +#define GL_OFFSET_TEXTURE_2D_SCALE_NV GL_OFFSET_TEXTURE_SCALE_NV +#define GL_OFFSET_TEXTURE_2D_BIAS_NV GL_OFFSET_TEXTURE_BIAS_NV +#define GL_PREVIOUS_TEXTURE_INPUT_NV 0x86E4 +#define GL_CONST_EYE_NV 0x86E5 +#define GL_PASS_THROUGH_NV 0x86E6 +#define GL_CULL_FRAGMENT_NV 0x86E7 +#define GL_OFFSET_TEXTURE_2D_NV 0x86E8 +#define GL_DEPENDENT_AR_TEXTURE_2D_NV 0x86E9 +#define GL_DEPENDENT_GB_TEXTURE_2D_NV 0x86EA +#define GL_DOT_PRODUCT_NV 0x86EC +#define GL_DOT_PRODUCT_DEPTH_REPLACE_NV 0x86ED +#define GL_DOT_PRODUCT_TEXTURE_2D_NV 0x86EE +#define GL_DOT_PRODUCT_TEXTURE_CUBE_MAP_NV 0x86F0 +#define GL_DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV 0x86F1 +#define GL_DOT_PRODUCT_REFLECT_CUBE_MAP_NV 0x86F2 +#define GL_DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV 0x86F3 +#define GL_HILO_NV 0x86F4 +#define GL_DSDT_NV 0x86F5 +#define GL_DSDT_MAG_NV 0x86F6 +#define GL_DSDT_MAG_VIB_NV 0x86F7 +#define GL_HILO16_NV 0x86F8 +#define GL_SIGNED_HILO_NV 0x86F9 +#define GL_SIGNED_HILO16_NV 0x86FA +#define GL_SIGNED_RGBA_NV 0x86FB +#define GL_SIGNED_RGBA8_NV 0x86FC +#define GL_SIGNED_RGB_NV 0x86FE +#define GL_SIGNED_RGB8_NV 0x86FF +#define GL_SIGNED_LUMINANCE_NV 0x8701 +#define GL_SIGNED_LUMINANCE8_NV 0x8702 +#define 
GL_SIGNED_LUMINANCE_ALPHA_NV 0x8703 +#define GL_SIGNED_LUMINANCE8_ALPHA8_NV 0x8704 +#define GL_SIGNED_ALPHA_NV 0x8705 +#define GL_SIGNED_ALPHA8_NV 0x8706 +#define GL_SIGNED_INTENSITY_NV 0x8707 +#define GL_SIGNED_INTENSITY8_NV 0x8708 +#define GL_DSDT8_NV 0x8709 +#define GL_DSDT8_MAG8_NV 0x870A +#define GL_DSDT8_MAG8_INTENSITY8_NV 0x870B +#define GL_SIGNED_RGB_UNSIGNED_ALPHA_NV 0x870C +#define GL_SIGNED_RGB8_UNSIGNED_ALPHA8_NV 0x870D +#define GL_HI_SCALE_NV 0x870E +#define GL_LO_SCALE_NV 0x870F +#define GL_DS_SCALE_NV 0x8710 +#define GL_DT_SCALE_NV 0x8711 +#define GL_MAGNITUDE_SCALE_NV 0x8712 +#define GL_VIBRANCE_SCALE_NV 0x8713 +#define GL_HI_BIAS_NV 0x8714 +#define GL_LO_BIAS_NV 0x8715 +#define GL_DS_BIAS_NV 0x8716 +#define GL_DT_BIAS_NV 0x8717 +#define GL_MAGNITUDE_BIAS_NV 0x8718 +#define GL_VIBRANCE_BIAS_NV 0x8719 +#define GL_TEXTURE_BORDER_VALUES_NV 0x871A +#define GL_TEXTURE_HI_SIZE_NV 0x871B +#define GL_TEXTURE_LO_SIZE_NV 0x871C +#define GL_TEXTURE_DS_SIZE_NV 0x871D +#define GL_TEXTURE_DT_SIZE_NV 0x871E +#define GL_TEXTURE_MAG_SIZE_NV 0x871F +#endif + +#ifndef GL_NV_texture_shader2 +#define GL_DOT_PRODUCT_TEXTURE_3D_NV 0x86EF +#endif + +#ifndef GL_NV_vertex_array_range2 +#define GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV 0x8533 +#endif + +#ifndef GL_NV_vertex_program +#define GL_VERTEX_PROGRAM_NV 0x8620 +#define GL_VERTEX_STATE_PROGRAM_NV 0x8621 +#define GL_ATTRIB_ARRAY_SIZE_NV 0x8623 +#define GL_ATTRIB_ARRAY_STRIDE_NV 0x8624 +#define GL_ATTRIB_ARRAY_TYPE_NV 0x8625 +#define GL_CURRENT_ATTRIB_NV 0x8626 +#define GL_PROGRAM_LENGTH_NV 0x8627 +#define GL_PROGRAM_STRING_NV 0x8628 +#define GL_MODELVIEW_PROJECTION_NV 0x8629 +#define GL_IDENTITY_NV 0x862A +#define GL_INVERSE_NV 0x862B +#define GL_TRANSPOSE_NV 0x862C +#define GL_INVERSE_TRANSPOSE_NV 0x862D +#define GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV 0x862E +#define GL_MAX_TRACK_MATRICES_NV 0x862F +#define GL_MATRIX0_NV 0x8630 +#define GL_MATRIX1_NV 0x8631 +#define GL_MATRIX2_NV 0x8632 +#define GL_MATRIX3_NV 0x8633 +#define GL_MATRIX4_NV 0x8634 +#define GL_MATRIX5_NV 0x8635 +#define GL_MATRIX6_NV 0x8636 +#define GL_MATRIX7_NV 0x8637 +#define GL_CURRENT_MATRIX_STACK_DEPTH_NV 0x8640 +#define GL_CURRENT_MATRIX_NV 0x8641 +#define GL_VERTEX_PROGRAM_POINT_SIZE_NV 0x8642 +#define GL_VERTEX_PROGRAM_TWO_SIDE_NV 0x8643 +#define GL_PROGRAM_PARAMETER_NV 0x8644 +#define GL_ATTRIB_ARRAY_POINTER_NV 0x8645 +#define GL_PROGRAM_TARGET_NV 0x8646 +#define GL_PROGRAM_RESIDENT_NV 0x8647 +#define GL_TRACK_MATRIX_NV 0x8648 +#define GL_TRACK_MATRIX_TRANSFORM_NV 0x8649 +#define GL_VERTEX_PROGRAM_BINDING_NV 0x864A +#define GL_PROGRAM_ERROR_POSITION_NV 0x864B +#define GL_VERTEX_ATTRIB_ARRAY0_NV 0x8650 +#define GL_VERTEX_ATTRIB_ARRAY1_NV 0x8651 +#define GL_VERTEX_ATTRIB_ARRAY2_NV 0x8652 +#define GL_VERTEX_ATTRIB_ARRAY3_NV 0x8653 +#define GL_VERTEX_ATTRIB_ARRAY4_NV 0x8654 +#define GL_VERTEX_ATTRIB_ARRAY5_NV 0x8655 +#define GL_VERTEX_ATTRIB_ARRAY6_NV 0x8656 +#define GL_VERTEX_ATTRIB_ARRAY7_NV 0x8657 +#define GL_VERTEX_ATTRIB_ARRAY8_NV 0x8658 +#define GL_VERTEX_ATTRIB_ARRAY9_NV 0x8659 +#define GL_VERTEX_ATTRIB_ARRAY10_NV 0x865A +#define GL_VERTEX_ATTRIB_ARRAY11_NV 0x865B +#define GL_VERTEX_ATTRIB_ARRAY12_NV 0x865C +#define GL_VERTEX_ATTRIB_ARRAY13_NV 0x865D +#define GL_VERTEX_ATTRIB_ARRAY14_NV 0x865E +#define GL_VERTEX_ATTRIB_ARRAY15_NV 0x865F +#define GL_MAP1_VERTEX_ATTRIB0_4_NV 0x8660 +#define GL_MAP1_VERTEX_ATTRIB1_4_NV 0x8661 +#define GL_MAP1_VERTEX_ATTRIB2_4_NV 0x8662 +#define GL_MAP1_VERTEX_ATTRIB3_4_NV 0x8663 +#define GL_MAP1_VERTEX_ATTRIB4_4_NV 0x8664 +#define 
GL_MAP1_VERTEX_ATTRIB5_4_NV 0x8665 +#define GL_MAP1_VERTEX_ATTRIB6_4_NV 0x8666 +#define GL_MAP1_VERTEX_ATTRIB7_4_NV 0x8667 +#define GL_MAP1_VERTEX_ATTRIB8_4_NV 0x8668 +#define GL_MAP1_VERTEX_ATTRIB9_4_NV 0x8669 +#define GL_MAP1_VERTEX_ATTRIB10_4_NV 0x866A +#define GL_MAP1_VERTEX_ATTRIB11_4_NV 0x866B +#define GL_MAP1_VERTEX_ATTRIB12_4_NV 0x866C +#define GL_MAP1_VERTEX_ATTRIB13_4_NV 0x866D +#define GL_MAP1_VERTEX_ATTRIB14_4_NV 0x866E +#define GL_MAP1_VERTEX_ATTRIB15_4_NV 0x866F +#define GL_MAP2_VERTEX_ATTRIB0_4_NV 0x8670 +#define GL_MAP2_VERTEX_ATTRIB1_4_NV 0x8671 +#define GL_MAP2_VERTEX_ATTRIB2_4_NV 0x8672 +#define GL_MAP2_VERTEX_ATTRIB3_4_NV 0x8673 +#define GL_MAP2_VERTEX_ATTRIB4_4_NV 0x8674 +#define GL_MAP2_VERTEX_ATTRIB5_4_NV 0x8675 +#define GL_MAP2_VERTEX_ATTRIB6_4_NV 0x8676 +#define GL_MAP2_VERTEX_ATTRIB7_4_NV 0x8677 +#define GL_MAP2_VERTEX_ATTRIB8_4_NV 0x8678 +#define GL_MAP2_VERTEX_ATTRIB9_4_NV 0x8679 +#define GL_MAP2_VERTEX_ATTRIB10_4_NV 0x867A +#define GL_MAP2_VERTEX_ATTRIB11_4_NV 0x867B +#define GL_MAP2_VERTEX_ATTRIB12_4_NV 0x867C +#define GL_MAP2_VERTEX_ATTRIB13_4_NV 0x867D +#define GL_MAP2_VERTEX_ATTRIB14_4_NV 0x867E +#define GL_MAP2_VERTEX_ATTRIB15_4_NV 0x867F +#endif + +#ifndef GL_SGIX_texture_coordinate_clamp +#define GL_TEXTURE_MAX_CLAMP_S_SGIX 0x8369 +#define GL_TEXTURE_MAX_CLAMP_T_SGIX 0x836A +#define GL_TEXTURE_MAX_CLAMP_R_SGIX 0x836B +#endif + +#ifndef GL_SGIX_scalebias_hint +#define GL_SCALEBIAS_HINT_SGIX 0x8322 +#endif + +#ifndef GL_OML_interlace +#define GL_INTERLACE_OML 0x8980 +#define GL_INTERLACE_READ_OML 0x8981 +#endif + +#ifndef GL_OML_subsample +#define GL_FORMAT_SUBSAMPLE_24_24_OML 0x8982 +#define GL_FORMAT_SUBSAMPLE_244_244_OML 0x8983 +#endif + +#ifndef GL_OML_resample +#define GL_PACK_RESAMPLE_OML 0x8984 +#define GL_UNPACK_RESAMPLE_OML 0x8985 +#define GL_RESAMPLE_REPLICATE_OML 0x8986 +#define GL_RESAMPLE_ZERO_FILL_OML 0x8987 +#define GL_RESAMPLE_AVERAGE_OML 0x8988 +#define GL_RESAMPLE_DECIMATE_OML 0x8989 +#endif + +#ifndef GL_NV_copy_depth_to_color +#define GL_DEPTH_STENCIL_TO_RGBA_NV 0x886E +#define GL_DEPTH_STENCIL_TO_BGRA_NV 0x886F +#endif + +#ifndef GL_ATI_envmap_bumpmap +#define GL_BUMP_ROT_MATRIX_ATI 0x8775 +#define GL_BUMP_ROT_MATRIX_SIZE_ATI 0x8776 +#define GL_BUMP_NUM_TEX_UNITS_ATI 0x8777 +#define GL_BUMP_TEX_UNITS_ATI 0x8778 +#define GL_DUDV_ATI 0x8779 +#define GL_DU8DV8_ATI 0x877A +#define GL_BUMP_ENVMAP_ATI 0x877B +#define GL_BUMP_TARGET_ATI 0x877C +#endif + +#ifndef GL_ATI_fragment_shader +#define GL_FRAGMENT_SHADER_ATI 0x8920 +#define GL_REG_0_ATI 0x8921 +#define GL_REG_1_ATI 0x8922 +#define GL_REG_2_ATI 0x8923 +#define GL_REG_3_ATI 0x8924 +#define GL_REG_4_ATI 0x8925 +#define GL_REG_5_ATI 0x8926 +#define GL_REG_6_ATI 0x8927 +#define GL_REG_7_ATI 0x8928 +#define GL_REG_8_ATI 0x8929 +#define GL_REG_9_ATI 0x892A +#define GL_REG_10_ATI 0x892B +#define GL_REG_11_ATI 0x892C +#define GL_REG_12_ATI 0x892D +#define GL_REG_13_ATI 0x892E +#define GL_REG_14_ATI 0x892F +#define GL_REG_15_ATI 0x8930 +#define GL_REG_16_ATI 0x8931 +#define GL_REG_17_ATI 0x8932 +#define GL_REG_18_ATI 0x8933 +#define GL_REG_19_ATI 0x8934 +#define GL_REG_20_ATI 0x8935 +#define GL_REG_21_ATI 0x8936 +#define GL_REG_22_ATI 0x8937 +#define GL_REG_23_ATI 0x8938 +#define GL_REG_24_ATI 0x8939 +#define GL_REG_25_ATI 0x893A +#define GL_REG_26_ATI 0x893B +#define GL_REG_27_ATI 0x893C +#define GL_REG_28_ATI 0x893D +#define GL_REG_29_ATI 0x893E +#define GL_REG_30_ATI 0x893F +#define GL_REG_31_ATI 0x8940 +#define GL_CON_0_ATI 0x8941 +#define GL_CON_1_ATI 0x8942 +#define GL_CON_2_ATI 0x8943 
+#define GL_CON_3_ATI 0x8944 +#define GL_CON_4_ATI 0x8945 +#define GL_CON_5_ATI 0x8946 +#define GL_CON_6_ATI 0x8947 +#define GL_CON_7_ATI 0x8948 +#define GL_CON_8_ATI 0x8949 +#define GL_CON_9_ATI 0x894A +#define GL_CON_10_ATI 0x894B +#define GL_CON_11_ATI 0x894C +#define GL_CON_12_ATI 0x894D +#define GL_CON_13_ATI 0x894E +#define GL_CON_14_ATI 0x894F +#define GL_CON_15_ATI 0x8950 +#define GL_CON_16_ATI 0x8951 +#define GL_CON_17_ATI 0x8952 +#define GL_CON_18_ATI 0x8953 +#define GL_CON_19_ATI 0x8954 +#define GL_CON_20_ATI 0x8955 +#define GL_CON_21_ATI 0x8956 +#define GL_CON_22_ATI 0x8957 +#define GL_CON_23_ATI 0x8958 +#define GL_CON_24_ATI 0x8959 +#define GL_CON_25_ATI 0x895A +#define GL_CON_26_ATI 0x895B +#define GL_CON_27_ATI 0x895C +#define GL_CON_28_ATI 0x895D +#define GL_CON_29_ATI 0x895E +#define GL_CON_30_ATI 0x895F +#define GL_CON_31_ATI 0x8960 +#define GL_MOV_ATI 0x8961 +#define GL_ADD_ATI 0x8963 +#define GL_MUL_ATI 0x8964 +#define GL_SUB_ATI 0x8965 +#define GL_DOT3_ATI 0x8966 +#define GL_DOT4_ATI 0x8967 +#define GL_MAD_ATI 0x8968 +#define GL_LERP_ATI 0x8969 +#define GL_CND_ATI 0x896A +#define GL_CND0_ATI 0x896B +#define GL_DOT2_ADD_ATI 0x896C +#define GL_SECONDARY_INTERPOLATOR_ATI 0x896D +#define GL_NUM_FRAGMENT_REGISTERS_ATI 0x896E +#define GL_NUM_FRAGMENT_CONSTANTS_ATI 0x896F +#define GL_NUM_PASSES_ATI 0x8970 +#define GL_NUM_INSTRUCTIONS_PER_PASS_ATI 0x8971 +#define GL_NUM_INSTRUCTIONS_TOTAL_ATI 0x8972 +#define GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI 0x8973 +#define GL_NUM_LOOPBACK_COMPONENTS_ATI 0x8974 +#define GL_COLOR_ALPHA_PAIRING_ATI 0x8975 +#define GL_SWIZZLE_STR_ATI 0x8976 +#define GL_SWIZZLE_STQ_ATI 0x8977 +#define GL_SWIZZLE_STR_DR_ATI 0x8978 +#define GL_SWIZZLE_STQ_DQ_ATI 0x8979 +#define GL_SWIZZLE_STRQ_ATI 0x897A +#define GL_SWIZZLE_STRQ_DQ_ATI 0x897B +#define GL_RED_BIT_ATI 0x00000001 +#define GL_GREEN_BIT_ATI 0x00000002 +#define GL_BLUE_BIT_ATI 0x00000004 +#define GL_2X_BIT_ATI 0x00000001 +#define GL_4X_BIT_ATI 0x00000002 +#define GL_8X_BIT_ATI 0x00000004 +#define GL_HALF_BIT_ATI 0x00000008 +#define GL_QUARTER_BIT_ATI 0x00000010 +#define GL_EIGHTH_BIT_ATI 0x00000020 +#define GL_SATURATE_BIT_ATI 0x00000040 +#define GL_COMP_BIT_ATI 0x00000002 +#define GL_NEGATE_BIT_ATI 0x00000004 +#define GL_BIAS_BIT_ATI 0x00000008 +#endif + +#ifndef GL_ATI_pn_triangles +#define GL_PN_TRIANGLES_ATI 0x87F0 +#define GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI 0x87F1 +#define GL_PN_TRIANGLES_POINT_MODE_ATI 0x87F2 +#define GL_PN_TRIANGLES_NORMAL_MODE_ATI 0x87F3 +#define GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI 0x87F4 +#define GL_PN_TRIANGLES_POINT_MODE_LINEAR_ATI 0x87F5 +#define GL_PN_TRIANGLES_POINT_MODE_CUBIC_ATI 0x87F6 +#define GL_PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI 0x87F7 +#define GL_PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI 0x87F8 +#endif + +#ifndef GL_ATI_vertex_array_object +#define GL_STATIC_ATI 0x8760 +#define GL_DYNAMIC_ATI 0x8761 +#define GL_PRESERVE_ATI 0x8762 +#define GL_DISCARD_ATI 0x8763 +#define GL_OBJECT_BUFFER_SIZE_ATI 0x8764 +#define GL_OBJECT_BUFFER_USAGE_ATI 0x8765 +#define GL_ARRAY_OBJECT_BUFFER_ATI 0x8766 +#define GL_ARRAY_OBJECT_OFFSET_ATI 0x8767 +#endif + +#ifndef GL_EXT_vertex_shader +#define GL_VERTEX_SHADER_EXT 0x8780 +#define GL_VERTEX_SHADER_BINDING_EXT 0x8781 +#define GL_OP_INDEX_EXT 0x8782 +#define GL_OP_NEGATE_EXT 0x8783 +#define GL_OP_DOT3_EXT 0x8784 +#define GL_OP_DOT4_EXT 0x8785 +#define GL_OP_MUL_EXT 0x8786 +#define GL_OP_ADD_EXT 0x8787 +#define GL_OP_MADD_EXT 0x8788 +#define GL_OP_FRAC_EXT 0x8789 +#define GL_OP_MAX_EXT 0x878A +#define GL_OP_MIN_EXT 0x878B 
+#define GL_OP_SET_GE_EXT 0x878C +#define GL_OP_SET_LT_EXT 0x878D +#define GL_OP_CLAMP_EXT 0x878E +#define GL_OP_FLOOR_EXT 0x878F +#define GL_OP_ROUND_EXT 0x8790 +#define GL_OP_EXP_BASE_2_EXT 0x8791 +#define GL_OP_LOG_BASE_2_EXT 0x8792 +#define GL_OP_POWER_EXT 0x8793 +#define GL_OP_RECIP_EXT 0x8794 +#define GL_OP_RECIP_SQRT_EXT 0x8795 +#define GL_OP_SUB_EXT 0x8796 +#define GL_OP_CROSS_PRODUCT_EXT 0x8797 +#define GL_OP_MULTIPLY_MATRIX_EXT 0x8798 +#define GL_OP_MOV_EXT 0x8799 +#define GL_OUTPUT_VERTEX_EXT 0x879A +#define GL_OUTPUT_COLOR0_EXT 0x879B +#define GL_OUTPUT_COLOR1_EXT 0x879C +#define GL_OUTPUT_TEXTURE_COORD0_EXT 0x879D +#define GL_OUTPUT_TEXTURE_COORD1_EXT 0x879E +#define GL_OUTPUT_TEXTURE_COORD2_EXT 0x879F +#define GL_OUTPUT_TEXTURE_COORD3_EXT 0x87A0 +#define GL_OUTPUT_TEXTURE_COORD4_EXT 0x87A1 +#define GL_OUTPUT_TEXTURE_COORD5_EXT 0x87A2 +#define GL_OUTPUT_TEXTURE_COORD6_EXT 0x87A3 +#define GL_OUTPUT_TEXTURE_COORD7_EXT 0x87A4 +#define GL_OUTPUT_TEXTURE_COORD8_EXT 0x87A5 +#define GL_OUTPUT_TEXTURE_COORD9_EXT 0x87A6 +#define GL_OUTPUT_TEXTURE_COORD10_EXT 0x87A7 +#define GL_OUTPUT_TEXTURE_COORD11_EXT 0x87A8 +#define GL_OUTPUT_TEXTURE_COORD12_EXT 0x87A9 +#define GL_OUTPUT_TEXTURE_COORD13_EXT 0x87AA +#define GL_OUTPUT_TEXTURE_COORD14_EXT 0x87AB +#define GL_OUTPUT_TEXTURE_COORD15_EXT 0x87AC +#define GL_OUTPUT_TEXTURE_COORD16_EXT 0x87AD +#define GL_OUTPUT_TEXTURE_COORD17_EXT 0x87AE +#define GL_OUTPUT_TEXTURE_COORD18_EXT 0x87AF +#define GL_OUTPUT_TEXTURE_COORD19_EXT 0x87B0 +#define GL_OUTPUT_TEXTURE_COORD20_EXT 0x87B1 +#define GL_OUTPUT_TEXTURE_COORD21_EXT 0x87B2 +#define GL_OUTPUT_TEXTURE_COORD22_EXT 0x87B3 +#define GL_OUTPUT_TEXTURE_COORD23_EXT 0x87B4 +#define GL_OUTPUT_TEXTURE_COORD24_EXT 0x87B5 +#define GL_OUTPUT_TEXTURE_COORD25_EXT 0x87B6 +#define GL_OUTPUT_TEXTURE_COORD26_EXT 0x87B7 +#define GL_OUTPUT_TEXTURE_COORD27_EXT 0x87B8 +#define GL_OUTPUT_TEXTURE_COORD28_EXT 0x87B9 +#define GL_OUTPUT_TEXTURE_COORD29_EXT 0x87BA +#define GL_OUTPUT_TEXTURE_COORD30_EXT 0x87BB +#define GL_OUTPUT_TEXTURE_COORD31_EXT 0x87BC +#define GL_OUTPUT_FOG_EXT 0x87BD +#define GL_SCALAR_EXT 0x87BE +#define GL_VECTOR_EXT 0x87BF +#define GL_MATRIX_EXT 0x87C0 +#define GL_VARIANT_EXT 0x87C1 +#define GL_INVARIANT_EXT 0x87C2 +#define GL_LOCAL_CONSTANT_EXT 0x87C3 +#define GL_LOCAL_EXT 0x87C4 +#define GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT 0x87C5 +#define GL_MAX_VERTEX_SHADER_VARIANTS_EXT 0x87C6 +#define GL_MAX_VERTEX_SHADER_INVARIANTS_EXT 0x87C7 +#define GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT 0x87C8 +#define GL_MAX_VERTEX_SHADER_LOCALS_EXT 0x87C9 +#define GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT 0x87CA +#define GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT 0x87CB +#define GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT 0x87CC +#define GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT 0x87CD +#define GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT 0x87CE +#define GL_VERTEX_SHADER_INSTRUCTIONS_EXT 0x87CF +#define GL_VERTEX_SHADER_VARIANTS_EXT 0x87D0 +#define GL_VERTEX_SHADER_INVARIANTS_EXT 0x87D1 +#define GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT 0x87D2 +#define GL_VERTEX_SHADER_LOCALS_EXT 0x87D3 +#define GL_VERTEX_SHADER_OPTIMIZED_EXT 0x87D4 +#define GL_X_EXT 0x87D5 +#define GL_Y_EXT 0x87D6 +#define GL_Z_EXT 0x87D7 +#define GL_W_EXT 0x87D8 +#define GL_NEGATIVE_X_EXT 0x87D9 +#define GL_NEGATIVE_Y_EXT 0x87DA +#define GL_NEGATIVE_Z_EXT 0x87DB +#define GL_NEGATIVE_W_EXT 0x87DC +#define GL_ZERO_EXT 0x87DD +#define GL_ONE_EXT 0x87DE +#define GL_NEGATIVE_ONE_EXT 0x87DF +#define GL_NORMALIZED_RANGE_EXT 0x87E0 +#define 
GL_FULL_RANGE_EXT 0x87E1 +#define GL_CURRENT_VERTEX_EXT 0x87E2 +#define GL_MVP_MATRIX_EXT 0x87E3 +#define GL_VARIANT_VALUE_EXT 0x87E4 +#define GL_VARIANT_DATATYPE_EXT 0x87E5 +#define GL_VARIANT_ARRAY_STRIDE_EXT 0x87E6 +#define GL_VARIANT_ARRAY_TYPE_EXT 0x87E7 +#define GL_VARIANT_ARRAY_EXT 0x87E8 +#define GL_VARIANT_ARRAY_POINTER_EXT 0x87E9 +#define GL_INVARIANT_VALUE_EXT 0x87EA +#define GL_INVARIANT_DATATYPE_EXT 0x87EB +#define GL_LOCAL_CONSTANT_VALUE_EXT 0x87EC +#define GL_LOCAL_CONSTANT_DATATYPE_EXT 0x87ED +#endif + +#ifndef GL_ATI_vertex_streams +#define GL_MAX_VERTEX_STREAMS_ATI 0x876B +#define GL_VERTEX_STREAM0_ATI 0x876C +#define GL_VERTEX_STREAM1_ATI 0x876D +#define GL_VERTEX_STREAM2_ATI 0x876E +#define GL_VERTEX_STREAM3_ATI 0x876F +#define GL_VERTEX_STREAM4_ATI 0x8770 +#define GL_VERTEX_STREAM5_ATI 0x8771 +#define GL_VERTEX_STREAM6_ATI 0x8772 +#define GL_VERTEX_STREAM7_ATI 0x8773 +#define GL_VERTEX_SOURCE_ATI 0x8774 +#endif + +#ifndef GL_ATI_element_array +#define GL_ELEMENT_ARRAY_ATI 0x8768 +#define GL_ELEMENT_ARRAY_TYPE_ATI 0x8769 +#define GL_ELEMENT_ARRAY_POINTER_ATI 0x876A +#endif + +#ifndef GL_SUN_mesh_array +#define GL_QUAD_MESH_SUN 0x8614 +#define GL_TRIANGLE_MESH_SUN 0x8615 +#endif + +#ifndef GL_SUN_slice_accum +#define GL_SLICE_ACCUM_SUN 0x85CC +#endif + +#ifndef GL_NV_multisample_filter_hint +#define GL_MULTISAMPLE_FILTER_HINT_NV 0x8534 +#endif + +#ifndef GL_NV_depth_clamp +#define GL_DEPTH_CLAMP_NV 0x864F +#endif + +#ifndef GL_NV_occlusion_query +#define GL_PIXEL_COUNTER_BITS_NV 0x8864 +#define GL_CURRENT_OCCLUSION_QUERY_ID_NV 0x8865 +#define GL_PIXEL_COUNT_NV 0x8866 +#define GL_PIXEL_COUNT_AVAILABLE_NV 0x8867 +#endif + +#ifndef GL_NV_point_sprite +#define GL_POINT_SPRITE_NV 0x8861 +#define GL_COORD_REPLACE_NV 0x8862 +#define GL_POINT_SPRITE_R_MODE_NV 0x8863 +#endif + +#ifndef GL_NV_texture_shader3 +#define GL_OFFSET_PROJECTIVE_TEXTURE_2D_NV 0x8850 +#define GL_OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV 0x8851 +#define GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV 0x8852 +#define GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV 0x8853 +#define GL_OFFSET_HILO_TEXTURE_2D_NV 0x8854 +#define GL_OFFSET_HILO_TEXTURE_RECTANGLE_NV 0x8855 +#define GL_OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV 0x8856 +#define GL_OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV 0x8857 +#define GL_DEPENDENT_HILO_TEXTURE_2D_NV 0x8858 +#define GL_DEPENDENT_RGB_TEXTURE_3D_NV 0x8859 +#define GL_DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV 0x885A +#define GL_DOT_PRODUCT_PASS_THROUGH_NV 0x885B +#define GL_DOT_PRODUCT_TEXTURE_1D_NV 0x885C +#define GL_DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV 0x885D +#define GL_HILO8_NV 0x885E +#define GL_SIGNED_HILO8_NV 0x885F +#define GL_FORCE_BLUE_TO_ONE_NV 0x8860 +#endif + +#ifndef GL_NV_vertex_program1_1 +#endif + +#ifndef GL_EXT_shadow_funcs +#endif + +#ifndef GL_EXT_stencil_two_side +#define GL_STENCIL_TEST_TWO_SIDE_EXT 0x8910 +#define GL_ACTIVE_STENCIL_FACE_EXT 0x8911 +#endif + +#ifndef GL_ATI_text_fragment_shader +#define GL_TEXT_FRAGMENT_SHADER_ATI 0x8200 +#endif + +#ifndef GL_APPLE_client_storage +#define GL_UNPACK_CLIENT_STORAGE_APPLE 0x85B2 +#endif + +#ifndef GL_APPLE_element_array +#define GL_ELEMENT_ARRAY_APPLE 0x8A0C +#define GL_ELEMENT_ARRAY_TYPE_APPLE 0x8A0D +#define GL_ELEMENT_ARRAY_POINTER_APPLE 0x8A0E +#endif + +#ifndef GL_APPLE_fence +#define GL_DRAW_PIXELS_APPLE 0x8A0A +#define GL_FENCE_APPLE 0x8A0B +#endif + +#ifndef GL_APPLE_vertex_array_object +#define GL_VERTEX_ARRAY_BINDING_APPLE 0x85B5 +#endif + +#ifndef GL_APPLE_vertex_array_range +#define GL_VERTEX_ARRAY_RANGE_APPLE 0x851D +#define 
GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE 0x851E +#define GL_VERTEX_ARRAY_STORAGE_HINT_APPLE 0x851F +#define GL_VERTEX_ARRAY_RANGE_POINTER_APPLE 0x8521 +#define GL_STORAGE_CLIENT_APPLE 0x85B4 +#define GL_STORAGE_CACHED_APPLE 0x85BE +#define GL_STORAGE_SHARED_APPLE 0x85BF +#endif + +#ifndef GL_APPLE_ycbcr_422 +#define GL_YCBCR_422_APPLE 0x85B9 +#define GL_UNSIGNED_SHORT_8_8_APPLE 0x85BA +#define GL_UNSIGNED_SHORT_8_8_REV_APPLE 0x85BB +#endif + +#ifndef GL_S3_s3tc +#define GL_RGB_S3TC 0x83A0 +#define GL_RGB4_S3TC 0x83A1 +#define GL_RGBA_S3TC 0x83A2 +#define GL_RGBA4_S3TC 0x83A3 +#define GL_RGBA_DXT5_S3TC 0x83A4 +#define GL_RGBA4_DXT5_S3TC 0x83A5 +#endif + +#ifndef GL_ATI_draw_buffers +#define GL_MAX_DRAW_BUFFERS_ATI 0x8824 +#define GL_DRAW_BUFFER0_ATI 0x8825 +#define GL_DRAW_BUFFER1_ATI 0x8826 +#define GL_DRAW_BUFFER2_ATI 0x8827 +#define GL_DRAW_BUFFER3_ATI 0x8828 +#define GL_DRAW_BUFFER4_ATI 0x8829 +#define GL_DRAW_BUFFER5_ATI 0x882A +#define GL_DRAW_BUFFER6_ATI 0x882B +#define GL_DRAW_BUFFER7_ATI 0x882C +#define GL_DRAW_BUFFER8_ATI 0x882D +#define GL_DRAW_BUFFER9_ATI 0x882E +#define GL_DRAW_BUFFER10_ATI 0x882F +#define GL_DRAW_BUFFER11_ATI 0x8830 +#define GL_DRAW_BUFFER12_ATI 0x8831 +#define GL_DRAW_BUFFER13_ATI 0x8832 +#define GL_DRAW_BUFFER14_ATI 0x8833 +#define GL_DRAW_BUFFER15_ATI 0x8834 +#endif + +#ifndef GL_ATI_pixel_format_float +#define GL_RGBA_FLOAT_MODE_ATI 0x8820 +#define GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI 0x8835 +#endif + +#ifndef GL_ATI_texture_env_combine3 +#define GL_MODULATE_ADD_ATI 0x8744 +#define GL_MODULATE_SIGNED_ADD_ATI 0x8745 +#define GL_MODULATE_SUBTRACT_ATI 0x8746 +#endif + +#ifndef GL_ATI_texture_float +#define GL_RGBA_FLOAT32_ATI 0x8814 +#define GL_RGB_FLOAT32_ATI 0x8815 +#define GL_ALPHA_FLOAT32_ATI 0x8816 +#define GL_INTENSITY_FLOAT32_ATI 0x8817 +#define GL_LUMINANCE_FLOAT32_ATI 0x8818 +#define GL_LUMINANCE_ALPHA_FLOAT32_ATI 0x8819 +#define GL_RGBA_FLOAT16_ATI 0x881A +#define GL_RGB_FLOAT16_ATI 0x881B +#define GL_ALPHA_FLOAT16_ATI 0x881C +#define GL_INTENSITY_FLOAT16_ATI 0x881D +#define GL_LUMINANCE_FLOAT16_ATI 0x881E +#define GL_LUMINANCE_ALPHA_FLOAT16_ATI 0x881F +#endif + +#ifndef GL_NV_float_buffer +#define GL_FLOAT_R_NV 0x8880 +#define GL_FLOAT_RG_NV 0x8881 +#define GL_FLOAT_RGB_NV 0x8882 +#define GL_FLOAT_RGBA_NV 0x8883 +#define GL_FLOAT_R16_NV 0x8884 +#define GL_FLOAT_R32_NV 0x8885 +#define GL_FLOAT_RG16_NV 0x8886 +#define GL_FLOAT_RG32_NV 0x8887 +#define GL_FLOAT_RGB16_NV 0x8888 +#define GL_FLOAT_RGB32_NV 0x8889 +#define GL_FLOAT_RGBA16_NV 0x888A +#define GL_FLOAT_RGBA32_NV 0x888B +#define GL_TEXTURE_FLOAT_COMPONENTS_NV 0x888C +#define GL_FLOAT_CLEAR_COLOR_VALUE_NV 0x888D +#define GL_FLOAT_RGBA_MODE_NV 0x888E +#endif + +#ifndef GL_NV_fragment_program +#define GL_MAX_FRAGMENT_PROGRAM_LOCAL_PARAMETERS_NV 0x8868 +#define GL_FRAGMENT_PROGRAM_NV 0x8870 +#define GL_MAX_TEXTURE_COORDS_NV 0x8871 +#define GL_MAX_TEXTURE_IMAGE_UNITS_NV 0x8872 +#define GL_FRAGMENT_PROGRAM_BINDING_NV 0x8873 +#define GL_PROGRAM_ERROR_STRING_NV 0x8874 +#endif + +#ifndef GL_NV_half_float +#define GL_HALF_FLOAT_NV 0x140B +#endif + +#ifndef GL_NV_pixel_data_range +#define GL_WRITE_PIXEL_DATA_RANGE_NV 0x8878 +#define GL_READ_PIXEL_DATA_RANGE_NV 0x8879 +#define GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV 0x887A +#define GL_READ_PIXEL_DATA_RANGE_LENGTH_NV 0x887B +#define GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV 0x887C +#define GL_READ_PIXEL_DATA_RANGE_POINTER_NV 0x887D +#endif + +#ifndef GL_NV_primitive_restart +#define GL_PRIMITIVE_RESTART_NV 0x8558 +#define GL_PRIMITIVE_RESTART_INDEX_NV 0x8559 +#endif + 
+#ifndef GL_NV_texture_expand_normal +#define GL_TEXTURE_UNSIGNED_REMAP_MODE_NV 0x888F +#endif + +#ifndef GL_NV_vertex_program2 +#endif + +#ifndef GL_ATI_map_object_buffer +#endif + +#ifndef GL_ATI_separate_stencil +#define GL_STENCIL_BACK_FUNC_ATI 0x8800 +#define GL_STENCIL_BACK_FAIL_ATI 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI 0x8803 +#endif + +#ifndef GL_ATI_vertex_attrib_array_object +#endif + +#ifndef GL_OES_read_format +#define GL_IMPLEMENTATION_COLOR_READ_TYPE_OES 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES 0x8B9B +#endif + +#ifndef GL_EXT_depth_bounds_test +#define GL_DEPTH_BOUNDS_TEST_EXT 0x8890 +#define GL_DEPTH_BOUNDS_EXT 0x8891 +#endif + +#ifndef GL_EXT_texture_mirror_clamp +#define GL_MIRROR_CLAMP_EXT 0x8742 +#define GL_MIRROR_CLAMP_TO_EDGE_EXT 0x8743 +#define GL_MIRROR_CLAMP_TO_BORDER_EXT 0x8912 +#endif + +#ifndef GL_EXT_blend_equation_separate +#define GL_BLEND_EQUATION_RGB_EXT 0x8009 +#define GL_BLEND_EQUATION_ALPHA_EXT 0x883D +#endif + +#ifndef GL_MESA_pack_invert +#define GL_PACK_INVERT_MESA 0x8758 +#endif + +#ifndef GL_MESA_ycbcr_texture +#define GL_UNSIGNED_SHORT_8_8_MESA 0x85BA +#define GL_UNSIGNED_SHORT_8_8_REV_MESA 0x85BB +#define GL_YCBCR_MESA 0x8757 +#endif + +#ifndef GL_EXT_pixel_buffer_object +#define GL_PIXEL_PACK_BUFFER_EXT 0x88EB +#define GL_PIXEL_UNPACK_BUFFER_EXT 0x88EC +#define GL_PIXEL_PACK_BUFFER_BINDING_EXT 0x88ED +#define GL_PIXEL_UNPACK_BUFFER_BINDING_EXT 0x88EF +#endif + +#ifndef GL_NV_fragment_program_option +#endif + +#ifndef GL_NV_fragment_program2 +#define GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV 0x88F4 +#define GL_MAX_PROGRAM_CALL_DEPTH_NV 0x88F5 +#define GL_MAX_PROGRAM_IF_DEPTH_NV 0x88F6 +#define GL_MAX_PROGRAM_LOOP_DEPTH_NV 0x88F7 +#define GL_MAX_PROGRAM_LOOP_COUNT_NV 0x88F8 +#endif + +#ifndef GL_NV_vertex_program2_option +/* reuse GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV */ +/* reuse GL_MAX_PROGRAM_CALL_DEPTH_NV */ +#endif + +#ifndef GL_NV_vertex_program3 +/* reuse GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB */ +#endif + +#ifndef GL_EXT_framebuffer_object +#define GL_INVALID_FRAMEBUFFER_OPERATION_EXT 0x0506 +#define GL_MAX_RENDERBUFFER_SIZE_EXT 0x84E8 +#define GL_FRAMEBUFFER_BINDING_EXT 0x8CA6 +#define GL_RENDERBUFFER_BINDING_EXT 0x8CA7 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT 0x8CD3 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT 0x8CD4 +#define GL_FRAMEBUFFER_COMPLETE_EXT 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT 0x8CD9 +#define GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT 0x8CDA +#define GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT 0x8CDB +#define GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT 0x8CDC +#define GL_FRAMEBUFFER_UNSUPPORTED_EXT 0x8CDD +#define GL_MAX_COLOR_ATTACHMENTS_EXT 0x8CDF +#define GL_COLOR_ATTACHMENT0_EXT 0x8CE0 +#define GL_COLOR_ATTACHMENT1_EXT 0x8CE1 +#define GL_COLOR_ATTACHMENT2_EXT 0x8CE2 +#define GL_COLOR_ATTACHMENT3_EXT 0x8CE3 +#define GL_COLOR_ATTACHMENT4_EXT 0x8CE4 +#define GL_COLOR_ATTACHMENT5_EXT 0x8CE5 +#define GL_COLOR_ATTACHMENT6_EXT 0x8CE6 +#define GL_COLOR_ATTACHMENT7_EXT 0x8CE7 +#define GL_COLOR_ATTACHMENT8_EXT 0x8CE8 +#define GL_COLOR_ATTACHMENT9_EXT 0x8CE9 +#define GL_COLOR_ATTACHMENT10_EXT 0x8CEA +#define 
GL_COLOR_ATTACHMENT11_EXT 0x8CEB +#define GL_COLOR_ATTACHMENT12_EXT 0x8CEC +#define GL_COLOR_ATTACHMENT13_EXT 0x8CED +#define GL_COLOR_ATTACHMENT14_EXT 0x8CEE +#define GL_COLOR_ATTACHMENT15_EXT 0x8CEF +#define GL_DEPTH_ATTACHMENT_EXT 0x8D00 +#define GL_STENCIL_ATTACHMENT_EXT 0x8D20 +#define GL_FRAMEBUFFER_EXT 0x8D40 +#define GL_RENDERBUFFER_EXT 0x8D41 +#define GL_RENDERBUFFER_WIDTH_EXT 0x8D42 +#define GL_RENDERBUFFER_HEIGHT_EXT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT_EXT 0x8D44 +#define GL_STENCIL_INDEX1_EXT 0x8D46 +#define GL_STENCIL_INDEX4_EXT 0x8D47 +#define GL_STENCIL_INDEX8_EXT 0x8D48 +#define GL_STENCIL_INDEX16_EXT 0x8D49 +#define GL_RENDERBUFFER_RED_SIZE_EXT 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE_EXT 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE_EXT 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE_EXT 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE_EXT 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE_EXT 0x8D55 +#endif + +#ifndef GL_GREMEDY_string_marker +#endif + +#ifndef GL_EXT_packed_depth_stencil +#define GL_DEPTH_STENCIL_EXT 0x84F9 +#define GL_UNSIGNED_INT_24_8_EXT 0x84FA +#define GL_DEPTH24_STENCIL8_EXT 0x88F0 +#define GL_TEXTURE_STENCIL_SIZE_EXT 0x88F1 +#endif + +#ifndef GL_EXT_stencil_clear_tag +#define GL_STENCIL_TAG_BITS_EXT 0x88F2 +#define GL_STENCIL_CLEAR_TAG_VALUE_EXT 0x88F3 +#endif + +#ifndef GL_EXT_texture_sRGB +#define GL_SRGB_EXT 0x8C40 +#define GL_SRGB8_EXT 0x8C41 +#define GL_SRGB_ALPHA_EXT 0x8C42 +#define GL_SRGB8_ALPHA8_EXT 0x8C43 +#define GL_SLUMINANCE_ALPHA_EXT 0x8C44 +#define GL_SLUMINANCE8_ALPHA8_EXT 0x8C45 +#define GL_SLUMINANCE_EXT 0x8C46 +#define GL_SLUMINANCE8_EXT 0x8C47 +#define GL_COMPRESSED_SRGB_EXT 0x8C48 +#define GL_COMPRESSED_SRGB_ALPHA_EXT 0x8C49 +#define GL_COMPRESSED_SLUMINANCE_EXT 0x8C4A +#define GL_COMPRESSED_SLUMINANCE_ALPHA_EXT 0x8C4B +#define GL_COMPRESSED_SRGB_S3TC_DXT1_EXT 0x8C4C +#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT 0x8C4D +#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT 0x8C4E +#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT 0x8C4F +#endif + +#ifndef GL_EXT_framebuffer_blit +#define GL_READ_FRAMEBUFFER_EXT 0x8CA8 +#define GL_DRAW_FRAMEBUFFER_EXT 0x8CA9 +#define GL_DRAW_FRAMEBUFFER_BINDING_EXT GL_FRAMEBUFFER_BINDING_EXT +#define GL_READ_FRAMEBUFFER_BINDING_EXT 0x8CAA +#endif + +#ifndef GL_EXT_framebuffer_multisample +#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT 0x8D56 +#define GL_MAX_SAMPLES_EXT 0x8D57 +#endif + +#ifndef GL_MESAX_texture_stack +#define GL_TEXTURE_1D_STACK_MESAX 0x8759 +#define GL_TEXTURE_2D_STACK_MESAX 0x875A +#define GL_PROXY_TEXTURE_1D_STACK_MESAX 0x875B +#define GL_PROXY_TEXTURE_2D_STACK_MESAX 0x875C +#define GL_TEXTURE_1D_STACK_BINDING_MESAX 0x875D +#define GL_TEXTURE_2D_STACK_BINDING_MESAX 0x875E +#endif + +#ifndef GL_EXT_timer_query +#define GL_TIME_ELAPSED_EXT 0x88BF +#endif + +#ifndef GL_EXT_gpu_program_parameters +#endif + +#ifndef GL_APPLE_flush_buffer_range +#define GL_BUFFER_SERIALIZED_MODIFY_APPLE 0x8A12 +#define GL_BUFFER_FLUSHING_UNMAP_APPLE 0x8A13 +#endif + +#ifndef GL_NV_gpu_program4 +#define GL_MIN_PROGRAM_TEXEL_OFFSET_NV 0x8904 +#define GL_MAX_PROGRAM_TEXEL_OFFSET_NV 0x8905 +#define GL_PROGRAM_ATTRIB_COMPONENTS_NV 0x8906 +#define GL_PROGRAM_RESULT_COMPONENTS_NV 0x8907 +#define GL_MAX_PROGRAM_ATTRIB_COMPONENTS_NV 0x8908 +#define GL_MAX_PROGRAM_RESULT_COMPONENTS_NV 0x8909 +#define GL_MAX_PROGRAM_GENERIC_ATTRIBS_NV 0x8DA5 +#define GL_MAX_PROGRAM_GENERIC_RESULTS_NV 0x8DA6 +#endif + +#ifndef GL_NV_geometry_program4 +#define GL_LINES_ADJACENCY_EXT 0x000A 
+#define GL_LINE_STRIP_ADJACENCY_EXT 0x000B +#define GL_TRIANGLES_ADJACENCY_EXT 0x000C +#define GL_TRIANGLE_STRIP_ADJACENCY_EXT 0x000D +#define GL_GEOMETRY_PROGRAM_NV 0x8C26 +#define GL_MAX_PROGRAM_OUTPUT_VERTICES_NV 0x8C27 +#define GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV 0x8C28 +#define GL_GEOMETRY_VERTICES_OUT_EXT 0x8DDA +#define GL_GEOMETRY_INPUT_TYPE_EXT 0x8DDB +#define GL_GEOMETRY_OUTPUT_TYPE_EXT 0x8DDC +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT 0x8C29 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT 0x8DA7 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT 0x8DA8 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT 0x8DA9 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT 0x8CD4 +#define GL_PROGRAM_POINT_SIZE_EXT 0x8642 +#endif + +#ifndef GL_EXT_geometry_shader4 +#define GL_GEOMETRY_SHADER_EXT 0x8DD9 +/* reuse GL_GEOMETRY_VERTICES_OUT_EXT */ +/* reuse GL_GEOMETRY_INPUT_TYPE_EXT */ +/* reuse GL_GEOMETRY_OUTPUT_TYPE_EXT */ +/* reuse GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT */ +#define GL_MAX_GEOMETRY_VARYING_COMPONENTS_EXT 0x8DDD +#define GL_MAX_VERTEX_VARYING_COMPONENTS_EXT 0x8DDE +#define GL_MAX_VARYING_COMPONENTS_EXT 0x8B4B +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT 0x8DDF +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT 0x8DE0 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_EXT 0x8DE1 +/* reuse GL_LINES_ADJACENCY_EXT */ +/* reuse GL_LINE_STRIP_ADJACENCY_EXT */ +/* reuse GL_TRIANGLES_ADJACENCY_EXT */ +/* reuse GL_TRIANGLE_STRIP_ADJACENCY_EXT */ +/* reuse GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT */ +/* reuse GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT */ +/* reuse GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT */ +/* reuse GL_PROGRAM_POINT_SIZE_EXT */ +#endif + +#ifndef GL_NV_vertex_program4 +#define GL_VERTEX_ATTRIB_ARRAY_INTEGER_NV 0x88FD +#endif + +#ifndef GL_EXT_gpu_shader4 +#define GL_SAMPLER_1D_ARRAY_EXT 0x8DC0 +#define GL_SAMPLER_2D_ARRAY_EXT 0x8DC1 +#define GL_SAMPLER_BUFFER_EXT 0x8DC2 +#define GL_SAMPLER_1D_ARRAY_SHADOW_EXT 0x8DC3 +#define GL_SAMPLER_2D_ARRAY_SHADOW_EXT 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW_EXT 0x8DC5 +#define GL_UNSIGNED_INT_VEC2_EXT 0x8DC6 +#define GL_UNSIGNED_INT_VEC3_EXT 0x8DC7 +#define GL_UNSIGNED_INT_VEC4_EXT 0x8DC8 +#define GL_INT_SAMPLER_1D_EXT 0x8DC9 +#define GL_INT_SAMPLER_2D_EXT 0x8DCA +#define GL_INT_SAMPLER_3D_EXT 0x8DCB +#define GL_INT_SAMPLER_CUBE_EXT 0x8DCC +#define GL_INT_SAMPLER_2D_RECT_EXT 0x8DCD +#define GL_INT_SAMPLER_1D_ARRAY_EXT 0x8DCE +#define GL_INT_SAMPLER_2D_ARRAY_EXT 0x8DCF +#define GL_INT_SAMPLER_BUFFER_EXT 0x8DD0 +#define GL_UNSIGNED_INT_SAMPLER_1D_EXT 0x8DD1 +#define GL_UNSIGNED_INT_SAMPLER_2D_EXT 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D_EXT 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE_EXT 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_2D_RECT_EXT 0x8DD5 +#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY_EXT 0x8DD6 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY_EXT 0x8DD7 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER_EXT 0x8DD8 +#endif + +#ifndef GL_EXT_draw_instanced +#endif + +#ifndef GL_EXT_packed_float +#define GL_R11F_G11F_B10F_EXT 0x8C3A +#define GL_UNSIGNED_INT_10F_11F_11F_REV_EXT 0x8C3B +#define GL_RGBA_SIGNED_COMPONENTS_EXT 0x8C3C +#endif + +#ifndef GL_EXT_texture_array +#define GL_TEXTURE_1D_ARRAY_EXT 0x8C18 +#define GL_PROXY_TEXTURE_1D_ARRAY_EXT 0x8C19 +#define GL_TEXTURE_2D_ARRAY_EXT 0x8C1A +#define GL_PROXY_TEXTURE_2D_ARRAY_EXT 0x8C1B +#define GL_TEXTURE_BINDING_1D_ARRAY_EXT 0x8C1C +#define GL_TEXTURE_BINDING_2D_ARRAY_EXT 0x8C1D +#define GL_MAX_ARRAY_TEXTURE_LAYERS_EXT 0x88FF 
+#define GL_COMPARE_REF_DEPTH_TO_TEXTURE_EXT 0x884E +/* reuse GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT */ +#endif + +#ifndef GL_EXT_texture_buffer_object +#define GL_TEXTURE_BUFFER_EXT 0x8C2A +#define GL_MAX_TEXTURE_BUFFER_SIZE_EXT 0x8C2B +#define GL_TEXTURE_BINDING_BUFFER_EXT 0x8C2C +#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT 0x8C2D +#define GL_TEXTURE_BUFFER_FORMAT_EXT 0x8C2E +#endif + +#ifndef GL_EXT_texture_compression_latc +#define GL_COMPRESSED_LUMINANCE_LATC1_EXT 0x8C70 +#define GL_COMPRESSED_SIGNED_LUMINANCE_LATC1_EXT 0x8C71 +#define GL_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT 0x8C72 +#define GL_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT 0x8C73 +#endif + +#ifndef GL_EXT_texture_compression_rgtc +#define GL_COMPRESSED_RED_RGTC1_EXT 0x8DBB +#define GL_COMPRESSED_SIGNED_RED_RGTC1_EXT 0x8DBC +#define GL_COMPRESSED_RED_GREEN_RGTC2_EXT 0x8DBD +#define GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT 0x8DBE +#endif + +#ifndef GL_EXT_texture_shared_exponent +#define GL_RGB9_E5_EXT 0x8C3D +#define GL_UNSIGNED_INT_5_9_9_9_REV_EXT 0x8C3E +#define GL_TEXTURE_SHARED_SIZE_EXT 0x8C3F +#endif + +#ifndef GL_NV_depth_buffer_float +#define GL_DEPTH_COMPONENT32F_NV 0x8DAB +#define GL_DEPTH32F_STENCIL8_NV 0x8DAC +#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV_NV 0x8DAD +#define GL_DEPTH_BUFFER_FLOAT_MODE_NV 0x8DAF +#endif + +#ifndef GL_NV_fragment_program4 +#endif + +#ifndef GL_NV_framebuffer_multisample_coverage +#define GL_RENDERBUFFER_COVERAGE_SAMPLES_NV 0x8CAB +#define GL_RENDERBUFFER_COLOR_SAMPLES_NV 0x8E10 +#define GL_MAX_MULTISAMPLE_COVERAGE_MODES_NV 0x8E11 +#define GL_MULTISAMPLE_COVERAGE_MODES_NV 0x8E12 +#endif + +#ifndef GL_EXT_framebuffer_sRGB +#define GL_FRAMEBUFFER_SRGB_EXT 0x8DB9 +#define GL_FRAMEBUFFER_SRGB_CAPABLE_EXT 0x8DBA +#endif + +#ifndef GL_NV_geometry_shader4 +#endif + +#ifndef GL_NV_parameter_buffer_object +#define GL_MAX_PROGRAM_PARAMETER_BUFFER_BINDINGS_NV 0x8DA0 +#define GL_MAX_PROGRAM_PARAMETER_BUFFER_SIZE_NV 0x8DA1 +#define GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV 0x8DA2 +#define GL_GEOMETRY_PROGRAM_PARAMETER_BUFFER_NV 0x8DA3 +#define GL_FRAGMENT_PROGRAM_PARAMETER_BUFFER_NV 0x8DA4 +#endif + +#ifndef GL_EXT_draw_buffers2 +#endif + +#ifndef GL_NV_transform_feedback +#define GL_BACK_PRIMARY_COLOR_NV 0x8C77 +#define GL_BACK_SECONDARY_COLOR_NV 0x8C78 +#define GL_TEXTURE_COORD_NV 0x8C79 +#define GL_CLIP_DISTANCE_NV 0x8C7A +#define GL_VERTEX_ID_NV 0x8C7B +#define GL_PRIMITIVE_ID_NV 0x8C7C +#define GL_GENERIC_ATTRIB_NV 0x8C7D +#define GL_TRANSFORM_FEEDBACK_ATTRIBS_NV 0x8C7E +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE_NV 0x8C7F +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_NV 0x8C80 +#define GL_ACTIVE_VARYINGS_NV 0x8C81 +#define GL_ACTIVE_VARYING_MAX_LENGTH_NV 0x8C82 +#define GL_TRANSFORM_FEEDBACK_VARYINGS_NV 0x8C83 +#define GL_TRANSFORM_FEEDBACK_BUFFER_START_NV 0x8C84 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_NV 0x8C85 +#define GL_TRANSFORM_FEEDBACK_RECORD_NV 0x8C86 +#define GL_PRIMITIVES_GENERATED_NV 0x8C87 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_NV 0x8C88 +#define GL_RASTERIZER_DISCARD_NV 0x8C89 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_NV 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_NV 0x8C8B +#define GL_INTERLEAVED_ATTRIBS_NV 0x8C8C +#define GL_SEPARATE_ATTRIBS_NV 0x8C8D +#define GL_TRANSFORM_FEEDBACK_BUFFER_NV 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_NV 0x8C8F +#define GL_LAYER_NV 0x8DAA +#define GL_NEXT_BUFFER_NV -2 +#define GL_SKIP_COMPONENTS4_NV -3 +#define GL_SKIP_COMPONENTS3_NV -4 +#define GL_SKIP_COMPONENTS2_NV -5 
+#define GL_SKIP_COMPONENTS1_NV -6 +#endif + +#ifndef GL_EXT_bindable_uniform +#define GL_MAX_VERTEX_BINDABLE_UNIFORMS_EXT 0x8DE2 +#define GL_MAX_FRAGMENT_BINDABLE_UNIFORMS_EXT 0x8DE3 +#define GL_MAX_GEOMETRY_BINDABLE_UNIFORMS_EXT 0x8DE4 +#define GL_MAX_BINDABLE_UNIFORM_SIZE_EXT 0x8DED +#define GL_UNIFORM_BUFFER_EXT 0x8DEE +#define GL_UNIFORM_BUFFER_BINDING_EXT 0x8DEF +#endif + +#ifndef GL_EXT_texture_integer +#define GL_RGBA32UI_EXT 0x8D70 +#define GL_RGB32UI_EXT 0x8D71 +#define GL_ALPHA32UI_EXT 0x8D72 +#define GL_INTENSITY32UI_EXT 0x8D73 +#define GL_LUMINANCE32UI_EXT 0x8D74 +#define GL_LUMINANCE_ALPHA32UI_EXT 0x8D75 +#define GL_RGBA16UI_EXT 0x8D76 +#define GL_RGB16UI_EXT 0x8D77 +#define GL_ALPHA16UI_EXT 0x8D78 +#define GL_INTENSITY16UI_EXT 0x8D79 +#define GL_LUMINANCE16UI_EXT 0x8D7A +#define GL_LUMINANCE_ALPHA16UI_EXT 0x8D7B +#define GL_RGBA8UI_EXT 0x8D7C +#define GL_RGB8UI_EXT 0x8D7D +#define GL_ALPHA8UI_EXT 0x8D7E +#define GL_INTENSITY8UI_EXT 0x8D7F +#define GL_LUMINANCE8UI_EXT 0x8D80 +#define GL_LUMINANCE_ALPHA8UI_EXT 0x8D81 +#define GL_RGBA32I_EXT 0x8D82 +#define GL_RGB32I_EXT 0x8D83 +#define GL_ALPHA32I_EXT 0x8D84 +#define GL_INTENSITY32I_EXT 0x8D85 +#define GL_LUMINANCE32I_EXT 0x8D86 +#define GL_LUMINANCE_ALPHA32I_EXT 0x8D87 +#define GL_RGBA16I_EXT 0x8D88 +#define GL_RGB16I_EXT 0x8D89 +#define GL_ALPHA16I_EXT 0x8D8A +#define GL_INTENSITY16I_EXT 0x8D8B +#define GL_LUMINANCE16I_EXT 0x8D8C +#define GL_LUMINANCE_ALPHA16I_EXT 0x8D8D +#define GL_RGBA8I_EXT 0x8D8E +#define GL_RGB8I_EXT 0x8D8F +#define GL_ALPHA8I_EXT 0x8D90 +#define GL_INTENSITY8I_EXT 0x8D91 +#define GL_LUMINANCE8I_EXT 0x8D92 +#define GL_LUMINANCE_ALPHA8I_EXT 0x8D93 +#define GL_RED_INTEGER_EXT 0x8D94 +#define GL_GREEN_INTEGER_EXT 0x8D95 +#define GL_BLUE_INTEGER_EXT 0x8D96 +#define GL_ALPHA_INTEGER_EXT 0x8D97 +#define GL_RGB_INTEGER_EXT 0x8D98 +#define GL_RGBA_INTEGER_EXT 0x8D99 +#define GL_BGR_INTEGER_EXT 0x8D9A +#define GL_BGRA_INTEGER_EXT 0x8D9B +#define GL_LUMINANCE_INTEGER_EXT 0x8D9C +#define GL_LUMINANCE_ALPHA_INTEGER_EXT 0x8D9D +#define GL_RGBA_INTEGER_MODE_EXT 0x8D9E +#endif + +#ifndef GL_GREMEDY_frame_terminator +#endif + +#ifndef GL_NV_conditional_render +#define GL_QUERY_WAIT_NV 0x8E13 +#define GL_QUERY_NO_WAIT_NV 0x8E14 +#define GL_QUERY_BY_REGION_WAIT_NV 0x8E15 +#define GL_QUERY_BY_REGION_NO_WAIT_NV 0x8E16 +#endif + +#ifndef GL_NV_present_video +#define GL_FRAME_NV 0x8E26 +#define GL_FIELDS_NV 0x8E27 +#define GL_CURRENT_TIME_NV 0x8E28 +#define GL_NUM_FILL_STREAMS_NV 0x8E29 +#define GL_PRESENT_TIME_NV 0x8E2A +#define GL_PRESENT_DURATION_NV 0x8E2B +#endif + +#ifndef GL_EXT_transform_feedback +#define GL_TRANSFORM_FEEDBACK_BUFFER_EXT 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_START_EXT 0x8C84 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_EXT 0x8C85 +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_EXT 0x8C8F +#define GL_INTERLEAVED_ATTRIBS_EXT 0x8C8C +#define GL_SEPARATE_ATTRIBS_EXT 0x8C8D +#define GL_PRIMITIVES_GENERATED_EXT 0x8C87 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_EXT 0x8C88 +#define GL_RASTERIZER_DISCARD_EXT 0x8C89 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_EXT 0x8C8B +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT 0x8C80 +#define GL_TRANSFORM_FEEDBACK_VARYINGS_EXT 0x8C83 +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE_EXT 0x8C7F +#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH_EXT 0x8C76 +#endif + +#ifndef GL_EXT_direct_state_access +#define GL_PROGRAM_MATRIX_EXT 0x8E2D +#define GL_TRANSPOSE_PROGRAM_MATRIX_EXT 
0x8E2E +#define GL_PROGRAM_MATRIX_STACK_DEPTH_EXT 0x8E2F +#endif + +#ifndef GL_EXT_vertex_array_bgra +/* reuse GL_BGRA */ +#endif + +#ifndef GL_EXT_texture_swizzle +#define GL_TEXTURE_SWIZZLE_R_EXT 0x8E42 +#define GL_TEXTURE_SWIZZLE_G_EXT 0x8E43 +#define GL_TEXTURE_SWIZZLE_B_EXT 0x8E44 +#define GL_TEXTURE_SWIZZLE_A_EXT 0x8E45 +#define GL_TEXTURE_SWIZZLE_RGBA_EXT 0x8E46 +#endif + +#ifndef GL_NV_explicit_multisample +#define GL_SAMPLE_POSITION_NV 0x8E50 +#define GL_SAMPLE_MASK_NV 0x8E51 +#define GL_SAMPLE_MASK_VALUE_NV 0x8E52 +#define GL_TEXTURE_BINDING_RENDERBUFFER_NV 0x8E53 +#define GL_TEXTURE_RENDERBUFFER_DATA_STORE_BINDING_NV 0x8E54 +#define GL_TEXTURE_RENDERBUFFER_NV 0x8E55 +#define GL_SAMPLER_RENDERBUFFER_NV 0x8E56 +#define GL_INT_SAMPLER_RENDERBUFFER_NV 0x8E57 +#define GL_UNSIGNED_INT_SAMPLER_RENDERBUFFER_NV 0x8E58 +#define GL_MAX_SAMPLE_MASK_WORDS_NV 0x8E59 +#endif + +#ifndef GL_NV_transform_feedback2 +#define GL_TRANSFORM_FEEDBACK_NV 0x8E22 +#define GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED_NV 0x8E23 +#define GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE_NV 0x8E24 +#define GL_TRANSFORM_FEEDBACK_BINDING_NV 0x8E25 +#endif + +#ifndef GL_ATI_meminfo +#define GL_VBO_FREE_MEMORY_ATI 0x87FB +#define GL_TEXTURE_FREE_MEMORY_ATI 0x87FC +#define GL_RENDERBUFFER_FREE_MEMORY_ATI 0x87FD +#endif + +#ifndef GL_AMD_performance_monitor +#define GL_COUNTER_TYPE_AMD 0x8BC0 +#define GL_COUNTER_RANGE_AMD 0x8BC1 +#define GL_UNSIGNED_INT64_AMD 0x8BC2 +#define GL_PERCENTAGE_AMD 0x8BC3 +#define GL_PERFMON_RESULT_AVAILABLE_AMD 0x8BC4 +#define GL_PERFMON_RESULT_SIZE_AMD 0x8BC5 +#define GL_PERFMON_RESULT_AMD 0x8BC6 +#endif + +#ifndef GL_AMD_texture_texture4 +#endif + +#ifndef GL_AMD_vertex_shader_tesselator +#define GL_SAMPLER_BUFFER_AMD 0x9001 +#define GL_INT_SAMPLER_BUFFER_AMD 0x9002 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER_AMD 0x9003 +#define GL_TESSELLATION_MODE_AMD 0x9004 +#define GL_TESSELLATION_FACTOR_AMD 0x9005 +#define GL_DISCRETE_AMD 0x9006 +#define GL_CONTINUOUS_AMD 0x9007 +#endif + +#ifndef GL_EXT_provoking_vertex +#define GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION_EXT 0x8E4C +#define GL_FIRST_VERTEX_CONVENTION_EXT 0x8E4D +#define GL_LAST_VERTEX_CONVENTION_EXT 0x8E4E +#define GL_PROVOKING_VERTEX_EXT 0x8E4F +#endif + +#ifndef GL_EXT_texture_snorm +#define GL_ALPHA_SNORM 0x9010 +#define GL_LUMINANCE_SNORM 0x9011 +#define GL_LUMINANCE_ALPHA_SNORM 0x9012 +#define GL_INTENSITY_SNORM 0x9013 +#define GL_ALPHA8_SNORM 0x9014 +#define GL_LUMINANCE8_SNORM 0x9015 +#define GL_LUMINANCE8_ALPHA8_SNORM 0x9016 +#define GL_INTENSITY8_SNORM 0x9017 +#define GL_ALPHA16_SNORM 0x9018 +#define GL_LUMINANCE16_SNORM 0x9019 +#define GL_LUMINANCE16_ALPHA16_SNORM 0x901A +#define GL_INTENSITY16_SNORM 0x901B +/* reuse GL_RED_SNORM */ +/* reuse GL_RG_SNORM */ +/* reuse GL_RGB_SNORM */ +/* reuse GL_RGBA_SNORM */ +/* reuse GL_R8_SNORM */ +/* reuse GL_RG8_SNORM */ +/* reuse GL_RGB8_SNORM */ +/* reuse GL_RGBA8_SNORM */ +/* reuse GL_R16_SNORM */ +/* reuse GL_RG16_SNORM */ +/* reuse GL_RGB16_SNORM */ +/* reuse GL_RGBA16_SNORM */ +/* reuse GL_SIGNED_NORMALIZED */ +#endif + +#ifndef GL_AMD_draw_buffers_blend +#endif + +#ifndef GL_APPLE_texture_range +#define GL_TEXTURE_RANGE_LENGTH_APPLE 0x85B7 +#define GL_TEXTURE_RANGE_POINTER_APPLE 0x85B8 +#define GL_TEXTURE_STORAGE_HINT_APPLE 0x85BC +#define GL_STORAGE_PRIVATE_APPLE 0x85BD +/* reuse GL_STORAGE_CACHED_APPLE */ +/* reuse GL_STORAGE_SHARED_APPLE */ +#endif + +#ifndef GL_APPLE_float_pixels +#define GL_HALF_APPLE 0x140B +#define GL_RGBA_FLOAT32_APPLE 0x8814 +#define GL_RGB_FLOAT32_APPLE 0x8815 +#define 
GL_ALPHA_FLOAT32_APPLE 0x8816 +#define GL_INTENSITY_FLOAT32_APPLE 0x8817 +#define GL_LUMINANCE_FLOAT32_APPLE 0x8818 +#define GL_LUMINANCE_ALPHA_FLOAT32_APPLE 0x8819 +#define GL_RGBA_FLOAT16_APPLE 0x881A +#define GL_RGB_FLOAT16_APPLE 0x881B +#define GL_ALPHA_FLOAT16_APPLE 0x881C +#define GL_INTENSITY_FLOAT16_APPLE 0x881D +#define GL_LUMINANCE_FLOAT16_APPLE 0x881E +#define GL_LUMINANCE_ALPHA_FLOAT16_APPLE 0x881F +#define GL_COLOR_FLOAT_APPLE 0x8A0F +#endif + +#ifndef GL_APPLE_vertex_program_evaluators +#define GL_VERTEX_ATTRIB_MAP1_APPLE 0x8A00 +#define GL_VERTEX_ATTRIB_MAP2_APPLE 0x8A01 +#define GL_VERTEX_ATTRIB_MAP1_SIZE_APPLE 0x8A02 +#define GL_VERTEX_ATTRIB_MAP1_COEFF_APPLE 0x8A03 +#define GL_VERTEX_ATTRIB_MAP1_ORDER_APPLE 0x8A04 +#define GL_VERTEX_ATTRIB_MAP1_DOMAIN_APPLE 0x8A05 +#define GL_VERTEX_ATTRIB_MAP2_SIZE_APPLE 0x8A06 +#define GL_VERTEX_ATTRIB_MAP2_COEFF_APPLE 0x8A07 +#define GL_VERTEX_ATTRIB_MAP2_ORDER_APPLE 0x8A08 +#define GL_VERTEX_ATTRIB_MAP2_DOMAIN_APPLE 0x8A09 +#endif + +#ifndef GL_APPLE_aux_depth_stencil +#define GL_AUX_DEPTH_STENCIL_APPLE 0x8A14 +#endif + +#ifndef GL_APPLE_object_purgeable +#define GL_BUFFER_OBJECT_APPLE 0x85B3 +#define GL_RELEASED_APPLE 0x8A19 +#define GL_VOLATILE_APPLE 0x8A1A +#define GL_RETAINED_APPLE 0x8A1B +#define GL_UNDEFINED_APPLE 0x8A1C +#define GL_PURGEABLE_APPLE 0x8A1D +#endif + +#ifndef GL_APPLE_row_bytes +#define GL_PACK_ROW_BYTES_APPLE 0x8A15 +#define GL_UNPACK_ROW_BYTES_APPLE 0x8A16 +#endif + +#ifndef GL_APPLE_rgb_422 +#define GL_RGB_422_APPLE 0x8A1F +/* reuse GL_UNSIGNED_SHORT_8_8_APPLE */ +/* reuse GL_UNSIGNED_SHORT_8_8_REV_APPLE */ +#endif + +#ifndef GL_NV_video_capture +#define GL_VIDEO_BUFFER_NV 0x9020 +#define GL_VIDEO_BUFFER_BINDING_NV 0x9021 +#define GL_FIELD_UPPER_NV 0x9022 +#define GL_FIELD_LOWER_NV 0x9023 +#define GL_NUM_VIDEO_CAPTURE_STREAMS_NV 0x9024 +#define GL_NEXT_VIDEO_CAPTURE_BUFFER_STATUS_NV 0x9025 +#define GL_VIDEO_CAPTURE_TO_422_SUPPORTED_NV 0x9026 +#define GL_LAST_VIDEO_CAPTURE_STATUS_NV 0x9027 +#define GL_VIDEO_BUFFER_PITCH_NV 0x9028 +#define GL_VIDEO_COLOR_CONVERSION_MATRIX_NV 0x9029 +#define GL_VIDEO_COLOR_CONVERSION_MAX_NV 0x902A +#define GL_VIDEO_COLOR_CONVERSION_MIN_NV 0x902B +#define GL_VIDEO_COLOR_CONVERSION_OFFSET_NV 0x902C +#define GL_VIDEO_BUFFER_INTERNAL_FORMAT_NV 0x902D +#define GL_PARTIAL_SUCCESS_NV 0x902E +#define GL_SUCCESS_NV 0x902F +#define GL_FAILURE_NV 0x9030 +#define GL_YCBYCR8_422_NV 0x9031 +#define GL_YCBAYCR8A_4224_NV 0x9032 +#define GL_Z6Y10Z6CB10Z6Y10Z6CR10_422_NV 0x9033 +#define GL_Z6Y10Z6CB10Z6A10Z6Y10Z6CR10Z6A10_4224_NV 0x9034 +#define GL_Z4Y12Z4CB12Z4Y12Z4CR12_422_NV 0x9035 +#define GL_Z4Y12Z4CB12Z4A12Z4Y12Z4CR12Z4A12_4224_NV 0x9036 +#define GL_Z4Y12Z4CB12Z4CR12_444_NV 0x9037 +#define GL_VIDEO_CAPTURE_FRAME_WIDTH_NV 0x9038 +#define GL_VIDEO_CAPTURE_FRAME_HEIGHT_NV 0x9039 +#define GL_VIDEO_CAPTURE_FIELD_UPPER_HEIGHT_NV 0x903A +#define GL_VIDEO_CAPTURE_FIELD_LOWER_HEIGHT_NV 0x903B +#define GL_VIDEO_CAPTURE_SURFACE_ORIGIN_NV 0x903C +#endif + +#ifndef GL_NV_copy_image +#endif + +#ifndef GL_EXT_separate_shader_objects +#define GL_ACTIVE_PROGRAM_EXT 0x8B8D +#endif + +#ifndef GL_NV_parameter_buffer_object2 +#endif + +#ifndef GL_NV_shader_buffer_load +#define GL_BUFFER_GPU_ADDRESS_NV 0x8F1D +#define GL_GPU_ADDRESS_NV 0x8F34 +#define GL_MAX_SHADER_BUFFER_ADDRESS_NV 0x8F35 +#endif + +#ifndef GL_NV_vertex_buffer_unified_memory +#define GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV 0x8F1E +#define GL_ELEMENT_ARRAY_UNIFIED_NV 0x8F1F +#define GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV 0x8F20 +#define 
GL_VERTEX_ARRAY_ADDRESS_NV 0x8F21 +#define GL_NORMAL_ARRAY_ADDRESS_NV 0x8F22 +#define GL_COLOR_ARRAY_ADDRESS_NV 0x8F23 +#define GL_INDEX_ARRAY_ADDRESS_NV 0x8F24 +#define GL_TEXTURE_COORD_ARRAY_ADDRESS_NV 0x8F25 +#define GL_EDGE_FLAG_ARRAY_ADDRESS_NV 0x8F26 +#define GL_SECONDARY_COLOR_ARRAY_ADDRESS_NV 0x8F27 +#define GL_FOG_COORD_ARRAY_ADDRESS_NV 0x8F28 +#define GL_ELEMENT_ARRAY_ADDRESS_NV 0x8F29 +#define GL_VERTEX_ATTRIB_ARRAY_LENGTH_NV 0x8F2A +#define GL_VERTEX_ARRAY_LENGTH_NV 0x8F2B +#define GL_NORMAL_ARRAY_LENGTH_NV 0x8F2C +#define GL_COLOR_ARRAY_LENGTH_NV 0x8F2D +#define GL_INDEX_ARRAY_LENGTH_NV 0x8F2E +#define GL_TEXTURE_COORD_ARRAY_LENGTH_NV 0x8F2F +#define GL_EDGE_FLAG_ARRAY_LENGTH_NV 0x8F30 +#define GL_SECONDARY_COLOR_ARRAY_LENGTH_NV 0x8F31 +#define GL_FOG_COORD_ARRAY_LENGTH_NV 0x8F32 +#define GL_ELEMENT_ARRAY_LENGTH_NV 0x8F33 +#define GL_DRAW_INDIRECT_UNIFIED_NV 0x8F40 +#define GL_DRAW_INDIRECT_ADDRESS_NV 0x8F41 +#define GL_DRAW_INDIRECT_LENGTH_NV 0x8F42 +#endif + +#ifndef GL_NV_texture_barrier +#endif + +#ifndef GL_AMD_shader_stencil_export +#endif + +#ifndef GL_AMD_seamless_cubemap_per_texture +/* reuse GL_TEXTURE_CUBE_MAP_SEAMLESS */ +#endif + +#ifndef GL_AMD_conservative_depth +#endif + +#ifndef GL_EXT_shader_image_load_store +#define GL_MAX_IMAGE_UNITS_EXT 0x8F38 +#define GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS_EXT 0x8F39 +#define GL_IMAGE_BINDING_NAME_EXT 0x8F3A +#define GL_IMAGE_BINDING_LEVEL_EXT 0x8F3B +#define GL_IMAGE_BINDING_LAYERED_EXT 0x8F3C +#define GL_IMAGE_BINDING_LAYER_EXT 0x8F3D +#define GL_IMAGE_BINDING_ACCESS_EXT 0x8F3E +#define GL_IMAGE_1D_EXT 0x904C +#define GL_IMAGE_2D_EXT 0x904D +#define GL_IMAGE_3D_EXT 0x904E +#define GL_IMAGE_2D_RECT_EXT 0x904F +#define GL_IMAGE_CUBE_EXT 0x9050 +#define GL_IMAGE_BUFFER_EXT 0x9051 +#define GL_IMAGE_1D_ARRAY_EXT 0x9052 +#define GL_IMAGE_2D_ARRAY_EXT 0x9053 +#define GL_IMAGE_CUBE_MAP_ARRAY_EXT 0x9054 +#define GL_IMAGE_2D_MULTISAMPLE_EXT 0x9055 +#define GL_IMAGE_2D_MULTISAMPLE_ARRAY_EXT 0x9056 +#define GL_INT_IMAGE_1D_EXT 0x9057 +#define GL_INT_IMAGE_2D_EXT 0x9058 +#define GL_INT_IMAGE_3D_EXT 0x9059 +#define GL_INT_IMAGE_2D_RECT_EXT 0x905A +#define GL_INT_IMAGE_CUBE_EXT 0x905B +#define GL_INT_IMAGE_BUFFER_EXT 0x905C +#define GL_INT_IMAGE_1D_ARRAY_EXT 0x905D +#define GL_INT_IMAGE_2D_ARRAY_EXT 0x905E +#define GL_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x905F +#define GL_INT_IMAGE_2D_MULTISAMPLE_EXT 0x9060 +#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY_EXT 0x9061 +#define GL_UNSIGNED_INT_IMAGE_1D_EXT 0x9062 +#define GL_UNSIGNED_INT_IMAGE_2D_EXT 0x9063 +#define GL_UNSIGNED_INT_IMAGE_3D_EXT 0x9064 +#define GL_UNSIGNED_INT_IMAGE_2D_RECT_EXT 0x9065 +#define GL_UNSIGNED_INT_IMAGE_CUBE_EXT 0x9066 +#define GL_UNSIGNED_INT_IMAGE_BUFFER_EXT 0x9067 +#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY_EXT 0x9068 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY_EXT 0x9069 +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x906A +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_EXT 0x906B +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY_EXT 0x906C +#define GL_MAX_IMAGE_SAMPLES_EXT 0x906D +#define GL_IMAGE_BINDING_FORMAT_EXT 0x906E +#define GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT_EXT 0x00000001 +#define GL_ELEMENT_ARRAY_BARRIER_BIT_EXT 0x00000002 +#define GL_UNIFORM_BARRIER_BIT_EXT 0x00000004 +#define GL_TEXTURE_FETCH_BARRIER_BIT_EXT 0x00000008 +#define GL_SHADER_IMAGE_ACCESS_BARRIER_BIT_EXT 0x00000020 +#define GL_COMMAND_BARRIER_BIT_EXT 0x00000040 +#define GL_PIXEL_BUFFER_BARRIER_BIT_EXT 0x00000080 +#define GL_TEXTURE_UPDATE_BARRIER_BIT_EXT 0x00000100 +#define 
GL_BUFFER_UPDATE_BARRIER_BIT_EXT 0x00000200 +#define GL_FRAMEBUFFER_BARRIER_BIT_EXT 0x00000400 +#define GL_TRANSFORM_FEEDBACK_BARRIER_BIT_EXT 0x00000800 +#define GL_ATOMIC_COUNTER_BARRIER_BIT_EXT 0x00001000 +#define GL_ALL_BARRIER_BITS_EXT 0xFFFFFFFF +#endif + +#ifndef GL_EXT_vertex_attrib_64bit +/* reuse GL_DOUBLE */ +#define GL_DOUBLE_VEC2_EXT 0x8FFC +#define GL_DOUBLE_VEC3_EXT 0x8FFD +#define GL_DOUBLE_VEC4_EXT 0x8FFE +#define GL_DOUBLE_MAT2_EXT 0x8F46 +#define GL_DOUBLE_MAT3_EXT 0x8F47 +#define GL_DOUBLE_MAT4_EXT 0x8F48 +#define GL_DOUBLE_MAT2x3_EXT 0x8F49 +#define GL_DOUBLE_MAT2x4_EXT 0x8F4A +#define GL_DOUBLE_MAT3x2_EXT 0x8F4B +#define GL_DOUBLE_MAT3x4_EXT 0x8F4C +#define GL_DOUBLE_MAT4x2_EXT 0x8F4D +#define GL_DOUBLE_MAT4x3_EXT 0x8F4E +#endif + +#ifndef GL_NV_gpu_program5 +#define GL_MAX_GEOMETRY_PROGRAM_INVOCATIONS_NV 0x8E5A +#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET_NV 0x8E5B +#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_NV 0x8E5C +#define GL_FRAGMENT_PROGRAM_INTERPOLATION_OFFSET_BITS_NV 0x8E5D +#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET_NV 0x8E5E +#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_NV 0x8E5F +#define GL_MAX_PROGRAM_SUBROUTINE_PARAMETERS_NV 0x8F44 +#define GL_MAX_PROGRAM_SUBROUTINE_NUM_NV 0x8F45 +#endif + +#ifndef GL_NV_gpu_shader5 +#define GL_INT64_NV 0x140E +#define GL_UNSIGNED_INT64_NV 0x140F +#define GL_INT8_NV 0x8FE0 +#define GL_INT8_VEC2_NV 0x8FE1 +#define GL_INT8_VEC3_NV 0x8FE2 +#define GL_INT8_VEC4_NV 0x8FE3 +#define GL_INT16_NV 0x8FE4 +#define GL_INT16_VEC2_NV 0x8FE5 +#define GL_INT16_VEC3_NV 0x8FE6 +#define GL_INT16_VEC4_NV 0x8FE7 +#define GL_INT64_VEC2_NV 0x8FE9 +#define GL_INT64_VEC3_NV 0x8FEA +#define GL_INT64_VEC4_NV 0x8FEB +#define GL_UNSIGNED_INT8_NV 0x8FEC +#define GL_UNSIGNED_INT8_VEC2_NV 0x8FED +#define GL_UNSIGNED_INT8_VEC3_NV 0x8FEE +#define GL_UNSIGNED_INT8_VEC4_NV 0x8FEF +#define GL_UNSIGNED_INT16_NV 0x8FF0 +#define GL_UNSIGNED_INT16_VEC2_NV 0x8FF1 +#define GL_UNSIGNED_INT16_VEC3_NV 0x8FF2 +#define GL_UNSIGNED_INT16_VEC4_NV 0x8FF3 +#define GL_UNSIGNED_INT64_VEC2_NV 0x8FF5 +#define GL_UNSIGNED_INT64_VEC3_NV 0x8FF6 +#define GL_UNSIGNED_INT64_VEC4_NV 0x8FF7 +#define GL_FLOAT16_NV 0x8FF8 +#define GL_FLOAT16_VEC2_NV 0x8FF9 +#define GL_FLOAT16_VEC3_NV 0x8FFA +#define GL_FLOAT16_VEC4_NV 0x8FFB +/* reuse GL_PATCHES */ +#endif + +#ifndef GL_NV_shader_buffer_store +#define GL_SHADER_GLOBAL_ACCESS_BARRIER_BIT_NV 0x00000010 +/* reuse GL_READ_WRITE */ +/* reuse GL_WRITE_ONLY */ +#endif + +#ifndef GL_NV_tessellation_program5 +#define GL_MAX_PROGRAM_PATCH_ATTRIBS_NV 0x86D8 +#define GL_TESS_CONTROL_PROGRAM_NV 0x891E +#define GL_TESS_EVALUATION_PROGRAM_NV 0x891F +#define GL_TESS_CONTROL_PROGRAM_PARAMETER_BUFFER_NV 0x8C74 +#define GL_TESS_EVALUATION_PROGRAM_PARAMETER_BUFFER_NV 0x8C75 +#endif + +#ifndef GL_NV_vertex_attrib_integer_64bit +/* reuse GL_INT64_NV */ +/* reuse GL_UNSIGNED_INT64_NV */ +#endif + +#ifndef GL_NV_multisample_coverage +#define GL_COVERAGE_SAMPLES_NV 0x80A9 +#define GL_COLOR_SAMPLES_NV 0x8E20 +#endif + +#ifndef GL_AMD_name_gen_delete +#define GL_DATA_BUFFER_AMD 0x9151 +#define GL_PERFORMANCE_MONITOR_AMD 0x9152 +#define GL_QUERY_OBJECT_AMD 0x9153 +#define GL_VERTEX_ARRAY_OBJECT_AMD 0x9154 +#define GL_SAMPLER_OBJECT_AMD 0x9155 +#endif + +#ifndef GL_AMD_debug_output +#define GL_MAX_DEBUG_MESSAGE_LENGTH_AMD 0x9143 +#define GL_MAX_DEBUG_LOGGED_MESSAGES_AMD 0x9144 +#define GL_DEBUG_LOGGED_MESSAGES_AMD 0x9145 +#define GL_DEBUG_SEVERITY_HIGH_AMD 0x9146 +#define GL_DEBUG_SEVERITY_MEDIUM_AMD 0x9147 +#define GL_DEBUG_SEVERITY_LOW_AMD 0x9148 +#define 
GL_DEBUG_CATEGORY_API_ERROR_AMD 0x9149 +#define GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD 0x914A +#define GL_DEBUG_CATEGORY_DEPRECATION_AMD 0x914B +#define GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD 0x914C +#define GL_DEBUG_CATEGORY_PERFORMANCE_AMD 0x914D +#define GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD 0x914E +#define GL_DEBUG_CATEGORY_APPLICATION_AMD 0x914F +#define GL_DEBUG_CATEGORY_OTHER_AMD 0x9150 +#endif + +#ifndef GL_NV_vdpau_interop +#define GL_SURFACE_STATE_NV 0x86EB +#define GL_SURFACE_REGISTERED_NV 0x86FD +#define GL_SURFACE_MAPPED_NV 0x8700 +#define GL_WRITE_DISCARD_NV 0x88BE +#endif + +#ifndef GL_AMD_transform_feedback3_lines_triangles +#endif + +#ifndef GL_AMD_depth_clamp_separate +#define GL_DEPTH_CLAMP_NEAR_AMD 0x901E +#define GL_DEPTH_CLAMP_FAR_AMD 0x901F +#endif + +#ifndef GL_EXT_texture_sRGB_decode +#define GL_TEXTURE_SRGB_DECODE_EXT 0x8A48 +#define GL_DECODE_EXT 0x8A49 +#define GL_SKIP_DECODE_EXT 0x8A4A +#endif + +#ifndef GL_NV_texture_multisample +#define GL_TEXTURE_COVERAGE_SAMPLES_NV 0x9045 +#define GL_TEXTURE_COLOR_SAMPLES_NV 0x9046 +#endif + +#ifndef GL_AMD_blend_minmax_factor +#define GL_FACTOR_MIN_AMD 0x901C +#define GL_FACTOR_MAX_AMD 0x901D +#endif + +#ifndef GL_AMD_sample_positions +#define GL_SUBSAMPLE_DISTANCE_AMD 0x883F +#endif + +#ifndef GL_EXT_x11_sync_object +#define GL_SYNC_X11_FENCE_EXT 0x90E1 +#endif + +#ifndef GL_AMD_multi_draw_indirect +#endif + +#ifndef GL_EXT_framebuffer_multisample_blit_scaled +#define GL_SCALED_RESOLVE_FASTEST_EXT 0x90BA +#define GL_SCALED_RESOLVE_NICEST_EXT 0x90BB +#endif + +#ifndef GL_NV_path_rendering +#define GL_PATH_FORMAT_SVG_NV 0x9070 +#define GL_PATH_FORMAT_PS_NV 0x9071 +#define GL_STANDARD_FONT_NAME_NV 0x9072 +#define GL_SYSTEM_FONT_NAME_NV 0x9073 +#define GL_FILE_NAME_NV 0x9074 +#define GL_PATH_STROKE_WIDTH_NV 0x9075 +#define GL_PATH_END_CAPS_NV 0x9076 +#define GL_PATH_INITIAL_END_CAP_NV 0x9077 +#define GL_PATH_TERMINAL_END_CAP_NV 0x9078 +#define GL_PATH_JOIN_STYLE_NV 0x9079 +#define GL_PATH_MITER_LIMIT_NV 0x907A +#define GL_PATH_DASH_CAPS_NV 0x907B +#define GL_PATH_INITIAL_DASH_CAP_NV 0x907C +#define GL_PATH_TERMINAL_DASH_CAP_NV 0x907D +#define GL_PATH_DASH_OFFSET_NV 0x907E +#define GL_PATH_CLIENT_LENGTH_NV 0x907F +#define GL_PATH_FILL_MODE_NV 0x9080 +#define GL_PATH_FILL_MASK_NV 0x9081 +#define GL_PATH_FILL_COVER_MODE_NV 0x9082 +#define GL_PATH_STROKE_COVER_MODE_NV 0x9083 +#define GL_PATH_STROKE_MASK_NV 0x9084 +#define GL_PATH_SAMPLE_QUALITY_NV 0x9085 +#define GL_PATH_STROKE_BOUND_NV 0x9086 +#define GL_PATH_STROKE_OVERSAMPLE_COUNT_NV 0x9087 +#define GL_COUNT_UP_NV 0x9088 +#define GL_COUNT_DOWN_NV 0x9089 +#define GL_PATH_OBJECT_BOUNDING_BOX_NV 0x908A +#define GL_CONVEX_HULL_NV 0x908B +#define GL_MULTI_HULLS_NV 0x908C +#define GL_BOUNDING_BOX_NV 0x908D +#define GL_TRANSLATE_X_NV 0x908E +#define GL_TRANSLATE_Y_NV 0x908F +#define GL_TRANSLATE_2D_NV 0x9090 +#define GL_TRANSLATE_3D_NV 0x9091 +#define GL_AFFINE_2D_NV 0x9092 +#define GL_PROJECTIVE_2D_NV 0x9093 +#define GL_AFFINE_3D_NV 0x9094 +#define GL_PROJECTIVE_3D_NV 0x9095 +#define GL_TRANSPOSE_AFFINE_2D_NV 0x9096 +#define GL_TRANSPOSE_PROJECTIVE_2D_NV 0x9097 +#define GL_TRANSPOSE_AFFINE_3D_NV 0x9098 +#define GL_TRANSPOSE_PROJECTIVE_3D_NV 0x9099 +#define GL_UTF8_NV 0x909A +#define GL_UTF16_NV 0x909B +#define GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV 0x909C +#define GL_PATH_COMMAND_COUNT_NV 0x909D +#define GL_PATH_COORD_COUNT_NV 0x909E +#define GL_PATH_DASH_ARRAY_COUNT_NV 0x909F +#define GL_PATH_COMPUTED_LENGTH_NV 0x90A0 +#define GL_PATH_FILL_BOUNDING_BOX_NV 0x90A1 +#define 
GL_PATH_STROKE_BOUNDING_BOX_NV 0x90A2 +#define GL_SQUARE_NV 0x90A3 +#define GL_ROUND_NV 0x90A4 +#define GL_TRIANGULAR_NV 0x90A5 +#define GL_BEVEL_NV 0x90A6 +#define GL_MITER_REVERT_NV 0x90A7 +#define GL_MITER_TRUNCATE_NV 0x90A8 +#define GL_SKIP_MISSING_GLYPH_NV 0x90A9 +#define GL_USE_MISSING_GLYPH_NV 0x90AA +#define GL_PATH_ERROR_POSITION_NV 0x90AB +#define GL_PATH_FOG_GEN_MODE_NV 0x90AC +#define GL_ACCUM_ADJACENT_PAIRS_NV 0x90AD +#define GL_ADJACENT_PAIRS_NV 0x90AE +#define GL_FIRST_TO_REST_NV 0x90AF +#define GL_PATH_GEN_MODE_NV 0x90B0 +#define GL_PATH_GEN_COEFF_NV 0x90B1 +#define GL_PATH_GEN_COLOR_FORMAT_NV 0x90B2 +#define GL_PATH_GEN_COMPONENTS_NV 0x90B3 +#define GL_PATH_STENCIL_FUNC_NV 0x90B7 +#define GL_PATH_STENCIL_REF_NV 0x90B8 +#define GL_PATH_STENCIL_VALUE_MASK_NV 0x90B9 +#define GL_PATH_STENCIL_DEPTH_OFFSET_FACTOR_NV 0x90BD +#define GL_PATH_STENCIL_DEPTH_OFFSET_UNITS_NV 0x90BE +#define GL_PATH_COVER_DEPTH_FUNC_NV 0x90BF +#define GL_PATH_DASH_OFFSET_RESET_NV 0x90B4 +#define GL_MOVE_TO_RESETS_NV 0x90B5 +#define GL_MOVE_TO_CONTINUES_NV 0x90B6 +#define GL_CLOSE_PATH_NV 0x00 +#define GL_MOVE_TO_NV 0x02 +#define GL_RELATIVE_MOVE_TO_NV 0x03 +#define GL_LINE_TO_NV 0x04 +#define GL_RELATIVE_LINE_TO_NV 0x05 +#define GL_HORIZONTAL_LINE_TO_NV 0x06 +#define GL_RELATIVE_HORIZONTAL_LINE_TO_NV 0x07 +#define GL_VERTICAL_LINE_TO_NV 0x08 +#define GL_RELATIVE_VERTICAL_LINE_TO_NV 0x09 +#define GL_QUADRATIC_CURVE_TO_NV 0x0A +#define GL_RELATIVE_QUADRATIC_CURVE_TO_NV 0x0B +#define GL_CUBIC_CURVE_TO_NV 0x0C +#define GL_RELATIVE_CUBIC_CURVE_TO_NV 0x0D +#define GL_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0E +#define GL_RELATIVE_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0F +#define GL_SMOOTH_CUBIC_CURVE_TO_NV 0x10 +#define GL_RELATIVE_SMOOTH_CUBIC_CURVE_TO_NV 0x11 +#define GL_SMALL_CCW_ARC_TO_NV 0x12 +#define GL_RELATIVE_SMALL_CCW_ARC_TO_NV 0x13 +#define GL_SMALL_CW_ARC_TO_NV 0x14 +#define GL_RELATIVE_SMALL_CW_ARC_TO_NV 0x15 +#define GL_LARGE_CCW_ARC_TO_NV 0x16 +#define GL_RELATIVE_LARGE_CCW_ARC_TO_NV 0x17 +#define GL_LARGE_CW_ARC_TO_NV 0x18 +#define GL_RELATIVE_LARGE_CW_ARC_TO_NV 0x19 +#define GL_RESTART_PATH_NV 0xF0 +#define GL_DUP_FIRST_CUBIC_CURVE_TO_NV 0xF2 +#define GL_DUP_LAST_CUBIC_CURVE_TO_NV 0xF4 +#define GL_RECT_NV 0xF6 +#define GL_CIRCULAR_CCW_ARC_TO_NV 0xF8 +#define GL_CIRCULAR_CW_ARC_TO_NV 0xFA +#define GL_CIRCULAR_TANGENT_ARC_TO_NV 0xFC +#define GL_ARC_TO_NV 0xFE +#define GL_RELATIVE_ARC_TO_NV 0xFF +#define GL_BOLD_BIT_NV 0x01 +#define GL_ITALIC_BIT_NV 0x02 +#define GL_GLYPH_WIDTH_BIT_NV 0x01 +#define GL_GLYPH_HEIGHT_BIT_NV 0x02 +#define GL_GLYPH_HORIZONTAL_BEARING_X_BIT_NV 0x04 +#define GL_GLYPH_HORIZONTAL_BEARING_Y_BIT_NV 0x08 +#define GL_GLYPH_HORIZONTAL_BEARING_ADVANCE_BIT_NV 0x10 +#define GL_GLYPH_VERTICAL_BEARING_X_BIT_NV 0x20 +#define GL_GLYPH_VERTICAL_BEARING_Y_BIT_NV 0x40 +#define GL_GLYPH_VERTICAL_BEARING_ADVANCE_BIT_NV 0x80 +#define GL_GLYPH_HAS_KERNING_NV 0x100 +#define GL_FONT_X_MIN_BOUNDS_NV 0x00010000 +#define GL_FONT_Y_MIN_BOUNDS_NV 0x00020000 +#define GL_FONT_X_MAX_BOUNDS_NV 0x00040000 +#define GL_FONT_Y_MAX_BOUNDS_NV 0x00080000 +#define GL_FONT_UNITS_PER_EM_NV 0x00100000 +#define GL_FONT_ASCENDER_NV 0x00200000 +#define GL_FONT_DESCENDER_NV 0x00400000 +#define GL_FONT_HEIGHT_NV 0x00800000 +#define GL_FONT_MAX_ADVANCE_WIDTH_NV 0x01000000 +#define GL_FONT_MAX_ADVANCE_HEIGHT_NV 0x02000000 +#define GL_FONT_UNDERLINE_POSITION_NV 0x04000000 +#define GL_FONT_UNDERLINE_THICKNESS_NV 0x08000000 +#define GL_FONT_HAS_KERNING_NV 0x10000000 +#endif + +#ifndef GL_AMD_pinned_memory +#define 
GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD 0x9160
+#endif
+
+#ifndef GL_AMD_stencil_operation_extended
+#define GL_SET_AMD 0x874A
+#define GL_REPLACE_VALUE_AMD 0x874B
+#define GL_STENCIL_OP_VALUE_AMD 0x874C
+#define GL_STENCIL_BACK_OP_VALUE_AMD 0x874D
+#endif
+
+#ifndef GL_AMD_vertex_shader_viewport_index
+#endif
+
+#ifndef GL_AMD_vertex_shader_layer
+#endif
+
+#ifndef GL_NV_bindless_texture
+#endif
+
+#ifndef GL_NV_shader_atomic_float
+#endif
+
+#ifndef GL_AMD_query_buffer_object
+#define GL_QUERY_BUFFER_AMD 0x9192
+#define GL_QUERY_BUFFER_BINDING_AMD 0x9193
+#define GL_QUERY_RESULT_NO_WAIT_AMD 0x9194
+#endif
+
+#ifndef GL_AMD_sparse_texture
+#define GL_VIRTUAL_PAGE_SIZE_X_AMD 0x9195
+#define GL_VIRTUAL_PAGE_SIZE_Y_AMD 0x9196
+#define GL_VIRTUAL_PAGE_SIZE_Z_AMD 0x9197
+#define GL_MAX_SPARSE_TEXTURE_SIZE_AMD 0x9198
+#define GL_MAX_SPARSE_3D_TEXTURE_SIZE_AMD 0x9199
+#define GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS 0x919A
+#define GL_MIN_SPARSE_LEVEL_AMD 0x919B
+#define GL_MIN_LOD_WARNING_AMD 0x919C
+#define GL_TEXTURE_STORAGE_SPARSE_BIT_AMD 0x00000001
+#endif
+
+
+/*************************************************************/
+
+#include <stddef.h>
+#ifndef GL_VERSION_2_0
+/* GL type for program/shader text */
+typedef char GLchar;
+#endif
+
+#ifndef GL_VERSION_1_5
+/* GL types for handling large vertex buffer objects */
+typedef ptrdiff_t GLintptr;
+typedef ptrdiff_t GLsizeiptr;
+#endif
+
+#ifndef GL_ARB_vertex_buffer_object
+/* GL types for handling large vertex buffer objects */
+typedef ptrdiff_t GLintptrARB;
+typedef ptrdiff_t GLsizeiptrARB;
+#endif
+
+#ifndef GL_ARB_shader_objects
+/* GL types for program/shader text and shader object handles */
+typedef char GLcharARB;
+typedef unsigned int GLhandleARB;
+#endif
+
+/* GL type for "half" precision (s10e5) float data in host memory */
+#ifndef GL_ARB_half_float_pixel
+typedef unsigned short GLhalfARB;
+#endif
+
+#ifndef GL_NV_half_float
+typedef unsigned short GLhalfNV;
+#endif
+
+#ifndef GLEXT_64_TYPES_DEFINED
+/* This code block is duplicated in glxext.h, so must be protected */
+#define GLEXT_64_TYPES_DEFINED
+/* Define int32_t, int64_t, and uint64_t types for UST/MSC */
+/* (as used in the GL_EXT_timer_query extension). */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+#include <inttypes.h>
+#elif defined(__sun__) || defined(__digital__)
+#include <inttypes.h>
+#if defined(__STDC__)
+#if defined(__arch64__) || defined(_LP64)
+typedef long int int64_t;
+typedef unsigned long int uint64_t;
+#else
+typedef long long int int64_t;
+typedef unsigned long long int uint64_t;
+#endif /* __arch64__ */
+#endif /* __STDC__ */
+#elif defined( __VMS ) || defined(__sgi)
+#include <inttypes.h>
+#elif defined(__SCO__) || defined(__USLC__)
+#include <stdint.h>
+#elif defined(__UNIXOS2__) || defined(__SOL64__)
+typedef long int int32_t;
+typedef long long int int64_t;
+typedef unsigned long long int uint64_t;
+#elif defined(_WIN32) && defined(__GNUC__)
+#include <stdint.h>
+#elif defined(_WIN32)
+typedef __int32 int32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#else
+/* Fallback if nothing above works */
+#include <inttypes.h>
+#endif
+#endif
+
+#ifndef GL_EXT_timer_query
+typedef int64_t GLint64EXT;
+typedef uint64_t GLuint64EXT;
+#endif
+
+#ifndef GL_ARB_sync
+typedef int64_t GLint64;
+typedef uint64_t GLuint64;
+typedef struct __GLsync *GLsync;
+#endif
+
+#ifndef GL_ARB_cl_event
+/* These incomplete types let us declare types compatible with OpenCL's cl_context and cl_event */
+struct _cl_context;
+struct _cl_event;
+#endif
+
+#ifndef GL_ARB_debug_output
+typedef void (APIENTRY *GLDEBUGPROCARB)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,GLvoid *userParam);
+#endif
+
+#ifndef GL_AMD_debug_output
+typedef void (APIENTRY *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severity,GLsizei length,const GLchar *message,GLvoid *userParam);
+#endif
+
+#ifndef GL_KHR_debug
+typedef void (APIENTRY *GLDEBUGPROC)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,GLvoid *userParam);
+#endif
+
+#ifndef GL_NV_vdpau_interop
+typedef GLintptr GLvdpauSurfaceNV;
+#endif
+
+#ifndef GL_VERSION_1_2
+#define GL_VERSION_1_2 1
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+GLAPI void APIENTRY glBlendEquation (GLenum mode);
+GLAPI void APIENTRY glDrawRangeElements (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid *indices);
+GLAPI void APIENTRY glTexImage3D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid *pixels);
+GLAPI void APIENTRY glTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels);
+GLAPI void APIENTRY glCopyTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glColorTable (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const GLvoid *table);
+GLAPI void APIENTRY glColorTableParameterfv (GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glColorTableParameteriv (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glCopyColorTable (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glGetColorTable (GLenum target, GLenum format, GLenum type, GLvoid *table);
+GLAPI void APIENTRY glGetColorTableParameterfv (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetColorTableParameteriv (GLenum target, GLenum pname, GLint
*params); +GLAPI void APIENTRY glColorSubTable (GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum type, const GLvoid *data); +GLAPI void APIENTRY glCopyColorSubTable (GLenum target, GLsizei start, GLint x, GLint y, GLsizei width); +GLAPI void APIENTRY glConvolutionFilter1D (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const GLvoid *image); +GLAPI void APIENTRY glConvolutionFilter2D (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *image); +GLAPI void APIENTRY glConvolutionParameterf (GLenum target, GLenum pname, GLfloat params); +GLAPI void APIENTRY glConvolutionParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glConvolutionParameteri (GLenum target, GLenum pname, GLint params); +GLAPI void APIENTRY glConvolutionParameteriv (GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glCopyConvolutionFilter1D (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width); +GLAPI void APIENTRY glCopyConvolutionFilter2D (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI void APIENTRY glGetConvolutionFilter (GLenum target, GLenum format, GLenum type, GLvoid *image); +GLAPI void APIENTRY glGetConvolutionParameterfv (GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetConvolutionParameteriv (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetSeparableFilter (GLenum target, GLenum format, GLenum type, GLvoid *row, GLvoid *column, GLvoid *span); +GLAPI void APIENTRY glSeparableFilter2D (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *row, const GLvoid *column); +GLAPI void APIENTRY glGetHistogram (GLenum target, GLboolean reset, GLenum format, GLenum type, GLvoid *values); +GLAPI void APIENTRY glGetHistogramParameterfv (GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetHistogramParameteriv (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetMinmax (GLenum target, GLboolean reset, GLenum format, GLenum type, GLvoid *values); +GLAPI void APIENTRY glGetMinmaxParameterfv (GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetMinmaxParameteriv (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glHistogram (GLenum target, GLsizei width, GLenum internalformat, GLboolean sink); +GLAPI void APIENTRY glMinmax (GLenum target, GLenum internalformat, GLboolean sink); +GLAPI void APIENTRY glResetHistogram (GLenum target); +GLAPI void APIENTRY glResetMinmax (GLenum target); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode); +typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTSPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid *indices); +typedef void (APIENTRYP PFNGLTEXIMAGE3DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC) (GLenum target, GLint level, 
GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLCOLORTABLEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const GLvoid *table); +typedef void (APIENTRYP PFNGLCOLORTABLEPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLCOLORTABLEPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLCOPYCOLORTABLEPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width); +typedef void (APIENTRYP PFNGLGETCOLORTABLEPROC) (GLenum target, GLenum format, GLenum type, GLvoid *table); +typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLCOLORSUBTABLEPROC) (GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum type, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOPYCOLORSUBTABLEPROC) (GLenum target, GLsizei start, GLint x, GLint y, GLsizei width); +typedef void (APIENTRYP PFNGLCONVOLUTIONFILTER1DPROC) (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const GLvoid *image); +typedef void (APIENTRYP PFNGLCONVOLUTIONFILTER2DPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *image); +typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat params); +typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERIPROC) (GLenum target, GLenum pname, GLint params); +typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLCOPYCONVOLUTIONFILTER1DPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width); +typedef void (APIENTRYP PFNGLCOPYCONVOLUTIONFILTER2DPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLGETCONVOLUTIONFILTERPROC) (GLenum target, GLenum format, GLenum type, GLvoid *image); +typedef void (APIENTRYP PFNGLGETCONVOLUTIONPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETCONVOLUTIONPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETSEPARABLEFILTERPROC) (GLenum target, GLenum format, GLenum type, GLvoid *row, GLvoid *column, GLvoid *span); +typedef void (APIENTRYP PFNGLSEPARABLEFILTER2DPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *row, const GLvoid *column); +typedef void (APIENTRYP PFNGLGETHISTOGRAMPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, GLvoid *values); +typedef void (APIENTRYP PFNGLGETHISTOGRAMPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETHISTOGRAMPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETMINMAXPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, GLvoid *values); +typedef void (APIENTRYP PFNGLGETMINMAXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETMINMAXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); 
+typedef void (APIENTRYP PFNGLHISTOGRAMPROC) (GLenum target, GLsizei width, GLenum internalformat, GLboolean sink); +typedef void (APIENTRYP PFNGLMINMAXPROC) (GLenum target, GLenum internalformat, GLboolean sink); +typedef void (APIENTRYP PFNGLRESETHISTOGRAMPROC) (GLenum target); +typedef void (APIENTRYP PFNGLRESETMINMAXPROC) (GLenum target); +#endif + +#ifndef GL_VERSION_1_3 +#define GL_VERSION_1_3 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glActiveTexture (GLenum texture); +GLAPI void APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GLAPI void APIENTRY glCompressedTexImage3D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexImage1D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexSubImage1D (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glGetCompressedTexImage (GLenum target, GLint level, GLvoid *img); +GLAPI void APIENTRY glClientActiveTexture (GLenum texture); +GLAPI void APIENTRY glMultiTexCoord1d (GLenum target, GLdouble s); +GLAPI void APIENTRY glMultiTexCoord1dv (GLenum target, const GLdouble *v); +GLAPI void APIENTRY glMultiTexCoord1f (GLenum target, GLfloat s); +GLAPI void APIENTRY glMultiTexCoord1fv (GLenum target, const GLfloat *v); +GLAPI void APIENTRY glMultiTexCoord1i (GLenum target, GLint s); +GLAPI void APIENTRY glMultiTexCoord1iv (GLenum target, const GLint *v); +GLAPI void APIENTRY glMultiTexCoord1s (GLenum target, GLshort s); +GLAPI void APIENTRY glMultiTexCoord1sv (GLenum target, const GLshort *v); +GLAPI void APIENTRY glMultiTexCoord2d (GLenum target, GLdouble s, GLdouble t); +GLAPI void APIENTRY glMultiTexCoord2dv (GLenum target, const GLdouble *v); +GLAPI void APIENTRY glMultiTexCoord2f (GLenum target, GLfloat s, GLfloat t); +GLAPI void APIENTRY glMultiTexCoord2fv (GLenum target, const GLfloat *v); +GLAPI void APIENTRY glMultiTexCoord2i (GLenum target, GLint s, GLint t); +GLAPI void APIENTRY glMultiTexCoord2iv (GLenum target, const GLint *v); +GLAPI void APIENTRY glMultiTexCoord2s (GLenum target, GLshort s, GLshort t); +GLAPI void APIENTRY glMultiTexCoord2sv (GLenum target, const GLshort *v); +GLAPI void APIENTRY glMultiTexCoord3d (GLenum target, GLdouble s, GLdouble t, GLdouble r); +GLAPI void APIENTRY glMultiTexCoord3dv (GLenum target, const GLdouble *v); +GLAPI void APIENTRY glMultiTexCoord3f (GLenum target, GLfloat s, GLfloat t, GLfloat r); +GLAPI void APIENTRY glMultiTexCoord3fv (GLenum target, const GLfloat *v); +GLAPI void APIENTRY glMultiTexCoord3i (GLenum target, GLint s, GLint t, GLint r); +GLAPI void APIENTRY glMultiTexCoord3iv (GLenum target, const GLint *v); +GLAPI void APIENTRY glMultiTexCoord3s (GLenum target, GLshort 
s, GLshort t, GLshort r); +GLAPI void APIENTRY glMultiTexCoord3sv (GLenum target, const GLshort *v); +GLAPI void APIENTRY glMultiTexCoord4d (GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q); +GLAPI void APIENTRY glMultiTexCoord4dv (GLenum target, const GLdouble *v); +GLAPI void APIENTRY glMultiTexCoord4f (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q); +GLAPI void APIENTRY glMultiTexCoord4fv (GLenum target, const GLfloat *v); +GLAPI void APIENTRY glMultiTexCoord4i (GLenum target, GLint s, GLint t, GLint r, GLint q); +GLAPI void APIENTRY glMultiTexCoord4iv (GLenum target, const GLint *v); +GLAPI void APIENTRY glMultiTexCoord4s (GLenum target, GLshort s, GLshort t, GLshort r, GLshort q); +GLAPI void APIENTRY glMultiTexCoord4sv (GLenum target, const GLshort *v); +GLAPI void APIENTRY glLoadTransposeMatrixf (const GLfloat *m); +GLAPI void APIENTRY glLoadTransposeMatrixd (const GLdouble *m); +GLAPI void APIENTRY glMultTransposeMatrixf (const GLfloat *m); +GLAPI void APIENTRY glMultTransposeMatrixd (const GLdouble *m); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture); +typedef void (APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE1DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC) (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXIMAGEPROC) (GLenum target, GLint level, GLvoid *img); +typedef void (APIENTRYP PFNGLCLIENTACTIVETEXTUREPROC) (GLenum texture); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1DPROC) (GLenum target, GLdouble s); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1DVPROC) (GLenum target, const GLdouble *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1FPROC) (GLenum target, GLfloat s); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1FVPROC) (GLenum target, const GLfloat *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1IPROC) (GLenum target, GLint s); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1IVPROC) (GLenum target, const GLint *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1SPROC) (GLenum target, GLshort s); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1SVPROC) (GLenum target, const GLshort *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2DPROC) (GLenum target, GLdouble s, GLdouble t); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2DVPROC) (GLenum target, const GLdouble *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2FPROC) (GLenum target, GLfloat s, GLfloat t); +typedef void 
(APIENTRYP PFNGLMULTITEXCOORD2FVPROC) (GLenum target, const GLfloat *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2IPROC) (GLenum target, GLint s, GLint t); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2IVPROC) (GLenum target, const GLint *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2SPROC) (GLenum target, GLshort s, GLshort t); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2SVPROC) (GLenum target, const GLshort *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3DPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3DVPROC) (GLenum target, const GLdouble *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3FPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3FVPROC) (GLenum target, const GLfloat *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3IPROC) (GLenum target, GLint s, GLint t, GLint r); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3IVPROC) (GLenum target, const GLint *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3SPROC) (GLenum target, GLshort s, GLshort t, GLshort r); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3SVPROC) (GLenum target, const GLshort *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4DPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4DVPROC) (GLenum target, const GLdouble *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4FPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4FVPROC) (GLenum target, const GLfloat *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4IPROC) (GLenum target, GLint s, GLint t, GLint r, GLint q); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4IVPROC) (GLenum target, const GLint *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4SPROC) (GLenum target, GLshort s, GLshort t, GLshort r, GLshort q); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4SVPROC) (GLenum target, const GLshort *v); +typedef void (APIENTRYP PFNGLLOADTRANSPOSEMATRIXFPROC) (const GLfloat *m); +typedef void (APIENTRYP PFNGLLOADTRANSPOSEMATRIXDPROC) (const GLdouble *m); +typedef void (APIENTRYP PFNGLMULTTRANSPOSEMATRIXFPROC) (const GLfloat *m); +typedef void (APIENTRYP PFNGLMULTTRANSPOSEMATRIXDPROC) (const GLdouble *m); +#endif + +#ifndef GL_VERSION_1_4 +#define GL_VERSION_1_4 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +GLAPI void APIENTRY glMultiDrawArrays (GLenum mode, const GLint *first, const GLsizei *count, GLsizei drawcount); +GLAPI void APIENTRY glMultiDrawElements (GLenum mode, const GLsizei *count, GLenum type, const GLvoid* const *indices, GLsizei drawcount); +GLAPI void APIENTRY glPointParameterf (GLenum pname, GLfloat param); +GLAPI void APIENTRY glPointParameterfv (GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glPointParameteri (GLenum pname, GLint param); +GLAPI void APIENTRY glPointParameteriv (GLenum pname, const GLint *params); +GLAPI void APIENTRY glFogCoordf (GLfloat coord); +GLAPI void APIENTRY glFogCoordfv (const GLfloat *coord); +GLAPI void APIENTRY glFogCoordd (GLdouble coord); +GLAPI void APIENTRY glFogCoorddv (const GLdouble *coord); +GLAPI void APIENTRY glFogCoordPointer (GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glSecondaryColor3b (GLbyte red, GLbyte green, GLbyte blue); +GLAPI void APIENTRY glSecondaryColor3bv (const GLbyte *v); +GLAPI void APIENTRY glSecondaryColor3d (GLdouble red, GLdouble green, 
GLdouble blue); +GLAPI void APIENTRY glSecondaryColor3dv (const GLdouble *v); +GLAPI void APIENTRY glSecondaryColor3f (GLfloat red, GLfloat green, GLfloat blue); +GLAPI void APIENTRY glSecondaryColor3fv (const GLfloat *v); +GLAPI void APIENTRY glSecondaryColor3i (GLint red, GLint green, GLint blue); +GLAPI void APIENTRY glSecondaryColor3iv (const GLint *v); +GLAPI void APIENTRY glSecondaryColor3s (GLshort red, GLshort green, GLshort blue); +GLAPI void APIENTRY glSecondaryColor3sv (const GLshort *v); +GLAPI void APIENTRY glSecondaryColor3ub (GLubyte red, GLubyte green, GLubyte blue); +GLAPI void APIENTRY glSecondaryColor3ubv (const GLubyte *v); +GLAPI void APIENTRY glSecondaryColor3ui (GLuint red, GLuint green, GLuint blue); +GLAPI void APIENTRY glSecondaryColor3uiv (const GLuint *v); +GLAPI void APIENTRY glSecondaryColor3us (GLushort red, GLushort green, GLushort blue); +GLAPI void APIENTRY glSecondaryColor3usv (const GLushort *v); +GLAPI void APIENTRY glSecondaryColorPointer (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glWindowPos2d (GLdouble x, GLdouble y); +GLAPI void APIENTRY glWindowPos2dv (const GLdouble *v); +GLAPI void APIENTRY glWindowPos2f (GLfloat x, GLfloat y); +GLAPI void APIENTRY glWindowPos2fv (const GLfloat *v); +GLAPI void APIENTRY glWindowPos2i (GLint x, GLint y); +GLAPI void APIENTRY glWindowPos2iv (const GLint *v); +GLAPI void APIENTRY glWindowPos2s (GLshort x, GLshort y); +GLAPI void APIENTRY glWindowPos2sv (const GLshort *v); +GLAPI void APIENTRY glWindowPos3d (GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glWindowPos3dv (const GLdouble *v); +GLAPI void APIENTRY glWindowPos3f (GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glWindowPos3fv (const GLfloat *v); +GLAPI void APIENTRY glWindowPos3i (GLint x, GLint y, GLint z); +GLAPI void APIENTRY glWindowPos3iv (const GLint *v); +GLAPI void APIENTRY glWindowPos3s (GLshort x, GLshort y, GLshort z); +GLAPI void APIENTRY glWindowPos3sv (const GLshort *v); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei drawcount); +typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSPROC) (GLenum mode, const GLsizei *count, GLenum type, const GLvoid* const *indices, GLsizei drawcount); +typedef void (APIENTRYP PFNGLPOINTPARAMETERFPROC) (GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLPOINTPARAMETERFVPROC) (GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLPOINTPARAMETERIPROC) (GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLPOINTPARAMETERIVPROC) (GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLFOGCOORDFPROC) (GLfloat coord); +typedef void (APIENTRYP PFNGLFOGCOORDFVPROC) (const GLfloat *coord); +typedef void (APIENTRYP PFNGLFOGCOORDDPROC) (GLdouble coord); +typedef void (APIENTRYP PFNGLFOGCOORDDVPROC) (const GLdouble *coord); +typedef void (APIENTRYP PFNGLFOGCOORDPOINTERPROC) (GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3BPROC) (GLbyte red, GLbyte green, GLbyte blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3BVPROC) (const GLbyte *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3DPROC) (GLdouble red, GLdouble green, GLdouble blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3DVPROC) (const GLdouble *v); +typedef void (APIENTRYP 
PFNGLSECONDARYCOLOR3FPROC) (GLfloat red, GLfloat green, GLfloat blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3FVPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3IPROC) (GLint red, GLint green, GLint blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3IVPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3SPROC) (GLshort red, GLshort green, GLshort blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3SVPROC) (const GLshort *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UBPROC) (GLubyte red, GLubyte green, GLubyte blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UBVPROC) (const GLubyte *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UIPROC) (GLuint red, GLuint green, GLuint blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UIVPROC) (const GLuint *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3USPROC) (GLushort red, GLushort green, GLushort blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3USVPROC) (const GLushort *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLORPOINTERPROC) (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLWINDOWPOS2DPROC) (GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLWINDOWPOS2DVPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLWINDOWPOS2FPROC) (GLfloat x, GLfloat y); +typedef void (APIENTRYP PFNGLWINDOWPOS2FVPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLWINDOWPOS2IPROC) (GLint x, GLint y); +typedef void (APIENTRYP PFNGLWINDOWPOS2IVPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLWINDOWPOS2SPROC) (GLshort x, GLshort y); +typedef void (APIENTRYP PFNGLWINDOWPOS2SVPROC) (const GLshort *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3DPROC) (GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLWINDOWPOS3DVPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3FPROC) (GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLWINDOWPOS3FVPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3IPROC) (GLint x, GLint y, GLint z); +typedef void (APIENTRYP PFNGLWINDOWPOS3IVPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3SPROC) (GLshort x, GLshort y, GLshort z); +typedef void (APIENTRYP PFNGLWINDOWPOS3SVPROC) (const GLshort *v); +#endif + +#ifndef GL_VERSION_1_5 +#define GL_VERSION_1_5 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGenQueries (GLsizei n, GLuint *ids); +GLAPI void APIENTRY glDeleteQueries (GLsizei n, const GLuint *ids); +GLAPI GLboolean APIENTRY glIsQuery (GLuint id); +GLAPI void APIENTRY glBeginQuery (GLenum target, GLuint id); +GLAPI void APIENTRY glEndQuery (GLenum target); +GLAPI void APIENTRY glGetQueryiv (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetQueryObjectiv (GLuint id, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetQueryObjectuiv (GLuint id, GLenum pname, GLuint *params); +GLAPI void APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GLAPI void APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GLAPI void APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GLAPI GLboolean APIENTRY glIsBuffer (GLuint buffer); +GLAPI void APIENTRY glBufferData (GLenum target, GLsizeiptr size, const GLvoid *data, GLenum usage); +GLAPI void APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid *data); +GLAPI void APIENTRY glGetBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, GLvoid *data); +GLAPI GLvoid* APIENTRY glMapBuffer (GLenum target, GLenum access); +GLAPI GLboolean APIENTRY 
glUnmapBuffer (GLenum target); +GLAPI void APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetBufferPointerv (GLenum target, GLenum pname, GLvoid* *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGENQUERIESPROC) (GLsizei n, GLuint *ids); +typedef void (APIENTRYP PFNGLDELETEQUERIESPROC) (GLsizei n, const GLuint *ids); +typedef GLboolean (APIENTRYP PFNGLISQUERYPROC) (GLuint id); +typedef void (APIENTRYP PFNGLBEGINQUERYPROC) (GLenum target, GLuint id); +typedef void (APIENTRYP PFNGLENDQUERYPROC) (GLenum target); +typedef void (APIENTRYP PFNGLGETQUERYIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETQUERYOBJECTIVPROC) (GLuint id, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETQUERYOBJECTUIVPROC) (GLuint id, GLenum pname, GLuint *params); +typedef void (APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer); +typedef void (APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers); +typedef void (APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers); +typedef GLboolean (APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer); +typedef void (APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const GLvoid *data, GLenum usage); +typedef void (APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid *data); +typedef void (APIENTRYP PFNGLGETBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, GLvoid *data); +typedef GLvoid* (APIENTRYP PFNGLMAPBUFFERPROC) (GLenum target, GLenum access); +typedef GLboolean (APIENTRYP PFNGLUNMAPBUFFERPROC) (GLenum target); +typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETBUFFERPOINTERVPROC) (GLenum target, GLenum pname, GLvoid* *params); +#endif + +#ifndef GL_VERSION_2_0 +#define GL_VERSION_2_0 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha); +GLAPI void APIENTRY glDrawBuffers (GLsizei n, const GLenum *bufs); +GLAPI void APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GLAPI void APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask); +GLAPI void APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask); +GLAPI void APIENTRY glAttachShader (GLuint program, GLuint shader); +GLAPI void APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name); +GLAPI void APIENTRY glCompileShader (GLuint shader); +GLAPI GLuint APIENTRY glCreateProgram (void); +GLAPI GLuint APIENTRY glCreateShader (GLenum type); +GLAPI void APIENTRY glDeleteProgram (GLuint program); +GLAPI void APIENTRY glDeleteShader (GLuint shader); +GLAPI void APIENTRY glDetachShader (GLuint program, GLuint shader); +GLAPI void APIENTRY glDisableVertexAttribArray (GLuint index); +GLAPI void APIENTRY glEnableVertexAttribArray (GLuint index); +GLAPI void APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GLAPI void APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GLAPI void APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *obj); +GLAPI GLint APIENTRY glGetAttribLocation (GLuint program, const GLchar *name); +GLAPI void APIENTRY glGetProgramiv (GLuint program, GLenum pname, 
GLint *params); +GLAPI void APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GLAPI void APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GLAPI void APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +GLAPI GLint APIENTRY glGetUniformLocation (GLuint program, const GLchar *name); +GLAPI void APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params); +GLAPI void APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params); +GLAPI void APIENTRY glGetVertexAttribdv (GLuint index, GLenum pname, GLdouble *params); +GLAPI void APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, GLvoid* *pointer); +GLAPI GLboolean APIENTRY glIsProgram (GLuint program); +GLAPI GLboolean APIENTRY glIsShader (GLuint shader); +GLAPI void APIENTRY glLinkProgram (GLuint program); +GLAPI void APIENTRY glShaderSource (GLuint shader, GLsizei count, const GLchar* const *string, const GLint *length); +GLAPI void APIENTRY glUseProgram (GLuint program); +GLAPI void APIENTRY glUniform1f (GLint location, GLfloat v0); +GLAPI void APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1); +GLAPI void APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GLAPI void APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GLAPI void APIENTRY glUniform1i (GLint location, GLint v0); +GLAPI void APIENTRY glUniform2i (GLint location, GLint v0, GLint v1); +GLAPI void APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2); +GLAPI void APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GLAPI void APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glValidateProgram (GLuint program); +GLAPI void APIENTRY glVertexAttrib1d (GLuint index, GLdouble x); +GLAPI void APIENTRY glVertexAttrib1dv (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib1f (GLuint index, GLfloat x); +GLAPI void APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib1s (GLuint index, GLshort x); +GLAPI void APIENTRY glVertexAttrib1sv (GLuint index, const GLshort *v); +GLAPI void APIENTRY 
glVertexAttrib2d (GLuint index, GLdouble x, GLdouble y); +GLAPI void APIENTRY glVertexAttrib2dv (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y); +GLAPI void APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib2s (GLuint index, GLshort x, GLshort y); +GLAPI void APIENTRY glVertexAttrib2sv (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib3d (GLuint index, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glVertexAttrib3dv (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib3s (GLuint index, GLshort x, GLshort y, GLshort z); +GLAPI void APIENTRY glVertexAttrib3sv (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib4Nbv (GLuint index, const GLbyte *v); +GLAPI void APIENTRY glVertexAttrib4Niv (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttrib4Nsv (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib4Nub (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w); +GLAPI void APIENTRY glVertexAttrib4Nubv (GLuint index, const GLubyte *v); +GLAPI void APIENTRY glVertexAttrib4Nuiv (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttrib4Nusv (GLuint index, const GLushort *v); +GLAPI void APIENTRY glVertexAttrib4bv (GLuint index, const GLbyte *v); +GLAPI void APIENTRY glVertexAttrib4d (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glVertexAttrib4dv (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib4iv (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttrib4s (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w); +GLAPI void APIENTRY glVertexAttrib4sv (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib4ubv (GLuint index, const GLubyte *v); +GLAPI void APIENTRY glVertexAttrib4uiv (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttrib4usv (GLuint index, const GLushort *v); +GLAPI void APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const GLvoid *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha); +typedef void (APIENTRYP PFNGLDRAWBUFFERSPROC) (GLsizei n, const GLenum *bufs); +typedef void (APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, GLuint mask); +typedef void (APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask); +typedef void (APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name); +typedef void (APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader); +typedef GLuint (APIENTRYP PFNGLCREATEPROGRAMPROC) (void); +typedef GLuint (APIENTRYP PFNGLCREATESHADERPROC) (GLenum type); +typedef void (APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program); +typedef void (APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader); +typedef void (APIENTRYP PFNGLDETACHSHADERPROC) 
(GLuint program, GLuint shader); +typedef void (APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *obj); +typedef GLint (APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +typedef GLint (APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params); +typedef void (APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBDVPROC) (GLuint index, GLenum pname, GLdouble *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, GLvoid* *pointer); +typedef GLboolean (APIENTRYP PFNGLISPROGRAMPROC) (GLuint program); +typedef GLboolean (APIENTRYP PFNGLISSHADERPROC) (GLuint shader); +typedef void (APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program); +typedef void (APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar* const *string, const GLint *length); +typedef void (APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program); +typedef void (APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0); +typedef void (APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1); +typedef void (APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0); +typedef void (APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1); +typedef void (APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2); +typedef void (APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, 
const GLint *value); +typedef void (APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1DPROC) (GLuint index, GLdouble x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1DVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1SPROC) (GLuint index, GLshort x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1SVPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2DPROC) (GLuint index, GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2DVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2SPROC) (GLuint index, GLshort x, GLshort y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2SVPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3DPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3DVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3SPROC) (GLuint index, GLshort x, GLshort y, GLshort z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3SVPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NBVPROC) (GLuint index, const GLbyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NIVPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NSVPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBPROC) (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBVPROC) (GLuint index, const GLubyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUIVPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUSVPROC) (GLuint index, const GLushort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4BVPROC) (GLuint index, const GLbyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4DPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4DVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4IVPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP 
PFNGLVERTEXATTRIB4SPROC) (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4SVPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4UBVPROC) (GLuint index, const GLubyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4UIVPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4USVPROC) (GLuint index, const GLushort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const GLvoid *pointer); +#endif + +#ifndef GL_VERSION_2_1 +#define GL_VERSION_2_1 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glUniformMatrix2x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glUniformMatrix3x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glUniformMatrix2x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glUniformMatrix4x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glUniformMatrix3x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glUniformMatrix4x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +#endif + +#ifndef GL_VERSION_3_0 +#define GL_VERSION_3_0 1 +/* OpenGL 3.0 also reuses entry points from these extensions: */ +/* ARB_framebuffer_object */ +/* ARB_map_buffer_range */ +/* ARB_vertex_array_object */ +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glColorMaski (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +GLAPI void APIENTRY glGetBooleani_v (GLenum target, GLuint index, GLboolean *data); +GLAPI void APIENTRY glGetIntegeri_v (GLenum target, GLuint index, GLint *data); +GLAPI void APIENTRY glEnablei (GLenum target, GLuint index); +GLAPI void APIENTRY glDisablei (GLenum target, GLuint index); +GLAPI GLboolean APIENTRY glIsEnabledi (GLenum target, GLuint index); +GLAPI void APIENTRY glBeginTransformFeedback (GLenum primitiveMode); +GLAPI void APIENTRY glEndTransformFeedback (void); +GLAPI void APIENTRY glBindBufferRange (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +GLAPI void APIENTRY glBindBufferBase (GLenum target, GLuint index, GLuint buffer); +GLAPI void APIENTRY glTransformFeedbackVaryings (GLuint program, GLsizei count, const GLchar* const *varyings, GLenum bufferMode); +GLAPI void APIENTRY glGetTransformFeedbackVarying (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +GLAPI void APIENTRY glClampColor 
(GLenum target, GLenum clamp); +GLAPI void APIENTRY glBeginConditionalRender (GLuint id, GLenum mode); +GLAPI void APIENTRY glEndConditionalRender (void); +GLAPI void APIENTRY glVertexAttribIPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glGetVertexAttribIiv (GLuint index, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetVertexAttribIuiv (GLuint index, GLenum pname, GLuint *params); +GLAPI void APIENTRY glVertexAttribI1i (GLuint index, GLint x); +GLAPI void APIENTRY glVertexAttribI2i (GLuint index, GLint x, GLint y); +GLAPI void APIENTRY glVertexAttribI3i (GLuint index, GLint x, GLint y, GLint z); +GLAPI void APIENTRY glVertexAttribI4i (GLuint index, GLint x, GLint y, GLint z, GLint w); +GLAPI void APIENTRY glVertexAttribI1ui (GLuint index, GLuint x); +GLAPI void APIENTRY glVertexAttribI2ui (GLuint index, GLuint x, GLuint y); +GLAPI void APIENTRY glVertexAttribI3ui (GLuint index, GLuint x, GLuint y, GLuint z); +GLAPI void APIENTRY glVertexAttribI4ui (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GLAPI void APIENTRY glVertexAttribI1iv (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttribI2iv (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttribI3iv (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttribI4iv (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttribI1uiv (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttribI2uiv (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttribI3uiv (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttribI4uiv (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttribI4bv (GLuint index, const GLbyte *v); +GLAPI void APIENTRY glVertexAttribI4sv (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttribI4ubv (GLuint index, const GLubyte *v); +GLAPI void APIENTRY glVertexAttribI4usv (GLuint index, const GLushort *v); +GLAPI void APIENTRY glGetUniformuiv (GLuint program, GLint location, GLuint *params); +GLAPI void APIENTRY glBindFragDataLocation (GLuint program, GLuint color, const GLchar *name); +GLAPI GLint APIENTRY glGetFragDataLocation (GLuint program, const GLchar *name); +GLAPI void APIENTRY glUniform1ui (GLint location, GLuint v0); +GLAPI void APIENTRY glUniform2ui (GLint location, GLuint v0, GLuint v1); +GLAPI void APIENTRY glUniform3ui (GLint location, GLuint v0, GLuint v1, GLuint v2); +GLAPI void APIENTRY glUniform4ui (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GLAPI void APIENTRY glUniform1uiv (GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glUniform2uiv (GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glUniform3uiv (GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glUniform4uiv (GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glTexParameterIiv (GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glTexParameterIuiv (GLenum target, GLenum pname, const GLuint *params); +GLAPI void APIENTRY glGetTexParameterIiv (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetTexParameterIuiv (GLenum target, GLenum pname, GLuint *params); +GLAPI void APIENTRY glClearBufferiv (GLenum buffer, GLint drawbuffer, const GLint *value); +GLAPI void APIENTRY glClearBufferuiv (GLenum buffer, GLint drawbuffer, const GLuint *value); +GLAPI void APIENTRY glClearBufferfv (GLenum buffer, GLint drawbuffer, const GLfloat *value); 
+GLAPI void APIENTRY glClearBufferfi (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +GLAPI const GLubyte * APIENTRY glGetStringi (GLenum name, GLuint index); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOLORMASKIPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +typedef void (APIENTRYP PFNGLGETBOOLEANI_VPROC) (GLenum target, GLuint index, GLboolean *data); +typedef void (APIENTRYP PFNGLGETINTEGERI_VPROC) (GLenum target, GLuint index, GLint *data); +typedef void (APIENTRYP PFNGLENABLEIPROC) (GLenum target, GLuint index); +typedef void (APIENTRYP PFNGLDISABLEIPROC) (GLenum target, GLuint index); +typedef GLboolean (APIENTRYP PFNGLISENABLEDIPROC) (GLenum target, GLuint index); +typedef void (APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKPROC) (GLenum primitiveMode); +typedef void (APIENTRYP PFNGLENDTRANSFORMFEEDBACKPROC) (void); +typedef void (APIENTRYP PFNGLBINDBUFFERRANGEPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (APIENTRYP PFNGLBINDBUFFERBASEPROC) (GLenum target, GLuint index, GLuint buffer); +typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSPROC) (GLuint program, GLsizei count, const GLchar* const *varyings, GLenum bufferMode); +typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +typedef void (APIENTRYP PFNGLCLAMPCOLORPROC) (GLenum target, GLenum clamp); +typedef void (APIENTRYP PFNGLBEGINCONDITIONALRENDERPROC) (GLuint id, GLenum mode); +typedef void (APIENTRYP PFNGLENDCONDITIONALRENDERPROC) (void); +typedef void (APIENTRYP PFNGLVERTEXATTRIBIPOINTERPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIUIVPROC) (GLuint index, GLenum pname, GLuint *params); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IPROC) (GLuint index, GLint x); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IPROC) (GLuint index, GLint x, GLint y); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IPROC) (GLuint index, GLint x, GLint y, GLint z); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIPROC) (GLuint index, GLuint x); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIPROC) (GLuint index, GLuint x, GLuint y); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IVPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IVPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IVPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IVPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIVPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIVPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIVPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIVPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4BVPROC) (GLuint index, const GLbyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4SVPROC) (GLuint 
index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UBVPROC) (GLuint index, const GLubyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4USVPROC) (GLuint index, const GLushort *v); +typedef void (APIENTRYP PFNGLGETUNIFORMUIVPROC) (GLuint program, GLint location, GLuint *params); +typedef void (APIENTRYP PFNGLBINDFRAGDATALOCATIONPROC) (GLuint program, GLuint color, const GLchar *name); +typedef GLint (APIENTRYP PFNGLGETFRAGDATALOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (APIENTRYP PFNGLUNIFORM1UIPROC) (GLint location, GLuint v0); +typedef void (APIENTRYP PFNGLUNIFORM2UIPROC) (GLint location, GLuint v0, GLuint v1); +typedef void (APIENTRYP PFNGLUNIFORM3UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (APIENTRYP PFNGLUNIFORM4UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (APIENTRYP PFNGLUNIFORM1UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLUNIFORM2UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLUNIFORM3UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLUNIFORM4UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLTEXPARAMETERIIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLTEXPARAMETERIUIVPROC) (GLenum target, GLenum pname, const GLuint *params); +typedef void (APIENTRYP PFNGLGETTEXPARAMETERIIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETTEXPARAMETERIUIVPROC) (GLenum target, GLenum pname, GLuint *params); +typedef void (APIENTRYP PFNGLCLEARBUFFERIVPROC) (GLenum buffer, GLint drawbuffer, const GLint *value); +typedef void (APIENTRYP PFNGLCLEARBUFFERUIVPROC) (GLenum buffer, GLint drawbuffer, const GLuint *value); +typedef void (APIENTRYP PFNGLCLEARBUFFERFVPROC) (GLenum buffer, GLint drawbuffer, const GLfloat *value); +typedef void (APIENTRYP PFNGLCLEARBUFFERFIPROC) (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +typedef const GLubyte * (APIENTRYP PFNGLGETSTRINGIPROC) (GLenum name, GLuint index); +#endif + +#ifndef GL_VERSION_3_1 +#define GL_VERSION_3_1 1 +/* OpenGL 3.1 also reuses entry points from these extensions: */ +/* ARB_copy_buffer */ +/* ARB_uniform_buffer_object */ +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawArraysInstanced (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +GLAPI void APIENTRY glDrawElementsInstanced (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei instancecount); +GLAPI void APIENTRY glTexBuffer (GLenum target, GLenum internalformat, GLuint buffer); +GLAPI void APIENTRY glPrimitiveRestartIndex (GLuint index); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWARRAYSINSTANCEDPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDPROC) (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei instancecount); +typedef void (APIENTRYP PFNGLTEXBUFFERPROC) (GLenum target, GLenum internalformat, GLuint buffer); +typedef void (APIENTRYP PFNGLPRIMITIVERESTARTINDEXPROC) (GLuint index); +#endif + +#ifndef GL_VERSION_3_2 +#define GL_VERSION_3_2 1 +/* OpenGL 3.2 also reuses entry points from these extensions: */ +/* ARB_draw_elements_base_vertex */ +/* ARB_provoking_vertex */ +/* ARB_sync */ +/* ARB_texture_multisample */ +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void 
APIENTRY glGetInteger64i_v (GLenum target, GLuint index, GLint64 *data); +GLAPI void APIENTRY glGetBufferParameteri64v (GLenum target, GLenum pname, GLint64 *params); +GLAPI void APIENTRY glFramebufferTexture (GLenum target, GLenum attachment, GLuint texture, GLint level); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETINTEGER64I_VPROC) (GLenum target, GLuint index, GLint64 *data); +typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERI64VPROC) (GLenum target, GLenum pname, GLint64 *params); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level); +#endif + +#ifndef GL_VERSION_3_3 +#define GL_VERSION_3_3 1 +/* OpenGL 3.3 also reuses entry points from these extensions: */ +/* ARB_blend_func_extended */ +/* ARB_sampler_objects */ +/* ARB_explicit_attrib_location, but it has none */ +/* ARB_occlusion_query2 (no entry points) */ +/* ARB_shader_bit_encoding (no entry points) */ +/* ARB_texture_rgb10_a2ui (no entry points) */ +/* ARB_texture_swizzle (no entry points) */ +/* ARB_timer_query */ +/* ARB_vertex_type_2_10_10_10_rev */ +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexAttribDivisor (GLuint index, GLuint divisor); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXATTRIBDIVISORPROC) (GLuint index, GLuint divisor); +#endif + +#ifndef GL_VERSION_4_0 +#define GL_VERSION_4_0 1 +/* OpenGL 4.0 also reuses entry points from these extensions: */ +/* ARB_texture_query_lod (no entry points) */ +/* ARB_draw_indirect */ +/* ARB_gpu_shader5 (no entry points) */ +/* ARB_gpu_shader_fp64 */ +/* ARB_shader_subroutine */ +/* ARB_tessellation_shader */ +/* ARB_texture_buffer_object_rgb32 (no entry points) */ +/* ARB_texture_cube_map_array (no entry points) */ +/* ARB_texture_gather (no entry points) */ +/* ARB_transform_feedback2 */ +/* ARB_transform_feedback3 */ +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glMinSampleShading (GLfloat value); +GLAPI void APIENTRY glBlendEquationi (GLuint buf, GLenum mode); +GLAPI void APIENTRY glBlendEquationSeparatei (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +GLAPI void APIENTRY glBlendFunci (GLuint buf, GLenum src, GLenum dst); +GLAPI void APIENTRY glBlendFuncSeparatei (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLMINSAMPLESHADINGPROC) (GLfloat value); +typedef void (APIENTRYP PFNGLBLENDEQUATIONIPROC) (GLuint buf, GLenum mode); +typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEIPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +typedef void (APIENTRYP PFNGLBLENDFUNCIPROC) (GLuint buf, GLenum src, GLenum dst); +typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEIPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +#endif + +#ifndef GL_VERSION_4_1 +#define GL_VERSION_4_1 1 +/* OpenGL 4.1 reuses entry points from these extensions: */ +/* ARB_ES2_compatibility */ +/* ARB_get_program_binary */ +/* ARB_separate_shader_objects */ +/* ARB_shader_precision (no entry points) */ +/* ARB_vertex_attrib_64bit */ +/* ARB_viewport_array */ +#endif + +#ifndef GL_VERSION_4_2 +#define GL_VERSION_4_2 1 +/* OpenGL 4.2 reuses entry points from these extensions: */ +/* ARB_base_instance */ +/* ARB_shading_language_420pack (no entry points) */ +/* ARB_transform_feedback_instanced */ +/* ARB_compressed_texture_pixel_storage (no entry points) */ +/* ARB_conservative_depth (no entry points) */ +/* ARB_internalformat_query */ +/* ARB_map_buffer_alignment (no 
entry points) */ +/* ARB_shader_atomic_counters */ +/* ARB_shader_image_load_store */ +/* ARB_shading_language_packing (no entry points) */ +/* ARB_texture_storage */ +#endif + +#ifndef GL_VERSION_4_3 +#define GL_VERSION_4_3 1 +/* OpenGL 4.3 reuses entry points from these extensions: */ +/* ARB_arrays_of_arrays (no entry points, GLSL only) */ +/* ARB_fragment_layer_viewport (no entry points, GLSL only) */ +/* ARB_shader_image_size (no entry points, GLSL only) */ +/* ARB_ES3_compatibility (no entry points) */ +/* ARB_clear_buffer_object */ +/* ARB_compute_shader */ +/* ARB_copy_image */ +/* KHR_debug (includes ARB_debug_output commands promoted to KHR without suffixes) */ +/* ARB_explicit_uniform_location (no entry points) */ +/* ARB_framebuffer_no_attachments */ +/* ARB_internalformat_query2 */ +/* ARB_invalidate_subdata */ +/* ARB_multi_draw_indirect */ +/* ARB_program_interface_query */ +/* ARB_robust_buffer_access_behavior (no entry points) */ +/* ARB_shader_storage_buffer_object */ +/* ARB_stencil_texturing (no entry points) */ +/* ARB_texture_buffer_range */ +/* ARB_texture_query_levels (no entry points) */ +/* ARB_texture_storage_multisample */ +/* ARB_texture_view */ +/* ARB_vertex_attrib_binding */ +#endif + +#ifndef GL_ARB_multitexture +#define GL_ARB_multitexture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glActiveTextureARB (GLenum texture); +GLAPI void APIENTRY glClientActiveTextureARB (GLenum texture); +GLAPI void APIENTRY glMultiTexCoord1dARB (GLenum target, GLdouble s); +GLAPI void APIENTRY glMultiTexCoord1dvARB (GLenum target, const GLdouble *v); +GLAPI void APIENTRY glMultiTexCoord1fARB (GLenum target, GLfloat s); +GLAPI void APIENTRY glMultiTexCoord1fvARB (GLenum target, const GLfloat *v); +GLAPI void APIENTRY glMultiTexCoord1iARB (GLenum target, GLint s); +GLAPI void APIENTRY glMultiTexCoord1ivARB (GLenum target, const GLint *v); +GLAPI void APIENTRY glMultiTexCoord1sARB (GLenum target, GLshort s); +GLAPI void APIENTRY glMultiTexCoord1svARB (GLenum target, const GLshort *v); +GLAPI void APIENTRY glMultiTexCoord2dARB (GLenum target, GLdouble s, GLdouble t); +GLAPI void APIENTRY glMultiTexCoord2dvARB (GLenum target, const GLdouble *v); +GLAPI void APIENTRY glMultiTexCoord2fARB (GLenum target, GLfloat s, GLfloat t); +GLAPI void APIENTRY glMultiTexCoord2fvARB (GLenum target, const GLfloat *v); +GLAPI void APIENTRY glMultiTexCoord2iARB (GLenum target, GLint s, GLint t); +GLAPI void APIENTRY glMultiTexCoord2ivARB (GLenum target, const GLint *v); +GLAPI void APIENTRY glMultiTexCoord2sARB (GLenum target, GLshort s, GLshort t); +GLAPI void APIENTRY glMultiTexCoord2svARB (GLenum target, const GLshort *v); +GLAPI void APIENTRY glMultiTexCoord3dARB (GLenum target, GLdouble s, GLdouble t, GLdouble r); +GLAPI void APIENTRY glMultiTexCoord3dvARB (GLenum target, const GLdouble *v); +GLAPI void APIENTRY glMultiTexCoord3fARB (GLenum target, GLfloat s, GLfloat t, GLfloat r); +GLAPI void APIENTRY glMultiTexCoord3fvARB (GLenum target, const GLfloat *v); +GLAPI void APIENTRY glMultiTexCoord3iARB (GLenum target, GLint s, GLint t, GLint r); +GLAPI void APIENTRY glMultiTexCoord3ivARB (GLenum target, const GLint *v); +GLAPI void APIENTRY glMultiTexCoord3sARB (GLenum target, GLshort s, GLshort t, GLshort r); +GLAPI void APIENTRY glMultiTexCoord3svARB (GLenum target, const GLshort *v); +GLAPI void APIENTRY glMultiTexCoord4dARB (GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q); +GLAPI void APIENTRY glMultiTexCoord4dvARB (GLenum target, const GLdouble *v); +GLAPI void APIENTRY 
glMultiTexCoord4fARB (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q); +GLAPI void APIENTRY glMultiTexCoord4fvARB (GLenum target, const GLfloat *v); +GLAPI void APIENTRY glMultiTexCoord4iARB (GLenum target, GLint s, GLint t, GLint r, GLint q); +GLAPI void APIENTRY glMultiTexCoord4ivARB (GLenum target, const GLint *v); +GLAPI void APIENTRY glMultiTexCoord4sARB (GLenum target, GLshort s, GLshort t, GLshort r, GLshort q); +GLAPI void APIENTRY glMultiTexCoord4svARB (GLenum target, const GLshort *v); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLACTIVETEXTUREARBPROC) (GLenum texture); +typedef void (APIENTRYP PFNGLCLIENTACTIVETEXTUREARBPROC) (GLenum texture); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1DARBPROC) (GLenum target, GLdouble s); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1DVARBPROC) (GLenum target, const GLdouble *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1FARBPROC) (GLenum target, GLfloat s); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1FVARBPROC) (GLenum target, const GLfloat *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1IARBPROC) (GLenum target, GLint s); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1IVARBPROC) (GLenum target, const GLint *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1SARBPROC) (GLenum target, GLshort s); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1SVARBPROC) (GLenum target, const GLshort *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2DARBPROC) (GLenum target, GLdouble s, GLdouble t); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2DVARBPROC) (GLenum target, const GLdouble *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2FARBPROC) (GLenum target, GLfloat s, GLfloat t); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2FVARBPROC) (GLenum target, const GLfloat *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2IARBPROC) (GLenum target, GLint s, GLint t); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2IVARBPROC) (GLenum target, const GLint *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2SARBPROC) (GLenum target, GLshort s, GLshort t); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2SVARBPROC) (GLenum target, const GLshort *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3DARBPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3DVARBPROC) (GLenum target, const GLdouble *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3FARBPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3FVARBPROC) (GLenum target, const GLfloat *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3IARBPROC) (GLenum target, GLint s, GLint t, GLint r); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3IVARBPROC) (GLenum target, const GLint *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3SARBPROC) (GLenum target, GLshort s, GLshort t, GLshort r); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3SVARBPROC) (GLenum target, const GLshort *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4DARBPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4DVARBPROC) (GLenum target, const GLdouble *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4FARBPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4FVARBPROC) (GLenum target, const GLfloat *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4IARBPROC) (GLenum target, GLint s, GLint t, GLint r, GLint q); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4IVARBPROC) (GLenum target, const GLint *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4SARBPROC) (GLenum 
target, GLshort s, GLshort t, GLshort r, GLshort q); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4SVARBPROC) (GLenum target, const GLshort *v); +#endif + +#ifndef GL_ARB_transpose_matrix +#define GL_ARB_transpose_matrix 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glLoadTransposeMatrixfARB (const GLfloat *m); +GLAPI void APIENTRY glLoadTransposeMatrixdARB (const GLdouble *m); +GLAPI void APIENTRY glMultTransposeMatrixfARB (const GLfloat *m); +GLAPI void APIENTRY glMultTransposeMatrixdARB (const GLdouble *m); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLLOADTRANSPOSEMATRIXFARBPROC) (const GLfloat *m); +typedef void (APIENTRYP PFNGLLOADTRANSPOSEMATRIXDARBPROC) (const GLdouble *m); +typedef void (APIENTRYP PFNGLMULTTRANSPOSEMATRIXFARBPROC) (const GLfloat *m); +typedef void (APIENTRYP PFNGLMULTTRANSPOSEMATRIXDARBPROC) (const GLdouble *m); +#endif + +#ifndef GL_ARB_multisample +#define GL_ARB_multisample 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glSampleCoverageARB (GLfloat value, GLboolean invert); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSAMPLECOVERAGEARBPROC) (GLfloat value, GLboolean invert); +#endif + +#ifndef GL_ARB_texture_env_add +#define GL_ARB_texture_env_add 1 +#endif + +#ifndef GL_ARB_texture_cube_map +#define GL_ARB_texture_cube_map 1 +#endif + +#ifndef GL_ARB_texture_compression +#define GL_ARB_texture_compression 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glCompressedTexImage3DARB (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexImage2DARB (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexImage1DARB (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexSubImage3DARB (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexSubImage2DARB (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glCompressedTexSubImage1DARB (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *data); +GLAPI void APIENTRY glGetCompressedTexImageARB (GLenum target, GLint level, GLvoid *img); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DARBPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DARBPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE1DARBPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DARBPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP 
PFNGLCOMPRESSEDTEXSUBIMAGE2DARBPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE1DARBPROC) (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *data); +typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXIMAGEARBPROC) (GLenum target, GLint level, GLvoid *img); +#endif + +#ifndef GL_ARB_texture_border_clamp +#define GL_ARB_texture_border_clamp 1 +#endif + +#ifndef GL_ARB_point_parameters +#define GL_ARB_point_parameters 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPointParameterfARB (GLenum pname, GLfloat param); +GLAPI void APIENTRY glPointParameterfvARB (GLenum pname, const GLfloat *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPOINTPARAMETERFARBPROC) (GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLPOINTPARAMETERFVARBPROC) (GLenum pname, const GLfloat *params); +#endif + +#ifndef GL_ARB_vertex_blend +#define GL_ARB_vertex_blend 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glWeightbvARB (GLint size, const GLbyte *weights); +GLAPI void APIENTRY glWeightsvARB (GLint size, const GLshort *weights); +GLAPI void APIENTRY glWeightivARB (GLint size, const GLint *weights); +GLAPI void APIENTRY glWeightfvARB (GLint size, const GLfloat *weights); +GLAPI void APIENTRY glWeightdvARB (GLint size, const GLdouble *weights); +GLAPI void APIENTRY glWeightubvARB (GLint size, const GLubyte *weights); +GLAPI void APIENTRY glWeightusvARB (GLint size, const GLushort *weights); +GLAPI void APIENTRY glWeightuivARB (GLint size, const GLuint *weights); +GLAPI void APIENTRY glWeightPointerARB (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glVertexBlendARB (GLint count); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLWEIGHTBVARBPROC) (GLint size, const GLbyte *weights); +typedef void (APIENTRYP PFNGLWEIGHTSVARBPROC) (GLint size, const GLshort *weights); +typedef void (APIENTRYP PFNGLWEIGHTIVARBPROC) (GLint size, const GLint *weights); +typedef void (APIENTRYP PFNGLWEIGHTFVARBPROC) (GLint size, const GLfloat *weights); +typedef void (APIENTRYP PFNGLWEIGHTDVARBPROC) (GLint size, const GLdouble *weights); +typedef void (APIENTRYP PFNGLWEIGHTUBVARBPROC) (GLint size, const GLubyte *weights); +typedef void (APIENTRYP PFNGLWEIGHTUSVARBPROC) (GLint size, const GLushort *weights); +typedef void (APIENTRYP PFNGLWEIGHTUIVARBPROC) (GLint size, const GLuint *weights); +typedef void (APIENTRYP PFNGLWEIGHTPOINTERARBPROC) (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLVERTEXBLENDARBPROC) (GLint count); +#endif + +#ifndef GL_ARB_matrix_palette +#define GL_ARB_matrix_palette 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glCurrentPaletteMatrixARB (GLint index); +GLAPI void APIENTRY glMatrixIndexubvARB (GLint size, const GLubyte *indices); +GLAPI void APIENTRY glMatrixIndexusvARB (GLint size, const GLushort *indices); +GLAPI void APIENTRY glMatrixIndexuivARB (GLint size, const GLuint *indices); +GLAPI void APIENTRY glMatrixIndexPointerARB (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCURRENTPALETTEMATRIXARBPROC) (GLint index); +typedef void (APIENTRYP PFNGLMATRIXINDEXUBVARBPROC) (GLint size, const GLubyte *indices); +typedef void (APIENTRYP PFNGLMATRIXINDEXUSVARBPROC) (GLint size, const GLushort 
*indices); +typedef void (APIENTRYP PFNGLMATRIXINDEXUIVARBPROC) (GLint size, const GLuint *indices); +typedef void (APIENTRYP PFNGLMATRIXINDEXPOINTERARBPROC) (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +#endif + +#ifndef GL_ARB_texture_env_combine +#define GL_ARB_texture_env_combine 1 +#endif + +#ifndef GL_ARB_texture_env_crossbar +#define GL_ARB_texture_env_crossbar 1 +#endif + +#ifndef GL_ARB_texture_env_dot3 +#define GL_ARB_texture_env_dot3 1 +#endif + +#ifndef GL_ARB_texture_mirrored_repeat +#define GL_ARB_texture_mirrored_repeat 1 +#endif + +#ifndef GL_ARB_depth_texture +#define GL_ARB_depth_texture 1 +#endif + +#ifndef GL_ARB_shadow +#define GL_ARB_shadow 1 +#endif + +#ifndef GL_ARB_shadow_ambient +#define GL_ARB_shadow_ambient 1 +#endif + +#ifndef GL_ARB_window_pos +#define GL_ARB_window_pos 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glWindowPos2dARB (GLdouble x, GLdouble y); +GLAPI void APIENTRY glWindowPos2dvARB (const GLdouble *v); +GLAPI void APIENTRY glWindowPos2fARB (GLfloat x, GLfloat y); +GLAPI void APIENTRY glWindowPos2fvARB (const GLfloat *v); +GLAPI void APIENTRY glWindowPos2iARB (GLint x, GLint y); +GLAPI void APIENTRY glWindowPos2ivARB (const GLint *v); +GLAPI void APIENTRY glWindowPos2sARB (GLshort x, GLshort y); +GLAPI void APIENTRY glWindowPos2svARB (const GLshort *v); +GLAPI void APIENTRY glWindowPos3dARB (GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glWindowPos3dvARB (const GLdouble *v); +GLAPI void APIENTRY glWindowPos3fARB (GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glWindowPos3fvARB (const GLfloat *v); +GLAPI void APIENTRY glWindowPos3iARB (GLint x, GLint y, GLint z); +GLAPI void APIENTRY glWindowPos3ivARB (const GLint *v); +GLAPI void APIENTRY glWindowPos3sARB (GLshort x, GLshort y, GLshort z); +GLAPI void APIENTRY glWindowPos3svARB (const GLshort *v); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLWINDOWPOS2DARBPROC) (GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLWINDOWPOS2DVARBPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLWINDOWPOS2FARBPROC) (GLfloat x, GLfloat y); +typedef void (APIENTRYP PFNGLWINDOWPOS2FVARBPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLWINDOWPOS2IARBPROC) (GLint x, GLint y); +typedef void (APIENTRYP PFNGLWINDOWPOS2IVARBPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLWINDOWPOS2SARBPROC) (GLshort x, GLshort y); +typedef void (APIENTRYP PFNGLWINDOWPOS2SVARBPROC) (const GLshort *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3DARBPROC) (GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLWINDOWPOS3DVARBPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3FARBPROC) (GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLWINDOWPOS3FVARBPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3IARBPROC) (GLint x, GLint y, GLint z); +typedef void (APIENTRYP PFNGLWINDOWPOS3IVARBPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3SARBPROC) (GLshort x, GLshort y, GLshort z); +typedef void (APIENTRYP PFNGLWINDOWPOS3SVARBPROC) (const GLshort *v); +#endif + +#ifndef GL_ARB_vertex_program +#define GL_ARB_vertex_program 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexAttrib1dARB (GLuint index, GLdouble x); +GLAPI void APIENTRY glVertexAttrib1dvARB (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib1fARB (GLuint index, GLfloat x); +GLAPI void APIENTRY glVertexAttrib1fvARB (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib1sARB (GLuint index, GLshort 
x); +GLAPI void APIENTRY glVertexAttrib1svARB (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib2dARB (GLuint index, GLdouble x, GLdouble y); +GLAPI void APIENTRY glVertexAttrib2dvARB (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib2fARB (GLuint index, GLfloat x, GLfloat y); +GLAPI void APIENTRY glVertexAttrib2fvARB (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib2sARB (GLuint index, GLshort x, GLshort y); +GLAPI void APIENTRY glVertexAttrib2svARB (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib3dARB (GLuint index, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glVertexAttrib3dvARB (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib3fARB (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glVertexAttrib3fvARB (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib3sARB (GLuint index, GLshort x, GLshort y, GLshort z); +GLAPI void APIENTRY glVertexAttrib3svARB (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib4NbvARB (GLuint index, const GLbyte *v); +GLAPI void APIENTRY glVertexAttrib4NivARB (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttrib4NsvARB (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib4NubARB (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w); +GLAPI void APIENTRY glVertexAttrib4NubvARB (GLuint index, const GLubyte *v); +GLAPI void APIENTRY glVertexAttrib4NuivARB (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttrib4NusvARB (GLuint index, const GLushort *v); +GLAPI void APIENTRY glVertexAttrib4bvARB (GLuint index, const GLbyte *v); +GLAPI void APIENTRY glVertexAttrib4dARB (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glVertexAttrib4dvARB (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib4fARB (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glVertexAttrib4fvARB (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib4ivARB (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttrib4sARB (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w); +GLAPI void APIENTRY glVertexAttrib4svARB (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib4ubvARB (GLuint index, const GLubyte *v); +GLAPI void APIENTRY glVertexAttrib4uivARB (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttrib4usvARB (GLuint index, const GLushort *v); +GLAPI void APIENTRY glVertexAttribPointerARB (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glEnableVertexAttribArrayARB (GLuint index); +GLAPI void APIENTRY glDisableVertexAttribArrayARB (GLuint index); +GLAPI void APIENTRY glProgramStringARB (GLenum target, GLenum format, GLsizei len, const GLvoid *string); +GLAPI void APIENTRY glBindProgramARB (GLenum target, GLuint program); +GLAPI void APIENTRY glDeleteProgramsARB (GLsizei n, const GLuint *programs); +GLAPI void APIENTRY glGenProgramsARB (GLsizei n, GLuint *programs); +GLAPI void APIENTRY glProgramEnvParameter4dARB (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glProgramEnvParameter4dvARB (GLenum target, GLuint index, const GLdouble *params); +GLAPI void APIENTRY glProgramEnvParameter4fARB (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glProgramEnvParameter4fvARB (GLenum 
target, GLuint index, const GLfloat *params); +GLAPI void APIENTRY glProgramLocalParameter4dARB (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glProgramLocalParameter4dvARB (GLenum target, GLuint index, const GLdouble *params); +GLAPI void APIENTRY glProgramLocalParameter4fARB (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glProgramLocalParameter4fvARB (GLenum target, GLuint index, const GLfloat *params); +GLAPI void APIENTRY glGetProgramEnvParameterdvARB (GLenum target, GLuint index, GLdouble *params); +GLAPI void APIENTRY glGetProgramEnvParameterfvARB (GLenum target, GLuint index, GLfloat *params); +GLAPI void APIENTRY glGetProgramLocalParameterdvARB (GLenum target, GLuint index, GLdouble *params); +GLAPI void APIENTRY glGetProgramLocalParameterfvARB (GLenum target, GLuint index, GLfloat *params); +GLAPI void APIENTRY glGetProgramivARB (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetProgramStringARB (GLenum target, GLenum pname, GLvoid *string); +GLAPI void APIENTRY glGetVertexAttribdvARB (GLuint index, GLenum pname, GLdouble *params); +GLAPI void APIENTRY glGetVertexAttribfvARB (GLuint index, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetVertexAttribivARB (GLuint index, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetVertexAttribPointervARB (GLuint index, GLenum pname, GLvoid* *pointer); +GLAPI GLboolean APIENTRY glIsProgramARB (GLuint program); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXATTRIB1DARBPROC) (GLuint index, GLdouble x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1DVARBPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1FARBPROC) (GLuint index, GLfloat x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1FVARBPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1SARBPROC) (GLuint index, GLshort x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1SVARBPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2DARBPROC) (GLuint index, GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2DVARBPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2FARBPROC) (GLuint index, GLfloat x, GLfloat y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2FVARBPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2SARBPROC) (GLuint index, GLshort x, GLshort y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2SVARBPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3DARBPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3DVARBPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3FARBPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3FVARBPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3SARBPROC) (GLuint index, GLshort x, GLshort y, GLshort z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3SVARBPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NBVARBPROC) (GLuint index, const GLbyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NIVARBPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NSVARBPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBARBPROC) (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w); 
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBVARBPROC) (GLuint index, const GLubyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUIVARBPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUSVARBPROC) (GLuint index, const GLushort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4BVARBPROC) (GLuint index, const GLbyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4DARBPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4DVARBPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4FARBPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4FVARBPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4IVARBPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4SARBPROC) (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4SVARBPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4UBVARBPROC) (GLuint index, const GLubyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4UIVARBPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4USVARBPROC) (GLuint index, const GLushort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBPOINTERARBPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYARBPROC) (GLuint index); +typedef void (APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYARBPROC) (GLuint index); +typedef void (APIENTRYP PFNGLPROGRAMSTRINGARBPROC) (GLenum target, GLenum format, GLsizei len, const GLvoid *string); +typedef void (APIENTRYP PFNGLBINDPROGRAMARBPROC) (GLenum target, GLuint program); +typedef void (APIENTRYP PFNGLDELETEPROGRAMSARBPROC) (GLsizei n, const GLuint *programs); +typedef void (APIENTRYP PFNGLGENPROGRAMSARBPROC) (GLsizei n, GLuint *programs); +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETER4DARBPROC) (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETER4DVARBPROC) (GLenum target, GLuint index, const GLdouble *params); +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETER4FARBPROC) (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETER4FVARBPROC) (GLenum target, GLuint index, const GLfloat *params); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETER4DARBPROC) (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETER4DVARBPROC) (GLenum target, GLuint index, const GLdouble *params); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETER4FARBPROC) (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETER4FVARBPROC) (GLenum target, GLuint index, const GLfloat *params); +typedef void (APIENTRYP PFNGLGETPROGRAMENVPARAMETERDVARBPROC) (GLenum target, GLuint index, GLdouble *params); +typedef void (APIENTRYP PFNGLGETPROGRAMENVPARAMETERFVARBPROC) (GLenum target, GLuint index, GLfloat *params); +typedef void (APIENTRYP PFNGLGETPROGRAMLOCALPARAMETERDVARBPROC) (GLenum target, GLuint index, GLdouble *params); +typedef void (APIENTRYP PFNGLGETPROGRAMLOCALPARAMETERFVARBPROC) (GLenum target, GLuint index, GLfloat *params); +typedef void (APIENTRYP PFNGLGETPROGRAMIVARBPROC) (GLenum target, GLenum 
pname, GLint *params); +typedef void (APIENTRYP PFNGLGETPROGRAMSTRINGARBPROC) (GLenum target, GLenum pname, GLvoid *string); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBDVARBPROC) (GLuint index, GLenum pname, GLdouble *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBFVARBPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIVARBPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVARBPROC) (GLuint index, GLenum pname, GLvoid* *pointer); +typedef GLboolean (APIENTRYP PFNGLISPROGRAMARBPROC) (GLuint program); +#endif + +#ifndef GL_ARB_fragment_program +#define GL_ARB_fragment_program 1 +/* All ARB_fragment_program entry points are shared with ARB_vertex_program. */ +#endif + +#ifndef GL_ARB_vertex_buffer_object +#define GL_ARB_vertex_buffer_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindBufferARB (GLenum target, GLuint buffer); +GLAPI void APIENTRY glDeleteBuffersARB (GLsizei n, const GLuint *buffers); +GLAPI void APIENTRY glGenBuffersARB (GLsizei n, GLuint *buffers); +GLAPI GLboolean APIENTRY glIsBufferARB (GLuint buffer); +GLAPI void APIENTRY glBufferDataARB (GLenum target, GLsizeiptrARB size, const GLvoid *data, GLenum usage); +GLAPI void APIENTRY glBufferSubDataARB (GLenum target, GLintptrARB offset, GLsizeiptrARB size, const GLvoid *data); +GLAPI void APIENTRY glGetBufferSubDataARB (GLenum target, GLintptrARB offset, GLsizeiptrARB size, GLvoid *data); +GLAPI GLvoid* APIENTRY glMapBufferARB (GLenum target, GLenum access); +GLAPI GLboolean APIENTRY glUnmapBufferARB (GLenum target); +GLAPI void APIENTRY glGetBufferParameterivARB (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetBufferPointervARB (GLenum target, GLenum pname, GLvoid* *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDBUFFERARBPROC) (GLenum target, GLuint buffer); +typedef void (APIENTRYP PFNGLDELETEBUFFERSARBPROC) (GLsizei n, const GLuint *buffers); +typedef void (APIENTRYP PFNGLGENBUFFERSARBPROC) (GLsizei n, GLuint *buffers); +typedef GLboolean (APIENTRYP PFNGLISBUFFERARBPROC) (GLuint buffer); +typedef void (APIENTRYP PFNGLBUFFERDATAARBPROC) (GLenum target, GLsizeiptrARB size, const GLvoid *data, GLenum usage); +typedef void (APIENTRYP PFNGLBUFFERSUBDATAARBPROC) (GLenum target, GLintptrARB offset, GLsizeiptrARB size, const GLvoid *data); +typedef void (APIENTRYP PFNGLGETBUFFERSUBDATAARBPROC) (GLenum target, GLintptrARB offset, GLsizeiptrARB size, GLvoid *data); +typedef GLvoid* (APIENTRYP PFNGLMAPBUFFERARBPROC) (GLenum target, GLenum access); +typedef GLboolean (APIENTRYP PFNGLUNMAPBUFFERARBPROC) (GLenum target); +typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERIVARBPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETBUFFERPOINTERVARBPROC) (GLenum target, GLenum pname, GLvoid* *params); +#endif + +#ifndef GL_ARB_occlusion_query +#define GL_ARB_occlusion_query 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGenQueriesARB (GLsizei n, GLuint *ids); +GLAPI void APIENTRY glDeleteQueriesARB (GLsizei n, const GLuint *ids); +GLAPI GLboolean APIENTRY glIsQueryARB (GLuint id); +GLAPI void APIENTRY glBeginQueryARB (GLenum target, GLuint id); +GLAPI void APIENTRY glEndQueryARB (GLenum target); +GLAPI void APIENTRY glGetQueryivARB (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetQueryObjectivARB (GLuint id, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetQueryObjectuivARB (GLuint id, GLenum pname, GLuint 
*params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGENQUERIESARBPROC) (GLsizei n, GLuint *ids); +typedef void (APIENTRYP PFNGLDELETEQUERIESARBPROC) (GLsizei n, const GLuint *ids); +typedef GLboolean (APIENTRYP PFNGLISQUERYARBPROC) (GLuint id); +typedef void (APIENTRYP PFNGLBEGINQUERYARBPROC) (GLenum target, GLuint id); +typedef void (APIENTRYP PFNGLENDQUERYARBPROC) (GLenum target); +typedef void (APIENTRYP PFNGLGETQUERYIVARBPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETQUERYOBJECTIVARBPROC) (GLuint id, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETQUERYOBJECTUIVARBPROC) (GLuint id, GLenum pname, GLuint *params); +#endif + +#ifndef GL_ARB_shader_objects +#define GL_ARB_shader_objects 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDeleteObjectARB (GLhandleARB obj); +GLAPI GLhandleARB APIENTRY glGetHandleARB (GLenum pname); +GLAPI void APIENTRY glDetachObjectARB (GLhandleARB containerObj, GLhandleARB attachedObj); +GLAPI GLhandleARB APIENTRY glCreateShaderObjectARB (GLenum shaderType); +GLAPI void APIENTRY glShaderSourceARB (GLhandleARB shaderObj, GLsizei count, const GLcharARB* *string, const GLint *length); +GLAPI void APIENTRY glCompileShaderARB (GLhandleARB shaderObj); +GLAPI GLhandleARB APIENTRY glCreateProgramObjectARB (void); +GLAPI void APIENTRY glAttachObjectARB (GLhandleARB containerObj, GLhandleARB obj); +GLAPI void APIENTRY glLinkProgramARB (GLhandleARB programObj); +GLAPI void APIENTRY glUseProgramObjectARB (GLhandleARB programObj); +GLAPI void APIENTRY glValidateProgramARB (GLhandleARB programObj); +GLAPI void APIENTRY glUniform1fARB (GLint location, GLfloat v0); +GLAPI void APIENTRY glUniform2fARB (GLint location, GLfloat v0, GLfloat v1); +GLAPI void APIENTRY glUniform3fARB (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GLAPI void APIENTRY glUniform4fARB (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GLAPI void APIENTRY glUniform1iARB (GLint location, GLint v0); +GLAPI void APIENTRY glUniform2iARB (GLint location, GLint v0, GLint v1); +GLAPI void APIENTRY glUniform3iARB (GLint location, GLint v0, GLint v1, GLint v2); +GLAPI void APIENTRY glUniform4iARB (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GLAPI void APIENTRY glUniform1fvARB (GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glUniform2fvARB (GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glUniform3fvARB (GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glUniform4fvARB (GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glUniform1ivARB (GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glUniform2ivARB (GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glUniform3ivARB (GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glUniform4ivARB (GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glUniformMatrix2fvARB (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glUniformMatrix3fvARB (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glUniformMatrix4fvARB (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glGetObjectParameterfvARB (GLhandleARB obj, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetObjectParameterivARB (GLhandleARB obj, GLenum pname, GLint *params); 
+GLAPI void APIENTRY glGetInfoLogARB (GLhandleARB obj, GLsizei maxLength, GLsizei *length, GLcharARB *infoLog); +GLAPI void APIENTRY glGetAttachedObjectsARB (GLhandleARB containerObj, GLsizei maxCount, GLsizei *count, GLhandleARB *obj); +GLAPI GLint APIENTRY glGetUniformLocationARB (GLhandleARB programObj, const GLcharARB *name); +GLAPI void APIENTRY glGetActiveUniformARB (GLhandleARB programObj, GLuint index, GLsizei maxLength, GLsizei *length, GLint *size, GLenum *type, GLcharARB *name); +GLAPI void APIENTRY glGetUniformfvARB (GLhandleARB programObj, GLint location, GLfloat *params); +GLAPI void APIENTRY glGetUniformivARB (GLhandleARB programObj, GLint location, GLint *params); +GLAPI void APIENTRY glGetShaderSourceARB (GLhandleARB obj, GLsizei maxLength, GLsizei *length, GLcharARB *source); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDELETEOBJECTARBPROC) (GLhandleARB obj); +typedef GLhandleARB (APIENTRYP PFNGLGETHANDLEARBPROC) (GLenum pname); +typedef void (APIENTRYP PFNGLDETACHOBJECTARBPROC) (GLhandleARB containerObj, GLhandleARB attachedObj); +typedef GLhandleARB (APIENTRYP PFNGLCREATESHADEROBJECTARBPROC) (GLenum shaderType); +typedef void (APIENTRYP PFNGLSHADERSOURCEARBPROC) (GLhandleARB shaderObj, GLsizei count, const GLcharARB* *string, const GLint *length); +typedef void (APIENTRYP PFNGLCOMPILESHADERARBPROC) (GLhandleARB shaderObj); +typedef GLhandleARB (APIENTRYP PFNGLCREATEPROGRAMOBJECTARBPROC) (void); +typedef void (APIENTRYP PFNGLATTACHOBJECTARBPROC) (GLhandleARB containerObj, GLhandleARB obj); +typedef void (APIENTRYP PFNGLLINKPROGRAMARBPROC) (GLhandleARB programObj); +typedef void (APIENTRYP PFNGLUSEPROGRAMOBJECTARBPROC) (GLhandleARB programObj); +typedef void (APIENTRYP PFNGLVALIDATEPROGRAMARBPROC) (GLhandleARB programObj); +typedef void (APIENTRYP PFNGLUNIFORM1FARBPROC) (GLint location, GLfloat v0); +typedef void (APIENTRYP PFNGLUNIFORM2FARBPROC) (GLint location, GLfloat v0, GLfloat v1); +typedef void (APIENTRYP PFNGLUNIFORM3FARBPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (APIENTRYP PFNGLUNIFORM4FARBPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (APIENTRYP PFNGLUNIFORM1IARBPROC) (GLint location, GLint v0); +typedef void (APIENTRYP PFNGLUNIFORM2IARBPROC) (GLint location, GLint v0, GLint v1); +typedef void (APIENTRYP PFNGLUNIFORM3IARBPROC) (GLint location, GLint v0, GLint v1, GLint v2); +typedef void (APIENTRYP PFNGLUNIFORM4IARBPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (APIENTRYP PFNGLUNIFORM1FVARBPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORM2FVARBPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORM3FVARBPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORM4FVARBPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORM1IVARBPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLUNIFORM2IVARBPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLUNIFORM3IVARBPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLUNIFORM4IVARBPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2FVARBPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3FVARBPROC) (GLint 
location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4FVARBPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLGETOBJECTPARAMETERFVARBPROC) (GLhandleARB obj, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETOBJECTPARAMETERIVARBPROC) (GLhandleARB obj, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETINFOLOGARBPROC) (GLhandleARB obj, GLsizei maxLength, GLsizei *length, GLcharARB *infoLog); +typedef void (APIENTRYP PFNGLGETATTACHEDOBJECTSARBPROC) (GLhandleARB containerObj, GLsizei maxCount, GLsizei *count, GLhandleARB *obj); +typedef GLint (APIENTRYP PFNGLGETUNIFORMLOCATIONARBPROC) (GLhandleARB programObj, const GLcharARB *name); +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMARBPROC) (GLhandleARB programObj, GLuint index, GLsizei maxLength, GLsizei *length, GLint *size, GLenum *type, GLcharARB *name); +typedef void (APIENTRYP PFNGLGETUNIFORMFVARBPROC) (GLhandleARB programObj, GLint location, GLfloat *params); +typedef void (APIENTRYP PFNGLGETUNIFORMIVARBPROC) (GLhandleARB programObj, GLint location, GLint *params); +typedef void (APIENTRYP PFNGLGETSHADERSOURCEARBPROC) (GLhandleARB obj, GLsizei maxLength, GLsizei *length, GLcharARB *source); +#endif + +#ifndef GL_ARB_vertex_shader +#define GL_ARB_vertex_shader 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindAttribLocationARB (GLhandleARB programObj, GLuint index, const GLcharARB *name); +GLAPI void APIENTRY glGetActiveAttribARB (GLhandleARB programObj, GLuint index, GLsizei maxLength, GLsizei *length, GLint *size, GLenum *type, GLcharARB *name); +GLAPI GLint APIENTRY glGetAttribLocationARB (GLhandleARB programObj, const GLcharARB *name); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDATTRIBLOCATIONARBPROC) (GLhandleARB programObj, GLuint index, const GLcharARB *name); +typedef void (APIENTRYP PFNGLGETACTIVEATTRIBARBPROC) (GLhandleARB programObj, GLuint index, GLsizei maxLength, GLsizei *length, GLint *size, GLenum *type, GLcharARB *name); +typedef GLint (APIENTRYP PFNGLGETATTRIBLOCATIONARBPROC) (GLhandleARB programObj, const GLcharARB *name); +#endif + +#ifndef GL_ARB_fragment_shader +#define GL_ARB_fragment_shader 1 +#endif + +#ifndef GL_ARB_shading_language_100 +#define GL_ARB_shading_language_100 1 +#endif + +#ifndef GL_ARB_texture_non_power_of_two +#define GL_ARB_texture_non_power_of_two 1 +#endif + +#ifndef GL_ARB_point_sprite +#define GL_ARB_point_sprite 1 +#endif + +#ifndef GL_ARB_fragment_program_shadow +#define GL_ARB_fragment_program_shadow 1 +#endif + +#ifndef GL_ARB_draw_buffers +#define GL_ARB_draw_buffers 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawBuffersARB (GLsizei n, const GLenum *bufs); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWBUFFERSARBPROC) (GLsizei n, const GLenum *bufs); +#endif + +#ifndef GL_ARB_texture_rectangle +#define GL_ARB_texture_rectangle 1 +#endif + +#ifndef GL_ARB_color_buffer_float +#define GL_ARB_color_buffer_float 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glClampColorARB (GLenum target, GLenum clamp); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCLAMPCOLORARBPROC) (GLenum target, GLenum clamp); +#endif + +#ifndef GL_ARB_half_float_pixel +#define GL_ARB_half_float_pixel 1 +#endif + +#ifndef GL_ARB_texture_float +#define GL_ARB_texture_float 1 +#endif + +#ifndef GL_ARB_pixel_buffer_object +#define GL_ARB_pixel_buffer_object 1 +#endif + +#ifndef 
GL_ARB_depth_buffer_float +#define GL_ARB_depth_buffer_float 1 +#endif + +#ifndef GL_ARB_draw_instanced +#define GL_ARB_draw_instanced 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawArraysInstancedARB (GLenum mode, GLint first, GLsizei count, GLsizei primcount); +GLAPI void APIENTRY glDrawElementsInstancedARB (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei primcount); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWARRAYSINSTANCEDARBPROC) (GLenum mode, GLint first, GLsizei count, GLsizei primcount); +typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDARBPROC) (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei primcount); +#endif + +#ifndef GL_ARB_framebuffer_object +#define GL_ARB_framebuffer_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLboolean APIENTRY glIsRenderbuffer (GLuint renderbuffer); +GLAPI void APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer); +GLAPI void APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers); +GLAPI void APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers); +GLAPI void APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI void APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params); +GLAPI GLboolean APIENTRY glIsFramebuffer (GLuint framebuffer); +GLAPI void APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer); +GLAPI void APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers); +GLAPI void APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers); +GLAPI GLenum APIENTRY glCheckFramebufferStatus (GLenum target); +GLAPI void APIENTRY glFramebufferTexture1D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GLAPI void APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GLAPI void APIENTRY glFramebufferTexture3D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +GLAPI void APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GLAPI void APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GLAPI void APIENTRY glGenerateMipmap (GLenum target); +GLAPI void APIENTRY glBlitFramebuffer (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +GLAPI void APIENTRY glRenderbufferStorageMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI void APIENTRY glFramebufferTextureLayer (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLboolean (APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer); +typedef void (APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer); +typedef void (APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (APIENTRYP PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLboolean (APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer); +typedef void 
(APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer); +typedef void (APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers); +typedef GLenum (APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE1DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE3DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +typedef void (APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target); +typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +#endif + +#ifndef GL_ARB_framebuffer_sRGB +#define GL_ARB_framebuffer_sRGB 1 +#endif + +#ifndef GL_ARB_geometry_shader4 +#define GL_ARB_geometry_shader4 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProgramParameteriARB (GLuint program, GLenum pname, GLint value); +GLAPI void APIENTRY glFramebufferTextureARB (GLenum target, GLenum attachment, GLuint texture, GLint level); +GLAPI void APIENTRY glFramebufferTextureLayerARB (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +GLAPI void APIENTRY glFramebufferTextureFaceARB (GLenum target, GLenum attachment, GLuint texture, GLint level, GLenum face); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROGRAMPARAMETERIARBPROC) (GLuint program, GLenum pname, GLint value); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREARBPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERARBPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREFACEARBPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLenum face); +#endif + +#ifndef GL_ARB_half_float_vertex +#define GL_ARB_half_float_vertex 1 +#endif + +#ifndef GL_ARB_instanced_arrays +#define GL_ARB_instanced_arrays 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexAttribDivisorARB (GLuint index, GLuint divisor); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXATTRIBDIVISORARBPROC) (GLuint index, GLuint divisor); +#endif + +#ifndef GL_ARB_map_buffer_range +#define GL_ARB_map_buffer_range 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLvoid* APIENTRY glMapBufferRange (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GLAPI void APIENTRY glFlushMappedBufferRange (GLenum target, GLintptr offset, GLsizeiptr length); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLvoid* (APIENTRYP 
PFNGLMAPBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +#endif + +#ifndef GL_ARB_texture_buffer_object +#define GL_ARB_texture_buffer_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexBufferARB (GLenum target, GLenum internalformat, GLuint buffer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXBUFFERARBPROC) (GLenum target, GLenum internalformat, GLuint buffer); +#endif + +#ifndef GL_ARB_texture_compression_rgtc +#define GL_ARB_texture_compression_rgtc 1 +#endif + +#ifndef GL_ARB_texture_rg +#define GL_ARB_texture_rg 1 +#endif + +#ifndef GL_ARB_vertex_array_object +#define GL_ARB_vertex_array_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindVertexArray (GLuint array); +GLAPI void APIENTRY glDeleteVertexArrays (GLsizei n, const GLuint *arrays); +GLAPI void APIENTRY glGenVertexArrays (GLsizei n, GLuint *arrays); +GLAPI GLboolean APIENTRY glIsVertexArray (GLuint array); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDVERTEXARRAYPROC) (GLuint array); +typedef void (APIENTRYP PFNGLDELETEVERTEXARRAYSPROC) (GLsizei n, const GLuint *arrays); +typedef void (APIENTRYP PFNGLGENVERTEXARRAYSPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (APIENTRYP PFNGLISVERTEXARRAYPROC) (GLuint array); +#endif + +#ifndef GL_ARB_uniform_buffer_object +#define GL_ARB_uniform_buffer_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetUniformIndices (GLuint program, GLsizei uniformCount, const GLchar* const *uniformNames, GLuint *uniformIndices); +GLAPI void APIENTRY glGetActiveUniformsiv (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetActiveUniformName (GLuint program, GLuint uniformIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformName); +GLAPI GLuint APIENTRY glGetUniformBlockIndex (GLuint program, const GLchar *uniformBlockName); +GLAPI void APIENTRY glGetActiveUniformBlockiv (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetActiveUniformBlockName (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +GLAPI void APIENTRY glUniformBlockBinding (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETUNIFORMINDICESPROC) (GLuint program, GLsizei uniformCount, const GLchar* const *uniformNames, GLuint *uniformIndices); +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMSIVPROC) (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMNAMEPROC) (GLuint program, GLuint uniformIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformName); +typedef GLuint (APIENTRYP PFNGLGETUNIFORMBLOCKINDEXPROC) (GLuint program, const GLchar *uniformBlockName); +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKIVPROC) (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +typedef void (APIENTRYP PFNGLUNIFORMBLOCKBINDINGPROC) (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +#endif + +#ifndef GL_ARB_compatibility +#define GL_ARB_compatibility 1 +#endif + +#ifndef 
GL_ARB_copy_buffer +#define GL_ARB_copy_buffer 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glCopyBufferSubData (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOPYBUFFERSUBDATAPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +#endif + +#ifndef GL_ARB_shader_texture_lod +#define GL_ARB_shader_texture_lod 1 +#endif + +#ifndef GL_ARB_depth_clamp +#define GL_ARB_depth_clamp 1 +#endif + +#ifndef GL_ARB_draw_elements_base_vertex +#define GL_ARB_draw_elements_base_vertex 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawElementsBaseVertex (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLint basevertex); +GLAPI void APIENTRY glDrawRangeElementsBaseVertex (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid *indices, GLint basevertex); +GLAPI void APIENTRY glDrawElementsInstancedBaseVertex (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei instancecount, GLint basevertex); +GLAPI void APIENTRY glMultiDrawElementsBaseVertex (GLenum mode, const GLsizei *count, GLenum type, const GLvoid* const *indices, GLsizei drawcount, const GLint *basevertex); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXPROC) (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLint basevertex); +typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid *indices, GLint basevertex); +typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC) (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei instancecount, GLint basevertex); +typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC) (GLenum mode, const GLsizei *count, GLenum type, const GLvoid* const *indices, GLsizei drawcount, const GLint *basevertex); +#endif + +#ifndef GL_ARB_fragment_coord_conventions +#define GL_ARB_fragment_coord_conventions 1 +#endif + +#ifndef GL_ARB_provoking_vertex +#define GL_ARB_provoking_vertex 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProvokingVertex (GLenum mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROVOKINGVERTEXPROC) (GLenum mode); +#endif + +#ifndef GL_ARB_seamless_cube_map +#define GL_ARB_seamless_cube_map 1 +#endif + +#ifndef GL_ARB_sync +#define GL_ARB_sync 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLsync APIENTRY glFenceSync (GLenum condition, GLbitfield flags); +GLAPI GLboolean APIENTRY glIsSync (GLsync sync); +GLAPI void APIENTRY glDeleteSync (GLsync sync); +GLAPI GLenum APIENTRY glClientWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GLAPI void APIENTRY glWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GLAPI void APIENTRY glGetInteger64v (GLenum pname, GLint64 *params); +GLAPI void APIENTRY glGetSynciv (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLsync (APIENTRYP PFNGLFENCESYNCPROC) (GLenum condition, GLbitfield flags); +typedef GLboolean (APIENTRYP PFNGLISSYNCPROC) (GLsync sync); +typedef void (APIENTRYP PFNGLDELETESYNCPROC) (GLsync sync); +typedef GLenum (APIENTRYP PFNGLCLIENTWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (APIENTRYP PFNGLWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (APIENTRYP PFNGLGETINTEGER64VPROC) 
(GLenum pname, GLint64 *params); +typedef void (APIENTRYP PFNGLGETSYNCIVPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#endif + +#ifndef GL_ARB_texture_multisample +#define GL_ARB_texture_multisample 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexImage2DMultisample (GLenum target, GLsizei samples, GLint internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +GLAPI void APIENTRY glTexImage3DMultisample (GLenum target, GLsizei samples, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +GLAPI void APIENTRY glGetMultisamplefv (GLenum pname, GLuint index, GLfloat *val); +GLAPI void APIENTRY glSampleMaski (GLuint index, GLbitfield mask); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXIMAGE2DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLint internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +typedef void (APIENTRYP PFNGLTEXIMAGE3DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +typedef void (APIENTRYP PFNGLGETMULTISAMPLEFVPROC) (GLenum pname, GLuint index, GLfloat *val); +typedef void (APIENTRYP PFNGLSAMPLEMASKIPROC) (GLuint index, GLbitfield mask); +#endif + +#ifndef GL_ARB_vertex_array_bgra +#define GL_ARB_vertex_array_bgra 1 +#endif + +#ifndef GL_ARB_draw_buffers_blend +#define GL_ARB_draw_buffers_blend 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBlendEquationiARB (GLuint buf, GLenum mode); +GLAPI void APIENTRY glBlendEquationSeparateiARB (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +GLAPI void APIENTRY glBlendFunciARB (GLuint buf, GLenum src, GLenum dst); +GLAPI void APIENTRY glBlendFuncSeparateiARB (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDEQUATIONIARBPROC) (GLuint buf, GLenum mode); +typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEIARBPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +typedef void (APIENTRYP PFNGLBLENDFUNCIARBPROC) (GLuint buf, GLenum src, GLenum dst); +typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEIARBPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +#endif + +#ifndef GL_ARB_sample_shading +#define GL_ARB_sample_shading 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glMinSampleShadingARB (GLfloat value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLMINSAMPLESHADINGARBPROC) (GLfloat value); +#endif + +#ifndef GL_ARB_texture_cube_map_array +#define GL_ARB_texture_cube_map_array 1 +#endif + +#ifndef GL_ARB_texture_gather +#define GL_ARB_texture_gather 1 +#endif + +#ifndef GL_ARB_texture_query_lod +#define GL_ARB_texture_query_lod 1 +#endif + +#ifndef GL_ARB_shading_language_include +#define GL_ARB_shading_language_include 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glNamedStringARB (GLenum type, GLint namelen, const GLchar *name, GLint stringlen, const GLchar *string); +GLAPI void APIENTRY glDeleteNamedStringARB (GLint namelen, const GLchar *name); +GLAPI void APIENTRY glCompileShaderIncludeARB (GLuint shader, GLsizei count, const GLchar* *path, const GLint *length); +GLAPI GLboolean APIENTRY glIsNamedStringARB (GLint namelen, const GLchar *name); +GLAPI void APIENTRY glGetNamedStringARB (GLint namelen, const GLchar *name, GLsizei bufSize, GLint *stringlen, GLchar *string); +GLAPI void APIENTRY glGetNamedStringivARB 
(GLint namelen, const GLchar *name, GLenum pname, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLNAMEDSTRINGARBPROC) (GLenum type, GLint namelen, const GLchar *name, GLint stringlen, const GLchar *string); +typedef void (APIENTRYP PFNGLDELETENAMEDSTRINGARBPROC) (GLint namelen, const GLchar *name); +typedef void (APIENTRYP PFNGLCOMPILESHADERINCLUDEARBPROC) (GLuint shader, GLsizei count, const GLchar* *path, const GLint *length); +typedef GLboolean (APIENTRYP PFNGLISNAMEDSTRINGARBPROC) (GLint namelen, const GLchar *name); +typedef void (APIENTRYP PFNGLGETNAMEDSTRINGARBPROC) (GLint namelen, const GLchar *name, GLsizei bufSize, GLint *stringlen, GLchar *string); +typedef void (APIENTRYP PFNGLGETNAMEDSTRINGIVARBPROC) (GLint namelen, const GLchar *name, GLenum pname, GLint *params); +#endif + +#ifndef GL_ARB_texture_compression_bptc +#define GL_ARB_texture_compression_bptc 1 +#endif + +#ifndef GL_ARB_blend_func_extended +#define GL_ARB_blend_func_extended 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindFragDataLocationIndexed (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name); +GLAPI GLint APIENTRY glGetFragDataIndex (GLuint program, const GLchar *name); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDFRAGDATALOCATIONINDEXEDPROC) (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name); +typedef GLint (APIENTRYP PFNGLGETFRAGDATAINDEXPROC) (GLuint program, const GLchar *name); +#endif + +#ifndef GL_ARB_explicit_attrib_location +#define GL_ARB_explicit_attrib_location 1 +#endif + +#ifndef GL_ARB_occlusion_query2 +#define GL_ARB_occlusion_query2 1 +#endif + +#ifndef GL_ARB_sampler_objects +#define GL_ARB_sampler_objects 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGenSamplers (GLsizei count, GLuint *samplers); +GLAPI void APIENTRY glDeleteSamplers (GLsizei count, const GLuint *samplers); +GLAPI GLboolean APIENTRY glIsSampler (GLuint sampler); +GLAPI void APIENTRY glBindSampler (GLuint unit, GLuint sampler); +GLAPI void APIENTRY glSamplerParameteri (GLuint sampler, GLenum pname, GLint param); +GLAPI void APIENTRY glSamplerParameteriv (GLuint sampler, GLenum pname, const GLint *param); +GLAPI void APIENTRY glSamplerParameterf (GLuint sampler, GLenum pname, GLfloat param); +GLAPI void APIENTRY glSamplerParameterfv (GLuint sampler, GLenum pname, const GLfloat *param); +GLAPI void APIENTRY glSamplerParameterIiv (GLuint sampler, GLenum pname, const GLint *param); +GLAPI void APIENTRY glSamplerParameterIuiv (GLuint sampler, GLenum pname, const GLuint *param); +GLAPI void APIENTRY glGetSamplerParameteriv (GLuint sampler, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetSamplerParameterIiv (GLuint sampler, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetSamplerParameterfv (GLuint sampler, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetSamplerParameterIuiv (GLuint sampler, GLenum pname, GLuint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGENSAMPLERSPROC) (GLsizei count, GLuint *samplers); +typedef void (APIENTRYP PFNGLDELETESAMPLERSPROC) (GLsizei count, const GLuint *samplers); +typedef GLboolean (APIENTRYP PFNGLISSAMPLERPROC) (GLuint sampler); +typedef void (APIENTRYP PFNGLBINDSAMPLERPROC) (GLuint unit, GLuint sampler); +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIPROC) (GLuint sampler, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, const GLint *param); +typedef void (APIENTRYP 
PFNGLSAMPLERPARAMETERFPROC) (GLuint sampler, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, const GLfloat *param); +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIIVPROC) (GLuint sampler, GLenum pname, const GLint *param); +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIUIVPROC) (GLuint sampler, GLenum pname, const GLuint *param); +typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERIIVPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVPROC) (GLuint sampler, GLenum pname, GLuint *params); +#endif + +#ifndef GL_ARB_shader_bit_encoding +#define GL_ARB_shader_bit_encoding 1 +#endif + +#ifndef GL_ARB_texture_rgb10_a2ui +#define GL_ARB_texture_rgb10_a2ui 1 +#endif + +#ifndef GL_ARB_texture_swizzle +#define GL_ARB_texture_swizzle 1 +#endif + +#ifndef GL_ARB_timer_query +#define GL_ARB_timer_query 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glQueryCounter (GLuint id, GLenum target); +GLAPI void APIENTRY glGetQueryObjecti64v (GLuint id, GLenum pname, GLint64 *params); +GLAPI void APIENTRY glGetQueryObjectui64v (GLuint id, GLenum pname, GLuint64 *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLQUERYCOUNTERPROC) (GLuint id, GLenum target); +typedef void (APIENTRYP PFNGLGETQUERYOBJECTI64VPROC) (GLuint id, GLenum pname, GLint64 *params); +typedef void (APIENTRYP PFNGLGETQUERYOBJECTUI64VPROC) (GLuint id, GLenum pname, GLuint64 *params); +#endif + +#ifndef GL_ARB_vertex_type_2_10_10_10_rev +#define GL_ARB_vertex_type_2_10_10_10_rev 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexP2ui (GLenum type, GLuint value); +GLAPI void APIENTRY glVertexP2uiv (GLenum type, const GLuint *value); +GLAPI void APIENTRY glVertexP3ui (GLenum type, GLuint value); +GLAPI void APIENTRY glVertexP3uiv (GLenum type, const GLuint *value); +GLAPI void APIENTRY glVertexP4ui (GLenum type, GLuint value); +GLAPI void APIENTRY glVertexP4uiv (GLenum type, const GLuint *value); +GLAPI void APIENTRY glTexCoordP1ui (GLenum type, GLuint coords); +GLAPI void APIENTRY glTexCoordP1uiv (GLenum type, const GLuint *coords); +GLAPI void APIENTRY glTexCoordP2ui (GLenum type, GLuint coords); +GLAPI void APIENTRY glTexCoordP2uiv (GLenum type, const GLuint *coords); +GLAPI void APIENTRY glTexCoordP3ui (GLenum type, GLuint coords); +GLAPI void APIENTRY glTexCoordP3uiv (GLenum type, const GLuint *coords); +GLAPI void APIENTRY glTexCoordP4ui (GLenum type, GLuint coords); +GLAPI void APIENTRY glTexCoordP4uiv (GLenum type, const GLuint *coords); +GLAPI void APIENTRY glMultiTexCoordP1ui (GLenum texture, GLenum type, GLuint coords); +GLAPI void APIENTRY glMultiTexCoordP1uiv (GLenum texture, GLenum type, const GLuint *coords); +GLAPI void APIENTRY glMultiTexCoordP2ui (GLenum texture, GLenum type, GLuint coords); +GLAPI void APIENTRY glMultiTexCoordP2uiv (GLenum texture, GLenum type, const GLuint *coords); +GLAPI void APIENTRY glMultiTexCoordP3ui (GLenum texture, GLenum type, GLuint coords); +GLAPI void APIENTRY glMultiTexCoordP3uiv (GLenum texture, GLenum type, const GLuint *coords); +GLAPI void APIENTRY glMultiTexCoordP4ui (GLenum texture, GLenum type, GLuint coords); +GLAPI void APIENTRY glMultiTexCoordP4uiv (GLenum texture, GLenum type, const GLuint *coords); +GLAPI void APIENTRY glNormalP3ui 
(GLenum type, GLuint coords); +GLAPI void APIENTRY glNormalP3uiv (GLenum type, const GLuint *coords); +GLAPI void APIENTRY glColorP3ui (GLenum type, GLuint color); +GLAPI void APIENTRY glColorP3uiv (GLenum type, const GLuint *color); +GLAPI void APIENTRY glColorP4ui (GLenum type, GLuint color); +GLAPI void APIENTRY glColorP4uiv (GLenum type, const GLuint *color); +GLAPI void APIENTRY glSecondaryColorP3ui (GLenum type, GLuint color); +GLAPI void APIENTRY glSecondaryColorP3uiv (GLenum type, const GLuint *color); +GLAPI void APIENTRY glVertexAttribP1ui (GLuint index, GLenum type, GLboolean normalized, GLuint value); +GLAPI void APIENTRY glVertexAttribP1uiv (GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +GLAPI void APIENTRY glVertexAttribP2ui (GLuint index, GLenum type, GLboolean normalized, GLuint value); +GLAPI void APIENTRY glVertexAttribP2uiv (GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +GLAPI void APIENTRY glVertexAttribP3ui (GLuint index, GLenum type, GLboolean normalized, GLuint value); +GLAPI void APIENTRY glVertexAttribP3uiv (GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +GLAPI void APIENTRY glVertexAttribP4ui (GLuint index, GLenum type, GLboolean normalized, GLuint value); +GLAPI void APIENTRY glVertexAttribP4uiv (GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXP2UIPROC) (GLenum type, GLuint value); +typedef void (APIENTRYP PFNGLVERTEXP2UIVPROC) (GLenum type, const GLuint *value); +typedef void (APIENTRYP PFNGLVERTEXP3UIPROC) (GLenum type, GLuint value); +typedef void (APIENTRYP PFNGLVERTEXP3UIVPROC) (GLenum type, const GLuint *value); +typedef void (APIENTRYP PFNGLVERTEXP4UIPROC) (GLenum type, GLuint value); +typedef void (APIENTRYP PFNGLVERTEXP4UIVPROC) (GLenum type, const GLuint *value); +typedef void (APIENTRYP PFNGLTEXCOORDP1UIPROC) (GLenum type, GLuint coords); +typedef void (APIENTRYP PFNGLTEXCOORDP1UIVPROC) (GLenum type, const GLuint *coords); +typedef void (APIENTRYP PFNGLTEXCOORDP2UIPROC) (GLenum type, GLuint coords); +typedef void (APIENTRYP PFNGLTEXCOORDP2UIVPROC) (GLenum type, const GLuint *coords); +typedef void (APIENTRYP PFNGLTEXCOORDP3UIPROC) (GLenum type, GLuint coords); +typedef void (APIENTRYP PFNGLTEXCOORDP3UIVPROC) (GLenum type, const GLuint *coords); +typedef void (APIENTRYP PFNGLTEXCOORDP4UIPROC) (GLenum type, GLuint coords); +typedef void (APIENTRYP PFNGLTEXCOORDP4UIVPROC) (GLenum type, const GLuint *coords); +typedef void (APIENTRYP PFNGLMULTITEXCOORDP1UIPROC) (GLenum texture, GLenum type, GLuint coords); +typedef void (APIENTRYP PFNGLMULTITEXCOORDP1UIVPROC) (GLenum texture, GLenum type, const GLuint *coords); +typedef void (APIENTRYP PFNGLMULTITEXCOORDP2UIPROC) (GLenum texture, GLenum type, GLuint coords); +typedef void (APIENTRYP PFNGLMULTITEXCOORDP2UIVPROC) (GLenum texture, GLenum type, const GLuint *coords); +typedef void (APIENTRYP PFNGLMULTITEXCOORDP3UIPROC) (GLenum texture, GLenum type, GLuint coords); +typedef void (APIENTRYP PFNGLMULTITEXCOORDP3UIVPROC) (GLenum texture, GLenum type, const GLuint *coords); +typedef void (APIENTRYP PFNGLMULTITEXCOORDP4UIPROC) (GLenum texture, GLenum type, GLuint coords); +typedef void (APIENTRYP PFNGLMULTITEXCOORDP4UIVPROC) (GLenum texture, GLenum type, const GLuint *coords); +typedef void (APIENTRYP PFNGLNORMALP3UIPROC) (GLenum type, GLuint coords); +typedef void (APIENTRYP PFNGLNORMALP3UIVPROC) (GLenum type, const GLuint *coords); +typedef void 
(APIENTRYP PFNGLCOLORP3UIPROC) (GLenum type, GLuint color); +typedef void (APIENTRYP PFNGLCOLORP3UIVPROC) (GLenum type, const GLuint *color); +typedef void (APIENTRYP PFNGLCOLORP4UIPROC) (GLenum type, GLuint color); +typedef void (APIENTRYP PFNGLCOLORP4UIVPROC) (GLenum type, const GLuint *color); +typedef void (APIENTRYP PFNGLSECONDARYCOLORP3UIPROC) (GLenum type, GLuint color); +typedef void (APIENTRYP PFNGLSECONDARYCOLORP3UIVPROC) (GLenum type, const GLuint *color); +typedef void (APIENTRYP PFNGLVERTEXATTRIBP1UIPROC) (GLuint index, GLenum type, GLboolean normalized, GLuint value); +typedef void (APIENTRYP PFNGLVERTEXATTRIBP1UIVPROC) (GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +typedef void (APIENTRYP PFNGLVERTEXATTRIBP2UIPROC) (GLuint index, GLenum type, GLboolean normalized, GLuint value); +typedef void (APIENTRYP PFNGLVERTEXATTRIBP2UIVPROC) (GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +typedef void (APIENTRYP PFNGLVERTEXATTRIBP3UIPROC) (GLuint index, GLenum type, GLboolean normalized, GLuint value); +typedef void (APIENTRYP PFNGLVERTEXATTRIBP3UIVPROC) (GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +typedef void (APIENTRYP PFNGLVERTEXATTRIBP4UIPROC) (GLuint index, GLenum type, GLboolean normalized, GLuint value); +typedef void (APIENTRYP PFNGLVERTEXATTRIBP4UIVPROC) (GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +#endif + +#ifndef GL_ARB_draw_indirect +#define GL_ARB_draw_indirect 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawArraysIndirect (GLenum mode, const GLvoid *indirect); +GLAPI void APIENTRY glDrawElementsIndirect (GLenum mode, GLenum type, const GLvoid *indirect); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWARRAYSINDIRECTPROC) (GLenum mode, const GLvoid *indirect); +typedef void (APIENTRYP PFNGLDRAWELEMENTSINDIRECTPROC) (GLenum mode, GLenum type, const GLvoid *indirect); +#endif + +#ifndef GL_ARB_gpu_shader5 +#define GL_ARB_gpu_shader5 1 +#endif + +#ifndef GL_ARB_gpu_shader_fp64 +#define GL_ARB_gpu_shader_fp64 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glUniform1d (GLint location, GLdouble x); +GLAPI void APIENTRY glUniform2d (GLint location, GLdouble x, GLdouble y); +GLAPI void APIENTRY glUniform3d (GLint location, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glUniform4d (GLint location, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glUniform1dv (GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glUniform2dv (GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glUniform3dv (GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glUniform4dv (GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glUniformMatrix2dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glUniformMatrix3dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glUniformMatrix4dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glUniformMatrix2x3dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glUniformMatrix2x4dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glUniformMatrix3x2dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY 
glUniformMatrix3x4dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glUniformMatrix4x2dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glUniformMatrix4x3dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glGetUniformdv (GLuint program, GLint location, GLdouble *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLUNIFORM1DPROC) (GLint location, GLdouble x); +typedef void (APIENTRYP PFNGLUNIFORM2DPROC) (GLint location, GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLUNIFORM3DPROC) (GLint location, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLUNIFORM4DPROC) (GLint location, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLUNIFORM1DVPROC) (GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORM2DVPROC) (GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORM3DVPROC) (GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORM4DVPROC) (GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X3DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X4DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X2DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X4DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X2DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X3DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLGETUNIFORMDVPROC) (GLuint program, GLint location, GLdouble *params); +#endif + +#ifndef GL_ARB_shader_subroutine +#define GL_ARB_shader_subroutine 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLint APIENTRY glGetSubroutineUniformLocation (GLuint program, GLenum shadertype, const GLchar *name); +GLAPI GLuint APIENTRY glGetSubroutineIndex (GLuint program, GLenum shadertype, const GLchar *name); +GLAPI void APIENTRY glGetActiveSubroutineUniformiv (GLuint program, GLenum shadertype, GLuint index, GLenum pname, GLint *values); +GLAPI void APIENTRY glGetActiveSubroutineUniformName (GLuint program, GLenum shadertype, GLuint index, GLsizei bufsize, GLsizei *length, GLchar *name); +GLAPI void APIENTRY glGetActiveSubroutineName (GLuint program, GLenum shadertype, GLuint index, GLsizei bufsize, GLsizei *length, GLchar *name); +GLAPI void APIENTRY glUniformSubroutinesuiv (GLenum shadertype, GLsizei count, const GLuint *indices); +GLAPI void APIENTRY glGetUniformSubroutineuiv (GLenum shadertype, GLint location, GLuint *params); +GLAPI void APIENTRY glGetProgramStageiv (GLuint program, GLenum shadertype, GLenum 
pname, GLint *values); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLint (APIENTRYP PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC) (GLuint program, GLenum shadertype, const GLchar *name); +typedef GLuint (APIENTRYP PFNGLGETSUBROUTINEINDEXPROC) (GLuint program, GLenum shadertype, const GLchar *name); +typedef void (APIENTRYP PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC) (GLuint program, GLenum shadertype, GLuint index, GLenum pname, GLint *values); +typedef void (APIENTRYP PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC) (GLuint program, GLenum shadertype, GLuint index, GLsizei bufsize, GLsizei *length, GLchar *name); +typedef void (APIENTRYP PFNGLGETACTIVESUBROUTINENAMEPROC) (GLuint program, GLenum shadertype, GLuint index, GLsizei bufsize, GLsizei *length, GLchar *name); +typedef void (APIENTRYP PFNGLUNIFORMSUBROUTINESUIVPROC) (GLenum shadertype, GLsizei count, const GLuint *indices); +typedef void (APIENTRYP PFNGLGETUNIFORMSUBROUTINEUIVPROC) (GLenum shadertype, GLint location, GLuint *params); +typedef void (APIENTRYP PFNGLGETPROGRAMSTAGEIVPROC) (GLuint program, GLenum shadertype, GLenum pname, GLint *values); +#endif + +#ifndef GL_ARB_tessellation_shader +#define GL_ARB_tessellation_shader 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPatchParameteri (GLenum pname, GLint value); +GLAPI void APIENTRY glPatchParameterfv (GLenum pname, const GLfloat *values); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPATCHPARAMETERIPROC) (GLenum pname, GLint value); +typedef void (APIENTRYP PFNGLPATCHPARAMETERFVPROC) (GLenum pname, const GLfloat *values); +#endif + +#ifndef GL_ARB_texture_buffer_object_rgb32 +#define GL_ARB_texture_buffer_object_rgb32 1 +#endif + +#ifndef GL_ARB_transform_feedback2 +#define GL_ARB_transform_feedback2 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindTransformFeedback (GLenum target, GLuint id); +GLAPI void APIENTRY glDeleteTransformFeedbacks (GLsizei n, const GLuint *ids); +GLAPI void APIENTRY glGenTransformFeedbacks (GLsizei n, GLuint *ids); +GLAPI GLboolean APIENTRY glIsTransformFeedback (GLuint id); +GLAPI void APIENTRY glPauseTransformFeedback (void); +GLAPI void APIENTRY glResumeTransformFeedback (void); +GLAPI void APIENTRY glDrawTransformFeedback (GLenum mode, GLuint id); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDTRANSFORMFEEDBACKPROC) (GLenum target, GLuint id); +typedef void (APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSPROC) (GLsizei n, const GLuint *ids); +typedef void (APIENTRYP PFNGLGENTRANSFORMFEEDBACKSPROC) (GLsizei n, GLuint *ids); +typedef GLboolean (APIENTRYP PFNGLISTRANSFORMFEEDBACKPROC) (GLuint id); +typedef void (APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKPROC) (void); +typedef void (APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKPROC) (void); +typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKPROC) (GLenum mode, GLuint id); +#endif + +#ifndef GL_ARB_transform_feedback3 +#define GL_ARB_transform_feedback3 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawTransformFeedbackStream (GLenum mode, GLuint id, GLuint stream); +GLAPI void APIENTRY glBeginQueryIndexed (GLenum target, GLuint index, GLuint id); +GLAPI void APIENTRY glEndQueryIndexed (GLenum target, GLuint index); +GLAPI void APIENTRY glGetQueryIndexediv (GLenum target, GLuint index, GLenum pname, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC) (GLenum mode, GLuint id, GLuint stream); +typedef void (APIENTRYP PFNGLBEGINQUERYINDEXEDPROC) (GLenum target, GLuint index, GLuint id); +typedef void (APIENTRYP 
PFNGLENDQUERYINDEXEDPROC) (GLenum target, GLuint index); +typedef void (APIENTRYP PFNGLGETQUERYINDEXEDIVPROC) (GLenum target, GLuint index, GLenum pname, GLint *params); +#endif + +#ifndef GL_ARB_ES2_compatibility +#define GL_ARB_ES2_compatibility 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glReleaseShaderCompiler (void); +GLAPI void APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const GLvoid *binary, GLsizei length); +GLAPI void APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +GLAPI void APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GLAPI void APIENTRY glClearDepthf (GLfloat d); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void); +typedef void (APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const GLvoid *binary, GLsizei length); +typedef void (APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +typedef void (APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f); +typedef void (APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d); +#endif + +#ifndef GL_ARB_get_program_binary +#define GL_ARB_get_program_binary 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetProgramBinary (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, GLvoid *binary); +GLAPI void APIENTRY glProgramBinary (GLuint program, GLenum binaryFormat, const GLvoid *binary, GLsizei length); +GLAPI void APIENTRY glProgramParameteri (GLuint program, GLenum pname, GLint value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETPROGRAMBINARYPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, GLvoid *binary); +typedef void (APIENTRYP PFNGLPROGRAMBINARYPROC) (GLuint program, GLenum binaryFormat, const GLvoid *binary, GLsizei length); +typedef void (APIENTRYP PFNGLPROGRAMPARAMETERIPROC) (GLuint program, GLenum pname, GLint value); +#endif + +#ifndef GL_ARB_separate_shader_objects +#define GL_ARB_separate_shader_objects 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glUseProgramStages (GLuint pipeline, GLbitfield stages, GLuint program); +GLAPI void APIENTRY glActiveShaderProgram (GLuint pipeline, GLuint program); +GLAPI GLuint APIENTRY glCreateShaderProgramv (GLenum type, GLsizei count, const GLchar* const *strings); +GLAPI void APIENTRY glBindProgramPipeline (GLuint pipeline); +GLAPI void APIENTRY glDeleteProgramPipelines (GLsizei n, const GLuint *pipelines); +GLAPI void APIENTRY glGenProgramPipelines (GLsizei n, GLuint *pipelines); +GLAPI GLboolean APIENTRY glIsProgramPipeline (GLuint pipeline); +GLAPI void APIENTRY glGetProgramPipelineiv (GLuint pipeline, GLenum pname, GLint *params); +GLAPI void APIENTRY glProgramUniform1i (GLuint program, GLint location, GLint v0); +GLAPI void APIENTRY glProgramUniform1iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glProgramUniform1f (GLuint program, GLint location, GLfloat v0); +GLAPI void APIENTRY glProgramUniform1fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glProgramUniform1d (GLuint program, GLint location, GLdouble v0); +GLAPI void APIENTRY glProgramUniform1dv (GLuint program, GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glProgramUniform1ui (GLuint program, GLint location, GLuint v0); +GLAPI void APIENTRY glProgramUniform1uiv (GLuint 
program, GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glProgramUniform2i (GLuint program, GLint location, GLint v0, GLint v1); +GLAPI void APIENTRY glProgramUniform2iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glProgramUniform2f (GLuint program, GLint location, GLfloat v0, GLfloat v1); +GLAPI void APIENTRY glProgramUniform2fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glProgramUniform2d (GLuint program, GLint location, GLdouble v0, GLdouble v1); +GLAPI void APIENTRY glProgramUniform2dv (GLuint program, GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glProgramUniform2ui (GLuint program, GLint location, GLuint v0, GLuint v1); +GLAPI void APIENTRY glProgramUniform2uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glProgramUniform3i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +GLAPI void APIENTRY glProgramUniform3iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glProgramUniform3f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GLAPI void APIENTRY glProgramUniform3fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glProgramUniform3d (GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2); +GLAPI void APIENTRY glProgramUniform3dv (GLuint program, GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glProgramUniform3ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +GLAPI void APIENTRY glProgramUniform3uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glProgramUniform4i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GLAPI void APIENTRY glProgramUniform4iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glProgramUniform4f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GLAPI void APIENTRY glProgramUniform4fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glProgramUniform4d (GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2, GLdouble v3); +GLAPI void APIENTRY glProgramUniform4dv (GLuint program, GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glProgramUniform4ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GLAPI void APIENTRY glProgramUniform4uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glProgramUniformMatrix2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix2dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix3dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix4dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI 
void APIENTRY glProgramUniformMatrix2x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix3x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix2x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix4x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix3x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix4x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix2x3dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix3x2dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix2x4dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix4x2dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix3x4dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix4x3dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glValidateProgramPipeline (GLuint pipeline); +GLAPI void APIENTRY glGetProgramPipelineInfoLog (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLUSEPROGRAMSTAGESPROC) (GLuint pipeline, GLbitfield stages, GLuint program); +typedef void (APIENTRYP PFNGLACTIVESHADERPROGRAMPROC) (GLuint pipeline, GLuint program); +typedef GLuint (APIENTRYP PFNGLCREATESHADERPROGRAMVPROC) (GLenum type, GLsizei count, const GLchar* const *strings); +typedef void (APIENTRYP PFNGLBINDPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (APIENTRYP PFNGLDELETEPROGRAMPIPELINESPROC) (GLsizei n, const GLuint *pipelines); +typedef void (APIENTRYP PFNGLGENPROGRAMPIPELINESPROC) (GLsizei n, GLuint *pipelines); +typedef GLboolean (APIENTRYP PFNGLISPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (APIENTRYP PFNGLGETPROGRAMPIPELINEIVPROC) (GLuint pipeline, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1IPROC) (GLuint program, GLint location, GLint v0); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1FPROC) (GLuint program, GLint location, GLfloat v0); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1DPROC) (GLuint program, GLint location, GLdouble v0); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1DVPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UIPROC) (GLuint program, GLint location, GLuint v0); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); 
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2IPROC) (GLuint program, GLint location, GLint v0, GLint v1); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2DPROC) (GLuint program, GLint location, GLdouble v0, GLdouble v1); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2DVPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3DPROC) (GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3DVPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4DPROC) (GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2, GLdouble v3); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4DVPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void 
(APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLVALIDATEPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (APIENTRYP PFNGLGETPROGRAMPIPELINEINFOLOGPROC) (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +#endif + +#ifndef GL_ARB_vertex_attrib_64bit +#define GL_ARB_vertex_attrib_64bit 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexAttribL1d (GLuint index, GLdouble x); +GLAPI void APIENTRY glVertexAttribL2d (GLuint index, GLdouble x, GLdouble y); +GLAPI void APIENTRY glVertexAttribL3d (GLuint index, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glVertexAttribL4d (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glVertexAttribL1dv (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribL2dv (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribL3dv (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribL4dv (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribLPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glGetVertexAttribLdv (GLuint index, GLenum pname, GLdouble *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXATTRIBL1DPROC) (GLuint index, GLdouble x); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL2DPROC) (GLuint index, GLdouble x, GLdouble y); +typedef void (APIENTRYP 
PFNGLVERTEXATTRIBL3DPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL4DPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL1DVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL2DVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL3DVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL4DVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBLPOINTERPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBLDVPROC) (GLuint index, GLenum pname, GLdouble *params); +#endif + +#ifndef GL_ARB_viewport_array +#define GL_ARB_viewport_array 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glViewportArrayv (GLuint first, GLsizei count, const GLfloat *v); +GLAPI void APIENTRY glViewportIndexedf (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +GLAPI void APIENTRY glViewportIndexedfv (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glScissorArrayv (GLuint first, GLsizei count, const GLint *v); +GLAPI void APIENTRY glScissorIndexed (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +GLAPI void APIENTRY glScissorIndexedv (GLuint index, const GLint *v); +GLAPI void APIENTRY glDepthRangeArrayv (GLuint first, GLsizei count, const GLdouble *v); +GLAPI void APIENTRY glDepthRangeIndexed (GLuint index, GLdouble n, GLdouble f); +GLAPI void APIENTRY glGetFloati_v (GLenum target, GLuint index, GLfloat *data); +GLAPI void APIENTRY glGetDoublei_v (GLenum target, GLuint index, GLdouble *data); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVIEWPORTARRAYVPROC) (GLuint first, GLsizei count, const GLfloat *v); +typedef void (APIENTRYP PFNGLVIEWPORTINDEXEDFPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +typedef void (APIENTRYP PFNGLVIEWPORTINDEXEDFVPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLSCISSORARRAYVPROC) (GLuint first, GLsizei count, const GLint *v); +typedef void (APIENTRYP PFNGLSCISSORINDEXEDPROC) (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLSCISSORINDEXEDVPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLDEPTHRANGEARRAYVPROC) (GLuint first, GLsizei count, const GLdouble *v); +typedef void (APIENTRYP PFNGLDEPTHRANGEINDEXEDPROC) (GLuint index, GLdouble n, GLdouble f); +typedef void (APIENTRYP PFNGLGETFLOATI_VPROC) (GLenum target, GLuint index, GLfloat *data); +typedef void (APIENTRYP PFNGLGETDOUBLEI_VPROC) (GLenum target, GLuint index, GLdouble *data); +#endif + +#ifndef GL_ARB_cl_event +#define GL_ARB_cl_event 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLsync APIENTRY glCreateSyncFromCLeventARB (struct _cl_context * context, struct _cl_event * event, GLbitfield flags); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLsync (APIENTRYP PFNGLCREATESYNCFROMCLEVENTARBPROC) (struct _cl_context * context, struct _cl_event * event, GLbitfield flags); +#endif + +#ifndef GL_ARB_debug_output +#define GL_ARB_debug_output 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDebugMessageControlARB (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +GLAPI void APIENTRY glDebugMessageInsertARB (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +GLAPI void APIENTRY 
glDebugMessageCallbackARB (GLDEBUGPROCARB callback, const GLvoid *userParam); +GLAPI GLuint APIENTRY glGetDebugMessageLogARB (GLuint count, GLsizei bufsize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDEBUGMESSAGECONTROLARBPROC) (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTARBPROC) (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKARBPROC) (GLDEBUGPROCARB callback, const GLvoid *userParam); +typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGARBPROC) (GLuint count, GLsizei bufsize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +#endif + +#ifndef GL_ARB_robustness +#define GL_ARB_robustness 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLenum APIENTRY glGetGraphicsResetStatusARB (void); +GLAPI void APIENTRY glGetnMapdvARB (GLenum target, GLenum query, GLsizei bufSize, GLdouble *v); +GLAPI void APIENTRY glGetnMapfvARB (GLenum target, GLenum query, GLsizei bufSize, GLfloat *v); +GLAPI void APIENTRY glGetnMapivARB (GLenum target, GLenum query, GLsizei bufSize, GLint *v); +GLAPI void APIENTRY glGetnPixelMapfvARB (GLenum map, GLsizei bufSize, GLfloat *values); +GLAPI void APIENTRY glGetnPixelMapuivARB (GLenum map, GLsizei bufSize, GLuint *values); +GLAPI void APIENTRY glGetnPixelMapusvARB (GLenum map, GLsizei bufSize, GLushort *values); +GLAPI void APIENTRY glGetnPolygonStippleARB (GLsizei bufSize, GLubyte *pattern); +GLAPI void APIENTRY glGetnColorTableARB (GLenum target, GLenum format, GLenum type, GLsizei bufSize, GLvoid *table); +GLAPI void APIENTRY glGetnConvolutionFilterARB (GLenum target, GLenum format, GLenum type, GLsizei bufSize, GLvoid *image); +GLAPI void APIENTRY glGetnSeparableFilterARB (GLenum target, GLenum format, GLenum type, GLsizei rowBufSize, GLvoid *row, GLsizei columnBufSize, GLvoid *column, GLvoid *span); +GLAPI void APIENTRY glGetnHistogramARB (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, GLvoid *values); +GLAPI void APIENTRY glGetnMinmaxARB (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, GLvoid *values); +GLAPI void APIENTRY glGetnTexImageARB (GLenum target, GLint level, GLenum format, GLenum type, GLsizei bufSize, GLvoid *img); +GLAPI void APIENTRY glReadnPixelsARB (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, GLvoid *data); +GLAPI void APIENTRY glGetnCompressedTexImageARB (GLenum target, GLint lod, GLsizei bufSize, GLvoid *img); +GLAPI void APIENTRY glGetnUniformfvARB (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +GLAPI void APIENTRY glGetnUniformivARB (GLuint program, GLint location, GLsizei bufSize, GLint *params); +GLAPI void APIENTRY glGetnUniformuivARB (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +GLAPI void APIENTRY glGetnUniformdvARB (GLuint program, GLint location, GLsizei bufSize, GLdouble *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLenum (APIENTRYP PFNGLGETGRAPHICSRESETSTATUSARBPROC) (void); +typedef void (APIENTRYP PFNGLGETNMAPDVARBPROC) (GLenum target, GLenum query, GLsizei bufSize, GLdouble *v); +typedef void (APIENTRYP PFNGLGETNMAPFVARBPROC) (GLenum target, GLenum query, GLsizei bufSize, GLfloat *v); +typedef void (APIENTRYP 
PFNGLGETNMAPIVARBPROC) (GLenum target, GLenum query, GLsizei bufSize, GLint *v); +typedef void (APIENTRYP PFNGLGETNPIXELMAPFVARBPROC) (GLenum map, GLsizei bufSize, GLfloat *values); +typedef void (APIENTRYP PFNGLGETNPIXELMAPUIVARBPROC) (GLenum map, GLsizei bufSize, GLuint *values); +typedef void (APIENTRYP PFNGLGETNPIXELMAPUSVARBPROC) (GLenum map, GLsizei bufSize, GLushort *values); +typedef void (APIENTRYP PFNGLGETNPOLYGONSTIPPLEARBPROC) (GLsizei bufSize, GLubyte *pattern); +typedef void (APIENTRYP PFNGLGETNCOLORTABLEARBPROC) (GLenum target, GLenum format, GLenum type, GLsizei bufSize, GLvoid *table); +typedef void (APIENTRYP PFNGLGETNCONVOLUTIONFILTERARBPROC) (GLenum target, GLenum format, GLenum type, GLsizei bufSize, GLvoid *image); +typedef void (APIENTRYP PFNGLGETNSEPARABLEFILTERARBPROC) (GLenum target, GLenum format, GLenum type, GLsizei rowBufSize, GLvoid *row, GLsizei columnBufSize, GLvoid *column, GLvoid *span); +typedef void (APIENTRYP PFNGLGETNHISTOGRAMARBPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, GLvoid *values); +typedef void (APIENTRYP PFNGLGETNMINMAXARBPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, GLvoid *values); +typedef void (APIENTRYP PFNGLGETNTEXIMAGEARBPROC) (GLenum target, GLint level, GLenum format, GLenum type, GLsizei bufSize, GLvoid *img); +typedef void (APIENTRYP PFNGLREADNPIXELSARBPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, GLvoid *data); +typedef void (APIENTRYP PFNGLGETNCOMPRESSEDTEXIMAGEARBPROC) (GLenum target, GLint lod, GLsizei bufSize, GLvoid *img); +typedef void (APIENTRYP PFNGLGETNUNIFORMFVARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +typedef void (APIENTRYP PFNGLGETNUNIFORMIVARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params); +typedef void (APIENTRYP PFNGLGETNUNIFORMUIVARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +typedef void (APIENTRYP PFNGLGETNUNIFORMDVARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLdouble *params); +#endif + +#ifndef GL_ARB_shader_stencil_export +#define GL_ARB_shader_stencil_export 1 +#endif + +#ifndef GL_ARB_base_instance +#define GL_ARB_base_instance 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawArraysInstancedBaseInstance (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance); +GLAPI void APIENTRY glDrawElementsInstancedBaseInstance (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance); +GLAPI void APIENTRY glDrawElementsInstancedBaseVertexBaseInstance (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance); +typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance); +typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance); +#endif + +#ifndef GL_ARB_shading_language_420pack +#define GL_ARB_shading_language_420pack 1 +#endif + +#ifndef GL_ARB_transform_feedback_instanced +#define 
GL_ARB_transform_feedback_instanced 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawTransformFeedbackInstanced (GLenum mode, GLuint id, GLsizei instancecount); +GLAPI void APIENTRY glDrawTransformFeedbackStreamInstanced (GLenum mode, GLuint id, GLuint stream, GLsizei instancecount); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC) (GLenum mode, GLuint id, GLsizei instancecount); +typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC) (GLenum mode, GLuint id, GLuint stream, GLsizei instancecount); +#endif + +#ifndef GL_ARB_compressed_texture_pixel_storage +#define GL_ARB_compressed_texture_pixel_storage 1 +#endif + +#ifndef GL_ARB_conservative_depth +#define GL_ARB_conservative_depth 1 +#endif + +#ifndef GL_ARB_internalformat_query +#define GL_ARB_internalformat_query 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetInternalformativ (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETINTERNALFORMATIVPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#endif + +#ifndef GL_ARB_map_buffer_alignment +#define GL_ARB_map_buffer_alignment 1 +#endif + +#ifndef GL_ARB_shader_atomic_counters +#define GL_ARB_shader_atomic_counters 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetActiveAtomicCounterBufferiv (GLuint program, GLuint bufferIndex, GLenum pname, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC) (GLuint program, GLuint bufferIndex, GLenum pname, GLint *params); +#endif + +#ifndef GL_ARB_shader_image_load_store +#define GL_ARB_shader_image_load_store 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindImageTexture (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +GLAPI void APIENTRY glMemoryBarrier (GLbitfield barriers); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDIMAGETEXTUREPROC) (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +typedef void (APIENTRYP PFNGLMEMORYBARRIERPROC) (GLbitfield barriers); +#endif + +#ifndef GL_ARB_shading_language_packing +#define GL_ARB_shading_language_packing 1 +#endif + +#ifndef GL_ARB_texture_storage +#define GL_ARB_texture_storage 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexStorage1D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +GLAPI void APIENTRY glTexStorage2D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI void APIENTRY glTexStorage3D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +GLAPI void APIENTRY glTextureStorage1DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +GLAPI void APIENTRY glTextureStorage2DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI void APIENTRY glTextureStorage3DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXSTORAGE1DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +typedef void (APIENTRYP PFNGLTEXSTORAGE2DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei 
height); +typedef void (APIENTRYP PFNGLTEXSTORAGE3DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +typedef void (APIENTRYP PFNGLTEXTURESTORAGE1DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +typedef void (APIENTRYP PFNGLTEXTURESTORAGE2DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLTEXTURESTORAGE3DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +#endif + +#ifndef GL_KHR_texture_compression_astc_ldr +#define GL_KHR_texture_compression_astc_ldr 1 +#endif + +#ifndef GL_KHR_debug +#define GL_KHR_debug 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDebugMessageControl (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +GLAPI void APIENTRY glDebugMessageInsert (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +GLAPI void APIENTRY glDebugMessageCallback (GLDEBUGPROC callback, const void *userParam); +GLAPI GLuint APIENTRY glGetDebugMessageLog (GLuint count, GLsizei bufsize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +GLAPI void APIENTRY glPushDebugGroup (GLenum source, GLuint id, GLsizei length, const GLchar *message); +GLAPI void APIENTRY glPopDebugGroup (void); +GLAPI void APIENTRY glObjectLabel (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +GLAPI void APIENTRY glGetObjectLabel (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +GLAPI void APIENTRY glObjectPtrLabel (const void *ptr, GLsizei length, const GLchar *label); +GLAPI void APIENTRY glGetObjectPtrLabel (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDEBUGMESSAGECONTROLPROC) (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTPROC) (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKPROC) (GLDEBUGPROC callback, const void *userParam); +typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGPROC) (GLuint count, GLsizei bufsize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +typedef void (APIENTRYP PFNGLPUSHDEBUGGROUPPROC) (GLenum source, GLuint id, GLsizei length, const GLchar *message); +typedef void (APIENTRYP PFNGLPOPDEBUGGROUPPROC) (void); +typedef void (APIENTRYP PFNGLOBJECTLABELPROC) (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +typedef void (APIENTRYP PFNGLGETOBJECTLABELPROC) (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +typedef void (APIENTRYP PFNGLOBJECTPTRLABELPROC) (const void *ptr, GLsizei length, const GLchar *label); +typedef void (APIENTRYP PFNGLGETOBJECTPTRLABELPROC) (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +#endif + +#ifndef GL_ARB_arrays_of_arrays +#define GL_ARB_arrays_of_arrays 1 +#endif + +#ifndef GL_ARB_clear_buffer_object +#define GL_ARB_clear_buffer_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glClearBufferData (GLenum target, GLenum internalformat, GLenum format, GLenum type, const void *data); +GLAPI 
void APIENTRY glClearBufferSubData (GLenum target, GLenum internalformat, GLintptr offset, GLsizeiptr size, GLenum format, GLenum type, const void *data); +GLAPI void APIENTRY glClearNamedBufferDataEXT (GLuint buffer, GLenum internalformat, GLenum format, GLenum type, const void *data); +GLAPI void APIENTRY glClearNamedBufferSubDataEXT (GLuint buffer, GLenum internalformat, GLenum format, GLenum type, GLsizeiptr offset, GLsizeiptr size, const void *data); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCLEARBUFFERDATAPROC) (GLenum target, GLenum internalformat, GLenum format, GLenum type, const void *data); +typedef void (APIENTRYP PFNGLCLEARBUFFERSUBDATAPROC) (GLenum target, GLenum internalformat, GLintptr offset, GLsizeiptr size, GLenum format, GLenum type, const void *data); +typedef void (APIENTRYP PFNGLCLEARNAMEDBUFFERDATAEXTPROC) (GLuint buffer, GLenum internalformat, GLenum format, GLenum type, const void *data); +typedef void (APIENTRYP PFNGLCLEARNAMEDBUFFERSUBDATAEXTPROC) (GLuint buffer, GLenum internalformat, GLenum format, GLenum type, GLsizeiptr offset, GLsizeiptr size, const void *data); +#endif + +#ifndef GL_ARB_compute_shader +#define GL_ARB_compute_shader 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDispatchCompute (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +GLAPI void APIENTRY glDispatchComputeIndirect (GLintptr indirect); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDISPATCHCOMPUTEPROC) (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +typedef void (APIENTRYP PFNGLDISPATCHCOMPUTEINDIRECTPROC) (GLintptr indirect); +#endif + +#ifndef GL_ARB_copy_image +#define GL_ARB_copy_image 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glCopyImageSubData (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOPYIMAGESUBDATAPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#endif + +#ifndef GL_ARB_texture_view +#define GL_ARB_texture_view 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTextureView (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXTUREVIEWPROC) (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers); +#endif + +#ifndef GL_ARB_vertex_attrib_binding +#define GL_ARB_vertex_attrib_binding 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindVertexBuffer (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +GLAPI void APIENTRY glVertexAttribFormat (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +GLAPI void APIENTRY glVertexAttribIFormat (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +GLAPI void APIENTRY glVertexAttribLFormat (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +GLAPI void APIENTRY glVertexAttribBinding (GLuint attribindex, GLuint bindingindex); +GLAPI void APIENTRY glVertexBindingDivisor (GLuint 
bindingindex, GLuint divisor); +GLAPI void APIENTRY glVertexArrayBindVertexBufferEXT (GLuint vaobj, GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +GLAPI void APIENTRY glVertexArrayVertexAttribFormatEXT (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +GLAPI void APIENTRY glVertexArrayVertexAttribIFormatEXT (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +GLAPI void APIENTRY glVertexArrayVertexAttribLFormatEXT (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +GLAPI void APIENTRY glVertexArrayVertexAttribBindingEXT (GLuint vaobj, GLuint attribindex, GLuint bindingindex); +GLAPI void APIENTRY glVertexArrayVertexBindingDivisorEXT (GLuint vaobj, GLuint bindingindex, GLuint divisor); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDVERTEXBUFFERPROC) (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +typedef void (APIENTRYP PFNGLVERTEXATTRIBFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +typedef void (APIENTRYP PFNGLVERTEXATTRIBIFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (APIENTRYP PFNGLVERTEXATTRIBLFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (APIENTRYP PFNGLVERTEXATTRIBBINDINGPROC) (GLuint attribindex, GLuint bindingindex); +typedef void (APIENTRYP PFNGLVERTEXBINDINGDIVISORPROC) (GLuint bindingindex, GLuint divisor); +typedef void (APIENTRYP PFNGLVERTEXARRAYBINDVERTEXBUFFEREXTPROC) (GLuint vaobj, GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBFORMATEXTPROC) (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBIFORMATEXTPROC) (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBLFORMATEXTPROC) (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBBINDINGEXTPROC) (GLuint vaobj, GLuint attribindex, GLuint bindingindex); +typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXBINDINGDIVISOREXTPROC) (GLuint vaobj, GLuint bindingindex, GLuint divisor); +#endif + +#ifndef GL_ARB_robustness_isolation +#define GL_ARB_robustness_isolation 1 +#endif + +#ifndef GL_ARB_ES3_compatibility +#define GL_ARB_ES3_compatibility 1 +#endif + +#ifndef GL_ARB_explicit_uniform_location +#define GL_ARB_explicit_uniform_location 1 +#endif + +#ifndef GL_ARB_fragment_layer_viewport +#define GL_ARB_fragment_layer_viewport 1 +#endif + +#ifndef GL_ARB_framebuffer_no_attachments +#define GL_ARB_framebuffer_no_attachments 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glFramebufferParameteri (GLenum target, GLenum pname, GLint param); +GLAPI void APIENTRY glGetFramebufferParameteriv (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glNamedFramebufferParameteriEXT (GLuint framebuffer, GLenum pname, GLint param); +GLAPI void APIENTRY glGetNamedFramebufferParameterivEXT (GLuint framebuffer, GLenum pname, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLFRAMEBUFFERPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVPROC) (GLenum target, 
GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERPARAMETERIEXTPROC) (GLuint framebuffer, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLGETNAMEDFRAMEBUFFERPARAMETERIVEXTPROC) (GLuint framebuffer, GLenum pname, GLint *params); +#endif + +#ifndef GL_ARB_internalformat_query2 +#define GL_ARB_internalformat_query2 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetInternalformati64v (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint64 *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETINTERNALFORMATI64VPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint64 *params); +#endif + +#ifndef GL_ARB_invalidate_subdata +#define GL_ARB_invalidate_subdata 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glInvalidateTexSubImage (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth); +GLAPI void APIENTRY glInvalidateTexImage (GLuint texture, GLint level); +GLAPI void APIENTRY glInvalidateBufferSubData (GLuint buffer, GLintptr offset, GLsizeiptr length); +GLAPI void APIENTRY glInvalidateBufferData (GLuint buffer); +GLAPI void APIENTRY glInvalidateFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments); +GLAPI void APIENTRY glInvalidateSubFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLINVALIDATETEXSUBIMAGEPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth); +typedef void (APIENTRYP PFNGLINVALIDATETEXIMAGEPROC) (GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLINVALIDATEBUFFERSUBDATAPROC) (GLuint buffer, GLintptr offset, GLsizeiptr length); +typedef void (APIENTRYP PFNGLINVALIDATEBUFFERDATAPROC) (GLuint buffer); +typedef void (APIENTRYP PFNGLINVALIDATEFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +typedef void (APIENTRYP PFNGLINVALIDATESUBFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +#endif + +#ifndef GL_ARB_multi_draw_indirect +#define GL_ARB_multi_draw_indirect 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glMultiDrawArraysIndirect (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride); +GLAPI void APIENTRY glMultiDrawElementsIndirect (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTPROC) (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride); +typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTPROC) (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride); +#endif + +#ifndef GL_ARB_program_interface_query +#define GL_ARB_program_interface_query 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetProgramInterfaceiv (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +GLAPI GLuint APIENTRY glGetProgramResourceIndex (GLuint program, GLenum programInterface, const GLchar *name); +GLAPI void APIENTRY glGetProgramResourceName (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +GLAPI void APIENTRY glGetProgramResourceiv (GLuint program, GLenum programInterface, GLuint index, GLsizei 
propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +GLAPI GLint APIENTRY glGetProgramResourceLocation (GLuint program, GLenum programInterface, const GLchar *name); +GLAPI GLint APIENTRY glGetProgramResourceLocationIndex (GLuint program, GLenum programInterface, const GLchar *name); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETPROGRAMINTERFACEIVPROC) (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +typedef GLuint (APIENTRYP PFNGLGETPROGRAMRESOURCEINDEXPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef void (APIENTRYP PFNGLGETPROGRAMRESOURCENAMEPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +typedef void (APIENTRYP PFNGLGETPROGRAMRESOURCEIVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +typedef GLint (APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef GLint (APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC) (GLuint program, GLenum programInterface, const GLchar *name); +#endif + +#ifndef GL_ARB_robust_buffer_access_behavior +#define GL_ARB_robust_buffer_access_behavior 1 +#endif + +#ifndef GL_ARB_shader_image_size +#define GL_ARB_shader_image_size 1 +#endif + +#ifndef GL_ARB_shader_storage_buffer_object +#define GL_ARB_shader_storage_buffer_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glShaderStorageBlockBinding (GLuint program, GLuint storageBlockIndex, GLuint storageBlockBinding); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSHADERSTORAGEBLOCKBINDINGPROC) (GLuint program, GLuint storageBlockIndex, GLuint storageBlockBinding); +#endif + +#ifndef GL_ARB_stencil_texturing +#define GL_ARB_stencil_texturing 1 +#endif + +#ifndef GL_ARB_texture_buffer_range +#define GL_ARB_texture_buffer_range 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexBufferRange (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +GLAPI void APIENTRY glTextureBufferRangeEXT (GLuint texture, GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXBUFFERRANGEPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (APIENTRYP PFNGLTEXTUREBUFFERRANGEEXTPROC) (GLuint texture, GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +#endif + +#ifndef GL_ARB_texture_query_levels +#define GL_ARB_texture_query_levels 1 +#endif + +#ifndef GL_ARB_texture_storage_multisample +#define GL_ARB_texture_storage_multisample 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexStorage2DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +GLAPI void APIENTRY glTexStorage3DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +GLAPI void APIENTRY glTextureStorage2DMultisampleEXT (GLuint texture, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +GLAPI void APIENTRY glTextureStorage3DMultisampleEXT (GLuint texture, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei 
depth, GLboolean fixedsamplelocations); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXSTORAGE2DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +typedef void (APIENTRYP PFNGLTEXSTORAGE3DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +typedef void (APIENTRYP PFNGLTEXTURESTORAGE2DMULTISAMPLEEXTPROC) (GLuint texture, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +typedef void (APIENTRYP PFNGLTEXTURESTORAGE3DMULTISAMPLEEXTPROC) (GLuint texture, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +#endif + +#ifndef GL_EXT_abgr +#define GL_EXT_abgr 1 +#endif + +#ifndef GL_EXT_blend_color +#define GL_EXT_blend_color 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBlendColorEXT (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDCOLOREXTPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +#endif + +#ifndef GL_EXT_polygon_offset +#define GL_EXT_polygon_offset 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPolygonOffsetEXT (GLfloat factor, GLfloat bias); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPOLYGONOFFSETEXTPROC) (GLfloat factor, GLfloat bias); +#endif + +#ifndef GL_EXT_texture +#define GL_EXT_texture 1 +#endif + +#ifndef GL_EXT_texture3D +#define GL_EXT_texture3D 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexImage3DEXT (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glTexSubImage3DEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXIMAGE3DEXTPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLTEXSUBIMAGE3DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels); +#endif + +#ifndef GL_SGIS_texture_filter4 +#define GL_SGIS_texture_filter4 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetTexFilterFuncSGIS (GLenum target, GLenum filter, GLfloat *weights); +GLAPI void APIENTRY glTexFilterFuncSGIS (GLenum target, GLenum filter, GLsizei n, const GLfloat *weights); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETTEXFILTERFUNCSGISPROC) (GLenum target, GLenum filter, GLfloat *weights); +typedef void (APIENTRYP PFNGLTEXFILTERFUNCSGISPROC) (GLenum target, GLenum filter, GLsizei n, const GLfloat *weights); +#endif + +#ifndef GL_EXT_subtexture +#define GL_EXT_subtexture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexSubImage1DEXT (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glTexSubImage2DEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const 
GLvoid *pixels); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXSUBIMAGE1DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLTEXSUBIMAGE2DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels); +#endif + +#ifndef GL_EXT_copy_texture +#define GL_EXT_copy_texture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glCopyTexImage1DEXT (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border); +GLAPI void APIENTRY glCopyTexImage2DEXT (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GLAPI void APIENTRY glCopyTexSubImage1DEXT (GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width); +GLAPI void APIENTRY glCopyTexSubImage2DEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI void APIENTRY glCopyTexSubImage3DEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOPYTEXIMAGE1DEXTPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border); +typedef void (APIENTRYP PFNGLCOPYTEXIMAGE2DEXTPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE1DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width); +typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE2DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE3DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +#endif + +#ifndef GL_EXT_histogram +#define GL_EXT_histogram 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetHistogramEXT (GLenum target, GLboolean reset, GLenum format, GLenum type, GLvoid *values); +GLAPI void APIENTRY glGetHistogramParameterfvEXT (GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetHistogramParameterivEXT (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetMinmaxEXT (GLenum target, GLboolean reset, GLenum format, GLenum type, GLvoid *values); +GLAPI void APIENTRY glGetMinmaxParameterfvEXT (GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetMinmaxParameterivEXT (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glHistogramEXT (GLenum target, GLsizei width, GLenum internalformat, GLboolean sink); +GLAPI void APIENTRY glMinmaxEXT (GLenum target, GLenum internalformat, GLboolean sink); +GLAPI void APIENTRY glResetHistogramEXT (GLenum target); +GLAPI void APIENTRY glResetMinmaxEXT (GLenum target); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETHISTOGRAMEXTPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, GLvoid *values); +typedef void (APIENTRYP PFNGLGETHISTOGRAMPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETHISTOGRAMPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETMINMAXEXTPROC) (GLenum target, GLboolean 
reset, GLenum format, GLenum type, GLvoid *values); +typedef void (APIENTRYP PFNGLGETMINMAXPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETMINMAXPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLHISTOGRAMEXTPROC) (GLenum target, GLsizei width, GLenum internalformat, GLboolean sink); +typedef void (APIENTRYP PFNGLMINMAXEXTPROC) (GLenum target, GLenum internalformat, GLboolean sink); +typedef void (APIENTRYP PFNGLRESETHISTOGRAMEXTPROC) (GLenum target); +typedef void (APIENTRYP PFNGLRESETMINMAXEXTPROC) (GLenum target); +#endif + +#ifndef GL_EXT_convolution +#define GL_EXT_convolution 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glConvolutionFilter1DEXT (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const GLvoid *image); +GLAPI void APIENTRY glConvolutionFilter2DEXT (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *image); +GLAPI void APIENTRY glConvolutionParameterfEXT (GLenum target, GLenum pname, GLfloat params); +GLAPI void APIENTRY glConvolutionParameterfvEXT (GLenum target, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glConvolutionParameteriEXT (GLenum target, GLenum pname, GLint params); +GLAPI void APIENTRY glConvolutionParameterivEXT (GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glCopyConvolutionFilter1DEXT (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width); +GLAPI void APIENTRY glCopyConvolutionFilter2DEXT (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI void APIENTRY glGetConvolutionFilterEXT (GLenum target, GLenum format, GLenum type, GLvoid *image); +GLAPI void APIENTRY glGetConvolutionParameterfvEXT (GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetConvolutionParameterivEXT (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetSeparableFilterEXT (GLenum target, GLenum format, GLenum type, GLvoid *row, GLvoid *column, GLvoid *span); +GLAPI void APIENTRY glSeparableFilter2DEXT (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *row, const GLvoid *column); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCONVOLUTIONFILTER1DEXTPROC) (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const GLvoid *image); +typedef void (APIENTRYP PFNGLCONVOLUTIONFILTER2DEXTPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *image); +typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERFEXTPROC) (GLenum target, GLenum pname, GLfloat params); +typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERFVEXTPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERIEXTPROC) (GLenum target, GLenum pname, GLint params); +typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERIVEXTPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLCOPYCONVOLUTIONFILTER1DEXTPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width); +typedef void (APIENTRYP PFNGLCOPYCONVOLUTIONFILTER2DEXTPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLGETCONVOLUTIONFILTEREXTPROC) (GLenum target, GLenum format, GLenum type, GLvoid *image); +typedef void (APIENTRYP 
PFNGLGETCONVOLUTIONPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETCONVOLUTIONPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETSEPARABLEFILTEREXTPROC) (GLenum target, GLenum format, GLenum type, GLvoid *row, GLvoid *column, GLvoid *span); +typedef void (APIENTRYP PFNGLSEPARABLEFILTER2DEXTPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *row, const GLvoid *column); +#endif + +#ifndef GL_SGI_color_matrix +#define GL_SGI_color_matrix 1 +#endif + +#ifndef GL_SGI_color_table +#define GL_SGI_color_table 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glColorTableSGI (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const GLvoid *table); +GLAPI void APIENTRY glColorTableParameterfvSGI (GLenum target, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glColorTableParameterivSGI (GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glCopyColorTableSGI (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width); +GLAPI void APIENTRY glGetColorTableSGI (GLenum target, GLenum format, GLenum type, GLvoid *table); +GLAPI void APIENTRY glGetColorTableParameterfvSGI (GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetColorTableParameterivSGI (GLenum target, GLenum pname, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOLORTABLESGIPROC) (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const GLvoid *table); +typedef void (APIENTRYP PFNGLCOLORTABLEPARAMETERFVSGIPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLCOLORTABLEPARAMETERIVSGIPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLCOPYCOLORTABLESGIPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width); +typedef void (APIENTRYP PFNGLGETCOLORTABLESGIPROC) (GLenum target, GLenum format, GLenum type, GLvoid *table); +typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERFVSGIPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERIVSGIPROC) (GLenum target, GLenum pname, GLint *params); +#endif + +#ifndef GL_SGIX_pixel_texture +#define GL_SGIX_pixel_texture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPixelTexGenSGIX (GLenum mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPIXELTEXGENSGIXPROC) (GLenum mode); +#endif + +#ifndef GL_SGIS_pixel_texture +#define GL_SGIS_pixel_texture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPixelTexGenParameteriSGIS (GLenum pname, GLint param); +GLAPI void APIENTRY glPixelTexGenParameterivSGIS (GLenum pname, const GLint *params); +GLAPI void APIENTRY glPixelTexGenParameterfSGIS (GLenum pname, GLfloat param); +GLAPI void APIENTRY glPixelTexGenParameterfvSGIS (GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glGetPixelTexGenParameterivSGIS (GLenum pname, GLint *params); +GLAPI void APIENTRY glGetPixelTexGenParameterfvSGIS (GLenum pname, GLfloat *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPIXELTEXGENPARAMETERISGISPROC) (GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLPIXELTEXGENPARAMETERIVSGISPROC) (GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLPIXELTEXGENPARAMETERFSGISPROC) (GLenum pname, GLfloat param); +typedef void (APIENTRYP 
PFNGLPIXELTEXGENPARAMETERFVSGISPROC) (GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLGETPIXELTEXGENPARAMETERIVSGISPROC) (GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETPIXELTEXGENPARAMETERFVSGISPROC) (GLenum pname, GLfloat *params); +#endif + +#ifndef GL_SGIS_texture4D +#define GL_SGIS_texture4D 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexImage4DSGIS (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLsizei size4d, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glTexSubImage4DSGIS (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint woffset, GLsizei width, GLsizei height, GLsizei depth, GLsizei size4d, GLenum format, GLenum type, const GLvoid *pixels); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXIMAGE4DSGISPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLsizei size4d, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLTEXSUBIMAGE4DSGISPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint woffset, GLsizei width, GLsizei height, GLsizei depth, GLsizei size4d, GLenum format, GLenum type, const GLvoid *pixels); +#endif + +#ifndef GL_SGI_texture_color_table +#define GL_SGI_texture_color_table 1 +#endif + +#ifndef GL_EXT_cmyka +#define GL_EXT_cmyka 1 +#endif + +#ifndef GL_EXT_texture_object +#define GL_EXT_texture_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLboolean APIENTRY glAreTexturesResidentEXT (GLsizei n, const GLuint *textures, GLboolean *residences); +GLAPI void APIENTRY glBindTextureEXT (GLenum target, GLuint texture); +GLAPI void APIENTRY glDeleteTexturesEXT (GLsizei n, const GLuint *textures); +GLAPI void APIENTRY glGenTexturesEXT (GLsizei n, GLuint *textures); +GLAPI GLboolean APIENTRY glIsTextureEXT (GLuint texture); +GLAPI void APIENTRY glPrioritizeTexturesEXT (GLsizei n, const GLuint *textures, const GLclampf *priorities); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLboolean (APIENTRYP PFNGLARETEXTURESRESIDENTEXTPROC) (GLsizei n, const GLuint *textures, GLboolean *residences); +typedef void (APIENTRYP PFNGLBINDTEXTUREEXTPROC) (GLenum target, GLuint texture); +typedef void (APIENTRYP PFNGLDELETETEXTURESEXTPROC) (GLsizei n, const GLuint *textures); +typedef void (APIENTRYP PFNGLGENTEXTURESEXTPROC) (GLsizei n, GLuint *textures); +typedef GLboolean (APIENTRYP PFNGLISTEXTUREEXTPROC) (GLuint texture); +typedef void (APIENTRYP PFNGLPRIORITIZETEXTURESEXTPROC) (GLsizei n, const GLuint *textures, const GLclampf *priorities); +#endif + +#ifndef GL_SGIS_detail_texture +#define GL_SGIS_detail_texture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDetailTexFuncSGIS (GLenum target, GLsizei n, const GLfloat *points); +GLAPI void APIENTRY glGetDetailTexFuncSGIS (GLenum target, GLfloat *points); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDETAILTEXFUNCSGISPROC) (GLenum target, GLsizei n, const GLfloat *points); +typedef void (APIENTRYP PFNGLGETDETAILTEXFUNCSGISPROC) (GLenum target, GLfloat *points); +#endif + +#ifndef GL_SGIS_sharpen_texture +#define GL_SGIS_sharpen_texture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glSharpenTexFuncSGIS (GLenum target, GLsizei n, const GLfloat *points); +GLAPI void APIENTRY glGetSharpenTexFuncSGIS (GLenum target, GLfloat *points); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSHARPENTEXFUNCSGISPROC) 
(GLenum target, GLsizei n, const GLfloat *points); +typedef void (APIENTRYP PFNGLGETSHARPENTEXFUNCSGISPROC) (GLenum target, GLfloat *points); +#endif + +#ifndef GL_EXT_packed_pixels +#define GL_EXT_packed_pixels 1 +#endif + +#ifndef GL_SGIS_texture_lod +#define GL_SGIS_texture_lod 1 +#endif + +#ifndef GL_SGIS_multisample +#define GL_SGIS_multisample 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glSampleMaskSGIS (GLclampf value, GLboolean invert); +GLAPI void APIENTRY glSamplePatternSGIS (GLenum pattern); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSAMPLEMASKSGISPROC) (GLclampf value, GLboolean invert); +typedef void (APIENTRYP PFNGLSAMPLEPATTERNSGISPROC) (GLenum pattern); +#endif + +#ifndef GL_EXT_rescale_normal +#define GL_EXT_rescale_normal 1 +#endif + +#ifndef GL_EXT_vertex_array +#define GL_EXT_vertex_array 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glArrayElementEXT (GLint i); +GLAPI void APIENTRY glColorPointerEXT (GLint size, GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +GLAPI void APIENTRY glDrawArraysEXT (GLenum mode, GLint first, GLsizei count); +GLAPI void APIENTRY glEdgeFlagPointerEXT (GLsizei stride, GLsizei count, const GLboolean *pointer); +GLAPI void APIENTRY glGetPointervEXT (GLenum pname, GLvoid* *params); +GLAPI void APIENTRY glIndexPointerEXT (GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +GLAPI void APIENTRY glNormalPointerEXT (GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +GLAPI void APIENTRY glTexCoordPointerEXT (GLint size, GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +GLAPI void APIENTRY glVertexPointerEXT (GLint size, GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLARRAYELEMENTEXTPROC) (GLint i); +typedef void (APIENTRYP PFNGLCOLORPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLDRAWARRAYSEXTPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (APIENTRYP PFNGLEDGEFLAGPOINTEREXTPROC) (GLsizei stride, GLsizei count, const GLboolean *pointer); +typedef void (APIENTRYP PFNGLGETPOINTERVEXTPROC) (GLenum pname, GLvoid* *params); +typedef void (APIENTRYP PFNGLINDEXPOINTEREXTPROC) (GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLNORMALPOINTEREXTPROC) (GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLTEXCOORDPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLVERTEXPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, GLsizei count, const GLvoid *pointer); +#endif + +#ifndef GL_EXT_misc_attribute +#define GL_EXT_misc_attribute 1 +#endif + +#ifndef GL_SGIS_generate_mipmap +#define GL_SGIS_generate_mipmap 1 +#endif + +#ifndef GL_SGIX_clipmap +#define GL_SGIX_clipmap 1 +#endif + +#ifndef GL_SGIX_shadow +#define GL_SGIX_shadow 1 +#endif + +#ifndef GL_SGIS_texture_edge_clamp +#define GL_SGIS_texture_edge_clamp 1 +#endif + +#ifndef GL_SGIS_texture_border_clamp +#define GL_SGIS_texture_border_clamp 1 +#endif + +#ifndef GL_EXT_blend_minmax +#define GL_EXT_blend_minmax 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBlendEquationEXT (GLenum mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDEQUATIONEXTPROC) (GLenum mode); +#endif + +#ifndef GL_EXT_blend_subtract +#define 
GL_EXT_blend_subtract 1 +#endif + +#ifndef GL_EXT_blend_logic_op +#define GL_EXT_blend_logic_op 1 +#endif + +#ifndef GL_SGIX_interlace +#define GL_SGIX_interlace 1 +#endif + +#ifndef GL_SGIX_pixel_tiles +#define GL_SGIX_pixel_tiles 1 +#endif + +#ifndef GL_SGIX_texture_select +#define GL_SGIX_texture_select 1 +#endif + +#ifndef GL_SGIX_sprite +#define GL_SGIX_sprite 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glSpriteParameterfSGIX (GLenum pname, GLfloat param); +GLAPI void APIENTRY glSpriteParameterfvSGIX (GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glSpriteParameteriSGIX (GLenum pname, GLint param); +GLAPI void APIENTRY glSpriteParameterivSGIX (GLenum pname, const GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSPRITEPARAMETERFSGIXPROC) (GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLSPRITEPARAMETERFVSGIXPROC) (GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLSPRITEPARAMETERISGIXPROC) (GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLSPRITEPARAMETERIVSGIXPROC) (GLenum pname, const GLint *params); +#endif + +#ifndef GL_SGIX_texture_multi_buffer +#define GL_SGIX_texture_multi_buffer 1 +#endif + +#ifndef GL_EXT_point_parameters +#define GL_EXT_point_parameters 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPointParameterfEXT (GLenum pname, GLfloat param); +GLAPI void APIENTRY glPointParameterfvEXT (GLenum pname, const GLfloat *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPOINTPARAMETERFEXTPROC) (GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLPOINTPARAMETERFVEXTPROC) (GLenum pname, const GLfloat *params); +#endif + +#ifndef GL_SGIS_point_parameters +#define GL_SGIS_point_parameters 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPointParameterfSGIS (GLenum pname, GLfloat param); +GLAPI void APIENTRY glPointParameterfvSGIS (GLenum pname, const GLfloat *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPOINTPARAMETERFSGISPROC) (GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLPOINTPARAMETERFVSGISPROC) (GLenum pname, const GLfloat *params); +#endif + +#ifndef GL_SGIX_instruments +#define GL_SGIX_instruments 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLint APIENTRY glGetInstrumentsSGIX (void); +GLAPI void APIENTRY glInstrumentsBufferSGIX (GLsizei size, GLint *buffer); +GLAPI GLint APIENTRY glPollInstrumentsSGIX (GLint *marker_p); +GLAPI void APIENTRY glReadInstrumentsSGIX (GLint marker); +GLAPI void APIENTRY glStartInstrumentsSGIX (void); +GLAPI void APIENTRY glStopInstrumentsSGIX (GLint marker); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLint (APIENTRYP PFNGLGETINSTRUMENTSSGIXPROC) (void); +typedef void (APIENTRYP PFNGLINSTRUMENTSBUFFERSGIXPROC) (GLsizei size, GLint *buffer); +typedef GLint (APIENTRYP PFNGLPOLLINSTRUMENTSSGIXPROC) (GLint *marker_p); +typedef void (APIENTRYP PFNGLREADINSTRUMENTSSGIXPROC) (GLint marker); +typedef void (APIENTRYP PFNGLSTARTINSTRUMENTSSGIXPROC) (void); +typedef void (APIENTRYP PFNGLSTOPINSTRUMENTSSGIXPROC) (GLint marker); +#endif + +#ifndef GL_SGIX_texture_scale_bias +#define GL_SGIX_texture_scale_bias 1 +#endif + +#ifndef GL_SGIX_framezoom +#define GL_SGIX_framezoom 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glFrameZoomSGIX (GLint factor); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLFRAMEZOOMSGIXPROC) (GLint factor); +#endif + +#ifndef GL_SGIX_tag_sample_buffer +#define GL_SGIX_tag_sample_buffer 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY 
glTagSampleBufferSGIX (void); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTAGSAMPLEBUFFERSGIXPROC) (void); +#endif + +#ifndef GL_SGIX_polynomial_ffd +#define GL_SGIX_polynomial_ffd 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDeformationMap3dSGIX (GLenum target, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, GLdouble w1, GLdouble w2, GLint wstride, GLint worder, const GLdouble *points); +GLAPI void APIENTRY glDeformationMap3fSGIX (GLenum target, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, GLfloat w1, GLfloat w2, GLint wstride, GLint worder, const GLfloat *points); +GLAPI void APIENTRY glDeformSGIX (GLbitfield mask); +GLAPI void APIENTRY glLoadIdentityDeformationMapSGIX (GLbitfield mask); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDEFORMATIONMAP3DSGIXPROC) (GLenum target, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, GLdouble w1, GLdouble w2, GLint wstride, GLint worder, const GLdouble *points); +typedef void (APIENTRYP PFNGLDEFORMATIONMAP3FSGIXPROC) (GLenum target, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, GLfloat w1, GLfloat w2, GLint wstride, GLint worder, const GLfloat *points); +typedef void (APIENTRYP PFNGLDEFORMSGIXPROC) (GLbitfield mask); +typedef void (APIENTRYP PFNGLLOADIDENTITYDEFORMATIONMAPSGIXPROC) (GLbitfield mask); +#endif + +#ifndef GL_SGIX_reference_plane +#define GL_SGIX_reference_plane 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glReferencePlaneSGIX (const GLdouble *equation); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLREFERENCEPLANESGIXPROC) (const GLdouble *equation); +#endif + +#ifndef GL_SGIX_flush_raster +#define GL_SGIX_flush_raster 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glFlushRasterSGIX (void); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLFLUSHRASTERSGIXPROC) (void); +#endif + +#ifndef GL_SGIX_depth_texture +#define GL_SGIX_depth_texture 1 +#endif + +#ifndef GL_SGIS_fog_function +#define GL_SGIS_fog_function 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glFogFuncSGIS (GLsizei n, const GLfloat *points); +GLAPI void APIENTRY glGetFogFuncSGIS (GLfloat *points); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLFOGFUNCSGISPROC) (GLsizei n, const GLfloat *points); +typedef void (APIENTRYP PFNGLGETFOGFUNCSGISPROC) (GLfloat *points); +#endif + +#ifndef GL_SGIX_fog_offset +#define GL_SGIX_fog_offset 1 +#endif + +#ifndef GL_HP_image_transform +#define GL_HP_image_transform 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glImageTransformParameteriHP (GLenum target, GLenum pname, GLint param); +GLAPI void APIENTRY glImageTransformParameterfHP (GLenum target, GLenum pname, GLfloat param); +GLAPI void APIENTRY glImageTransformParameterivHP (GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glImageTransformParameterfvHP (GLenum target, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glGetImageTransformParameterivHP (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetImageTransformParameterfvHP (GLenum target, GLenum pname, GLfloat *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLIMAGETRANSFORMPARAMETERIHPPROC) (GLenum target, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLIMAGETRANSFORMPARAMETERFHPPROC) (GLenum 
target, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLIMAGETRANSFORMPARAMETERIVHPPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLIMAGETRANSFORMPARAMETERFVHPPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLGETIMAGETRANSFORMPARAMETERIVHPPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETIMAGETRANSFORMPARAMETERFVHPPROC) (GLenum target, GLenum pname, GLfloat *params); +#endif + +#ifndef GL_HP_convolution_border_modes +#define GL_HP_convolution_border_modes 1 +#endif + +#ifndef GL_SGIX_texture_add_env +#define GL_SGIX_texture_add_env 1 +#endif + +#ifndef GL_EXT_color_subtable +#define GL_EXT_color_subtable 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glColorSubTableEXT (GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum type, const GLvoid *data); +GLAPI void APIENTRY glCopyColorSubTableEXT (GLenum target, GLsizei start, GLint x, GLint y, GLsizei width); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOLORSUBTABLEEXTPROC) (GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum type, const GLvoid *data); +typedef void (APIENTRYP PFNGLCOPYCOLORSUBTABLEEXTPROC) (GLenum target, GLsizei start, GLint x, GLint y, GLsizei width); +#endif + +#ifndef GL_PGI_vertex_hints +#define GL_PGI_vertex_hints 1 +#endif + +#ifndef GL_PGI_misc_hints +#define GL_PGI_misc_hints 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glHintPGI (GLenum target, GLint mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLHINTPGIPROC) (GLenum target, GLint mode); +#endif + +#ifndef GL_EXT_paletted_texture +#define GL_EXT_paletted_texture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glColorTableEXT (GLenum target, GLenum internalFormat, GLsizei width, GLenum format, GLenum type, const GLvoid *table); +GLAPI void APIENTRY glGetColorTableEXT (GLenum target, GLenum format, GLenum type, GLvoid *data); +GLAPI void APIENTRY glGetColorTableParameterivEXT (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetColorTableParameterfvEXT (GLenum target, GLenum pname, GLfloat *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOLORTABLEEXTPROC) (GLenum target, GLenum internalFormat, GLsizei width, GLenum format, GLenum type, const GLvoid *table); +typedef void (APIENTRYP PFNGLGETCOLORTABLEEXTPROC) (GLenum target, GLenum format, GLenum type, GLvoid *data); +typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params); +#endif + +#ifndef GL_EXT_clip_volume_hint +#define GL_EXT_clip_volume_hint 1 +#endif + +#ifndef GL_SGIX_list_priority +#define GL_SGIX_list_priority 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetListParameterfvSGIX (GLuint list, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetListParameterivSGIX (GLuint list, GLenum pname, GLint *params); +GLAPI void APIENTRY glListParameterfSGIX (GLuint list, GLenum pname, GLfloat param); +GLAPI void APIENTRY glListParameterfvSGIX (GLuint list, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glListParameteriSGIX (GLuint list, GLenum pname, GLint param); +GLAPI void APIENTRY glListParameterivSGIX (GLuint list, GLenum pname, const GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETLISTPARAMETERFVSGIXPROC) (GLuint list, GLenum pname, GLfloat 
*params); +typedef void (APIENTRYP PFNGLGETLISTPARAMETERIVSGIXPROC) (GLuint list, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLLISTPARAMETERFSGIXPROC) (GLuint list, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLLISTPARAMETERFVSGIXPROC) (GLuint list, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLLISTPARAMETERISGIXPROC) (GLuint list, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLLISTPARAMETERIVSGIXPROC) (GLuint list, GLenum pname, const GLint *params); +#endif + +#ifndef GL_SGIX_ir_instrument1 +#define GL_SGIX_ir_instrument1 1 +#endif + +#ifndef GL_SGIX_calligraphic_fragment +#define GL_SGIX_calligraphic_fragment 1 +#endif + +#ifndef GL_SGIX_texture_lod_bias +#define GL_SGIX_texture_lod_bias 1 +#endif + +#ifndef GL_SGIX_shadow_ambient +#define GL_SGIX_shadow_ambient 1 +#endif + +#ifndef GL_EXT_index_texture +#define GL_EXT_index_texture 1 +#endif + +#ifndef GL_EXT_index_material +#define GL_EXT_index_material 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glIndexMaterialEXT (GLenum face, GLenum mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLINDEXMATERIALEXTPROC) (GLenum face, GLenum mode); +#endif + +#ifndef GL_EXT_index_func +#define GL_EXT_index_func 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glIndexFuncEXT (GLenum func, GLclampf ref); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLINDEXFUNCEXTPROC) (GLenum func, GLclampf ref); +#endif + +#ifndef GL_EXT_index_array_formats +#define GL_EXT_index_array_formats 1 +#endif + +#ifndef GL_EXT_compiled_vertex_array +#define GL_EXT_compiled_vertex_array 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glLockArraysEXT (GLint first, GLsizei count); +GLAPI void APIENTRY glUnlockArraysEXT (void); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLLOCKARRAYSEXTPROC) (GLint first, GLsizei count); +typedef void (APIENTRYP PFNGLUNLOCKARRAYSEXTPROC) (void); +#endif + +#ifndef GL_EXT_cull_vertex +#define GL_EXT_cull_vertex 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glCullParameterdvEXT (GLenum pname, GLdouble *params); +GLAPI void APIENTRY glCullParameterfvEXT (GLenum pname, GLfloat *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCULLPARAMETERDVEXTPROC) (GLenum pname, GLdouble *params); +typedef void (APIENTRYP PFNGLCULLPARAMETERFVEXTPROC) (GLenum pname, GLfloat *params); +#endif + +#ifndef GL_SGIX_ycrcb +#define GL_SGIX_ycrcb 1 +#endif + +#ifndef GL_SGIX_fragment_lighting +#define GL_SGIX_fragment_lighting 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glFragmentColorMaterialSGIX (GLenum face, GLenum mode); +GLAPI void APIENTRY glFragmentLightfSGIX (GLenum light, GLenum pname, GLfloat param); +GLAPI void APIENTRY glFragmentLightfvSGIX (GLenum light, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glFragmentLightiSGIX (GLenum light, GLenum pname, GLint param); +GLAPI void APIENTRY glFragmentLightivSGIX (GLenum light, GLenum pname, const GLint *params); +GLAPI void APIENTRY glFragmentLightModelfSGIX (GLenum pname, GLfloat param); +GLAPI void APIENTRY glFragmentLightModelfvSGIX (GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glFragmentLightModeliSGIX (GLenum pname, GLint param); +GLAPI void APIENTRY glFragmentLightModelivSGIX (GLenum pname, const GLint *params); +GLAPI void APIENTRY glFragmentMaterialfSGIX (GLenum face, GLenum pname, GLfloat param); +GLAPI void APIENTRY glFragmentMaterialfvSGIX (GLenum face, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY 
glFragmentMaterialiSGIX (GLenum face, GLenum pname, GLint param); +GLAPI void APIENTRY glFragmentMaterialivSGIX (GLenum face, GLenum pname, const GLint *params); +GLAPI void APIENTRY glGetFragmentLightfvSGIX (GLenum light, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetFragmentLightivSGIX (GLenum light, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetFragmentMaterialfvSGIX (GLenum face, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetFragmentMaterialivSGIX (GLenum face, GLenum pname, GLint *params); +GLAPI void APIENTRY glLightEnviSGIX (GLenum pname, GLint param); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLFRAGMENTCOLORMATERIALSGIXPROC) (GLenum face, GLenum mode); +typedef void (APIENTRYP PFNGLFRAGMENTLIGHTFSGIXPROC) (GLenum light, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLFRAGMENTLIGHTFVSGIXPROC) (GLenum light, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLFRAGMENTLIGHTISGIXPROC) (GLenum light, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLFRAGMENTLIGHTIVSGIXPROC) (GLenum light, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLFRAGMENTLIGHTMODELFSGIXPROC) (GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLFRAGMENTLIGHTMODELFVSGIXPROC) (GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLFRAGMENTLIGHTMODELISGIXPROC) (GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLFRAGMENTLIGHTMODELIVSGIXPROC) (GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLFRAGMENTMATERIALFSGIXPROC) (GLenum face, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLFRAGMENTMATERIALFVSGIXPROC) (GLenum face, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLFRAGMENTMATERIALISGIXPROC) (GLenum face, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLFRAGMENTMATERIALIVSGIXPROC) (GLenum face, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLGETFRAGMENTLIGHTFVSGIXPROC) (GLenum light, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETFRAGMENTLIGHTIVSGIXPROC) (GLenum light, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETFRAGMENTMATERIALFVSGIXPROC) (GLenum face, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETFRAGMENTMATERIALIVSGIXPROC) (GLenum face, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLLIGHTENVISGIXPROC) (GLenum pname, GLint param); +#endif + +#ifndef GL_IBM_rasterpos_clip +#define GL_IBM_rasterpos_clip 1 +#endif + +#ifndef GL_HP_texture_lighting +#define GL_HP_texture_lighting 1 +#endif + +#ifndef GL_EXT_draw_range_elements +#define GL_EXT_draw_range_elements 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawRangeElementsEXT (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid *indices); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTSEXTPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid *indices); +#endif + +#ifndef GL_WIN_phong_shading +#define GL_WIN_phong_shading 1 +#endif + +#ifndef GL_WIN_specular_fog +#define GL_WIN_specular_fog 1 +#endif + +#ifndef GL_EXT_light_texture +#define GL_EXT_light_texture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glApplyTextureEXT (GLenum mode); +GLAPI void APIENTRY glTextureLightEXT (GLenum pname); +GLAPI void APIENTRY glTextureMaterialEXT (GLenum face, GLenum mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLAPPLYTEXTUREEXTPROC) (GLenum mode); +typedef void (APIENTRYP 
PFNGLTEXTURELIGHTEXTPROC) (GLenum pname); +typedef void (APIENTRYP PFNGLTEXTUREMATERIALEXTPROC) (GLenum face, GLenum mode); +#endif + +#ifndef GL_SGIX_blend_alpha_minmax +#define GL_SGIX_blend_alpha_minmax 1 +#endif + +#ifndef GL_EXT_bgra +#define GL_EXT_bgra 1 +#endif + +#ifndef GL_SGIX_async +#define GL_SGIX_async 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glAsyncMarkerSGIX (GLuint marker); +GLAPI GLint APIENTRY glFinishAsyncSGIX (GLuint *markerp); +GLAPI GLint APIENTRY glPollAsyncSGIX (GLuint *markerp); +GLAPI GLuint APIENTRY glGenAsyncMarkersSGIX (GLsizei range); +GLAPI void APIENTRY glDeleteAsyncMarkersSGIX (GLuint marker, GLsizei range); +GLAPI GLboolean APIENTRY glIsAsyncMarkerSGIX (GLuint marker); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLASYNCMARKERSGIXPROC) (GLuint marker); +typedef GLint (APIENTRYP PFNGLFINISHASYNCSGIXPROC) (GLuint *markerp); +typedef GLint (APIENTRYP PFNGLPOLLASYNCSGIXPROC) (GLuint *markerp); +typedef GLuint (APIENTRYP PFNGLGENASYNCMARKERSSGIXPROC) (GLsizei range); +typedef void (APIENTRYP PFNGLDELETEASYNCMARKERSSGIXPROC) (GLuint marker, GLsizei range); +typedef GLboolean (APIENTRYP PFNGLISASYNCMARKERSGIXPROC) (GLuint marker); +#endif + +#ifndef GL_SGIX_async_pixel +#define GL_SGIX_async_pixel 1 +#endif + +#ifndef GL_SGIX_async_histogram +#define GL_SGIX_async_histogram 1 +#endif + +#ifndef GL_INTEL_parallel_arrays +#define GL_INTEL_parallel_arrays 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexPointervINTEL (GLint size, GLenum type, const GLvoid* *pointer); +GLAPI void APIENTRY glNormalPointervINTEL (GLenum type, const GLvoid* *pointer); +GLAPI void APIENTRY glColorPointervINTEL (GLint size, GLenum type, const GLvoid* *pointer); +GLAPI void APIENTRY glTexCoordPointervINTEL (GLint size, GLenum type, const GLvoid* *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXPOINTERVINTELPROC) (GLint size, GLenum type, const GLvoid* *pointer); +typedef void (APIENTRYP PFNGLNORMALPOINTERVINTELPROC) (GLenum type, const GLvoid* *pointer); +typedef void (APIENTRYP PFNGLCOLORPOINTERVINTELPROC) (GLint size, GLenum type, const GLvoid* *pointer); +typedef void (APIENTRYP PFNGLTEXCOORDPOINTERVINTELPROC) (GLint size, GLenum type, const GLvoid* *pointer); +#endif + +#ifndef GL_HP_occlusion_test +#define GL_HP_occlusion_test 1 +#endif + +#ifndef GL_EXT_pixel_transform +#define GL_EXT_pixel_transform 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPixelTransformParameteriEXT (GLenum target, GLenum pname, GLint param); +GLAPI void APIENTRY glPixelTransformParameterfEXT (GLenum target, GLenum pname, GLfloat param); +GLAPI void APIENTRY glPixelTransformParameterivEXT (GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glPixelTransformParameterfvEXT (GLenum target, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glGetPixelTransformParameterivEXT (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetPixelTransformParameterfvEXT (GLenum target, GLenum pname, GLfloat *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPIXELTRANSFORMPARAMETERIEXTPROC) (GLenum target, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLPIXELTRANSFORMPARAMETERFEXTPROC) (GLenum target, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLPIXELTRANSFORMPARAMETERIVEXTPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLPIXELTRANSFORMPARAMETERFVEXTPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void 
(APIENTRYP PFNGLGETPIXELTRANSFORMPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETPIXELTRANSFORMPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params); +#endif + +#ifndef GL_EXT_pixel_transform_color_table +#define GL_EXT_pixel_transform_color_table 1 +#endif + +#ifndef GL_EXT_shared_texture_palette +#define GL_EXT_shared_texture_palette 1 +#endif + +#ifndef GL_EXT_separate_specular_color +#define GL_EXT_separate_specular_color 1 +#endif + +#ifndef GL_EXT_secondary_color +#define GL_EXT_secondary_color 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glSecondaryColor3bEXT (GLbyte red, GLbyte green, GLbyte blue); +GLAPI void APIENTRY glSecondaryColor3bvEXT (const GLbyte *v); +GLAPI void APIENTRY glSecondaryColor3dEXT (GLdouble red, GLdouble green, GLdouble blue); +GLAPI void APIENTRY glSecondaryColor3dvEXT (const GLdouble *v); +GLAPI void APIENTRY glSecondaryColor3fEXT (GLfloat red, GLfloat green, GLfloat blue); +GLAPI void APIENTRY glSecondaryColor3fvEXT (const GLfloat *v); +GLAPI void APIENTRY glSecondaryColor3iEXT (GLint red, GLint green, GLint blue); +GLAPI void APIENTRY glSecondaryColor3ivEXT (const GLint *v); +GLAPI void APIENTRY glSecondaryColor3sEXT (GLshort red, GLshort green, GLshort blue); +GLAPI void APIENTRY glSecondaryColor3svEXT (const GLshort *v); +GLAPI void APIENTRY glSecondaryColor3ubEXT (GLubyte red, GLubyte green, GLubyte blue); +GLAPI void APIENTRY glSecondaryColor3ubvEXT (const GLubyte *v); +GLAPI void APIENTRY glSecondaryColor3uiEXT (GLuint red, GLuint green, GLuint blue); +GLAPI void APIENTRY glSecondaryColor3uivEXT (const GLuint *v); +GLAPI void APIENTRY glSecondaryColor3usEXT (GLushort red, GLushort green, GLushort blue); +GLAPI void APIENTRY glSecondaryColor3usvEXT (const GLushort *v); +GLAPI void APIENTRY glSecondaryColorPointerEXT (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3BEXTPROC) (GLbyte red, GLbyte green, GLbyte blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3BVEXTPROC) (const GLbyte *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3DEXTPROC) (GLdouble red, GLdouble green, GLdouble blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3DVEXTPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3FEXTPROC) (GLfloat red, GLfloat green, GLfloat blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3FVEXTPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3IEXTPROC) (GLint red, GLint green, GLint blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3IVEXTPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3SEXTPROC) (GLshort red, GLshort green, GLshort blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3SVEXTPROC) (const GLshort *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UBEXTPROC) (GLubyte red, GLubyte green, GLubyte blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UBVEXTPROC) (const GLubyte *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UIEXTPROC) (GLuint red, GLuint green, GLuint blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UIVEXTPROC) (const GLuint *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3USEXTPROC) (GLushort red, GLushort green, GLushort blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3USVEXTPROC) (const GLushort *v); +typedef void (APIENTRYP PFNGLSECONDARYCOLORPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +#endif + +#ifndef GL_EXT_texture_perturb_normal +#define 
GL_EXT_texture_perturb_normal 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTextureNormalEXT (GLenum mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXTURENORMALEXTPROC) (GLenum mode); +#endif + +#ifndef GL_EXT_multi_draw_arrays +#define GL_EXT_multi_draw_arrays 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glMultiDrawArraysEXT (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +GLAPI void APIENTRY glMultiDrawElementsEXT (GLenum mode, const GLsizei *count, GLenum type, const GLvoid* *indices, GLsizei primcount); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSEXTPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const GLvoid* *indices, GLsizei primcount); +#endif + +#ifndef GL_EXT_fog_coord +#define GL_EXT_fog_coord 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glFogCoordfEXT (GLfloat coord); +GLAPI void APIENTRY glFogCoordfvEXT (const GLfloat *coord); +GLAPI void APIENTRY glFogCoorddEXT (GLdouble coord); +GLAPI void APIENTRY glFogCoorddvEXT (const GLdouble *coord); +GLAPI void APIENTRY glFogCoordPointerEXT (GLenum type, GLsizei stride, const GLvoid *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLFOGCOORDFEXTPROC) (GLfloat coord); +typedef void (APIENTRYP PFNGLFOGCOORDFVEXTPROC) (const GLfloat *coord); +typedef void (APIENTRYP PFNGLFOGCOORDDEXTPROC) (GLdouble coord); +typedef void (APIENTRYP PFNGLFOGCOORDDVEXTPROC) (const GLdouble *coord); +typedef void (APIENTRYP PFNGLFOGCOORDPOINTEREXTPROC) (GLenum type, GLsizei stride, const GLvoid *pointer); +#endif + +#ifndef GL_REND_screen_coordinates +#define GL_REND_screen_coordinates 1 +#endif + +#ifndef GL_EXT_coordinate_frame +#define GL_EXT_coordinate_frame 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTangent3bEXT (GLbyte tx, GLbyte ty, GLbyte tz); +GLAPI void APIENTRY glTangent3bvEXT (const GLbyte *v); +GLAPI void APIENTRY glTangent3dEXT (GLdouble tx, GLdouble ty, GLdouble tz); +GLAPI void APIENTRY glTangent3dvEXT (const GLdouble *v); +GLAPI void APIENTRY glTangent3fEXT (GLfloat tx, GLfloat ty, GLfloat tz); +GLAPI void APIENTRY glTangent3fvEXT (const GLfloat *v); +GLAPI void APIENTRY glTangent3iEXT (GLint tx, GLint ty, GLint tz); +GLAPI void APIENTRY glTangent3ivEXT (const GLint *v); +GLAPI void APIENTRY glTangent3sEXT (GLshort tx, GLshort ty, GLshort tz); +GLAPI void APIENTRY glTangent3svEXT (const GLshort *v); +GLAPI void APIENTRY glBinormal3bEXT (GLbyte bx, GLbyte by, GLbyte bz); +GLAPI void APIENTRY glBinormal3bvEXT (const GLbyte *v); +GLAPI void APIENTRY glBinormal3dEXT (GLdouble bx, GLdouble by, GLdouble bz); +GLAPI void APIENTRY glBinormal3dvEXT (const GLdouble *v); +GLAPI void APIENTRY glBinormal3fEXT (GLfloat bx, GLfloat by, GLfloat bz); +GLAPI void APIENTRY glBinormal3fvEXT (const GLfloat *v); +GLAPI void APIENTRY glBinormal3iEXT (GLint bx, GLint by, GLint bz); +GLAPI void APIENTRY glBinormal3ivEXT (const GLint *v); +GLAPI void APIENTRY glBinormal3sEXT (GLshort bx, GLshort by, GLshort bz); +GLAPI void APIENTRY glBinormal3svEXT (const GLshort *v); +GLAPI void APIENTRY glTangentPointerEXT (GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glBinormalPointerEXT (GLenum type, GLsizei stride, const GLvoid *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTANGENT3BEXTPROC) (GLbyte tx, GLbyte ty, GLbyte tz); +typedef void 
(APIENTRYP PFNGLTANGENT3BVEXTPROC) (const GLbyte *v); +typedef void (APIENTRYP PFNGLTANGENT3DEXTPROC) (GLdouble tx, GLdouble ty, GLdouble tz); +typedef void (APIENTRYP PFNGLTANGENT3DVEXTPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLTANGENT3FEXTPROC) (GLfloat tx, GLfloat ty, GLfloat tz); +typedef void (APIENTRYP PFNGLTANGENT3FVEXTPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLTANGENT3IEXTPROC) (GLint tx, GLint ty, GLint tz); +typedef void (APIENTRYP PFNGLTANGENT3IVEXTPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLTANGENT3SEXTPROC) (GLshort tx, GLshort ty, GLshort tz); +typedef void (APIENTRYP PFNGLTANGENT3SVEXTPROC) (const GLshort *v); +typedef void (APIENTRYP PFNGLBINORMAL3BEXTPROC) (GLbyte bx, GLbyte by, GLbyte bz); +typedef void (APIENTRYP PFNGLBINORMAL3BVEXTPROC) (const GLbyte *v); +typedef void (APIENTRYP PFNGLBINORMAL3DEXTPROC) (GLdouble bx, GLdouble by, GLdouble bz); +typedef void (APIENTRYP PFNGLBINORMAL3DVEXTPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLBINORMAL3FEXTPROC) (GLfloat bx, GLfloat by, GLfloat bz); +typedef void (APIENTRYP PFNGLBINORMAL3FVEXTPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLBINORMAL3IEXTPROC) (GLint bx, GLint by, GLint bz); +typedef void (APIENTRYP PFNGLBINORMAL3IVEXTPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLBINORMAL3SEXTPROC) (GLshort bx, GLshort by, GLshort bz); +typedef void (APIENTRYP PFNGLBINORMAL3SVEXTPROC) (const GLshort *v); +typedef void (APIENTRYP PFNGLTANGENTPOINTEREXTPROC) (GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLBINORMALPOINTEREXTPROC) (GLenum type, GLsizei stride, const GLvoid *pointer); +#endif + +#ifndef GL_EXT_texture_env_combine +#define GL_EXT_texture_env_combine 1 +#endif + +#ifndef GL_APPLE_specular_vector +#define GL_APPLE_specular_vector 1 +#endif + +#ifndef GL_APPLE_transform_hint +#define GL_APPLE_transform_hint 1 +#endif + +#ifndef GL_SGIX_fog_scale +#define GL_SGIX_fog_scale 1 +#endif + +#ifndef GL_SUNX_constant_data +#define GL_SUNX_constant_data 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glFinishTextureSUNX (void); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLFINISHTEXTURESUNXPROC) (void); +#endif + +#ifndef GL_SUN_global_alpha +#define GL_SUN_global_alpha 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGlobalAlphaFactorbSUN (GLbyte factor); +GLAPI void APIENTRY glGlobalAlphaFactorsSUN (GLshort factor); +GLAPI void APIENTRY glGlobalAlphaFactoriSUN (GLint factor); +GLAPI void APIENTRY glGlobalAlphaFactorfSUN (GLfloat factor); +GLAPI void APIENTRY glGlobalAlphaFactordSUN (GLdouble factor); +GLAPI void APIENTRY glGlobalAlphaFactorubSUN (GLubyte factor); +GLAPI void APIENTRY glGlobalAlphaFactorusSUN (GLushort factor); +GLAPI void APIENTRY glGlobalAlphaFactoruiSUN (GLuint factor); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORBSUNPROC) (GLbyte factor); +typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORSSUNPROC) (GLshort factor); +typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORISUNPROC) (GLint factor); +typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORFSUNPROC) (GLfloat factor); +typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORDSUNPROC) (GLdouble factor); +typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORUBSUNPROC) (GLubyte factor); +typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORUSSUNPROC) (GLushort factor); +typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORUISUNPROC) (GLuint factor); +#endif + +#ifndef GL_SUN_triangle_list +#define GL_SUN_triangle_list 1 +#ifdef GL_GLEXT_PROTOTYPES 
+GLAPI void APIENTRY glReplacementCodeuiSUN (GLuint code); +GLAPI void APIENTRY glReplacementCodeusSUN (GLushort code); +GLAPI void APIENTRY glReplacementCodeubSUN (GLubyte code); +GLAPI void APIENTRY glReplacementCodeuivSUN (const GLuint *code); +GLAPI void APIENTRY glReplacementCodeusvSUN (const GLushort *code); +GLAPI void APIENTRY glReplacementCodeubvSUN (const GLubyte *code); +GLAPI void APIENTRY glReplacementCodePointerSUN (GLenum type, GLsizei stride, const GLvoid* *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUISUNPROC) (GLuint code); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUSSUNPROC) (GLushort code); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUBSUNPROC) (GLubyte code); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUIVSUNPROC) (const GLuint *code); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUSVSUNPROC) (const GLushort *code); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUBVSUNPROC) (const GLubyte *code); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEPOINTERSUNPROC) (GLenum type, GLsizei stride, const GLvoid* *pointer); +#endif + +#ifndef GL_SUN_vertex +#define GL_SUN_vertex 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glColor4ubVertex2fSUN (GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y); +GLAPI void APIENTRY glColor4ubVertex2fvSUN (const GLubyte *c, const GLfloat *v); +GLAPI void APIENTRY glColor4ubVertex3fSUN (GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glColor4ubVertex3fvSUN (const GLubyte *c, const GLfloat *v); +GLAPI void APIENTRY glColor3fVertex3fSUN (GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glColor3fVertex3fvSUN (const GLfloat *c, const GLfloat *v); +GLAPI void APIENTRY glNormal3fVertex3fSUN (GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glNormal3fVertex3fvSUN (const GLfloat *n, const GLfloat *v); +GLAPI void APIENTRY glColor4fNormal3fVertex3fSUN (GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glColor4fNormal3fVertex3fvSUN (const GLfloat *c, const GLfloat *n, const GLfloat *v); +GLAPI void APIENTRY glTexCoord2fVertex3fSUN (GLfloat s, GLfloat t, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glTexCoord2fVertex3fvSUN (const GLfloat *tc, const GLfloat *v); +GLAPI void APIENTRY glTexCoord4fVertex4fSUN (GLfloat s, GLfloat t, GLfloat p, GLfloat q, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glTexCoord4fVertex4fvSUN (const GLfloat *tc, const GLfloat *v); +GLAPI void APIENTRY glTexCoord2fColor4ubVertex3fSUN (GLfloat s, GLfloat t, GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glTexCoord2fColor4ubVertex3fvSUN (const GLfloat *tc, const GLubyte *c, const GLfloat *v); +GLAPI void APIENTRY glTexCoord2fColor3fVertex3fSUN (GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glTexCoord2fColor3fVertex3fvSUN (const GLfloat *tc, const GLfloat *c, const GLfloat *v); +GLAPI void APIENTRY glTexCoord2fNormal3fVertex3fSUN (GLfloat s, GLfloat t, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glTexCoord2fNormal3fVertex3fvSUN (const GLfloat *tc, const GLfloat *n, const GLfloat *v); +GLAPI void APIENTRY glTexCoord2fColor4fNormal3fVertex3fSUN (GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat 
nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glTexCoord2fColor4fNormal3fVertex3fvSUN (const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v); +GLAPI void APIENTRY glTexCoord4fColor4fNormal3fVertex4fSUN (GLfloat s, GLfloat t, GLfloat p, GLfloat q, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glTexCoord4fColor4fNormal3fVertex4fvSUN (const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v); +GLAPI void APIENTRY glReplacementCodeuiVertex3fSUN (GLuint rc, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glReplacementCodeuiVertex3fvSUN (const GLuint *rc, const GLfloat *v); +GLAPI void APIENTRY glReplacementCodeuiColor4ubVertex3fSUN (GLuint rc, GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glReplacementCodeuiColor4ubVertex3fvSUN (const GLuint *rc, const GLubyte *c, const GLfloat *v); +GLAPI void APIENTRY glReplacementCodeuiColor3fVertex3fSUN (GLuint rc, GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glReplacementCodeuiColor3fVertex3fvSUN (const GLuint *rc, const GLfloat *c, const GLfloat *v); +GLAPI void APIENTRY glReplacementCodeuiNormal3fVertex3fSUN (GLuint rc, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glReplacementCodeuiNormal3fVertex3fvSUN (const GLuint *rc, const GLfloat *n, const GLfloat *v); +GLAPI void APIENTRY glReplacementCodeuiColor4fNormal3fVertex3fSUN (GLuint rc, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glReplacementCodeuiColor4fNormal3fVertex3fvSUN (const GLuint *rc, const GLfloat *c, const GLfloat *n, const GLfloat *v); +GLAPI void APIENTRY glReplacementCodeuiTexCoord2fVertex3fSUN (GLuint rc, GLfloat s, GLfloat t, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glReplacementCodeuiTexCoord2fVertex3fvSUN (const GLuint *rc, const GLfloat *tc, const GLfloat *v); +GLAPI void APIENTRY glReplacementCodeuiTexCoord2fNormal3fVertex3fSUN (GLuint rc, GLfloat s, GLfloat t, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN (const GLuint *rc, const GLfloat *tc, const GLfloat *n, const GLfloat *v); +GLAPI void APIENTRY glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fSUN (GLuint rc, GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fvSUN (const GLuint *rc, const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOLOR4UBVERTEX2FSUNPROC) (GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y); +typedef void (APIENTRYP PFNGLCOLOR4UBVERTEX2FVSUNPROC) (const GLubyte *c, const GLfloat *v); +typedef void (APIENTRYP PFNGLCOLOR4UBVERTEX3FSUNPROC) (GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLCOLOR4UBVERTEX3FVSUNPROC) (const GLubyte *c, const GLfloat *v); +typedef void (APIENTRYP PFNGLCOLOR3FVERTEX3FSUNPROC) (GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLCOLOR3FVERTEX3FVSUNPROC) (const GLfloat *c, const GLfloat *v); +typedef void (APIENTRYP 
PFNGLNORMAL3FVERTEX3FSUNPROC) (GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLNORMAL3FVERTEX3FVSUNPROC) (const GLfloat *n, const GLfloat *v); +typedef void (APIENTRYP PFNGLCOLOR4FNORMAL3FVERTEX3FSUNPROC) (GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLCOLOR4FNORMAL3FVERTEX3FVSUNPROC) (const GLfloat *c, const GLfloat *n, const GLfloat *v); +typedef void (APIENTRYP PFNGLTEXCOORD2FVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLTEXCOORD2FVERTEX3FVSUNPROC) (const GLfloat *tc, const GLfloat *v); +typedef void (APIENTRYP PFNGLTEXCOORD4FVERTEX4FSUNPROC) (GLfloat s, GLfloat t, GLfloat p, GLfloat q, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLTEXCOORD4FVERTEX4FVSUNPROC) (const GLfloat *tc, const GLfloat *v); +typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR4UBVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR4UBVERTEX3FVSUNPROC) (const GLfloat *tc, const GLubyte *c, const GLfloat *v); +typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR3FVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR3FVERTEX3FVSUNPROC) (const GLfloat *tc, const GLfloat *c, const GLfloat *v); +typedef void (APIENTRYP PFNGLTEXCOORD2FNORMAL3FVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLTEXCOORD2FNORMAL3FVERTEX3FVSUNPROC) (const GLfloat *tc, const GLfloat *n, const GLfloat *v); +typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR4FNORMAL3FVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR4FNORMAL3FVERTEX3FVSUNPROC) (const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v); +typedef void (APIENTRYP PFNGLTEXCOORD4FCOLOR4FNORMAL3FVERTEX4FSUNPROC) (GLfloat s, GLfloat t, GLfloat p, GLfloat q, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLTEXCOORD4FCOLOR4FNORMAL3FVERTEX4FVSUNPROC) (const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUIVERTEX3FSUNPROC) (GLuint rc, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUIVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *v); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR4UBVERTEX3FSUNPROC) (GLuint rc, GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR4UBVERTEX3FVSUNPROC) (const GLuint *rc, const GLubyte *c, const GLfloat *v); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR3FVERTEX3FSUNPROC) (GLuint rc, GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR3FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *c, const GLfloat *v); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUINORMAL3FVERTEX3FSUNPROC) (GLuint rc, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUINORMAL3FVERTEX3FVSUNPROC) (const 
GLuint *rc, const GLfloat *n, const GLfloat *v); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR4FNORMAL3FVERTEX3FSUNPROC) (GLuint rc, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR4FNORMAL3FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *c, const GLfloat *n, const GLfloat *v); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FVERTEX3FSUNPROC) (GLuint rc, GLfloat s, GLfloat t, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *tc, const GLfloat *v); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FNORMAL3FVERTEX3FSUNPROC) (GLuint rc, GLfloat s, GLfloat t, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FNORMAL3FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *tc, const GLfloat *n, const GLfloat *v); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FCOLOR4FNORMAL3FVERTEX3FSUNPROC) (GLuint rc, GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FCOLOR4FNORMAL3FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v); +#endif + +#ifndef GL_EXT_blend_func_separate +#define GL_EXT_blend_func_separate 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBlendFuncSeparateEXT (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEEXTPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +#endif + +#ifndef GL_INGR_blend_func_separate +#define GL_INGR_blend_func_separate 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBlendFuncSeparateINGR (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEINGRPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +#endif + +#ifndef GL_INGR_color_clamp +#define GL_INGR_color_clamp 1 +#endif + +#ifndef GL_INGR_interlace_read +#define GL_INGR_interlace_read 1 +#endif + +#ifndef GL_EXT_stencil_wrap +#define GL_EXT_stencil_wrap 1 +#endif + +#ifndef GL_EXT_422_pixels +#define GL_EXT_422_pixels 1 +#endif + +#ifndef GL_NV_texgen_reflection +#define GL_NV_texgen_reflection 1 +#endif + +#ifndef GL_SUN_convolution_border_modes +#define GL_SUN_convolution_border_modes 1 +#endif + +#ifndef GL_EXT_texture_env_add +#define GL_EXT_texture_env_add 1 +#endif + +#ifndef GL_EXT_texture_lod_bias +#define GL_EXT_texture_lod_bias 1 +#endif + +#ifndef GL_EXT_texture_filter_anisotropic +#define GL_EXT_texture_filter_anisotropic 1 +#endif + +#ifndef GL_EXT_vertex_weighting +#define GL_EXT_vertex_weighting 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexWeightfEXT (GLfloat weight); +GLAPI void APIENTRY glVertexWeightfvEXT (const GLfloat *weight); +GLAPI void APIENTRY glVertexWeightPointerEXT (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXWEIGHTFEXTPROC) (GLfloat weight); +typedef void (APIENTRYP PFNGLVERTEXWEIGHTFVEXTPROC) (const GLfloat *weight); +typedef void (APIENTRYP 
PFNGLVERTEXWEIGHTPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +#endif + +#ifndef GL_NV_light_max_exponent +#define GL_NV_light_max_exponent 1 +#endif + +#ifndef GL_NV_vertex_array_range +#define GL_NV_vertex_array_range 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glFlushVertexArrayRangeNV (void); +GLAPI void APIENTRY glVertexArrayRangeNV (GLsizei length, const GLvoid *pointer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLFLUSHVERTEXARRAYRANGENVPROC) (void); +typedef void (APIENTRYP PFNGLVERTEXARRAYRANGENVPROC) (GLsizei length, const GLvoid *pointer); +#endif + +#ifndef GL_NV_register_combiners +#define GL_NV_register_combiners 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glCombinerParameterfvNV (GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glCombinerParameterfNV (GLenum pname, GLfloat param); +GLAPI void APIENTRY glCombinerParameterivNV (GLenum pname, const GLint *params); +GLAPI void APIENTRY glCombinerParameteriNV (GLenum pname, GLint param); +GLAPI void APIENTRY glCombinerInputNV (GLenum stage, GLenum portion, GLenum variable, GLenum input, GLenum mapping, GLenum componentUsage); +GLAPI void APIENTRY glCombinerOutputNV (GLenum stage, GLenum portion, GLenum abOutput, GLenum cdOutput, GLenum sumOutput, GLenum scale, GLenum bias, GLboolean abDotProduct, GLboolean cdDotProduct, GLboolean muxSum); +GLAPI void APIENTRY glFinalCombinerInputNV (GLenum variable, GLenum input, GLenum mapping, GLenum componentUsage); +GLAPI void APIENTRY glGetCombinerInputParameterfvNV (GLenum stage, GLenum portion, GLenum variable, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetCombinerInputParameterivNV (GLenum stage, GLenum portion, GLenum variable, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetCombinerOutputParameterfvNV (GLenum stage, GLenum portion, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetCombinerOutputParameterivNV (GLenum stage, GLenum portion, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetFinalCombinerInputParameterfvNV (GLenum variable, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetFinalCombinerInputParameterivNV (GLenum variable, GLenum pname, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOMBINERPARAMETERFVNVPROC) (GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLCOMBINERPARAMETERFNVPROC) (GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLCOMBINERPARAMETERIVNVPROC) (GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLCOMBINERPARAMETERINVPROC) (GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLCOMBINERINPUTNVPROC) (GLenum stage, GLenum portion, GLenum variable, GLenum input, GLenum mapping, GLenum componentUsage); +typedef void (APIENTRYP PFNGLCOMBINEROUTPUTNVPROC) (GLenum stage, GLenum portion, GLenum abOutput, GLenum cdOutput, GLenum sumOutput, GLenum scale, GLenum bias, GLboolean abDotProduct, GLboolean cdDotProduct, GLboolean muxSum); +typedef void (APIENTRYP PFNGLFINALCOMBINERINPUTNVPROC) (GLenum variable, GLenum input, GLenum mapping, GLenum componentUsage); +typedef void (APIENTRYP PFNGLGETCOMBINERINPUTPARAMETERFVNVPROC) (GLenum stage, GLenum portion, GLenum variable, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETCOMBINERINPUTPARAMETERIVNVPROC) (GLenum stage, GLenum portion, GLenum variable, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETCOMBINEROUTPUTPARAMETERFVNVPROC) (GLenum stage, GLenum portion, GLenum pname, GLfloat *params); 
+typedef void (APIENTRYP PFNGLGETCOMBINEROUTPUTPARAMETERIVNVPROC) (GLenum stage, GLenum portion, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETFINALCOMBINERINPUTPARAMETERFVNVPROC) (GLenum variable, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETFINALCOMBINERINPUTPARAMETERIVNVPROC) (GLenum variable, GLenum pname, GLint *params); +#endif + +#ifndef GL_NV_fog_distance +#define GL_NV_fog_distance 1 +#endif + +#ifndef GL_NV_texgen_emboss +#define GL_NV_texgen_emboss 1 +#endif + +#ifndef GL_NV_blend_square +#define GL_NV_blend_square 1 +#endif + +#ifndef GL_NV_texture_env_combine4 +#define GL_NV_texture_env_combine4 1 +#endif + +#ifndef GL_MESA_resize_buffers +#define GL_MESA_resize_buffers 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glResizeBuffersMESA (void); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLRESIZEBUFFERSMESAPROC) (void); +#endif + +#ifndef GL_MESA_window_pos +#define GL_MESA_window_pos 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glWindowPos2dMESA (GLdouble x, GLdouble y); +GLAPI void APIENTRY glWindowPos2dvMESA (const GLdouble *v); +GLAPI void APIENTRY glWindowPos2fMESA (GLfloat x, GLfloat y); +GLAPI void APIENTRY glWindowPos2fvMESA (const GLfloat *v); +GLAPI void APIENTRY glWindowPos2iMESA (GLint x, GLint y); +GLAPI void APIENTRY glWindowPos2ivMESA (const GLint *v); +GLAPI void APIENTRY glWindowPos2sMESA (GLshort x, GLshort y); +GLAPI void APIENTRY glWindowPos2svMESA (const GLshort *v); +GLAPI void APIENTRY glWindowPos3dMESA (GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glWindowPos3dvMESA (const GLdouble *v); +GLAPI void APIENTRY glWindowPos3fMESA (GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glWindowPos3fvMESA (const GLfloat *v); +GLAPI void APIENTRY glWindowPos3iMESA (GLint x, GLint y, GLint z); +GLAPI void APIENTRY glWindowPos3ivMESA (const GLint *v); +GLAPI void APIENTRY glWindowPos3sMESA (GLshort x, GLshort y, GLshort z); +GLAPI void APIENTRY glWindowPos3svMESA (const GLshort *v); +GLAPI void APIENTRY glWindowPos4dMESA (GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glWindowPos4dvMESA (const GLdouble *v); +GLAPI void APIENTRY glWindowPos4fMESA (GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glWindowPos4fvMESA (const GLfloat *v); +GLAPI void APIENTRY glWindowPos4iMESA (GLint x, GLint y, GLint z, GLint w); +GLAPI void APIENTRY glWindowPos4ivMESA (const GLint *v); +GLAPI void APIENTRY glWindowPos4sMESA (GLshort x, GLshort y, GLshort z, GLshort w); +GLAPI void APIENTRY glWindowPos4svMESA (const GLshort *v); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLWINDOWPOS2DMESAPROC) (GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLWINDOWPOS2DVMESAPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLWINDOWPOS2FMESAPROC) (GLfloat x, GLfloat y); +typedef void (APIENTRYP PFNGLWINDOWPOS2FVMESAPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLWINDOWPOS2IMESAPROC) (GLint x, GLint y); +typedef void (APIENTRYP PFNGLWINDOWPOS2IVMESAPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLWINDOWPOS2SMESAPROC) (GLshort x, GLshort y); +typedef void (APIENTRYP PFNGLWINDOWPOS2SVMESAPROC) (const GLshort *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3DMESAPROC) (GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLWINDOWPOS3DVMESAPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3FMESAPROC) (GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLWINDOWPOS3FVMESAPROC) (const GLfloat *v); +typedef void 
(APIENTRYP PFNGLWINDOWPOS3IMESAPROC) (GLint x, GLint y, GLint z); +typedef void (APIENTRYP PFNGLWINDOWPOS3IVMESAPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLWINDOWPOS3SMESAPROC) (GLshort x, GLshort y, GLshort z); +typedef void (APIENTRYP PFNGLWINDOWPOS3SVMESAPROC) (const GLshort *v); +typedef void (APIENTRYP PFNGLWINDOWPOS4DMESAPROC) (GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLWINDOWPOS4DVMESAPROC) (const GLdouble *v); +typedef void (APIENTRYP PFNGLWINDOWPOS4FMESAPROC) (GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLWINDOWPOS4FVMESAPROC) (const GLfloat *v); +typedef void (APIENTRYP PFNGLWINDOWPOS4IMESAPROC) (GLint x, GLint y, GLint z, GLint w); +typedef void (APIENTRYP PFNGLWINDOWPOS4IVMESAPROC) (const GLint *v); +typedef void (APIENTRYP PFNGLWINDOWPOS4SMESAPROC) (GLshort x, GLshort y, GLshort z, GLshort w); +typedef void (APIENTRYP PFNGLWINDOWPOS4SVMESAPROC) (const GLshort *v); +#endif + +#ifndef GL_IBM_cull_vertex +#define GL_IBM_cull_vertex 1 +#endif + +#ifndef GL_IBM_multimode_draw_arrays +#define GL_IBM_multimode_draw_arrays 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glMultiModeDrawArraysIBM (const GLenum *mode, const GLint *first, const GLsizei *count, GLsizei primcount, GLint modestride); +GLAPI void APIENTRY glMultiModeDrawElementsIBM (const GLenum *mode, const GLsizei *count, GLenum type, const GLvoid* const *indices, GLsizei primcount, GLint modestride); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLMULTIMODEDRAWARRAYSIBMPROC) (const GLenum *mode, const GLint *first, const GLsizei *count, GLsizei primcount, GLint modestride); +typedef void (APIENTRYP PFNGLMULTIMODEDRAWELEMENTSIBMPROC) (const GLenum *mode, const GLsizei *count, GLenum type, const GLvoid* const *indices, GLsizei primcount, GLint modestride); +#endif + +#ifndef GL_IBM_vertex_array_lists +#define GL_IBM_vertex_array_lists 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glColorPointerListIBM (GLint size, GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +GLAPI void APIENTRY glSecondaryColorPointerListIBM (GLint size, GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +GLAPI void APIENTRY glEdgeFlagPointerListIBM (GLint stride, const GLboolean* *pointer, GLint ptrstride); +GLAPI void APIENTRY glFogCoordPointerListIBM (GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +GLAPI void APIENTRY glIndexPointerListIBM (GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +GLAPI void APIENTRY glNormalPointerListIBM (GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +GLAPI void APIENTRY glTexCoordPointerListIBM (GLint size, GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +GLAPI void APIENTRY glVertexPointerListIBM (GLint size, GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOLORPOINTERLISTIBMPROC) (GLint size, GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +typedef void (APIENTRYP PFNGLSECONDARYCOLORPOINTERLISTIBMPROC) (GLint size, GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +typedef void (APIENTRYP PFNGLEDGEFLAGPOINTERLISTIBMPROC) (GLint stride, const GLboolean* *pointer, GLint ptrstride); +typedef void (APIENTRYP PFNGLFOGCOORDPOINTERLISTIBMPROC) (GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +typedef void (APIENTRYP PFNGLINDEXPOINTERLISTIBMPROC) (GLenum type, GLint 
stride, const GLvoid* *pointer, GLint ptrstride); +typedef void (APIENTRYP PFNGLNORMALPOINTERLISTIBMPROC) (GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +typedef void (APIENTRYP PFNGLTEXCOORDPOINTERLISTIBMPROC) (GLint size, GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +typedef void (APIENTRYP PFNGLVERTEXPOINTERLISTIBMPROC) (GLint size, GLenum type, GLint stride, const GLvoid* *pointer, GLint ptrstride); +#endif + +#ifndef GL_SGIX_subsample +#define GL_SGIX_subsample 1 +#endif + +#ifndef GL_SGIX_ycrcba +#define GL_SGIX_ycrcba 1 +#endif + +#ifndef GL_SGIX_ycrcb_subsample +#define GL_SGIX_ycrcb_subsample 1 +#endif + +#ifndef GL_SGIX_depth_pass_instrument +#define GL_SGIX_depth_pass_instrument 1 +#endif + +#ifndef GL_3DFX_texture_compression_FXT1 +#define GL_3DFX_texture_compression_FXT1 1 +#endif + +#ifndef GL_3DFX_multisample +#define GL_3DFX_multisample 1 +#endif + +#ifndef GL_3DFX_tbuffer +#define GL_3DFX_tbuffer 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTbufferMask3DFX (GLuint mask); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTBUFFERMASK3DFXPROC) (GLuint mask); +#endif + +#ifndef GL_EXT_multisample +#define GL_EXT_multisample 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glSampleMaskEXT (GLclampf value, GLboolean invert); +GLAPI void APIENTRY glSamplePatternEXT (GLenum pattern); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSAMPLEMASKEXTPROC) (GLclampf value, GLboolean invert); +typedef void (APIENTRYP PFNGLSAMPLEPATTERNEXTPROC) (GLenum pattern); +#endif + +#ifndef GL_SGIX_vertex_preclip +#define GL_SGIX_vertex_preclip 1 +#endif + +#ifndef GL_SGIX_convolution_accuracy +#define GL_SGIX_convolution_accuracy 1 +#endif + +#ifndef GL_SGIX_resample +#define GL_SGIX_resample 1 +#endif + +#ifndef GL_SGIS_point_line_texgen +#define GL_SGIS_point_line_texgen 1 +#endif + +#ifndef GL_SGIS_texture_color_mask +#define GL_SGIS_texture_color_mask 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTextureColorMaskSGIS (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXTURECOLORMASKSGISPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +#endif + +#ifndef GL_SGIX_igloo_interface +#define GL_SGIX_igloo_interface 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glIglooInterfaceSGIX (GLenum pname, const GLvoid *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLIGLOOINTERFACESGIXPROC) (GLenum pname, const GLvoid *params); +#endif + +#ifndef GL_EXT_texture_env_dot3 +#define GL_EXT_texture_env_dot3 1 +#endif + +#ifndef GL_ATI_texture_mirror_once +#define GL_ATI_texture_mirror_once 1 +#endif + +#ifndef GL_NV_fence +#define GL_NV_fence 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDeleteFencesNV (GLsizei n, const GLuint *fences); +GLAPI void APIENTRY glGenFencesNV (GLsizei n, GLuint *fences); +GLAPI GLboolean APIENTRY glIsFenceNV (GLuint fence); +GLAPI GLboolean APIENTRY glTestFenceNV (GLuint fence); +GLAPI void APIENTRY glGetFenceivNV (GLuint fence, GLenum pname, GLint *params); +GLAPI void APIENTRY glFinishFenceNV (GLuint fence); +GLAPI void APIENTRY glSetFenceNV (GLuint fence, GLenum condition); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDELETEFENCESNVPROC) (GLsizei n, const GLuint *fences); +typedef void (APIENTRYP PFNGLGENFENCESNVPROC) (GLsizei n, GLuint *fences); +typedef GLboolean (APIENTRYP PFNGLISFENCENVPROC) (GLuint fence); +typedef GLboolean 
(APIENTRYP PFNGLTESTFENCENVPROC) (GLuint fence); +typedef void (APIENTRYP PFNGLGETFENCEIVNVPROC) (GLuint fence, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLFINISHFENCENVPROC) (GLuint fence); +typedef void (APIENTRYP PFNGLSETFENCENVPROC) (GLuint fence, GLenum condition); +#endif + +#ifndef GL_NV_evaluators +#define GL_NV_evaluators 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glMapControlPointsNV (GLenum target, GLuint index, GLenum type, GLsizei ustride, GLsizei vstride, GLint uorder, GLint vorder, GLboolean packed, const GLvoid *points); +GLAPI void APIENTRY glMapParameterivNV (GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glMapParameterfvNV (GLenum target, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glGetMapControlPointsNV (GLenum target, GLuint index, GLenum type, GLsizei ustride, GLsizei vstride, GLboolean packed, GLvoid *points); +GLAPI void APIENTRY glGetMapParameterivNV (GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetMapParameterfvNV (GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetMapAttribParameterivNV (GLenum target, GLuint index, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetMapAttribParameterfvNV (GLenum target, GLuint index, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glEvalMapsNV (GLenum target, GLenum mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLMAPCONTROLPOINTSNVPROC) (GLenum target, GLuint index, GLenum type, GLsizei ustride, GLsizei vstride, GLint uorder, GLint vorder, GLboolean packed, const GLvoid *points); +typedef void (APIENTRYP PFNGLMAPPARAMETERIVNVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLMAPPARAMETERFVNVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLGETMAPCONTROLPOINTSNVPROC) (GLenum target, GLuint index, GLenum type, GLsizei ustride, GLsizei vstride, GLboolean packed, GLvoid *points); +typedef void (APIENTRYP PFNGLGETMAPPARAMETERIVNVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETMAPPARAMETERFVNVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETMAPATTRIBPARAMETERIVNVPROC) (GLenum target, GLuint index, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETMAPATTRIBPARAMETERFVNVPROC) (GLenum target, GLuint index, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLEVALMAPSNVPROC) (GLenum target, GLenum mode); +#endif + +#ifndef GL_NV_packed_depth_stencil +#define GL_NV_packed_depth_stencil 1 +#endif + +#ifndef GL_NV_register_combiners2 +#define GL_NV_register_combiners2 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glCombinerStageParameterfvNV (GLenum stage, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glGetCombinerStageParameterfvNV (GLenum stage, GLenum pname, GLfloat *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOMBINERSTAGEPARAMETERFVNVPROC) (GLenum stage, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLGETCOMBINERSTAGEPARAMETERFVNVPROC) (GLenum stage, GLenum pname, GLfloat *params); +#endif + +#ifndef GL_NV_texture_compression_vtc +#define GL_NV_texture_compression_vtc 1 +#endif + +#ifndef GL_NV_texture_rectangle +#define GL_NV_texture_rectangle 1 +#endif + +#ifndef GL_NV_texture_shader +#define GL_NV_texture_shader 1 +#endif + +#ifndef GL_NV_texture_shader2 +#define GL_NV_texture_shader2 1 +#endif + +#ifndef GL_NV_vertex_array_range2 +#define 
GL_NV_vertex_array_range2 1 +#endif + +#ifndef GL_NV_vertex_program +#define GL_NV_vertex_program 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLboolean APIENTRY glAreProgramsResidentNV (GLsizei n, const GLuint *programs, GLboolean *residences); +GLAPI void APIENTRY glBindProgramNV (GLenum target, GLuint id); +GLAPI void APIENTRY glDeleteProgramsNV (GLsizei n, const GLuint *programs); +GLAPI void APIENTRY glExecuteProgramNV (GLenum target, GLuint id, const GLfloat *params); +GLAPI void APIENTRY glGenProgramsNV (GLsizei n, GLuint *programs); +GLAPI void APIENTRY glGetProgramParameterdvNV (GLenum target, GLuint index, GLenum pname, GLdouble *params); +GLAPI void APIENTRY glGetProgramParameterfvNV (GLenum target, GLuint index, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetProgramivNV (GLuint id, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetProgramStringNV (GLuint id, GLenum pname, GLubyte *program); +GLAPI void APIENTRY glGetTrackMatrixivNV (GLenum target, GLuint address, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetVertexAttribdvNV (GLuint index, GLenum pname, GLdouble *params); +GLAPI void APIENTRY glGetVertexAttribfvNV (GLuint index, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetVertexAttribivNV (GLuint index, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetVertexAttribPointervNV (GLuint index, GLenum pname, GLvoid* *pointer); +GLAPI GLboolean APIENTRY glIsProgramNV (GLuint id); +GLAPI void APIENTRY glLoadProgramNV (GLenum target, GLuint id, GLsizei len, const GLubyte *program); +GLAPI void APIENTRY glProgramParameter4dNV (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glProgramParameter4dvNV (GLenum target, GLuint index, const GLdouble *v); +GLAPI void APIENTRY glProgramParameter4fNV (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glProgramParameter4fvNV (GLenum target, GLuint index, const GLfloat *v); +GLAPI void APIENTRY glProgramParameters4dvNV (GLenum target, GLuint index, GLsizei count, const GLdouble *v); +GLAPI void APIENTRY glProgramParameters4fvNV (GLenum target, GLuint index, GLsizei count, const GLfloat *v); +GLAPI void APIENTRY glRequestResidentProgramsNV (GLsizei n, const GLuint *programs); +GLAPI void APIENTRY glTrackMatrixNV (GLenum target, GLuint address, GLenum matrix, GLenum transform); +GLAPI void APIENTRY glVertexAttribPointerNV (GLuint index, GLint fsize, GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glVertexAttrib1dNV (GLuint index, GLdouble x); +GLAPI void APIENTRY glVertexAttrib1dvNV (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib1fNV (GLuint index, GLfloat x); +GLAPI void APIENTRY glVertexAttrib1fvNV (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib1sNV (GLuint index, GLshort x); +GLAPI void APIENTRY glVertexAttrib1svNV (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib2dNV (GLuint index, GLdouble x, GLdouble y); +GLAPI void APIENTRY glVertexAttrib2dvNV (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib2fNV (GLuint index, GLfloat x, GLfloat y); +GLAPI void APIENTRY glVertexAttrib2fvNV (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib2sNV (GLuint index, GLshort x, GLshort y); +GLAPI void APIENTRY glVertexAttrib2svNV (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib3dNV (GLuint index, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glVertexAttrib3dvNV (GLuint index, 
const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib3fNV (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glVertexAttrib3fvNV (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib3sNV (GLuint index, GLshort x, GLshort y, GLshort z); +GLAPI void APIENTRY glVertexAttrib3svNV (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib4dNV (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glVertexAttrib4dvNV (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttrib4fNV (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glVertexAttrib4fvNV (GLuint index, const GLfloat *v); +GLAPI void APIENTRY glVertexAttrib4sNV (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w); +GLAPI void APIENTRY glVertexAttrib4svNV (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttrib4ubNV (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w); +GLAPI void APIENTRY glVertexAttrib4ubvNV (GLuint index, const GLubyte *v); +GLAPI void APIENTRY glVertexAttribs1dvNV (GLuint index, GLsizei count, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribs1fvNV (GLuint index, GLsizei count, const GLfloat *v); +GLAPI void APIENTRY glVertexAttribs1svNV (GLuint index, GLsizei count, const GLshort *v); +GLAPI void APIENTRY glVertexAttribs2dvNV (GLuint index, GLsizei count, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribs2fvNV (GLuint index, GLsizei count, const GLfloat *v); +GLAPI void APIENTRY glVertexAttribs2svNV (GLuint index, GLsizei count, const GLshort *v); +GLAPI void APIENTRY glVertexAttribs3dvNV (GLuint index, GLsizei count, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribs3fvNV (GLuint index, GLsizei count, const GLfloat *v); +GLAPI void APIENTRY glVertexAttribs3svNV (GLuint index, GLsizei count, const GLshort *v); +GLAPI void APIENTRY glVertexAttribs4dvNV (GLuint index, GLsizei count, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribs4fvNV (GLuint index, GLsizei count, const GLfloat *v); +GLAPI void APIENTRY glVertexAttribs4svNV (GLuint index, GLsizei count, const GLshort *v); +GLAPI void APIENTRY glVertexAttribs4ubvNV (GLuint index, GLsizei count, const GLubyte *v); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLboolean (APIENTRYP PFNGLAREPROGRAMSRESIDENTNVPROC) (GLsizei n, const GLuint *programs, GLboolean *residences); +typedef void (APIENTRYP PFNGLBINDPROGRAMNVPROC) (GLenum target, GLuint id); +typedef void (APIENTRYP PFNGLDELETEPROGRAMSNVPROC) (GLsizei n, const GLuint *programs); +typedef void (APIENTRYP PFNGLEXECUTEPROGRAMNVPROC) (GLenum target, GLuint id, const GLfloat *params); +typedef void (APIENTRYP PFNGLGENPROGRAMSNVPROC) (GLsizei n, GLuint *programs); +typedef void (APIENTRYP PFNGLGETPROGRAMPARAMETERDVNVPROC) (GLenum target, GLuint index, GLenum pname, GLdouble *params); +typedef void (APIENTRYP PFNGLGETPROGRAMPARAMETERFVNVPROC) (GLenum target, GLuint index, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETPROGRAMIVNVPROC) (GLuint id, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETPROGRAMSTRINGNVPROC) (GLuint id, GLenum pname, GLubyte *program); +typedef void (APIENTRYP PFNGLGETTRACKMATRIXIVNVPROC) (GLenum target, GLuint address, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBDVNVPROC) (GLuint index, GLenum pname, GLdouble *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBFVNVPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (APIENTRYP 
PFNGLGETVERTEXATTRIBIVNVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVNVPROC) (GLuint index, GLenum pname, GLvoid* *pointer); +typedef GLboolean (APIENTRYP PFNGLISPROGRAMNVPROC) (GLuint id); +typedef void (APIENTRYP PFNGLLOADPROGRAMNVPROC) (GLenum target, GLuint id, GLsizei len, const GLubyte *program); +typedef void (APIENTRYP PFNGLPROGRAMPARAMETER4DNVPROC) (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLPROGRAMPARAMETER4DVNVPROC) (GLenum target, GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLPROGRAMPARAMETER4FNVPROC) (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLPROGRAMPARAMETER4FVNVPROC) (GLenum target, GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLPROGRAMPARAMETERS4DVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLdouble *v); +typedef void (APIENTRYP PFNGLPROGRAMPARAMETERS4FVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLfloat *v); +typedef void (APIENTRYP PFNGLREQUESTRESIDENTPROGRAMSNVPROC) (GLsizei n, const GLuint *programs); +typedef void (APIENTRYP PFNGLTRACKMATRIXNVPROC) (GLenum target, GLuint address, GLenum matrix, GLenum transform); +typedef void (APIENTRYP PFNGLVERTEXATTRIBPOINTERNVPROC) (GLuint index, GLint fsize, GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1DNVPROC) (GLuint index, GLdouble x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1DVNVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1FNVPROC) (GLuint index, GLfloat x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1FVNVPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1SNVPROC) (GLuint index, GLshort x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1SVNVPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2DNVPROC) (GLuint index, GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2DVNVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2FNVPROC) (GLuint index, GLfloat x, GLfloat y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2FVNVPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2SNVPROC) (GLuint index, GLshort x, GLshort y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2SVNVPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3DNVPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3DVNVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3FNVPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3FVNVPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3SNVPROC) (GLuint index, GLshort x, GLshort y, GLshort z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3SVNVPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4DNVPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4DVNVPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4FNVPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4FVNVPROC) (GLuint index, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4SNVPROC) (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w); 
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4SVNVPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4UBNVPROC) (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4UBVNVPROC) (GLuint index, const GLubyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS1DVNVPROC) (GLuint index, GLsizei count, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS1FVNVPROC) (GLuint index, GLsizei count, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS1SVNVPROC) (GLuint index, GLsizei count, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS2DVNVPROC) (GLuint index, GLsizei count, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS2FVNVPROC) (GLuint index, GLsizei count, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS2SVNVPROC) (GLuint index, GLsizei count, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS3DVNVPROC) (GLuint index, GLsizei count, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS3FVNVPROC) (GLuint index, GLsizei count, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS3SVNVPROC) (GLuint index, GLsizei count, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS4DVNVPROC) (GLuint index, GLsizei count, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS4FVNVPROC) (GLuint index, GLsizei count, const GLfloat *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS4SVNVPROC) (GLuint index, GLsizei count, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS4UBVNVPROC) (GLuint index, GLsizei count, const GLubyte *v); +#endif + +#ifndef GL_SGIX_texture_coordinate_clamp +#define GL_SGIX_texture_coordinate_clamp 1 +#endif + +#ifndef GL_SGIX_scalebias_hint +#define GL_SGIX_scalebias_hint 1 +#endif + +#ifndef GL_OML_interlace +#define GL_OML_interlace 1 +#endif + +#ifndef GL_OML_subsample +#define GL_OML_subsample 1 +#endif + +#ifndef GL_OML_resample +#define GL_OML_resample 1 +#endif + +#ifndef GL_NV_copy_depth_to_color +#define GL_NV_copy_depth_to_color 1 +#endif + +#ifndef GL_ATI_envmap_bumpmap +#define GL_ATI_envmap_bumpmap 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexBumpParameterivATI (GLenum pname, const GLint *param); +GLAPI void APIENTRY glTexBumpParameterfvATI (GLenum pname, const GLfloat *param); +GLAPI void APIENTRY glGetTexBumpParameterivATI (GLenum pname, GLint *param); +GLAPI void APIENTRY glGetTexBumpParameterfvATI (GLenum pname, GLfloat *param); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXBUMPPARAMETERIVATIPROC) (GLenum pname, const GLint *param); +typedef void (APIENTRYP PFNGLTEXBUMPPARAMETERFVATIPROC) (GLenum pname, const GLfloat *param); +typedef void (APIENTRYP PFNGLGETTEXBUMPPARAMETERIVATIPROC) (GLenum pname, GLint *param); +typedef void (APIENTRYP PFNGLGETTEXBUMPPARAMETERFVATIPROC) (GLenum pname, GLfloat *param); +#endif + +#ifndef GL_ATI_fragment_shader +#define GL_ATI_fragment_shader 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLuint APIENTRY glGenFragmentShadersATI (GLuint range); +GLAPI void APIENTRY glBindFragmentShaderATI (GLuint id); +GLAPI void APIENTRY glDeleteFragmentShaderATI (GLuint id); +GLAPI void APIENTRY glBeginFragmentShaderATI (void); +GLAPI void APIENTRY glEndFragmentShaderATI (void); +GLAPI void APIENTRY glPassTexCoordATI (GLuint dst, GLuint coord, GLenum swizzle); +GLAPI void APIENTRY glSampleMapATI (GLuint dst, GLuint interp, GLenum swizzle); +GLAPI void APIENTRY glColorFragmentOp1ATI (GLenum op, GLuint dst, GLuint dstMask, GLuint 
dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod); +GLAPI void APIENTRY glColorFragmentOp2ATI (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod); +GLAPI void APIENTRY glColorFragmentOp3ATI (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod, GLuint arg3, GLuint arg3Rep, GLuint arg3Mod); +GLAPI void APIENTRY glAlphaFragmentOp1ATI (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod); +GLAPI void APIENTRY glAlphaFragmentOp2ATI (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod); +GLAPI void APIENTRY glAlphaFragmentOp3ATI (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod, GLuint arg3, GLuint arg3Rep, GLuint arg3Mod); +GLAPI void APIENTRY glSetFragmentShaderConstantATI (GLuint dst, const GLfloat *value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLuint (APIENTRYP PFNGLGENFRAGMENTSHADERSATIPROC) (GLuint range); +typedef void (APIENTRYP PFNGLBINDFRAGMENTSHADERATIPROC) (GLuint id); +typedef void (APIENTRYP PFNGLDELETEFRAGMENTSHADERATIPROC) (GLuint id); +typedef void (APIENTRYP PFNGLBEGINFRAGMENTSHADERATIPROC) (void); +typedef void (APIENTRYP PFNGLENDFRAGMENTSHADERATIPROC) (void); +typedef void (APIENTRYP PFNGLPASSTEXCOORDATIPROC) (GLuint dst, GLuint coord, GLenum swizzle); +typedef void (APIENTRYP PFNGLSAMPLEMAPATIPROC) (GLuint dst, GLuint interp, GLenum swizzle); +typedef void (APIENTRYP PFNGLCOLORFRAGMENTOP1ATIPROC) (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod); +typedef void (APIENTRYP PFNGLCOLORFRAGMENTOP2ATIPROC) (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod); +typedef void (APIENTRYP PFNGLCOLORFRAGMENTOP3ATIPROC) (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod, GLuint arg3, GLuint arg3Rep, GLuint arg3Mod); +typedef void (APIENTRYP PFNGLALPHAFRAGMENTOP1ATIPROC) (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod); +typedef void (APIENTRYP PFNGLALPHAFRAGMENTOP2ATIPROC) (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod); +typedef void (APIENTRYP PFNGLALPHAFRAGMENTOP3ATIPROC) (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod, GLuint arg3, GLuint arg3Rep, GLuint arg3Mod); +typedef void (APIENTRYP PFNGLSETFRAGMENTSHADERCONSTANTATIPROC) (GLuint dst, const GLfloat *value); +#endif + +#ifndef GL_ATI_pn_triangles +#define GL_ATI_pn_triangles 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPNTrianglesiATI (GLenum pname, GLint param); +GLAPI void APIENTRY glPNTrianglesfATI (GLenum pname, GLfloat param); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPNTRIANGLESIATIPROC) (GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLPNTRIANGLESFATIPROC) (GLenum pname, GLfloat param); +#endif + +#ifndef GL_ATI_vertex_array_object +#define GL_ATI_vertex_array_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLuint APIENTRY glNewObjectBufferATI (GLsizei size, const GLvoid *pointer, GLenum usage); 
+GLAPI GLboolean APIENTRY glIsObjectBufferATI (GLuint buffer); +GLAPI void APIENTRY glUpdateObjectBufferATI (GLuint buffer, GLuint offset, GLsizei size, const GLvoid *pointer, GLenum preserve); +GLAPI void APIENTRY glGetObjectBufferfvATI (GLuint buffer, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetObjectBufferivATI (GLuint buffer, GLenum pname, GLint *params); +GLAPI void APIENTRY glFreeObjectBufferATI (GLuint buffer); +GLAPI void APIENTRY glArrayObjectATI (GLenum array, GLint size, GLenum type, GLsizei stride, GLuint buffer, GLuint offset); +GLAPI void APIENTRY glGetArrayObjectfvATI (GLenum array, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetArrayObjectivATI (GLenum array, GLenum pname, GLint *params); +GLAPI void APIENTRY glVariantArrayObjectATI (GLuint id, GLenum type, GLsizei stride, GLuint buffer, GLuint offset); +GLAPI void APIENTRY glGetVariantArrayObjectfvATI (GLuint id, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetVariantArrayObjectivATI (GLuint id, GLenum pname, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLuint (APIENTRYP PFNGLNEWOBJECTBUFFERATIPROC) (GLsizei size, const GLvoid *pointer, GLenum usage); +typedef GLboolean (APIENTRYP PFNGLISOBJECTBUFFERATIPROC) (GLuint buffer); +typedef void (APIENTRYP PFNGLUPDATEOBJECTBUFFERATIPROC) (GLuint buffer, GLuint offset, GLsizei size, const GLvoid *pointer, GLenum preserve); +typedef void (APIENTRYP PFNGLGETOBJECTBUFFERFVATIPROC) (GLuint buffer, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETOBJECTBUFFERIVATIPROC) (GLuint buffer, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLFREEOBJECTBUFFERATIPROC) (GLuint buffer); +typedef void (APIENTRYP PFNGLARRAYOBJECTATIPROC) (GLenum array, GLint size, GLenum type, GLsizei stride, GLuint buffer, GLuint offset); +typedef void (APIENTRYP PFNGLGETARRAYOBJECTFVATIPROC) (GLenum array, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETARRAYOBJECTIVATIPROC) (GLenum array, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLVARIANTARRAYOBJECTATIPROC) (GLuint id, GLenum type, GLsizei stride, GLuint buffer, GLuint offset); +typedef void (APIENTRYP PFNGLGETVARIANTARRAYOBJECTFVATIPROC) (GLuint id, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETVARIANTARRAYOBJECTIVATIPROC) (GLuint id, GLenum pname, GLint *params); +#endif + +#ifndef GL_EXT_vertex_shader +#define GL_EXT_vertex_shader 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBeginVertexShaderEXT (void); +GLAPI void APIENTRY glEndVertexShaderEXT (void); +GLAPI void APIENTRY glBindVertexShaderEXT (GLuint id); +GLAPI GLuint APIENTRY glGenVertexShadersEXT (GLuint range); +GLAPI void APIENTRY glDeleteVertexShaderEXT (GLuint id); +GLAPI void APIENTRY glShaderOp1EXT (GLenum op, GLuint res, GLuint arg1); +GLAPI void APIENTRY glShaderOp2EXT (GLenum op, GLuint res, GLuint arg1, GLuint arg2); +GLAPI void APIENTRY glShaderOp3EXT (GLenum op, GLuint res, GLuint arg1, GLuint arg2, GLuint arg3); +GLAPI void APIENTRY glSwizzleEXT (GLuint res, GLuint in, GLenum outX, GLenum outY, GLenum outZ, GLenum outW); +GLAPI void APIENTRY glWriteMaskEXT (GLuint res, GLuint in, GLenum outX, GLenum outY, GLenum outZ, GLenum outW); +GLAPI void APIENTRY glInsertComponentEXT (GLuint res, GLuint src, GLuint num); +GLAPI void APIENTRY glExtractComponentEXT (GLuint res, GLuint src, GLuint num); +GLAPI GLuint APIENTRY glGenSymbolsEXT (GLenum datatype, GLenum storagetype, GLenum range, GLuint components); +GLAPI void APIENTRY glSetInvariantEXT (GLuint id, GLenum 
type, const GLvoid *addr); +GLAPI void APIENTRY glSetLocalConstantEXT (GLuint id, GLenum type, const GLvoid *addr); +GLAPI void APIENTRY glVariantbvEXT (GLuint id, const GLbyte *addr); +GLAPI void APIENTRY glVariantsvEXT (GLuint id, const GLshort *addr); +GLAPI void APIENTRY glVariantivEXT (GLuint id, const GLint *addr); +GLAPI void APIENTRY glVariantfvEXT (GLuint id, const GLfloat *addr); +GLAPI void APIENTRY glVariantdvEXT (GLuint id, const GLdouble *addr); +GLAPI void APIENTRY glVariantubvEXT (GLuint id, const GLubyte *addr); +GLAPI void APIENTRY glVariantusvEXT (GLuint id, const GLushort *addr); +GLAPI void APIENTRY glVariantuivEXT (GLuint id, const GLuint *addr); +GLAPI void APIENTRY glVariantPointerEXT (GLuint id, GLenum type, GLuint stride, const GLvoid *addr); +GLAPI void APIENTRY glEnableVariantClientStateEXT (GLuint id); +GLAPI void APIENTRY glDisableVariantClientStateEXT (GLuint id); +GLAPI GLuint APIENTRY glBindLightParameterEXT (GLenum light, GLenum value); +GLAPI GLuint APIENTRY glBindMaterialParameterEXT (GLenum face, GLenum value); +GLAPI GLuint APIENTRY glBindTexGenParameterEXT (GLenum unit, GLenum coord, GLenum value); +GLAPI GLuint APIENTRY glBindTextureUnitParameterEXT (GLenum unit, GLenum value); +GLAPI GLuint APIENTRY glBindParameterEXT (GLenum value); +GLAPI GLboolean APIENTRY glIsVariantEnabledEXT (GLuint id, GLenum cap); +GLAPI void APIENTRY glGetVariantBooleanvEXT (GLuint id, GLenum value, GLboolean *data); +GLAPI void APIENTRY glGetVariantIntegervEXT (GLuint id, GLenum value, GLint *data); +GLAPI void APIENTRY glGetVariantFloatvEXT (GLuint id, GLenum value, GLfloat *data); +GLAPI void APIENTRY glGetVariantPointervEXT (GLuint id, GLenum value, GLvoid* *data); +GLAPI void APIENTRY glGetInvariantBooleanvEXT (GLuint id, GLenum value, GLboolean *data); +GLAPI void APIENTRY glGetInvariantIntegervEXT (GLuint id, GLenum value, GLint *data); +GLAPI void APIENTRY glGetInvariantFloatvEXT (GLuint id, GLenum value, GLfloat *data); +GLAPI void APIENTRY glGetLocalConstantBooleanvEXT (GLuint id, GLenum value, GLboolean *data); +GLAPI void APIENTRY glGetLocalConstantIntegervEXT (GLuint id, GLenum value, GLint *data); +GLAPI void APIENTRY glGetLocalConstantFloatvEXT (GLuint id, GLenum value, GLfloat *data); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBEGINVERTEXSHADEREXTPROC) (void); +typedef void (APIENTRYP PFNGLENDVERTEXSHADEREXTPROC) (void); +typedef void (APIENTRYP PFNGLBINDVERTEXSHADEREXTPROC) (GLuint id); +typedef GLuint (APIENTRYP PFNGLGENVERTEXSHADERSEXTPROC) (GLuint range); +typedef void (APIENTRYP PFNGLDELETEVERTEXSHADEREXTPROC) (GLuint id); +typedef void (APIENTRYP PFNGLSHADEROP1EXTPROC) (GLenum op, GLuint res, GLuint arg1); +typedef void (APIENTRYP PFNGLSHADEROP2EXTPROC) (GLenum op, GLuint res, GLuint arg1, GLuint arg2); +typedef void (APIENTRYP PFNGLSHADEROP3EXTPROC) (GLenum op, GLuint res, GLuint arg1, GLuint arg2, GLuint arg3); +typedef void (APIENTRYP PFNGLSWIZZLEEXTPROC) (GLuint res, GLuint in, GLenum outX, GLenum outY, GLenum outZ, GLenum outW); +typedef void (APIENTRYP PFNGLWRITEMASKEXTPROC) (GLuint res, GLuint in, GLenum outX, GLenum outY, GLenum outZ, GLenum outW); +typedef void (APIENTRYP PFNGLINSERTCOMPONENTEXTPROC) (GLuint res, GLuint src, GLuint num); +typedef void (APIENTRYP PFNGLEXTRACTCOMPONENTEXTPROC) (GLuint res, GLuint src, GLuint num); +typedef GLuint (APIENTRYP PFNGLGENSYMBOLSEXTPROC) (GLenum datatype, GLenum storagetype, GLenum range, GLuint components); +typedef void (APIENTRYP PFNGLSETINVARIANTEXTPROC) (GLuint id, GLenum 
type, const GLvoid *addr); +typedef void (APIENTRYP PFNGLSETLOCALCONSTANTEXTPROC) (GLuint id, GLenum type, const GLvoid *addr); +typedef void (APIENTRYP PFNGLVARIANTBVEXTPROC) (GLuint id, const GLbyte *addr); +typedef void (APIENTRYP PFNGLVARIANTSVEXTPROC) (GLuint id, const GLshort *addr); +typedef void (APIENTRYP PFNGLVARIANTIVEXTPROC) (GLuint id, const GLint *addr); +typedef void (APIENTRYP PFNGLVARIANTFVEXTPROC) (GLuint id, const GLfloat *addr); +typedef void (APIENTRYP PFNGLVARIANTDVEXTPROC) (GLuint id, const GLdouble *addr); +typedef void (APIENTRYP PFNGLVARIANTUBVEXTPROC) (GLuint id, const GLubyte *addr); +typedef void (APIENTRYP PFNGLVARIANTUSVEXTPROC) (GLuint id, const GLushort *addr); +typedef void (APIENTRYP PFNGLVARIANTUIVEXTPROC) (GLuint id, const GLuint *addr); +typedef void (APIENTRYP PFNGLVARIANTPOINTEREXTPROC) (GLuint id, GLenum type, GLuint stride, const GLvoid *addr); +typedef void (APIENTRYP PFNGLENABLEVARIANTCLIENTSTATEEXTPROC) (GLuint id); +typedef void (APIENTRYP PFNGLDISABLEVARIANTCLIENTSTATEEXTPROC) (GLuint id); +typedef GLuint (APIENTRYP PFNGLBINDLIGHTPARAMETEREXTPROC) (GLenum light, GLenum value); +typedef GLuint (APIENTRYP PFNGLBINDMATERIALPARAMETEREXTPROC) (GLenum face, GLenum value); +typedef GLuint (APIENTRYP PFNGLBINDTEXGENPARAMETEREXTPROC) (GLenum unit, GLenum coord, GLenum value); +typedef GLuint (APIENTRYP PFNGLBINDTEXTUREUNITPARAMETEREXTPROC) (GLenum unit, GLenum value); +typedef GLuint (APIENTRYP PFNGLBINDPARAMETEREXTPROC) (GLenum value); +typedef GLboolean (APIENTRYP PFNGLISVARIANTENABLEDEXTPROC) (GLuint id, GLenum cap); +typedef void (APIENTRYP PFNGLGETVARIANTBOOLEANVEXTPROC) (GLuint id, GLenum value, GLboolean *data); +typedef void (APIENTRYP PFNGLGETVARIANTINTEGERVEXTPROC) (GLuint id, GLenum value, GLint *data); +typedef void (APIENTRYP PFNGLGETVARIANTFLOATVEXTPROC) (GLuint id, GLenum value, GLfloat *data); +typedef void (APIENTRYP PFNGLGETVARIANTPOINTERVEXTPROC) (GLuint id, GLenum value, GLvoid* *data); +typedef void (APIENTRYP PFNGLGETINVARIANTBOOLEANVEXTPROC) (GLuint id, GLenum value, GLboolean *data); +typedef void (APIENTRYP PFNGLGETINVARIANTINTEGERVEXTPROC) (GLuint id, GLenum value, GLint *data); +typedef void (APIENTRYP PFNGLGETINVARIANTFLOATVEXTPROC) (GLuint id, GLenum value, GLfloat *data); +typedef void (APIENTRYP PFNGLGETLOCALCONSTANTBOOLEANVEXTPROC) (GLuint id, GLenum value, GLboolean *data); +typedef void (APIENTRYP PFNGLGETLOCALCONSTANTINTEGERVEXTPROC) (GLuint id, GLenum value, GLint *data); +typedef void (APIENTRYP PFNGLGETLOCALCONSTANTFLOATVEXTPROC) (GLuint id, GLenum value, GLfloat *data); +#endif + +#ifndef GL_ATI_vertex_streams +#define GL_ATI_vertex_streams 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexStream1sATI (GLenum stream, GLshort x); +GLAPI void APIENTRY glVertexStream1svATI (GLenum stream, const GLshort *coords); +GLAPI void APIENTRY glVertexStream1iATI (GLenum stream, GLint x); +GLAPI void APIENTRY glVertexStream1ivATI (GLenum stream, const GLint *coords); +GLAPI void APIENTRY glVertexStream1fATI (GLenum stream, GLfloat x); +GLAPI void APIENTRY glVertexStream1fvATI (GLenum stream, const GLfloat *coords); +GLAPI void APIENTRY glVertexStream1dATI (GLenum stream, GLdouble x); +GLAPI void APIENTRY glVertexStream1dvATI (GLenum stream, const GLdouble *coords); +GLAPI void APIENTRY glVertexStream2sATI (GLenum stream, GLshort x, GLshort y); +GLAPI void APIENTRY glVertexStream2svATI (GLenum stream, const GLshort *coords); +GLAPI void APIENTRY glVertexStream2iATI (GLenum stream, GLint x, GLint y); +GLAPI void APIENTRY 
glVertexStream2ivATI (GLenum stream, const GLint *coords); +GLAPI void APIENTRY glVertexStream2fATI (GLenum stream, GLfloat x, GLfloat y); +GLAPI void APIENTRY glVertexStream2fvATI (GLenum stream, const GLfloat *coords); +GLAPI void APIENTRY glVertexStream2dATI (GLenum stream, GLdouble x, GLdouble y); +GLAPI void APIENTRY glVertexStream2dvATI (GLenum stream, const GLdouble *coords); +GLAPI void APIENTRY glVertexStream3sATI (GLenum stream, GLshort x, GLshort y, GLshort z); +GLAPI void APIENTRY glVertexStream3svATI (GLenum stream, const GLshort *coords); +GLAPI void APIENTRY glVertexStream3iATI (GLenum stream, GLint x, GLint y, GLint z); +GLAPI void APIENTRY glVertexStream3ivATI (GLenum stream, const GLint *coords); +GLAPI void APIENTRY glVertexStream3fATI (GLenum stream, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glVertexStream3fvATI (GLenum stream, const GLfloat *coords); +GLAPI void APIENTRY glVertexStream3dATI (GLenum stream, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glVertexStream3dvATI (GLenum stream, const GLdouble *coords); +GLAPI void APIENTRY glVertexStream4sATI (GLenum stream, GLshort x, GLshort y, GLshort z, GLshort w); +GLAPI void APIENTRY glVertexStream4svATI (GLenum stream, const GLshort *coords); +GLAPI void APIENTRY glVertexStream4iATI (GLenum stream, GLint x, GLint y, GLint z, GLint w); +GLAPI void APIENTRY glVertexStream4ivATI (GLenum stream, const GLint *coords); +GLAPI void APIENTRY glVertexStream4fATI (GLenum stream, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glVertexStream4fvATI (GLenum stream, const GLfloat *coords); +GLAPI void APIENTRY glVertexStream4dATI (GLenum stream, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glVertexStream4dvATI (GLenum stream, const GLdouble *coords); +GLAPI void APIENTRY glNormalStream3bATI (GLenum stream, GLbyte nx, GLbyte ny, GLbyte nz); +GLAPI void APIENTRY glNormalStream3bvATI (GLenum stream, const GLbyte *coords); +GLAPI void APIENTRY glNormalStream3sATI (GLenum stream, GLshort nx, GLshort ny, GLshort nz); +GLAPI void APIENTRY glNormalStream3svATI (GLenum stream, const GLshort *coords); +GLAPI void APIENTRY glNormalStream3iATI (GLenum stream, GLint nx, GLint ny, GLint nz); +GLAPI void APIENTRY glNormalStream3ivATI (GLenum stream, const GLint *coords); +GLAPI void APIENTRY glNormalStream3fATI (GLenum stream, GLfloat nx, GLfloat ny, GLfloat nz); +GLAPI void APIENTRY glNormalStream3fvATI (GLenum stream, const GLfloat *coords); +GLAPI void APIENTRY glNormalStream3dATI (GLenum stream, GLdouble nx, GLdouble ny, GLdouble nz); +GLAPI void APIENTRY glNormalStream3dvATI (GLenum stream, const GLdouble *coords); +GLAPI void APIENTRY glClientActiveVertexStreamATI (GLenum stream); +GLAPI void APIENTRY glVertexBlendEnviATI (GLenum pname, GLint param); +GLAPI void APIENTRY glVertexBlendEnvfATI (GLenum pname, GLfloat param); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXSTREAM1SATIPROC) (GLenum stream, GLshort x); +typedef void (APIENTRYP PFNGLVERTEXSTREAM1SVATIPROC) (GLenum stream, const GLshort *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM1IATIPROC) (GLenum stream, GLint x); +typedef void (APIENTRYP PFNGLVERTEXSTREAM1IVATIPROC) (GLenum stream, const GLint *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM1FATIPROC) (GLenum stream, GLfloat x); +typedef void (APIENTRYP PFNGLVERTEXSTREAM1FVATIPROC) (GLenum stream, const GLfloat *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM1DATIPROC) (GLenum stream, GLdouble x); +typedef void (APIENTRYP 
PFNGLVERTEXSTREAM1DVATIPROC) (GLenum stream, const GLdouble *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM2SATIPROC) (GLenum stream, GLshort x, GLshort y); +typedef void (APIENTRYP PFNGLVERTEXSTREAM2SVATIPROC) (GLenum stream, const GLshort *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM2IATIPROC) (GLenum stream, GLint x, GLint y); +typedef void (APIENTRYP PFNGLVERTEXSTREAM2IVATIPROC) (GLenum stream, const GLint *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM2FATIPROC) (GLenum stream, GLfloat x, GLfloat y); +typedef void (APIENTRYP PFNGLVERTEXSTREAM2FVATIPROC) (GLenum stream, const GLfloat *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM2DATIPROC) (GLenum stream, GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLVERTEXSTREAM2DVATIPROC) (GLenum stream, const GLdouble *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM3SATIPROC) (GLenum stream, GLshort x, GLshort y, GLshort z); +typedef void (APIENTRYP PFNGLVERTEXSTREAM3SVATIPROC) (GLenum stream, const GLshort *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM3IATIPROC) (GLenum stream, GLint x, GLint y, GLint z); +typedef void (APIENTRYP PFNGLVERTEXSTREAM3IVATIPROC) (GLenum stream, const GLint *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM3FATIPROC) (GLenum stream, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLVERTEXSTREAM3FVATIPROC) (GLenum stream, const GLfloat *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM3DATIPROC) (GLenum stream, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLVERTEXSTREAM3DVATIPROC) (GLenum stream, const GLdouble *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM4SATIPROC) (GLenum stream, GLshort x, GLshort y, GLshort z, GLshort w); +typedef void (APIENTRYP PFNGLVERTEXSTREAM4SVATIPROC) (GLenum stream, const GLshort *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM4IATIPROC) (GLenum stream, GLint x, GLint y, GLint z, GLint w); +typedef void (APIENTRYP PFNGLVERTEXSTREAM4IVATIPROC) (GLenum stream, const GLint *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM4FATIPROC) (GLenum stream, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLVERTEXSTREAM4FVATIPROC) (GLenum stream, const GLfloat *coords); +typedef void (APIENTRYP PFNGLVERTEXSTREAM4DATIPROC) (GLenum stream, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLVERTEXSTREAM4DVATIPROC) (GLenum stream, const GLdouble *coords); +typedef void (APIENTRYP PFNGLNORMALSTREAM3BATIPROC) (GLenum stream, GLbyte nx, GLbyte ny, GLbyte nz); +typedef void (APIENTRYP PFNGLNORMALSTREAM3BVATIPROC) (GLenum stream, const GLbyte *coords); +typedef void (APIENTRYP PFNGLNORMALSTREAM3SATIPROC) (GLenum stream, GLshort nx, GLshort ny, GLshort nz); +typedef void (APIENTRYP PFNGLNORMALSTREAM3SVATIPROC) (GLenum stream, const GLshort *coords); +typedef void (APIENTRYP PFNGLNORMALSTREAM3IATIPROC) (GLenum stream, GLint nx, GLint ny, GLint nz); +typedef void (APIENTRYP PFNGLNORMALSTREAM3IVATIPROC) (GLenum stream, const GLint *coords); +typedef void (APIENTRYP PFNGLNORMALSTREAM3FATIPROC) (GLenum stream, GLfloat nx, GLfloat ny, GLfloat nz); +typedef void (APIENTRYP PFNGLNORMALSTREAM3FVATIPROC) (GLenum stream, const GLfloat *coords); +typedef void (APIENTRYP PFNGLNORMALSTREAM3DATIPROC) (GLenum stream, GLdouble nx, GLdouble ny, GLdouble nz); +typedef void (APIENTRYP PFNGLNORMALSTREAM3DVATIPROC) (GLenum stream, const GLdouble *coords); +typedef void (APIENTRYP PFNGLCLIENTACTIVEVERTEXSTREAMATIPROC) (GLenum stream); +typedef void (APIENTRYP PFNGLVERTEXBLENDENVIATIPROC) 
(GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLVERTEXBLENDENVFATIPROC) (GLenum pname, GLfloat param); +#endif + +#ifndef GL_ATI_element_array +#define GL_ATI_element_array 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glElementPointerATI (GLenum type, const GLvoid *pointer); +GLAPI void APIENTRY glDrawElementArrayATI (GLenum mode, GLsizei count); +GLAPI void APIENTRY glDrawRangeElementArrayATI (GLenum mode, GLuint start, GLuint end, GLsizei count); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLELEMENTPOINTERATIPROC) (GLenum type, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLDRAWELEMENTARRAYATIPROC) (GLenum mode, GLsizei count); +typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTARRAYATIPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count); +#endif + +#ifndef GL_SUN_mesh_array +#define GL_SUN_mesh_array 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawMeshArraysSUN (GLenum mode, GLint first, GLsizei count, GLsizei width); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWMESHARRAYSSUNPROC) (GLenum mode, GLint first, GLsizei count, GLsizei width); +#endif + +#ifndef GL_SUN_slice_accum +#define GL_SUN_slice_accum 1 +#endif + +#ifndef GL_NV_multisample_filter_hint +#define GL_NV_multisample_filter_hint 1 +#endif + +#ifndef GL_NV_depth_clamp +#define GL_NV_depth_clamp 1 +#endif + +#ifndef GL_NV_occlusion_query +#define GL_NV_occlusion_query 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGenOcclusionQueriesNV (GLsizei n, GLuint *ids); +GLAPI void APIENTRY glDeleteOcclusionQueriesNV (GLsizei n, const GLuint *ids); +GLAPI GLboolean APIENTRY glIsOcclusionQueryNV (GLuint id); +GLAPI void APIENTRY glBeginOcclusionQueryNV (GLuint id); +GLAPI void APIENTRY glEndOcclusionQueryNV (void); +GLAPI void APIENTRY glGetOcclusionQueryivNV (GLuint id, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetOcclusionQueryuivNV (GLuint id, GLenum pname, GLuint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGENOCCLUSIONQUERIESNVPROC) (GLsizei n, GLuint *ids); +typedef void (APIENTRYP PFNGLDELETEOCCLUSIONQUERIESNVPROC) (GLsizei n, const GLuint *ids); +typedef GLboolean (APIENTRYP PFNGLISOCCLUSIONQUERYNVPROC) (GLuint id); +typedef void (APIENTRYP PFNGLBEGINOCCLUSIONQUERYNVPROC) (GLuint id); +typedef void (APIENTRYP PFNGLENDOCCLUSIONQUERYNVPROC) (void); +typedef void (APIENTRYP PFNGLGETOCCLUSIONQUERYIVNVPROC) (GLuint id, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETOCCLUSIONQUERYUIVNVPROC) (GLuint id, GLenum pname, GLuint *params); +#endif + +#ifndef GL_NV_point_sprite +#define GL_NV_point_sprite 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPointParameteriNV (GLenum pname, GLint param); +GLAPI void APIENTRY glPointParameterivNV (GLenum pname, const GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPOINTPARAMETERINVPROC) (GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLPOINTPARAMETERIVNVPROC) (GLenum pname, const GLint *params); +#endif + +#ifndef GL_NV_texture_shader3 +#define GL_NV_texture_shader3 1 +#endif + +#ifndef GL_NV_vertex_program1_1 +#define GL_NV_vertex_program1_1 1 +#endif + +#ifndef GL_EXT_shadow_funcs +#define GL_EXT_shadow_funcs 1 +#endif + +#ifndef GL_EXT_stencil_two_side +#define GL_EXT_stencil_two_side 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glActiveStencilFaceEXT (GLenum face); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLACTIVESTENCILFACEEXTPROC) (GLenum face); +#endif + +#ifndef 
GL_ATI_text_fragment_shader +#define GL_ATI_text_fragment_shader 1 +#endif + +#ifndef GL_APPLE_client_storage +#define GL_APPLE_client_storage 1 +#endif + +#ifndef GL_APPLE_element_array +#define GL_APPLE_element_array 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glElementPointerAPPLE (GLenum type, const GLvoid *pointer); +GLAPI void APIENTRY glDrawElementArrayAPPLE (GLenum mode, GLint first, GLsizei count); +GLAPI void APIENTRY glDrawRangeElementArrayAPPLE (GLenum mode, GLuint start, GLuint end, GLint first, GLsizei count); +GLAPI void APIENTRY glMultiDrawElementArrayAPPLE (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +GLAPI void APIENTRY glMultiDrawRangeElementArrayAPPLE (GLenum mode, GLuint start, GLuint end, const GLint *first, const GLsizei *count, GLsizei primcount); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLELEMENTPOINTERAPPLEPROC) (GLenum type, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLDRAWELEMENTARRAYAPPLEPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTARRAYAPPLEPROC) (GLenum mode, GLuint start, GLuint end, GLint first, GLsizei count); +typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTARRAYAPPLEPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +typedef void (APIENTRYP PFNGLMULTIDRAWRANGEELEMENTARRAYAPPLEPROC) (GLenum mode, GLuint start, GLuint end, const GLint *first, const GLsizei *count, GLsizei primcount); +#endif + +#ifndef GL_APPLE_fence +#define GL_APPLE_fence 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGenFencesAPPLE (GLsizei n, GLuint *fences); +GLAPI void APIENTRY glDeleteFencesAPPLE (GLsizei n, const GLuint *fences); +GLAPI void APIENTRY glSetFenceAPPLE (GLuint fence); +GLAPI GLboolean APIENTRY glIsFenceAPPLE (GLuint fence); +GLAPI GLboolean APIENTRY glTestFenceAPPLE (GLuint fence); +GLAPI void APIENTRY glFinishFenceAPPLE (GLuint fence); +GLAPI GLboolean APIENTRY glTestObjectAPPLE (GLenum object, GLuint name); +GLAPI void APIENTRY glFinishObjectAPPLE (GLenum object, GLint name); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGENFENCESAPPLEPROC) (GLsizei n, GLuint *fences); +typedef void (APIENTRYP PFNGLDELETEFENCESAPPLEPROC) (GLsizei n, const GLuint *fences); +typedef void (APIENTRYP PFNGLSETFENCEAPPLEPROC) (GLuint fence); +typedef GLboolean (APIENTRYP PFNGLISFENCEAPPLEPROC) (GLuint fence); +typedef GLboolean (APIENTRYP PFNGLTESTFENCEAPPLEPROC) (GLuint fence); +typedef void (APIENTRYP PFNGLFINISHFENCEAPPLEPROC) (GLuint fence); +typedef GLboolean (APIENTRYP PFNGLTESTOBJECTAPPLEPROC) (GLenum object, GLuint name); +typedef void (APIENTRYP PFNGLFINISHOBJECTAPPLEPROC) (GLenum object, GLint name); +#endif + +#ifndef GL_APPLE_vertex_array_object +#define GL_APPLE_vertex_array_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindVertexArrayAPPLE (GLuint array); +GLAPI void APIENTRY glDeleteVertexArraysAPPLE (GLsizei n, const GLuint *arrays); +GLAPI void APIENTRY glGenVertexArraysAPPLE (GLsizei n, GLuint *arrays); +GLAPI GLboolean APIENTRY glIsVertexArrayAPPLE (GLuint array); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDVERTEXARRAYAPPLEPROC) (GLuint array); +typedef void (APIENTRYP PFNGLDELETEVERTEXARRAYSAPPLEPROC) (GLsizei n, const GLuint *arrays); +typedef void (APIENTRYP PFNGLGENVERTEXARRAYSAPPLEPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (APIENTRYP PFNGLISVERTEXARRAYAPPLEPROC) (GLuint array); +#endif + +#ifndef GL_APPLE_vertex_array_range +#define 
GL_APPLE_vertex_array_range 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexArrayRangeAPPLE (GLsizei length, GLvoid *pointer); +GLAPI void APIENTRY glFlushVertexArrayRangeAPPLE (GLsizei length, GLvoid *pointer); +GLAPI void APIENTRY glVertexArrayParameteriAPPLE (GLenum pname, GLint param); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXARRAYRANGEAPPLEPROC) (GLsizei length, GLvoid *pointer); +typedef void (APIENTRYP PFNGLFLUSHVERTEXARRAYRANGEAPPLEPROC) (GLsizei length, GLvoid *pointer); +typedef void (APIENTRYP PFNGLVERTEXARRAYPARAMETERIAPPLEPROC) (GLenum pname, GLint param); +#endif + +#ifndef GL_APPLE_ycbcr_422 +#define GL_APPLE_ycbcr_422 1 +#endif + +#ifndef GL_S3_s3tc +#define GL_S3_s3tc 1 +#endif + +#ifndef GL_ATI_draw_buffers +#define GL_ATI_draw_buffers 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawBuffersATI (GLsizei n, const GLenum *bufs); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWBUFFERSATIPROC) (GLsizei n, const GLenum *bufs); +#endif + +#ifndef GL_ATI_pixel_format_float +#define GL_ATI_pixel_format_float 1 +/* This is really a WGL extension, but defines some associated GL enums. + * ATI does not export "GL_ATI_pixel_format_float" in the GL_EXTENSIONS string. + */ +#endif + +#ifndef GL_ATI_texture_env_combine3 +#define GL_ATI_texture_env_combine3 1 +#endif + +#ifndef GL_ATI_texture_float +#define GL_ATI_texture_float 1 +#endif + +#ifndef GL_NV_float_buffer +#define GL_NV_float_buffer 1 +#endif + +#ifndef GL_NV_fragment_program +#define GL_NV_fragment_program 1 +/* Some NV_fragment_program entry points are shared with ARB_vertex_program. */ +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProgramNamedParameter4fNV (GLuint id, GLsizei len, const GLubyte *name, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glProgramNamedParameter4dNV (GLuint id, GLsizei len, const GLubyte *name, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glProgramNamedParameter4fvNV (GLuint id, GLsizei len, const GLubyte *name, const GLfloat *v); +GLAPI void APIENTRY glProgramNamedParameter4dvNV (GLuint id, GLsizei len, const GLubyte *name, const GLdouble *v); +GLAPI void APIENTRY glGetProgramNamedParameterfvNV (GLuint id, GLsizei len, const GLubyte *name, GLfloat *params); +GLAPI void APIENTRY glGetProgramNamedParameterdvNV (GLuint id, GLsizei len, const GLubyte *name, GLdouble *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROGRAMNAMEDPARAMETER4FNVPROC) (GLuint id, GLsizei len, const GLubyte *name, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLPROGRAMNAMEDPARAMETER4DNVPROC) (GLuint id, GLsizei len, const GLubyte *name, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLPROGRAMNAMEDPARAMETER4FVNVPROC) (GLuint id, GLsizei len, const GLubyte *name, const GLfloat *v); +typedef void (APIENTRYP PFNGLPROGRAMNAMEDPARAMETER4DVNVPROC) (GLuint id, GLsizei len, const GLubyte *name, const GLdouble *v); +typedef void (APIENTRYP PFNGLGETPROGRAMNAMEDPARAMETERFVNVPROC) (GLuint id, GLsizei len, const GLubyte *name, GLfloat *params); +typedef void (APIENTRYP PFNGLGETPROGRAMNAMEDPARAMETERDVNVPROC) (GLuint id, GLsizei len, const GLubyte *name, GLdouble *params); +#endif + +#ifndef GL_NV_half_float +#define GL_NV_half_float 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertex2hNV (GLhalfNV x, GLhalfNV y); +GLAPI void APIENTRY glVertex2hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glVertex3hNV (GLhalfNV x, GLhalfNV y, GLhalfNV 
z); +GLAPI void APIENTRY glVertex3hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glVertex4hNV (GLhalfNV x, GLhalfNV y, GLhalfNV z, GLhalfNV w); +GLAPI void APIENTRY glVertex4hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glNormal3hNV (GLhalfNV nx, GLhalfNV ny, GLhalfNV nz); +GLAPI void APIENTRY glNormal3hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue); +GLAPI void APIENTRY glColor3hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glColor4hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue, GLhalfNV alpha); +GLAPI void APIENTRY glColor4hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glTexCoord1hNV (GLhalfNV s); +GLAPI void APIENTRY glTexCoord1hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glTexCoord2hNV (GLhalfNV s, GLhalfNV t); +GLAPI void APIENTRY glTexCoord2hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glTexCoord3hNV (GLhalfNV s, GLhalfNV t, GLhalfNV r); +GLAPI void APIENTRY glTexCoord3hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glTexCoord4hNV (GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q); +GLAPI void APIENTRY glTexCoord4hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glMultiTexCoord1hNV (GLenum target, GLhalfNV s); +GLAPI void APIENTRY glMultiTexCoord1hvNV (GLenum target, const GLhalfNV *v); +GLAPI void APIENTRY glMultiTexCoord2hNV (GLenum target, GLhalfNV s, GLhalfNV t); +GLAPI void APIENTRY glMultiTexCoord2hvNV (GLenum target, const GLhalfNV *v); +GLAPI void APIENTRY glMultiTexCoord3hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r); +GLAPI void APIENTRY glMultiTexCoord3hvNV (GLenum target, const GLhalfNV *v); +GLAPI void APIENTRY glMultiTexCoord4hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q); +GLAPI void APIENTRY glMultiTexCoord4hvNV (GLenum target, const GLhalfNV *v); +GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog); +GLAPI void APIENTRY glFogCoordhvNV (const GLhalfNV *fog); +GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue); +GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight); +GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight); +GLAPI void APIENTRY glVertexAttrib1hNV (GLuint index, GLhalfNV x); +GLAPI void APIENTRY glVertexAttrib1hvNV (GLuint index, const GLhalfNV *v); +GLAPI void APIENTRY glVertexAttrib2hNV (GLuint index, GLhalfNV x, GLhalfNV y); +GLAPI void APIENTRY glVertexAttrib2hvNV (GLuint index, const GLhalfNV *v); +GLAPI void APIENTRY glVertexAttrib3hNV (GLuint index, GLhalfNV x, GLhalfNV y, GLhalfNV z); +GLAPI void APIENTRY glVertexAttrib3hvNV (GLuint index, const GLhalfNV *v); +GLAPI void APIENTRY glVertexAttrib4hNV (GLuint index, GLhalfNV x, GLhalfNV y, GLhalfNV z, GLhalfNV w); +GLAPI void APIENTRY glVertexAttrib4hvNV (GLuint index, const GLhalfNV *v); +GLAPI void APIENTRY glVertexAttribs1hvNV (GLuint index, GLsizei n, const GLhalfNV *v); +GLAPI void APIENTRY glVertexAttribs2hvNV (GLuint index, GLsizei n, const GLhalfNV *v); +GLAPI void APIENTRY glVertexAttribs3hvNV (GLuint index, GLsizei n, const GLhalfNV *v); +GLAPI void APIENTRY glVertexAttribs4hvNV (GLuint index, GLsizei n, const GLhalfNV *v); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEX2HNVPROC) (GLhalfNV x, GLhalfNV y); +typedef void (APIENTRYP PFNGLVERTEX2HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEX3HNVPROC) (GLhalfNV x, GLhalfNV y, GLhalfNV z); +typedef void (APIENTRYP PFNGLVERTEX3HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEX4HNVPROC) (GLhalfNV 
x, GLhalfNV y, GLhalfNV z, GLhalfNV w); +typedef void (APIENTRYP PFNGLVERTEX4HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLNORMAL3HNVPROC) (GLhalfNV nx, GLhalfNV ny, GLhalfNV nz); +typedef void (APIENTRYP PFNGLNORMAL3HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue); +typedef void (APIENTRYP PFNGLCOLOR3HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLCOLOR4HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue, GLhalfNV alpha); +typedef void (APIENTRYP PFNGLCOLOR4HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLTEXCOORD1HNVPROC) (GLhalfNV s); +typedef void (APIENTRYP PFNGLTEXCOORD1HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLTEXCOORD2HNVPROC) (GLhalfNV s, GLhalfNV t); +typedef void (APIENTRYP PFNGLTEXCOORD2HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLTEXCOORD3HNVPROC) (GLhalfNV s, GLhalfNV t, GLhalfNV r); +typedef void (APIENTRYP PFNGLTEXCOORD3HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLTEXCOORD4HNVPROC) (GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q); +typedef void (APIENTRYP PFNGLTEXCOORD4HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1HNVPROC) (GLenum target, GLhalfNV s); +typedef void (APIENTRYP PFNGLMULTITEXCOORD1HVNVPROC) (GLenum target, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t); +typedef void (APIENTRYP PFNGLMULTITEXCOORD2HVNVPROC) (GLenum target, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r); +typedef void (APIENTRYP PFNGLMULTITEXCOORD3HVNVPROC) (GLenum target, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q); +typedef void (APIENTRYP PFNGLMULTITEXCOORD4HVNVPROC) (GLenum target, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog); +typedef void (APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight); +typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1HNVPROC) (GLuint index, GLhalfNV x); +typedef void (APIENTRYP PFNGLVERTEXATTRIB1HVNVPROC) (GLuint index, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y); +typedef void (APIENTRYP PFNGLVERTEXATTRIB2HVNVPROC) (GLuint index, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y, GLhalfNV z); +typedef void (APIENTRYP PFNGLVERTEXATTRIB3HVNVPROC) (GLuint index, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y, GLhalfNV z, GLhalfNV w); +typedef void (APIENTRYP PFNGLVERTEXATTRIB4HVNVPROC) (GLuint index, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS1HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS2HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS3HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBS4HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV 
*v); +#endif + +#ifndef GL_NV_pixel_data_range +#define GL_NV_pixel_data_range 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPixelDataRangeNV (GLenum target, GLsizei length, const GLvoid *pointer); +GLAPI void APIENTRY glFlushPixelDataRangeNV (GLenum target); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPIXELDATARANGENVPROC) (GLenum target, GLsizei length, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLFLUSHPIXELDATARANGENVPROC) (GLenum target); +#endif + +#ifndef GL_NV_primitive_restart +#define GL_NV_primitive_restart 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPrimitiveRestartNV (void); +GLAPI void APIENTRY glPrimitiveRestartIndexNV (GLuint index); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPRIMITIVERESTARTNVPROC) (void); +typedef void (APIENTRYP PFNGLPRIMITIVERESTARTINDEXNVPROC) (GLuint index); +#endif + +#ifndef GL_NV_texture_expand_normal +#define GL_NV_texture_expand_normal 1 +#endif + +#ifndef GL_NV_vertex_program2 +#define GL_NV_vertex_program2 1 +#endif + +#ifndef GL_ATI_map_object_buffer +#define GL_ATI_map_object_buffer 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLvoid* APIENTRY glMapObjectBufferATI (GLuint buffer); +GLAPI void APIENTRY glUnmapObjectBufferATI (GLuint buffer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLvoid* (APIENTRYP PFNGLMAPOBJECTBUFFERATIPROC) (GLuint buffer); +typedef void (APIENTRYP PFNGLUNMAPOBJECTBUFFERATIPROC) (GLuint buffer); +#endif + +#ifndef GL_ATI_separate_stencil +#define GL_ATI_separate_stencil 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glStencilOpSeparateATI (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GLAPI void APIENTRY glStencilFuncSeparateATI (GLenum frontfunc, GLenum backfunc, GLint ref, GLuint mask); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSTENCILOPSEPARATEATIPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (APIENTRYP PFNGLSTENCILFUNCSEPARATEATIPROC) (GLenum frontfunc, GLenum backfunc, GLint ref, GLuint mask); +#endif + +#ifndef GL_ATI_vertex_attrib_array_object +#define GL_ATI_vertex_attrib_array_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexAttribArrayObjectATI (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLuint buffer, GLuint offset); +GLAPI void APIENTRY glGetVertexAttribArrayObjectfvATI (GLuint index, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetVertexAttribArrayObjectivATI (GLuint index, GLenum pname, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXATTRIBARRAYOBJECTATIPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLuint buffer, GLuint offset); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBARRAYOBJECTFVATIPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBARRAYOBJECTIVATIPROC) (GLuint index, GLenum pname, GLint *params); +#endif + +#ifndef GL_OES_read_format +#define GL_OES_read_format 1 +#endif + +#ifndef GL_EXT_depth_bounds_test +#define GL_EXT_depth_bounds_test 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDepthBoundsEXT (GLclampd zmin, GLclampd zmax); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDEPTHBOUNDSEXTPROC) (GLclampd zmin, GLclampd zmax); +#endif + +#ifndef GL_EXT_texture_mirror_clamp +#define GL_EXT_texture_mirror_clamp 1 +#endif + +#ifndef GL_EXT_blend_equation_separate +#define GL_EXT_blend_equation_separate 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY 
glBlendEquationSeparateEXT (GLenum modeRGB, GLenum modeAlpha); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEEXTPROC) (GLenum modeRGB, GLenum modeAlpha); +#endif + +#ifndef GL_MESA_pack_invert +#define GL_MESA_pack_invert 1 +#endif + +#ifndef GL_MESA_ycbcr_texture +#define GL_MESA_ycbcr_texture 1 +#endif + +#ifndef GL_EXT_pixel_buffer_object +#define GL_EXT_pixel_buffer_object 1 +#endif + +#ifndef GL_NV_fragment_program_option +#define GL_NV_fragment_program_option 1 +#endif + +#ifndef GL_NV_fragment_program2 +#define GL_NV_fragment_program2 1 +#endif + +#ifndef GL_NV_vertex_program2_option +#define GL_NV_vertex_program2_option 1 +#endif + +#ifndef GL_NV_vertex_program3 +#define GL_NV_vertex_program3 1 +#endif + +#ifndef GL_EXT_framebuffer_object +#define GL_EXT_framebuffer_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLboolean APIENTRY glIsRenderbufferEXT (GLuint renderbuffer); +GLAPI void APIENTRY glBindRenderbufferEXT (GLenum target, GLuint renderbuffer); +GLAPI void APIENTRY glDeleteRenderbuffersEXT (GLsizei n, const GLuint *renderbuffers); +GLAPI void APIENTRY glGenRenderbuffersEXT (GLsizei n, GLuint *renderbuffers); +GLAPI void APIENTRY glRenderbufferStorageEXT (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI void APIENTRY glGetRenderbufferParameterivEXT (GLenum target, GLenum pname, GLint *params); +GLAPI GLboolean APIENTRY glIsFramebufferEXT (GLuint framebuffer); +GLAPI void APIENTRY glBindFramebufferEXT (GLenum target, GLuint framebuffer); +GLAPI void APIENTRY glDeleteFramebuffersEXT (GLsizei n, const GLuint *framebuffers); +GLAPI void APIENTRY glGenFramebuffersEXT (GLsizei n, GLuint *framebuffers); +GLAPI GLenum APIENTRY glCheckFramebufferStatusEXT (GLenum target); +GLAPI void APIENTRY glFramebufferTexture1DEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GLAPI void APIENTRY glFramebufferTexture2DEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GLAPI void APIENTRY glFramebufferTexture3DEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +GLAPI void APIENTRY glFramebufferRenderbufferEXT (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GLAPI void APIENTRY glGetFramebufferAttachmentParameterivEXT (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GLAPI void APIENTRY glGenerateMipmapEXT (GLenum target); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLboolean (APIENTRYP PFNGLISRENDERBUFFEREXTPROC) (GLuint renderbuffer); +typedef void (APIENTRYP PFNGLBINDRENDERBUFFEREXTPROC) (GLenum target, GLuint renderbuffer); +typedef void (APIENTRYP PFNGLDELETERENDERBUFFERSEXTPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (APIENTRYP PFNGLGENRENDERBUFFERSEXTPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEEXTPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLboolean (APIENTRYP PFNGLISFRAMEBUFFEREXTPROC) (GLuint framebuffer); +typedef void (APIENTRYP PFNGLBINDFRAMEBUFFEREXTPROC) (GLenum target, GLuint framebuffer); +typedef void (APIENTRYP PFNGLDELETEFRAMEBUFFERSEXTPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (APIENTRYP PFNGLGENFRAMEBUFFERSEXTPROC) (GLsizei n, GLuint *framebuffers); +typedef GLenum (APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSEXTPROC) 
(GLenum target); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE1DEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE3DEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +typedef void (APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFEREXTPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVEXTPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGENERATEMIPMAPEXTPROC) (GLenum target); +#endif + +#ifndef GL_GREMEDY_string_marker +#define GL_GREMEDY_string_marker 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glStringMarkerGREMEDY (GLsizei len, const GLvoid *string); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSTRINGMARKERGREMEDYPROC) (GLsizei len, const GLvoid *string); +#endif + +#ifndef GL_EXT_packed_depth_stencil +#define GL_EXT_packed_depth_stencil 1 +#endif + +#ifndef GL_EXT_stencil_clear_tag +#define GL_EXT_stencil_clear_tag 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glStencilClearTagEXT (GLsizei stencilTagBits, GLuint stencilClearTag); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSTENCILCLEARTAGEXTPROC) (GLsizei stencilTagBits, GLuint stencilClearTag); +#endif + +#ifndef GL_EXT_texture_sRGB +#define GL_EXT_texture_sRGB 1 +#endif + +#ifndef GL_EXT_framebuffer_blit +#define GL_EXT_framebuffer_blit 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBlitFramebufferEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLITFRAMEBUFFEREXTPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +#endif + +#ifndef GL_EXT_framebuffer_multisample +#define GL_EXT_framebuffer_multisample 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glRenderbufferStorageMultisampleEXT (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +#endif + +#ifndef GL_MESAX_texture_stack +#define GL_MESAX_texture_stack 1 +#endif + +#ifndef GL_EXT_timer_query +#define GL_EXT_timer_query 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetQueryObjecti64vEXT (GLuint id, GLenum pname, GLint64EXT *params); +GLAPI void APIENTRY glGetQueryObjectui64vEXT (GLuint id, GLenum pname, GLuint64EXT *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETQUERYOBJECTI64VEXTPROC) (GLuint id, GLenum pname, GLint64EXT *params); +typedef void (APIENTRYP PFNGLGETQUERYOBJECTUI64VEXTPROC) (GLuint id, GLenum pname, GLuint64EXT *params); +#endif + +#ifndef GL_EXT_gpu_program_parameters +#define GL_EXT_gpu_program_parameters 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProgramEnvParameters4fvEXT (GLenum target, GLuint index, GLsizei count, const GLfloat *params); +GLAPI void APIENTRY glProgramLocalParameters4fvEXT (GLenum target, GLuint index, GLsizei count, const GLfloat *params); 
+#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERS4FVEXTPROC) (GLenum target, GLuint index, GLsizei count, const GLfloat *params); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERS4FVEXTPROC) (GLenum target, GLuint index, GLsizei count, const GLfloat *params); +#endif + +#ifndef GL_APPLE_flush_buffer_range +#define GL_APPLE_flush_buffer_range 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBufferParameteriAPPLE (GLenum target, GLenum pname, GLint param); +GLAPI void APIENTRY glFlushMappedBufferRangeAPPLE (GLenum target, GLintptr offset, GLsizeiptr size); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBUFFERPARAMETERIAPPLEPROC) (GLenum target, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEAPPLEPROC) (GLenum target, GLintptr offset, GLsizeiptr size); +#endif + +#ifndef GL_NV_gpu_program4 +#define GL_NV_gpu_program4 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProgramLocalParameterI4iNV (GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w); +GLAPI void APIENTRY glProgramLocalParameterI4ivNV (GLenum target, GLuint index, const GLint *params); +GLAPI void APIENTRY glProgramLocalParametersI4ivNV (GLenum target, GLuint index, GLsizei count, const GLint *params); +GLAPI void APIENTRY glProgramLocalParameterI4uiNV (GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GLAPI void APIENTRY glProgramLocalParameterI4uivNV (GLenum target, GLuint index, const GLuint *params); +GLAPI void APIENTRY glProgramLocalParametersI4uivNV (GLenum target, GLuint index, GLsizei count, const GLuint *params); +GLAPI void APIENTRY glProgramEnvParameterI4iNV (GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w); +GLAPI void APIENTRY glProgramEnvParameterI4ivNV (GLenum target, GLuint index, const GLint *params); +GLAPI void APIENTRY glProgramEnvParametersI4ivNV (GLenum target, GLuint index, GLsizei count, const GLint *params); +GLAPI void APIENTRY glProgramEnvParameterI4uiNV (GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GLAPI void APIENTRY glProgramEnvParameterI4uivNV (GLenum target, GLuint index, const GLuint *params); +GLAPI void APIENTRY glProgramEnvParametersI4uivNV (GLenum target, GLuint index, GLsizei count, const GLuint *params); +GLAPI void APIENTRY glGetProgramLocalParameterIivNV (GLenum target, GLuint index, GLint *params); +GLAPI void APIENTRY glGetProgramLocalParameterIuivNV (GLenum target, GLuint index, GLuint *params); +GLAPI void APIENTRY glGetProgramEnvParameterIivNV (GLenum target, GLuint index, GLint *params); +GLAPI void APIENTRY glGetProgramEnvParameterIuivNV (GLenum target, GLuint index, GLuint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERI4INVPROC) (GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERI4IVNVPROC) (GLenum target, GLuint index, const GLint *params); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERSI4IVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLint *params); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERI4UINVPROC) (GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERI4UIVNVPROC) (GLenum target, GLuint index, const GLuint *params); +typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERSI4UIVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLuint *params); +typedef void (APIENTRYP 
PFNGLPROGRAMENVPARAMETERI4INVPROC) (GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERI4IVNVPROC) (GLenum target, GLuint index, const GLint *params); +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERSI4IVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLint *params); +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERI4UINVPROC) (GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERI4UIVNVPROC) (GLenum target, GLuint index, const GLuint *params); +typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERSI4UIVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLuint *params); +typedef void (APIENTRYP PFNGLGETPROGRAMLOCALPARAMETERIIVNVPROC) (GLenum target, GLuint index, GLint *params); +typedef void (APIENTRYP PFNGLGETPROGRAMLOCALPARAMETERIUIVNVPROC) (GLenum target, GLuint index, GLuint *params); +typedef void (APIENTRYP PFNGLGETPROGRAMENVPARAMETERIIVNVPROC) (GLenum target, GLuint index, GLint *params); +typedef void (APIENTRYP PFNGLGETPROGRAMENVPARAMETERIUIVNVPROC) (GLenum target, GLuint index, GLuint *params); +#endif + +#ifndef GL_NV_geometry_program4 +#define GL_NV_geometry_program4 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProgramVertexLimitNV (GLenum target, GLint limit); +GLAPI void APIENTRY glFramebufferTextureEXT (GLenum target, GLenum attachment, GLuint texture, GLint level); +GLAPI void APIENTRY glFramebufferTextureLayerEXT (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +GLAPI void APIENTRY glFramebufferTextureFaceEXT (GLenum target, GLenum attachment, GLuint texture, GLint level, GLenum face); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROGRAMVERTEXLIMITNVPROC) (GLenum target, GLint limit); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREEXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYEREXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREFACEEXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLenum face); +#endif + +#ifndef GL_EXT_geometry_shader4 +#define GL_EXT_geometry_shader4 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProgramParameteriEXT (GLuint program, GLenum pname, GLint value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROGRAMPARAMETERIEXTPROC) (GLuint program, GLenum pname, GLint value); +#endif + +#ifndef GL_NV_vertex_program4 +#define GL_NV_vertex_program4 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexAttribI1iEXT (GLuint index, GLint x); +GLAPI void APIENTRY glVertexAttribI2iEXT (GLuint index, GLint x, GLint y); +GLAPI void APIENTRY glVertexAttribI3iEXT (GLuint index, GLint x, GLint y, GLint z); +GLAPI void APIENTRY glVertexAttribI4iEXT (GLuint index, GLint x, GLint y, GLint z, GLint w); +GLAPI void APIENTRY glVertexAttribI1uiEXT (GLuint index, GLuint x); +GLAPI void APIENTRY glVertexAttribI2uiEXT (GLuint index, GLuint x, GLuint y); +GLAPI void APIENTRY glVertexAttribI3uiEXT (GLuint index, GLuint x, GLuint y, GLuint z); +GLAPI void APIENTRY glVertexAttribI4uiEXT (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GLAPI void APIENTRY glVertexAttribI1ivEXT (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttribI2ivEXT (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttribI3ivEXT (GLuint index, const GLint *v); +GLAPI 
void APIENTRY glVertexAttribI4ivEXT (GLuint index, const GLint *v); +GLAPI void APIENTRY glVertexAttribI1uivEXT (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttribI2uivEXT (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttribI3uivEXT (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttribI4uivEXT (GLuint index, const GLuint *v); +GLAPI void APIENTRY glVertexAttribI4bvEXT (GLuint index, const GLbyte *v); +GLAPI void APIENTRY glVertexAttribI4svEXT (GLuint index, const GLshort *v); +GLAPI void APIENTRY glVertexAttribI4ubvEXT (GLuint index, const GLubyte *v); +GLAPI void APIENTRY glVertexAttribI4usvEXT (GLuint index, const GLushort *v); +GLAPI void APIENTRY glVertexAttribIPointerEXT (GLuint index, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glGetVertexAttribIivEXT (GLuint index, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetVertexAttribIuivEXT (GLuint index, GLenum pname, GLuint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IEXTPROC) (GLuint index, GLint x); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IEXTPROC) (GLuint index, GLint x, GLint y); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IEXTPROC) (GLuint index, GLint x, GLint y, GLint z); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IEXTPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIEXTPROC) (GLuint index, GLuint x); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIEXTPROC) (GLuint index, GLuint x, GLuint y); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIEXTPROC) (GLuint index, GLuint x, GLuint y, GLuint z); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIEXTPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IVEXTPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IVEXTPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IVEXTPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IVEXTPROC) (GLuint index, const GLint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIVEXTPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIVEXTPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIVEXTPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIVEXTPROC) (GLuint index, const GLuint *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4BVEXTPROC) (GLuint index, const GLbyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4SVEXTPROC) (GLuint index, const GLshort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UBVEXTPROC) (GLuint index, const GLubyte *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4USVEXTPROC) (GLuint index, const GLushort *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBIPOINTEREXTPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIIVEXTPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIUIVEXTPROC) (GLuint index, GLenum pname, GLuint *params); +#endif + +#ifndef GL_EXT_gpu_shader4 +#define GL_EXT_gpu_shader4 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetUniformuivEXT (GLuint program, GLint location, GLuint *params); +GLAPI void APIENTRY glBindFragDataLocationEXT (GLuint program, GLuint color, const GLchar *name); +GLAPI GLint APIENTRY glGetFragDataLocationEXT (GLuint program, const GLchar 
*name); +GLAPI void APIENTRY glUniform1uiEXT (GLint location, GLuint v0); +GLAPI void APIENTRY glUniform2uiEXT (GLint location, GLuint v0, GLuint v1); +GLAPI void APIENTRY glUniform3uiEXT (GLint location, GLuint v0, GLuint v1, GLuint v2); +GLAPI void APIENTRY glUniform4uiEXT (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GLAPI void APIENTRY glUniform1uivEXT (GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glUniform2uivEXT (GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glUniform3uivEXT (GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glUniform4uivEXT (GLint location, GLsizei count, const GLuint *value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETUNIFORMUIVEXTPROC) (GLuint program, GLint location, GLuint *params); +typedef void (APIENTRYP PFNGLBINDFRAGDATALOCATIONEXTPROC) (GLuint program, GLuint color, const GLchar *name); +typedef GLint (APIENTRYP PFNGLGETFRAGDATALOCATIONEXTPROC) (GLuint program, const GLchar *name); +typedef void (APIENTRYP PFNGLUNIFORM1UIEXTPROC) (GLint location, GLuint v0); +typedef void (APIENTRYP PFNGLUNIFORM2UIEXTPROC) (GLint location, GLuint v0, GLuint v1); +typedef void (APIENTRYP PFNGLUNIFORM3UIEXTPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (APIENTRYP PFNGLUNIFORM4UIEXTPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (APIENTRYP PFNGLUNIFORM1UIVEXTPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLUNIFORM2UIVEXTPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLUNIFORM3UIVEXTPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLUNIFORM4UIVEXTPROC) (GLint location, GLsizei count, const GLuint *value); +#endif + +#ifndef GL_EXT_draw_instanced +#define GL_EXT_draw_instanced 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDrawArraysInstancedEXT (GLenum mode, GLint start, GLsizei count, GLsizei primcount); +GLAPI void APIENTRY glDrawElementsInstancedEXT (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei primcount); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDRAWARRAYSINSTANCEDEXTPROC) (GLenum mode, GLint start, GLsizei count, GLsizei primcount); +typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDEXTPROC) (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei primcount); +#endif + +#ifndef GL_EXT_packed_float +#define GL_EXT_packed_float 1 +#endif + +#ifndef GL_EXT_texture_array +#define GL_EXT_texture_array 1 +#endif + +#ifndef GL_EXT_texture_buffer_object +#define GL_EXT_texture_buffer_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexBufferEXT (GLenum target, GLenum internalformat, GLuint buffer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXBUFFEREXTPROC) (GLenum target, GLenum internalformat, GLuint buffer); +#endif + +#ifndef GL_EXT_texture_compression_latc +#define GL_EXT_texture_compression_latc 1 +#endif + +#ifndef GL_EXT_texture_compression_rgtc +#define GL_EXT_texture_compression_rgtc 1 +#endif + +#ifndef GL_EXT_texture_shared_exponent +#define GL_EXT_texture_shared_exponent 1 +#endif + +#ifndef GL_NV_depth_buffer_float +#define GL_NV_depth_buffer_float 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDepthRangedNV (GLdouble zNear, GLdouble zFar); +GLAPI void APIENTRY glClearDepthdNV (GLdouble depth); +GLAPI void APIENTRY glDepthBoundsdNV (GLdouble zmin, GLdouble zmax); 
+#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDEPTHRANGEDNVPROC) (GLdouble zNear, GLdouble zFar); +typedef void (APIENTRYP PFNGLCLEARDEPTHDNVPROC) (GLdouble depth); +typedef void (APIENTRYP PFNGLDEPTHBOUNDSDNVPROC) (GLdouble zmin, GLdouble zmax); +#endif + +#ifndef GL_NV_fragment_program4 +#define GL_NV_fragment_program4 1 +#endif + +#ifndef GL_NV_framebuffer_multisample_coverage +#define GL_NV_framebuffer_multisample_coverage 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glRenderbufferStorageMultisampleCoverageNV (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLenum internalformat, GLsizei width, GLsizei height); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLECOVERAGENVPROC) (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLenum internalformat, GLsizei width, GLsizei height); +#endif + +#ifndef GL_EXT_framebuffer_sRGB +#define GL_EXT_framebuffer_sRGB 1 +#endif + +#ifndef GL_NV_geometry_shader4 +#define GL_NV_geometry_shader4 1 +#endif + +#ifndef GL_NV_parameter_buffer_object +#define GL_NV_parameter_buffer_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProgramBufferParametersfvNV (GLenum target, GLuint buffer, GLuint index, GLsizei count, const GLfloat *params); +GLAPI void APIENTRY glProgramBufferParametersIivNV (GLenum target, GLuint buffer, GLuint index, GLsizei count, const GLint *params); +GLAPI void APIENTRY glProgramBufferParametersIuivNV (GLenum target, GLuint buffer, GLuint index, GLsizei count, const GLuint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROGRAMBUFFERPARAMETERSFVNVPROC) (GLenum target, GLuint buffer, GLuint index, GLsizei count, const GLfloat *params); +typedef void (APIENTRYP PFNGLPROGRAMBUFFERPARAMETERSIIVNVPROC) (GLenum target, GLuint buffer, GLuint index, GLsizei count, const GLint *params); +typedef void (APIENTRYP PFNGLPROGRAMBUFFERPARAMETERSIUIVNVPROC) (GLenum target, GLuint buffer, GLuint index, GLsizei count, const GLuint *params); +#endif + +#ifndef GL_EXT_draw_buffers2 +#define GL_EXT_draw_buffers2 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glColorMaskIndexedEXT (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +GLAPI void APIENTRY glGetBooleanIndexedvEXT (GLenum target, GLuint index, GLboolean *data); +GLAPI void APIENTRY glGetIntegerIndexedvEXT (GLenum target, GLuint index, GLint *data); +GLAPI void APIENTRY glEnableIndexedEXT (GLenum target, GLuint index); +GLAPI void APIENTRY glDisableIndexedEXT (GLenum target, GLuint index); +GLAPI GLboolean APIENTRY glIsEnabledIndexedEXT (GLenum target, GLuint index); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOLORMASKINDEXEDEXTPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +typedef void (APIENTRYP PFNGLGETBOOLEANINDEXEDVEXTPROC) (GLenum target, GLuint index, GLboolean *data); +typedef void (APIENTRYP PFNGLGETINTEGERINDEXEDVEXTPROC) (GLenum target, GLuint index, GLint *data); +typedef void (APIENTRYP PFNGLENABLEINDEXEDEXTPROC) (GLenum target, GLuint index); +typedef void (APIENTRYP PFNGLDISABLEINDEXEDEXTPROC) (GLenum target, GLuint index); +typedef GLboolean (APIENTRYP PFNGLISENABLEDINDEXEDEXTPROC) (GLenum target, GLuint index); +#endif + +#ifndef GL_NV_transform_feedback +#define GL_NV_transform_feedback 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBeginTransformFeedbackNV (GLenum primitiveMode); +GLAPI void APIENTRY glEndTransformFeedbackNV (void); +GLAPI void APIENTRY 
glTransformFeedbackAttribsNV (GLuint count, const GLint *attribs, GLenum bufferMode); +GLAPI void APIENTRY glBindBufferRangeNV (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +GLAPI void APIENTRY glBindBufferOffsetNV (GLenum target, GLuint index, GLuint buffer, GLintptr offset); +GLAPI void APIENTRY glBindBufferBaseNV (GLenum target, GLuint index, GLuint buffer); +GLAPI void APIENTRY glTransformFeedbackVaryingsNV (GLuint program, GLsizei count, const GLint *locations, GLenum bufferMode); +GLAPI void APIENTRY glActiveVaryingNV (GLuint program, const GLchar *name); +GLAPI GLint APIENTRY glGetVaryingLocationNV (GLuint program, const GLchar *name); +GLAPI void APIENTRY glGetActiveVaryingNV (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +GLAPI void APIENTRY glGetTransformFeedbackVaryingNV (GLuint program, GLuint index, GLint *location); +GLAPI void APIENTRY glTransformFeedbackStreamAttribsNV (GLsizei count, const GLint *attribs, GLsizei nbuffers, const GLint *bufstreams, GLenum bufferMode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKNVPROC) (GLenum primitiveMode); +typedef void (APIENTRYP PFNGLENDTRANSFORMFEEDBACKNVPROC) (void); +typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKATTRIBSNVPROC) (GLuint count, const GLint *attribs, GLenum bufferMode); +typedef void (APIENTRYP PFNGLBINDBUFFERRANGENVPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (APIENTRYP PFNGLBINDBUFFEROFFSETNVPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset); +typedef void (APIENTRYP PFNGLBINDBUFFERBASENVPROC) (GLenum target, GLuint index, GLuint buffer); +typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSNVPROC) (GLuint program, GLsizei count, const GLint *locations, GLenum bufferMode); +typedef void (APIENTRYP PFNGLACTIVEVARYINGNVPROC) (GLuint program, const GLchar *name); +typedef GLint (APIENTRYP PFNGLGETVARYINGLOCATIONNVPROC) (GLuint program, const GLchar *name); +typedef void (APIENTRYP PFNGLGETACTIVEVARYINGNVPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGNVPROC) (GLuint program, GLuint index, GLint *location); +typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKSTREAMATTRIBSNVPROC) (GLsizei count, const GLint *attribs, GLsizei nbuffers, const GLint *bufstreams, GLenum bufferMode); +#endif + +#ifndef GL_EXT_bindable_uniform +#define GL_EXT_bindable_uniform 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glUniformBufferEXT (GLuint program, GLint location, GLuint buffer); +GLAPI GLint APIENTRY glGetUniformBufferSizeEXT (GLuint program, GLint location); +GLAPI GLintptr APIENTRY glGetUniformOffsetEXT (GLuint program, GLint location); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLUNIFORMBUFFEREXTPROC) (GLuint program, GLint location, GLuint buffer); +typedef GLint (APIENTRYP PFNGLGETUNIFORMBUFFERSIZEEXTPROC) (GLuint program, GLint location); +typedef GLintptr (APIENTRYP PFNGLGETUNIFORMOFFSETEXTPROC) (GLuint program, GLint location); +#endif + +#ifndef GL_EXT_texture_integer +#define GL_EXT_texture_integer 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexParameterIivEXT (GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glTexParameterIuivEXT (GLenum target, GLenum pname, const GLuint *params); +GLAPI void APIENTRY glGetTexParameterIivEXT (GLenum target, 
GLenum pname, GLint *params); +GLAPI void APIENTRY glGetTexParameterIuivEXT (GLenum target, GLenum pname, GLuint *params); +GLAPI void APIENTRY glClearColorIiEXT (GLint red, GLint green, GLint blue, GLint alpha); +GLAPI void APIENTRY glClearColorIuiEXT (GLuint red, GLuint green, GLuint blue, GLuint alpha); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, const GLuint *params); +typedef void (APIENTRYP PFNGLGETTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, GLuint *params); +typedef void (APIENTRYP PFNGLCLEARCOLORIIEXTPROC) (GLint red, GLint green, GLint blue, GLint alpha); +typedef void (APIENTRYP PFNGLCLEARCOLORIUIEXTPROC) (GLuint red, GLuint green, GLuint blue, GLuint alpha); +#endif + +#ifndef GL_GREMEDY_frame_terminator +#define GL_GREMEDY_frame_terminator 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glFrameTerminatorGREMEDY (void); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLFRAMETERMINATORGREMEDYPROC) (void); +#endif + +#ifndef GL_NV_conditional_render +#define GL_NV_conditional_render 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBeginConditionalRenderNV (GLuint id, GLenum mode); +GLAPI void APIENTRY glEndConditionalRenderNV (void); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBEGINCONDITIONALRENDERNVPROC) (GLuint id, GLenum mode); +typedef void (APIENTRYP PFNGLENDCONDITIONALRENDERNVPROC) (void); +#endif + +#ifndef GL_NV_present_video +#define GL_NV_present_video 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glPresentFrameKeyedNV (GLuint video_slot, GLuint64EXT minPresentTime, GLuint beginPresentTimeId, GLuint presentDurationId, GLenum type, GLenum target0, GLuint fill0, GLuint key0, GLenum target1, GLuint fill1, GLuint key1); +GLAPI void APIENTRY glPresentFrameDualFillNV (GLuint video_slot, GLuint64EXT minPresentTime, GLuint beginPresentTimeId, GLuint presentDurationId, GLenum type, GLenum target0, GLuint fill0, GLenum target1, GLuint fill1, GLenum target2, GLuint fill2, GLenum target3, GLuint fill3); +GLAPI void APIENTRY glGetVideoivNV (GLuint video_slot, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetVideouivNV (GLuint video_slot, GLenum pname, GLuint *params); +GLAPI void APIENTRY glGetVideoi64vNV (GLuint video_slot, GLenum pname, GLint64EXT *params); +GLAPI void APIENTRY glGetVideoui64vNV (GLuint video_slot, GLenum pname, GLuint64EXT *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPRESENTFRAMEKEYEDNVPROC) (GLuint video_slot, GLuint64EXT minPresentTime, GLuint beginPresentTimeId, GLuint presentDurationId, GLenum type, GLenum target0, GLuint fill0, GLuint key0, GLenum target1, GLuint fill1, GLuint key1); +typedef void (APIENTRYP PFNGLPRESENTFRAMEDUALFILLNVPROC) (GLuint video_slot, GLuint64EXT minPresentTime, GLuint beginPresentTimeId, GLuint presentDurationId, GLenum type, GLenum target0, GLuint fill0, GLenum target1, GLuint fill1, GLenum target2, GLuint fill2, GLenum target3, GLuint fill3); +typedef void (APIENTRYP PFNGLGETVIDEOIVNVPROC) (GLuint video_slot, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETVIDEOUIVNVPROC) (GLuint video_slot, GLenum pname, GLuint *params); +typedef void (APIENTRYP PFNGLGETVIDEOI64VNVPROC) (GLuint video_slot, GLenum pname, GLint64EXT *params); +typedef void 
(APIENTRYP PFNGLGETVIDEOUI64VNVPROC) (GLuint video_slot, GLenum pname, GLuint64EXT *params); +#endif + +#ifndef GL_EXT_transform_feedback +#define GL_EXT_transform_feedback 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBeginTransformFeedbackEXT (GLenum primitiveMode); +GLAPI void APIENTRY glEndTransformFeedbackEXT (void); +GLAPI void APIENTRY glBindBufferRangeEXT (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +GLAPI void APIENTRY glBindBufferOffsetEXT (GLenum target, GLuint index, GLuint buffer, GLintptr offset); +GLAPI void APIENTRY glBindBufferBaseEXT (GLenum target, GLuint index, GLuint buffer); +GLAPI void APIENTRY glTransformFeedbackVaryingsEXT (GLuint program, GLsizei count, const GLchar* *varyings, GLenum bufferMode); +GLAPI void APIENTRY glGetTransformFeedbackVaryingEXT (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKEXTPROC) (GLenum primitiveMode); +typedef void (APIENTRYP PFNGLENDTRANSFORMFEEDBACKEXTPROC) (void); +typedef void (APIENTRYP PFNGLBINDBUFFERRANGEEXTPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (APIENTRYP PFNGLBINDBUFFEROFFSETEXTPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset); +typedef void (APIENTRYP PFNGLBINDBUFFERBASEEXTPROC) (GLenum target, GLuint index, GLuint buffer); +typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSEXTPROC) (GLuint program, GLsizei count, const GLchar* *varyings, GLenum bufferMode); +typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGEXTPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +#endif + +#ifndef GL_EXT_direct_state_access +#define GL_EXT_direct_state_access 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glClientAttribDefaultEXT (GLbitfield mask); +GLAPI void APIENTRY glPushClientAttribDefaultEXT (GLbitfield mask); +GLAPI void APIENTRY glMatrixLoadfEXT (GLenum mode, const GLfloat *m); +GLAPI void APIENTRY glMatrixLoaddEXT (GLenum mode, const GLdouble *m); +GLAPI void APIENTRY glMatrixMultfEXT (GLenum mode, const GLfloat *m); +GLAPI void APIENTRY glMatrixMultdEXT (GLenum mode, const GLdouble *m); +GLAPI void APIENTRY glMatrixLoadIdentityEXT (GLenum mode); +GLAPI void APIENTRY glMatrixRotatefEXT (GLenum mode, GLfloat angle, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glMatrixRotatedEXT (GLenum mode, GLdouble angle, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glMatrixScalefEXT (GLenum mode, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glMatrixScaledEXT (GLenum mode, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glMatrixTranslatefEXT (GLenum mode, GLfloat x, GLfloat y, GLfloat z); +GLAPI void APIENTRY glMatrixTranslatedEXT (GLenum mode, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glMatrixFrustumEXT (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar); +GLAPI void APIENTRY glMatrixOrthoEXT (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar); +GLAPI void APIENTRY glMatrixPopEXT (GLenum mode); +GLAPI void APIENTRY glMatrixPushEXT (GLenum mode); +GLAPI void APIENTRY glMatrixLoadTransposefEXT (GLenum mode, const GLfloat *m); +GLAPI void APIENTRY glMatrixLoadTransposedEXT (GLenum mode, const GLdouble *m); +GLAPI void APIENTRY 
glMatrixMultTransposefEXT (GLenum mode, const GLfloat *m); +GLAPI void APIENTRY glMatrixMultTransposedEXT (GLenum mode, const GLdouble *m); +GLAPI void APIENTRY glTextureParameterfEXT (GLuint texture, GLenum target, GLenum pname, GLfloat param); +GLAPI void APIENTRY glTextureParameterfvEXT (GLuint texture, GLenum target, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glTextureParameteriEXT (GLuint texture, GLenum target, GLenum pname, GLint param); +GLAPI void APIENTRY glTextureParameterivEXT (GLuint texture, GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glTextureImage1DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glTextureImage2DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glTextureSubImage1DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glTextureSubImage2DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glCopyTextureImage1DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border); +GLAPI void APIENTRY glCopyTextureImage2DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GLAPI void APIENTRY glCopyTextureSubImage1DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width); +GLAPI void APIENTRY glCopyTextureSubImage2DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI void APIENTRY glGetTextureImageEXT (GLuint texture, GLenum target, GLint level, GLenum format, GLenum type, GLvoid *pixels); +GLAPI void APIENTRY glGetTextureParameterfvEXT (GLuint texture, GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetTextureParameterivEXT (GLuint texture, GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetTextureLevelParameterfvEXT (GLuint texture, GLenum target, GLint level, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetTextureLevelParameterivEXT (GLuint texture, GLenum target, GLint level, GLenum pname, GLint *params); +GLAPI void APIENTRY glTextureImage3DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glTextureSubImage3DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glCopyTextureSubImage3DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI void APIENTRY glMultiTexParameterfEXT (GLenum texunit, GLenum target, GLenum pname, GLfloat param); +GLAPI void APIENTRY glMultiTexParameterfvEXT (GLenum texunit, GLenum target, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glMultiTexParameteriEXT (GLenum texunit, GLenum target, GLenum 
pname, GLint param); +GLAPI void APIENTRY glMultiTexParameterivEXT (GLenum texunit, GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glMultiTexImage1DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glMultiTexImage2DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glMultiTexSubImage1DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glMultiTexSubImage2DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glCopyMultiTexImage1DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border); +GLAPI void APIENTRY glCopyMultiTexImage2DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GLAPI void APIENTRY glCopyMultiTexSubImage1DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width); +GLAPI void APIENTRY glCopyMultiTexSubImage2DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI void APIENTRY glGetMultiTexImageEXT (GLenum texunit, GLenum target, GLint level, GLenum format, GLenum type, GLvoid *pixels); +GLAPI void APIENTRY glGetMultiTexParameterfvEXT (GLenum texunit, GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetMultiTexParameterivEXT (GLenum texunit, GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetMultiTexLevelParameterfvEXT (GLenum texunit, GLenum target, GLint level, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetMultiTexLevelParameterivEXT (GLenum texunit, GLenum target, GLint level, GLenum pname, GLint *params); +GLAPI void APIENTRY glMultiTexImage3DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glMultiTexSubImage3DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels); +GLAPI void APIENTRY glCopyMultiTexSubImage3DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI void APIENTRY glBindMultiTextureEXT (GLenum texunit, GLenum target, GLuint texture); +GLAPI void APIENTRY glEnableClientStateIndexedEXT (GLenum array, GLuint index); +GLAPI void APIENTRY glDisableClientStateIndexedEXT (GLenum array, GLuint index); +GLAPI void APIENTRY glMultiTexCoordPointerEXT (GLenum texunit, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glMultiTexEnvfEXT (GLenum texunit, GLenum target, GLenum pname, GLfloat param); +GLAPI void APIENTRY glMultiTexEnvfvEXT (GLenum texunit, GLenum target, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glMultiTexEnviEXT (GLenum texunit, GLenum target, GLenum pname, GLint param); +GLAPI void APIENTRY glMultiTexEnvivEXT 
(GLenum texunit, GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glMultiTexGendEXT (GLenum texunit, GLenum coord, GLenum pname, GLdouble param); +GLAPI void APIENTRY glMultiTexGendvEXT (GLenum texunit, GLenum coord, GLenum pname, const GLdouble *params); +GLAPI void APIENTRY glMultiTexGenfEXT (GLenum texunit, GLenum coord, GLenum pname, GLfloat param); +GLAPI void APIENTRY glMultiTexGenfvEXT (GLenum texunit, GLenum coord, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glMultiTexGeniEXT (GLenum texunit, GLenum coord, GLenum pname, GLint param); +GLAPI void APIENTRY glMultiTexGenivEXT (GLenum texunit, GLenum coord, GLenum pname, const GLint *params); +GLAPI void APIENTRY glGetMultiTexEnvfvEXT (GLenum texunit, GLenum target, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetMultiTexEnvivEXT (GLenum texunit, GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetMultiTexGendvEXT (GLenum texunit, GLenum coord, GLenum pname, GLdouble *params); +GLAPI void APIENTRY glGetMultiTexGenfvEXT (GLenum texunit, GLenum coord, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetMultiTexGenivEXT (GLenum texunit, GLenum coord, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetFloatIndexedvEXT (GLenum target, GLuint index, GLfloat *data); +GLAPI void APIENTRY glGetDoubleIndexedvEXT (GLenum target, GLuint index, GLdouble *data); +GLAPI void APIENTRY glGetPointerIndexedvEXT (GLenum target, GLuint index, GLvoid* *data); +GLAPI void APIENTRY glCompressedTextureImage3DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedTextureImage2DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedTextureImage1DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedTextureSubImage3DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedTextureSubImage2DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedTextureSubImage1DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glGetCompressedTextureImageEXT (GLuint texture, GLenum target, GLint lod, GLvoid *img); +GLAPI void APIENTRY glCompressedMultiTexImage3DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedMultiTexImage2DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedMultiTexImage1DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedMultiTexSubImage3DEXT (GLenum texunit, GLenum 
target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedMultiTexSubImage2DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glCompressedMultiTexSubImage1DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *bits); +GLAPI void APIENTRY glGetCompressedMultiTexImageEXT (GLenum texunit, GLenum target, GLint lod, GLvoid *img); +GLAPI void APIENTRY glNamedProgramStringEXT (GLuint program, GLenum target, GLenum format, GLsizei len, const GLvoid *string); +GLAPI void APIENTRY glNamedProgramLocalParameter4dEXT (GLuint program, GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glNamedProgramLocalParameter4dvEXT (GLuint program, GLenum target, GLuint index, const GLdouble *params); +GLAPI void APIENTRY glNamedProgramLocalParameter4fEXT (GLuint program, GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI void APIENTRY glNamedProgramLocalParameter4fvEXT (GLuint program, GLenum target, GLuint index, const GLfloat *params); +GLAPI void APIENTRY glGetNamedProgramLocalParameterdvEXT (GLuint program, GLenum target, GLuint index, GLdouble *params); +GLAPI void APIENTRY glGetNamedProgramLocalParameterfvEXT (GLuint program, GLenum target, GLuint index, GLfloat *params); +GLAPI void APIENTRY glGetNamedProgramivEXT (GLuint program, GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetNamedProgramStringEXT (GLuint program, GLenum target, GLenum pname, GLvoid *string); +GLAPI void APIENTRY glNamedProgramLocalParameters4fvEXT (GLuint program, GLenum target, GLuint index, GLsizei count, const GLfloat *params); +GLAPI void APIENTRY glNamedProgramLocalParameterI4iEXT (GLuint program, GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w); +GLAPI void APIENTRY glNamedProgramLocalParameterI4ivEXT (GLuint program, GLenum target, GLuint index, const GLint *params); +GLAPI void APIENTRY glNamedProgramLocalParametersI4ivEXT (GLuint program, GLenum target, GLuint index, GLsizei count, const GLint *params); +GLAPI void APIENTRY glNamedProgramLocalParameterI4uiEXT (GLuint program, GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GLAPI void APIENTRY glNamedProgramLocalParameterI4uivEXT (GLuint program, GLenum target, GLuint index, const GLuint *params); +GLAPI void APIENTRY glNamedProgramLocalParametersI4uivEXT (GLuint program, GLenum target, GLuint index, GLsizei count, const GLuint *params); +GLAPI void APIENTRY glGetNamedProgramLocalParameterIivEXT (GLuint program, GLenum target, GLuint index, GLint *params); +GLAPI void APIENTRY glGetNamedProgramLocalParameterIuivEXT (GLuint program, GLenum target, GLuint index, GLuint *params); +GLAPI void APIENTRY glTextureParameterIivEXT (GLuint texture, GLenum target, GLenum pname, const GLint *params); +GLAPI void APIENTRY glTextureParameterIuivEXT (GLuint texture, GLenum target, GLenum pname, const GLuint *params); +GLAPI void APIENTRY glGetTextureParameterIivEXT (GLuint texture, GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetTextureParameterIuivEXT (GLuint texture, GLenum target, GLenum pname, GLuint *params); +GLAPI void APIENTRY glMultiTexParameterIivEXT (GLenum texunit, GLenum target, 
GLenum pname, const GLint *params); +GLAPI void APIENTRY glMultiTexParameterIuivEXT (GLenum texunit, GLenum target, GLenum pname, const GLuint *params); +GLAPI void APIENTRY glGetMultiTexParameterIivEXT (GLenum texunit, GLenum target, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetMultiTexParameterIuivEXT (GLenum texunit, GLenum target, GLenum pname, GLuint *params); +GLAPI void APIENTRY glProgramUniform1fEXT (GLuint program, GLint location, GLfloat v0); +GLAPI void APIENTRY glProgramUniform2fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1); +GLAPI void APIENTRY glProgramUniform3fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GLAPI void APIENTRY glProgramUniform4fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GLAPI void APIENTRY glProgramUniform1iEXT (GLuint program, GLint location, GLint v0); +GLAPI void APIENTRY glProgramUniform2iEXT (GLuint program, GLint location, GLint v0, GLint v1); +GLAPI void APIENTRY glProgramUniform3iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +GLAPI void APIENTRY glProgramUniform4iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GLAPI void APIENTRY glProgramUniform1fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glProgramUniform2fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glProgramUniform3fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glProgramUniform4fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GLAPI void APIENTRY glProgramUniform1ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glProgramUniform2ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glProgramUniform3ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glProgramUniform4ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value); +GLAPI void APIENTRY glProgramUniformMatrix2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix2x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix3x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix2x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix4x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix3x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniformMatrix4x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI void APIENTRY glProgramUniform1uiEXT (GLuint program, GLint location, GLuint v0); +GLAPI void APIENTRY glProgramUniform2uiEXT 
(GLuint program, GLint location, GLuint v0, GLuint v1); +GLAPI void APIENTRY glProgramUniform3uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +GLAPI void APIENTRY glProgramUniform4uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GLAPI void APIENTRY glProgramUniform1uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glProgramUniform2uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glProgramUniform3uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glProgramUniform4uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value); +GLAPI void APIENTRY glNamedBufferDataEXT (GLuint buffer, GLsizeiptr size, const GLvoid *data, GLenum usage); +GLAPI void APIENTRY glNamedBufferSubDataEXT (GLuint buffer, GLintptr offset, GLsizeiptr size, const GLvoid *data); +GLAPI GLvoid* APIENTRY glMapNamedBufferEXT (GLuint buffer, GLenum access); +GLAPI GLboolean APIENTRY glUnmapNamedBufferEXT (GLuint buffer); +GLAPI GLvoid* APIENTRY glMapNamedBufferRangeEXT (GLuint buffer, GLintptr offset, GLsizeiptr length, GLbitfield access); +GLAPI void APIENTRY glFlushMappedNamedBufferRangeEXT (GLuint buffer, GLintptr offset, GLsizeiptr length); +GLAPI void APIENTRY glNamedCopyBufferSubDataEXT (GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +GLAPI void APIENTRY glGetNamedBufferParameterivEXT (GLuint buffer, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetNamedBufferPointervEXT (GLuint buffer, GLenum pname, GLvoid* *params); +GLAPI void APIENTRY glGetNamedBufferSubDataEXT (GLuint buffer, GLintptr offset, GLsizeiptr size, GLvoid *data); +GLAPI void APIENTRY glTextureBufferEXT (GLuint texture, GLenum target, GLenum internalformat, GLuint buffer); +GLAPI void APIENTRY glMultiTexBufferEXT (GLenum texunit, GLenum target, GLenum internalformat, GLuint buffer); +GLAPI void APIENTRY glNamedRenderbufferStorageEXT (GLuint renderbuffer, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI void APIENTRY glGetNamedRenderbufferParameterivEXT (GLuint renderbuffer, GLenum pname, GLint *params); +GLAPI GLenum APIENTRY glCheckNamedFramebufferStatusEXT (GLuint framebuffer, GLenum target); +GLAPI void APIENTRY glNamedFramebufferTexture1DEXT (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GLAPI void APIENTRY glNamedFramebufferTexture2DEXT (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GLAPI void APIENTRY glNamedFramebufferTexture3DEXT (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +GLAPI void APIENTRY glNamedFramebufferRenderbufferEXT (GLuint framebuffer, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GLAPI void APIENTRY glGetNamedFramebufferAttachmentParameterivEXT (GLuint framebuffer, GLenum attachment, GLenum pname, GLint *params); +GLAPI void APIENTRY glGenerateTextureMipmapEXT (GLuint texture, GLenum target); +GLAPI void APIENTRY glGenerateMultiTexMipmapEXT (GLenum texunit, GLenum target); +GLAPI void APIENTRY glFramebufferDrawBufferEXT (GLuint framebuffer, GLenum mode); +GLAPI void APIENTRY glFramebufferDrawBuffersEXT (GLuint framebuffer, GLsizei n, const GLenum *bufs); +GLAPI void APIENTRY glFramebufferReadBufferEXT (GLuint framebuffer, GLenum mode); +GLAPI void APIENTRY glGetFramebufferParameterivEXT 
(GLuint framebuffer, GLenum pname, GLint *params); +GLAPI void APIENTRY glNamedRenderbufferStorageMultisampleEXT (GLuint renderbuffer, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI void APIENTRY glNamedRenderbufferStorageMultisampleCoverageEXT (GLuint renderbuffer, GLsizei coverageSamples, GLsizei colorSamples, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI void APIENTRY glNamedFramebufferTextureEXT (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level); +GLAPI void APIENTRY glNamedFramebufferTextureLayerEXT (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint layer); +GLAPI void APIENTRY glNamedFramebufferTextureFaceEXT (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLenum face); +GLAPI void APIENTRY glTextureRenderbufferEXT (GLuint texture, GLenum target, GLuint renderbuffer); +GLAPI void APIENTRY glMultiTexRenderbufferEXT (GLenum texunit, GLenum target, GLuint renderbuffer); +GLAPI void APIENTRY glProgramUniform1dEXT (GLuint program, GLint location, GLdouble x); +GLAPI void APIENTRY glProgramUniform2dEXT (GLuint program, GLint location, GLdouble x, GLdouble y); +GLAPI void APIENTRY glProgramUniform3dEXT (GLuint program, GLint location, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glProgramUniform4dEXT (GLuint program, GLint location, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glProgramUniform1dvEXT (GLuint program, GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glProgramUniform2dvEXT (GLuint program, GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glProgramUniform3dvEXT (GLuint program, GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glProgramUniform4dvEXT (GLuint program, GLint location, GLsizei count, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix2dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix3dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix4dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix2x3dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix2x4dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix3x2dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix3x4dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix4x2dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +GLAPI void APIENTRY glProgramUniformMatrix4x3dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCLIENTATTRIBDEFAULTEXTPROC) (GLbitfield mask); +typedef void (APIENTRYP PFNGLPUSHCLIENTATTRIBDEFAULTEXTPROC) (GLbitfield mask); +typedef void (APIENTRYP PFNGLMATRIXLOADFEXTPROC) (GLenum mode, const GLfloat *m); +typedef void (APIENTRYP PFNGLMATRIXLOADDEXTPROC) (GLenum mode, const GLdouble *m); 
+typedef void (APIENTRYP PFNGLMATRIXMULTFEXTPROC) (GLenum mode, const GLfloat *m); +typedef void (APIENTRYP PFNGLMATRIXMULTDEXTPROC) (GLenum mode, const GLdouble *m); +typedef void (APIENTRYP PFNGLMATRIXLOADIDENTITYEXTPROC) (GLenum mode); +typedef void (APIENTRYP PFNGLMATRIXROTATEFEXTPROC) (GLenum mode, GLfloat angle, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLMATRIXROTATEDEXTPROC) (GLenum mode, GLdouble angle, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLMATRIXSCALEFEXTPROC) (GLenum mode, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLMATRIXSCALEDEXTPROC) (GLenum mode, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLMATRIXTRANSLATEFEXTPROC) (GLenum mode, GLfloat x, GLfloat y, GLfloat z); +typedef void (APIENTRYP PFNGLMATRIXTRANSLATEDEXTPROC) (GLenum mode, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLMATRIXFRUSTUMEXTPROC) (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar); +typedef void (APIENTRYP PFNGLMATRIXORTHOEXTPROC) (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar); +typedef void (APIENTRYP PFNGLMATRIXPOPEXTPROC) (GLenum mode); +typedef void (APIENTRYP PFNGLMATRIXPUSHEXTPROC) (GLenum mode); +typedef void (APIENTRYP PFNGLMATRIXLOADTRANSPOSEFEXTPROC) (GLenum mode, const GLfloat *m); +typedef void (APIENTRYP PFNGLMATRIXLOADTRANSPOSEDEXTPROC) (GLenum mode, const GLdouble *m); +typedef void (APIENTRYP PFNGLMATRIXMULTTRANSPOSEFEXTPROC) (GLenum mode, const GLfloat *m); +typedef void (APIENTRYP PFNGLMATRIXMULTTRANSPOSEDEXTPROC) (GLenum mode, const GLdouble *m); +typedef void (APIENTRYP PFNGLTEXTUREPARAMETERFEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLTEXTUREPARAMETERFVEXTPROC) (GLuint texture, GLenum target, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLTEXTUREIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLTEXTUREIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLTEXTURESUBIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLTEXTURESUBIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLCOPYTEXTUREIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border); +typedef void (APIENTRYP PFNGLCOPYTEXTUREIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (APIENTRYP PFNGLCOPYTEXTURESUBIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width); +typedef void (APIENTRYP 
PFNGLCOPYTEXTURESUBIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLGETTEXTUREIMAGEEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum format, GLenum type, GLvoid *pixels); +typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERFVEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETTEXTURELEVELPARAMETERFVEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETTEXTURELEVELPARAMETERIVEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLTEXTUREIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLTEXTURESUBIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLCOPYTEXTURESUBIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLMULTITEXPARAMETERFEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLMULTITEXPARAMETERFVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLMULTITEXPARAMETERIEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLMULTITEXPARAMETERIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLMULTITEXIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLMULTITEXIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLMULTITEXSUBIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLMULTITEXSUBIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLCOPYMULTITEXIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border); +typedef void (APIENTRYP PFNGLCOPYMULTITEXIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (APIENTRYP PFNGLCOPYMULTITEXSUBIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width); +typedef void (APIENTRYP PFNGLCOPYMULTITEXSUBIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (APIENTRYP 
PFNGLGETMULTITEXIMAGEEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum format, GLenum type, GLvoid *pixels); +typedef void (APIENTRYP PFNGLGETMULTITEXPARAMETERFVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETMULTITEXPARAMETERIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETMULTITEXLEVELPARAMETERFVEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETMULTITEXLEVELPARAMETERIVEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLMULTITEXIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLMULTITEXSUBIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels); +typedef void (APIENTRYP PFNGLCOPYMULTITEXSUBIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLBINDMULTITEXTUREEXTPROC) (GLenum texunit, GLenum target, GLuint texture); +typedef void (APIENTRYP PFNGLENABLECLIENTSTATEINDEXEDEXTPROC) (GLenum array, GLuint index); +typedef void (APIENTRYP PFNGLDISABLECLIENTSTATEINDEXEDEXTPROC) (GLenum array, GLuint index); +typedef void (APIENTRYP PFNGLMULTITEXCOORDPOINTEREXTPROC) (GLenum texunit, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLMULTITEXENVFEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLMULTITEXENVFVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLMULTITEXENVIEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLMULTITEXENVIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLMULTITEXGENDEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLdouble param); +typedef void (APIENTRYP PFNGLMULTITEXGENDVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, const GLdouble *params); +typedef void (APIENTRYP PFNGLMULTITEXGENFEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLfloat param); +typedef void (APIENTRYP PFNGLMULTITEXGENFVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLMULTITEXGENIEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLint param); +typedef void (APIENTRYP PFNGLMULTITEXGENIVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLGETMULTITEXENVFVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETMULTITEXENVIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETMULTITEXGENDVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLdouble *params); +typedef void (APIENTRYP PFNGLGETMULTITEXGENFVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETMULTITEXGENIVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLint *params); +typedef void (APIENTRYP 
PFNGLGETFLOATINDEXEDVEXTPROC) (GLenum target, GLuint index, GLfloat *data); +typedef void (APIENTRYP PFNGLGETDOUBLEINDEXEDVEXTPROC) (GLenum target, GLuint index, GLdouble *data); +typedef void (APIENTRYP PFNGLGETPOINTERINDEXEDVEXTPROC) (GLenum target, GLuint index, GLvoid* *data); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTUREIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTUREIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTUREIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTURESUBIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTURESUBIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTURESUBIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXTUREIMAGEEXTPROC) (GLuint texture, GLenum target, GLint lod, GLvoid *img); +typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXSUBIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXSUBIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXSUBIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *bits); +typedef void (APIENTRYP PFNGLGETCOMPRESSEDMULTITEXIMAGEEXTPROC) (GLenum texunit, GLenum target, GLint lod, GLvoid *img); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMSTRINGEXTPROC) (GLuint program, GLenum target, GLenum format, GLsizei len, const GLvoid *string); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETER4DEXTPROC) (GLuint program, GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETER4DVEXTPROC) 
(GLuint program, GLenum target, GLuint index, const GLdouble *params); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETER4FEXTPROC) (GLuint program, GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETER4FVEXTPROC) (GLuint program, GLenum target, GLuint index, const GLfloat *params); +typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMLOCALPARAMETERDVEXTPROC) (GLuint program, GLenum target, GLuint index, GLdouble *params); +typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMLOCALPARAMETERFVEXTPROC) (GLuint program, GLenum target, GLuint index, GLfloat *params); +typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMIVEXTPROC) (GLuint program, GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMSTRINGEXTPROC) (GLuint program, GLenum target, GLenum pname, GLvoid *string); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERS4FVEXTPROC) (GLuint program, GLenum target, GLuint index, GLsizei count, const GLfloat *params); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERI4IEXTPROC) (GLuint program, GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERI4IVEXTPROC) (GLuint program, GLenum target, GLuint index, const GLint *params); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERSI4IVEXTPROC) (GLuint program, GLenum target, GLuint index, GLsizei count, const GLint *params); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERI4UIEXTPROC) (GLuint program, GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERI4UIVEXTPROC) (GLuint program, GLenum target, GLuint index, const GLuint *params); +typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERSI4UIVEXTPROC) (GLuint program, GLenum target, GLuint index, GLsizei count, const GLuint *params); +typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMLOCALPARAMETERIIVEXTPROC) (GLuint program, GLenum target, GLuint index, GLint *params); +typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMLOCALPARAMETERIUIVEXTPROC) (GLuint program, GLenum target, GLuint index, GLuint *params); +typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIUIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, const GLuint *params); +typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERIIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERIUIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLuint *params); +typedef void (APIENTRYP PFNGLMULTITEXPARAMETERIIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLMULTITEXPARAMETERIUIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLuint *params); +typedef void (APIENTRYP PFNGLGETMULTITEXPARAMETERIIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETMULTITEXPARAMETERIUIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLuint *params); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1FEXTPROC) (GLuint program, GLint location, GLfloat v0); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void 
(APIENTRYP PFNGLPROGRAMUNIFORM4FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1IEXTPROC) (GLuint program, GLint location, GLint v0); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UIEXTPROC) (GLuint program, GLint location, GLuint v0); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UIVEXTPROC) (GLuint program, GLint location, GLsizei 
count, const GLuint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (APIENTRYP PFNGLNAMEDBUFFERDATAEXTPROC) (GLuint buffer, GLsizeiptr size, const GLvoid *data, GLenum usage); +typedef void (APIENTRYP PFNGLNAMEDBUFFERSUBDATAEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, const GLvoid *data); +typedef GLvoid* (APIENTRYP PFNGLMAPNAMEDBUFFEREXTPROC) (GLuint buffer, GLenum access); +typedef GLboolean (APIENTRYP PFNGLUNMAPNAMEDBUFFEREXTPROC) (GLuint buffer); +typedef GLvoid* (APIENTRYP PFNGLMAPNAMEDBUFFERRANGEEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (APIENTRYP PFNGLFLUSHMAPPEDNAMEDBUFFERRANGEEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr length); +typedef void (APIENTRYP PFNGLNAMEDCOPYBUFFERSUBDATAEXTPROC) (GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +typedef void (APIENTRYP PFNGLGETNAMEDBUFFERPARAMETERIVEXTPROC) (GLuint buffer, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETNAMEDBUFFERPOINTERVEXTPROC) (GLuint buffer, GLenum pname, GLvoid* *params); +typedef void (APIENTRYP PFNGLGETNAMEDBUFFERSUBDATAEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, GLvoid *data); +typedef void (APIENTRYP PFNGLTEXTUREBUFFEREXTPROC) (GLuint texture, GLenum target, GLenum internalformat, GLuint buffer); +typedef void (APIENTRYP PFNGLMULTITEXBUFFEREXTPROC) (GLenum texunit, GLenum target, GLenum internalformat, GLuint buffer); +typedef void (APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEEXTPROC) (GLuint renderbuffer, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLGETNAMEDRENDERBUFFERPARAMETERIVEXTPROC) (GLuint renderbuffer, GLenum pname, GLint *params); +typedef GLenum (APIENTRYP PFNGLCHECKNAMEDFRAMEBUFFERSTATUSEXTPROC) (GLuint framebuffer, GLenum target); +typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTURE1DEXTPROC) (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTURE2DEXTPROC) (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTURE3DEXTPROC) (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERRENDERBUFFEREXTPROC) (GLuint framebuffer, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (APIENTRYP PFNGLGETNAMEDFRAMEBUFFERATTACHMENTPARAMETERIVEXTPROC) (GLuint framebuffer, GLenum attachment, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGENERATETEXTUREMIPMAPEXTPROC) (GLuint texture, GLenum target); +typedef void (APIENTRYP PFNGLGENERATEMULTITEXMIPMAPEXTPROC) (GLenum texunit, GLenum target); +typedef void (APIENTRYP PFNGLFRAMEBUFFERDRAWBUFFEREXTPROC) (GLuint framebuffer, GLenum mode); +typedef void (APIENTRYP PFNGLFRAMEBUFFERDRAWBUFFERSEXTPROC) (GLuint framebuffer, GLsizei n, const GLenum *bufs); +typedef void (APIENTRYP PFNGLFRAMEBUFFERREADBUFFEREXTPROC) (GLuint framebuffer, GLenum mode); +typedef void (APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVEXTPROC) (GLuint framebuffer, 
GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) (GLuint renderbuffer, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLECOVERAGEEXTPROC) (GLuint renderbuffer, GLsizei coverageSamples, GLsizei colorSamples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTUREEXTPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level); +typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTURELAYEREXTPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint layer); +typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTUREFACEEXTPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLenum face); +typedef void (APIENTRYP PFNGLTEXTURERENDERBUFFEREXTPROC) (GLuint texture, GLenum target, GLuint renderbuffer); +typedef void (APIENTRYP PFNGLMULTITEXRENDERBUFFEREXTPROC) (GLenum texunit, GLenum target, GLuint renderbuffer); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1DEXTPROC) (GLuint program, GLint location, GLdouble x); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2DEXTPROC) (GLuint program, GLint location, GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3DEXTPROC) (GLuint program, GLint location, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4DEXTPROC) (GLuint program, GLint location, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1DVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2DVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3DVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4DVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value); +#endif + +#ifndef GL_EXT_vertex_array_bgra +#define GL_EXT_vertex_array_bgra 1 +#endif + +#ifndef 
GL_EXT_texture_swizzle +#define GL_EXT_texture_swizzle 1 +#endif + +#ifndef GL_NV_explicit_multisample +#define GL_NV_explicit_multisample 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetMultisamplefvNV (GLenum pname, GLuint index, GLfloat *val); +GLAPI void APIENTRY glSampleMaskIndexedNV (GLuint index, GLbitfield mask); +GLAPI void APIENTRY glTexRenderbufferNV (GLenum target, GLuint renderbuffer); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETMULTISAMPLEFVNVPROC) (GLenum pname, GLuint index, GLfloat *val); +typedef void (APIENTRYP PFNGLSAMPLEMASKINDEXEDNVPROC) (GLuint index, GLbitfield mask); +typedef void (APIENTRYP PFNGLTEXRENDERBUFFERNVPROC) (GLenum target, GLuint renderbuffer); +#endif + +#ifndef GL_NV_transform_feedback2 +#define GL_NV_transform_feedback2 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindTransformFeedbackNV (GLenum target, GLuint id); +GLAPI void APIENTRY glDeleteTransformFeedbacksNV (GLsizei n, const GLuint *ids); +GLAPI void APIENTRY glGenTransformFeedbacksNV (GLsizei n, GLuint *ids); +GLAPI GLboolean APIENTRY glIsTransformFeedbackNV (GLuint id); +GLAPI void APIENTRY glPauseTransformFeedbackNV (void); +GLAPI void APIENTRY glResumeTransformFeedbackNV (void); +GLAPI void APIENTRY glDrawTransformFeedbackNV (GLenum mode, GLuint id); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDTRANSFORMFEEDBACKNVPROC) (GLenum target, GLuint id); +typedef void (APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSNVPROC) (GLsizei n, const GLuint *ids); +typedef void (APIENTRYP PFNGLGENTRANSFORMFEEDBACKSNVPROC) (GLsizei n, GLuint *ids); +typedef GLboolean (APIENTRYP PFNGLISTRANSFORMFEEDBACKNVPROC) (GLuint id); +typedef void (APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKNVPROC) (void); +typedef void (APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKNVPROC) (void); +typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKNVPROC) (GLenum mode, GLuint id); +#endif + +#ifndef GL_ATI_meminfo +#define GL_ATI_meminfo 1 +#endif + +#ifndef GL_AMD_performance_monitor +#define GL_AMD_performance_monitor 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGetPerfMonitorGroupsAMD (GLint *numGroups, GLsizei groupsSize, GLuint *groups); +GLAPI void APIENTRY glGetPerfMonitorCountersAMD (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters); +GLAPI void APIENTRY glGetPerfMonitorGroupStringAMD (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString); +GLAPI void APIENTRY glGetPerfMonitorCounterStringAMD (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString); +GLAPI void APIENTRY glGetPerfMonitorCounterInfoAMD (GLuint group, GLuint counter, GLenum pname, GLvoid *data); +GLAPI void APIENTRY glGenPerfMonitorsAMD (GLsizei n, GLuint *monitors); +GLAPI void APIENTRY glDeletePerfMonitorsAMD (GLsizei n, GLuint *monitors); +GLAPI void APIENTRY glSelectPerfMonitorCountersAMD (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList); +GLAPI void APIENTRY glBeginPerfMonitorAMD (GLuint monitor); +GLAPI void APIENTRY glEndPerfMonitorAMD (GLuint monitor); +GLAPI void APIENTRY glGetPerfMonitorCounterDataAMD (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGETPERFMONITORGROUPSAMDPROC) (GLint *numGroups, GLsizei groupsSize, GLuint *groups); +typedef void (APIENTRYP PFNGLGETPERFMONITORCOUNTERSAMDPROC) (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, 
GLuint *counters); +typedef void (APIENTRYP PFNGLGETPERFMONITORGROUPSTRINGAMDPROC) (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString); +typedef void (APIENTRYP PFNGLGETPERFMONITORCOUNTERSTRINGAMDPROC) (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString); +typedef void (APIENTRYP PFNGLGETPERFMONITORCOUNTERINFOAMDPROC) (GLuint group, GLuint counter, GLenum pname, GLvoid *data); +typedef void (APIENTRYP PFNGLGENPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors); +typedef void (APIENTRYP PFNGLDELETEPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors); +typedef void (APIENTRYP PFNGLSELECTPERFMONITORCOUNTERSAMDPROC) (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList); +typedef void (APIENTRYP PFNGLBEGINPERFMONITORAMDPROC) (GLuint monitor); +typedef void (APIENTRYP PFNGLENDPERFMONITORAMDPROC) (GLuint monitor); +typedef void (APIENTRYP PFNGLGETPERFMONITORCOUNTERDATAAMDPROC) (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten); +#endif + +#ifndef GL_AMD_texture_texture4 +#define GL_AMD_texture_texture4 1 +#endif + +#ifndef GL_AMD_vertex_shader_tesselator +#define GL_AMD_vertex_shader_tesselator 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTessellationFactorAMD (GLfloat factor); +GLAPI void APIENTRY glTessellationModeAMD (GLenum mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTESSELLATIONFACTORAMDPROC) (GLfloat factor); +typedef void (APIENTRYP PFNGLTESSELLATIONMODEAMDPROC) (GLenum mode); +#endif + +#ifndef GL_EXT_provoking_vertex +#define GL_EXT_provoking_vertex 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProvokingVertexEXT (GLenum mode); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROVOKINGVERTEXEXTPROC) (GLenum mode); +#endif + +#ifndef GL_EXT_texture_snorm +#define GL_EXT_texture_snorm 1 +#endif + +#ifndef GL_AMD_draw_buffers_blend +#define GL_AMD_draw_buffers_blend 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBlendFuncIndexedAMD (GLuint buf, GLenum src, GLenum dst); +GLAPI void APIENTRY glBlendFuncSeparateIndexedAMD (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +GLAPI void APIENTRY glBlendEquationIndexedAMD (GLuint buf, GLenum mode); +GLAPI void APIENTRY glBlendEquationSeparateIndexedAMD (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBLENDFUNCINDEXEDAMDPROC) (GLuint buf, GLenum src, GLenum dst); +typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEINDEXEDAMDPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +typedef void (APIENTRYP PFNGLBLENDEQUATIONINDEXEDAMDPROC) (GLuint buf, GLenum mode); +typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEINDEXEDAMDPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +#endif + +#ifndef GL_APPLE_texture_range +#define GL_APPLE_texture_range 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTextureRangeAPPLE (GLenum target, GLsizei length, const GLvoid *pointer); +GLAPI void APIENTRY glGetTexParameterPointervAPPLE (GLenum target, GLenum pname, GLvoid* *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXTURERANGEAPPLEPROC) (GLenum target, GLsizei length, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLGETTEXPARAMETERPOINTERVAPPLEPROC) (GLenum target, GLenum pname, GLvoid* *params); +#endif + +#ifndef GL_APPLE_float_pixels +#define GL_APPLE_float_pixels 1 +#endif + +#ifndef GL_APPLE_vertex_program_evaluators +#define 
GL_APPLE_vertex_program_evaluators 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glEnableVertexAttribAPPLE (GLuint index, GLenum pname); +GLAPI void APIENTRY glDisableVertexAttribAPPLE (GLuint index, GLenum pname); +GLAPI GLboolean APIENTRY glIsVertexAttribEnabledAPPLE (GLuint index, GLenum pname); +GLAPI void APIENTRY glMapVertexAttrib1dAPPLE (GLuint index, GLuint size, GLdouble u1, GLdouble u2, GLint stride, GLint order, const GLdouble *points); +GLAPI void APIENTRY glMapVertexAttrib1fAPPLE (GLuint index, GLuint size, GLfloat u1, GLfloat u2, GLint stride, GLint order, const GLfloat *points); +GLAPI void APIENTRY glMapVertexAttrib2dAPPLE (GLuint index, GLuint size, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, const GLdouble *points); +GLAPI void APIENTRY glMapVertexAttrib2fAPPLE (GLuint index, GLuint size, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, const GLfloat *points); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLENABLEVERTEXATTRIBAPPLEPROC) (GLuint index, GLenum pname); +typedef void (APIENTRYP PFNGLDISABLEVERTEXATTRIBAPPLEPROC) (GLuint index, GLenum pname); +typedef GLboolean (APIENTRYP PFNGLISVERTEXATTRIBENABLEDAPPLEPROC) (GLuint index, GLenum pname); +typedef void (APIENTRYP PFNGLMAPVERTEXATTRIB1DAPPLEPROC) (GLuint index, GLuint size, GLdouble u1, GLdouble u2, GLint stride, GLint order, const GLdouble *points); +typedef void (APIENTRYP PFNGLMAPVERTEXATTRIB1FAPPLEPROC) (GLuint index, GLuint size, GLfloat u1, GLfloat u2, GLint stride, GLint order, const GLfloat *points); +typedef void (APIENTRYP PFNGLMAPVERTEXATTRIB2DAPPLEPROC) (GLuint index, GLuint size, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, const GLdouble *points); +typedef void (APIENTRYP PFNGLMAPVERTEXATTRIB2FAPPLEPROC) (GLuint index, GLuint size, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, const GLfloat *points); +#endif + +#ifndef GL_APPLE_aux_depth_stencil +#define GL_APPLE_aux_depth_stencil 1 +#endif + +#ifndef GL_APPLE_object_purgeable +#define GL_APPLE_object_purgeable 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLenum APIENTRY glObjectPurgeableAPPLE (GLenum objectType, GLuint name, GLenum option); +GLAPI GLenum APIENTRY glObjectUnpurgeableAPPLE (GLenum objectType, GLuint name, GLenum option); +GLAPI void APIENTRY glGetObjectParameterivAPPLE (GLenum objectType, GLuint name, GLenum pname, GLint *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLenum (APIENTRYP PFNGLOBJECTPURGEABLEAPPLEPROC) (GLenum objectType, GLuint name, GLenum option); +typedef GLenum (APIENTRYP PFNGLOBJECTUNPURGEABLEAPPLEPROC) (GLenum objectType, GLuint name, GLenum option); +typedef void (APIENTRYP PFNGLGETOBJECTPARAMETERIVAPPLEPROC) (GLenum objectType, GLuint name, GLenum pname, GLint *params); +#endif + +#ifndef GL_APPLE_row_bytes +#define GL_APPLE_row_bytes 1 +#endif + +#ifndef GL_APPLE_rgb_422 +#define GL_APPLE_rgb_422 1 +#endif + +#ifndef GL_NV_video_capture +#define GL_NV_video_capture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBeginVideoCaptureNV (GLuint video_capture_slot); +GLAPI void APIENTRY glBindVideoCaptureStreamBufferNV (GLuint video_capture_slot, GLuint stream, GLenum frame_region, GLintptrARB offset); +GLAPI void APIENTRY glBindVideoCaptureStreamTextureNV (GLuint video_capture_slot, GLuint stream, GLenum frame_region, GLenum target, GLuint texture); 
+GLAPI void APIENTRY glEndVideoCaptureNV (GLuint video_capture_slot); +GLAPI void APIENTRY glGetVideoCaptureivNV (GLuint video_capture_slot, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetVideoCaptureStreamivNV (GLuint video_capture_slot, GLuint stream, GLenum pname, GLint *params); +GLAPI void APIENTRY glGetVideoCaptureStreamfvNV (GLuint video_capture_slot, GLuint stream, GLenum pname, GLfloat *params); +GLAPI void APIENTRY glGetVideoCaptureStreamdvNV (GLuint video_capture_slot, GLuint stream, GLenum pname, GLdouble *params); +GLAPI GLenum APIENTRY glVideoCaptureNV (GLuint video_capture_slot, GLuint *sequence_num, GLuint64EXT *capture_time); +GLAPI void APIENTRY glVideoCaptureStreamParameterivNV (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLint *params); +GLAPI void APIENTRY glVideoCaptureStreamParameterfvNV (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLfloat *params); +GLAPI void APIENTRY glVideoCaptureStreamParameterdvNV (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLdouble *params); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBEGINVIDEOCAPTURENVPROC) (GLuint video_capture_slot); +typedef void (APIENTRYP PFNGLBINDVIDEOCAPTURESTREAMBUFFERNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum frame_region, GLintptrARB offset); +typedef void (APIENTRYP PFNGLBINDVIDEOCAPTURESTREAMTEXTURENVPROC) (GLuint video_capture_slot, GLuint stream, GLenum frame_region, GLenum target, GLuint texture); +typedef void (APIENTRYP PFNGLENDVIDEOCAPTURENVPROC) (GLuint video_capture_slot); +typedef void (APIENTRYP PFNGLGETVIDEOCAPTUREIVNVPROC) (GLuint video_capture_slot, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETVIDEOCAPTURESTREAMIVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, GLint *params); +typedef void (APIENTRYP PFNGLGETVIDEOCAPTURESTREAMFVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, GLfloat *params); +typedef void (APIENTRYP PFNGLGETVIDEOCAPTURESTREAMDVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, GLdouble *params); +typedef GLenum (APIENTRYP PFNGLVIDEOCAPTURENVPROC) (GLuint video_capture_slot, GLuint *sequence_num, GLuint64EXT *capture_time); +typedef void (APIENTRYP PFNGLVIDEOCAPTURESTREAMPARAMETERIVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLint *params); +typedef void (APIENTRYP PFNGLVIDEOCAPTURESTREAMPARAMETERFVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLfloat *params); +typedef void (APIENTRYP PFNGLVIDEOCAPTURESTREAMPARAMETERDVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLdouble *params); +#endif + +#ifndef GL_NV_copy_image +#define GL_NV_copy_image 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glCopyImageSubDataNV (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLCOPYIMAGESUBDATANVPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth); +#endif + +#ifndef GL_EXT_separate_shader_objects +#define GL_EXT_separate_shader_objects 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glUseShaderProgramEXT (GLenum type, GLuint program); +GLAPI void APIENTRY 
glActiveProgramEXT (GLuint program); +GLAPI GLuint APIENTRY glCreateShaderProgramEXT (GLenum type, const GLchar *string); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLUSESHADERPROGRAMEXTPROC) (GLenum type, GLuint program); +typedef void (APIENTRYP PFNGLACTIVEPROGRAMEXTPROC) (GLuint program); +typedef GLuint (APIENTRYP PFNGLCREATESHADERPROGRAMEXTPROC) (GLenum type, const GLchar *string); +#endif + +#ifndef GL_NV_parameter_buffer_object2 +#define GL_NV_parameter_buffer_object2 1 +#endif + +#ifndef GL_NV_shader_buffer_load +#define GL_NV_shader_buffer_load 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glMakeBufferResidentNV (GLenum target, GLenum access); +GLAPI void APIENTRY glMakeBufferNonResidentNV (GLenum target); +GLAPI GLboolean APIENTRY glIsBufferResidentNV (GLenum target); +GLAPI void APIENTRY glMakeNamedBufferResidentNV (GLuint buffer, GLenum access); +GLAPI void APIENTRY glMakeNamedBufferNonResidentNV (GLuint buffer); +GLAPI GLboolean APIENTRY glIsNamedBufferResidentNV (GLuint buffer); +GLAPI void APIENTRY glGetBufferParameterui64vNV (GLenum target, GLenum pname, GLuint64EXT *params); +GLAPI void APIENTRY glGetNamedBufferParameterui64vNV (GLuint buffer, GLenum pname, GLuint64EXT *params); +GLAPI void APIENTRY glGetIntegerui64vNV (GLenum value, GLuint64EXT *result); +GLAPI void APIENTRY glUniformui64NV (GLint location, GLuint64EXT value); +GLAPI void APIENTRY glUniformui64vNV (GLint location, GLsizei count, const GLuint64EXT *value); +GLAPI void APIENTRY glGetUniformui64vNV (GLuint program, GLint location, GLuint64EXT *params); +GLAPI void APIENTRY glProgramUniformui64NV (GLuint program, GLint location, GLuint64EXT value); +GLAPI void APIENTRY glProgramUniformui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLMAKEBUFFERRESIDENTNVPROC) (GLenum target, GLenum access); +typedef void (APIENTRYP PFNGLMAKEBUFFERNONRESIDENTNVPROC) (GLenum target); +typedef GLboolean (APIENTRYP PFNGLISBUFFERRESIDENTNVPROC) (GLenum target); +typedef void (APIENTRYP PFNGLMAKENAMEDBUFFERRESIDENTNVPROC) (GLuint buffer, GLenum access); +typedef void (APIENTRYP PFNGLMAKENAMEDBUFFERNONRESIDENTNVPROC) (GLuint buffer); +typedef GLboolean (APIENTRYP PFNGLISNAMEDBUFFERRESIDENTNVPROC) (GLuint buffer); +typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERUI64VNVPROC) (GLenum target, GLenum pname, GLuint64EXT *params); +typedef void (APIENTRYP PFNGLGETNAMEDBUFFERPARAMETERUI64VNVPROC) (GLuint buffer, GLenum pname, GLuint64EXT *params); +typedef void (APIENTRYP PFNGLGETINTEGERUI64VNVPROC) (GLenum value, GLuint64EXT *result); +typedef void (APIENTRYP PFNGLUNIFORMUI64NVPROC) (GLint location, GLuint64EXT value); +typedef void (APIENTRYP PFNGLUNIFORMUI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value); +typedef void (APIENTRYP PFNGLGETUNIFORMUI64VNVPROC) (GLuint program, GLint location, GLuint64EXT *params); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMUI64NVPROC) (GLuint program, GLint location, GLuint64EXT value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMUI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +#endif + +#ifndef GL_NV_vertex_buffer_unified_memory +#define GL_NV_vertex_buffer_unified_memory 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBufferAddressRangeNV (GLenum pname, GLuint index, GLuint64EXT address, GLsizeiptr length); +GLAPI void APIENTRY glVertexFormatNV (GLint size, GLenum type, GLsizei stride); +GLAPI void APIENTRY 
glNormalFormatNV (GLenum type, GLsizei stride); +GLAPI void APIENTRY glColorFormatNV (GLint size, GLenum type, GLsizei stride); +GLAPI void APIENTRY glIndexFormatNV (GLenum type, GLsizei stride); +GLAPI void APIENTRY glTexCoordFormatNV (GLint size, GLenum type, GLsizei stride); +GLAPI void APIENTRY glEdgeFlagFormatNV (GLsizei stride); +GLAPI void APIENTRY glSecondaryColorFormatNV (GLint size, GLenum type, GLsizei stride); +GLAPI void APIENTRY glFogCoordFormatNV (GLenum type, GLsizei stride); +GLAPI void APIENTRY glVertexAttribFormatNV (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride); +GLAPI void APIENTRY glVertexAttribIFormatNV (GLuint index, GLint size, GLenum type, GLsizei stride); +GLAPI void APIENTRY glGetIntegerui64i_vNV (GLenum value, GLuint index, GLuint64EXT *result); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBUFFERADDRESSRANGENVPROC) (GLenum pname, GLuint index, GLuint64EXT address, GLsizeiptr length); +typedef void (APIENTRYP PFNGLVERTEXFORMATNVPROC) (GLint size, GLenum type, GLsizei stride); +typedef void (APIENTRYP PFNGLNORMALFORMATNVPROC) (GLenum type, GLsizei stride); +typedef void (APIENTRYP PFNGLCOLORFORMATNVPROC) (GLint size, GLenum type, GLsizei stride); +typedef void (APIENTRYP PFNGLINDEXFORMATNVPROC) (GLenum type, GLsizei stride); +typedef void (APIENTRYP PFNGLTEXCOORDFORMATNVPROC) (GLint size, GLenum type, GLsizei stride); +typedef void (APIENTRYP PFNGLEDGEFLAGFORMATNVPROC) (GLsizei stride); +typedef void (APIENTRYP PFNGLSECONDARYCOLORFORMATNVPROC) (GLint size, GLenum type, GLsizei stride); +typedef void (APIENTRYP PFNGLFOGCOORDFORMATNVPROC) (GLenum type, GLsizei stride); +typedef void (APIENTRYP PFNGLVERTEXATTRIBFORMATNVPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride); +typedef void (APIENTRYP PFNGLVERTEXATTRIBIFORMATNVPROC) (GLuint index, GLint size, GLenum type, GLsizei stride); +typedef void (APIENTRYP PFNGLGETINTEGERUI64I_VNVPROC) (GLenum value, GLuint index, GLuint64EXT *result); +#endif + +#ifndef GL_NV_texture_barrier +#define GL_NV_texture_barrier 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTextureBarrierNV (void); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXTUREBARRIERNVPROC) (void); +#endif + +#ifndef GL_AMD_shader_stencil_export +#define GL_AMD_shader_stencil_export 1 +#endif + +#ifndef GL_AMD_seamless_cubemap_per_texture +#define GL_AMD_seamless_cubemap_per_texture 1 +#endif + +#ifndef GL_AMD_conservative_depth +#define GL_AMD_conservative_depth 1 +#endif + +#ifndef GL_EXT_shader_image_load_store +#define GL_EXT_shader_image_load_store 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glBindImageTextureEXT (GLuint index, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLint format); +GLAPI void APIENTRY glMemoryBarrierEXT (GLbitfield barriers); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLBINDIMAGETEXTUREEXTPROC) (GLuint index, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLint format); +typedef void (APIENTRYP PFNGLMEMORYBARRIEREXTPROC) (GLbitfield barriers); +#endif + +#ifndef GL_EXT_vertex_attrib_64bit +#define GL_EXT_vertex_attrib_64bit 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexAttribL1dEXT (GLuint index, GLdouble x); +GLAPI void APIENTRY glVertexAttribL2dEXT (GLuint index, GLdouble x, GLdouble y); +GLAPI void APIENTRY glVertexAttribL3dEXT (GLuint index, GLdouble x, GLdouble y, GLdouble z); +GLAPI void APIENTRY glVertexAttribL4dEXT (GLuint 
index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI void APIENTRY glVertexAttribL1dvEXT (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribL2dvEXT (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribL3dvEXT (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribL4dvEXT (GLuint index, const GLdouble *v); +GLAPI void APIENTRY glVertexAttribLPointerEXT (GLuint index, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +GLAPI void APIENTRY glGetVertexAttribLdvEXT (GLuint index, GLenum pname, GLdouble *params); +GLAPI void APIENTRY glVertexArrayVertexAttribLOffsetEXT (GLuint vaobj, GLuint buffer, GLuint index, GLint size, GLenum type, GLsizei stride, GLintptr offset); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXATTRIBL1DEXTPROC) (GLuint index, GLdouble x); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL2DEXTPROC) (GLuint index, GLdouble x, GLdouble y); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL3DEXTPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL4DEXTPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL1DVEXTPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL2DVEXTPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL3DVEXTPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL4DVEXTPROC) (GLuint index, const GLdouble *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBLPOINTEREXTPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const GLvoid *pointer); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBLDVEXTPROC) (GLuint index, GLenum pname, GLdouble *params); +typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBLOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLuint index, GLint size, GLenum type, GLsizei stride, GLintptr offset); +#endif + +#ifndef GL_NV_gpu_program5 +#define GL_NV_gpu_program5 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glProgramSubroutineParametersuivNV (GLenum target, GLsizei count, const GLuint *params); +GLAPI void APIENTRY glGetProgramSubroutineParameteruivNV (GLenum target, GLuint index, GLuint *param); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLPROGRAMSUBROUTINEPARAMETERSUIVNVPROC) (GLenum target, GLsizei count, const GLuint *params); +typedef void (APIENTRYP PFNGLGETPROGRAMSUBROUTINEPARAMETERUIVNVPROC) (GLenum target, GLuint index, GLuint *param); +#endif + +#ifndef GL_NV_gpu_shader5 +#define GL_NV_gpu_shader5 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glUniform1i64NV (GLint location, GLint64EXT x); +GLAPI void APIENTRY glUniform2i64NV (GLint location, GLint64EXT x, GLint64EXT y); +GLAPI void APIENTRY glUniform3i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z); +GLAPI void APIENTRY glUniform4i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w); +GLAPI void APIENTRY glUniform1i64vNV (GLint location, GLsizei count, const GLint64EXT *value); +GLAPI void APIENTRY glUniform2i64vNV (GLint location, GLsizei count, const GLint64EXT *value); +GLAPI void APIENTRY glUniform3i64vNV (GLint location, GLsizei count, const GLint64EXT *value); +GLAPI void APIENTRY glUniform4i64vNV (GLint location, GLsizei count, const GLint64EXT *value); +GLAPI void APIENTRY glUniform1ui64NV (GLint location, GLuint64EXT x); +GLAPI void APIENTRY glUniform2ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y); +GLAPI void 
APIENTRY glUniform3ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z); +GLAPI void APIENTRY glUniform4ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w); +GLAPI void APIENTRY glUniform1ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value); +GLAPI void APIENTRY glUniform2ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value); +GLAPI void APIENTRY glUniform3ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value); +GLAPI void APIENTRY glUniform4ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value); +GLAPI void APIENTRY glGetUniformi64vNV (GLuint program, GLint location, GLint64EXT *params); +GLAPI void APIENTRY glProgramUniform1i64NV (GLuint program, GLint location, GLint64EXT x); +GLAPI void APIENTRY glProgramUniform2i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y); +GLAPI void APIENTRY glProgramUniform3i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z); +GLAPI void APIENTRY glProgramUniform4i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w); +GLAPI void APIENTRY glProgramUniform1i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value); +GLAPI void APIENTRY glProgramUniform2i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value); +GLAPI void APIENTRY glProgramUniform3i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value); +GLAPI void APIENTRY glProgramUniform4i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value); +GLAPI void APIENTRY glProgramUniform1ui64NV (GLuint program, GLint location, GLuint64EXT x); +GLAPI void APIENTRY glProgramUniform2ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y); +GLAPI void APIENTRY glProgramUniform3ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z); +GLAPI void APIENTRY glProgramUniform4ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w); +GLAPI void APIENTRY glProgramUniform1ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +GLAPI void APIENTRY glProgramUniform2ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +GLAPI void APIENTRY glProgramUniform3ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +GLAPI void APIENTRY glProgramUniform4ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLUNIFORM1I64NVPROC) (GLint location, GLint64EXT x); +typedef void (APIENTRYP PFNGLUNIFORM2I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y); +typedef void (APIENTRYP PFNGLUNIFORM3I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z); +typedef void (APIENTRYP PFNGLUNIFORM4I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w); +typedef void (APIENTRYP PFNGLUNIFORM1I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value); +typedef void (APIENTRYP PFNGLUNIFORM2I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value); +typedef void (APIENTRYP PFNGLUNIFORM3I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value); +typedef void (APIENTRYP PFNGLUNIFORM4I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value); +typedef void (APIENTRYP PFNGLUNIFORM1UI64NVPROC) (GLint location, GLuint64EXT x); +typedef void (APIENTRYP 
PFNGLUNIFORM2UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y); +typedef void (APIENTRYP PFNGLUNIFORM3UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z); +typedef void (APIENTRYP PFNGLUNIFORM4UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w); +typedef void (APIENTRYP PFNGLUNIFORM1UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value); +typedef void (APIENTRYP PFNGLUNIFORM2UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value); +typedef void (APIENTRYP PFNGLUNIFORM3UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value); +typedef void (APIENTRYP PFNGLUNIFORM4UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value); +typedef void (APIENTRYP PFNGLGETUNIFORMI64VNVPROC) (GLuint program, GLint location, GLint64EXT *params); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1I64NVPROC) (GLuint program, GLint location, GLint64EXT x); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value); +#endif + +#ifndef GL_NV_shader_buffer_store +#define GL_NV_shader_buffer_store 1 +#endif + +#ifndef GL_NV_tessellation_program5 +#define GL_NV_tessellation_program5 1 +#endif + +#ifndef GL_NV_vertex_attrib_integer_64bit +#define GL_NV_vertex_attrib_integer_64bit 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVertexAttribL1i64NV (GLuint index, GLint64EXT x); +GLAPI void APIENTRY glVertexAttribL2i64NV (GLuint index, GLint64EXT x, GLint64EXT y); +GLAPI void APIENTRY glVertexAttribL3i64NV (GLuint index, GLint64EXT x, GLint64EXT y, GLint64EXT z); +GLAPI void APIENTRY glVertexAttribL4i64NV (GLuint index, GLint64EXT 
x, GLint64EXT y, GLint64EXT z, GLint64EXT w); +GLAPI void APIENTRY glVertexAttribL1i64vNV (GLuint index, const GLint64EXT *v); +GLAPI void APIENTRY glVertexAttribL2i64vNV (GLuint index, const GLint64EXT *v); +GLAPI void APIENTRY glVertexAttribL3i64vNV (GLuint index, const GLint64EXT *v); +GLAPI void APIENTRY glVertexAttribL4i64vNV (GLuint index, const GLint64EXT *v); +GLAPI void APIENTRY glVertexAttribL1ui64NV (GLuint index, GLuint64EXT x); +GLAPI void APIENTRY glVertexAttribL2ui64NV (GLuint index, GLuint64EXT x, GLuint64EXT y); +GLAPI void APIENTRY glVertexAttribL3ui64NV (GLuint index, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z); +GLAPI void APIENTRY glVertexAttribL4ui64NV (GLuint index, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w); +GLAPI void APIENTRY glVertexAttribL1ui64vNV (GLuint index, const GLuint64EXT *v); +GLAPI void APIENTRY glVertexAttribL2ui64vNV (GLuint index, const GLuint64EXT *v); +GLAPI void APIENTRY glVertexAttribL3ui64vNV (GLuint index, const GLuint64EXT *v); +GLAPI void APIENTRY glVertexAttribL4ui64vNV (GLuint index, const GLuint64EXT *v); +GLAPI void APIENTRY glGetVertexAttribLi64vNV (GLuint index, GLenum pname, GLint64EXT *params); +GLAPI void APIENTRY glGetVertexAttribLui64vNV (GLuint index, GLenum pname, GLuint64EXT *params); +GLAPI void APIENTRY glVertexAttribLFormatNV (GLuint index, GLint size, GLenum type, GLsizei stride); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVERTEXATTRIBL1I64NVPROC) (GLuint index, GLint64EXT x); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL2I64NVPROC) (GLuint index, GLint64EXT x, GLint64EXT y); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL3I64NVPROC) (GLuint index, GLint64EXT x, GLint64EXT y, GLint64EXT z); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL4I64NVPROC) (GLuint index, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL1I64VNVPROC) (GLuint index, const GLint64EXT *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL2I64VNVPROC) (GLuint index, const GLint64EXT *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL3I64VNVPROC) (GLuint index, const GLint64EXT *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL4I64VNVPROC) (GLuint index, const GLint64EXT *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL1UI64NVPROC) (GLuint index, GLuint64EXT x); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL2UI64NVPROC) (GLuint index, GLuint64EXT x, GLuint64EXT y); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL3UI64NVPROC) (GLuint index, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL4UI64NVPROC) (GLuint index, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL1UI64VNVPROC) (GLuint index, const GLuint64EXT *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL2UI64VNVPROC) (GLuint index, const GLuint64EXT *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL3UI64VNVPROC) (GLuint index, const GLuint64EXT *v); +typedef void (APIENTRYP PFNGLVERTEXATTRIBL4UI64VNVPROC) (GLuint index, const GLuint64EXT *v); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBLI64VNVPROC) (GLuint index, GLenum pname, GLint64EXT *params); +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBLUI64VNVPROC) (GLuint index, GLenum pname, GLuint64EXT *params); +typedef void (APIENTRYP PFNGLVERTEXATTRIBLFORMATNVPROC) (GLuint index, GLint size, GLenum type, GLsizei stride); +#endif + +#ifndef GL_NV_multisample_coverage +#define GL_NV_multisample_coverage 1 +#endif + +#ifndef GL_AMD_name_gen_delete +#define GL_AMD_name_gen_delete 1 +#ifdef 
GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glGenNamesAMD (GLenum identifier, GLuint num, GLuint *names); +GLAPI void APIENTRY glDeleteNamesAMD (GLenum identifier, GLuint num, const GLuint *names); +GLAPI GLboolean APIENTRY glIsNameAMD (GLenum identifier, GLuint name); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLGENNAMESAMDPROC) (GLenum identifier, GLuint num, GLuint *names); +typedef void (APIENTRYP PFNGLDELETENAMESAMDPROC) (GLenum identifier, GLuint num, const GLuint *names); +typedef GLboolean (APIENTRYP PFNGLISNAMEAMDPROC) (GLenum identifier, GLuint name); +#endif + +#ifndef GL_AMD_debug_output +#define GL_AMD_debug_output 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glDebugMessageEnableAMD (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +GLAPI void APIENTRY glDebugMessageInsertAMD (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf); +GLAPI void APIENTRY glDebugMessageCallbackAMD (GLDEBUGPROCAMD callback, GLvoid *userParam); +GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufsize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLDEBUGMESSAGEENABLEAMDPROC) (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTAMDPROC) (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf); +typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKAMDPROC) (GLDEBUGPROCAMD callback, GLvoid *userParam); +typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufsize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message); +#endif + +#ifndef GL_NV_vdpau_interop +#define GL_NV_vdpau_interop 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glVDPAUInitNV (const GLvoid *vdpDevice, const GLvoid *getProcAddress); +GLAPI void APIENTRY glVDPAUFiniNV (void); +GLAPI GLvdpauSurfaceNV APIENTRY glVDPAURegisterVideoSurfaceNV (const GLvoid *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames); +GLAPI GLvdpauSurfaceNV APIENTRY glVDPAURegisterOutputSurfaceNV (GLvoid *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames); +GLAPI void APIENTRY glVDPAUIsSurfaceNV (GLvdpauSurfaceNV surface); +GLAPI void APIENTRY glVDPAUUnregisterSurfaceNV (GLvdpauSurfaceNV surface); +GLAPI void APIENTRY glVDPAUGetSurfaceivNV (GLvdpauSurfaceNV surface, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +GLAPI void APIENTRY glVDPAUSurfaceAccessNV (GLvdpauSurfaceNV surface, GLenum access); +GLAPI void APIENTRY glVDPAUMapSurfacesNV (GLsizei numSurfaces, const GLvdpauSurfaceNV *surfaces); +GLAPI void APIENTRY glVDPAUUnmapSurfacesNV (GLsizei numSurface, const GLvdpauSurfaceNV *surfaces); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLVDPAUINITNVPROC) (const GLvoid *vdpDevice, const GLvoid *getProcAddress); +typedef void (APIENTRYP PFNGLVDPAUFININVPROC) (void); +typedef GLvdpauSurfaceNV (APIENTRYP PFNGLVDPAUREGISTERVIDEOSURFACENVPROC) (const GLvoid *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames); +typedef GLvdpauSurfaceNV (APIENTRYP PFNGLVDPAUREGISTEROUTPUTSURFACENVPROC) (GLvoid *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames); +typedef void (APIENTRYP PFNGLVDPAUISSURFACENVPROC) (GLvdpauSurfaceNV surface); +typedef void (APIENTRYP 
PFNGLVDPAUUNREGISTERSURFACENVPROC) (GLvdpauSurfaceNV surface); +typedef void (APIENTRYP PFNGLVDPAUGETSURFACEIVNVPROC) (GLvdpauSurfaceNV surface, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +typedef void (APIENTRYP PFNGLVDPAUSURFACEACCESSNVPROC) (GLvdpauSurfaceNV surface, GLenum access); +typedef void (APIENTRYP PFNGLVDPAUMAPSURFACESNVPROC) (GLsizei numSurfaces, const GLvdpauSurfaceNV *surfaces); +typedef void (APIENTRYP PFNGLVDPAUUNMAPSURFACESNVPROC) (GLsizei numSurface, const GLvdpauSurfaceNV *surfaces); +#endif + +#ifndef GL_AMD_transform_feedback3_lines_triangles +#define GL_AMD_transform_feedback3_lines_triangles 1 +#endif + +#ifndef GL_AMD_depth_clamp_separate +#define GL_AMD_depth_clamp_separate 1 +#endif + +#ifndef GL_EXT_texture_sRGB_decode +#define GL_EXT_texture_sRGB_decode 1 +#endif + +#ifndef GL_NV_texture_multisample +#define GL_NV_texture_multisample 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexImage2DMultisampleCoverageNV (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations); +GLAPI void APIENTRY glTexImage3DMultisampleCoverageNV (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations); +GLAPI void APIENTRY glTextureImage2DMultisampleNV (GLuint texture, GLenum target, GLsizei samples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations); +GLAPI void APIENTRY glTextureImage3DMultisampleNV (GLuint texture, GLenum target, GLsizei samples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations); +GLAPI void APIENTRY glTextureImage2DMultisampleCoverageNV (GLuint texture, GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations); +GLAPI void APIENTRY glTextureImage3DMultisampleCoverageNV (GLuint texture, GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXIMAGE2DMULTISAMPLECOVERAGENVPROC) (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations); +typedef void (APIENTRYP PFNGLTEXIMAGE3DMULTISAMPLECOVERAGENVPROC) (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations); +typedef void (APIENTRYP PFNGLTEXTUREIMAGE2DMULTISAMPLENVPROC) (GLuint texture, GLenum target, GLsizei samples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations); +typedef void (APIENTRYP PFNGLTEXTUREIMAGE3DMULTISAMPLENVPROC) (GLuint texture, GLenum target, GLsizei samples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations); +typedef void (APIENTRYP PFNGLTEXTUREIMAGE2DMULTISAMPLECOVERAGENVPROC) (GLuint texture, GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations); +typedef void (APIENTRYP PFNGLTEXTUREIMAGE3DMULTISAMPLECOVERAGENVPROC) (GLuint texture, GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, 
GLboolean fixedSampleLocations); +#endif + +#ifndef GL_AMD_blend_minmax_factor +#define GL_AMD_blend_minmax_factor 1 +#endif + +#ifndef GL_AMD_sample_positions +#define GL_AMD_sample_positions 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glSetMultisamplefvAMD (GLenum pname, GLuint index, const GLfloat *val); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSETMULTISAMPLEFVAMDPROC) (GLenum pname, GLuint index, const GLfloat *val); +#endif + +#ifndef GL_EXT_x11_sync_object +#define GL_EXT_x11_sync_object 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLsync APIENTRY glImportSyncEXT (GLenum external_sync_type, GLintptr external_sync, GLbitfield flags); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLsync (APIENTRYP PFNGLIMPORTSYNCEXTPROC) (GLenum external_sync_type, GLintptr external_sync, GLbitfield flags); +#endif + +#ifndef GL_AMD_multi_draw_indirect +#define GL_AMD_multi_draw_indirect 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glMultiDrawArraysIndirectAMD (GLenum mode, const GLvoid *indirect, GLsizei primcount, GLsizei stride); +GLAPI void APIENTRY glMultiDrawElementsIndirectAMD (GLenum mode, GLenum type, const GLvoid *indirect, GLsizei primcount, GLsizei stride); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTAMDPROC) (GLenum mode, const GLvoid *indirect, GLsizei primcount, GLsizei stride); +typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTAMDPROC) (GLenum mode, GLenum type, const GLvoid *indirect, GLsizei primcount, GLsizei stride); +#endif + +#ifndef GL_EXT_framebuffer_multisample_blit_scaled +#define GL_EXT_framebuffer_multisample_blit_scaled 1 +#endif + +#ifndef GL_NV_path_rendering +#define GL_NV_path_rendering 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLuint APIENTRY glGenPathsNV (GLsizei range); +GLAPI void APIENTRY glDeletePathsNV (GLuint path, GLsizei range); +GLAPI GLboolean APIENTRY glIsPathNV (GLuint path); +GLAPI void APIENTRY glPathCommandsNV (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const GLvoid *coords); +GLAPI void APIENTRY glPathCoordsNV (GLuint path, GLsizei numCoords, GLenum coordType, const GLvoid *coords); +GLAPI void APIENTRY glPathSubCommandsNV (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const GLvoid *coords); +GLAPI void APIENTRY glPathSubCoordsNV (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const GLvoid *coords); +GLAPI void APIENTRY glPathStringNV (GLuint path, GLenum format, GLsizei length, const GLvoid *pathString); +GLAPI void APIENTRY glPathGlyphsNV (GLuint firstPathName, GLenum fontTarget, const GLvoid *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const GLvoid *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +GLAPI void APIENTRY glPathGlyphRangeNV (GLuint firstPathName, GLenum fontTarget, const GLvoid *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +GLAPI void APIENTRY glWeightPathsNV (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights); +GLAPI void APIENTRY glCopyPathNV (GLuint resultPath, GLuint srcPath); +GLAPI void APIENTRY glInterpolatePathsNV (GLuint resultPath, GLuint pathA, GLuint pathB, GLfloat weight); +GLAPI void APIENTRY glTransformPathNV (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues); 
+GLAPI void APIENTRY glPathParameterivNV (GLuint path, GLenum pname, const GLint *value); +GLAPI void APIENTRY glPathParameteriNV (GLuint path, GLenum pname, GLint value); +GLAPI void APIENTRY glPathParameterfvNV (GLuint path, GLenum pname, const GLfloat *value); +GLAPI void APIENTRY glPathParameterfNV (GLuint path, GLenum pname, GLfloat value); +GLAPI void APIENTRY glPathDashArrayNV (GLuint path, GLsizei dashCount, const GLfloat *dashArray); +GLAPI void APIENTRY glPathStencilFuncNV (GLenum func, GLint ref, GLuint mask); +GLAPI void APIENTRY glPathStencilDepthOffsetNV (GLfloat factor, GLfloat units); +GLAPI void APIENTRY glStencilFillPathNV (GLuint path, GLenum fillMode, GLuint mask); +GLAPI void APIENTRY glStencilStrokePathNV (GLuint path, GLint reference, GLuint mask); +GLAPI void APIENTRY glStencilFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues); +GLAPI void APIENTRY glStencilStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues); +GLAPI void APIENTRY glPathCoverDepthFuncNV (GLenum func); +GLAPI void APIENTRY glPathColorGenNV (GLenum color, GLenum genMode, GLenum colorFormat, const GLfloat *coeffs); +GLAPI void APIENTRY glPathTexGenNV (GLenum texCoordSet, GLenum genMode, GLint components, const GLfloat *coeffs); +GLAPI void APIENTRY glPathFogGenNV (GLenum genMode); +GLAPI void APIENTRY glCoverFillPathNV (GLuint path, GLenum coverMode); +GLAPI void APIENTRY glCoverStrokePathNV (GLuint path, GLenum coverMode); +GLAPI void APIENTRY glCoverFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GLAPI void APIENTRY glCoverStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GLAPI void APIENTRY glGetPathParameterivNV (GLuint path, GLenum pname, GLint *value); +GLAPI void APIENTRY glGetPathParameterfvNV (GLuint path, GLenum pname, GLfloat *value); +GLAPI void APIENTRY glGetPathCommandsNV (GLuint path, GLubyte *commands); +GLAPI void APIENTRY glGetPathCoordsNV (GLuint path, GLfloat *coords); +GLAPI void APIENTRY glGetPathDashArrayNV (GLuint path, GLfloat *dashArray); +GLAPI void APIENTRY glGetPathMetricsNV (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics); +GLAPI void APIENTRY glGetPathMetricRangeNV (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics); +GLAPI void APIENTRY glGetPathSpacingNV (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing); +GLAPI void APIENTRY glGetPathColorGenivNV (GLenum color, GLenum pname, GLint *value); +GLAPI void APIENTRY glGetPathColorGenfvNV (GLenum color, GLenum pname, GLfloat *value); +GLAPI void APIENTRY glGetPathTexGenivNV (GLenum texCoordSet, GLenum pname, GLint *value); +GLAPI void APIENTRY glGetPathTexGenfvNV (GLenum texCoordSet, GLenum pname, GLfloat *value); +GLAPI GLboolean APIENTRY glIsPointInFillPathNV (GLuint path, GLuint mask, GLfloat x, GLfloat y); +GLAPI GLboolean APIENTRY 
glIsPointInStrokePathNV (GLuint path, GLfloat x, GLfloat y); +GLAPI GLfloat APIENTRY glGetPathLengthNV (GLuint path, GLsizei startSegment, GLsizei numSegments); +GLAPI GLboolean APIENTRY glPointAlongPathNV (GLuint path, GLsizei startSegment, GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLuint (APIENTRYP PFNGLGENPATHSNVPROC) (GLsizei range); +typedef void (APIENTRYP PFNGLDELETEPATHSNVPROC) (GLuint path, GLsizei range); +typedef GLboolean (APIENTRYP PFNGLISPATHNVPROC) (GLuint path); +typedef void (APIENTRYP PFNGLPATHCOMMANDSNVPROC) (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const GLvoid *coords); +typedef void (APIENTRYP PFNGLPATHCOORDSNVPROC) (GLuint path, GLsizei numCoords, GLenum coordType, const GLvoid *coords); +typedef void (APIENTRYP PFNGLPATHSUBCOMMANDSNVPROC) (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const GLvoid *coords); +typedef void (APIENTRYP PFNGLPATHSUBCOORDSNVPROC) (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const GLvoid *coords); +typedef void (APIENTRYP PFNGLPATHSTRINGNVPROC) (GLuint path, GLenum format, GLsizei length, const GLvoid *pathString); +typedef void (APIENTRYP PFNGLPATHGLYPHSNVPROC) (GLuint firstPathName, GLenum fontTarget, const GLvoid *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const GLvoid *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +typedef void (APIENTRYP PFNGLPATHGLYPHRANGENVPROC) (GLuint firstPathName, GLenum fontTarget, const GLvoid *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +typedef void (APIENTRYP PFNGLWEIGHTPATHSNVPROC) (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights); +typedef void (APIENTRYP PFNGLCOPYPATHNVPROC) (GLuint resultPath, GLuint srcPath); +typedef void (APIENTRYP PFNGLINTERPOLATEPATHSNVPROC) (GLuint resultPath, GLuint pathA, GLuint pathB, GLfloat weight); +typedef void (APIENTRYP PFNGLTRANSFORMPATHNVPROC) (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues); +typedef void (APIENTRYP PFNGLPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, const GLint *value); +typedef void (APIENTRYP PFNGLPATHPARAMETERINVPROC) (GLuint path, GLenum pname, GLint value); +typedef void (APIENTRYP PFNGLPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, const GLfloat *value); +typedef void (APIENTRYP PFNGLPATHPARAMETERFNVPROC) (GLuint path, GLenum pname, GLfloat value); +typedef void (APIENTRYP PFNGLPATHDASHARRAYNVPROC) (GLuint path, GLsizei dashCount, const GLfloat *dashArray); +typedef void (APIENTRYP PFNGLPATHSTENCILFUNCNVPROC) (GLenum func, GLint ref, GLuint mask); +typedef void (APIENTRYP PFNGLPATHSTENCILDEPTHOFFSETNVPROC) (GLfloat factor, GLfloat units); +typedef void (APIENTRYP PFNGLSTENCILFILLPATHNVPROC) (GLuint path, GLenum fillMode, GLuint mask); +typedef void (APIENTRYP PFNGLSTENCILSTROKEPATHNVPROC) (GLuint path, GLint reference, GLuint mask); +typedef void (APIENTRYP PFNGLSTENCILFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues); +typedef void (APIENTRYP PFNGLSTENCILSTROKEPATHINSTANCEDNVPROC) (GLsizei 
numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues); +typedef void (APIENTRYP PFNGLPATHCOVERDEPTHFUNCNVPROC) (GLenum func); +typedef void (APIENTRYP PFNGLPATHCOLORGENNVPROC) (GLenum color, GLenum genMode, GLenum colorFormat, const GLfloat *coeffs); +typedef void (APIENTRYP PFNGLPATHTEXGENNVPROC) (GLenum texCoordSet, GLenum genMode, GLint components, const GLfloat *coeffs); +typedef void (APIENTRYP PFNGLPATHFOGGENNVPROC) (GLenum genMode); +typedef void (APIENTRYP PFNGLCOVERFILLPATHNVPROC) (GLuint path, GLenum coverMode); +typedef void (APIENTRYP PFNGLCOVERSTROKEPATHNVPROC) (GLuint path, GLenum coverMode); +typedef void (APIENTRYP PFNGLCOVERFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +typedef void (APIENTRYP PFNGLCOVERSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +typedef void (APIENTRYP PFNGLGETPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, GLint *value); +typedef void (APIENTRYP PFNGLGETPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, GLfloat *value); +typedef void (APIENTRYP PFNGLGETPATHCOMMANDSNVPROC) (GLuint path, GLubyte *commands); +typedef void (APIENTRYP PFNGLGETPATHCOORDSNVPROC) (GLuint path, GLfloat *coords); +typedef void (APIENTRYP PFNGLGETPATHDASHARRAYNVPROC) (GLuint path, GLfloat *dashArray); +typedef void (APIENTRYP PFNGLGETPATHMETRICSNVPROC) (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics); +typedef void (APIENTRYP PFNGLGETPATHMETRICRANGENVPROC) (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics); +typedef void (APIENTRYP PFNGLGETPATHSPACINGNVPROC) (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const GLvoid *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing); +typedef void (APIENTRYP PFNGLGETPATHCOLORGENIVNVPROC) (GLenum color, GLenum pname, GLint *value); +typedef void (APIENTRYP PFNGLGETPATHCOLORGENFVNVPROC) (GLenum color, GLenum pname, GLfloat *value); +typedef void (APIENTRYP PFNGLGETPATHTEXGENIVNVPROC) (GLenum texCoordSet, GLenum pname, GLint *value); +typedef void (APIENTRYP PFNGLGETPATHTEXGENFVNVPROC) (GLenum texCoordSet, GLenum pname, GLfloat *value); +typedef GLboolean (APIENTRYP PFNGLISPOINTINFILLPATHNVPROC) (GLuint path, GLuint mask, GLfloat x, GLfloat y); +typedef GLboolean (APIENTRYP PFNGLISPOINTINSTROKEPATHNVPROC) (GLuint path, GLfloat x, GLfloat y); +typedef GLfloat (APIENTRYP PFNGLGETPATHLENGTHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments); +typedef GLboolean (APIENTRYP PFNGLPOINTALONGPATHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY); +#endif + +#ifndef GL_AMD_pinned_memory +#define GL_AMD_pinned_memory 1 +#endif + +#ifndef GL_AMD_stencil_operation_extended +#define GL_AMD_stencil_operation_extended 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glStencilOpValueAMD (GLenum face, GLuint value); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLSTENCILOPVALUEAMDPROC) (GLenum face, GLuint value); +#endif + +#ifndef 
GL_AMD_vertex_shader_viewport_index +#define GL_AMD_vertex_shader_viewport_index 1 +#endif + +#ifndef GL_AMD_vertex_shader_layer +#define GL_AMD_vertex_shader_layer 1 +#endif + +#ifndef GL_NV_bindless_texture +#define GL_NV_bindless_texture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI GLuint64 APIENTRY glGetTextureHandleNV (GLuint texture); +GLAPI GLuint64 APIENTRY glGetTextureSamplerHandleNV (GLuint texture, GLuint sampler); +GLAPI void APIENTRY glMakeTextureHandleResidentNV (GLuint64 handle); +GLAPI void APIENTRY glMakeTextureHandleNonResidentNV (GLuint64 handle); +GLAPI GLuint64 APIENTRY glGetImageHandleNV (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format); +GLAPI void APIENTRY glMakeImageHandleResidentNV (GLuint64 handle, GLenum access); +GLAPI void APIENTRY glMakeImageHandleNonResidentNV (GLuint64 handle); +GLAPI void APIENTRY glUniformHandleui64NV (GLint location, GLuint64 value); +GLAPI void APIENTRY glUniformHandleui64vNV (GLint location, GLsizei count, const GLuint64 *value); +GLAPI void APIENTRY glProgramUniformHandleui64NV (GLuint program, GLint location, GLuint64 value); +GLAPI void APIENTRY glProgramUniformHandleui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64 *values); +GLAPI GLboolean APIENTRY glIsTextureHandleResidentNV (GLuint64 handle); +GLAPI GLboolean APIENTRY glIsImageHandleResidentNV (GLuint64 handle); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef GLuint64 (APIENTRYP PFNGLGETTEXTUREHANDLENVPROC) (GLuint texture); +typedef GLuint64 (APIENTRYP PFNGLGETTEXTURESAMPLERHANDLENVPROC) (GLuint texture, GLuint sampler); +typedef void (APIENTRYP PFNGLMAKETEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle); +typedef void (APIENTRYP PFNGLMAKETEXTUREHANDLENONRESIDENTNVPROC) (GLuint64 handle); +typedef GLuint64 (APIENTRYP PFNGLGETIMAGEHANDLENVPROC) (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format); +typedef void (APIENTRYP PFNGLMAKEIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle, GLenum access); +typedef void (APIENTRYP PFNGLMAKEIMAGEHANDLENONRESIDENTNVPROC) (GLuint64 handle); +typedef void (APIENTRYP PFNGLUNIFORMHANDLEUI64NVPROC) (GLint location, GLuint64 value); +typedef void (APIENTRYP PFNGLUNIFORMHANDLEUI64VNVPROC) (GLint location, GLsizei count, const GLuint64 *value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64NVPROC) (GLuint program, GLint location, GLuint64 value); +typedef void (APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *values); +typedef GLboolean (APIENTRYP PFNGLISTEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle); +typedef GLboolean (APIENTRYP PFNGLISIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle); +#endif + +#ifndef GL_NV_shader_atomic_float +#define GL_NV_shader_atomic_float 1 +#endif + +#ifndef GL_AMD_query_buffer_object +#define GL_AMD_query_buffer_object 1 +#endif + +#ifndef GL_AMD_sparse_texture +#define GL_AMD_sparse_texture 1 +#ifdef GL_GLEXT_PROTOTYPES +GLAPI void APIENTRY glTexStorageSparseAMD (GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLsizei layers, GLbitfield flags); +GLAPI void APIENTRY glTextureStorageSparseAMD (GLuint texture, GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLsizei layers, GLbitfield flags); +#endif /* GL_GLEXT_PROTOTYPES */ +typedef void (APIENTRYP PFNGLTEXSTORAGESPARSEAMDPROC) (GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLsizei layers, GLbitfield flags); +typedef void (APIENTRYP 
PFNGLTEXTURESTORAGESPARSEAMDPROC) (GLuint texture, GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLsizei layers, GLbitfield flags); +#endif + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/VC/inc/GL/wglext.h b/VC/inc/GL/wglext.h new file mode 100644 index 0000000..b5dc7bf --- /dev/null +++ b/VC/inc/GL/wglext.h @@ -0,0 +1,943 @@ +#ifndef __wglext_h_ +#define __wglext_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2007-2012 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ + +/* Function declaration macros - to move into glplatform.h */ + +#if defined(_WIN32) && !defined(APIENTRY) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__) +#define WIN32_LEAN_AND_MEAN 1 +#include <windows.h> +#endif + +#ifndef APIENTRY +#define APIENTRY +#endif +#ifndef APIENTRYP +#define APIENTRYP APIENTRY * +#endif +#ifndef GLAPI +#define GLAPI extern +#endif + +/*************************************************************/ + +/* Header file version number */ +/* wglext.h last updated 2012/01/04 */ +/* Current version at http://www.opengl.org/registry/ */ +#define WGL_WGLEXT_VERSION 24 + +#ifndef WGL_ARB_buffer_region +#define WGL_FRONT_COLOR_BUFFER_BIT_ARB 0x00000001 +#define WGL_BACK_COLOR_BUFFER_BIT_ARB 0x00000002 +#define WGL_DEPTH_BUFFER_BIT_ARB 0x00000004 +#define WGL_STENCIL_BUFFER_BIT_ARB 0x00000008 +#endif + +#ifndef WGL_ARB_multisample +#define WGL_SAMPLE_BUFFERS_ARB 0x2041 +#define WGL_SAMPLES_ARB 0x2042 +#endif + +#ifndef WGL_ARB_extensions_string +#endif + +#ifndef WGL_ARB_pixel_format +#define WGL_NUMBER_PIXEL_FORMATS_ARB 0x2000 +#define WGL_DRAW_TO_WINDOW_ARB 0x2001 +#define WGL_DRAW_TO_BITMAP_ARB 0x2002 +#define WGL_ACCELERATION_ARB 0x2003 +#define WGL_NEED_PALETTE_ARB 0x2004 +#define WGL_NEED_SYSTEM_PALETTE_ARB 0x2005 +#define WGL_SWAP_LAYER_BUFFERS_ARB 0x2006 +#define WGL_SWAP_METHOD_ARB 0x2007 +#define WGL_NUMBER_OVERLAYS_ARB 0x2008 +#define WGL_NUMBER_UNDERLAYS_ARB 0x2009 +#define WGL_TRANSPARENT_ARB 0x200A +#define WGL_TRANSPARENT_RED_VALUE_ARB 0x2037 +#define WGL_TRANSPARENT_GREEN_VALUE_ARB 0x2038 +#define WGL_TRANSPARENT_BLUE_VALUE_ARB 0x2039 +#define WGL_TRANSPARENT_ALPHA_VALUE_ARB 0x203A +#define WGL_TRANSPARENT_INDEX_VALUE_ARB 0x203B +#define WGL_SHARE_DEPTH_ARB 0x200C +#define WGL_SHARE_STENCIL_ARB 0x200D +#define WGL_SHARE_ACCUM_ARB 0x200E +#define WGL_SUPPORT_GDI_ARB 0x200F +#define WGL_SUPPORT_OPENGL_ARB 0x2010 +#define WGL_DOUBLE_BUFFER_ARB 0x2011 +#define
WGL_STEREO_ARB 0x2012 +#define WGL_PIXEL_TYPE_ARB 0x2013 +#define WGL_COLOR_BITS_ARB 0x2014 +#define WGL_RED_BITS_ARB 0x2015 +#define WGL_RED_SHIFT_ARB 0x2016 +#define WGL_GREEN_BITS_ARB 0x2017 +#define WGL_GREEN_SHIFT_ARB 0x2018 +#define WGL_BLUE_BITS_ARB 0x2019 +#define WGL_BLUE_SHIFT_ARB 0x201A +#define WGL_ALPHA_BITS_ARB 0x201B +#define WGL_ALPHA_SHIFT_ARB 0x201C +#define WGL_ACCUM_BITS_ARB 0x201D +#define WGL_ACCUM_RED_BITS_ARB 0x201E +#define WGL_ACCUM_GREEN_BITS_ARB 0x201F +#define WGL_ACCUM_BLUE_BITS_ARB 0x2020 +#define WGL_ACCUM_ALPHA_BITS_ARB 0x2021 +#define WGL_DEPTH_BITS_ARB 0x2022 +#define WGL_STENCIL_BITS_ARB 0x2023 +#define WGL_AUX_BUFFERS_ARB 0x2024 +#define WGL_NO_ACCELERATION_ARB 0x2025 +#define WGL_GENERIC_ACCELERATION_ARB 0x2026 +#define WGL_FULL_ACCELERATION_ARB 0x2027 +#define WGL_SWAP_EXCHANGE_ARB 0x2028 +#define WGL_SWAP_COPY_ARB 0x2029 +#define WGL_SWAP_UNDEFINED_ARB 0x202A +#define WGL_TYPE_RGBA_ARB 0x202B +#define WGL_TYPE_COLORINDEX_ARB 0x202C +#endif + +#ifndef WGL_ARB_make_current_read +#define ERROR_INVALID_PIXEL_TYPE_ARB 0x2043 +#define ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB 0x2054 +#endif + +#ifndef WGL_ARB_pbuffer +#define WGL_DRAW_TO_PBUFFER_ARB 0x202D +#define WGL_MAX_PBUFFER_PIXELS_ARB 0x202E +#define WGL_MAX_PBUFFER_WIDTH_ARB 0x202F +#define WGL_MAX_PBUFFER_HEIGHT_ARB 0x2030 +#define WGL_PBUFFER_LARGEST_ARB 0x2033 +#define WGL_PBUFFER_WIDTH_ARB 0x2034 +#define WGL_PBUFFER_HEIGHT_ARB 0x2035 +#define WGL_PBUFFER_LOST_ARB 0x2036 +#endif + +#ifndef WGL_ARB_render_texture +#define WGL_BIND_TO_TEXTURE_RGB_ARB 0x2070 +#define WGL_BIND_TO_TEXTURE_RGBA_ARB 0x2071 +#define WGL_TEXTURE_FORMAT_ARB 0x2072 +#define WGL_TEXTURE_TARGET_ARB 0x2073 +#define WGL_MIPMAP_TEXTURE_ARB 0x2074 +#define WGL_TEXTURE_RGB_ARB 0x2075 +#define WGL_TEXTURE_RGBA_ARB 0x2076 +#define WGL_NO_TEXTURE_ARB 0x2077 +#define WGL_TEXTURE_CUBE_MAP_ARB 0x2078 +#define WGL_TEXTURE_1D_ARB 0x2079 +#define WGL_TEXTURE_2D_ARB 0x207A +#define WGL_MIPMAP_LEVEL_ARB 0x207B +#define WGL_CUBE_MAP_FACE_ARB 0x207C +#define WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB 0x207D +#define WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB 0x207E +#define WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB 0x207F +#define WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB 0x2080 +#define WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB 0x2081 +#define WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB 0x2082 +#define WGL_FRONT_LEFT_ARB 0x2083 +#define WGL_FRONT_RIGHT_ARB 0x2084 +#define WGL_BACK_LEFT_ARB 0x2085 +#define WGL_BACK_RIGHT_ARB 0x2086 +#define WGL_AUX0_ARB 0x2087 +#define WGL_AUX1_ARB 0x2088 +#define WGL_AUX2_ARB 0x2089 +#define WGL_AUX3_ARB 0x208A +#define WGL_AUX4_ARB 0x208B +#define WGL_AUX5_ARB 0x208C +#define WGL_AUX6_ARB 0x208D +#define WGL_AUX7_ARB 0x208E +#define WGL_AUX8_ARB 0x208F +#define WGL_AUX9_ARB 0x2090 +#endif + +#ifndef WGL_ARB_pixel_format_float +#define WGL_TYPE_RGBA_FLOAT_ARB 0x21A0 +#endif + +#ifndef WGL_ARB_framebuffer_sRGB +#define WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB 0x20A9 +#endif + +#ifndef WGL_ARB_create_context +#define WGL_CONTEXT_DEBUG_BIT_ARB 0x00000001 +#define WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB 0x00000002 +#define WGL_CONTEXT_MAJOR_VERSION_ARB 0x2091 +#define WGL_CONTEXT_MINOR_VERSION_ARB 0x2092 +#define WGL_CONTEXT_LAYER_PLANE_ARB 0x2093 +#define WGL_CONTEXT_FLAGS_ARB 0x2094 +#define ERROR_INVALID_VERSION_ARB 0x2095 +#endif + +#ifndef WGL_ARB_create_context_profile +#define WGL_CONTEXT_PROFILE_MASK_ARB 0x9126 +#define WGL_CONTEXT_CORE_PROFILE_BIT_ARB 0x00000001 +#define WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB 0x00000002 +#define 
ERROR_INVALID_PROFILE_ARB 0x2096 +#endif + +#ifndef WGL_ARB_create_context_robustness +#define WGL_CONTEXT_ROBUST_ACCESS_BIT_ARB 0x00000004 +#define WGL_LOSE_CONTEXT_ON_RESET_ARB 0x8252 +#define WGL_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB 0x8256 +#define WGL_NO_RESET_NOTIFICATION_ARB 0x8261 +#endif + +#ifndef WGL_EXT_make_current_read +#define ERROR_INVALID_PIXEL_TYPE_EXT 0x2043 +#endif + +#ifndef WGL_EXT_pixel_format +#define WGL_NUMBER_PIXEL_FORMATS_EXT 0x2000 +#define WGL_DRAW_TO_WINDOW_EXT 0x2001 +#define WGL_DRAW_TO_BITMAP_EXT 0x2002 +#define WGL_ACCELERATION_EXT 0x2003 +#define WGL_NEED_PALETTE_EXT 0x2004 +#define WGL_NEED_SYSTEM_PALETTE_EXT 0x2005 +#define WGL_SWAP_LAYER_BUFFERS_EXT 0x2006 +#define WGL_SWAP_METHOD_EXT 0x2007 +#define WGL_NUMBER_OVERLAYS_EXT 0x2008 +#define WGL_NUMBER_UNDERLAYS_EXT 0x2009 +#define WGL_TRANSPARENT_EXT 0x200A +#define WGL_TRANSPARENT_VALUE_EXT 0x200B +#define WGL_SHARE_DEPTH_EXT 0x200C +#define WGL_SHARE_STENCIL_EXT 0x200D +#define WGL_SHARE_ACCUM_EXT 0x200E +#define WGL_SUPPORT_GDI_EXT 0x200F +#define WGL_SUPPORT_OPENGL_EXT 0x2010 +#define WGL_DOUBLE_BUFFER_EXT 0x2011 +#define WGL_STEREO_EXT 0x2012 +#define WGL_PIXEL_TYPE_EXT 0x2013 +#define WGL_COLOR_BITS_EXT 0x2014 +#define WGL_RED_BITS_EXT 0x2015 +#define WGL_RED_SHIFT_EXT 0x2016 +#define WGL_GREEN_BITS_EXT 0x2017 +#define WGL_GREEN_SHIFT_EXT 0x2018 +#define WGL_BLUE_BITS_EXT 0x2019 +#define WGL_BLUE_SHIFT_EXT 0x201A +#define WGL_ALPHA_BITS_EXT 0x201B +#define WGL_ALPHA_SHIFT_EXT 0x201C +#define WGL_ACCUM_BITS_EXT 0x201D +#define WGL_ACCUM_RED_BITS_EXT 0x201E +#define WGL_ACCUM_GREEN_BITS_EXT 0x201F +#define WGL_ACCUM_BLUE_BITS_EXT 0x2020 +#define WGL_ACCUM_ALPHA_BITS_EXT 0x2021 +#define WGL_DEPTH_BITS_EXT 0x2022 +#define WGL_STENCIL_BITS_EXT 0x2023 +#define WGL_AUX_BUFFERS_EXT 0x2024 +#define WGL_NO_ACCELERATION_EXT 0x2025 +#define WGL_GENERIC_ACCELERATION_EXT 0x2026 +#define WGL_FULL_ACCELERATION_EXT 0x2027 +#define WGL_SWAP_EXCHANGE_EXT 0x2028 +#define WGL_SWAP_COPY_EXT 0x2029 +#define WGL_SWAP_UNDEFINED_EXT 0x202A +#define WGL_TYPE_RGBA_EXT 0x202B +#define WGL_TYPE_COLORINDEX_EXT 0x202C +#endif + +#ifndef WGL_EXT_pbuffer +#define WGL_DRAW_TO_PBUFFER_EXT 0x202D +#define WGL_MAX_PBUFFER_PIXELS_EXT 0x202E +#define WGL_MAX_PBUFFER_WIDTH_EXT 0x202F +#define WGL_MAX_PBUFFER_HEIGHT_EXT 0x2030 +#define WGL_OPTIMAL_PBUFFER_WIDTH_EXT 0x2031 +#define WGL_OPTIMAL_PBUFFER_HEIGHT_EXT 0x2032 +#define WGL_PBUFFER_LARGEST_EXT 0x2033 +#define WGL_PBUFFER_WIDTH_EXT 0x2034 +#define WGL_PBUFFER_HEIGHT_EXT 0x2035 +#endif + +#ifndef WGL_EXT_depth_float +#define WGL_DEPTH_FLOAT_EXT 0x2040 +#endif + +#ifndef WGL_3DFX_multisample +#define WGL_SAMPLE_BUFFERS_3DFX 0x2060 +#define WGL_SAMPLES_3DFX 0x2061 +#endif + +#ifndef WGL_EXT_multisample +#define WGL_SAMPLE_BUFFERS_EXT 0x2041 +#define WGL_SAMPLES_EXT 0x2042 +#endif + +#ifndef WGL_I3D_digital_video_control +#define WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D 0x2050 +#define WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D 0x2051 +#define WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D 0x2052 +#define WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D 0x2053 +#endif + +#ifndef WGL_I3D_gamma +#define WGL_GAMMA_TABLE_SIZE_I3D 0x204E +#define WGL_GAMMA_EXCLUDE_DESKTOP_I3D 0x204F +#endif + +#ifndef WGL_I3D_genlock +#define WGL_GENLOCK_SOURCE_MULTIVIEW_I3D 0x2044 +#define WGL_GENLOCK_SOURCE_EXTENAL_SYNC_I3D 0x2045 +#define WGL_GENLOCK_SOURCE_EXTENAL_FIELD_I3D 0x2046 +#define WGL_GENLOCK_SOURCE_EXTENAL_TTL_I3D 0x2047 +#define WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D 0x2048 +#define 
WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D 0x2049 +#define WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D 0x204A +#define WGL_GENLOCK_SOURCE_EDGE_RISING_I3D 0x204B +#define WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D 0x204C +#endif + +#ifndef WGL_I3D_image_buffer +#define WGL_IMAGE_BUFFER_MIN_ACCESS_I3D 0x00000001 +#define WGL_IMAGE_BUFFER_LOCK_I3D 0x00000002 +#endif + +#ifndef WGL_I3D_swap_frame_lock +#endif + +#ifndef WGL_NV_render_depth_texture +#define WGL_BIND_TO_TEXTURE_DEPTH_NV 0x20A3 +#define WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV 0x20A4 +#define WGL_DEPTH_TEXTURE_FORMAT_NV 0x20A5 +#define WGL_TEXTURE_DEPTH_COMPONENT_NV 0x20A6 +#define WGL_DEPTH_COMPONENT_NV 0x20A7 +#endif + +#ifndef WGL_NV_render_texture_rectangle +#define WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV 0x20A0 +#define WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV 0x20A1 +#define WGL_TEXTURE_RECTANGLE_NV 0x20A2 +#endif + +#ifndef WGL_ATI_pixel_format_float +#define WGL_TYPE_RGBA_FLOAT_ATI 0x21A0 +#endif + +#ifndef WGL_NV_float_buffer +#define WGL_FLOAT_COMPONENTS_NV 0x20B0 +#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV 0x20B1 +#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV 0x20B2 +#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV 0x20B3 +#define WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV 0x20B4 +#define WGL_TEXTURE_FLOAT_R_NV 0x20B5 +#define WGL_TEXTURE_FLOAT_RG_NV 0x20B6 +#define WGL_TEXTURE_FLOAT_RGB_NV 0x20B7 +#define WGL_TEXTURE_FLOAT_RGBA_NV 0x20B8 +#endif + +#ifndef WGL_3DL_stereo_control +#define WGL_STEREO_EMITTER_ENABLE_3DL 0x2055 +#define WGL_STEREO_EMITTER_DISABLE_3DL 0x2056 +#define WGL_STEREO_POLARITY_NORMAL_3DL 0x2057 +#define WGL_STEREO_POLARITY_INVERT_3DL 0x2058 +#endif + +#ifndef WGL_EXT_pixel_format_packed_float +#define WGL_TYPE_RGBA_UNSIGNED_FLOAT_EXT 0x20A8 +#endif + +#ifndef WGL_EXT_framebuffer_sRGB +#define WGL_FRAMEBUFFER_SRGB_CAPABLE_EXT 0x20A9 +#endif + +#ifndef WGL_NV_present_video +#define WGL_NUM_VIDEO_SLOTS_NV 0x20F0 +#endif + +#ifndef WGL_NV_video_out +#define WGL_BIND_TO_VIDEO_RGB_NV 0x20C0 +#define WGL_BIND_TO_VIDEO_RGBA_NV 0x20C1 +#define WGL_BIND_TO_VIDEO_RGB_AND_DEPTH_NV 0x20C2 +#define WGL_VIDEO_OUT_COLOR_NV 0x20C3 +#define WGL_VIDEO_OUT_ALPHA_NV 0x20C4 +#define WGL_VIDEO_OUT_DEPTH_NV 0x20C5 +#define WGL_VIDEO_OUT_COLOR_AND_ALPHA_NV 0x20C6 +#define WGL_VIDEO_OUT_COLOR_AND_DEPTH_NV 0x20C7 +#define WGL_VIDEO_OUT_FRAME 0x20C8 +#define WGL_VIDEO_OUT_FIELD_1 0x20C9 +#define WGL_VIDEO_OUT_FIELD_2 0x20CA +#define WGL_VIDEO_OUT_STACKED_FIELDS_1_2 0x20CB +#define WGL_VIDEO_OUT_STACKED_FIELDS_2_1 0x20CC +#endif + +#ifndef WGL_NV_swap_group +#endif + +#ifndef WGL_NV_gpu_affinity +#define WGL_ERROR_INCOMPATIBLE_AFFINITY_MASKS_NV 0x20D0 +#define WGL_ERROR_MISSING_AFFINITY_MASK_NV 0x20D1 +#endif + +#ifndef WGL_AMD_gpu_association +#define WGL_GPU_VENDOR_AMD 0x1F00 +#define WGL_GPU_RENDERER_STRING_AMD 0x1F01 +#define WGL_GPU_OPENGL_VERSION_STRING_AMD 0x1F02 +#define WGL_GPU_FASTEST_TARGET_GPUS_AMD 0x21A2 +#define WGL_GPU_RAM_AMD 0x21A3 +#define WGL_GPU_CLOCK_AMD 0x21A4 +#define WGL_GPU_NUM_PIPES_AMD 0x21A5 +#define WGL_GPU_NUM_SIMD_AMD 0x21A6 +#define WGL_GPU_NUM_RB_AMD 0x21A7 +#define WGL_GPU_NUM_SPI_AMD 0x21A8 +#endif + +#ifndef WGL_NV_video_capture +#define WGL_UNIQUE_ID_NV 0x20CE +#define WGL_NUM_VIDEO_CAPTURE_SLOTS_NV 0x20CF +#endif + +#ifndef WGL_NV_copy_image +#endif + +#ifndef WGL_NV_multisample_coverage +#define WGL_COVERAGE_SAMPLES_NV 0x2042 +#define WGL_COLOR_SAMPLES_NV 0x20B9 +#endif + +#ifndef WGL_EXT_create_context_es2_profile +#define WGL_CONTEXT_ES2_PROFILE_BIT_EXT 0x00000004 +#endif + +#ifndef WGL_NV_DX_interop 
+#define WGL_ACCESS_READ_ONLY_NV 0x00000000 +#define WGL_ACCESS_READ_WRITE_NV 0x00000001 +#define WGL_ACCESS_WRITE_DISCARD_NV 0x00000002 +#endif + +#ifndef WGL_NV_DX_interop2 +#endif + +#ifndef WGL_EXT_swap_control_tear +#endif + + +/*************************************************************/ + +#ifndef WGL_ARB_pbuffer +DECLARE_HANDLE(HPBUFFERARB); +#endif +#ifndef WGL_EXT_pbuffer +DECLARE_HANDLE(HPBUFFEREXT); +#endif +#ifndef WGL_NV_present_video +DECLARE_HANDLE(HVIDEOOUTPUTDEVICENV); +#endif +#ifndef WGL_NV_video_output +DECLARE_HANDLE(HPVIDEODEV); +#endif +#ifndef WGL_NV_gpu_affinity +DECLARE_HANDLE(HPGPUNV); +DECLARE_HANDLE(HGPUNV); + +typedef struct _GPU_DEVICE { + DWORD cb; + CHAR DeviceName[32]; + CHAR DeviceString[128]; + DWORD Flags; + RECT rcVirtualScreen; +} GPU_DEVICE, *PGPU_DEVICE; +#endif +#ifndef WGL_NV_video_capture +DECLARE_HANDLE(HVIDEOINPUTDEVICENV); +#endif + +#ifndef WGL_ARB_buffer_region +#define WGL_ARB_buffer_region 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern HANDLE WINAPI wglCreateBufferRegionARB (HDC hDC, int iLayerPlane, UINT uType); +extern VOID WINAPI wglDeleteBufferRegionARB (HANDLE hRegion); +extern BOOL WINAPI wglSaveBufferRegionARB (HANDLE hRegion, int x, int y, int width, int height); +extern BOOL WINAPI wglRestoreBufferRegionARB (HANDLE hRegion, int x, int y, int width, int height, int xSrc, int ySrc); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef HANDLE (WINAPI * PFNWGLCREATEBUFFERREGIONARBPROC) (HDC hDC, int iLayerPlane, UINT uType); +typedef VOID (WINAPI * PFNWGLDELETEBUFFERREGIONARBPROC) (HANDLE hRegion); +typedef BOOL (WINAPI * PFNWGLSAVEBUFFERREGIONARBPROC) (HANDLE hRegion, int x, int y, int width, int height); +typedef BOOL (WINAPI * PFNWGLRESTOREBUFFERREGIONARBPROC) (HANDLE hRegion, int x, int y, int width, int height, int xSrc, int ySrc); +#endif + +#ifndef WGL_ARB_multisample +#define WGL_ARB_multisample 1 +#endif + +#ifndef WGL_ARB_extensions_string +#define WGL_ARB_extensions_string 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern const char * WINAPI wglGetExtensionsStringARB (HDC hdc); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef const char * (WINAPI * PFNWGLGETEXTENSIONSSTRINGARBPROC) (HDC hdc); +#endif + +#ifndef WGL_ARB_pixel_format +#define WGL_ARB_pixel_format 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglGetPixelFormatAttribivARB (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int *piAttributes, int *piValues); +extern BOOL WINAPI wglGetPixelFormatAttribfvARB (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int *piAttributes, FLOAT *pfValues); +extern BOOL WINAPI wglChoosePixelFormatARB (HDC hdc, const int *piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBIVARBPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int *piAttributes, int *piValues); +typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBFVARBPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, const int *piAttributes, FLOAT *pfValues); +typedef BOOL (WINAPI * PFNWGLCHOOSEPIXELFORMATARBPROC) (HDC hdc, const int *piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats); +#endif + +#ifndef WGL_ARB_make_current_read +#define WGL_ARB_make_current_read 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglMakeContextCurrentARB (HDC hDrawDC, HDC hReadDC, HGLRC hglrc); +extern HDC WINAPI wglGetCurrentReadDCARB (void); +#endif /* 
WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLMAKECONTEXTCURRENTARBPROC) (HDC hDrawDC, HDC hReadDC, HGLRC hglrc); +typedef HDC (WINAPI * PFNWGLGETCURRENTREADDCARBPROC) (void); +#endif + +#ifndef WGL_ARB_pbuffer +#define WGL_ARB_pbuffer 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern HPBUFFERARB WINAPI wglCreatePbufferARB (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int *piAttribList); +extern HDC WINAPI wglGetPbufferDCARB (HPBUFFERARB hPbuffer); +extern int WINAPI wglReleasePbufferDCARB (HPBUFFERARB hPbuffer, HDC hDC); +extern BOOL WINAPI wglDestroyPbufferARB (HPBUFFERARB hPbuffer); +extern BOOL WINAPI wglQueryPbufferARB (HPBUFFERARB hPbuffer, int iAttribute, int *piValue); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef HPBUFFERARB (WINAPI * PFNWGLCREATEPBUFFERARBPROC) (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int *piAttribList); +typedef HDC (WINAPI * PFNWGLGETPBUFFERDCARBPROC) (HPBUFFERARB hPbuffer); +typedef int (WINAPI * PFNWGLRELEASEPBUFFERDCARBPROC) (HPBUFFERARB hPbuffer, HDC hDC); +typedef BOOL (WINAPI * PFNWGLDESTROYPBUFFERARBPROC) (HPBUFFERARB hPbuffer); +typedef BOOL (WINAPI * PFNWGLQUERYPBUFFERARBPROC) (HPBUFFERARB hPbuffer, int iAttribute, int *piValue); +#endif + +#ifndef WGL_ARB_render_texture +#define WGL_ARB_render_texture 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglBindTexImageARB (HPBUFFERARB hPbuffer, int iBuffer); +extern BOOL WINAPI wglReleaseTexImageARB (HPBUFFERARB hPbuffer, int iBuffer); +extern BOOL WINAPI wglSetPbufferAttribARB (HPBUFFERARB hPbuffer, const int *piAttribList); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLBINDTEXIMAGEARBPROC) (HPBUFFERARB hPbuffer, int iBuffer); +typedef BOOL (WINAPI * PFNWGLRELEASETEXIMAGEARBPROC) (HPBUFFERARB hPbuffer, int iBuffer); +typedef BOOL (WINAPI * PFNWGLSETPBUFFERATTRIBARBPROC) (HPBUFFERARB hPbuffer, const int *piAttribList); +#endif + +#ifndef WGL_ARB_pixel_format_float +#define WGL_ARB_pixel_format_float 1 +#endif + +#ifndef WGL_ARB_framebuffer_sRGB +#define WGL_ARB_framebuffer_sRGB 1 +#endif + +#ifndef WGL_ARB_create_context +#define WGL_ARB_create_context 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern HGLRC WINAPI wglCreateContextAttribsARB (HDC hDC, HGLRC hShareContext, const int *attribList); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef HGLRC (WINAPI * PFNWGLCREATECONTEXTATTRIBSARBPROC) (HDC hDC, HGLRC hShareContext, const int *attribList); +#endif + +#ifndef WGL_ARB_create_context_profile +#define WGL_ARB_create_context_profile 1 +#endif + +#ifndef WGL_ARB_create_context_robustness +#define WGL_ARB_create_context_robustness 1 +#endif + +#ifndef WGL_EXT_display_color_table +#define WGL_EXT_display_color_table 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern GLboolean WINAPI wglCreateDisplayColorTableEXT (GLushort id); +extern GLboolean WINAPI wglLoadDisplayColorTableEXT (const GLushort *table, GLuint length); +extern GLboolean WINAPI wglBindDisplayColorTableEXT (GLushort id); +extern VOID WINAPI wglDestroyDisplayColorTableEXT (GLushort id); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef GLboolean (WINAPI * PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC) (GLushort id); +typedef GLboolean (WINAPI * PFNWGLLOADDISPLAYCOLORTABLEEXTPROC) (const GLushort *table, GLuint length); +typedef GLboolean (WINAPI * PFNWGLBINDDISPLAYCOLORTABLEEXTPROC) (GLushort id); +typedef VOID (WINAPI * PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC) (GLushort id); +#endif + +#ifndef WGL_EXT_extensions_string +#define WGL_EXT_extensions_string 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern const char * WINAPI 
wglGetExtensionsStringEXT (void); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef const char * (WINAPI * PFNWGLGETEXTENSIONSSTRINGEXTPROC) (void); +#endif + +#ifndef WGL_EXT_make_current_read +#define WGL_EXT_make_current_read 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglMakeContextCurrentEXT (HDC hDrawDC, HDC hReadDC, HGLRC hglrc); +extern HDC WINAPI wglGetCurrentReadDCEXT (void); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLMAKECONTEXTCURRENTEXTPROC) (HDC hDrawDC, HDC hReadDC, HGLRC hglrc); +typedef HDC (WINAPI * PFNWGLGETCURRENTREADDCEXTPROC) (void); +#endif + +#ifndef WGL_EXT_pbuffer +#define WGL_EXT_pbuffer 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern HPBUFFEREXT WINAPI wglCreatePbufferEXT (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int *piAttribList); +extern HDC WINAPI wglGetPbufferDCEXT (HPBUFFEREXT hPbuffer); +extern int WINAPI wglReleasePbufferDCEXT (HPBUFFEREXT hPbuffer, HDC hDC); +extern BOOL WINAPI wglDestroyPbufferEXT (HPBUFFEREXT hPbuffer); +extern BOOL WINAPI wglQueryPbufferEXT (HPBUFFEREXT hPbuffer, int iAttribute, int *piValue); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef HPBUFFEREXT (WINAPI * PFNWGLCREATEPBUFFEREXTPROC) (HDC hDC, int iPixelFormat, int iWidth, int iHeight, const int *piAttribList); +typedef HDC (WINAPI * PFNWGLGETPBUFFERDCEXTPROC) (HPBUFFEREXT hPbuffer); +typedef int (WINAPI * PFNWGLRELEASEPBUFFERDCEXTPROC) (HPBUFFEREXT hPbuffer, HDC hDC); +typedef BOOL (WINAPI * PFNWGLDESTROYPBUFFEREXTPROC) (HPBUFFEREXT hPbuffer); +typedef BOOL (WINAPI * PFNWGLQUERYPBUFFEREXTPROC) (HPBUFFEREXT hPbuffer, int iAttribute, int *piValue); +#endif + +#ifndef WGL_EXT_pixel_format +#define WGL_EXT_pixel_format 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglGetPixelFormatAttribivEXT (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int *piAttributes, int *piValues); +extern BOOL WINAPI wglGetPixelFormatAttribfvEXT (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int *piAttributes, FLOAT *pfValues); +extern BOOL WINAPI wglChoosePixelFormatEXT (HDC hdc, const int *piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBIVEXTPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int *piAttributes, int *piValues); +typedef BOOL (WINAPI * PFNWGLGETPIXELFORMATATTRIBFVEXTPROC) (HDC hdc, int iPixelFormat, int iLayerPlane, UINT nAttributes, int *piAttributes, FLOAT *pfValues); +typedef BOOL (WINAPI * PFNWGLCHOOSEPIXELFORMATEXTPROC) (HDC hdc, const int *piAttribIList, const FLOAT *pfAttribFList, UINT nMaxFormats, int *piFormats, UINT *nNumFormats); +#endif + +#ifndef WGL_EXT_swap_control +#define WGL_EXT_swap_control 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglSwapIntervalEXT (int interval); +extern int WINAPI wglGetSwapIntervalEXT (void); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLSWAPINTERVALEXTPROC) (int interval); +typedef int (WINAPI * PFNWGLGETSWAPINTERVALEXTPROC) (void); +#endif + +#ifndef WGL_EXT_depth_float +#define WGL_EXT_depth_float 1 +#endif + +#ifndef WGL_NV_vertex_array_range +#define WGL_NV_vertex_array_range 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern void* WINAPI wglAllocateMemoryNV (GLsizei size, GLfloat readfreq, GLfloat writefreq, GLfloat priority); +extern void WINAPI wglFreeMemoryNV (void *pointer); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef void* (WINAPI * PFNWGLALLOCATEMEMORYNVPROC) (GLsizei size, GLfloat readfreq, GLfloat 
writefreq, GLfloat priority); +typedef void (WINAPI * PFNWGLFREEMEMORYNVPROC) (void *pointer); +#endif + +#ifndef WGL_3DFX_multisample +#define WGL_3DFX_multisample 1 +#endif + +#ifndef WGL_EXT_multisample +#define WGL_EXT_multisample 1 +#endif + +#ifndef WGL_OML_sync_control +#define WGL_OML_sync_control 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglGetSyncValuesOML (HDC hdc, INT64 *ust, INT64 *msc, INT64 *sbc); +extern BOOL WINAPI wglGetMscRateOML (HDC hdc, INT32 *numerator, INT32 *denominator); +extern INT64 WINAPI wglSwapBuffersMscOML (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder); +extern INT64 WINAPI wglSwapLayerBuffersMscOML (HDC hdc, int fuPlanes, INT64 target_msc, INT64 divisor, INT64 remainder); +extern BOOL WINAPI wglWaitForMscOML (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder, INT64 *ust, INT64 *msc, INT64 *sbc); +extern BOOL WINAPI wglWaitForSbcOML (HDC hdc, INT64 target_sbc, INT64 *ust, INT64 *msc, INT64 *sbc); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLGETSYNCVALUESOMLPROC) (HDC hdc, INT64 *ust, INT64 *msc, INT64 *sbc); +typedef BOOL (WINAPI * PFNWGLGETMSCRATEOMLPROC) (HDC hdc, INT32 *numerator, INT32 *denominator); +typedef INT64 (WINAPI * PFNWGLSWAPBUFFERSMSCOMLPROC) (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder); +typedef INT64 (WINAPI * PFNWGLSWAPLAYERBUFFERSMSCOMLPROC) (HDC hdc, int fuPlanes, INT64 target_msc, INT64 divisor, INT64 remainder); +typedef BOOL (WINAPI * PFNWGLWAITFORMSCOMLPROC) (HDC hdc, INT64 target_msc, INT64 divisor, INT64 remainder, INT64 *ust, INT64 *msc, INT64 *sbc); +typedef BOOL (WINAPI * PFNWGLWAITFORSBCOMLPROC) (HDC hdc, INT64 target_sbc, INT64 *ust, INT64 *msc, INT64 *sbc); +#endif + +#ifndef WGL_I3D_digital_video_control +#define WGL_I3D_digital_video_control 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglGetDigitalVideoParametersI3D (HDC hDC, int iAttribute, int *piValue); +extern BOOL WINAPI wglSetDigitalVideoParametersI3D (HDC hDC, int iAttribute, const int *piValue); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC) (HDC hDC, int iAttribute, int *piValue); +typedef BOOL (WINAPI * PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC) (HDC hDC, int iAttribute, const int *piValue); +#endif + +#ifndef WGL_I3D_gamma +#define WGL_I3D_gamma 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglGetGammaTableParametersI3D (HDC hDC, int iAttribute, int *piValue); +extern BOOL WINAPI wglSetGammaTableParametersI3D (HDC hDC, int iAttribute, const int *piValue); +extern BOOL WINAPI wglGetGammaTableI3D (HDC hDC, int iEntries, USHORT *puRed, USHORT *puGreen, USHORT *puBlue); +extern BOOL WINAPI wglSetGammaTableI3D (HDC hDC, int iEntries, const USHORT *puRed, const USHORT *puGreen, const USHORT *puBlue); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLGETGAMMATABLEPARAMETERSI3DPROC) (HDC hDC, int iAttribute, int *piValue); +typedef BOOL (WINAPI * PFNWGLSETGAMMATABLEPARAMETERSI3DPROC) (HDC hDC, int iAttribute, const int *piValue); +typedef BOOL (WINAPI * PFNWGLGETGAMMATABLEI3DPROC) (HDC hDC, int iEntries, USHORT *puRed, USHORT *puGreen, USHORT *puBlue); +typedef BOOL (WINAPI * PFNWGLSETGAMMATABLEI3DPROC) (HDC hDC, int iEntries, const USHORT *puRed, const USHORT *puGreen, const USHORT *puBlue); +#endif + +#ifndef WGL_I3D_genlock +#define WGL_I3D_genlock 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglEnableGenlockI3D (HDC hDC); +extern BOOL WINAPI wglDisableGenlockI3D (HDC hDC); +extern BOOL WINAPI wglIsEnabledGenlockI3D 
(HDC hDC, BOOL *pFlag); +extern BOOL WINAPI wglGenlockSourceI3D (HDC hDC, UINT uSource); +extern BOOL WINAPI wglGetGenlockSourceI3D (HDC hDC, UINT *uSource); +extern BOOL WINAPI wglGenlockSourceEdgeI3D (HDC hDC, UINT uEdge); +extern BOOL WINAPI wglGetGenlockSourceEdgeI3D (HDC hDC, UINT *uEdge); +extern BOOL WINAPI wglGenlockSampleRateI3D (HDC hDC, UINT uRate); +extern BOOL WINAPI wglGetGenlockSampleRateI3D (HDC hDC, UINT *uRate); +extern BOOL WINAPI wglGenlockSourceDelayI3D (HDC hDC, UINT uDelay); +extern BOOL WINAPI wglGetGenlockSourceDelayI3D (HDC hDC, UINT *uDelay); +extern BOOL WINAPI wglQueryGenlockMaxSourceDelayI3D (HDC hDC, UINT *uMaxLineDelay, UINT *uMaxPixelDelay); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLENABLEGENLOCKI3DPROC) (HDC hDC); +typedef BOOL (WINAPI * PFNWGLDISABLEGENLOCKI3DPROC) (HDC hDC); +typedef BOOL (WINAPI * PFNWGLISENABLEDGENLOCKI3DPROC) (HDC hDC, BOOL *pFlag); +typedef BOOL (WINAPI * PFNWGLGENLOCKSOURCEI3DPROC) (HDC hDC, UINT uSource); +typedef BOOL (WINAPI * PFNWGLGETGENLOCKSOURCEI3DPROC) (HDC hDC, UINT *uSource); +typedef BOOL (WINAPI * PFNWGLGENLOCKSOURCEEDGEI3DPROC) (HDC hDC, UINT uEdge); +typedef BOOL (WINAPI * PFNWGLGETGENLOCKSOURCEEDGEI3DPROC) (HDC hDC, UINT *uEdge); +typedef BOOL (WINAPI * PFNWGLGENLOCKSAMPLERATEI3DPROC) (HDC hDC, UINT uRate); +typedef BOOL (WINAPI * PFNWGLGETGENLOCKSAMPLERATEI3DPROC) (HDC hDC, UINT *uRate); +typedef BOOL (WINAPI * PFNWGLGENLOCKSOURCEDELAYI3DPROC) (HDC hDC, UINT uDelay); +typedef BOOL (WINAPI * PFNWGLGETGENLOCKSOURCEDELAYI3DPROC) (HDC hDC, UINT *uDelay); +typedef BOOL (WINAPI * PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC) (HDC hDC, UINT *uMaxLineDelay, UINT *uMaxPixelDelay); +#endif + +#ifndef WGL_I3D_image_buffer +#define WGL_I3D_image_buffer 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern LPVOID WINAPI wglCreateImageBufferI3D (HDC hDC, DWORD dwSize, UINT uFlags); +extern BOOL WINAPI wglDestroyImageBufferI3D (HDC hDC, LPVOID pAddress); +extern BOOL WINAPI wglAssociateImageBufferEventsI3D (HDC hDC, const HANDLE *pEvent, const LPVOID *pAddress, const DWORD *pSize, UINT count); +extern BOOL WINAPI wglReleaseImageBufferEventsI3D (HDC hDC, const LPVOID *pAddress, UINT count); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef LPVOID (WINAPI * PFNWGLCREATEIMAGEBUFFERI3DPROC) (HDC hDC, DWORD dwSize, UINT uFlags); +typedef BOOL (WINAPI * PFNWGLDESTROYIMAGEBUFFERI3DPROC) (HDC hDC, LPVOID pAddress); +typedef BOOL (WINAPI * PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC) (HDC hDC, const HANDLE *pEvent, const LPVOID *pAddress, const DWORD *pSize, UINT count); +typedef BOOL (WINAPI * PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC) (HDC hDC, const LPVOID *pAddress, UINT count); +#endif + +#ifndef WGL_I3D_swap_frame_lock +#define WGL_I3D_swap_frame_lock 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglEnableFrameLockI3D (void); +extern BOOL WINAPI wglDisableFrameLockI3D (void); +extern BOOL WINAPI wglIsEnabledFrameLockI3D (BOOL *pFlag); +extern BOOL WINAPI wglQueryFrameLockMasterI3D (BOOL *pFlag); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLENABLEFRAMELOCKI3DPROC) (void); +typedef BOOL (WINAPI * PFNWGLDISABLEFRAMELOCKI3DPROC) (void); +typedef BOOL (WINAPI * PFNWGLISENABLEDFRAMELOCKI3DPROC) (BOOL *pFlag); +typedef BOOL (WINAPI * PFNWGLQUERYFRAMELOCKMASTERI3DPROC) (BOOL *pFlag); +#endif + +#ifndef WGL_I3D_swap_frame_usage +#define WGL_I3D_swap_frame_usage 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglGetFrameUsageI3D (float *pUsage); +extern BOOL WINAPI wglBeginFrameTrackingI3D (void); +extern BOOL WINAPI 
wglEndFrameTrackingI3D (void); +extern BOOL WINAPI wglQueryFrameTrackingI3D (DWORD *pFrameCount, DWORD *pMissedFrames, float *pLastMissedUsage); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLGETFRAMEUSAGEI3DPROC) (float *pUsage); +typedef BOOL (WINAPI * PFNWGLBEGINFRAMETRACKINGI3DPROC) (void); +typedef BOOL (WINAPI * PFNWGLENDFRAMETRACKINGI3DPROC) (void); +typedef BOOL (WINAPI * PFNWGLQUERYFRAMETRACKINGI3DPROC) (DWORD *pFrameCount, DWORD *pMissedFrames, float *pLastMissedUsage); +#endif + +#ifndef WGL_ATI_pixel_format_float +#define WGL_ATI_pixel_format_float 1 +#endif + +#ifndef WGL_NV_float_buffer +#define WGL_NV_float_buffer 1 +#endif + +#ifndef WGL_3DL_stereo_control +#define WGL_3DL_stereo_control 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglSetStereoEmitterState3DL (HDC hDC, UINT uState); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLSETSTEREOEMITTERSTATE3DLPROC) (HDC hDC, UINT uState); +#endif + +#ifndef WGL_EXT_pixel_format_packed_float +#define WGL_EXT_pixel_format_packed_float 1 +#endif + +#ifndef WGL_EXT_framebuffer_sRGB +#define WGL_EXT_framebuffer_sRGB 1 +#endif + +#ifndef WGL_NV_present_video +#define WGL_NV_present_video 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern int WINAPI wglEnumerateVideoDevicesNV (HDC hDC, HVIDEOOUTPUTDEVICENV *phDeviceList); +extern BOOL WINAPI wglBindVideoDeviceNV (HDC hDC, unsigned int uVideoSlot, HVIDEOOUTPUTDEVICENV hVideoDevice, const int *piAttribList); +extern BOOL WINAPI wglQueryCurrentContextNV (int iAttribute, int *piValue); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef int (WINAPI * PFNWGLENUMERATEVIDEODEVICESNVPROC) (HDC hDC, HVIDEOOUTPUTDEVICENV *phDeviceList); +typedef BOOL (WINAPI * PFNWGLBINDVIDEODEVICENVPROC) (HDC hDC, unsigned int uVideoSlot, HVIDEOOUTPUTDEVICENV hVideoDevice, const int *piAttribList); +typedef BOOL (WINAPI * PFNWGLQUERYCURRENTCONTEXTNVPROC) (int iAttribute, int *piValue); +#endif + +#ifndef WGL_NV_video_output +#define WGL_NV_video_output 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglGetVideoDeviceNV (HDC hDC, int numDevices, HPVIDEODEV *hVideoDevice); +extern BOOL WINAPI wglReleaseVideoDeviceNV (HPVIDEODEV hVideoDevice); +extern BOOL WINAPI wglBindVideoImageNV (HPVIDEODEV hVideoDevice, HPBUFFERARB hPbuffer, int iVideoBuffer); +extern BOOL WINAPI wglReleaseVideoImageNV (HPBUFFERARB hPbuffer, int iVideoBuffer); +extern BOOL WINAPI wglSendPbufferToVideoNV (HPBUFFERARB hPbuffer, int iBufferType, unsigned long *pulCounterPbuffer, BOOL bBlock); +extern BOOL WINAPI wglGetVideoInfoNV (HPVIDEODEV hpVideoDevice, unsigned long *pulCounterOutputPbuffer, unsigned long *pulCounterOutputVideo); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLGETVIDEODEVICENVPROC) (HDC hDC, int numDevices, HPVIDEODEV *hVideoDevice); +typedef BOOL (WINAPI * PFNWGLRELEASEVIDEODEVICENVPROC) (HPVIDEODEV hVideoDevice); +typedef BOOL (WINAPI * PFNWGLBINDVIDEOIMAGENVPROC) (HPVIDEODEV hVideoDevice, HPBUFFERARB hPbuffer, int iVideoBuffer); +typedef BOOL (WINAPI * PFNWGLRELEASEVIDEOIMAGENVPROC) (HPBUFFERARB hPbuffer, int iVideoBuffer); +typedef BOOL (WINAPI * PFNWGLSENDPBUFFERTOVIDEONVPROC) (HPBUFFERARB hPbuffer, int iBufferType, unsigned long *pulCounterPbuffer, BOOL bBlock); +typedef BOOL (WINAPI * PFNWGLGETVIDEOINFONVPROC) (HPVIDEODEV hpVideoDevice, unsigned long *pulCounterOutputPbuffer, unsigned long *pulCounterOutputVideo); +#endif + +#ifndef WGL_NV_swap_group +#define WGL_NV_swap_group 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglJoinSwapGroupNV (HDC hDC, GLuint group); 
+extern BOOL WINAPI wglBindSwapBarrierNV (GLuint group, GLuint barrier); +extern BOOL WINAPI wglQuerySwapGroupNV (HDC hDC, GLuint *group, GLuint *barrier); +extern BOOL WINAPI wglQueryMaxSwapGroupsNV (HDC hDC, GLuint *maxGroups, GLuint *maxBarriers); +extern BOOL WINAPI wglQueryFrameCountNV (HDC hDC, GLuint *count); +extern BOOL WINAPI wglResetFrameCountNV (HDC hDC); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLJOINSWAPGROUPNVPROC) (HDC hDC, GLuint group); +typedef BOOL (WINAPI * PFNWGLBINDSWAPBARRIERNVPROC) (GLuint group, GLuint barrier); +typedef BOOL (WINAPI * PFNWGLQUERYSWAPGROUPNVPROC) (HDC hDC, GLuint *group, GLuint *barrier); +typedef BOOL (WINAPI * PFNWGLQUERYMAXSWAPGROUPSNVPROC) (HDC hDC, GLuint *maxGroups, GLuint *maxBarriers); +typedef BOOL (WINAPI * PFNWGLQUERYFRAMECOUNTNVPROC) (HDC hDC, GLuint *count); +typedef BOOL (WINAPI * PFNWGLRESETFRAMECOUNTNVPROC) (HDC hDC); +#endif + +#ifndef WGL_NV_gpu_affinity +#define WGL_NV_gpu_affinity 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglEnumGpusNV (UINT iGpuIndex, HGPUNV *phGpu); +extern BOOL WINAPI wglEnumGpuDevicesNV (HGPUNV hGpu, UINT iDeviceIndex, PGPU_DEVICE lpGpuDevice); +extern HDC WINAPI wglCreateAffinityDCNV (const HGPUNV *phGpuList); +extern BOOL WINAPI wglEnumGpusFromAffinityDCNV (HDC hAffinityDC, UINT iGpuIndex, HGPUNV *hGpu); +extern BOOL WINAPI wglDeleteDCNV (HDC hdc); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLENUMGPUSNVPROC) (UINT iGpuIndex, HGPUNV *phGpu); +typedef BOOL (WINAPI * PFNWGLENUMGPUDEVICESNVPROC) (HGPUNV hGpu, UINT iDeviceIndex, PGPU_DEVICE lpGpuDevice); +typedef HDC (WINAPI * PFNWGLCREATEAFFINITYDCNVPROC) (const HGPUNV *phGpuList); +typedef BOOL (WINAPI * PFNWGLENUMGPUSFROMAFFINITYDCNVPROC) (HDC hAffinityDC, UINT iGpuIndex, HGPUNV *hGpu); +typedef BOOL (WINAPI * PFNWGLDELETEDCNVPROC) (HDC hdc); +#endif + +#ifndef WGL_AMD_gpu_association +#define WGL_AMD_gpu_association 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern UINT WINAPI wglGetGPUIDsAMD (UINT maxCount, UINT *ids); +extern INT WINAPI wglGetGPUInfoAMD (UINT id, int property, GLenum dataType, UINT size, void *data); +extern UINT WINAPI wglGetContextGPUIDAMD (HGLRC hglrc); +extern HGLRC WINAPI wglCreateAssociatedContextAMD (UINT id); +extern HGLRC WINAPI wglCreateAssociatedContextAttribsAMD (UINT id, HGLRC hShareContext, const int *attribList); +extern BOOL WINAPI wglDeleteAssociatedContextAMD (HGLRC hglrc); +extern BOOL WINAPI wglMakeAssociatedContextCurrentAMD (HGLRC hglrc); +extern HGLRC WINAPI wglGetCurrentAssociatedContextAMD (void); +extern VOID WINAPI wglBlitContextFramebufferAMD (HGLRC dstCtx, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef UINT (WINAPI * PFNWGLGETGPUIDSAMDPROC) (UINT maxCount, UINT *ids); +typedef INT (WINAPI * PFNWGLGETGPUINFOAMDPROC) (UINT id, int property, GLenum dataType, UINT size, void *data); +typedef UINT (WINAPI * PFNWGLGETCONTEXTGPUIDAMDPROC) (HGLRC hglrc); +typedef HGLRC (WINAPI * PFNWGLCREATEASSOCIATEDCONTEXTAMDPROC) (UINT id); +typedef HGLRC (WINAPI * PFNWGLCREATEASSOCIATEDCONTEXTATTRIBSAMDPROC) (UINT id, HGLRC hShareContext, const int *attribList); +typedef BOOL (WINAPI * PFNWGLDELETEASSOCIATEDCONTEXTAMDPROC) (HGLRC hglrc); +typedef BOOL (WINAPI * PFNWGLMAKEASSOCIATEDCONTEXTCURRENTAMDPROC) (HGLRC hglrc); +typedef HGLRC (WINAPI * PFNWGLGETCURRENTASSOCIATEDCONTEXTAMDPROC) (void); +typedef VOID (WINAPI * PFNWGLBLITCONTEXTFRAMEBUFFERAMDPROC) 
(HGLRC dstCtx, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +#endif + +#ifndef WGL_NV_video_capture +#define WGL_NV_video_capture 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglBindVideoCaptureDeviceNV (UINT uVideoSlot, HVIDEOINPUTDEVICENV hDevice); +extern UINT WINAPI wglEnumerateVideoCaptureDevicesNV (HDC hDc, HVIDEOINPUTDEVICENV *phDeviceList); +extern BOOL WINAPI wglLockVideoCaptureDeviceNV (HDC hDc, HVIDEOINPUTDEVICENV hDevice); +extern BOOL WINAPI wglQueryVideoCaptureDeviceNV (HDC hDc, HVIDEOINPUTDEVICENV hDevice, int iAttribute, int *piValue); +extern BOOL WINAPI wglReleaseVideoCaptureDeviceNV (HDC hDc, HVIDEOINPUTDEVICENV hDevice); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLBINDVIDEOCAPTUREDEVICENVPROC) (UINT uVideoSlot, HVIDEOINPUTDEVICENV hDevice); +typedef UINT (WINAPI * PFNWGLENUMERATEVIDEOCAPTUREDEVICESNVPROC) (HDC hDc, HVIDEOINPUTDEVICENV *phDeviceList); +typedef BOOL (WINAPI * PFNWGLLOCKVIDEOCAPTUREDEVICENVPROC) (HDC hDc, HVIDEOINPUTDEVICENV hDevice); +typedef BOOL (WINAPI * PFNWGLQUERYVIDEOCAPTUREDEVICENVPROC) (HDC hDc, HVIDEOINPUTDEVICENV hDevice, int iAttribute, int *piValue); +typedef BOOL (WINAPI * PFNWGLRELEASEVIDEOCAPTUREDEVICENVPROC) (HDC hDc, HVIDEOINPUTDEVICENV hDevice); +#endif + +#ifndef WGL_NV_copy_image +#define WGL_NV_copy_image 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglCopyImageSubDataNV (HGLRC hSrcRC, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, HGLRC hDstRC, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLCOPYIMAGESUBDATANVPROC) (HGLRC hSrcRC, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, HGLRC hDstRC, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth); +#endif + +#ifndef WGL_NV_multisample_coverage +#define WGL_NV_multisample_coverage 1 +#endif + +#ifndef WGL_NV_DX_interop +#define WGL_NV_DX_interop 1 +#ifdef WGL_WGLEXT_PROTOTYPES +extern BOOL WINAPI wglDXSetResourceShareHandleNV (void *dxObject, HANDLE shareHandle); +extern HANDLE WINAPI wglDXOpenDeviceNV (void *dxDevice); +extern BOOL WINAPI wglDXCloseDeviceNV (HANDLE hDevice); +extern HANDLE WINAPI wglDXRegisterObjectNV (HANDLE hDevice, void *dxObject, GLuint name, GLenum type, GLenum access); +extern BOOL WINAPI wglDXUnregisterObjectNV (HANDLE hDevice, HANDLE hObject); +extern BOOL WINAPI wglDXObjectAccessNV (HANDLE hObject, GLenum access); +extern BOOL WINAPI wglDXLockObjectsNV (HANDLE hDevice, GLint count, HANDLE *hObjects); +extern BOOL WINAPI wglDXUnlockObjectsNV (HANDLE hDevice, GLint count, HANDLE *hObjects); +#endif /* WGL_WGLEXT_PROTOTYPES */ +typedef BOOL (WINAPI * PFNWGLDXSETRESOURCESHAREHANDLENVPROC) (void *dxObject, HANDLE shareHandle); +typedef HANDLE (WINAPI * PFNWGLDXOPENDEVICENVPROC) (void *dxDevice); +typedef BOOL (WINAPI * PFNWGLDXCLOSEDEVICENVPROC) (HANDLE hDevice); +typedef HANDLE (WINAPI * PFNWGLDXREGISTEROBJECTNVPROC) (HANDLE hDevice, void *dxObject, GLuint name, GLenum type, GLenum access); +typedef BOOL (WINAPI * PFNWGLDXUNREGISTEROBJECTNVPROC) (HANDLE hDevice, HANDLE hObject); +typedef BOOL (WINAPI * PFNWGLDXOBJECTACCESSNVPROC) (HANDLE hObject, GLenum access); +typedef BOOL (WINAPI * PFNWGLDXLOCKOBJECTSNVPROC) (HANDLE hDevice, GLint 
count, HANDLE *hObjects); +typedef BOOL (WINAPI * PFNWGLDXUNLOCKOBJECTSNVPROC) (HANDLE hDevice, GLint count, HANDLE *hObjects); +#endif + +#ifndef WGL_NV_DX_interop2 +#define WGL_NV_DX_interop2 1 +#endif + +#ifndef WGL_EXT_swap_control_tear +#define WGL_EXT_swap_control_tear 1 +#endif + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/VC/inc/config.h b/VC/inc/config.h new file mode 100644 index 0000000..fd372f1 --- /dev/null +++ b/VC/inc/config.h @@ -0,0 +1,70 @@ +// +// Q2PRO configuration file for MSVC +// + +#define REVISION 1337 +#define VERSION "v133.7" + +#ifdef _WIN64 +#define CPUSTRING "x86_64" +#define BUILDSTRING "Win64" +#else +#define CPUSTRING "x86" +#define BUILDSTRING "Win32" +#endif + +#define BASEGAME "baseq2" +#define DEFGAME "" + +#define USE_ICMP 1 +#define USE_ZLIB 1 +#define USE_SYSCON 1 +#define USE_DBGHELP 1 +#define USE_MAPCHECKSUM 1 + +#if USE_CLIENT +//#define VID_REF "gl" +#define VID_MODELIST "640x480 800x600 1024x768" +#define VID_GEOMETRY "640x480" +//#define REF_GL 1 +//#define USE_REF REF_GL +#define USE_UI 1 +#define USE_PNG 1 +#define USE_JPG 1 +#define USE_TGA 1 +#define USE_MD3 0 +#define USE_LIGHTSTYLES 1 +#define USE_DLIGHTS 1 +//#define USE_DINPUT 1 +//#define USE_DSOUND 1 +//#define USE_OPENAL 1 +#define USE_SNDDMA 1 +#define USE_CURL 0 +#define USE_AUTOREPLY 1 +#endif + +#if USE_SERVER +#define USE_AC_SERVER !USE_CLIENT +#define USE_MVD_SERVER 1 +#define USE_MVD_CLIENT 1 +#define USE_PACKETDUP 1 +#define USE_WINSVC !USE_CLIENT +#endif + +#define _USE_MATH_DEFINES +#define inline __inline +#define __func__ __FUNCTION__ + +#ifdef _WIN64 +typedef __int64 ssize_t; +#define SSIZE_MAX _I64_MAX +#else +typedef __int32 ssize_t; +#define SSIZE_MAX _I32_MAX +#endif + +#pragma warning(disable:4018) +#pragma warning(disable:4244) +#pragma warning(disable:4267) +#pragma warning(disable:4305) + diff --git a/blue_noise_textures/1024_1024/LDR_RGBA_0.png b/blue_noise_textures/1024_1024/LDR_RGBA_0.png new file mode 100644 index 0000000..fbe0757 Binary files /dev/null and b/blue_noise_textures/1024_1024/LDR_RGBA_0.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_0.png b/blue_noise_textures/128_128/HDR_LA_0.png new file mode 100644 index 0000000..26b0827 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_0.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_1.png b/blue_noise_textures/128_128/HDR_LA_1.png new file mode 100644 index 0000000..1748fa2 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_1.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_10.png b/blue_noise_textures/128_128/HDR_LA_10.png new file mode 100644 index 0000000..6f5f612 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_10.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_11.png b/blue_noise_textures/128_128/HDR_LA_11.png new file mode 100644 index 0000000..6fd8d51 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_11.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_12.png b/blue_noise_textures/128_128/HDR_LA_12.png new file mode 100644 index 0000000..4de04ea Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_12.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_13.png b/blue_noise_textures/128_128/HDR_LA_13.png new file mode 100644 index 0000000..8b3313d Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_13.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_14.png b/blue_noise_textures/128_128/HDR_LA_14.png new file mode 100644 index 0000000..443b9d9 
Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_14.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_15.png b/blue_noise_textures/128_128/HDR_LA_15.png new file mode 100644 index 0000000..ec68478 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_15.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_2.png b/blue_noise_textures/128_128/HDR_LA_2.png new file mode 100644 index 0000000..336c10e Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_2.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_3.png b/blue_noise_textures/128_128/HDR_LA_3.png new file mode 100644 index 0000000..d70c4ff Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_3.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_4.png b/blue_noise_textures/128_128/HDR_LA_4.png new file mode 100644 index 0000000..6d7dd11 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_4.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_5.png b/blue_noise_textures/128_128/HDR_LA_5.png new file mode 100644 index 0000000..4e928ca Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_5.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_6.png b/blue_noise_textures/128_128/HDR_LA_6.png new file mode 100644 index 0000000..e998ffb Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_6.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_7.png b/blue_noise_textures/128_128/HDR_LA_7.png new file mode 100644 index 0000000..9b77fd3 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_7.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_8.png b/blue_noise_textures/128_128/HDR_LA_8.png new file mode 100644 index 0000000..c12157d Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_8.png differ diff --git a/blue_noise_textures/128_128/HDR_LA_9.png b/blue_noise_textures/128_128/HDR_LA_9.png new file mode 100644 index 0000000..88ed526 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_LA_9.png differ diff --git a/blue_noise_textures/128_128/HDR_L_0.png b/blue_noise_textures/128_128/HDR_L_0.png new file mode 100644 index 0000000..41aa44d Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_0.png differ diff --git a/blue_noise_textures/128_128/HDR_L_1.png b/blue_noise_textures/128_128/HDR_L_1.png new file mode 100644 index 0000000..e6bea43 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_1.png differ diff --git a/blue_noise_textures/128_128/HDR_L_10.png b/blue_noise_textures/128_128/HDR_L_10.png new file mode 100644 index 0000000..685fba3 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_10.png differ diff --git a/blue_noise_textures/128_128/HDR_L_11.png b/blue_noise_textures/128_128/HDR_L_11.png new file mode 100644 index 0000000..6b75f7a Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_11.png differ diff --git a/blue_noise_textures/128_128/HDR_L_12.png b/blue_noise_textures/128_128/HDR_L_12.png new file mode 100644 index 0000000..2ef31ea Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_12.png differ diff --git a/blue_noise_textures/128_128/HDR_L_13.png b/blue_noise_textures/128_128/HDR_L_13.png new file mode 100644 index 0000000..86f843f Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_13.png differ diff --git a/blue_noise_textures/128_128/HDR_L_14.png b/blue_noise_textures/128_128/HDR_L_14.png new file mode 100644 index 0000000..b92feda Binary files /dev/null and 
b/blue_noise_textures/128_128/HDR_L_14.png differ diff --git a/blue_noise_textures/128_128/HDR_L_15.png b/blue_noise_textures/128_128/HDR_L_15.png new file mode 100644 index 0000000..817f734 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_15.png differ diff --git a/blue_noise_textures/128_128/HDR_L_2.png b/blue_noise_textures/128_128/HDR_L_2.png new file mode 100644 index 0000000..0793fb4 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_2.png differ diff --git a/blue_noise_textures/128_128/HDR_L_3.png b/blue_noise_textures/128_128/HDR_L_3.png new file mode 100644 index 0000000..a4e716e Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_3.png differ diff --git a/blue_noise_textures/128_128/HDR_L_4.png b/blue_noise_textures/128_128/HDR_L_4.png new file mode 100644 index 0000000..a0ee7d3 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_4.png differ diff --git a/blue_noise_textures/128_128/HDR_L_5.png b/blue_noise_textures/128_128/HDR_L_5.png new file mode 100644 index 0000000..7a2848a Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_5.png differ diff --git a/blue_noise_textures/128_128/HDR_L_6.png b/blue_noise_textures/128_128/HDR_L_6.png new file mode 100644 index 0000000..cdd3b64 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_6.png differ diff --git a/blue_noise_textures/128_128/HDR_L_7.png b/blue_noise_textures/128_128/HDR_L_7.png new file mode 100644 index 0000000..ec39301 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_7.png differ diff --git a/blue_noise_textures/128_128/HDR_L_8.png b/blue_noise_textures/128_128/HDR_L_8.png new file mode 100644 index 0000000..a9082aa Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_8.png differ diff --git a/blue_noise_textures/128_128/HDR_L_9.png b/blue_noise_textures/128_128/HDR_L_9.png new file mode 100644 index 0000000..01ae78d Binary files /dev/null and b/blue_noise_textures/128_128/HDR_L_9.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_0.png b/blue_noise_textures/128_128/HDR_RGBA_0.png new file mode 100644 index 0000000..98c62d3 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_0.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_1.png b/blue_noise_textures/128_128/HDR_RGBA_1.png new file mode 100644 index 0000000..ffd56dc Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_1.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_10.png b/blue_noise_textures/128_128/HDR_RGBA_10.png new file mode 100644 index 0000000..2c66a38 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_10.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_11.png b/blue_noise_textures/128_128/HDR_RGBA_11.png new file mode 100644 index 0000000..d100a8a Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_11.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_12.png b/blue_noise_textures/128_128/HDR_RGBA_12.png new file mode 100644 index 0000000..7683aa0 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_12.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_13.png b/blue_noise_textures/128_128/HDR_RGBA_13.png new file mode 100644 index 0000000..fe2e99e Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_13.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_14.png b/blue_noise_textures/128_128/HDR_RGBA_14.png new file mode 100644 index 0000000..ba3cbd1 Binary files /dev/null and 
b/blue_noise_textures/128_128/HDR_RGBA_14.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_15.png b/blue_noise_textures/128_128/HDR_RGBA_15.png new file mode 100644 index 0000000..afbfdbc Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_15.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_2.png b/blue_noise_textures/128_128/HDR_RGBA_2.png new file mode 100644 index 0000000..80991ac Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_2.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_3.png b/blue_noise_textures/128_128/HDR_RGBA_3.png new file mode 100644 index 0000000..da7d0af Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_3.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_4.png b/blue_noise_textures/128_128/HDR_RGBA_4.png new file mode 100644 index 0000000..77b9c35 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_4.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_5.png b/blue_noise_textures/128_128/HDR_RGBA_5.png new file mode 100644 index 0000000..777e92e Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_5.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_6.png b/blue_noise_textures/128_128/HDR_RGBA_6.png new file mode 100644 index 0000000..41fc1d4 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_6.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_7.png b/blue_noise_textures/128_128/HDR_RGBA_7.png new file mode 100644 index 0000000..40baadc Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_7.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_8.png b/blue_noise_textures/128_128/HDR_RGBA_8.png new file mode 100644 index 0000000..fd58543 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_8.png differ diff --git a/blue_noise_textures/128_128/HDR_RGBA_9.png b/blue_noise_textures/128_128/HDR_RGBA_9.png new file mode 100644 index 0000000..45be1cf Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGBA_9.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_0.png b/blue_noise_textures/128_128/HDR_RGB_0.png new file mode 100644 index 0000000..a605ffb Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_0.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_1.png b/blue_noise_textures/128_128/HDR_RGB_1.png new file mode 100644 index 0000000..dce8f54 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_1.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_10.png b/blue_noise_textures/128_128/HDR_RGB_10.png new file mode 100644 index 0000000..f8b8527 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_10.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_11.png b/blue_noise_textures/128_128/HDR_RGB_11.png new file mode 100644 index 0000000..4d14354 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_11.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_12.png b/blue_noise_textures/128_128/HDR_RGB_12.png new file mode 100644 index 0000000..36679a6 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_12.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_13.png b/blue_noise_textures/128_128/HDR_RGB_13.png new file mode 100644 index 0000000..1142fa1 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_13.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_14.png b/blue_noise_textures/128_128/HDR_RGB_14.png new file mode 100644 index 0000000..4105862 
Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_14.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_15.png b/blue_noise_textures/128_128/HDR_RGB_15.png new file mode 100644 index 0000000..8805840 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_15.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_2.png b/blue_noise_textures/128_128/HDR_RGB_2.png new file mode 100644 index 0000000..91466a5 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_2.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_3.png b/blue_noise_textures/128_128/HDR_RGB_3.png new file mode 100644 index 0000000..2e3ece0 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_3.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_4.png b/blue_noise_textures/128_128/HDR_RGB_4.png new file mode 100644 index 0000000..d49816a Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_4.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_5.png b/blue_noise_textures/128_128/HDR_RGB_5.png new file mode 100644 index 0000000..837851c Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_5.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_6.png b/blue_noise_textures/128_128/HDR_RGB_6.png new file mode 100644 index 0000000..fba3aec Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_6.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_7.png b/blue_noise_textures/128_128/HDR_RGB_7.png new file mode 100644 index 0000000..0bc7f56 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_7.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_8.png b/blue_noise_textures/128_128/HDR_RGB_8.png new file mode 100644 index 0000000..b060840 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_8.png differ diff --git a/blue_noise_textures/128_128/HDR_RGB_9.png b/blue_noise_textures/128_128/HDR_RGB_9.png new file mode 100644 index 0000000..d326f37 Binary files /dev/null and b/blue_noise_textures/128_128/HDR_RGB_9.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_0.png b/blue_noise_textures/128_128/LDR_LLL1_0.png new file mode 100644 index 0000000..69b451f Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_0.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_1.png b/blue_noise_textures/128_128/LDR_LLL1_1.png new file mode 100644 index 0000000..ffe2b67 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_1.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_10.png b/blue_noise_textures/128_128/LDR_LLL1_10.png new file mode 100644 index 0000000..96eb169 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_10.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_11.png b/blue_noise_textures/128_128/LDR_LLL1_11.png new file mode 100644 index 0000000..80a49dd Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_11.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_12.png b/blue_noise_textures/128_128/LDR_LLL1_12.png new file mode 100644 index 0000000..f1dc37d Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_12.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_13.png b/blue_noise_textures/128_128/LDR_LLL1_13.png new file mode 100644 index 0000000..527770d Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_13.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_14.png b/blue_noise_textures/128_128/LDR_LLL1_14.png new file mode 100644 
index 0000000..8ba68b1 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_14.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_15.png b/blue_noise_textures/128_128/LDR_LLL1_15.png new file mode 100644 index 0000000..2cbce2a Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_15.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_2.png b/blue_noise_textures/128_128/LDR_LLL1_2.png new file mode 100644 index 0000000..2daf5a2 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_2.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_3.png b/blue_noise_textures/128_128/LDR_LLL1_3.png new file mode 100644 index 0000000..b677074 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_3.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_4.png b/blue_noise_textures/128_128/LDR_LLL1_4.png new file mode 100644 index 0000000..f13df2b Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_4.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_5.png b/blue_noise_textures/128_128/LDR_LLL1_5.png new file mode 100644 index 0000000..3c3c6a6 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_5.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_6.png b/blue_noise_textures/128_128/LDR_LLL1_6.png new file mode 100644 index 0000000..2113c72 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_6.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_7.png b/blue_noise_textures/128_128/LDR_LLL1_7.png new file mode 100644 index 0000000..900fab4 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_7.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_8.png b/blue_noise_textures/128_128/LDR_LLL1_8.png new file mode 100644 index 0000000..90e7e5e Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_8.png differ diff --git a/blue_noise_textures/128_128/LDR_LLL1_9.png b/blue_noise_textures/128_128/LDR_LLL1_9.png new file mode 100644 index 0000000..2df6e77 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_LLL1_9.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_0.png b/blue_noise_textures/128_128/LDR_RG01_0.png new file mode 100644 index 0000000..884f68a Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_0.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_1.png b/blue_noise_textures/128_128/LDR_RG01_1.png new file mode 100644 index 0000000..a7ea312 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_1.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_10.png b/blue_noise_textures/128_128/LDR_RG01_10.png new file mode 100644 index 0000000..ef364f5 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_10.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_11.png b/blue_noise_textures/128_128/LDR_RG01_11.png new file mode 100644 index 0000000..e5b9756 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_11.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_12.png b/blue_noise_textures/128_128/LDR_RG01_12.png new file mode 100644 index 0000000..aed83f0 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_12.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_13.png b/blue_noise_textures/128_128/LDR_RG01_13.png new file mode 100644 index 0000000..72da6a3 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_13.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_14.png 
b/blue_noise_textures/128_128/LDR_RG01_14.png new file mode 100644 index 0000000..40c355d Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_14.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_15.png b/blue_noise_textures/128_128/LDR_RG01_15.png new file mode 100644 index 0000000..f4653eb Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_15.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_2.png b/blue_noise_textures/128_128/LDR_RG01_2.png new file mode 100644 index 0000000..659e3c4 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_2.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_3.png b/blue_noise_textures/128_128/LDR_RG01_3.png new file mode 100644 index 0000000..c474378 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_3.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_4.png b/blue_noise_textures/128_128/LDR_RG01_4.png new file mode 100644 index 0000000..410336f Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_4.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_5.png b/blue_noise_textures/128_128/LDR_RG01_5.png new file mode 100644 index 0000000..834a79f Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_5.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_6.png b/blue_noise_textures/128_128/LDR_RG01_6.png new file mode 100644 index 0000000..191b074 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_6.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_7.png b/blue_noise_textures/128_128/LDR_RG01_7.png new file mode 100644 index 0000000..9ed8519 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_7.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_8.png b/blue_noise_textures/128_128/LDR_RG01_8.png new file mode 100644 index 0000000..3d07b96 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_8.png differ diff --git a/blue_noise_textures/128_128/LDR_RG01_9.png b/blue_noise_textures/128_128/LDR_RG01_9.png new file mode 100644 index 0000000..6cd83d0 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RG01_9.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_0.png b/blue_noise_textures/128_128/LDR_RGB1_0.png new file mode 100644 index 0000000..4e7b3e9 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_0.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_1.png b/blue_noise_textures/128_128/LDR_RGB1_1.png new file mode 100644 index 0000000..b47bc87 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_1.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_10.png b/blue_noise_textures/128_128/LDR_RGB1_10.png new file mode 100644 index 0000000..795363f Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_10.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_11.png b/blue_noise_textures/128_128/LDR_RGB1_11.png new file mode 100644 index 0000000..a344ce1 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_11.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_12.png b/blue_noise_textures/128_128/LDR_RGB1_12.png new file mode 100644 index 0000000..de4537d Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_12.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_13.png b/blue_noise_textures/128_128/LDR_RGB1_13.png new file mode 100644 index 0000000..525886f Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_13.png differ diff 
--git a/blue_noise_textures/128_128/LDR_RGB1_14.png b/blue_noise_textures/128_128/LDR_RGB1_14.png new file mode 100644 index 0000000..34dc265 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_14.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_15.png b/blue_noise_textures/128_128/LDR_RGB1_15.png new file mode 100644 index 0000000..ae1adaf Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_15.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_2.png b/blue_noise_textures/128_128/LDR_RGB1_2.png new file mode 100644 index 0000000..c24536a Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_2.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_3.png b/blue_noise_textures/128_128/LDR_RGB1_3.png new file mode 100644 index 0000000..ab72b90 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_3.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_4.png b/blue_noise_textures/128_128/LDR_RGB1_4.png new file mode 100644 index 0000000..42768ad Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_4.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_5.png b/blue_noise_textures/128_128/LDR_RGB1_5.png new file mode 100644 index 0000000..b56fe7d Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_5.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_6.png b/blue_noise_textures/128_128/LDR_RGB1_6.png new file mode 100644 index 0000000..c088755 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_6.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_7.png b/blue_noise_textures/128_128/LDR_RGB1_7.png new file mode 100644 index 0000000..ba69389 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_7.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_8.png b/blue_noise_textures/128_128/LDR_RGB1_8.png new file mode 100644 index 0000000..8da924a Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_8.png differ diff --git a/blue_noise_textures/128_128/LDR_RGB1_9.png b/blue_noise_textures/128_128/LDR_RGB1_9.png new file mode 100644 index 0000000..d8a739d Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGB1_9.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_0.png b/blue_noise_textures/128_128/LDR_RGBA_0.png new file mode 100644 index 0000000..526c9ef Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_0.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_1.png b/blue_noise_textures/128_128/LDR_RGBA_1.png new file mode 100644 index 0000000..574153d Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_1.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_10.png b/blue_noise_textures/128_128/LDR_RGBA_10.png new file mode 100644 index 0000000..c4646ce Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_10.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_11.png b/blue_noise_textures/128_128/LDR_RGBA_11.png new file mode 100644 index 0000000..7afdd08 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_11.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_12.png b/blue_noise_textures/128_128/LDR_RGBA_12.png new file mode 100644 index 0000000..f00ed21 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_12.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_13.png b/blue_noise_textures/128_128/LDR_RGBA_13.png new file mode 100644 index 0000000..03f146a Binary files /dev/null and 
b/blue_noise_textures/128_128/LDR_RGBA_13.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_14.png b/blue_noise_textures/128_128/LDR_RGBA_14.png new file mode 100644 index 0000000..3543236 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_14.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_15.png b/blue_noise_textures/128_128/LDR_RGBA_15.png new file mode 100644 index 0000000..552c359 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_15.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_2.png b/blue_noise_textures/128_128/LDR_RGBA_2.png new file mode 100644 index 0000000..0ffe975 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_2.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_3.png b/blue_noise_textures/128_128/LDR_RGBA_3.png new file mode 100644 index 0000000..8ee18c2 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_3.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_4.png b/blue_noise_textures/128_128/LDR_RGBA_4.png new file mode 100644 index 0000000..f817618 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_4.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_5.png b/blue_noise_textures/128_128/LDR_RGBA_5.png new file mode 100644 index 0000000..00ad67c Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_5.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_6.png b/blue_noise_textures/128_128/LDR_RGBA_6.png new file mode 100644 index 0000000..6c36bb6 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_6.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_7.png b/blue_noise_textures/128_128/LDR_RGBA_7.png new file mode 100644 index 0000000..5972195 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_7.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_8.png b/blue_noise_textures/128_128/LDR_RGBA_8.png new file mode 100644 index 0000000..ad9fc6a Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_8.png differ diff --git a/blue_noise_textures/128_128/LDR_RGBA_9.png b/blue_noise_textures/128_128/LDR_RGBA_9.png new file mode 100644 index 0000000..30a76f7 Binary files /dev/null and b/blue_noise_textures/128_128/LDR_RGBA_9.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_0.png b/blue_noise_textures/16_16/HDR_LA_0.png new file mode 100644 index 0000000..9a62295 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_0.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_1.png b/blue_noise_textures/16_16/HDR_LA_1.png new file mode 100644 index 0000000..2d84e81 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_1.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_10.png b/blue_noise_textures/16_16/HDR_LA_10.png new file mode 100644 index 0000000..4f90d0b Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_10.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_11.png b/blue_noise_textures/16_16/HDR_LA_11.png new file mode 100644 index 0000000..73987ab Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_11.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_12.png b/blue_noise_textures/16_16/HDR_LA_12.png new file mode 100644 index 0000000..abd3b6f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_12.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_13.png b/blue_noise_textures/16_16/HDR_LA_13.png new file mode 100644 index 0000000..56fa120 Binary files /dev/null and 
b/blue_noise_textures/16_16/HDR_LA_13.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_14.png b/blue_noise_textures/16_16/HDR_LA_14.png new file mode 100644 index 0000000..dabc212 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_14.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_15.png b/blue_noise_textures/16_16/HDR_LA_15.png new file mode 100644 index 0000000..5d4a8cf Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_15.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_16.png b/blue_noise_textures/16_16/HDR_LA_16.png new file mode 100644 index 0000000..3a2b14b Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_16.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_17.png b/blue_noise_textures/16_16/HDR_LA_17.png new file mode 100644 index 0000000..bef565b Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_17.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_18.png b/blue_noise_textures/16_16/HDR_LA_18.png new file mode 100644 index 0000000..844265e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_18.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_19.png b/blue_noise_textures/16_16/HDR_LA_19.png new file mode 100644 index 0000000..cd5662f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_19.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_2.png b/blue_noise_textures/16_16/HDR_LA_2.png new file mode 100644 index 0000000..34a9676 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_2.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_20.png b/blue_noise_textures/16_16/HDR_LA_20.png new file mode 100644 index 0000000..21c60dc Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_20.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_21.png b/blue_noise_textures/16_16/HDR_LA_21.png new file mode 100644 index 0000000..18c67ca Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_21.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_22.png b/blue_noise_textures/16_16/HDR_LA_22.png new file mode 100644 index 0000000..07bdb9c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_22.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_23.png b/blue_noise_textures/16_16/HDR_LA_23.png new file mode 100644 index 0000000..9168ead Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_23.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_24.png b/blue_noise_textures/16_16/HDR_LA_24.png new file mode 100644 index 0000000..1a58165 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_24.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_25.png b/blue_noise_textures/16_16/HDR_LA_25.png new file mode 100644 index 0000000..fcfa6de Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_25.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_26.png b/blue_noise_textures/16_16/HDR_LA_26.png new file mode 100644 index 0000000..c0b8e67 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_26.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_27.png b/blue_noise_textures/16_16/HDR_LA_27.png new file mode 100644 index 0000000..431944c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_27.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_28.png b/blue_noise_textures/16_16/HDR_LA_28.png new file mode 100644 index 0000000..d7ca724 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_28.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_29.png 
b/blue_noise_textures/16_16/HDR_LA_29.png new file mode 100644 index 0000000..86b472a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_29.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_3.png b/blue_noise_textures/16_16/HDR_LA_3.png new file mode 100644 index 0000000..56fd391 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_3.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_30.png b/blue_noise_textures/16_16/HDR_LA_30.png new file mode 100644 index 0000000..ec20c92 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_30.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_31.png b/blue_noise_textures/16_16/HDR_LA_31.png new file mode 100644 index 0000000..fb0b4e3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_31.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_32.png b/blue_noise_textures/16_16/HDR_LA_32.png new file mode 100644 index 0000000..6f217ae Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_32.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_33.png b/blue_noise_textures/16_16/HDR_LA_33.png new file mode 100644 index 0000000..cd6acc3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_33.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_34.png b/blue_noise_textures/16_16/HDR_LA_34.png new file mode 100644 index 0000000..5661758 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_34.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_35.png b/blue_noise_textures/16_16/HDR_LA_35.png new file mode 100644 index 0000000..5951ead Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_35.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_36.png b/blue_noise_textures/16_16/HDR_LA_36.png new file mode 100644 index 0000000..26e4ed3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_36.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_37.png b/blue_noise_textures/16_16/HDR_LA_37.png new file mode 100644 index 0000000..e794b83 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_37.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_38.png b/blue_noise_textures/16_16/HDR_LA_38.png new file mode 100644 index 0000000..d51ef8b Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_38.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_39.png b/blue_noise_textures/16_16/HDR_LA_39.png new file mode 100644 index 0000000..3518ea2 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_39.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_4.png b/blue_noise_textures/16_16/HDR_LA_4.png new file mode 100644 index 0000000..a33dea7 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_4.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_40.png b/blue_noise_textures/16_16/HDR_LA_40.png new file mode 100644 index 0000000..367c4dd Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_40.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_41.png b/blue_noise_textures/16_16/HDR_LA_41.png new file mode 100644 index 0000000..fa8599a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_41.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_42.png b/blue_noise_textures/16_16/HDR_LA_42.png new file mode 100644 index 0000000..266ec70 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_42.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_43.png b/blue_noise_textures/16_16/HDR_LA_43.png new file mode 100644 index 0000000..0c745ee Binary files /dev/null and 
b/blue_noise_textures/16_16/HDR_LA_43.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_44.png b/blue_noise_textures/16_16/HDR_LA_44.png new file mode 100644 index 0000000..b8c62da Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_44.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_45.png b/blue_noise_textures/16_16/HDR_LA_45.png new file mode 100644 index 0000000..b3b5917 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_45.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_46.png b/blue_noise_textures/16_16/HDR_LA_46.png new file mode 100644 index 0000000..b6d1ca1 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_46.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_47.png b/blue_noise_textures/16_16/HDR_LA_47.png new file mode 100644 index 0000000..057829a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_47.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_48.png b/blue_noise_textures/16_16/HDR_LA_48.png new file mode 100644 index 0000000..af9cd37 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_48.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_49.png b/blue_noise_textures/16_16/HDR_LA_49.png new file mode 100644 index 0000000..de129b3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_49.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_5.png b/blue_noise_textures/16_16/HDR_LA_5.png new file mode 100644 index 0000000..2947017 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_5.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_50.png b/blue_noise_textures/16_16/HDR_LA_50.png new file mode 100644 index 0000000..23c7887 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_50.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_51.png b/blue_noise_textures/16_16/HDR_LA_51.png new file mode 100644 index 0000000..d4b6f2c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_51.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_52.png b/blue_noise_textures/16_16/HDR_LA_52.png new file mode 100644 index 0000000..7f55453 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_52.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_53.png b/blue_noise_textures/16_16/HDR_LA_53.png new file mode 100644 index 0000000..b5f936a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_53.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_54.png b/blue_noise_textures/16_16/HDR_LA_54.png new file mode 100644 index 0000000..d4be224 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_54.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_55.png b/blue_noise_textures/16_16/HDR_LA_55.png new file mode 100644 index 0000000..dac23b1 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_55.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_56.png b/blue_noise_textures/16_16/HDR_LA_56.png new file mode 100644 index 0000000..d4f87f6 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_56.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_57.png b/blue_noise_textures/16_16/HDR_LA_57.png new file mode 100644 index 0000000..d136d00 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_57.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_58.png b/blue_noise_textures/16_16/HDR_LA_58.png new file mode 100644 index 0000000..53b51af Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_58.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_59.png 
b/blue_noise_textures/16_16/HDR_LA_59.png new file mode 100644 index 0000000..bd7dcfe Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_59.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_6.png b/blue_noise_textures/16_16/HDR_LA_6.png new file mode 100644 index 0000000..7cb2883 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_6.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_60.png b/blue_noise_textures/16_16/HDR_LA_60.png new file mode 100644 index 0000000..8acc88e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_60.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_61.png b/blue_noise_textures/16_16/HDR_LA_61.png new file mode 100644 index 0000000..a6f2269 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_61.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_62.png b/blue_noise_textures/16_16/HDR_LA_62.png new file mode 100644 index 0000000..c97fc54 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_62.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_63.png b/blue_noise_textures/16_16/HDR_LA_63.png new file mode 100644 index 0000000..d741bba Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_63.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_7.png b/blue_noise_textures/16_16/HDR_LA_7.png new file mode 100644 index 0000000..ed2da17 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_7.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_8.png b/blue_noise_textures/16_16/HDR_LA_8.png new file mode 100644 index 0000000..d87d95f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_8.png differ diff --git a/blue_noise_textures/16_16/HDR_LA_9.png b/blue_noise_textures/16_16/HDR_LA_9.png new file mode 100644 index 0000000..7f42f63 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_LA_9.png differ diff --git a/blue_noise_textures/16_16/HDR_L_0.png b/blue_noise_textures/16_16/HDR_L_0.png new file mode 100644 index 0000000..e7ab1c2 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_0.png differ diff --git a/blue_noise_textures/16_16/HDR_L_1.png b/blue_noise_textures/16_16/HDR_L_1.png new file mode 100644 index 0000000..7fd0c9a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_1.png differ diff --git a/blue_noise_textures/16_16/HDR_L_10.png b/blue_noise_textures/16_16/HDR_L_10.png new file mode 100644 index 0000000..40d592e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_10.png differ diff --git a/blue_noise_textures/16_16/HDR_L_11.png b/blue_noise_textures/16_16/HDR_L_11.png new file mode 100644 index 0000000..421b5ce Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_11.png differ diff --git a/blue_noise_textures/16_16/HDR_L_12.png b/blue_noise_textures/16_16/HDR_L_12.png new file mode 100644 index 0000000..79c73e3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_12.png differ diff --git a/blue_noise_textures/16_16/HDR_L_13.png b/blue_noise_textures/16_16/HDR_L_13.png new file mode 100644 index 0000000..e12e1de Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_13.png differ diff --git a/blue_noise_textures/16_16/HDR_L_14.png b/blue_noise_textures/16_16/HDR_L_14.png new file mode 100644 index 0000000..e40d10e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_14.png differ diff --git a/blue_noise_textures/16_16/HDR_L_15.png b/blue_noise_textures/16_16/HDR_L_15.png new file mode 100644 index 0000000..69364dc Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_15.png 
differ diff --git a/blue_noise_textures/16_16/HDR_L_16.png b/blue_noise_textures/16_16/HDR_L_16.png new file mode 100644 index 0000000..c65ede2 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_16.png differ diff --git a/blue_noise_textures/16_16/HDR_L_17.png b/blue_noise_textures/16_16/HDR_L_17.png new file mode 100644 index 0000000..8c66814 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_17.png differ diff --git a/blue_noise_textures/16_16/HDR_L_18.png b/blue_noise_textures/16_16/HDR_L_18.png new file mode 100644 index 0000000..34eb7c2 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_18.png differ diff --git a/blue_noise_textures/16_16/HDR_L_19.png b/blue_noise_textures/16_16/HDR_L_19.png new file mode 100644 index 0000000..20515f7 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_19.png differ diff --git a/blue_noise_textures/16_16/HDR_L_2.png b/blue_noise_textures/16_16/HDR_L_2.png new file mode 100644 index 0000000..3a88eb2 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_2.png differ diff --git a/blue_noise_textures/16_16/HDR_L_20.png b/blue_noise_textures/16_16/HDR_L_20.png new file mode 100644 index 0000000..434d444 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_20.png differ diff --git a/blue_noise_textures/16_16/HDR_L_21.png b/blue_noise_textures/16_16/HDR_L_21.png new file mode 100644 index 0000000..92b847e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_21.png differ diff --git a/blue_noise_textures/16_16/HDR_L_22.png b/blue_noise_textures/16_16/HDR_L_22.png new file mode 100644 index 0000000..fd64def Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_22.png differ diff --git a/blue_noise_textures/16_16/HDR_L_23.png b/blue_noise_textures/16_16/HDR_L_23.png new file mode 100644 index 0000000..4fb2ba7 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_23.png differ diff --git a/blue_noise_textures/16_16/HDR_L_24.png b/blue_noise_textures/16_16/HDR_L_24.png new file mode 100644 index 0000000..e0edb5c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_24.png differ diff --git a/blue_noise_textures/16_16/HDR_L_25.png b/blue_noise_textures/16_16/HDR_L_25.png new file mode 100644 index 0000000..c469899 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_25.png differ diff --git a/blue_noise_textures/16_16/HDR_L_26.png b/blue_noise_textures/16_16/HDR_L_26.png new file mode 100644 index 0000000..85d94e1 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_26.png differ diff --git a/blue_noise_textures/16_16/HDR_L_27.png b/blue_noise_textures/16_16/HDR_L_27.png new file mode 100644 index 0000000..3ff2382 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_27.png differ diff --git a/blue_noise_textures/16_16/HDR_L_28.png b/blue_noise_textures/16_16/HDR_L_28.png new file mode 100644 index 0000000..84ba0cb Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_28.png differ diff --git a/blue_noise_textures/16_16/HDR_L_29.png b/blue_noise_textures/16_16/HDR_L_29.png new file mode 100644 index 0000000..2b7cfa3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_29.png differ diff --git a/blue_noise_textures/16_16/HDR_L_3.png b/blue_noise_textures/16_16/HDR_L_3.png new file mode 100644 index 0000000..33c5b8a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_3.png differ diff --git a/blue_noise_textures/16_16/HDR_L_30.png b/blue_noise_textures/16_16/HDR_L_30.png new file mode 100644 index 0000000..b25635b Binary files /dev/null 
and b/blue_noise_textures/16_16/HDR_L_30.png differ diff --git a/blue_noise_textures/16_16/HDR_L_31.png b/blue_noise_textures/16_16/HDR_L_31.png new file mode 100644 index 0000000..7ca4a8c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_31.png differ diff --git a/blue_noise_textures/16_16/HDR_L_32.png b/blue_noise_textures/16_16/HDR_L_32.png new file mode 100644 index 0000000..c918984 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_32.png differ diff --git a/blue_noise_textures/16_16/HDR_L_33.png b/blue_noise_textures/16_16/HDR_L_33.png new file mode 100644 index 0000000..fe7dbba Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_33.png differ diff --git a/blue_noise_textures/16_16/HDR_L_34.png b/blue_noise_textures/16_16/HDR_L_34.png new file mode 100644 index 0000000..921ef90 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_34.png differ diff --git a/blue_noise_textures/16_16/HDR_L_35.png b/blue_noise_textures/16_16/HDR_L_35.png new file mode 100644 index 0000000..701dd9c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_35.png differ diff --git a/blue_noise_textures/16_16/HDR_L_36.png b/blue_noise_textures/16_16/HDR_L_36.png new file mode 100644 index 0000000..eac961b Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_36.png differ diff --git a/blue_noise_textures/16_16/HDR_L_37.png b/blue_noise_textures/16_16/HDR_L_37.png new file mode 100644 index 0000000..5a37468 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_37.png differ diff --git a/blue_noise_textures/16_16/HDR_L_38.png b/blue_noise_textures/16_16/HDR_L_38.png new file mode 100644 index 0000000..b748640 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_38.png differ diff --git a/blue_noise_textures/16_16/HDR_L_39.png b/blue_noise_textures/16_16/HDR_L_39.png new file mode 100644 index 0000000..0637d45 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_39.png differ diff --git a/blue_noise_textures/16_16/HDR_L_4.png b/blue_noise_textures/16_16/HDR_L_4.png new file mode 100644 index 0000000..3da44ce Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_4.png differ diff --git a/blue_noise_textures/16_16/HDR_L_40.png b/blue_noise_textures/16_16/HDR_L_40.png new file mode 100644 index 0000000..4269eed Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_40.png differ diff --git a/blue_noise_textures/16_16/HDR_L_41.png b/blue_noise_textures/16_16/HDR_L_41.png new file mode 100644 index 0000000..30c0e8e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_41.png differ diff --git a/blue_noise_textures/16_16/HDR_L_42.png b/blue_noise_textures/16_16/HDR_L_42.png new file mode 100644 index 0000000..c63ad01 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_42.png differ diff --git a/blue_noise_textures/16_16/HDR_L_43.png b/blue_noise_textures/16_16/HDR_L_43.png new file mode 100644 index 0000000..4c56a94 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_43.png differ diff --git a/blue_noise_textures/16_16/HDR_L_44.png b/blue_noise_textures/16_16/HDR_L_44.png new file mode 100644 index 0000000..5d7bba6 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_44.png differ diff --git a/blue_noise_textures/16_16/HDR_L_45.png b/blue_noise_textures/16_16/HDR_L_45.png new file mode 100644 index 0000000..f5f13b8 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_45.png differ diff --git a/blue_noise_textures/16_16/HDR_L_46.png b/blue_noise_textures/16_16/HDR_L_46.png new file mode 100644 
index 0000000..e8f41ed Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_46.png differ diff --git a/blue_noise_textures/16_16/HDR_L_47.png b/blue_noise_textures/16_16/HDR_L_47.png new file mode 100644 index 0000000..269486e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_47.png differ diff --git a/blue_noise_textures/16_16/HDR_L_48.png b/blue_noise_textures/16_16/HDR_L_48.png new file mode 100644 index 0000000..94fd348 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_48.png differ diff --git a/blue_noise_textures/16_16/HDR_L_49.png b/blue_noise_textures/16_16/HDR_L_49.png new file mode 100644 index 0000000..b29584c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_49.png differ diff --git a/blue_noise_textures/16_16/HDR_L_5.png b/blue_noise_textures/16_16/HDR_L_5.png new file mode 100644 index 0000000..a45432a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_5.png differ diff --git a/blue_noise_textures/16_16/HDR_L_50.png b/blue_noise_textures/16_16/HDR_L_50.png new file mode 100644 index 0000000..a292858 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_50.png differ diff --git a/blue_noise_textures/16_16/HDR_L_51.png b/blue_noise_textures/16_16/HDR_L_51.png new file mode 100644 index 0000000..b21d077 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_51.png differ diff --git a/blue_noise_textures/16_16/HDR_L_52.png b/blue_noise_textures/16_16/HDR_L_52.png new file mode 100644 index 0000000..ea15f36 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_52.png differ diff --git a/blue_noise_textures/16_16/HDR_L_53.png b/blue_noise_textures/16_16/HDR_L_53.png new file mode 100644 index 0000000..4b56254 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_53.png differ diff --git a/blue_noise_textures/16_16/HDR_L_54.png b/blue_noise_textures/16_16/HDR_L_54.png new file mode 100644 index 0000000..f1148e7 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_54.png differ diff --git a/blue_noise_textures/16_16/HDR_L_55.png b/blue_noise_textures/16_16/HDR_L_55.png new file mode 100644 index 0000000..cb2f18f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_55.png differ diff --git a/blue_noise_textures/16_16/HDR_L_56.png b/blue_noise_textures/16_16/HDR_L_56.png new file mode 100644 index 0000000..603a54f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_56.png differ diff --git a/blue_noise_textures/16_16/HDR_L_57.png b/blue_noise_textures/16_16/HDR_L_57.png new file mode 100644 index 0000000..6834cca Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_57.png differ diff --git a/blue_noise_textures/16_16/HDR_L_58.png b/blue_noise_textures/16_16/HDR_L_58.png new file mode 100644 index 0000000..94e2b27 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_58.png differ diff --git a/blue_noise_textures/16_16/HDR_L_59.png b/blue_noise_textures/16_16/HDR_L_59.png new file mode 100644 index 0000000..35b6816 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_59.png differ diff --git a/blue_noise_textures/16_16/HDR_L_6.png b/blue_noise_textures/16_16/HDR_L_6.png new file mode 100644 index 0000000..fd9db9f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_6.png differ diff --git a/blue_noise_textures/16_16/HDR_L_60.png b/blue_noise_textures/16_16/HDR_L_60.png new file mode 100644 index 0000000..ff69a43 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_60.png differ diff --git a/blue_noise_textures/16_16/HDR_L_61.png 
b/blue_noise_textures/16_16/HDR_L_61.png new file mode 100644 index 0000000..d81fde5 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_61.png differ diff --git a/blue_noise_textures/16_16/HDR_L_62.png b/blue_noise_textures/16_16/HDR_L_62.png new file mode 100644 index 0000000..37ba3a2 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_62.png differ diff --git a/blue_noise_textures/16_16/HDR_L_63.png b/blue_noise_textures/16_16/HDR_L_63.png new file mode 100644 index 0000000..44c4c9a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_63.png differ diff --git a/blue_noise_textures/16_16/HDR_L_7.png b/blue_noise_textures/16_16/HDR_L_7.png new file mode 100644 index 0000000..88a183e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_7.png differ diff --git a/blue_noise_textures/16_16/HDR_L_8.png b/blue_noise_textures/16_16/HDR_L_8.png new file mode 100644 index 0000000..41cb363 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_8.png differ diff --git a/blue_noise_textures/16_16/HDR_L_9.png b/blue_noise_textures/16_16/HDR_L_9.png new file mode 100644 index 0000000..b433cc3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_L_9.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_0.png b/blue_noise_textures/16_16/HDR_RGBA_0.png new file mode 100644 index 0000000..a9b5168 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_0.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_1.png b/blue_noise_textures/16_16/HDR_RGBA_1.png new file mode 100644 index 0000000..9a8e793 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_1.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_10.png b/blue_noise_textures/16_16/HDR_RGBA_10.png new file mode 100644 index 0000000..a7a8bca Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_10.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_11.png b/blue_noise_textures/16_16/HDR_RGBA_11.png new file mode 100644 index 0000000..6507ef0 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_11.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_12.png b/blue_noise_textures/16_16/HDR_RGBA_12.png new file mode 100644 index 0000000..d9f02c5 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_12.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_13.png b/blue_noise_textures/16_16/HDR_RGBA_13.png new file mode 100644 index 0000000..7d6f1a0 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_13.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_14.png b/blue_noise_textures/16_16/HDR_RGBA_14.png new file mode 100644 index 0000000..b7288f2 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_14.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_15.png b/blue_noise_textures/16_16/HDR_RGBA_15.png new file mode 100644 index 0000000..f2e7551 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_15.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_16.png b/blue_noise_textures/16_16/HDR_RGBA_16.png new file mode 100644 index 0000000..e093626 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_16.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_17.png b/blue_noise_textures/16_16/HDR_RGBA_17.png new file mode 100644 index 0000000..2083096 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_17.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_18.png b/blue_noise_textures/16_16/HDR_RGBA_18.png new file mode 100644 index 
0000000..0319e54 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_18.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_19.png b/blue_noise_textures/16_16/HDR_RGBA_19.png new file mode 100644 index 0000000..92f76d0 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_19.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_2.png b/blue_noise_textures/16_16/HDR_RGBA_2.png new file mode 100644 index 0000000..a99a53c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_2.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_20.png b/blue_noise_textures/16_16/HDR_RGBA_20.png new file mode 100644 index 0000000..f3b397f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_20.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_21.png b/blue_noise_textures/16_16/HDR_RGBA_21.png new file mode 100644 index 0000000..5b1df8a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_21.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_22.png b/blue_noise_textures/16_16/HDR_RGBA_22.png new file mode 100644 index 0000000..681713a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_22.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_23.png b/blue_noise_textures/16_16/HDR_RGBA_23.png new file mode 100644 index 0000000..2620e2d Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_23.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_24.png b/blue_noise_textures/16_16/HDR_RGBA_24.png new file mode 100644 index 0000000..0bd7366 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_24.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_25.png b/blue_noise_textures/16_16/HDR_RGBA_25.png new file mode 100644 index 0000000..8aa4cc5 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_25.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_26.png b/blue_noise_textures/16_16/HDR_RGBA_26.png new file mode 100644 index 0000000..f96d206 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_26.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_27.png b/blue_noise_textures/16_16/HDR_RGBA_27.png new file mode 100644 index 0000000..6f32b61 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_27.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_28.png b/blue_noise_textures/16_16/HDR_RGBA_28.png new file mode 100644 index 0000000..0457e10 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_28.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_29.png b/blue_noise_textures/16_16/HDR_RGBA_29.png new file mode 100644 index 0000000..2abb1eb Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_29.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_3.png b/blue_noise_textures/16_16/HDR_RGBA_3.png new file mode 100644 index 0000000..1b4d1dd Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_3.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_30.png b/blue_noise_textures/16_16/HDR_RGBA_30.png new file mode 100644 index 0000000..80339cc Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_30.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_31.png b/blue_noise_textures/16_16/HDR_RGBA_31.png new file mode 100644 index 0000000..4ac412d Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_31.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_32.png b/blue_noise_textures/16_16/HDR_RGBA_32.png new file mode 100644 index 0000000..0c818a6 Binary 
files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_32.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_33.png b/blue_noise_textures/16_16/HDR_RGBA_33.png new file mode 100644 index 0000000..89c6dce Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_33.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_34.png b/blue_noise_textures/16_16/HDR_RGBA_34.png new file mode 100644 index 0000000..ebd5915 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_34.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_35.png b/blue_noise_textures/16_16/HDR_RGBA_35.png new file mode 100644 index 0000000..d7d284a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_35.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_36.png b/blue_noise_textures/16_16/HDR_RGBA_36.png new file mode 100644 index 0000000..ba35267 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_36.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_37.png b/blue_noise_textures/16_16/HDR_RGBA_37.png new file mode 100644 index 0000000..b5fda88 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_37.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_38.png b/blue_noise_textures/16_16/HDR_RGBA_38.png new file mode 100644 index 0000000..d0bd754 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_38.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_39.png b/blue_noise_textures/16_16/HDR_RGBA_39.png new file mode 100644 index 0000000..e45e861 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_39.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_4.png b/blue_noise_textures/16_16/HDR_RGBA_4.png new file mode 100644 index 0000000..49be013 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_4.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_40.png b/blue_noise_textures/16_16/HDR_RGBA_40.png new file mode 100644 index 0000000..5cf9741 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_40.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_41.png b/blue_noise_textures/16_16/HDR_RGBA_41.png new file mode 100644 index 0000000..8cb51d6 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_41.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_42.png b/blue_noise_textures/16_16/HDR_RGBA_42.png new file mode 100644 index 0000000..dc92ca5 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_42.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_43.png b/blue_noise_textures/16_16/HDR_RGBA_43.png new file mode 100644 index 0000000..38f81a4 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_43.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_44.png b/blue_noise_textures/16_16/HDR_RGBA_44.png new file mode 100644 index 0000000..d9e4810 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_44.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_45.png b/blue_noise_textures/16_16/HDR_RGBA_45.png new file mode 100644 index 0000000..7f31ef4 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_45.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_46.png b/blue_noise_textures/16_16/HDR_RGBA_46.png new file mode 100644 index 0000000..cd172ea Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_46.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_47.png b/blue_noise_textures/16_16/HDR_RGBA_47.png new file mode 100644 index 0000000..e28cad5 Binary files /dev/null and 
b/blue_noise_textures/16_16/HDR_RGBA_47.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_48.png b/blue_noise_textures/16_16/HDR_RGBA_48.png new file mode 100644 index 0000000..87c59a7 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_48.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_49.png b/blue_noise_textures/16_16/HDR_RGBA_49.png new file mode 100644 index 0000000..f53a02e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_49.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_5.png b/blue_noise_textures/16_16/HDR_RGBA_5.png new file mode 100644 index 0000000..bd148b0 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_5.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_50.png b/blue_noise_textures/16_16/HDR_RGBA_50.png new file mode 100644 index 0000000..158b7df Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_50.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_51.png b/blue_noise_textures/16_16/HDR_RGBA_51.png new file mode 100644 index 0000000..de8a081 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_51.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_52.png b/blue_noise_textures/16_16/HDR_RGBA_52.png new file mode 100644 index 0000000..ff23622 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_52.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_53.png b/blue_noise_textures/16_16/HDR_RGBA_53.png new file mode 100644 index 0000000..b7d8589 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_53.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_54.png b/blue_noise_textures/16_16/HDR_RGBA_54.png new file mode 100644 index 0000000..87aa604 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_54.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_55.png b/blue_noise_textures/16_16/HDR_RGBA_55.png new file mode 100644 index 0000000..9bd62d1 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_55.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_56.png b/blue_noise_textures/16_16/HDR_RGBA_56.png new file mode 100644 index 0000000..adb5ab8 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_56.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_57.png b/blue_noise_textures/16_16/HDR_RGBA_57.png new file mode 100644 index 0000000..184df3a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_57.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_58.png b/blue_noise_textures/16_16/HDR_RGBA_58.png new file mode 100644 index 0000000..63eba4b Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_58.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_59.png b/blue_noise_textures/16_16/HDR_RGBA_59.png new file mode 100644 index 0000000..a8589cb Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_59.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_6.png b/blue_noise_textures/16_16/HDR_RGBA_6.png new file mode 100644 index 0000000..f1368a8 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_6.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_60.png b/blue_noise_textures/16_16/HDR_RGBA_60.png new file mode 100644 index 0000000..38c9f3e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_60.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_61.png b/blue_noise_textures/16_16/HDR_RGBA_61.png new file mode 100644 index 0000000..b5d5bc7 Binary files /dev/null and 
b/blue_noise_textures/16_16/HDR_RGBA_61.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_62.png b/blue_noise_textures/16_16/HDR_RGBA_62.png new file mode 100644 index 0000000..4529d88 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_62.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_63.png b/blue_noise_textures/16_16/HDR_RGBA_63.png new file mode 100644 index 0000000..d800e95 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_63.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_7.png b/blue_noise_textures/16_16/HDR_RGBA_7.png new file mode 100644 index 0000000..7c5af68 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_7.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_8.png b/blue_noise_textures/16_16/HDR_RGBA_8.png new file mode 100644 index 0000000..230689a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_8.png differ diff --git a/blue_noise_textures/16_16/HDR_RGBA_9.png b/blue_noise_textures/16_16/HDR_RGBA_9.png new file mode 100644 index 0000000..4c3bd03 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGBA_9.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_0.png b/blue_noise_textures/16_16/HDR_RGB_0.png new file mode 100644 index 0000000..1251505 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_0.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_1.png b/blue_noise_textures/16_16/HDR_RGB_1.png new file mode 100644 index 0000000..445bf56 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_1.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_10.png b/blue_noise_textures/16_16/HDR_RGB_10.png new file mode 100644 index 0000000..2222755 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_10.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_11.png b/blue_noise_textures/16_16/HDR_RGB_11.png new file mode 100644 index 0000000..08b81bf Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_11.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_12.png b/blue_noise_textures/16_16/HDR_RGB_12.png new file mode 100644 index 0000000..8104259 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_12.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_13.png b/blue_noise_textures/16_16/HDR_RGB_13.png new file mode 100644 index 0000000..e352833 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_13.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_14.png b/blue_noise_textures/16_16/HDR_RGB_14.png new file mode 100644 index 0000000..aaf4526 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_14.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_15.png b/blue_noise_textures/16_16/HDR_RGB_15.png new file mode 100644 index 0000000..3fa4e3d Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_15.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_16.png b/blue_noise_textures/16_16/HDR_RGB_16.png new file mode 100644 index 0000000..59f0983 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_16.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_17.png b/blue_noise_textures/16_16/HDR_RGB_17.png new file mode 100644 index 0000000..6f9e3e8 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_17.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_18.png b/blue_noise_textures/16_16/HDR_RGB_18.png new file mode 100644 index 0000000..2ba8820 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_18.png differ diff --git 
a/blue_noise_textures/16_16/HDR_RGB_19.png b/blue_noise_textures/16_16/HDR_RGB_19.png new file mode 100644 index 0000000..60a81cb Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_19.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_2.png b/blue_noise_textures/16_16/HDR_RGB_2.png new file mode 100644 index 0000000..1e910d3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_2.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_20.png b/blue_noise_textures/16_16/HDR_RGB_20.png new file mode 100644 index 0000000..1e05c46 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_20.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_21.png b/blue_noise_textures/16_16/HDR_RGB_21.png new file mode 100644 index 0000000..4524daa Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_21.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_22.png b/blue_noise_textures/16_16/HDR_RGB_22.png new file mode 100644 index 0000000..ec3cd79 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_22.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_23.png b/blue_noise_textures/16_16/HDR_RGB_23.png new file mode 100644 index 0000000..b9063f8 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_23.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_24.png b/blue_noise_textures/16_16/HDR_RGB_24.png new file mode 100644 index 0000000..4f45199 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_24.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_25.png b/blue_noise_textures/16_16/HDR_RGB_25.png new file mode 100644 index 0000000..bdfde5e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_25.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_26.png b/blue_noise_textures/16_16/HDR_RGB_26.png new file mode 100644 index 0000000..0807825 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_26.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_27.png b/blue_noise_textures/16_16/HDR_RGB_27.png new file mode 100644 index 0000000..4c3c0ae Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_27.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_28.png b/blue_noise_textures/16_16/HDR_RGB_28.png new file mode 100644 index 0000000..0e7e6da Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_28.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_29.png b/blue_noise_textures/16_16/HDR_RGB_29.png new file mode 100644 index 0000000..39cb88b Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_29.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_3.png b/blue_noise_textures/16_16/HDR_RGB_3.png new file mode 100644 index 0000000..335983e Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_3.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_30.png b/blue_noise_textures/16_16/HDR_RGB_30.png new file mode 100644 index 0000000..b50c232 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_30.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_31.png b/blue_noise_textures/16_16/HDR_RGB_31.png new file mode 100644 index 0000000..b48c21c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_31.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_32.png b/blue_noise_textures/16_16/HDR_RGB_32.png new file mode 100644 index 0000000..f6ebd28 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_32.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_33.png 
b/blue_noise_textures/16_16/HDR_RGB_33.png new file mode 100644 index 0000000..f8f198d Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_33.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_34.png b/blue_noise_textures/16_16/HDR_RGB_34.png new file mode 100644 index 0000000..3359173 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_34.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_35.png b/blue_noise_textures/16_16/HDR_RGB_35.png new file mode 100644 index 0000000..0573e98 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_35.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_36.png b/blue_noise_textures/16_16/HDR_RGB_36.png new file mode 100644 index 0000000..621d350 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_36.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_37.png b/blue_noise_textures/16_16/HDR_RGB_37.png new file mode 100644 index 0000000..575d32c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_37.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_38.png b/blue_noise_textures/16_16/HDR_RGB_38.png new file mode 100644 index 0000000..aed3648 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_38.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_39.png b/blue_noise_textures/16_16/HDR_RGB_39.png new file mode 100644 index 0000000..1257e72 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_39.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_4.png b/blue_noise_textures/16_16/HDR_RGB_4.png new file mode 100644 index 0000000..24e8a4c Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_4.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_40.png b/blue_noise_textures/16_16/HDR_RGB_40.png new file mode 100644 index 0000000..c29c992 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_40.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_41.png b/blue_noise_textures/16_16/HDR_RGB_41.png new file mode 100644 index 0000000..b63bbd8 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_41.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_42.png b/blue_noise_textures/16_16/HDR_RGB_42.png new file mode 100644 index 0000000..c5f1ea3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_42.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_43.png b/blue_noise_textures/16_16/HDR_RGB_43.png new file mode 100644 index 0000000..0d667b2 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_43.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_44.png b/blue_noise_textures/16_16/HDR_RGB_44.png new file mode 100644 index 0000000..7314561 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_44.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_45.png b/blue_noise_textures/16_16/HDR_RGB_45.png new file mode 100644 index 0000000..b06b875 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_45.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_46.png b/blue_noise_textures/16_16/HDR_RGB_46.png new file mode 100644 index 0000000..293563d Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_46.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_47.png b/blue_noise_textures/16_16/HDR_RGB_47.png new file mode 100644 index 0000000..b95ec8f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_47.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_48.png b/blue_noise_textures/16_16/HDR_RGB_48.png new file mode 100644 index 
0000000..c27a331 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_48.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_49.png b/blue_noise_textures/16_16/HDR_RGB_49.png new file mode 100644 index 0000000..77067d3 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_49.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_5.png b/blue_noise_textures/16_16/HDR_RGB_5.png new file mode 100644 index 0000000..401dbf0 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_5.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_50.png b/blue_noise_textures/16_16/HDR_RGB_50.png new file mode 100644 index 0000000..ecb8bfa Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_50.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_51.png b/blue_noise_textures/16_16/HDR_RGB_51.png new file mode 100644 index 0000000..079884f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_51.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_52.png b/blue_noise_textures/16_16/HDR_RGB_52.png new file mode 100644 index 0000000..f5225ef Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_52.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_53.png b/blue_noise_textures/16_16/HDR_RGB_53.png new file mode 100644 index 0000000..30166dc Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_53.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_54.png b/blue_noise_textures/16_16/HDR_RGB_54.png new file mode 100644 index 0000000..a29dbf6 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_54.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_55.png b/blue_noise_textures/16_16/HDR_RGB_55.png new file mode 100644 index 0000000..e6dbbe9 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_55.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_56.png b/blue_noise_textures/16_16/HDR_RGB_56.png new file mode 100644 index 0000000..d1b295d Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_56.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_57.png b/blue_noise_textures/16_16/HDR_RGB_57.png new file mode 100644 index 0000000..b7c77ec Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_57.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_58.png b/blue_noise_textures/16_16/HDR_RGB_58.png new file mode 100644 index 0000000..f829664 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_58.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_59.png b/blue_noise_textures/16_16/HDR_RGB_59.png new file mode 100644 index 0000000..eb566ec Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_59.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_6.png b/blue_noise_textures/16_16/HDR_RGB_6.png new file mode 100644 index 0000000..a3fd47f Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_6.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_60.png b/blue_noise_textures/16_16/HDR_RGB_60.png new file mode 100644 index 0000000..3012bda Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_60.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_61.png b/blue_noise_textures/16_16/HDR_RGB_61.png new file mode 100644 index 0000000..ab2ca2a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_61.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_62.png b/blue_noise_textures/16_16/HDR_RGB_62.png new file mode 100644 index 0000000..76a795c Binary files /dev/null and 
b/blue_noise_textures/16_16/HDR_RGB_62.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_63.png b/blue_noise_textures/16_16/HDR_RGB_63.png new file mode 100644 index 0000000..b61165a Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_63.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_7.png b/blue_noise_textures/16_16/HDR_RGB_7.png new file mode 100644 index 0000000..592ccca Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_7.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_8.png b/blue_noise_textures/16_16/HDR_RGB_8.png new file mode 100644 index 0000000..57fea34 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_8.png differ diff --git a/blue_noise_textures/16_16/HDR_RGB_9.png b/blue_noise_textures/16_16/HDR_RGB_9.png new file mode 100644 index 0000000..8080cb2 Binary files /dev/null and b/blue_noise_textures/16_16/HDR_RGB_9.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_0.png b/blue_noise_textures/16_16/LDR_LLL1_0.png new file mode 100644 index 0000000..199ea82 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_0.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_1.png b/blue_noise_textures/16_16/LDR_LLL1_1.png new file mode 100644 index 0000000..3eb8b2a Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_1.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_10.png b/blue_noise_textures/16_16/LDR_LLL1_10.png new file mode 100644 index 0000000..cc9ce05 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_10.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_11.png b/blue_noise_textures/16_16/LDR_LLL1_11.png new file mode 100644 index 0000000..07b3bcd Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_11.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_12.png b/blue_noise_textures/16_16/LDR_LLL1_12.png new file mode 100644 index 0000000..66cd074 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_12.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_13.png b/blue_noise_textures/16_16/LDR_LLL1_13.png new file mode 100644 index 0000000..fffbb5d Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_13.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_14.png b/blue_noise_textures/16_16/LDR_LLL1_14.png new file mode 100644 index 0000000..d6b5390 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_14.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_15.png b/blue_noise_textures/16_16/LDR_LLL1_15.png new file mode 100644 index 0000000..94da642 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_15.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_16.png b/blue_noise_textures/16_16/LDR_LLL1_16.png new file mode 100644 index 0000000..b624380 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_16.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_17.png b/blue_noise_textures/16_16/LDR_LLL1_17.png new file mode 100644 index 0000000..7dc2698 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_17.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_18.png b/blue_noise_textures/16_16/LDR_LLL1_18.png new file mode 100644 index 0000000..026b356 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_18.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_19.png b/blue_noise_textures/16_16/LDR_LLL1_19.png new file mode 100644 index 0000000..8780df4 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_19.png 
differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_2.png b/blue_noise_textures/16_16/LDR_LLL1_2.png new file mode 100644 index 0000000..fc7cf7b Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_2.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_20.png b/blue_noise_textures/16_16/LDR_LLL1_20.png new file mode 100644 index 0000000..e9da82e Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_20.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_21.png b/blue_noise_textures/16_16/LDR_LLL1_21.png new file mode 100644 index 0000000..6af0bae Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_21.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_22.png b/blue_noise_textures/16_16/LDR_LLL1_22.png new file mode 100644 index 0000000..490dfc7 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_22.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_23.png b/blue_noise_textures/16_16/LDR_LLL1_23.png new file mode 100644 index 0000000..7b5d407 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_23.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_24.png b/blue_noise_textures/16_16/LDR_LLL1_24.png new file mode 100644 index 0000000..f4e8fdb Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_24.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_25.png b/blue_noise_textures/16_16/LDR_LLL1_25.png new file mode 100644 index 0000000..8c2b24a Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_25.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_26.png b/blue_noise_textures/16_16/LDR_LLL1_26.png new file mode 100644 index 0000000..a599a10 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_26.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_27.png b/blue_noise_textures/16_16/LDR_LLL1_27.png new file mode 100644 index 0000000..9605aaf Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_27.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_28.png b/blue_noise_textures/16_16/LDR_LLL1_28.png new file mode 100644 index 0000000..ccd5d9b Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_28.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_29.png b/blue_noise_textures/16_16/LDR_LLL1_29.png new file mode 100644 index 0000000..a870e6c Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_29.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_3.png b/blue_noise_textures/16_16/LDR_LLL1_3.png new file mode 100644 index 0000000..dd0bd45 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_3.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_30.png b/blue_noise_textures/16_16/LDR_LLL1_30.png new file mode 100644 index 0000000..a7d2315 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_30.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_31.png b/blue_noise_textures/16_16/LDR_LLL1_31.png new file mode 100644 index 0000000..b8053f1 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_31.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_32.png b/blue_noise_textures/16_16/LDR_LLL1_32.png new file mode 100644 index 0000000..b0ae835 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_32.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_33.png b/blue_noise_textures/16_16/LDR_LLL1_33.png new file mode 100644 index 0000000..0319b6f Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_33.png differ diff --git 
a/blue_noise_textures/16_16/LDR_LLL1_34.png b/blue_noise_textures/16_16/LDR_LLL1_34.png new file mode 100644 index 0000000..ebfc175 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_34.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_35.png b/blue_noise_textures/16_16/LDR_LLL1_35.png new file mode 100644 index 0000000..0b45849 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_35.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_36.png b/blue_noise_textures/16_16/LDR_LLL1_36.png new file mode 100644 index 0000000..b3d7976 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_36.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_37.png b/blue_noise_textures/16_16/LDR_LLL1_37.png new file mode 100644 index 0000000..d3b93ca Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_37.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_38.png b/blue_noise_textures/16_16/LDR_LLL1_38.png new file mode 100644 index 0000000..6e1b99b Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_38.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_39.png b/blue_noise_textures/16_16/LDR_LLL1_39.png new file mode 100644 index 0000000..882e563 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_39.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_4.png b/blue_noise_textures/16_16/LDR_LLL1_4.png new file mode 100644 index 0000000..6dd46f6 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_4.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_40.png b/blue_noise_textures/16_16/LDR_LLL1_40.png new file mode 100644 index 0000000..f9e582f Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_40.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_41.png b/blue_noise_textures/16_16/LDR_LLL1_41.png new file mode 100644 index 0000000..588c204 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_41.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_42.png b/blue_noise_textures/16_16/LDR_LLL1_42.png new file mode 100644 index 0000000..eca4bef Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_42.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_43.png b/blue_noise_textures/16_16/LDR_LLL1_43.png new file mode 100644 index 0000000..8def985 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_43.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_44.png b/blue_noise_textures/16_16/LDR_LLL1_44.png new file mode 100644 index 0000000..1149df3 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_44.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_45.png b/blue_noise_textures/16_16/LDR_LLL1_45.png new file mode 100644 index 0000000..c05061f Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_45.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_46.png b/blue_noise_textures/16_16/LDR_LLL1_46.png new file mode 100644 index 0000000..658bf6a Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_46.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_47.png b/blue_noise_textures/16_16/LDR_LLL1_47.png new file mode 100644 index 0000000..fa0d394 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_47.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_48.png b/blue_noise_textures/16_16/LDR_LLL1_48.png new file mode 100644 index 0000000..f6d85ff Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_48.png differ diff --git 
a/blue_noise_textures/16_16/LDR_LLL1_49.png b/blue_noise_textures/16_16/LDR_LLL1_49.png new file mode 100644 index 0000000..7d00cc5 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_49.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_5.png b/blue_noise_textures/16_16/LDR_LLL1_5.png new file mode 100644 index 0000000..78fc3f2 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_5.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_50.png b/blue_noise_textures/16_16/LDR_LLL1_50.png new file mode 100644 index 0000000..7f0921b Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_50.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_51.png b/blue_noise_textures/16_16/LDR_LLL1_51.png new file mode 100644 index 0000000..dc659d6 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_51.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_52.png b/blue_noise_textures/16_16/LDR_LLL1_52.png new file mode 100644 index 0000000..37a4d96 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_52.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_53.png b/blue_noise_textures/16_16/LDR_LLL1_53.png new file mode 100644 index 0000000..8e6b273 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_53.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_54.png b/blue_noise_textures/16_16/LDR_LLL1_54.png new file mode 100644 index 0000000..988a7e7 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_54.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_55.png b/blue_noise_textures/16_16/LDR_LLL1_55.png new file mode 100644 index 0000000..85db033 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_55.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_56.png b/blue_noise_textures/16_16/LDR_LLL1_56.png new file mode 100644 index 0000000..fb8d0ac Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_56.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_57.png b/blue_noise_textures/16_16/LDR_LLL1_57.png new file mode 100644 index 0000000..e69b90a Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_57.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_58.png b/blue_noise_textures/16_16/LDR_LLL1_58.png new file mode 100644 index 0000000..b7f63d4 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_58.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_59.png b/blue_noise_textures/16_16/LDR_LLL1_59.png new file mode 100644 index 0000000..f34502c Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_59.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_6.png b/blue_noise_textures/16_16/LDR_LLL1_6.png new file mode 100644 index 0000000..af5ff14 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_6.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_60.png b/blue_noise_textures/16_16/LDR_LLL1_60.png new file mode 100644 index 0000000..df6b01d Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_60.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_61.png b/blue_noise_textures/16_16/LDR_LLL1_61.png new file mode 100644 index 0000000..aa3a886 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_61.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_62.png b/blue_noise_textures/16_16/LDR_LLL1_62.png new file mode 100644 index 0000000..bee8081 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_62.png differ diff --git 
a/blue_noise_textures/16_16/LDR_LLL1_63.png b/blue_noise_textures/16_16/LDR_LLL1_63.png new file mode 100644 index 0000000..291f9f4 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_63.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_7.png b/blue_noise_textures/16_16/LDR_LLL1_7.png new file mode 100644 index 0000000..84d2f81 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_7.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_8.png b/blue_noise_textures/16_16/LDR_LLL1_8.png new file mode 100644 index 0000000..982482e Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_8.png differ diff --git a/blue_noise_textures/16_16/LDR_LLL1_9.png b/blue_noise_textures/16_16/LDR_LLL1_9.png new file mode 100644 index 0000000..6466131 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_LLL1_9.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_0.png b/blue_noise_textures/16_16/LDR_RG01_0.png new file mode 100644 index 0000000..bb14a9f Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_0.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_1.png b/blue_noise_textures/16_16/LDR_RG01_1.png new file mode 100644 index 0000000..07c8f4d Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_1.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_10.png b/blue_noise_textures/16_16/LDR_RG01_10.png new file mode 100644 index 0000000..78fb176 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_10.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_11.png b/blue_noise_textures/16_16/LDR_RG01_11.png new file mode 100644 index 0000000..55b55db Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_11.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_12.png b/blue_noise_textures/16_16/LDR_RG01_12.png new file mode 100644 index 0000000..8fce6d8 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_12.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_13.png b/blue_noise_textures/16_16/LDR_RG01_13.png new file mode 100644 index 0000000..0abbda5 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_13.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_14.png b/blue_noise_textures/16_16/LDR_RG01_14.png new file mode 100644 index 0000000..c412df1 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_14.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_15.png b/blue_noise_textures/16_16/LDR_RG01_15.png new file mode 100644 index 0000000..a384c96 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_15.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_16.png b/blue_noise_textures/16_16/LDR_RG01_16.png new file mode 100644 index 0000000..f7a7f19 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_16.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_17.png b/blue_noise_textures/16_16/LDR_RG01_17.png new file mode 100644 index 0000000..95c0d93 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_17.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_18.png b/blue_noise_textures/16_16/LDR_RG01_18.png new file mode 100644 index 0000000..45b7698 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_18.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_19.png b/blue_noise_textures/16_16/LDR_RG01_19.png new file mode 100644 index 0000000..0dfe41e Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_19.png differ diff --git 
a/blue_noise_textures/16_16/LDR_RG01_2.png b/blue_noise_textures/16_16/LDR_RG01_2.png new file mode 100644 index 0000000..2e81ab9 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_2.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_20.png b/blue_noise_textures/16_16/LDR_RG01_20.png new file mode 100644 index 0000000..2b833f1 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_20.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_21.png b/blue_noise_textures/16_16/LDR_RG01_21.png new file mode 100644 index 0000000..ae57972 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_21.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_22.png b/blue_noise_textures/16_16/LDR_RG01_22.png new file mode 100644 index 0000000..3b98e97 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_22.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_23.png b/blue_noise_textures/16_16/LDR_RG01_23.png new file mode 100644 index 0000000..274e7b2 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_23.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_24.png b/blue_noise_textures/16_16/LDR_RG01_24.png new file mode 100644 index 0000000..7b04383 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_24.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_25.png b/blue_noise_textures/16_16/LDR_RG01_25.png new file mode 100644 index 0000000..ef32695 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_25.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_26.png b/blue_noise_textures/16_16/LDR_RG01_26.png new file mode 100644 index 0000000..f45d989 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_26.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_27.png b/blue_noise_textures/16_16/LDR_RG01_27.png new file mode 100644 index 0000000..b037cb7 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_27.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_28.png b/blue_noise_textures/16_16/LDR_RG01_28.png new file mode 100644 index 0000000..d2e2666 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_28.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_29.png b/blue_noise_textures/16_16/LDR_RG01_29.png new file mode 100644 index 0000000..3ade3d5 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_29.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_3.png b/blue_noise_textures/16_16/LDR_RG01_3.png new file mode 100644 index 0000000..eca5f72 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_3.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_30.png b/blue_noise_textures/16_16/LDR_RG01_30.png new file mode 100644 index 0000000..8b39f48 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_30.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_31.png b/blue_noise_textures/16_16/LDR_RG01_31.png new file mode 100644 index 0000000..4ad5f11 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_31.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_32.png b/blue_noise_textures/16_16/LDR_RG01_32.png new file mode 100644 index 0000000..39d57fd Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_32.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_33.png b/blue_noise_textures/16_16/LDR_RG01_33.png new file mode 100644 index 0000000..ae966c2 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_33.png differ diff --git 
a/blue_noise_textures/16_16/LDR_RG01_34.png b/blue_noise_textures/16_16/LDR_RG01_34.png new file mode 100644 index 0000000..9e1ee6d Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_34.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_35.png b/blue_noise_textures/16_16/LDR_RG01_35.png new file mode 100644 index 0000000..9c9ae37 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_35.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_36.png b/blue_noise_textures/16_16/LDR_RG01_36.png new file mode 100644 index 0000000..146306b Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_36.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_37.png b/blue_noise_textures/16_16/LDR_RG01_37.png new file mode 100644 index 0000000..93b19dc Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_37.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_38.png b/blue_noise_textures/16_16/LDR_RG01_38.png new file mode 100644 index 0000000..3e4a707 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_38.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_39.png b/blue_noise_textures/16_16/LDR_RG01_39.png new file mode 100644 index 0000000..98a06ab Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_39.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_4.png b/blue_noise_textures/16_16/LDR_RG01_4.png new file mode 100644 index 0000000..1b73ca2 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_4.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_40.png b/blue_noise_textures/16_16/LDR_RG01_40.png new file mode 100644 index 0000000..258550b Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_40.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_41.png b/blue_noise_textures/16_16/LDR_RG01_41.png new file mode 100644 index 0000000..0b4273e Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_41.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_42.png b/blue_noise_textures/16_16/LDR_RG01_42.png new file mode 100644 index 0000000..68770e9 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_42.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_43.png b/blue_noise_textures/16_16/LDR_RG01_43.png new file mode 100644 index 0000000..001a6fc Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_43.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_44.png b/blue_noise_textures/16_16/LDR_RG01_44.png new file mode 100644 index 0000000..27b1834 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_44.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_45.png b/blue_noise_textures/16_16/LDR_RG01_45.png new file mode 100644 index 0000000..7b6a0f6 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_45.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_46.png b/blue_noise_textures/16_16/LDR_RG01_46.png new file mode 100644 index 0000000..ffd018d Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_46.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_47.png b/blue_noise_textures/16_16/LDR_RG01_47.png new file mode 100644 index 0000000..010f1f7 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_47.png differ diff --git a/blue_noise_textures/16_16/LDR_RG01_48.png b/blue_noise_textures/16_16/LDR_RG01_48.png new file mode 100644 index 0000000..d510337 Binary files /dev/null and b/blue_noise_textures/16_16/LDR_RG01_48.png differ diff --git 
[New binary blue-noise texture files (git: "Binary files /dev/null and b/<path> differ" for each):
  blue_noise_textures/16_16/LDR_RG01_{5..9,49..63}.png    new file mode 100644
  blue_noise_textures/16_16/LDR_RGB1_{0..63}.png          new file mode 100644
  blue_noise_textures/16_16/LDR_RGBA_{0..63}.png          new file mode 100644
  blue_noise_textures/256_256/HDR_RGBA_{0000..0127}.png   new file mode 100755
  blue_noise_textures/256_256/LDR_RGBA_{0000..0035}.png   new file mode 100755]
diff --git a/blue_noise_textures/256_256/LDR_RGBA_0036.png b/blue_noise_textures/256_256/LDR_RGBA_0036.png new file mode 100755 index 0000000..52293be Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0036.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0037.png b/blue_noise_textures/256_256/LDR_RGBA_0037.png new file mode 100755 index 0000000..a550808 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0037.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0038.png b/blue_noise_textures/256_256/LDR_RGBA_0038.png new file mode 100755 index 0000000..d4e26c9 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0038.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0039.png b/blue_noise_textures/256_256/LDR_RGBA_0039.png new file mode 100755 index 0000000..8830785 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0039.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0040.png b/blue_noise_textures/256_256/LDR_RGBA_0040.png new file mode 100755 index 0000000..4bb3a89 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0040.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0041.png b/blue_noise_textures/256_256/LDR_RGBA_0041.png new file mode 100755 index 0000000..1d845bc Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0041.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0042.png b/blue_noise_textures/256_256/LDR_RGBA_0042.png new file mode 100755 index 0000000..981c2bb Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0042.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0043.png b/blue_noise_textures/256_256/LDR_RGBA_0043.png new file mode 100755 index 0000000..5f60da8 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0043.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0044.png b/blue_noise_textures/256_256/LDR_RGBA_0044.png new file mode 100755 index 0000000..3087ff7 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0044.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0045.png b/blue_noise_textures/256_256/LDR_RGBA_0045.png new file mode 100755 index 0000000..97de59d Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0045.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0046.png b/blue_noise_textures/256_256/LDR_RGBA_0046.png new file mode 100755 index 0000000..639d990 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0046.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0047.png b/blue_noise_textures/256_256/LDR_RGBA_0047.png new file mode 100755 index 0000000..4b2673b Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0047.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0048.png b/blue_noise_textures/256_256/LDR_RGBA_0048.png new file mode 100755 index 0000000..4d4c7b8 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0048.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0049.png b/blue_noise_textures/256_256/LDR_RGBA_0049.png new file mode 100755 index 0000000..fe6ad63 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0049.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0050.png b/blue_noise_textures/256_256/LDR_RGBA_0050.png new file mode 100755 index 0000000..6afd9f2 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0050.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0051.png 
b/blue_noise_textures/256_256/LDR_RGBA_0051.png new file mode 100755 index 0000000..aa0449b Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0051.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0052.png b/blue_noise_textures/256_256/LDR_RGBA_0052.png new file mode 100755 index 0000000..2900e24 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0052.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0053.png b/blue_noise_textures/256_256/LDR_RGBA_0053.png new file mode 100755 index 0000000..b1ff98c Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0053.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0054.png b/blue_noise_textures/256_256/LDR_RGBA_0054.png new file mode 100755 index 0000000..0ffbee7 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0054.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0055.png b/blue_noise_textures/256_256/LDR_RGBA_0055.png new file mode 100755 index 0000000..e4f4bfb Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0055.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0056.png b/blue_noise_textures/256_256/LDR_RGBA_0056.png new file mode 100755 index 0000000..cb451c9 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0056.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0057.png b/blue_noise_textures/256_256/LDR_RGBA_0057.png new file mode 100755 index 0000000..4b424e3 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0057.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0058.png b/blue_noise_textures/256_256/LDR_RGBA_0058.png new file mode 100755 index 0000000..29e1e0c Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0058.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0059.png b/blue_noise_textures/256_256/LDR_RGBA_0059.png new file mode 100755 index 0000000..da861f5 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0059.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0060.png b/blue_noise_textures/256_256/LDR_RGBA_0060.png new file mode 100755 index 0000000..8510635 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0060.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0061.png b/blue_noise_textures/256_256/LDR_RGBA_0061.png new file mode 100755 index 0000000..9030230 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0061.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0062.png b/blue_noise_textures/256_256/LDR_RGBA_0062.png new file mode 100755 index 0000000..c1e73a3 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0062.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0063.png b/blue_noise_textures/256_256/LDR_RGBA_0063.png new file mode 100755 index 0000000..89308c5 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0063.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0064.png b/blue_noise_textures/256_256/LDR_RGBA_0064.png new file mode 100755 index 0000000..04473a1 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0064.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0065.png b/blue_noise_textures/256_256/LDR_RGBA_0065.png new file mode 100755 index 0000000..a825729 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0065.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0066.png b/blue_noise_textures/256_256/LDR_RGBA_0066.png new file 
mode 100755 index 0000000..399e2be Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0066.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0067.png b/blue_noise_textures/256_256/LDR_RGBA_0067.png new file mode 100755 index 0000000..7472b11 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0067.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0068.png b/blue_noise_textures/256_256/LDR_RGBA_0068.png new file mode 100755 index 0000000..0cac279 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0068.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0069.png b/blue_noise_textures/256_256/LDR_RGBA_0069.png new file mode 100755 index 0000000..722b513 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0069.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0070.png b/blue_noise_textures/256_256/LDR_RGBA_0070.png new file mode 100755 index 0000000..55a808a Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0070.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0071.png b/blue_noise_textures/256_256/LDR_RGBA_0071.png new file mode 100755 index 0000000..7fe3d41 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0071.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0072.png b/blue_noise_textures/256_256/LDR_RGBA_0072.png new file mode 100755 index 0000000..a44878a Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0072.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0073.png b/blue_noise_textures/256_256/LDR_RGBA_0073.png new file mode 100755 index 0000000..1c70d1b Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0073.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0074.png b/blue_noise_textures/256_256/LDR_RGBA_0074.png new file mode 100755 index 0000000..afb6381 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0074.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0075.png b/blue_noise_textures/256_256/LDR_RGBA_0075.png new file mode 100755 index 0000000..6c87bd6 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0075.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0076.png b/blue_noise_textures/256_256/LDR_RGBA_0076.png new file mode 100755 index 0000000..708bdf2 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0076.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0077.png b/blue_noise_textures/256_256/LDR_RGBA_0077.png new file mode 100755 index 0000000..43d7437 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0077.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0078.png b/blue_noise_textures/256_256/LDR_RGBA_0078.png new file mode 100755 index 0000000..a138099 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0078.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0079.png b/blue_noise_textures/256_256/LDR_RGBA_0079.png new file mode 100755 index 0000000..38448d9 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0079.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0080.png b/blue_noise_textures/256_256/LDR_RGBA_0080.png new file mode 100755 index 0000000..1410e19 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0080.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0081.png b/blue_noise_textures/256_256/LDR_RGBA_0081.png new file mode 100755 index 0000000..40de729 Binary files /dev/null 
and b/blue_noise_textures/256_256/LDR_RGBA_0081.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0082.png b/blue_noise_textures/256_256/LDR_RGBA_0082.png new file mode 100755 index 0000000..375bf42 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0082.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0083.png b/blue_noise_textures/256_256/LDR_RGBA_0083.png new file mode 100755 index 0000000..1469b1b Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0083.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0084.png b/blue_noise_textures/256_256/LDR_RGBA_0084.png new file mode 100755 index 0000000..d3f8c01 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0084.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0085.png b/blue_noise_textures/256_256/LDR_RGBA_0085.png new file mode 100755 index 0000000..9b96875 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0085.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0086.png b/blue_noise_textures/256_256/LDR_RGBA_0086.png new file mode 100755 index 0000000..7fc6b3e Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0086.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0087.png b/blue_noise_textures/256_256/LDR_RGBA_0087.png new file mode 100755 index 0000000..325a190 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0087.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0088.png b/blue_noise_textures/256_256/LDR_RGBA_0088.png new file mode 100755 index 0000000..ef63f63 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0088.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0089.png b/blue_noise_textures/256_256/LDR_RGBA_0089.png new file mode 100755 index 0000000..c50e3f9 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0089.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0090.png b/blue_noise_textures/256_256/LDR_RGBA_0090.png new file mode 100755 index 0000000..82b0c94 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0090.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0091.png b/blue_noise_textures/256_256/LDR_RGBA_0091.png new file mode 100755 index 0000000..f7825d0 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0091.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0092.png b/blue_noise_textures/256_256/LDR_RGBA_0092.png new file mode 100755 index 0000000..cf6e9d1 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0092.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0093.png b/blue_noise_textures/256_256/LDR_RGBA_0093.png new file mode 100755 index 0000000..9dd3302 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0093.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0094.png b/blue_noise_textures/256_256/LDR_RGBA_0094.png new file mode 100755 index 0000000..1bc26a4 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0094.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0095.png b/blue_noise_textures/256_256/LDR_RGBA_0095.png new file mode 100755 index 0000000..aa64ee8 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0095.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0096.png b/blue_noise_textures/256_256/LDR_RGBA_0096.png new file mode 100755 index 0000000..0bc2ad5 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0096.png differ 
diff --git a/blue_noise_textures/256_256/LDR_RGBA_0097.png b/blue_noise_textures/256_256/LDR_RGBA_0097.png new file mode 100755 index 0000000..fba255e Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0097.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0098.png b/blue_noise_textures/256_256/LDR_RGBA_0098.png new file mode 100755 index 0000000..1c6e1d8 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0098.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0099.png b/blue_noise_textures/256_256/LDR_RGBA_0099.png new file mode 100755 index 0000000..dd92e36 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0099.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0100.png b/blue_noise_textures/256_256/LDR_RGBA_0100.png new file mode 100755 index 0000000..35f1e9e Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0100.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0101.png b/blue_noise_textures/256_256/LDR_RGBA_0101.png new file mode 100755 index 0000000..0b7f10b Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0101.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0102.png b/blue_noise_textures/256_256/LDR_RGBA_0102.png new file mode 100755 index 0000000..71da6cb Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0102.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0103.png b/blue_noise_textures/256_256/LDR_RGBA_0103.png new file mode 100755 index 0000000..2dc3225 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0103.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0104.png b/blue_noise_textures/256_256/LDR_RGBA_0104.png new file mode 100755 index 0000000..c904e2e Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0104.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0105.png b/blue_noise_textures/256_256/LDR_RGBA_0105.png new file mode 100755 index 0000000..cbbfb99 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0105.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0106.png b/blue_noise_textures/256_256/LDR_RGBA_0106.png new file mode 100755 index 0000000..9463e41 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0106.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0107.png b/blue_noise_textures/256_256/LDR_RGBA_0107.png new file mode 100755 index 0000000..f5c6ee0 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0107.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0108.png b/blue_noise_textures/256_256/LDR_RGBA_0108.png new file mode 100755 index 0000000..276b162 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0108.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0109.png b/blue_noise_textures/256_256/LDR_RGBA_0109.png new file mode 100755 index 0000000..0bff8eb Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0109.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0110.png b/blue_noise_textures/256_256/LDR_RGBA_0110.png new file mode 100755 index 0000000..17a77c5 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0110.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0111.png b/blue_noise_textures/256_256/LDR_RGBA_0111.png new file mode 100755 index 0000000..b6d3a98 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0111.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0112.png 
b/blue_noise_textures/256_256/LDR_RGBA_0112.png new file mode 100755 index 0000000..d43a955 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0112.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0113.png b/blue_noise_textures/256_256/LDR_RGBA_0113.png new file mode 100755 index 0000000..32d5b4d Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0113.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0114.png b/blue_noise_textures/256_256/LDR_RGBA_0114.png new file mode 100755 index 0000000..95f17a0 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0114.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0115.png b/blue_noise_textures/256_256/LDR_RGBA_0115.png new file mode 100755 index 0000000..124f174 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0115.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0116.png b/blue_noise_textures/256_256/LDR_RGBA_0116.png new file mode 100755 index 0000000..0577d8b Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0116.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0117.png b/blue_noise_textures/256_256/LDR_RGBA_0117.png new file mode 100755 index 0000000..5efcabe Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0117.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0118.png b/blue_noise_textures/256_256/LDR_RGBA_0118.png new file mode 100755 index 0000000..cec4be5 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0118.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0119.png b/blue_noise_textures/256_256/LDR_RGBA_0119.png new file mode 100755 index 0000000..19e85f0 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0119.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0120.png b/blue_noise_textures/256_256/LDR_RGBA_0120.png new file mode 100755 index 0000000..d5f907b Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0120.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0121.png b/blue_noise_textures/256_256/LDR_RGBA_0121.png new file mode 100755 index 0000000..0f07822 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0121.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0122.png b/blue_noise_textures/256_256/LDR_RGBA_0122.png new file mode 100755 index 0000000..f1fa364 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0122.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0123.png b/blue_noise_textures/256_256/LDR_RGBA_0123.png new file mode 100755 index 0000000..e284da5 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0123.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0124.png b/blue_noise_textures/256_256/LDR_RGBA_0124.png new file mode 100755 index 0000000..76f0bf1 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0124.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0125.png b/blue_noise_textures/256_256/LDR_RGBA_0125.png new file mode 100755 index 0000000..5dfbc9e Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0125.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0126.png b/blue_noise_textures/256_256/LDR_RGBA_0126.png new file mode 100755 index 0000000..0ff46f3 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0126.png differ diff --git a/blue_noise_textures/256_256/LDR_RGBA_0127.png b/blue_noise_textures/256_256/LDR_RGBA_0127.png new file 
mode 100755 index 0000000..88f5991 Binary files /dev/null and b/blue_noise_textures/256_256/LDR_RGBA_0127.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_0.png b/blue_noise_textures/32_32/HDR_LA_0.png new file mode 100644 index 0000000..de0d8e7 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_0.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_1.png b/blue_noise_textures/32_32/HDR_LA_1.png new file mode 100644 index 0000000..a603e95 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_1.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_10.png b/blue_noise_textures/32_32/HDR_LA_10.png new file mode 100644 index 0000000..1f2d0f7 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_10.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_11.png b/blue_noise_textures/32_32/HDR_LA_11.png new file mode 100644 index 0000000..c085c1e Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_11.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_12.png b/blue_noise_textures/32_32/HDR_LA_12.png new file mode 100644 index 0000000..87ec042 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_12.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_13.png b/blue_noise_textures/32_32/HDR_LA_13.png new file mode 100644 index 0000000..0fe394f Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_13.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_14.png b/blue_noise_textures/32_32/HDR_LA_14.png new file mode 100644 index 0000000..16d5a30 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_14.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_15.png b/blue_noise_textures/32_32/HDR_LA_15.png new file mode 100644 index 0000000..f0b5413 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_15.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_16.png b/blue_noise_textures/32_32/HDR_LA_16.png new file mode 100644 index 0000000..4c60722 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_16.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_17.png b/blue_noise_textures/32_32/HDR_LA_17.png new file mode 100644 index 0000000..25beb78 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_17.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_18.png b/blue_noise_textures/32_32/HDR_LA_18.png new file mode 100644 index 0000000..9abc458 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_18.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_19.png b/blue_noise_textures/32_32/HDR_LA_19.png new file mode 100644 index 0000000..6644019 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_19.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_2.png b/blue_noise_textures/32_32/HDR_LA_2.png new file mode 100644 index 0000000..f906e15 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_2.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_20.png b/blue_noise_textures/32_32/HDR_LA_20.png new file mode 100644 index 0000000..89e631a Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_20.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_21.png b/blue_noise_textures/32_32/HDR_LA_21.png new file mode 100644 index 0000000..e68169a Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_21.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_22.png b/blue_noise_textures/32_32/HDR_LA_22.png new file mode 100644 index 0000000..cebc146 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_22.png differ diff 
--git a/blue_noise_textures/32_32/HDR_LA_23.png b/blue_noise_textures/32_32/HDR_LA_23.png new file mode 100644 index 0000000..23d2588 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_23.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_24.png b/blue_noise_textures/32_32/HDR_LA_24.png new file mode 100644 index 0000000..42dd313 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_24.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_25.png b/blue_noise_textures/32_32/HDR_LA_25.png new file mode 100644 index 0000000..f5a7ae4 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_25.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_26.png b/blue_noise_textures/32_32/HDR_LA_26.png new file mode 100644 index 0000000..9568e05 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_26.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_27.png b/blue_noise_textures/32_32/HDR_LA_27.png new file mode 100644 index 0000000..e0c2adb Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_27.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_28.png b/blue_noise_textures/32_32/HDR_LA_28.png new file mode 100644 index 0000000..39ada4d Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_28.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_29.png b/blue_noise_textures/32_32/HDR_LA_29.png new file mode 100644 index 0000000..e19ce41 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_29.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_3.png b/blue_noise_textures/32_32/HDR_LA_3.png new file mode 100644 index 0000000..e943abe Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_3.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_30.png b/blue_noise_textures/32_32/HDR_LA_30.png new file mode 100644 index 0000000..fb76caf Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_30.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_31.png b/blue_noise_textures/32_32/HDR_LA_31.png new file mode 100644 index 0000000..b203618 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_31.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_32.png b/blue_noise_textures/32_32/HDR_LA_32.png new file mode 100644 index 0000000..640c744 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_32.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_33.png b/blue_noise_textures/32_32/HDR_LA_33.png new file mode 100644 index 0000000..c822a49 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_33.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_34.png b/blue_noise_textures/32_32/HDR_LA_34.png new file mode 100644 index 0000000..fcd3244 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_34.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_35.png b/blue_noise_textures/32_32/HDR_LA_35.png new file mode 100644 index 0000000..e463e61 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_35.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_36.png b/blue_noise_textures/32_32/HDR_LA_36.png new file mode 100644 index 0000000..1fb849a Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_36.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_37.png b/blue_noise_textures/32_32/HDR_LA_37.png new file mode 100644 index 0000000..1156606 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_37.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_38.png b/blue_noise_textures/32_32/HDR_LA_38.png new file mode 100644 index 
0000000..ed064e0 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_38.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_39.png b/blue_noise_textures/32_32/HDR_LA_39.png new file mode 100644 index 0000000..4395558 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_39.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_4.png b/blue_noise_textures/32_32/HDR_LA_4.png new file mode 100644 index 0000000..079bfe5 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_4.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_40.png b/blue_noise_textures/32_32/HDR_LA_40.png new file mode 100644 index 0000000..eaa5766 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_40.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_41.png b/blue_noise_textures/32_32/HDR_LA_41.png new file mode 100644 index 0000000..14a5aa3 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_41.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_42.png b/blue_noise_textures/32_32/HDR_LA_42.png new file mode 100644 index 0000000..7bc4bbd Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_42.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_43.png b/blue_noise_textures/32_32/HDR_LA_43.png new file mode 100644 index 0000000..d1ada0e Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_43.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_44.png b/blue_noise_textures/32_32/HDR_LA_44.png new file mode 100644 index 0000000..c549de6 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_44.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_45.png b/blue_noise_textures/32_32/HDR_LA_45.png new file mode 100644 index 0000000..abeadf1 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_45.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_46.png b/blue_noise_textures/32_32/HDR_LA_46.png new file mode 100644 index 0000000..4f7ed46 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_46.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_47.png b/blue_noise_textures/32_32/HDR_LA_47.png new file mode 100644 index 0000000..ea9d4ee Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_47.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_48.png b/blue_noise_textures/32_32/HDR_LA_48.png new file mode 100644 index 0000000..fee16b6 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_48.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_49.png b/blue_noise_textures/32_32/HDR_LA_49.png new file mode 100644 index 0000000..a195c39 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_49.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_5.png b/blue_noise_textures/32_32/HDR_LA_5.png new file mode 100644 index 0000000..52002d4 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_5.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_50.png b/blue_noise_textures/32_32/HDR_LA_50.png new file mode 100644 index 0000000..4d6292b Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_50.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_51.png b/blue_noise_textures/32_32/HDR_LA_51.png new file mode 100644 index 0000000..d7ab893 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_51.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_52.png b/blue_noise_textures/32_32/HDR_LA_52.png new file mode 100644 index 0000000..d44a67e Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_52.png differ diff --git 
a/blue_noise_textures/32_32/HDR_LA_53.png b/blue_noise_textures/32_32/HDR_LA_53.png new file mode 100644 index 0000000..ba6b5f2 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_53.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_54.png b/blue_noise_textures/32_32/HDR_LA_54.png new file mode 100644 index 0000000..708d6d4 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_54.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_55.png b/blue_noise_textures/32_32/HDR_LA_55.png new file mode 100644 index 0000000..f9be4e3 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_55.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_56.png b/blue_noise_textures/32_32/HDR_LA_56.png new file mode 100644 index 0000000..a9d3814 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_56.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_57.png b/blue_noise_textures/32_32/HDR_LA_57.png new file mode 100644 index 0000000..b44f285 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_57.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_58.png b/blue_noise_textures/32_32/HDR_LA_58.png new file mode 100644 index 0000000..ae2cddf Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_58.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_59.png b/blue_noise_textures/32_32/HDR_LA_59.png new file mode 100644 index 0000000..18dbd61 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_59.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_6.png b/blue_noise_textures/32_32/HDR_LA_6.png new file mode 100644 index 0000000..cee6f80 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_6.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_60.png b/blue_noise_textures/32_32/HDR_LA_60.png new file mode 100644 index 0000000..ca35d8f Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_60.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_61.png b/blue_noise_textures/32_32/HDR_LA_61.png new file mode 100644 index 0000000..bf519c4 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_61.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_62.png b/blue_noise_textures/32_32/HDR_LA_62.png new file mode 100644 index 0000000..072d9a8 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_62.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_63.png b/blue_noise_textures/32_32/HDR_LA_63.png new file mode 100644 index 0000000..473bc07 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_63.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_7.png b/blue_noise_textures/32_32/HDR_LA_7.png new file mode 100644 index 0000000..d1eb478 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_7.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_8.png b/blue_noise_textures/32_32/HDR_LA_8.png new file mode 100644 index 0000000..8bd53b9 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_8.png differ diff --git a/blue_noise_textures/32_32/HDR_LA_9.png b/blue_noise_textures/32_32/HDR_LA_9.png new file mode 100644 index 0000000..98d85b2 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_LA_9.png differ diff --git a/blue_noise_textures/32_32/HDR_L_0.png b/blue_noise_textures/32_32/HDR_L_0.png new file mode 100644 index 0000000..d399c58 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_0.png differ diff --git a/blue_noise_textures/32_32/HDR_L_1.png b/blue_noise_textures/32_32/HDR_L_1.png new file mode 100644 index 0000000..2c5ec63 Binary 
files /dev/null and b/blue_noise_textures/32_32/HDR_L_1.png differ diff --git a/blue_noise_textures/32_32/HDR_L_10.png b/blue_noise_textures/32_32/HDR_L_10.png new file mode 100644 index 0000000..73340be Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_10.png differ diff --git a/blue_noise_textures/32_32/HDR_L_11.png b/blue_noise_textures/32_32/HDR_L_11.png new file mode 100644 index 0000000..c1778bf Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_11.png differ diff --git a/blue_noise_textures/32_32/HDR_L_12.png b/blue_noise_textures/32_32/HDR_L_12.png new file mode 100644 index 0000000..b9aeb6d Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_12.png differ diff --git a/blue_noise_textures/32_32/HDR_L_13.png b/blue_noise_textures/32_32/HDR_L_13.png new file mode 100644 index 0000000..afbddf6 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_13.png differ diff --git a/blue_noise_textures/32_32/HDR_L_14.png b/blue_noise_textures/32_32/HDR_L_14.png new file mode 100644 index 0000000..cf06452 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_14.png differ diff --git a/blue_noise_textures/32_32/HDR_L_15.png b/blue_noise_textures/32_32/HDR_L_15.png new file mode 100644 index 0000000..3c16c61 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_15.png differ diff --git a/blue_noise_textures/32_32/HDR_L_16.png b/blue_noise_textures/32_32/HDR_L_16.png new file mode 100644 index 0000000..d11ff83 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_16.png differ diff --git a/blue_noise_textures/32_32/HDR_L_17.png b/blue_noise_textures/32_32/HDR_L_17.png new file mode 100644 index 0000000..c859f78 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_17.png differ diff --git a/blue_noise_textures/32_32/HDR_L_18.png b/blue_noise_textures/32_32/HDR_L_18.png new file mode 100644 index 0000000..c8b89aa Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_18.png differ diff --git a/blue_noise_textures/32_32/HDR_L_19.png b/blue_noise_textures/32_32/HDR_L_19.png new file mode 100644 index 0000000..ec33d63 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_19.png differ diff --git a/blue_noise_textures/32_32/HDR_L_2.png b/blue_noise_textures/32_32/HDR_L_2.png new file mode 100644 index 0000000..64ab8ea Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_2.png differ diff --git a/blue_noise_textures/32_32/HDR_L_20.png b/blue_noise_textures/32_32/HDR_L_20.png new file mode 100644 index 0000000..77a5896 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_20.png differ diff --git a/blue_noise_textures/32_32/HDR_L_21.png b/blue_noise_textures/32_32/HDR_L_21.png new file mode 100644 index 0000000..954badc Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_21.png differ diff --git a/blue_noise_textures/32_32/HDR_L_22.png b/blue_noise_textures/32_32/HDR_L_22.png new file mode 100644 index 0000000..02c2f62 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_22.png differ diff --git a/blue_noise_textures/32_32/HDR_L_23.png b/blue_noise_textures/32_32/HDR_L_23.png new file mode 100644 index 0000000..153bf4f Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_23.png differ diff --git a/blue_noise_textures/32_32/HDR_L_24.png b/blue_noise_textures/32_32/HDR_L_24.png new file mode 100644 index 0000000..880b8aa Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_24.png differ diff --git a/blue_noise_textures/32_32/HDR_L_25.png b/blue_noise_textures/32_32/HDR_L_25.png new 
file mode 100644 index 0000000..ac590ca Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_25.png differ diff --git a/blue_noise_textures/32_32/HDR_L_26.png b/blue_noise_textures/32_32/HDR_L_26.png new file mode 100644 index 0000000..9d24fec Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_26.png differ diff --git a/blue_noise_textures/32_32/HDR_L_27.png b/blue_noise_textures/32_32/HDR_L_27.png new file mode 100644 index 0000000..232ad25 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_27.png differ diff --git a/blue_noise_textures/32_32/HDR_L_28.png b/blue_noise_textures/32_32/HDR_L_28.png new file mode 100644 index 0000000..df89af5 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_28.png differ diff --git a/blue_noise_textures/32_32/HDR_L_29.png b/blue_noise_textures/32_32/HDR_L_29.png new file mode 100644 index 0000000..8e8bf65 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_29.png differ diff --git a/blue_noise_textures/32_32/HDR_L_3.png b/blue_noise_textures/32_32/HDR_L_3.png new file mode 100644 index 0000000..0bb716f Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_3.png differ diff --git a/blue_noise_textures/32_32/HDR_L_30.png b/blue_noise_textures/32_32/HDR_L_30.png new file mode 100644 index 0000000..c5c4fc4 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_30.png differ diff --git a/blue_noise_textures/32_32/HDR_L_31.png b/blue_noise_textures/32_32/HDR_L_31.png new file mode 100644 index 0000000..2b922e1 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_31.png differ diff --git a/blue_noise_textures/32_32/HDR_L_32.png b/blue_noise_textures/32_32/HDR_L_32.png new file mode 100644 index 0000000..30380ce Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_32.png differ diff --git a/blue_noise_textures/32_32/HDR_L_33.png b/blue_noise_textures/32_32/HDR_L_33.png new file mode 100644 index 0000000..56f170f Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_33.png differ diff --git a/blue_noise_textures/32_32/HDR_L_34.png b/blue_noise_textures/32_32/HDR_L_34.png new file mode 100644 index 0000000..571ed29 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_34.png differ diff --git a/blue_noise_textures/32_32/HDR_L_35.png b/blue_noise_textures/32_32/HDR_L_35.png new file mode 100644 index 0000000..79274bf Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_35.png differ diff --git a/blue_noise_textures/32_32/HDR_L_36.png b/blue_noise_textures/32_32/HDR_L_36.png new file mode 100644 index 0000000..8d12c78 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_36.png differ diff --git a/blue_noise_textures/32_32/HDR_L_37.png b/blue_noise_textures/32_32/HDR_L_37.png new file mode 100644 index 0000000..59a14f5 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_37.png differ diff --git a/blue_noise_textures/32_32/HDR_L_38.png b/blue_noise_textures/32_32/HDR_L_38.png new file mode 100644 index 0000000..530e996 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_38.png differ diff --git a/blue_noise_textures/32_32/HDR_L_39.png b/blue_noise_textures/32_32/HDR_L_39.png new file mode 100644 index 0000000..b559165 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_39.png differ diff --git a/blue_noise_textures/32_32/HDR_L_4.png b/blue_noise_textures/32_32/HDR_L_4.png new file mode 100644 index 0000000..7ae0e38 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_4.png differ diff --git a/blue_noise_textures/32_32/HDR_L_40.png 
b/blue_noise_textures/32_32/HDR_L_40.png new file mode 100644 index 0000000..8229918 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_40.png differ diff --git a/blue_noise_textures/32_32/HDR_L_41.png b/blue_noise_textures/32_32/HDR_L_41.png new file mode 100644 index 0000000..42e5ee3 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_41.png differ diff --git a/blue_noise_textures/32_32/HDR_L_42.png b/blue_noise_textures/32_32/HDR_L_42.png new file mode 100644 index 0000000..da3c5bc Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_42.png differ diff --git a/blue_noise_textures/32_32/HDR_L_43.png b/blue_noise_textures/32_32/HDR_L_43.png new file mode 100644 index 0000000..0d7763e Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_43.png differ diff --git a/blue_noise_textures/32_32/HDR_L_44.png b/blue_noise_textures/32_32/HDR_L_44.png new file mode 100644 index 0000000..09198c2 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_44.png differ diff --git a/blue_noise_textures/32_32/HDR_L_45.png b/blue_noise_textures/32_32/HDR_L_45.png new file mode 100644 index 0000000..39f7ace Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_45.png differ diff --git a/blue_noise_textures/32_32/HDR_L_46.png b/blue_noise_textures/32_32/HDR_L_46.png new file mode 100644 index 0000000..9f53dd8 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_46.png differ diff --git a/blue_noise_textures/32_32/HDR_L_47.png b/blue_noise_textures/32_32/HDR_L_47.png new file mode 100644 index 0000000..3bbcc54 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_47.png differ diff --git a/blue_noise_textures/32_32/HDR_L_48.png b/blue_noise_textures/32_32/HDR_L_48.png new file mode 100644 index 0000000..84685d4 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_48.png differ diff --git a/blue_noise_textures/32_32/HDR_L_49.png b/blue_noise_textures/32_32/HDR_L_49.png new file mode 100644 index 0000000..75e9e48 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_49.png differ diff --git a/blue_noise_textures/32_32/HDR_L_5.png b/blue_noise_textures/32_32/HDR_L_5.png new file mode 100644 index 0000000..991e8ed Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_5.png differ diff --git a/blue_noise_textures/32_32/HDR_L_50.png b/blue_noise_textures/32_32/HDR_L_50.png new file mode 100644 index 0000000..5917f91 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_50.png differ diff --git a/blue_noise_textures/32_32/HDR_L_51.png b/blue_noise_textures/32_32/HDR_L_51.png new file mode 100644 index 0000000..7254273 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_51.png differ diff --git a/blue_noise_textures/32_32/HDR_L_52.png b/blue_noise_textures/32_32/HDR_L_52.png new file mode 100644 index 0000000..62bb9a1 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_52.png differ diff --git a/blue_noise_textures/32_32/HDR_L_53.png b/blue_noise_textures/32_32/HDR_L_53.png new file mode 100644 index 0000000..95ad5cb Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_53.png differ diff --git a/blue_noise_textures/32_32/HDR_L_54.png b/blue_noise_textures/32_32/HDR_L_54.png new file mode 100644 index 0000000..bd546a7 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_54.png differ diff --git a/blue_noise_textures/32_32/HDR_L_55.png b/blue_noise_textures/32_32/HDR_L_55.png new file mode 100644 index 0000000..96c2e96 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_55.png differ diff 
--git a/blue_noise_textures/32_32/HDR_L_56.png b/blue_noise_textures/32_32/HDR_L_56.png new file mode 100644 index 0000000..dbe9c8c Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_56.png differ diff --git a/blue_noise_textures/32_32/HDR_L_57.png b/blue_noise_textures/32_32/HDR_L_57.png new file mode 100644 index 0000000..4da420c Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_57.png differ diff --git a/blue_noise_textures/32_32/HDR_L_58.png b/blue_noise_textures/32_32/HDR_L_58.png new file mode 100644 index 0000000..fc18c58 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_58.png differ diff --git a/blue_noise_textures/32_32/HDR_L_59.png b/blue_noise_textures/32_32/HDR_L_59.png new file mode 100644 index 0000000..5460a73 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_59.png differ diff --git a/blue_noise_textures/32_32/HDR_L_6.png b/blue_noise_textures/32_32/HDR_L_6.png new file mode 100644 index 0000000..ea340d3 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_6.png differ diff --git a/blue_noise_textures/32_32/HDR_L_60.png b/blue_noise_textures/32_32/HDR_L_60.png new file mode 100644 index 0000000..fbbce89 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_60.png differ diff --git a/blue_noise_textures/32_32/HDR_L_61.png b/blue_noise_textures/32_32/HDR_L_61.png new file mode 100644 index 0000000..e1f0294 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_61.png differ diff --git a/blue_noise_textures/32_32/HDR_L_62.png b/blue_noise_textures/32_32/HDR_L_62.png new file mode 100644 index 0000000..bdb6ff0 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_62.png differ diff --git a/blue_noise_textures/32_32/HDR_L_63.png b/blue_noise_textures/32_32/HDR_L_63.png new file mode 100644 index 0000000..679f6c9 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_63.png differ diff --git a/blue_noise_textures/32_32/HDR_L_7.png b/blue_noise_textures/32_32/HDR_L_7.png new file mode 100644 index 0000000..17e2c77 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_7.png differ diff --git a/blue_noise_textures/32_32/HDR_L_8.png b/blue_noise_textures/32_32/HDR_L_8.png new file mode 100644 index 0000000..f997203 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_8.png differ diff --git a/blue_noise_textures/32_32/HDR_L_9.png b/blue_noise_textures/32_32/HDR_L_9.png new file mode 100644 index 0000000..d425e18 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_L_9.png differ diff --git a/blue_noise_textures/32_32/HDR_RGBA_0.png b/blue_noise_textures/32_32/HDR_RGBA_0.png new file mode 100644 index 0000000..6def02e Binary files /dev/null and b/blue_noise_textures/32_32/HDR_RGBA_0.png differ diff --git a/blue_noise_textures/32_32/HDR_RGBA_1.png b/blue_noise_textures/32_32/HDR_RGBA_1.png new file mode 100644 index 0000000..9de1c81 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_RGBA_1.png differ diff --git a/blue_noise_textures/32_32/HDR_RGBA_10.png b/blue_noise_textures/32_32/HDR_RGBA_10.png new file mode 100644 index 0000000..0c10243 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_RGBA_10.png differ diff --git a/blue_noise_textures/32_32/HDR_RGBA_11.png b/blue_noise_textures/32_32/HDR_RGBA_11.png new file mode 100644 index 0000000..bfb5527 Binary files /dev/null and b/blue_noise_textures/32_32/HDR_RGBA_11.png differ diff --git a/blue_noise_textures/32_32/HDR_RGBA_12.png b/blue_noise_textures/32_32/HDR_RGBA_12.png new file mode 100644 index 0000000..a51de7d Binary 
files /dev/null and b/blue_noise_textures/32_32/HDR_RGBA_12.png differ
[binary files: this portion of the diff adds new 32x32 blue-noise textures under blue_noise_textures/32_32/, each as a new PNG ("new file mode 100644", "Binary files /dev/null and b/<path> differ"). The entries covered here are HDR_RGBA_2-9 and 12-63, HDR_RGB_0-63, LDR_LLL1_0-63, LDR_RG01_0-63, LDR_RGB1_0-63, and LDR_RGBA_0-28; per-file blob hashes are not reproduced.]
diff --git
a/blue_noise_textures/32_32/LDR_RGBA_29.png b/blue_noise_textures/32_32/LDR_RGBA_29.png new file mode 100644 index 0000000..c15b15f Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_29.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_3.png b/blue_noise_textures/32_32/LDR_RGBA_3.png new file mode 100644 index 0000000..622df55 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_3.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_30.png b/blue_noise_textures/32_32/LDR_RGBA_30.png new file mode 100644 index 0000000..5bf8659 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_30.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_31.png b/blue_noise_textures/32_32/LDR_RGBA_31.png new file mode 100644 index 0000000..a771f5b Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_31.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_32.png b/blue_noise_textures/32_32/LDR_RGBA_32.png new file mode 100644 index 0000000..049e447 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_32.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_33.png b/blue_noise_textures/32_32/LDR_RGBA_33.png new file mode 100644 index 0000000..906dc48 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_33.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_34.png b/blue_noise_textures/32_32/LDR_RGBA_34.png new file mode 100644 index 0000000..3535dad Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_34.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_35.png b/blue_noise_textures/32_32/LDR_RGBA_35.png new file mode 100644 index 0000000..8fab9e9 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_35.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_36.png b/blue_noise_textures/32_32/LDR_RGBA_36.png new file mode 100644 index 0000000..6c1d267 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_36.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_37.png b/blue_noise_textures/32_32/LDR_RGBA_37.png new file mode 100644 index 0000000..ab7e93c Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_37.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_38.png b/blue_noise_textures/32_32/LDR_RGBA_38.png new file mode 100644 index 0000000..d4eaaf5 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_38.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_39.png b/blue_noise_textures/32_32/LDR_RGBA_39.png new file mode 100644 index 0000000..0698700 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_39.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_4.png b/blue_noise_textures/32_32/LDR_RGBA_4.png new file mode 100644 index 0000000..cc21e57 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_4.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_40.png b/blue_noise_textures/32_32/LDR_RGBA_40.png new file mode 100644 index 0000000..67605d9 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_40.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_41.png b/blue_noise_textures/32_32/LDR_RGBA_41.png new file mode 100644 index 0000000..d85c495 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_41.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_42.png b/blue_noise_textures/32_32/LDR_RGBA_42.png new file mode 100644 index 0000000..7cc739c Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_42.png differ diff --git 
a/blue_noise_textures/32_32/LDR_RGBA_43.png b/blue_noise_textures/32_32/LDR_RGBA_43.png new file mode 100644 index 0000000..9809d6b Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_43.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_44.png b/blue_noise_textures/32_32/LDR_RGBA_44.png new file mode 100644 index 0000000..f1bdeb3 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_44.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_45.png b/blue_noise_textures/32_32/LDR_RGBA_45.png new file mode 100644 index 0000000..756b5c7 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_45.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_46.png b/blue_noise_textures/32_32/LDR_RGBA_46.png new file mode 100644 index 0000000..fb57d65 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_46.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_47.png b/blue_noise_textures/32_32/LDR_RGBA_47.png new file mode 100644 index 0000000..98fe871 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_47.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_48.png b/blue_noise_textures/32_32/LDR_RGBA_48.png new file mode 100644 index 0000000..a0203ab Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_48.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_49.png b/blue_noise_textures/32_32/LDR_RGBA_49.png new file mode 100644 index 0000000..4a6891a Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_49.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_5.png b/blue_noise_textures/32_32/LDR_RGBA_5.png new file mode 100644 index 0000000..6bd76ec Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_5.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_50.png b/blue_noise_textures/32_32/LDR_RGBA_50.png new file mode 100644 index 0000000..0fdd72b Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_50.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_51.png b/blue_noise_textures/32_32/LDR_RGBA_51.png new file mode 100644 index 0000000..36ecb57 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_51.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_52.png b/blue_noise_textures/32_32/LDR_RGBA_52.png new file mode 100644 index 0000000..4bf8540 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_52.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_53.png b/blue_noise_textures/32_32/LDR_RGBA_53.png new file mode 100644 index 0000000..992b184 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_53.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_54.png b/blue_noise_textures/32_32/LDR_RGBA_54.png new file mode 100644 index 0000000..31387de Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_54.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_55.png b/blue_noise_textures/32_32/LDR_RGBA_55.png new file mode 100644 index 0000000..bfffa2c Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_55.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_56.png b/blue_noise_textures/32_32/LDR_RGBA_56.png new file mode 100644 index 0000000..9234db5 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_56.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_57.png b/blue_noise_textures/32_32/LDR_RGBA_57.png new file mode 100644 index 0000000..98870b4 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_57.png differ diff --git 
a/blue_noise_textures/32_32/LDR_RGBA_58.png b/blue_noise_textures/32_32/LDR_RGBA_58.png new file mode 100644 index 0000000..7523633 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_58.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_59.png b/blue_noise_textures/32_32/LDR_RGBA_59.png new file mode 100644 index 0000000..87165ca Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_59.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_6.png b/blue_noise_textures/32_32/LDR_RGBA_6.png new file mode 100644 index 0000000..be5ae0c Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_6.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_60.png b/blue_noise_textures/32_32/LDR_RGBA_60.png new file mode 100644 index 0000000..88e6193 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_60.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_61.png b/blue_noise_textures/32_32/LDR_RGBA_61.png new file mode 100644 index 0000000..42b904a Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_61.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_62.png b/blue_noise_textures/32_32/LDR_RGBA_62.png new file mode 100644 index 0000000..6d463f9 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_62.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_63.png b/blue_noise_textures/32_32/LDR_RGBA_63.png new file mode 100644 index 0000000..254810f Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_63.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_7.png b/blue_noise_textures/32_32/LDR_RGBA_7.png new file mode 100644 index 0000000..fb06de2 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_7.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_8.png b/blue_noise_textures/32_32/LDR_RGBA_8.png new file mode 100644 index 0000000..d368108 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_8.png differ diff --git a/blue_noise_textures/32_32/LDR_RGBA_9.png b/blue_noise_textures/32_32/LDR_RGBA_9.png new file mode 100644 index 0000000..1777613 Binary files /dev/null and b/blue_noise_textures/32_32/LDR_RGBA_9.png differ diff --git a/blue_noise_textures/512_512/LDR_LLL1_0.png b/blue_noise_textures/512_512/LDR_LLL1_0.png new file mode 100644 index 0000000..e7d9173 Binary files /dev/null and b/blue_noise_textures/512_512/LDR_LLL1_0.png differ diff --git a/blue_noise_textures/512_512/LDR_RG01_0.png b/blue_noise_textures/512_512/LDR_RG01_0.png new file mode 100644 index 0000000..87bd792 Binary files /dev/null and b/blue_noise_textures/512_512/LDR_RG01_0.png differ diff --git a/blue_noise_textures/512_512/LDR_RGB1_0.png b/blue_noise_textures/512_512/LDR_RGB1_0.png new file mode 100644 index 0000000..3832805 Binary files /dev/null and b/blue_noise_textures/512_512/LDR_RGB1_0.png differ diff --git a/blue_noise_textures/512_512/LDR_RGBA_0.png b/blue_noise_textures/512_512/LDR_RGBA_0.png new file mode 100644 index 0000000..d1c768d Binary files /dev/null and b/blue_noise_textures/512_512/LDR_RGBA_0.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_0.png b/blue_noise_textures/64_64/HDR_LA_0.png new file mode 100644 index 0000000..8dc9376 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_0.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_1.png b/blue_noise_textures/64_64/HDR_LA_1.png new file mode 100644 index 0000000..54f154c Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_1.png differ diff --git 
a/blue_noise_textures/64_64/HDR_LA_10.png b/blue_noise_textures/64_64/HDR_LA_10.png new file mode 100644 index 0000000..2a02170 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_10.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_11.png b/blue_noise_textures/64_64/HDR_LA_11.png new file mode 100644 index 0000000..dca8d80 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_11.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_12.png b/blue_noise_textures/64_64/HDR_LA_12.png new file mode 100644 index 0000000..adddb7b Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_12.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_13.png b/blue_noise_textures/64_64/HDR_LA_13.png new file mode 100644 index 0000000..fa7a0c5 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_13.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_14.png b/blue_noise_textures/64_64/HDR_LA_14.png new file mode 100644 index 0000000..fb8ff7d Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_14.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_15.png b/blue_noise_textures/64_64/HDR_LA_15.png new file mode 100644 index 0000000..e2d987e Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_15.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_16.png b/blue_noise_textures/64_64/HDR_LA_16.png new file mode 100644 index 0000000..5127613 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_16.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_17.png b/blue_noise_textures/64_64/HDR_LA_17.png new file mode 100644 index 0000000..8e26510 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_17.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_18.png b/blue_noise_textures/64_64/HDR_LA_18.png new file mode 100644 index 0000000..368e7a2 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_18.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_19.png b/blue_noise_textures/64_64/HDR_LA_19.png new file mode 100644 index 0000000..d48ad6a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_19.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_2.png b/blue_noise_textures/64_64/HDR_LA_2.png new file mode 100644 index 0000000..ed436e5 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_2.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_20.png b/blue_noise_textures/64_64/HDR_LA_20.png new file mode 100644 index 0000000..68a5b9a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_20.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_21.png b/blue_noise_textures/64_64/HDR_LA_21.png new file mode 100644 index 0000000..6b3be65 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_21.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_22.png b/blue_noise_textures/64_64/HDR_LA_22.png new file mode 100644 index 0000000..b40df9f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_22.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_23.png b/blue_noise_textures/64_64/HDR_LA_23.png new file mode 100644 index 0000000..dfa5510 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_23.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_24.png b/blue_noise_textures/64_64/HDR_LA_24.png new file mode 100644 index 0000000..363db5d Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_24.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_25.png b/blue_noise_textures/64_64/HDR_LA_25.png new file mode 100644 index 
0000000..11163ac Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_25.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_26.png b/blue_noise_textures/64_64/HDR_LA_26.png new file mode 100644 index 0000000..1714a68 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_26.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_27.png b/blue_noise_textures/64_64/HDR_LA_27.png new file mode 100644 index 0000000..a2f1da5 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_27.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_28.png b/blue_noise_textures/64_64/HDR_LA_28.png new file mode 100644 index 0000000..70bc8d2 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_28.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_29.png b/blue_noise_textures/64_64/HDR_LA_29.png new file mode 100644 index 0000000..6eef3e0 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_29.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_3.png b/blue_noise_textures/64_64/HDR_LA_3.png new file mode 100644 index 0000000..a236c24 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_3.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_30.png b/blue_noise_textures/64_64/HDR_LA_30.png new file mode 100644 index 0000000..18931e1 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_30.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_31.png b/blue_noise_textures/64_64/HDR_LA_31.png new file mode 100644 index 0000000..b3d1a4e Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_31.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_32.png b/blue_noise_textures/64_64/HDR_LA_32.png new file mode 100644 index 0000000..876b887 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_32.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_33.png b/blue_noise_textures/64_64/HDR_LA_33.png new file mode 100644 index 0000000..deeb71a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_33.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_34.png b/blue_noise_textures/64_64/HDR_LA_34.png new file mode 100644 index 0000000..7eeb0bb Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_34.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_35.png b/blue_noise_textures/64_64/HDR_LA_35.png new file mode 100644 index 0000000..bf0b111 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_35.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_36.png b/blue_noise_textures/64_64/HDR_LA_36.png new file mode 100644 index 0000000..cecd34c Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_36.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_37.png b/blue_noise_textures/64_64/HDR_LA_37.png new file mode 100644 index 0000000..2116007 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_37.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_38.png b/blue_noise_textures/64_64/HDR_LA_38.png new file mode 100644 index 0000000..335f080 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_38.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_39.png b/blue_noise_textures/64_64/HDR_LA_39.png new file mode 100644 index 0000000..754459b Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_39.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_4.png b/blue_noise_textures/64_64/HDR_LA_4.png new file mode 100644 index 0000000..dc55ac4 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_4.png differ diff --git 
a/blue_noise_textures/64_64/HDR_LA_40.png b/blue_noise_textures/64_64/HDR_LA_40.png new file mode 100644 index 0000000..12258c6 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_40.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_41.png b/blue_noise_textures/64_64/HDR_LA_41.png new file mode 100644 index 0000000..e042197 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_41.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_42.png b/blue_noise_textures/64_64/HDR_LA_42.png new file mode 100644 index 0000000..b192a79 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_42.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_43.png b/blue_noise_textures/64_64/HDR_LA_43.png new file mode 100644 index 0000000..9c6396f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_43.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_44.png b/blue_noise_textures/64_64/HDR_LA_44.png new file mode 100644 index 0000000..f2a8dd6 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_44.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_45.png b/blue_noise_textures/64_64/HDR_LA_45.png new file mode 100644 index 0000000..46b43e5 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_45.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_46.png b/blue_noise_textures/64_64/HDR_LA_46.png new file mode 100644 index 0000000..c08a877 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_46.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_47.png b/blue_noise_textures/64_64/HDR_LA_47.png new file mode 100644 index 0000000..5d736bb Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_47.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_48.png b/blue_noise_textures/64_64/HDR_LA_48.png new file mode 100644 index 0000000..9cc25ac Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_48.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_49.png b/blue_noise_textures/64_64/HDR_LA_49.png new file mode 100644 index 0000000..2a4f83b Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_49.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_5.png b/blue_noise_textures/64_64/HDR_LA_5.png new file mode 100644 index 0000000..3c296a0 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_5.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_50.png b/blue_noise_textures/64_64/HDR_LA_50.png new file mode 100644 index 0000000..bbc7af6 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_50.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_51.png b/blue_noise_textures/64_64/HDR_LA_51.png new file mode 100644 index 0000000..6c3fea3 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_51.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_52.png b/blue_noise_textures/64_64/HDR_LA_52.png new file mode 100644 index 0000000..1e03c7f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_52.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_53.png b/blue_noise_textures/64_64/HDR_LA_53.png new file mode 100644 index 0000000..a777698 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_53.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_54.png b/blue_noise_textures/64_64/HDR_LA_54.png new file mode 100644 index 0000000..34bbe1f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_54.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_55.png b/blue_noise_textures/64_64/HDR_LA_55.png new file mode 100644 index 
0000000..4e7c4c6 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_55.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_56.png b/blue_noise_textures/64_64/HDR_LA_56.png new file mode 100644 index 0000000..bb93f95 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_56.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_57.png b/blue_noise_textures/64_64/HDR_LA_57.png new file mode 100644 index 0000000..5e88f5b Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_57.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_58.png b/blue_noise_textures/64_64/HDR_LA_58.png new file mode 100644 index 0000000..1deaf99 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_58.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_59.png b/blue_noise_textures/64_64/HDR_LA_59.png new file mode 100644 index 0000000..103a6ce Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_59.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_6.png b/blue_noise_textures/64_64/HDR_LA_6.png new file mode 100644 index 0000000..9f82bc2 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_6.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_60.png b/blue_noise_textures/64_64/HDR_LA_60.png new file mode 100644 index 0000000..7ba7a09 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_60.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_61.png b/blue_noise_textures/64_64/HDR_LA_61.png new file mode 100644 index 0000000..5391106 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_61.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_62.png b/blue_noise_textures/64_64/HDR_LA_62.png new file mode 100644 index 0000000..e161cdf Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_62.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_63.png b/blue_noise_textures/64_64/HDR_LA_63.png new file mode 100644 index 0000000..9d61e9b Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_63.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_7.png b/blue_noise_textures/64_64/HDR_LA_7.png new file mode 100644 index 0000000..67511d8 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_7.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_8.png b/blue_noise_textures/64_64/HDR_LA_8.png new file mode 100644 index 0000000..80361f1 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_8.png differ diff --git a/blue_noise_textures/64_64/HDR_LA_9.png b/blue_noise_textures/64_64/HDR_LA_9.png new file mode 100644 index 0000000..c99d88c Binary files /dev/null and b/blue_noise_textures/64_64/HDR_LA_9.png differ diff --git a/blue_noise_textures/64_64/HDR_L_0.png b/blue_noise_textures/64_64/HDR_L_0.png new file mode 100644 index 0000000..5730c03 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_0.png differ diff --git a/blue_noise_textures/64_64/HDR_L_1.png b/blue_noise_textures/64_64/HDR_L_1.png new file mode 100644 index 0000000..596dfa0 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_1.png differ diff --git a/blue_noise_textures/64_64/HDR_L_10.png b/blue_noise_textures/64_64/HDR_L_10.png new file mode 100644 index 0000000..5a75021 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_10.png differ diff --git a/blue_noise_textures/64_64/HDR_L_11.png b/blue_noise_textures/64_64/HDR_L_11.png new file mode 100644 index 0000000..569d80a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_11.png differ diff --git a/blue_noise_textures/64_64/HDR_L_12.png 
b/blue_noise_textures/64_64/HDR_L_12.png new file mode 100644 index 0000000..fe2131a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_12.png differ diff --git a/blue_noise_textures/64_64/HDR_L_13.png b/blue_noise_textures/64_64/HDR_L_13.png new file mode 100644 index 0000000..9dc38ff Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_13.png differ diff --git a/blue_noise_textures/64_64/HDR_L_14.png b/blue_noise_textures/64_64/HDR_L_14.png new file mode 100644 index 0000000..f7fbbec Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_14.png differ diff --git a/blue_noise_textures/64_64/HDR_L_15.png b/blue_noise_textures/64_64/HDR_L_15.png new file mode 100644 index 0000000..090b15f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_15.png differ diff --git a/blue_noise_textures/64_64/HDR_L_16.png b/blue_noise_textures/64_64/HDR_L_16.png new file mode 100644 index 0000000..eaa6d81 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_16.png differ diff --git a/blue_noise_textures/64_64/HDR_L_17.png b/blue_noise_textures/64_64/HDR_L_17.png new file mode 100644 index 0000000..ddf9b29 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_17.png differ diff --git a/blue_noise_textures/64_64/HDR_L_18.png b/blue_noise_textures/64_64/HDR_L_18.png new file mode 100644 index 0000000..5f7fc88 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_18.png differ diff --git a/blue_noise_textures/64_64/HDR_L_19.png b/blue_noise_textures/64_64/HDR_L_19.png new file mode 100644 index 0000000..21ce206 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_19.png differ diff --git a/blue_noise_textures/64_64/HDR_L_2.png b/blue_noise_textures/64_64/HDR_L_2.png new file mode 100644 index 0000000..6cdf639 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_2.png differ diff --git a/blue_noise_textures/64_64/HDR_L_20.png b/blue_noise_textures/64_64/HDR_L_20.png new file mode 100644 index 0000000..c316125 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_20.png differ diff --git a/blue_noise_textures/64_64/HDR_L_21.png b/blue_noise_textures/64_64/HDR_L_21.png new file mode 100644 index 0000000..b29b803 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_21.png differ diff --git a/blue_noise_textures/64_64/HDR_L_22.png b/blue_noise_textures/64_64/HDR_L_22.png new file mode 100644 index 0000000..abe7edb Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_22.png differ diff --git a/blue_noise_textures/64_64/HDR_L_23.png b/blue_noise_textures/64_64/HDR_L_23.png new file mode 100644 index 0000000..7c42d17 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_23.png differ diff --git a/blue_noise_textures/64_64/HDR_L_24.png b/blue_noise_textures/64_64/HDR_L_24.png new file mode 100644 index 0000000..242ac47 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_24.png differ diff --git a/blue_noise_textures/64_64/HDR_L_25.png b/blue_noise_textures/64_64/HDR_L_25.png new file mode 100644 index 0000000..61bc628 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_25.png differ diff --git a/blue_noise_textures/64_64/HDR_L_26.png b/blue_noise_textures/64_64/HDR_L_26.png new file mode 100644 index 0000000..a64222d Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_26.png differ diff --git a/blue_noise_textures/64_64/HDR_L_27.png b/blue_noise_textures/64_64/HDR_L_27.png new file mode 100644 index 0000000..5d70c25 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_27.png differ diff 
--git a/blue_noise_textures/64_64/HDR_L_28.png b/blue_noise_textures/64_64/HDR_L_28.png new file mode 100644 index 0000000..ca84abb Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_28.png differ diff --git a/blue_noise_textures/64_64/HDR_L_29.png b/blue_noise_textures/64_64/HDR_L_29.png new file mode 100644 index 0000000..8181e94 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_29.png differ diff --git a/blue_noise_textures/64_64/HDR_L_3.png b/blue_noise_textures/64_64/HDR_L_3.png new file mode 100644 index 0000000..b9e476f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_3.png differ diff --git a/blue_noise_textures/64_64/HDR_L_30.png b/blue_noise_textures/64_64/HDR_L_30.png new file mode 100644 index 0000000..8d78550 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_30.png differ diff --git a/blue_noise_textures/64_64/HDR_L_31.png b/blue_noise_textures/64_64/HDR_L_31.png new file mode 100644 index 0000000..92c8f4f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_31.png differ diff --git a/blue_noise_textures/64_64/HDR_L_32.png b/blue_noise_textures/64_64/HDR_L_32.png new file mode 100644 index 0000000..342b0fd Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_32.png differ diff --git a/blue_noise_textures/64_64/HDR_L_33.png b/blue_noise_textures/64_64/HDR_L_33.png new file mode 100644 index 0000000..7b556cc Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_33.png differ diff --git a/blue_noise_textures/64_64/HDR_L_34.png b/blue_noise_textures/64_64/HDR_L_34.png new file mode 100644 index 0000000..93a6a79 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_34.png differ diff --git a/blue_noise_textures/64_64/HDR_L_35.png b/blue_noise_textures/64_64/HDR_L_35.png new file mode 100644 index 0000000..22c1272 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_35.png differ diff --git a/blue_noise_textures/64_64/HDR_L_36.png b/blue_noise_textures/64_64/HDR_L_36.png new file mode 100644 index 0000000..3a09ecf Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_36.png differ diff --git a/blue_noise_textures/64_64/HDR_L_37.png b/blue_noise_textures/64_64/HDR_L_37.png new file mode 100644 index 0000000..64ec78f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_37.png differ diff --git a/blue_noise_textures/64_64/HDR_L_38.png b/blue_noise_textures/64_64/HDR_L_38.png new file mode 100644 index 0000000..ad2162f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_38.png differ diff --git a/blue_noise_textures/64_64/HDR_L_39.png b/blue_noise_textures/64_64/HDR_L_39.png new file mode 100644 index 0000000..a06b071 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_39.png differ diff --git a/blue_noise_textures/64_64/HDR_L_4.png b/blue_noise_textures/64_64/HDR_L_4.png new file mode 100644 index 0000000..8679a35 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_4.png differ diff --git a/blue_noise_textures/64_64/HDR_L_40.png b/blue_noise_textures/64_64/HDR_L_40.png new file mode 100644 index 0000000..c7d7bff Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_40.png differ diff --git a/blue_noise_textures/64_64/HDR_L_41.png b/blue_noise_textures/64_64/HDR_L_41.png new file mode 100644 index 0000000..847557d Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_41.png differ diff --git a/blue_noise_textures/64_64/HDR_L_42.png b/blue_noise_textures/64_64/HDR_L_42.png new file mode 100644 index 0000000..76d3961 Binary files /dev/null and 
b/blue_noise_textures/64_64/HDR_L_42.png differ diff --git a/blue_noise_textures/64_64/HDR_L_43.png b/blue_noise_textures/64_64/HDR_L_43.png new file mode 100644 index 0000000..5c77c5a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_43.png differ diff --git a/blue_noise_textures/64_64/HDR_L_44.png b/blue_noise_textures/64_64/HDR_L_44.png new file mode 100644 index 0000000..1e21207 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_44.png differ diff --git a/blue_noise_textures/64_64/HDR_L_45.png b/blue_noise_textures/64_64/HDR_L_45.png new file mode 100644 index 0000000..de2becc Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_45.png differ diff --git a/blue_noise_textures/64_64/HDR_L_46.png b/blue_noise_textures/64_64/HDR_L_46.png new file mode 100644 index 0000000..c208869 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_46.png differ diff --git a/blue_noise_textures/64_64/HDR_L_47.png b/blue_noise_textures/64_64/HDR_L_47.png new file mode 100644 index 0000000..1f52d77 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_47.png differ diff --git a/blue_noise_textures/64_64/HDR_L_48.png b/blue_noise_textures/64_64/HDR_L_48.png new file mode 100644 index 0000000..7a4ab95 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_48.png differ diff --git a/blue_noise_textures/64_64/HDR_L_49.png b/blue_noise_textures/64_64/HDR_L_49.png new file mode 100644 index 0000000..01ffaca Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_49.png differ diff --git a/blue_noise_textures/64_64/HDR_L_5.png b/blue_noise_textures/64_64/HDR_L_5.png new file mode 100644 index 0000000..39f965a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_5.png differ diff --git a/blue_noise_textures/64_64/HDR_L_50.png b/blue_noise_textures/64_64/HDR_L_50.png new file mode 100644 index 0000000..328dc08 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_50.png differ diff --git a/blue_noise_textures/64_64/HDR_L_51.png b/blue_noise_textures/64_64/HDR_L_51.png new file mode 100644 index 0000000..c12cd2c Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_51.png differ diff --git a/blue_noise_textures/64_64/HDR_L_52.png b/blue_noise_textures/64_64/HDR_L_52.png new file mode 100644 index 0000000..f76cb27 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_52.png differ diff --git a/blue_noise_textures/64_64/HDR_L_53.png b/blue_noise_textures/64_64/HDR_L_53.png new file mode 100644 index 0000000..732322c Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_53.png differ diff --git a/blue_noise_textures/64_64/HDR_L_54.png b/blue_noise_textures/64_64/HDR_L_54.png new file mode 100644 index 0000000..5286ea6 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_54.png differ diff --git a/blue_noise_textures/64_64/HDR_L_55.png b/blue_noise_textures/64_64/HDR_L_55.png new file mode 100644 index 0000000..824a36e Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_55.png differ diff --git a/blue_noise_textures/64_64/HDR_L_56.png b/blue_noise_textures/64_64/HDR_L_56.png new file mode 100644 index 0000000..ef102d0 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_56.png differ diff --git a/blue_noise_textures/64_64/HDR_L_57.png b/blue_noise_textures/64_64/HDR_L_57.png new file mode 100644 index 0000000..356faa3 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_57.png differ diff --git a/blue_noise_textures/64_64/HDR_L_58.png b/blue_noise_textures/64_64/HDR_L_58.png new file mode 100644 
index 0000000..d832e1a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_58.png differ diff --git a/blue_noise_textures/64_64/HDR_L_59.png b/blue_noise_textures/64_64/HDR_L_59.png new file mode 100644 index 0000000..84941d0 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_59.png differ diff --git a/blue_noise_textures/64_64/HDR_L_6.png b/blue_noise_textures/64_64/HDR_L_6.png new file mode 100644 index 0000000..101d253 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_6.png differ diff --git a/blue_noise_textures/64_64/HDR_L_60.png b/blue_noise_textures/64_64/HDR_L_60.png new file mode 100644 index 0000000..5b27a7d Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_60.png differ diff --git a/blue_noise_textures/64_64/HDR_L_61.png b/blue_noise_textures/64_64/HDR_L_61.png new file mode 100644 index 0000000..e0b4e5c Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_61.png differ diff --git a/blue_noise_textures/64_64/HDR_L_62.png b/blue_noise_textures/64_64/HDR_L_62.png new file mode 100644 index 0000000..a16223a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_62.png differ diff --git a/blue_noise_textures/64_64/HDR_L_63.png b/blue_noise_textures/64_64/HDR_L_63.png new file mode 100644 index 0000000..ee50763 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_63.png differ diff --git a/blue_noise_textures/64_64/HDR_L_7.png b/blue_noise_textures/64_64/HDR_L_7.png new file mode 100644 index 0000000..509f4f9 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_7.png differ diff --git a/blue_noise_textures/64_64/HDR_L_8.png b/blue_noise_textures/64_64/HDR_L_8.png new file mode 100644 index 0000000..6559f34 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_8.png differ diff --git a/blue_noise_textures/64_64/HDR_L_9.png b/blue_noise_textures/64_64/HDR_L_9.png new file mode 100644 index 0000000..8b803e1 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_L_9.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_0.png b/blue_noise_textures/64_64/HDR_RGBA_0.png new file mode 100644 index 0000000..4ad6d46 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_0.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_1.png b/blue_noise_textures/64_64/HDR_RGBA_1.png new file mode 100644 index 0000000..e81a8e1 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_1.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_10.png b/blue_noise_textures/64_64/HDR_RGBA_10.png new file mode 100644 index 0000000..f72f89f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_10.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_11.png b/blue_noise_textures/64_64/HDR_RGBA_11.png new file mode 100644 index 0000000..25f1dac Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_11.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_12.png b/blue_noise_textures/64_64/HDR_RGBA_12.png new file mode 100644 index 0000000..f2dd8a5 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_12.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_13.png b/blue_noise_textures/64_64/HDR_RGBA_13.png new file mode 100644 index 0000000..ca8fec3 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_13.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_14.png b/blue_noise_textures/64_64/HDR_RGBA_14.png new file mode 100644 index 0000000..3a1ee7c Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_14.png differ diff --git 
a/blue_noise_textures/64_64/HDR_RGBA_15.png b/blue_noise_textures/64_64/HDR_RGBA_15.png new file mode 100644 index 0000000..06d9f76 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_15.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_16.png b/blue_noise_textures/64_64/HDR_RGBA_16.png new file mode 100644 index 0000000..ffe2a6a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_16.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_17.png b/blue_noise_textures/64_64/HDR_RGBA_17.png new file mode 100644 index 0000000..e8a457e Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_17.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_18.png b/blue_noise_textures/64_64/HDR_RGBA_18.png new file mode 100644 index 0000000..c765618 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_18.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_19.png b/blue_noise_textures/64_64/HDR_RGBA_19.png new file mode 100644 index 0000000..83b75e1 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_19.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_2.png b/blue_noise_textures/64_64/HDR_RGBA_2.png new file mode 100644 index 0000000..c554372 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_2.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_20.png b/blue_noise_textures/64_64/HDR_RGBA_20.png new file mode 100644 index 0000000..79b3ac4 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_20.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_21.png b/blue_noise_textures/64_64/HDR_RGBA_21.png new file mode 100644 index 0000000..e3600fe Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_21.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_22.png b/blue_noise_textures/64_64/HDR_RGBA_22.png new file mode 100644 index 0000000..08ef30e Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_22.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_23.png b/blue_noise_textures/64_64/HDR_RGBA_23.png new file mode 100644 index 0000000..6b32c36 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_23.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_24.png b/blue_noise_textures/64_64/HDR_RGBA_24.png new file mode 100644 index 0000000..24b73c1 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_24.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_25.png b/blue_noise_textures/64_64/HDR_RGBA_25.png new file mode 100644 index 0000000..05207bb Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_25.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_26.png b/blue_noise_textures/64_64/HDR_RGBA_26.png new file mode 100644 index 0000000..e1661c6 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_26.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_27.png b/blue_noise_textures/64_64/HDR_RGBA_27.png new file mode 100644 index 0000000..6876d49 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_27.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_28.png b/blue_noise_textures/64_64/HDR_RGBA_28.png new file mode 100644 index 0000000..7e918b3 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_28.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_29.png b/blue_noise_textures/64_64/HDR_RGBA_29.png new file mode 100644 index 0000000..36a069f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_29.png differ diff --git 
a/blue_noise_textures/64_64/HDR_RGBA_3.png b/blue_noise_textures/64_64/HDR_RGBA_3.png new file mode 100644 index 0000000..61eb749 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_3.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_30.png b/blue_noise_textures/64_64/HDR_RGBA_30.png new file mode 100644 index 0000000..da47fe7 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_30.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_31.png b/blue_noise_textures/64_64/HDR_RGBA_31.png new file mode 100644 index 0000000..449ae1e Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_31.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_32.png b/blue_noise_textures/64_64/HDR_RGBA_32.png new file mode 100644 index 0000000..077669a Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_32.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_33.png b/blue_noise_textures/64_64/HDR_RGBA_33.png new file mode 100644 index 0000000..50d3c44 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_33.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_34.png b/blue_noise_textures/64_64/HDR_RGBA_34.png new file mode 100644 index 0000000..e0b5690 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_34.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_35.png b/blue_noise_textures/64_64/HDR_RGBA_35.png new file mode 100644 index 0000000..fd5b8c4 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_35.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_36.png b/blue_noise_textures/64_64/HDR_RGBA_36.png new file mode 100644 index 0000000..5c66cf0 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_36.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_37.png b/blue_noise_textures/64_64/HDR_RGBA_37.png new file mode 100644 index 0000000..267f8a2 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_37.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_38.png b/blue_noise_textures/64_64/HDR_RGBA_38.png new file mode 100644 index 0000000..8308c5f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_38.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_39.png b/blue_noise_textures/64_64/HDR_RGBA_39.png new file mode 100644 index 0000000..dbac741 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_39.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_4.png b/blue_noise_textures/64_64/HDR_RGBA_4.png new file mode 100644 index 0000000..0af870e Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_4.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_40.png b/blue_noise_textures/64_64/HDR_RGBA_40.png new file mode 100644 index 0000000..63ec5b6 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_40.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_41.png b/blue_noise_textures/64_64/HDR_RGBA_41.png new file mode 100644 index 0000000..3c7c67f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_41.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_42.png b/blue_noise_textures/64_64/HDR_RGBA_42.png new file mode 100644 index 0000000..7fcb995 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_42.png differ diff --git a/blue_noise_textures/64_64/HDR_RGBA_43.png b/blue_noise_textures/64_64/HDR_RGBA_43.png new file mode 100644 index 0000000..e82e491 Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_43.png differ diff --git 
a/blue_noise_textures/64_64/HDR_RGBA_44.png b/blue_noise_textures/64_64/HDR_RGBA_44.png new file mode 100644 index 0000000..c5b408f Binary files /dev/null and b/blue_noise_textures/64_64/HDR_RGBA_44.png differ
[Binary blue-noise textures added under blue_noise_textures/64_64/: the HDR_RGBA, HDR_RGB, LDR_LLL1, LDR_RG01, LDR_RGB1 and LDR_RGBA series (64x64 PNGs, indices 0-63 per series); each appears in the diff as a "new file mode 100644" entry followed by "Binary files /dev/null and b/<file>.png differ".]
diff --git a/blue_noise_textures/64_64/LDR_RGBA_6.png
b/blue_noise_textures/64_64/LDR_RGBA_6.png new file mode 100644 index 0000000..87517bb Binary files /dev/null and b/blue_noise_textures/64_64/LDR_RGBA_6.png differ diff --git a/blue_noise_textures/64_64/LDR_RGBA_60.png b/blue_noise_textures/64_64/LDR_RGBA_60.png new file mode 100644 index 0000000..d58be6d Binary files /dev/null and b/blue_noise_textures/64_64/LDR_RGBA_60.png differ diff --git a/blue_noise_textures/64_64/LDR_RGBA_61.png b/blue_noise_textures/64_64/LDR_RGBA_61.png new file mode 100644 index 0000000..9e886b1 Binary files /dev/null and b/blue_noise_textures/64_64/LDR_RGBA_61.png differ diff --git a/blue_noise_textures/64_64/LDR_RGBA_62.png b/blue_noise_textures/64_64/LDR_RGBA_62.png new file mode 100644 index 0000000..6e2b59b Binary files /dev/null and b/blue_noise_textures/64_64/LDR_RGBA_62.png differ diff --git a/blue_noise_textures/64_64/LDR_RGBA_63.png b/blue_noise_textures/64_64/LDR_RGBA_63.png new file mode 100644 index 0000000..5e556e6 Binary files /dev/null and b/blue_noise_textures/64_64/LDR_RGBA_63.png differ diff --git a/blue_noise_textures/64_64/LDR_RGBA_7.png b/blue_noise_textures/64_64/LDR_RGBA_7.png new file mode 100644 index 0000000..edf90a8 Binary files /dev/null and b/blue_noise_textures/64_64/LDR_RGBA_7.png differ diff --git a/blue_noise_textures/64_64/LDR_RGBA_8.png b/blue_noise_textures/64_64/LDR_RGBA_8.png new file mode 100644 index 0000000..f320d3f Binary files /dev/null and b/blue_noise_textures/64_64/LDR_RGBA_8.png differ diff --git a/blue_noise_textures/64_64/LDR_RGBA_9.png b/blue_noise_textures/64_64/LDR_RGBA_9.png new file mode 100644 index 0000000..758aed0 Binary files /dev/null and b/blue_noise_textures/64_64/LDR_RGBA_9.png differ diff --git a/blue_noise_textures/COPYING.txt b/blue_noise_textures/COPYING.txt new file mode 100644 index 0000000..0e259d4 --- /dev/null +++ b/blue_noise_textures/COPYING.txt @@ -0,0 +1,121 @@ +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. 
+ +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. 
In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. diff --git a/blue_noise_textures/LICENSE.txt b/blue_noise_textures/LICENSE.txt new file mode 100644 index 0000000..6dbab60 --- /dev/null +++ b/blue_noise_textures/LICENSE.txt @@ -0,0 +1,9 @@ +To the extent possible under law, Christoph Peters has waived all copyright and +related or neighboring rights to the files in this directory and its +subdirectories. This work is published from: Germany. + +The work is made available under the terms of the Creative Commons CC0 Public +Domain Dedication. + +For more information please visit: +https://creativecommons.org/publicdomain/zero/1.0/ diff --git a/cmake/HunterGate.cmake b/cmake/HunterGate.cmake new file mode 100644 index 0000000..c24c0e5 --- /dev/null +++ b/cmake/HunterGate.cmake @@ -0,0 +1,543 @@ +# Copyright (c) 2013-2017, Ruslan Baratov +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This is a gate file to Hunter package manager. +# Include this file using `include` command and add package you need, example: +# +# cmake_minimum_required(VERSION 3.0) +# +# include("cmake/HunterGate.cmake") +# HunterGate( +# URL "https://github.com/path/to/hunter/archive.tar.gz" +# SHA1 "798501e983f14b28b10cda16afa4de69eee1da1d" +# ) +# +# project(MyProject) +# +# hunter_add_package(Foo) +# hunter_add_package(Boo COMPONENTS Bar Baz) +# +# Projects: +# * https://github.com/hunter-packages/gate/ +# * https://github.com/ruslo/hunter + +option(HUNTER_ENABLED "Enable Hunter package manager support" ON) +if(HUNTER_ENABLED) + if(CMAKE_VERSION VERSION_LESS "3.0") + message(FATAL_ERROR "At least CMake version 3.0 required for hunter dependency management." + " Update CMake or set HUNTER_ENABLED to OFF.") + endif() +endif() + +include(CMakeParseArguments) # cmake_parse_arguments + +option(HUNTER_STATUS_PRINT "Print working status" ON) +option(HUNTER_STATUS_DEBUG "Print a lot info" OFF) +option(HUNTER_TLS_VERIFY "Enable/disable TLS certificate checking on downloads" ON) + +set(HUNTER_WIKI "https://github.com/ruslo/hunter/wiki") + +function(hunter_gate_status_print) + foreach(print_message ${ARGV}) + if(HUNTER_STATUS_PRINT OR HUNTER_STATUS_DEBUG) + message(STATUS "[hunter] ${print_message}") + endif() + endforeach() +endfunction() + +function(hunter_gate_status_debug) + foreach(print_message ${ARGV}) + if(HUNTER_STATUS_DEBUG) + string(TIMESTAMP timestamp) + message(STATUS "[hunter *** DEBUG *** ${timestamp}] ${print_message}") + endif() + endforeach() +endfunction() + +function(hunter_gate_wiki wiki_page) + message("------------------------------ WIKI -------------------------------") + message(" ${HUNTER_WIKI}/${wiki_page}") + message("-------------------------------------------------------------------") + message("") + message(FATAL_ERROR "") +endfunction() + +function(hunter_gate_internal_error) + message("") + foreach(print_message ${ARGV}) + message("[hunter ** INTERNAL **] ${print_message}") + endforeach() + message("[hunter ** INTERNAL **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") + message("") + hunter_gate_wiki("error.internal") +endfunction() + +function(hunter_gate_fatal_error) + cmake_parse_arguments(hunter "" "WIKI" "" "${ARGV}") + string(COMPARE EQUAL "${hunter_WIKI}" "" have_no_wiki) + if(have_no_wiki) + hunter_gate_internal_error("Expected wiki") + endif() + message("") + foreach(x ${hunter_UNPARSED_ARGUMENTS}) + message("[hunter ** FATAL ERROR **] ${x}") + endforeach() + message("[hunter ** 
FATAL ERROR **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") + message("") + hunter_gate_wiki("${hunter_WIKI}") +endfunction() + +function(hunter_gate_user_error) + hunter_gate_fatal_error(${ARGV} WIKI "error.incorrect.input.data") +endfunction() + +function(hunter_gate_self root version sha1 result) + string(COMPARE EQUAL "${root}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("root is empty") + endif() + + string(COMPARE EQUAL "${version}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("version is empty") + endif() + + string(COMPARE EQUAL "${sha1}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("sha1 is empty") + endif() + + string(SUBSTRING "${sha1}" 0 7 archive_id) + + if(EXISTS "${root}/cmake/Hunter") + set(hunter_self "${root}") + else() + set( + hunter_self + "${root}/_Base/Download/Hunter/${version}/${archive_id}/Unpacked" + ) + endif() + + set("${result}" "${hunter_self}" PARENT_SCOPE) +endfunction() + +# Set HUNTER_GATE_ROOT cmake variable to suitable value. +function(hunter_gate_detect_root) + # Check CMake variable + string(COMPARE NOTEQUAL "${HUNTER_ROOT}" "" not_empty) + if(not_empty) + set(HUNTER_GATE_ROOT "${HUNTER_ROOT}" PARENT_SCOPE) + hunter_gate_status_debug("HUNTER_ROOT detected by cmake variable") + return() + endif() + + # Check environment variable + string(COMPARE NOTEQUAL "$ENV{HUNTER_ROOT}" "" not_empty) + if(not_empty) + set(HUNTER_GATE_ROOT "$ENV{HUNTER_ROOT}" PARENT_SCOPE) + hunter_gate_status_debug("HUNTER_ROOT detected by environment variable") + return() + endif() + + # Check HOME environment variable + string(COMPARE NOTEQUAL "$ENV{HOME}" "" result) + if(result) + set(HUNTER_GATE_ROOT "$ENV{HOME}/.hunter" PARENT_SCOPE) + hunter_gate_status_debug("HUNTER_ROOT set using HOME environment variable") + return() + endif() + + # Check SYSTEMDRIVE and USERPROFILE environment variable (windows only) + if(WIN32) + string(COMPARE NOTEQUAL "$ENV{SYSTEMDRIVE}" "" result) + if(result) + set(HUNTER_GATE_ROOT "$ENV{SYSTEMDRIVE}/.hunter" PARENT_SCOPE) + hunter_gate_status_debug( + "HUNTER_ROOT set using SYSTEMDRIVE environment variable" + ) + return() + endif() + + string(COMPARE NOTEQUAL "$ENV{USERPROFILE}" "" result) + if(result) + set(HUNTER_GATE_ROOT "$ENV{USERPROFILE}/.hunter" PARENT_SCOPE) + hunter_gate_status_debug( + "HUNTER_ROOT set using USERPROFILE environment variable" + ) + return() + endif() + endif() + + hunter_gate_fatal_error( + "Can't detect HUNTER_ROOT" + WIKI "error.detect.hunter.root" + ) +endfunction() + +macro(hunter_gate_lock dir) + if(NOT HUNTER_SKIP_LOCK) + if("${CMAKE_VERSION}" VERSION_LESS "3.2") + hunter_gate_fatal_error( + "Can't lock, upgrade to CMake 3.2 or use HUNTER_SKIP_LOCK" + WIKI "error.can.not.lock" + ) + endif() + hunter_gate_status_debug("Locking directory: ${dir}") + file(LOCK "${dir}" DIRECTORY GUARD FUNCTION) + hunter_gate_status_debug("Lock done") + endif() +endmacro() + +function(hunter_gate_download dir) + string( + COMPARE + NOTEQUAL + "$ENV{HUNTER_DISABLE_AUTOINSTALL}" + "" + disable_autoinstall + ) + if(disable_autoinstall AND NOT HUNTER_RUN_INSTALL) + hunter_gate_fatal_error( + "Hunter not found in '${dir}'" + "Set HUNTER_RUN_INSTALL=ON to auto-install it from '${HUNTER_GATE_URL}'" + "Settings:" + " HUNTER_ROOT: ${HUNTER_GATE_ROOT}" + " HUNTER_SHA1: ${HUNTER_GATE_SHA1}" + WIKI "error.run.install" + ) + endif() + string(COMPARE EQUAL "${dir}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("Empty 'dir' argument") + endif() + + string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" is_bad) + if(is_bad) + 
hunter_gate_internal_error("HUNTER_GATE_SHA1 empty") + endif() + + string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("HUNTER_GATE_URL empty") + endif() + + set(done_location "${dir}/DONE") + set(sha1_location "${dir}/SHA1") + + set(build_dir "${dir}/Build") + set(cmakelists "${dir}/CMakeLists.txt") + + hunter_gate_lock("${dir}") + if(EXISTS "${done_location}") + # while waiting for lock other instance can do all the job + hunter_gate_status_debug("File '${done_location}' found, skip install") + return() + endif() + + file(REMOVE_RECURSE "${build_dir}") + file(REMOVE_RECURSE "${cmakelists}") + + file(MAKE_DIRECTORY "${build_dir}") # check directory permissions + + # Disabling languages speeds up a little bit, reduces noise in the output + # and avoids path too long windows error + file( + WRITE + "${cmakelists}" + "cmake_minimum_required(VERSION 3.0)\n" + "project(HunterDownload LANGUAGES NONE)\n" + "include(ExternalProject)\n" + "ExternalProject_Add(\n" + " Hunter\n" + " URL\n" + " \"${HUNTER_GATE_URL}\"\n" + " URL_HASH\n" + " SHA1=${HUNTER_GATE_SHA1}\n" + " DOWNLOAD_DIR\n" + " \"${dir}\"\n" + " TLS_VERIFY\n" + " ${HUNTER_TLS_VERIFY}\n" + " SOURCE_DIR\n" + " \"${dir}/Unpacked\"\n" + " CONFIGURE_COMMAND\n" + " \"\"\n" + " BUILD_COMMAND\n" + " \"\"\n" + " INSTALL_COMMAND\n" + " \"\"\n" + ")\n" + ) + + if(HUNTER_STATUS_DEBUG) + set(logging_params "") + else() + set(logging_params OUTPUT_QUIET) + endif() + + hunter_gate_status_debug("Run generate") + + # Need to add toolchain file too. + # Otherwise on Visual Studio + MDD this will fail with error: + # "Could not find an appropriate version of the Windows 10 SDK installed on this machine" + if(EXISTS "${CMAKE_TOOLCHAIN_FILE}") + get_filename_component(absolute_CMAKE_TOOLCHAIN_FILE "${CMAKE_TOOLCHAIN_FILE}" ABSOLUTE) + set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=${absolute_CMAKE_TOOLCHAIN_FILE}") + else() + # 'toolchain_arg' can't be empty + set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=") + endif() + + string(COMPARE EQUAL "${CMAKE_MAKE_PROGRAM}" "" no_make) + if(no_make) + set(make_arg "") + else() + # Test case: remove Ninja from PATH but set it via CMAKE_MAKE_PROGRAM + set(make_arg "-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}") + endif() + + execute_process( + COMMAND + "${CMAKE_COMMAND}" + "-H${dir}" + "-B${build_dir}" + "-G${CMAKE_GENERATOR}" + "${toolchain_arg}" + ${make_arg} + WORKING_DIRECTORY "${dir}" + RESULT_VARIABLE download_result + ${logging_params} + ) + + if(NOT download_result EQUAL 0) + hunter_gate_internal_error("Configure project failed") + endif() + + hunter_gate_status_print( + "Initializing Hunter workspace (${HUNTER_GATE_SHA1})" + " ${HUNTER_GATE_URL}" + " -> ${dir}" + ) + execute_process( + COMMAND "${CMAKE_COMMAND}" --build "${build_dir}" + WORKING_DIRECTORY "${dir}" + RESULT_VARIABLE download_result + ${logging_params} + ) + + if(NOT download_result EQUAL 0) + hunter_gate_internal_error("Build project failed") + endif() + + file(REMOVE_RECURSE "${build_dir}") + file(REMOVE_RECURSE "${cmakelists}") + + file(WRITE "${sha1_location}" "${HUNTER_GATE_SHA1}") + file(WRITE "${done_location}" "DONE") + + hunter_gate_status_debug("Finished") +endfunction() + +# Must be a macro so master file 'cmake/Hunter' can +# apply all variables easily just by 'include' command +# (otherwise PARENT_SCOPE magic needed) +macro(HunterGate) + if(HUNTER_GATE_DONE) + # variable HUNTER_GATE_DONE set explicitly for external project + # (see `hunter_download`) + set_property(GLOBAL PROPERTY HUNTER_GATE_DONE 
YES) + endif() + + # First HunterGate command will init Hunter, others will be ignored + get_property(_hunter_gate_done GLOBAL PROPERTY HUNTER_GATE_DONE SET) + + if(NOT HUNTER_ENABLED) + # Empty function to avoid error "unknown function" + function(hunter_add_package) + endfunction() + + set( + _hunter_gate_disabled_mode_dir + "${CMAKE_CURRENT_LIST_DIR}/cmake/Hunter/disabled-mode" + ) + if(EXISTS "${_hunter_gate_disabled_mode_dir}") + hunter_gate_status_debug( + "Adding \"disabled-mode\" modules: ${_hunter_gate_disabled_mode_dir}" + ) + list(APPEND CMAKE_PREFIX_PATH "${_hunter_gate_disabled_mode_dir}") + endif() + elseif(_hunter_gate_done) + hunter_gate_status_debug("Secondary HunterGate (use old settings)") + hunter_gate_self( + "${HUNTER_CACHED_ROOT}" + "${HUNTER_VERSION}" + "${HUNTER_SHA1}" + _hunter_self + ) + include("${_hunter_self}/cmake/Hunter") + else() + set(HUNTER_GATE_LOCATION "${CMAKE_CURRENT_LIST_DIR}") + + string(COMPARE NOTEQUAL "${PROJECT_NAME}" "" _have_project_name) + if(_have_project_name) + hunter_gate_fatal_error( + "Please set HunterGate *before* 'project' command. " + "Detected project: ${PROJECT_NAME}" + WIKI "error.huntergate.before.project" + ) + endif() + + cmake_parse_arguments( + HUNTER_GATE "LOCAL" "URL;SHA1;GLOBAL;FILEPATH" "" ${ARGV} + ) + + string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" _empty_sha1) + string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" _empty_url) + string( + COMPARE + NOTEQUAL + "${HUNTER_GATE_UNPARSED_ARGUMENTS}" + "" + _have_unparsed + ) + string(COMPARE NOTEQUAL "${HUNTER_GATE_GLOBAL}" "" _have_global) + string(COMPARE NOTEQUAL "${HUNTER_GATE_FILEPATH}" "" _have_filepath) + + if(_have_unparsed) + hunter_gate_user_error( + "HunterGate unparsed arguments: ${HUNTER_GATE_UNPARSED_ARGUMENTS}" + ) + endif() + if(_empty_sha1) + hunter_gate_user_error("SHA1 suboption of HunterGate is mandatory") + endif() + if(_empty_url) + hunter_gate_user_error("URL suboption of HunterGate is mandatory") + endif() + if(_have_global) + if(HUNTER_GATE_LOCAL) + hunter_gate_user_error("Unexpected LOCAL (already has GLOBAL)") + endif() + if(_have_filepath) + hunter_gate_user_error("Unexpected FILEPATH (already has GLOBAL)") + endif() + endif() + if(HUNTER_GATE_LOCAL) + if(_have_global) + hunter_gate_user_error("Unexpected GLOBAL (already has LOCAL)") + endif() + if(_have_filepath) + hunter_gate_user_error("Unexpected FILEPATH (already has LOCAL)") + endif() + endif() + if(_have_filepath) + if(_have_global) + hunter_gate_user_error("Unexpected GLOBAL (already has FILEPATH)") + endif() + if(HUNTER_GATE_LOCAL) + hunter_gate_user_error("Unexpected LOCAL (already has FILEPATH)") + endif() + endif() + + hunter_gate_detect_root() # set HUNTER_GATE_ROOT + + # Beautify path, fix probable problems with windows path slashes + get_filename_component( + HUNTER_GATE_ROOT "${HUNTER_GATE_ROOT}" ABSOLUTE + ) + hunter_gate_status_debug("HUNTER_ROOT: ${HUNTER_GATE_ROOT}") + if(NOT HUNTER_ALLOW_SPACES_IN_PATH) + string(FIND "${HUNTER_GATE_ROOT}" " " _contain_spaces) + if(NOT _contain_spaces EQUAL -1) + hunter_gate_fatal_error( + "HUNTER_ROOT (${HUNTER_GATE_ROOT}) contains spaces." 
+ "Set HUNTER_ALLOW_SPACES_IN_PATH=ON to skip this error" + "(Use at your own risk!)" + WIKI "error.spaces.in.hunter.root" + ) + endif() + endif() + + string( + REGEX + MATCH + "[0-9]+\\.[0-9]+\\.[0-9]+[-_a-z0-9]*" + HUNTER_GATE_VERSION + "${HUNTER_GATE_URL}" + ) + string(COMPARE EQUAL "${HUNTER_GATE_VERSION}" "" _is_empty) + if(_is_empty) + set(HUNTER_GATE_VERSION "unknown") + endif() + + hunter_gate_self( + "${HUNTER_GATE_ROOT}" + "${HUNTER_GATE_VERSION}" + "${HUNTER_GATE_SHA1}" + _hunter_self + ) + + set(_master_location "${_hunter_self}/cmake/Hunter") + if(EXISTS "${HUNTER_GATE_ROOT}/cmake/Hunter") + # Hunter downloaded manually (e.g. by 'git clone') + set(_unused "xxxxxxxxxx") + set(HUNTER_GATE_SHA1 "${_unused}") + set(HUNTER_GATE_VERSION "${_unused}") + else() + get_filename_component(_archive_id_location "${_hunter_self}/.." ABSOLUTE) + set(_done_location "${_archive_id_location}/DONE") + set(_sha1_location "${_archive_id_location}/SHA1") + + # Check Hunter already downloaded by HunterGate + if(NOT EXISTS "${_done_location}") + hunter_gate_download("${_archive_id_location}") + endif() + + if(NOT EXISTS "${_done_location}") + hunter_gate_internal_error("hunter_gate_download failed") + endif() + + if(NOT EXISTS "${_sha1_location}") + hunter_gate_internal_error("${_sha1_location} not found") + endif() + file(READ "${_sha1_location}" _sha1_value) + string(COMPARE EQUAL "${_sha1_value}" "${HUNTER_GATE_SHA1}" _is_equal) + if(NOT _is_equal) + hunter_gate_internal_error( + "Short SHA1 collision:" + " ${_sha1_value} (from ${_sha1_location})" + " ${HUNTER_GATE_SHA1} (HunterGate)" + ) + endif() + if(NOT EXISTS "${_master_location}") + hunter_gate_user_error( + "Master file not found:" + " ${_master_location}" + "try to update Hunter/HunterGate" + ) + endif() + endif() + include("${_master_location}") + set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) + endif() +endmacro() diff --git a/config_vkpt b/config_vkpt new file mode 100644 index 0000000..2e4a1c6 --- /dev/null +++ b/config_vkpt @@ -0,0 +1,19 @@ +# save as .config for compiling with Make (linux) + +CONFIG_PNG=1 +CONFIG_JPEG=1 +CONFIG_SDL2=1 + +# use vkpt renderer +CONFIG_VKPT_RENDERER=1 + +# enable/disable vulkan validation layer +CONFIG_VKPT_ENABLE_VALIDATION=0 + +# q2vkpt currently does not support md3 +CONFIG_NO_MD3=1 + +# set up a custom path to the glslangValidator binary here +#CONFIG_GLSLANGVALIDATOR=glslangValidator + +CC=gcc diff --git a/inc/client/sound/ogg.h b/inc/client/sound/ogg.h new file mode 100644 index 0000000..cf341e1 --- /dev/null +++ b/inc/client/sound/ogg.h @@ -0,0 +1,75 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, + * USA. 
+ * + * ======================================================================= + * + * The header file for the OGG/Vorbis playback + * + * ======================================================================= + */ + +#ifdef OGG + +#ifndef CL_SOUND_VORBIS_H +#define CL_SOUND_VORBIS_H + +/* The OGG codec can return the samples in a number + * of different formats, we use the standard signed + * short format. */ +#define OGG_SAMPLEWIDTH 2 +#define OGG_DIR "music" + +typedef enum +{ + PLAY, + PAUSE, + STOP +} ogg_status_t; + +typedef enum +{ + ABS, + REL +} ogg_seek_t; + +void OGG_Init(void); +void OGG_Shutdown(void); +void OGG_Reinit(void); +qboolean OGG_Check(char *name); +void OGG_Seek(ogg_seek_t type, double offset); +void OGG_LoadFileList(void); +void OGG_LoadPlaylist(char *name); +qboolean OGG_Open(ogg_seek_t type, int offset); +qboolean OGG_OpenName(char *filename); +int OGG_Read(void); +void OGG_Sequence(void); +void OGG_Stop(void); +void OGG_Stream(void); +void S_RawSamplesVol(int samples, int rate, int width, + int channels, byte *data, float volume); + +/* Console commands. */ +void OGG_ListCmd(void); +void OGG_ParseCmd(char *arg); +void OGG_PauseCmd(void); +void OGG_PlayCmd(void); +void OGG_ResumeCmd(void); +void OGG_SeekCmd(void); +void OGG_StatusCmd(void); + +#endif +#endif diff --git a/inc/client/sound/sound.h b/inc/client/sound/sound.h index b625b8d..5e9c28d 100644 --- a/inc/client/sound/sound.h +++ b/inc/client/sound/sound.h @@ -39,6 +39,9 @@ void S_BeginRegistration(void); qhandle_t S_RegisterSound(const char *sample); void S_EndRegistration(void); +void S_RawSamples(int samples, int rate, int width, + int channels, byte *data, float volume); + extern vec3_t listener_origin; extern vec3_t listener_forward; extern vec3_t listener_right; diff --git a/inc/common/bsp.h b/inc/common/bsp.h index 47c7797..dfc4335 100644 --- a/inc/common/bsp.h +++ b/inc/common/bsp.h @@ -41,6 +41,7 @@ typedef struct mtexinfo_s { // used internally due to name len probs //ZOID char name[MAX_TEXNAME]; #if USE_REF + int radiance; vec3_t axis[2]; vec2_t offset; struct image_s *image; // used for texturing @@ -94,7 +95,7 @@ typedef struct mface_s { int texturemins[2]; int extents[2]; -#if USE_REF == REF_GL +#if USE_REF == REF_GL || USE_REF == REF_GLPT int texnum[2]; int statebits; int firstvert; diff --git a/inc/common/common.h b/inc/common/common.h index 1ed0a20..3978b7d 100644 --- a/inc/common/common.h +++ b/inc/common/common.h @@ -26,10 +26,10 @@ with this program; if not, write to the Free Software Foundation, Inc., // common.h -- definitions common between client and server, but not game.dll // -#define PRODUCT "Q2PRO" +#define PRODUCT "Q2VKPT" #if USE_CLIENT -#define APPLICATION "q2pro" +#define APPLICATION "q2vkpt" #else #define APPLICATION "q2proded" #endif diff --git a/inc/common/qbvhmp.h b/inc/common/qbvhmp.h new file mode 100644 index 0000000..91890f0 --- /dev/null +++ b/inc/common/qbvhmp.h @@ -0,0 +1,217 @@ +/* + This file is part of corona-13. + + corona-13 is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + corona-13 is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
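The inc/client/sound/ogg.h interface above is declaration-only; the client wires it up further down in this diff (OGG_Init from S_Init, OGG_Stream once per S_Update, OGG_Shutdown from S_Shutdown, and OGG_ParseCmd on the CD-track configstring in CL_PrepRefresh). For orientation, a hedged sketch of that call order, where only the OGG_* names come from the header and the wrapper functions are invented here:

    #include "shared/shared.h"
    #include "client/sound/ogg.h"

    #ifdef OGG
    static void music_startup(void)
    {
        OGG_Init();                 /* set up the Vorbis backend            */
        OGG_LoadFileList();         /* collect tracks from OGG_DIR ("music")*/
    }

    static void music_track_changed(char *cd_track_configstring)
    {
        OGG_ParseCmd(cd_track_configstring);  /* e.g. the CS_CDTRACK string */
    }

    static void music_frame(void)
    {
        OGG_Stream();               /* decode ahead; samples reach the mixer
                                       through S_RawSamplesVol()            */
    }

    static void music_shutdown(void)
    {
        OGG_Stop();
        OGG_Shutdown();
    }
    #endif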
+ + You should have received a copy of the GNU General Public License + along with corona-13. If not, see . +*/ + +#pragma once +#include "threads.h" +#include +#include +#include + +// will keep some stats and print them at the end: +// #define ACCEL_DEBUG +// don't do motion blur: +#define ACCEL_STATIC +// number of split planes tested for surface area heuristic: +#define SAH_TESTS 7 + +typedef union +{ + __m128 m; + float f[4]; + unsigned int i[4]; +} +qbvh_float4_t; + +typedef struct +{ +#ifdef ACCEL_STATIC + uint32_t paabbx, paabby, paabbz; + uint32_t aabb_mx, aabb_my, aabb_mz, aabb_Mx, aabb_My, aabb_Mz; + uint32_t pad0, pad1, pad2; + + // uint16_t paabb[6]; // more parent box in uint16_t + // uint8_t aabb[6][4]; // 8-bit quantised child boxes + // uint32_t pad[3]; + + // qbvh_float4_t aabb0[6]; + // TODO: we'll get away with very few bits here, say 16 per child + // TODO: use the rest for the axis bits + // 1 2 24 5 + // child is | 1 | ax | prim | cnt | for leaves and + // | 0 | ax | child | for inner + uint32_t child[4]; // child index or, if leaf -(prim<<5|num_prims) index +#else // with motion blur: double a regular qbvh node, 256 bytes + qbvh_float4_t aabb0[6]; + qbvh_float4_t aabb1[6]; + uint64_t child[4]; // child index or, if leaf -(prim<<5|num_prims) index + uint64_t parent; + int64_t axis0; + int64_t axis00; + int64_t axis01; +#endif +} +qbvh_node_t; + +#ifdef ACCEL_DEBUG +typedef struct accel_debug_t +{ + uint64_t aabb_intersect; + uint64_t prims_intersect; + uint64_t accel_intersect; + uint64_t aabb_true; +} +accel_debug_t; +#endif + +typedef enum job_type_t +{ + s_job_all = 0, + s_job_node, + s_job_split, + s_job_scan, + s_job_swap +} +job_type_t; + +// shared struct for split job +typedef struct shared_split_job_t +{ + int binmin[SAH_TESTS+1]; + int binmax[SAH_TESTS+1]; + float bound_m[SAH_TESTS+1]; + float bound_M[SAH_TESTS+1]; + int64_t left; // prim index bounds + int64_t right; + int64_t step; // step size + int64_t done; // thread counter + int p, q, d; // split dim and the other two (permutation of 0 1 2) + float aabb0, aabb1; +} +shared_split_job_t; + +typedef struct job_t +{ + job_type_t type; + union + { + struct + { + qbvh_node_t *node; + qbvh_node_t *parent; + int64_t left; + int64_t right; + float paabb[6]; + int depth; + int child; + } + node; + struct + { + shared_split_job_t *job; + int64_t left; + int64_t right; + } + split; + struct + { + uint64_t begin, back; + int axis; + float split; + // output: + int64_t *right; + float *aabbl, *aabbr; + int *done; + } + scan; + struct + { + uint64_t read, num_read; // indices to read from + uint64_t write; // current write index + uint64_t read_block, write_block; // block id of read/write indices + uint64_t *b_l, *b_r, *b_e; // pointers to shared thread blocks + int *done; + } + swap; + }; +} +job_t; + +typedef struct queue_t +{ + pthread_mutex_t mutex; + job_t *jobs; + int32_t num_jobs; +} +queue_t; + +typedef struct accel_t +{ + queue_t *queue; + uint64_t built; + pthread_mutex_t mutex; + threads_t *threads; + + float *prim_aabb; // cached prim aabb, allocated from outside + uint32_t *primid; // primid array, passed in, too + uint32_t num_prims; // number of primitives + + uint64_t num_nodes; + uint64_t node_bufsize; + float aabb[6]; + qbvh_node_t *tree; + + uint32_t *shadow_cache; + uint32_t shadow_cache_last; +#ifdef ACCEL_DEBUG + accel_debug_t *debug; +#endif +} +accel_t; + +// init new acceleration structure for given primitives (don't build yet) +accel_t* accel_init( + float *aabb, // bounding boxes of 
primitives + uint32_t *primid, // primitive id array + const int num_prims, // number of primitives + threads_t *t); // thread pool to use during construction + +// free memory +void accel_cleanup(accel_t *b); + +// build acceleration structure +void accel_build(accel_t *b); + +// build in the background +void accel_build_async(accel_t *b); + +// block until the background threads have finished working +void accel_build_wait(accel_t *b); + +#if 0 // disabled for now, we want to ray trace on GPU +// intersect ray (closest point) +void accel_intersect(const accel_t *b, const ray_t *ray, hit_t *hit); + +// test visibility up to max distance +int accel_visible(const accel_t *b, const ray_t *ray, const float max_dist); + +// find closest geo intersection point to world space point at ray with parameter centre: +// ray->pos + centre * ray->dir +void accel_closest(const accel_t *b, ray_t *ray, hit_t *hit, const float centre); + +// return pointer to the 6-float minxyz-maxxyz aabb +const float *accel_aabb(const accel_t *b); +#endif diff --git a/inc/common/utils.h b/inc/common/utils.h index b43283c..fffc0d7 100644 --- a/inc/common/utils.h +++ b/inc/common/utils.h @@ -59,7 +59,7 @@ void Com_PageInMemory(void *buffer, size_t size); color_index_t Com_ParseColor(const char *s, color_index_t last); -#if USE_REF == REF_GL +#if (USE_REF == REF_GL) || (USE_REF == REF_GLPT) unsigned Com_ParseExtensionString(const char *s, const char *const extnames[]); #endif diff --git a/inc/refresh/images.h b/inc/refresh/images.h index 683e923..b3b7930 100644 --- a/inc/refresh/images.h +++ b/inc/refresh/images.h @@ -33,7 +33,7 @@ with this program; if not, write to the Free Software Foundation, Inc., #define R_Malloc(size) Z_TagMalloc(size, TAG_RENDERER) #define R_Mallocz(size) Z_TagMallocz(size, TAG_RENDERER) -#if USE_REF == REF_GL +#if USE_REF == REF_GL || USE_REF == REF_GLPT #define IMG_AllocPixels(x) FS_AllocTempMem(x) #define IMG_FreePixels(x) FS_FreeTempMem(x) #else @@ -79,12 +79,20 @@ typedef struct image_s { int width, height; // source image int upload_width, upload_height; // after power of two and picmip int registration_sequence; // 0 = free -#if USE_REF == REF_GL +#if USE_REF == REF_GL || USE_REF == REF_GLPT unsigned texnum; // gl texture binding float sl, sh, tl, th; +#elif USE_REF == REF_VKPT + byte *pix_data; // todo: add miplevels + int material_idx; + uint32_t light_color; // use this color if this is a light source + int is_light; #else byte *pixels[4]; // mip levels #endif +#if USE_REF == REF_GLPT + uint64_t texhandle; // gl handle for bindless textures +#endif } image_t; #define MAX_RIMAGES 1024 @@ -118,3 +126,5 @@ void IMG_Load(image_t *image, byte *pic); byte *IMG_ReadPixels(int *width, int *height, int *rowbytes); #endif // IMAGES_H + +/* vim: set ts=8 sw=4 tw=0 et : */ diff --git a/inc/refresh/models.h b/inc/refresh/models.h index ef253a0..ab5f46b 100644 --- a/inc/refresh/models.h +++ b/inc/refresh/models.h @@ -52,7 +52,7 @@ typedef struct model_s { // alias models int numframes; struct maliasframe_s *frames; -#if USE_REF == REF_GL +#if USE_REF == REF_GL || USE_REF == REF_GLPT || USE_REF == REF_VKPT int nummeshes; struct maliasmesh_s *meshes; #else @@ -71,6 +71,9 @@ typedef struct model_s { struct mspriteframe_s *spriteframes; } model_t; +extern model_t r_models[]; +extern int r_numModels; + extern int registration_sequence; // these are implemented in r_models.c diff --git a/inc/refresh/refresh.h b/inc/refresh/refresh.h index f4422d3..bf99e2c 100644 --- a/inc/refresh/refresh.h +++ 
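inc/common/qbvhmp.h (taken from corona-13, as its own header states) describes a 4-wide BVH whose child slots double as leaf references: non-negative entries index another node, negative entries encode -(prim<<5|num_prims). The helpers below decode that convention and show the build-side call order the accel_* declarations imply; everything outside those declared names, including the include path, is an assumption:

    #include <stdint.h>
    #include "common/qbvhmp.h"   /* assumed include path for the header above */

    /* Illustrative decode of one child slot, following the comment
     * "child index or, if leaf -(prim<<5|num_prims) index".          */
    static inline int qbvh_child_is_leaf(int32_t child)
    {
        return child < 0;
    }

    static inline void qbvh_leaf_decode(int32_t child,
                                        uint32_t *first_prim, uint32_t *num_prims)
    {
        uint32_t v  = (uint32_t)(-child);
        *first_prim = v >> 5;     /* index of the first primitive          */
        *num_prims  = v & 31u;    /* low 5 bits: primitive count (max 31)  */
    }

    /* Build-side call order suggested by the accel_* declarations;
     * error handling and the real inputs are omitted.                 */
    static void build_accel_sketch(float *prim_aabb, uint32_t *primid,
                                   int num_prims, threads_t *pool)
    {
        accel_t *accel = accel_init(prim_aabb, primid, num_prims, pool);
        accel_build_async(accel);   /* construct on the worker threads      */
        /* ... other per-map setup ... */
        accel_build_wait(accel);    /* block until the QBVH is usable       */
        accel_cleanup(accel);
    }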
b/inc/refresh/refresh.h @@ -84,6 +84,8 @@ typedef struct entity_s { qhandle_t skin; // NULL for inline skin int flags; + + int id; } entity_t; typedef struct dlight_s { @@ -107,6 +109,19 @@ typedef struct lightstyle_s { vec3_t rgb; // 0.0 - 2.0 } lightstyle_t; +#ifdef USE_SMALL_GPU +#define MAX_DECALS 2 +#else +#define MAX_DECALS 50 +#endif +typedef struct decal_s { + vec3_t pos; + vec3_t dir; + float spread; + float length; + float dummy; +} decal_t; + typedef struct refdef_s { int x, y, width, height;// in virtual screen coordinates float fov_x, fov_y; @@ -128,6 +143,10 @@ typedef struct refdef_s { int num_particles; particle_t *particles; + + int decal_beg; + int decal_end; + decal_t decal[MAX_DECALS]; } refdef_t; typedef enum { @@ -228,4 +247,11 @@ void R_BeginFrame(void); void R_EndFrame(void); void R_ModeChanged(int width, int height, int flags, int rowbytes, void *pixels); +// add decal to ring buffer +void R_AddDecal(decal_t *d); + +#ifdef _GL_DEBUG +void R_SetRayProbe(vec3_t p, vec3_t n); +#endif + #endif // REFRESH_H diff --git a/inc/shared/shared.h b/inc/shared/shared.h index 372c792..f796b46 100644 --- a/inc/shared/shared.h +++ b/inc/shared/shared.h @@ -60,7 +60,7 @@ typedef int qerror_t; #define YAW 1 // left / right #define ROLL 2 // fall over -#define MAX_STRING_CHARS 1024 // max length of a string passed to Cmd_TokenizeString +#define MAX_STRING_CHARS 4096 // max length of a string passed to Cmd_TokenizeString #define MAX_STRING_TOKENS 256 // max tokens resulting from Cmd_TokenizeString #define MAX_TOKEN_CHARS 1024 // max length of an individual token #define MAX_NET_STRING 2048 // max length of a string used in network protocol @@ -180,6 +180,11 @@ static inline float Q_fabs(float f) #define Q_ftol(f) ((long)(f)) #define DEG2RAD(a) (a * M_PI) / 180.0F +#define RAD2DEG(a) (a * 180.0F) / M_PI + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) +#define CLAMP(a, m, M) MIN(MAX(a, m), M) #define DotProduct(x,y) ((x)[0]*(y)[0]+(x)[1]*(y)[1]+(x)[2]*(y)[2]) #define CrossProduct(v1,v2,cross) \ diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 0000000..16a8e51 --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,373 @@ +cmake_minimum_required (VERSION 3.8) + +SET(SRC_BASEQ2 + baseq2/g_ai.c + baseq2/g_chase.c + baseq2/g_cmds.c + baseq2/g_combat.c + baseq2/g_func.c + baseq2/g_items.c + baseq2/g_main.c + baseq2/g_misc.c + baseq2/g_monster.c + baseq2/g_phys.c + baseq2/g_ptrs.c + baseq2/g_save.c + baseq2/g_spawn.c + baseq2/g_svcmds.c + baseq2/g_target.c + baseq2/g_trigger.c + baseq2/g_turret.c + baseq2/g_utils.c + baseq2/g_weapon.c + baseq2/m_actor.c + baseq2/m_berserk.c + baseq2/m_boss2.c + baseq2/m_boss3.c + baseq2/m_boss31.c + baseq2/m_boss32.c + baseq2/m_brain.c + baseq2/m_chick.c + baseq2/m_flipper.c + baseq2/m_float.c + baseq2/m_flyer.c + baseq2/m_gladiator.c + baseq2/m_gunner.c + baseq2/m_hover.c + baseq2/m_infantry.c + baseq2/m_insane.c + baseq2/m_medic.c + baseq2/m_move.c + baseq2/m_mutant.c + baseq2/m_parasite.c + baseq2/m_soldier.c + baseq2/m_supertank.c + baseq2/m_tank.c + baseq2/p_client.c + baseq2/p_hud.c + baseq2/p_trail.c + baseq2/p_view.c + baseq2/p_weapon.c +) + +SET(HEADERS_BASEQ2 + baseq2/g_local.h + baseq2/g_ptrs.h + baseq2/m_actor.h + baseq2/m_berserk.h + baseq2/m_boss2.h + baseq2/m_boss31.h + baseq2/m_boss32.h + baseq2/m_brain.h + baseq2/m_chick.h + baseq2/m_flipper.h + baseq2/m_float.h + baseq2/m_flyer.h + baseq2/m_gladiator.h + baseq2/m_gunner.h + baseq2/m_hover.h + baseq2/m_infantry.h + baseq2/m_insane.h + baseq2/m_medic.h + baseq2/m_mutant.h + baseq2/m_parasite.h + baseq2/m_player.h + baseq2/m_rider.h + baseq2/m_soldier.h + baseq2/m_supertank.h + baseq2/m_tank.h +) + + +SET(SRC_CLIENT + client/ascii.c + client/console.c + client/crc.c + client/demo.c + client/download.c + client/effects.c + client/entities.c +# client/gtv.c +# client/http.c + client/input.c + client/keys.c + client/locs.c + client/main.c + client/newfx.c +# client/null.c + client/parse.c + client/precache.c + client/predict.c + client/refresh.c + client/screen.c + client/tent.c + client/view.c + client/ui/demos.c + client/ui/menu.c + client/ui/playerconfig.c + client/ui/playermodels.c + client/ui/script.c + client/ui/servers.c + client/ui/ui.c + client/sound/dma.c + #client/sound/al.c + client/sound/main.c + client/sound/mem.c + client/sound/mix.c +# client/sound/qal/fixed.c + client/sound/qal/dynamic.c +) + +SET(HEADERS_CLIENT + client/client.h + client/ui/ui.h + client/sound/sound.h + client/sound/qal/dynamic.h + client/sound/qal/fixed.h +) + +SET(SRC_SERVER +# server/ac.c + server/commands.c + server/entities.c + server/game.c + server/init.c + server/main.c + server/mvd.c + server/save.c + server/send.c + server/user.c + server/world.c + server/mvd/client.c + server/mvd/parse.c + server/mvd/game.c +) + +SET(HEADERS_SERVER + server/server.h +) + +SET(SRC_COMMON + common/bsp.c + common/cmd.c + common/cmodel.c + common/common.c + common/cvar.c + common/error.c + common/field.c + common/fifo.c + common/files.c + common/math.c + common/mdfour.c + common/msg.c + common/pmove.c + common/prompt.c + common/sizebuf.c +# common/tests.c + common/utils.c + common/zone.c + common/net/chan.c + common/net/net.c + common/x86/fpu.c +) + +SET(HEADERS_COMMON + common/net/inet_ntop.h + common/net/inet_pton.h + common/net/win.h +) + +SET(SRC_REFRESH + refresh/images.c + refresh/models.c +) + +SET(SRC_GL + refresh/gl/draw.c + 
refresh/gl/hq2x.c + refresh/gl/images.c + refresh/gl/main.c + refresh/gl/mesh.c + refresh/gl/models.c + refresh/gl/sky.c + refresh/gl/state.c + refresh/gl/surf.c + refresh/gl/tess.c + refresh/gl/world.c + refresh/gl/qgl/dynamic.c +# refresh/gl/qgl/fixed.c +) + +SET(HEADERS_GL + refresh/gl/arbfp.h + refresh/gl/gl.h +) + +SET(SRC_SHARED + shared/m_flash.c + shared/shared.c +) + +SET(SRC_WINDOWS + windows/ac.c +# windows/ascii.c +#windows/client.c + windows/debug.c + #windows/dinput.c + #windows/dsound.c + #windows/glimp.c + windows/hunk.c + #windows/swimp.c + windows/system.c + windows/wave.c + #windows/wgl.c + #unix/sdl2/sound.c + unix/sdl2/video.c +) + +SET(HEADERS_WINDOWS + windows/wgl.h + windows/glimp.h + windows/client.h + windows/threads/threads.h +) + +SET(SRC_VKPT + refresh/vkpt/asvgf.c + refresh/vkpt/bsp_mesh.c + refresh/vkpt/draw.c + refresh/vkpt/light_hierarchy.c + refresh/vkpt/main.c + refresh/vkpt/matrix.c + refresh/vkpt/models.c + refresh/vkpt/path_tracer.c + refresh/vkpt/profiler.c + refresh/vkpt/stb.c + refresh/vkpt/textures.c + refresh/vkpt/uniform_buffer.c + refresh/vkpt/vertex_buffer.c + refresh/vkpt/vk_util.c +) + +SET(HEADERS_VKPT + refresh/vkpt/vkpt.h + refresh/vkpt/vk_util.h +) + +SET(SRC_QBVH + common/qbvhmp.c + refresh/glpt/bvh.c + windows/threads/threads.c +) + +SET(SRC_OPTIX + refresh/glpt/optix.c +) + +FILE(GLOB SRC_SHADERS + refresh/vkpt/shader/*.vert + refresh/vkpt/shader/*.geom + refresh/vkpt/shader/*.frag + refresh/vkpt/shader/*.comp + refresh/vkpt/shader/*.glsl + refresh/vkpt/shader/*.h + refresh/vkpt/shader/*.rgen + refresh/vkpt/shader/*.rchit + refresh/vkpt/shader/*.rmiss +) + +ADD_DEFINITIONS(-DHAVE_CONFIG_H=1 -DUSE_SERVER=1 -DUSE_CLIENT=1 -DCURL_STATICLIB) + +ADD_LIBRARY(gamex86 SHARED ${SRC_BASEQ2} ${HEADERS_BASEQ2} ${SRC_SHARED}) +ADD_EXECUTABLE(client WIN32 + ${SRC_CLIENT} ${HEADERS_CLIENT} + ${SRC_COMMON} ${HEADERS_COMMON} + ${SRC_REFRESH} ${SRC_SHADERS} + ${SRC_SHARED} + ${SRC_WINDOWS} ${HEADERS_WINDOWS} + ${SRC_SERVER} ${HEADERS_SERVER} +) + +IF (CONFIG_GL_RENDERER) + TARGET_SOURCES(client PRIVATE ${SRC_GL} ${HEADERS_GL}) + TARGET_COMPILE_DEFINITIONS(client PRIVATE REF_GL=1 USE_REF=1 VID_REF="gl") +ENDIF() + +IF (CONFIG_VKPT_RENDERER) + TARGET_SOURCES(client PRIVATE ${SRC_VKPT} ${HEADERS_VKPT}) + TARGET_INCLUDE_DIRECTORIES(client PRIVATE refresh/vkpt/include) + LINK_DIRECTORIES(client PRIVATE refresh/vkpt/include/vulkan) + TARGET_COMPILE_DEFINITIONS(client PRIVATE REF_VKPT=1 USE_REF=1 VID_REF="vkpt") + TARGET_LINK_LIBRARIES(client vulkan-1) +ENDIF() + +#IF (CONFIG_USE_OPTIX) +# TARGET_COMPILE_DEFINITIONS(client PRIVATE USE_OPTIX=1) +# FIND_PACKAGE(CUDA REQUIRED) +# GET_FILENAME_COMPONENT(CUDA_LIB_DIR ${CUDA_LIBRARIES} DIRECTORY) +# TARGET_LINK_LIBRARIES(client ${CUDA_LIB_DIR}/cuda.lib ${CONFIG_OPTIX_DIR}/lib64/optix_prime.1.lib) +# TARGET_INCLUDE_DIRECTORIES(client PRIVATE ${CUDA_INCLUDE_DIRS} ${CONFIG_OPTIX_DIR}/include) +# TARGET_SOURCES(client PRIVATE ${SRC_OPTIX}) +#ELSE() +# TARGET_COMPILE_DEFINITIONS(client PRIVATE ACCEL_NO_VLA=1) +# TARGET_SOURCES(client PRIVATE ${SRC_QBVH}) +# IF (WIN32) +# TARGET_INCLUDE_DIRECTORIES(client PRIVATE windows/threads) +# ELSE() +# TARGET_INCLUDE_DIRECTORIES(client PRIVATE unix/threads) +# ENDIF() +#ENDIF() + +SOURCE_GROUP("baseq2\\sources" FILES ${SRC_BASEQ2}) +SOURCE_GROUP("baseq2\\headers" FILES ${HEADERS_BASEQ2}) +SOURCE_GROUP("client\\sources" FILES ${SRC_CLIENT}) +SOURCE_GROUP("client\\headers" FILES ${HEADERS_CLIENT}) +SOURCE_GROUP("server\\sources" FILES ${SRC_SERVER}) +SOURCE_GROUP("server\\headers" 
FILES ${HEADERS_SERVER}) +SOURCE_GROUP("common\\sources" FILES ${SRC_COMMON}) +SOURCE_GROUP("common\\headers" FILES ${HEADERS_COMMON}) +SOURCE_GROUP("refresh\\sources" FILES ${SRC_REFRESH} ${SRC_VKPT}) +SOURCE_GROUP("refresh\\headers" FILES ${HEADERS_VKPT}) +SOURCE_GROUP("refresh\\shaders" FILES ${SRC_SHADERS}) +SOURCE_GROUP("shared\\sources" FILES ${SRC_SHARED}) +SOURCE_GROUP("windows\\sources" FILES ${SRC_WINDOWS}) +SOURCE_GROUP("windows\\headers" FILES ${HEADERS_WINDOWS}) + +IF (WIN32) + TARGET_INCLUDE_DIRECTORIES(client PRIVATE ../VC/inc) + TARGET_INCLUDE_DIRECTORIES(gamex86 PRIVATE ../VC/inc) + + TARGET_LINK_LIBRARIES(client winmm ws2_32) + + set_target_properties(client PROPERTIES VS_DEBUGGER_WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}") +ENDIF() + +TARGET_INCLUDE_DIRECTORIES(gamex86 PRIVATE ../inc) + +TARGET_INCLUDE_DIRECTORIES(client PRIVATE ../inc) +TARGET_INCLUDE_DIRECTORIES(client PRIVATE "${ZLIB_INCLUDE_DIRS}") + +TARGET_LINK_LIBRARIES(client PNG::png) +TARGET_LINK_LIBRARIES(client JPEG::jpeg) +TARGET_LINK_LIBRARIES(client SDL2::SDL2main SDL2::SDL2) + +SET_TARGET_PROPERTIES(client + PROPERTIES + RUNTIME_OUTPUT_DIRECTORY_DEBUG "${CMAKE_SOURCE_DIR}" + RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO "${CMAKE_SOURCE_DIR}" + DEBUG_POSTFIX "" +) + +SET_TARGET_PROPERTIES(gamex86 + PROPERTIES + RUNTIME_OUTPUT_DIRECTORY_DEBUG "${CMAKE_SOURCE_DIR}/baseq2" + RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO "${CMAKE_SOURCE_DIR}/baseq2" + DEBUG_POSTFIX "" +) + +IF(IS_64_BIT) + SET_TARGET_PROPERTIES(gamex86 + PROPERTIES + RUNTIME_OUTPUT_NAME "gamex86_64" + ) +ENDIF() diff --git a/src/baseq2/g_cmds.c b/src/baseq2/g_cmds.c index 46c4970..2cb1c02 100644 --- a/src/baseq2/g_cmds.c +++ b/src/baseq2/g_cmds.c @@ -286,8 +286,6 @@ argv(0) god */ void Cmd_God_f(edict_t *ent) { - char *msg; - if (deathmatch->value && !sv_cheats->value) { gi.cprintf(ent, PRINT_HIGH, "You must run the server with '+set cheats 1' to enable this command.\n"); return; @@ -295,11 +293,9 @@ void Cmd_God_f(edict_t *ent) ent->flags ^= FL_GODMODE; if (!(ent->flags & FL_GODMODE)) - msg = "godmode OFF\n"; + gi.cprintf(ent, PRINT_HIGH, "godmode OFF\n"); else - msg = "godmode ON\n"; - - gi.cprintf(ent, PRINT_HIGH, msg); + gi.cprintf(ent, PRINT_HIGH, "godmode ON\n"); } @@ -314,8 +310,6 @@ argv(0) notarget */ void Cmd_Notarget_f(edict_t *ent) { - char *msg; - if (deathmatch->value && !sv_cheats->value) { gi.cprintf(ent, PRINT_HIGH, "You must run the server with '+set cheats 1' to enable this command.\n"); return; @@ -323,11 +317,9 @@ void Cmd_Notarget_f(edict_t *ent) ent->flags ^= FL_NOTARGET; if (!(ent->flags & FL_NOTARGET)) - msg = "notarget OFF\n"; + gi.cprintf(ent, PRINT_HIGH, "notarget OFF\n"); else - msg = "notarget ON\n"; - - gi.cprintf(ent, PRINT_HIGH, msg); + gi.cprintf(ent, PRINT_HIGH, "notarget ON\n"); } @@ -340,8 +332,6 @@ argv(0) noclip */ void Cmd_Noclip_f(edict_t *ent) { - char *msg; - if (deathmatch->value && !sv_cheats->value) { gi.cprintf(ent, PRINT_HIGH, "You must run the server with '+set cheats 1' to enable this command.\n"); return; @@ -349,13 +339,11 @@ void Cmd_Noclip_f(edict_t *ent) if (ent->movetype == MOVETYPE_NOCLIP) { ent->movetype = MOVETYPE_WALK; - msg = "noclip OFF\n"; + gi.cprintf(ent, PRINT_HIGH, "noclip OFF\n"); } else { ent->movetype = MOVETYPE_NOCLIP; - msg = "noclip ON\n"; + gi.cprintf(ent, PRINT_HIGH, "noclip ON\n"); } - - gi.cprintf(ent, PRINT_HIGH, msg); } diff --git a/src/baseq2/g_combat.c b/src/baseq2/g_combat.c index 578ee88..d2f41d5 100644 --- a/src/baseq2/g_combat.c +++ b/src/baseq2/g_combat.c @@ -459,7 +459,10 
@@ void T_Damage(edict_t *targ, edict_t *inflictor, edict_t *attacker, vec3_t dir, // do the damage if (take) { if ((targ->svflags & SVF_MONSTER) || (client)) - SpawnDamage(TE_BLOOD, point, normal, take); + { + // SpawnDamage(TE_BLOOD, point, normal, take); + SpawnDamage(TE_BLOOD, point, dir, take); + } else SpawnDamage(te_sparks, point, normal, take); diff --git a/src/baseq2/m_move.c b/src/baseq2/m_move.c index 5325b27..481c9a1 100644 --- a/src/baseq2/m_move.c +++ b/src/baseq2/m_move.c @@ -408,7 +408,7 @@ void SV_NewChaseDir(edict_t *actor, edict_t *enemy, float dist) } // try other directions - if (((rand() & 3) & 1) || abs(deltay) > abs(deltax)) { + if (((rand() & 3) & 1) || fabsf(deltay) > fabsf(deltax)) { tdir = d[1]; d[1] = d[2]; d[2] = tdir; diff --git a/src/client/client.h b/src/client/client.h index 8b70259..9f084c6 100644 --- a/src/client/client.h +++ b/src/client/client.h @@ -74,6 +74,8 @@ typedef struct centity_s { #endif int fly_stoptime; + + int id; } centity_t; extern centity_t cl_entities[MAX_EDICTS]; @@ -785,6 +787,7 @@ void CL_MuzzleFlash2(void); void CL_TeleporterParticles(vec3_t org); void CL_TeleportParticles(vec3_t org); void CL_ParticleEffect(vec3_t org, vec3_t dir, int color, int count); +void CL_BloodParticleEffect(vec3_t org, vec3_t dir, int color, int count); void CL_ParticleEffect2(vec3_t org, vec3_t dir, int color, int count); cparticle_t *CL_AllocParticle(void); void CL_RunParticles(void); diff --git a/src/client/demo.c b/src/client/demo.c index 644cb09..71950e6 100644 --- a/src/client/demo.c +++ b/src/client/demo.c @@ -569,7 +569,8 @@ static int read_first_message(qhandle_t f) type = 0; } - if (msglen < 64 || msglen > sizeof(msg_read_buffer)) { + // if (msglen < 64 || msglen > sizeof(msg_read_buffer)) { + if (msglen > sizeof(msg_read_buffer)) { return Q_ERR_INVALID_FORMAT; } diff --git a/src/client/download.c b/src/client/download.c index 0189685..248895c 100644 --- a/src/client/download.c +++ b/src/client/download.c @@ -410,6 +410,7 @@ static int inflate_udp_download(byte *data, int inlen, int outlen) // should never happen Com_Error(ERR_DROP, "Compressed server packet received, " "but no zlib support linked in."); + return 0; #endif } diff --git a/src/client/effects.c b/src/client/effects.c index 02250a6..bf8bbd1 100644 --- a/src/client/effects.c +++ b/src/client/effects.c @@ -877,6 +877,50 @@ void CL_ParticleEffect(vec3_t org, vec3_t dir, int color, int count) } } +void CL_BloodParticleEffect(vec3_t org, vec3_t dir, int color, int count) +{ + int i, j; + cparticle_t *p; + float d; + + // add decal: + decal_t dec = { + .pos = {org[0],org[1],org[2]}, + .dir = {dir[0],dir[1],dir[2]}, + .spread = 0.25f, + .length = 350}; + R_AddDecal(&dec); + + float a[3] = {dir[1], -dir[2], dir[0]}; + float b[3] = {-dir[2], dir[0], dir[1]}; + + for (i = 0; i < count; i++) { + p = CL_AllocParticle(); + if (!p) + return; + + p->time = cl.time; + p->color = color + (rand() & 7); + + d = (rand() & 31) * 10.0f; + for (j = 0; j < 3; j++) { + p->org[j] = org[j] + ((rand() & 7) - 4) + d * (dir[j] + + a[j] * 0.5f*((rand() & 31) / 32.0f - .5f) + + b[j] * 0.5f*((rand() & 31) / 32.0f - .5f)); + + p->vel[j] = 10.0f*dir[j] + crand() * 20; + } + // fake gravity + p->org[2] -= d*d * .001f; + + p->accel[0] = p->accel[1] = 0; + p->accel[2] = -PARTICLE_GRAVITY; + p->alpha = 1.0; + + p->alphavel = -1.0 / (0.5 + frand() * 0.3); + } +} + /* =============== diff --git a/src/client/entities.c b/src/client/entities.c index 9839c76..5eceee5 100644 --- a/src/client/entities.c +++ 
b/src/client/entities.c @@ -49,6 +49,8 @@ static inline qboolean entity_optimized(const entity_state_t *state) static inline void entity_update_new(centity_t *ent, const entity_state_t *state, const vec_t *origin) { + static int entity_ctr; + ent->id = ++entity_ctr; ent->trailcount = 1024; // for diminishing rocket / grenade trails // duplicate the current state so lerping doesn't hurt anything @@ -93,9 +95,9 @@ entity_update_old(centity_t *ent, const entity_state_t *state, const vec_t *orig || state->modelindex4 != ent->current.modelindex4 || event == EV_PLAYER_TELEPORT || event == EV_OTHER_TELEPORT - || abs(origin[0] - ent->current.origin[0]) > 512 - || abs(origin[1] - ent->current.origin[1]) > 512 - || abs(origin[2] - ent->current.origin[2]) > 512 + || fabsf(origin[0] - ent->current.origin[0]) > 512 + || fabsf(origin[1] - ent->current.origin[1]) > 512 + || fabsf(origin[2] - ent->current.origin[2]) > 512 || cl_nolerp->integer == 1) { // some data changes will force no lerping ent->trailcount = 1024; // for diminishing rocket / grenade trails @@ -488,6 +490,7 @@ static void CL_AddPacketEntities(void) s1 = &cl.entityStates[i]; cent = &cl_entities[s1->number]; + ent.id = cent->id; effects = s1->effects; renderfx = s1->renderfx; diff --git a/src/client/precache.c b/src/client/precache.c index 8127ee3..dff31c9 100644 --- a/src/client/precache.c +++ b/src/client/precache.c @@ -21,6 +21,7 @@ with this program; if not, write to the Free Software Foundation, Inc., // #include "client.h" +#include "client/sound/ogg.h" /* ================ @@ -381,6 +382,10 @@ void CL_PrepRefresh(void) Con_ClearNotify_f(); SCR_UpdateScreen(); + +#ifdef OGG + OGG_ParseCmd(cl.configstrings[CS_CDTRACK]); +#endif } /* diff --git a/src/client/sound/main.c b/src/client/sound/main.c index 776c3d9..fc00401 100644 --- a/src/client/sound/main.c +++ b/src/client/sound/main.c @@ -18,6 +18,7 @@ with this program; if not, write to the Free Software Foundation, Inc., // snd_main.c -- common sound functions #include "sound.h" +#include "client/sound/ogg.h" // ======================================================================= // Internal sound data & structures @@ -194,6 +195,9 @@ void S_Init(void) paintedtime = 0; s_registration_sequence = 1; +#ifdef OGG + OGG_Init(); +#endif fail: Cvar_SetInteger(s_enable, s_started, FROM_CODE); @@ -259,6 +263,9 @@ void S_Shutdown(void) Cmd_Deregister(c_sound); Z_LeakTest(TAG_SOUND); +#ifdef OGG + OGG_Shutdown(); +#endif } void S_Activate(void) @@ -1133,6 +1140,10 @@ void S_Update(void) // add loopsounds S_AddLoopSounds(); +#ifdef OGG + OGG_Stream(); +#endif + #ifdef _DEBUG // // debugging output diff --git a/src/client/sound/mix.c b/src/client/sound/mix.c index f835e50..d3ce0e7 100644 --- a/src/client/sound/mix.c +++ b/src/client/sound/mix.c @@ -24,6 +24,9 @@ with this program; if not, write to the Free Software Foundation, Inc., static int snd_scaletable[32][256]; static int snd_vol; +samplepair_t s_rawsamples[S_MAX_RAW_SAMPLES]; +int s_rawend = 0; + static void WriteLinearBlast(int16_t *out, samplepair_t *samp, int count) { int i, val; @@ -255,6 +258,19 @@ void S_PaintChannels(int endtime) } + if (s_rawend >= paintedtime) + { + /* add from the streaming sound source */ + int stop = (end < s_rawend) ? 
end : s_rawend; + + for (int i = paintedtime; i < stop; i++) + { + int s = i & (S_MAX_RAW_SAMPLES - 1); + paintbuffer[i - paintedtime].left += s_rawsamples[s].left; + paintbuffer[i - paintedtime].right += s_rawsamples[s].right; + } + } + // transfer out according to DMA format TransferPaintBuffer(paintbuffer, end); paintedtime = end; @@ -279,3 +295,97 @@ void S_InitScaletable(void) s_volume->modified = qfalse; } +/* + * Cinematic streaming and voice over network. + * This could be used for chat over network, but + * that would be terrible slow. + */ +void +S_RawSamples(int samples, int rate, int width, + int channels, byte *data, float volume) +{ + if (!s_started) return; + + if (s_rawend < paintedtime) + s_rawend = paintedtime; + + // rescale because ogg is always 44100 and dma is configurable via s_khz + float scale = (float)rate / dma.speed; + int intVolume = (int)(256 * volume); + + if ((channels == 2) && (width == 2)) + { + for (int i = 0; ; i++) + { + int src = (int)(i * scale); + + if (src >= samples) + { + break; + } + + int dst = s_rawend & (S_MAX_RAW_SAMPLES - 1); + s_rawend++; + s_rawsamples[dst].left = ((short *)data)[src * 2] * intVolume; + s_rawsamples[dst].right = ((short *)data)[src * 2 + 1] * intVolume; + } + } + else if ((channels == 1) && (width == 2)) + { + for (int i = 0; ; i++) + { + int src = (int)(i * scale); + + if (src >= samples) + { + break; + } + + int dst = s_rawend & (S_MAX_RAW_SAMPLES - 1); + s_rawend++; + s_rawsamples[dst].left = ((short *)data)[src] * intVolume; + s_rawsamples[dst].right = ((short *)data)[src] * intVolume; + } + } + else if ((channels == 2) && (width == 1)) + { + intVolume *= 256; + + for (int i = 0; ; i++) + { + int src = (int)(i * scale); + + if (src >= samples) + { + break; + } + + int dst = s_rawend & (S_MAX_RAW_SAMPLES - 1); + s_rawend++; + s_rawsamples[dst].left = + (((byte *)data)[src * 2] - 128) * intVolume; + s_rawsamples[dst].right = + (((byte *)data)[src * 2 + 1] - 128) * intVolume; + } + } + else if ((channels == 1) && (width == 1)) + { + intVolume *= 256; + + for (int i = 0; ; i++) + { + int src = (int)(i * scale); + + if (src >= samples) + { + break; + } + + int dst = s_rawend & (S_MAX_RAW_SAMPLES - 1); + s_rawend++; + s_rawsamples[dst].left = (((byte *)data)[src] - 128) * intVolume; + s_rawsamples[dst].right = (((byte *)data)[src] - 128) * intVolume; + } + } +} + diff --git a/src/client/sound/ogg.c b/src/client/sound/ogg.c new file mode 100644 index 0000000..d9de68d --- /dev/null +++ b/src/client/sound/ogg.c @@ -0,0 +1,398 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, + * USA. + * + * ======================================================================= + * + * This file implements an interface to libvorbis for decoding + * OGG/Vorbis files. Strongly spoken this file isn't part of the sound + * system but part of the main client. 
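An illustrative scalar sketch (not part of the patch) of the raw-sample path the mix.c hunk above introduces: decoded PCM is written into a power-of-two ring buffer with a simple drop/duplicate resample from the source rate to the DMA rate. The names here (raw_samples_sketch, RAW_SAMPLES, dma_speed, pair_t) are stand-ins for S_RawSamples, S_MAX_RAW_SAMPLES, dma.speed and samplepair_t, and only the 16-bit stereo case is shown.

#define RAW_SAMPLES 8192                        /* must stay a power of two for the mask */

typedef struct { int left, right; } pair_t;
static pair_t raw[RAW_SAMPLES];
static int    raw_end;                          /* running output sample index */

static void raw_samples_sketch(int samples, int rate, const short *pcm,
                               int dma_speed, float volume)
{
    float scale = (float)rate / dma_speed;      /* e.g. 44100 Hz decoded vs. configured DMA rate */
    int   vol   = (int)(256 * volume);          /* 256x fixed-point volume scale, as in the patch */

    for (int i = 0; ; i++) {
        int src = (int)(i * scale);             /* source frame feeding output frame i */
        if (src >= samples)
            break;
        int dst = raw_end++ & (RAW_SAMPLES - 1);    /* ring-buffer slot, wraps via masking */
        raw[dst].left  = pcm[src * 2 + 0] * vol;
        raw[dst].right = pcm[src * 2 + 1] * vol;
    }
}

S_PaintChannels() then adds s_rawsamples[i & (S_MAX_RAW_SAMPLES - 1)] for i from paintedtime up to min(end, s_rawend) on top of the freshly mixed paint buffer, which is why S_RawSamples() first clamps s_rawend up to paintedtime.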
It justs converts Vorbis streams + * into normal, raw Wave stream which are injected into the backends as + * if they were normal "raw" samples. At this moment only background + * music playback and in theory .cin movie file playback is supported. + * + * ======================================================================= + */ + +#ifdef OGG + +#define OV_EXCLUDE_STATIC_CALLBACKS + +#ifndef _WIN32 +#include +#endif +#include +#include + +#include "shared/shared.h" +#include "common/cvar.h" +#include "common/files.h" +#include "client/client.h" +#include "client/sound/sound.h" +#include "client/sound/ogg.h" +#include "sound.h" + +qboolean ogg_started = qfalse; /* Initialization flag. */ +byte *ogg_buffer = 0; /* File buffer. */ +char ovBuf[4096]; /* Buffer for sound. */ +int ogg_curfile = -1; /* Index of currently played file. */ +int ovSection = 0; /* Position in Ogg Vorbis file. */ +ogg_status_t ogg_status = STOP; /* Status indicator. */ +cvar_t *ogg_volume = 0; /* Music volume. */ +OggVorbis_File ovFile; /* Ogg Vorbis file. */ +vorbis_info *ogg_info = 0; /* Ogg Vorbis file information */ + +void +OGG_Init(void) +{ + if (ogg_started) return; + + Com_Printf("Starting Ogg Vorbis.\n"); + + /* Skip initialization if disabled. */ + cvar_t *cv = Cvar_Get("ogg_enable", "1", CVAR_ARCHIVE); + + if (cv->value != 1) + { + Com_Printf("Ogg Vorbis not initializing.\n"); + return; + } + + /* Cvars. */ + ogg_volume = Cvar_Get("ogg_volume", "0.7", CVAR_ARCHIVE); + + /* Console commands. */ + Cmd_AddCommand("ogg_pause", OGG_PauseCmd); + Cmd_AddCommand("ogg_play", OGG_PlayCmd); + Cmd_AddCommand("ogg_reinit", OGG_Reinit); + Cmd_AddCommand("ogg_resume", OGG_ResumeCmd); + Cmd_AddCommand("ogg_seek", OGG_SeekCmd); + Cmd_AddCommand("ogg_status", OGG_StatusCmd); + Cmd_AddCommand("ogg_stop", OGG_Stop); + + ogg_started = qtrue; +} + +void +OGG_Shutdown(void) +{ + if (!ogg_started) return; + + Com_Printf("Shutting down Ogg Vorbis.\n"); + + OGG_Stop(); + + /* Remove console commands. */ + Cmd_RemoveCommand("ogg_pause"); + Cmd_RemoveCommand("ogg_play"); + Cmd_RemoveCommand("ogg_reinit"); + Cmd_RemoveCommand("ogg_resume"); + Cmd_RemoveCommand("ogg_seek"); + Cmd_RemoveCommand("ogg_status"); + Cmd_RemoveCommand("ogg_stop"); + + ogg_started = qfalse; +} + +void +OGG_Reinit(void) +{ + OGG_Shutdown(); + OGG_Init(); +} + +void +OGG_Seek(ogg_seek_t type, double offset) +{ + /* Check if the file is seekable. */ + if (ov_seekable(&ovFile) == 0) + { + Com_Printf("OGG_Seek: file is not seekable.\n"); + return; + } + + /* Get file information. */ + double pos = ov_time_tell(&ovFile); + double total = ov_time_total(&ovFile, -1); + + switch (type) + { + case ABS: + if ((offset >= 0) && (offset <= total)) + { + if (ov_time_seek(&ovFile, offset) != 0) + Com_Printf("OGG_Seek: could not seek.\n"); + else + Com_Printf("%0.2f -> %0.2f of %0.2f.\n", pos, offset, total); + } + else + { + Com_Printf("OGG_Seek: invalid offset.\n"); + } + break; + case REL: + if ((pos + offset >= 0) && (pos + offset <= total)) + { + if (ov_time_seek(&ovFile, pos + offset) != 0) + Com_Printf("OGG_Seek: could not seek.\n"); + else + Com_Printf("%0.2f -> %0.2f of %0.2f.\n", + pos, pos + offset, total); + } + else + { + Com_Printf("OGG_Seek: invalid offset.\n"); + } + break; + } +} + +// Play Ogg Vorbis file (with absolute or relative index). +qboolean +OGG_Open(ogg_seek_t type, int offset) +{ + int size; /* File size. */ + int pos = -1; /* Absolute position. */ + int res; /* Error indicator. */ + + switch (type) + { + case ABS: + /* Absolute index. 
*/ + pos = offset; + break; + case REL: + /* Simulate a loopback. */ + if ((ogg_curfile == -1) && (offset < 1)) + offset++; + if (ogg_curfile + offset < 1) + offset = 1; + pos = ogg_curfile + offset; + break; + } + + /* Check running music. */ + if (ogg_status == PLAY) + { + if (ogg_curfile == pos) + return qtrue; + else + OGG_Stop(); + } + + char filename[1024]; + snprintf(filename, sizeof(filename), "%s/%d.ogg", OGG_DIR, pos); + /* Find file. */ + if ((size = FS_LoadFile(filename, (void **)&ogg_buffer)) == -1) + { + Com_Printf("OGG_Open: could not open %d (%s): %s.\n", + pos, filename, strerror(errno)); + return qfalse; + } + + /* Open ogg vorbis file. */ + if ((res = ov_open(NULL, &ovFile, (char *)ogg_buffer, size)) < 0) + { + Com_Printf("OGG_Open: '%s' is not a valid Ogg Vorbis file (error %i).\n", + filename, res); + FS_FreeFile(ogg_buffer); + ogg_buffer = NULL; + return qfalse; + } + + ogg_info = ov_info(&ovFile, 0); + + if (!ogg_info) + { + Com_Printf("OGG_Open: Unable to get stream information for %s.\n", + filename); + ov_clear(&ovFile); + FS_FreeFile(ogg_buffer); + ogg_buffer = NULL; + return qfalse; + } + + /* Play file. */ + ovSection = 0; + ogg_curfile = pos; + ogg_status = PLAY; + + return qtrue; +} + +// Play a portion of the currently opened file. +int +OGG_Read(void) +{ + if(!ogg_info) return 0; + + /* Read and resample. */ + int res = ov_read(&ovFile, ovBuf, sizeof(ovBuf), + 0 /* big endian */, OGG_SAMPLEWIDTH, 1, + &ovSection); + S_RawSamples(res / (OGG_SAMPLEWIDTH * ogg_info->channels), + ogg_info->rate, OGG_SAMPLEWIDTH, ogg_info->channels, + (byte *)ovBuf, ogg_volume->value); + + /* Check for end of file. */ + if (res == 0) OGG_Stop(); + + return res; +} + +// Stop playing the current file. +void +OGG_Stop(void) +{ + if (ogg_status == STOP) + return; + + ov_clear(&ovFile); + ogg_status = STOP; + ogg_info = 0; + + if (ogg_buffer) + { + FS_FreeFile(ogg_buffer); + ogg_buffer = 0; + } +} + +// Stream music. +void +OGG_Stream(void) +{ + if (!ogg_started) + return; + + if (ogg_status == PLAY) + { + /* Read that number samples into the buffer, that + were played since the last call to this function. + This keeps the buffer at all times at an "optimal" + fill level. */ + while (paintedtime + S_MAX_RAW_SAMPLES - 2048 > s_rawend) + { + if(!ogg_info) return; + OGG_Read(); + } + } +} + +// Parse play controls. 
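A usage sketch, not part of the patch: OGG_Stream() above keeps calling OGG_Read() until s_rawend is roughly S_MAX_RAW_SAMPLES - 2048 (6144) samples ahead of paintedtime, so the mixer always finds streamed data. The parser that follows accepts the track strings shown below; the mapping is read off its switch, the wrapper function is made up for illustration, and OGG_DIR comes from ogg.h, which is not shown in this diff.

static void ogg_parse_examples(void)
{
    OGG_ParseCmd("4");   /* default case: honours ogg_enable, then OGG_Open(ABS, 4), i.e. OGG_DIR/4.ogg */
    OGG_ParseCmd("#7");  /* '#': OGG_Open(ABS, 7) regardless of ogg_enable                              */
    OGG_ParseCmd(">");   /* '>' with no number: OGG_Open(REL, 1), next track                            */
    OGG_ParseCmd("<2");  /* '<n': OGG_Open(REL, -2), two tracks back                                    */
}

CL_PrepRefresh() reaches the same entry point with cl.configstrings[CS_CDTRACK], which is normally just a plain track number.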
+void +OGG_ParseCmd(char *arg) +{ + cvar_t *ogg_enable = Cvar_Get("ogg_enable", "1", CVAR_ARCHIVE); + + switch (arg[0]) + { + case '#': + OGG_Open(ABS, atol(arg+1)); + break; + case '>': + if (strlen(arg) > 1) + OGG_Open(REL, atol(arg+1)); + else + OGG_Open(REL, 1); + break; + case '<': + if (strlen(arg) > 1) + OGG_Open(REL, -atol(arg+1)); + else + OGG_Open(REL, -1); + break; + default: + if (ogg_enable->value != 0) + OGG_Open(ABS, atol(arg)); + break; + } +} + +void +OGG_PauseCmd(void) +{ + ogg_status = PAUSE; +} + +void +OGG_PlayCmd(void) +{ + if (Cmd_Argc() < 2) + Com_Printf("Usage: ogg_play {filename | #n | >n | n}\n"); + return; + } + + switch (Cmd_Argv(1)[0]) + { + case '>': + OGG_Seek(REL, strtod(Cmd_Argv(1) + 1, (char **)NULL)); + break; + case '<': + OGG_Seek(REL, -strtod(Cmd_Argv(1) + 1, (char **)NULL)); + break; + default: + OGG_Seek(ABS, strtod(Cmd_Argv(1), (char **)NULL)); + break; + } +} + +void +OGG_StatusCmd(void) +{ + switch (ogg_status) + { + case PLAY: + Com_Printf("Playing file %d at %0.2f seconds.\n", + ogg_curfile + 1, + ov_time_tell(&ovFile)); + break; + case PAUSE: + Com_Printf("Paused file %d at %0.2f seconds.\n", + ogg_curfile + 1, + ov_time_tell(&ovFile)); + break; + case STOP: + if (ogg_curfile == -1) + Com_Printf("Stopped.\n"); + else + Com_Printf("Stopped file %d.\n", + ogg_curfile + 1); + break; + } +} +#endif +/* vim: set ts=8 sw=4 tw=0 et : */ diff --git a/src/client/sound/sound.h b/src/client/sound/sound.h index d4247b5..6cf9734 100644 --- a/src/client/sound/sound.h +++ b/src/client/sound/sound.h @@ -148,7 +148,7 @@ extern qboolean s_active; extern channel_t channels[MAX_CHANNELS]; extern int s_numchannels; -extern int paintedtime; +extern int paintedtime; extern playsound_t s_pendingplays; extern vec3_t listener_origin; @@ -157,6 +157,10 @@ extern vec3_t listener_right; extern vec3_t listener_up; extern int listener_entnum; +#define S_MAX_RAW_SAMPLES 8192 +extern samplepair_t s_rawsamples[S_MAX_RAW_SAMPLES]; +extern int s_rawend; + extern wavinfo_t s_info; extern cvar_t *s_volume; diff --git a/src/client/tent.c b/src/client/tent.c index fbf1b10..1feced5 100644 --- a/src/client/tent.c +++ b/src/client/tent.c @@ -927,7 +927,10 @@ void CL_ParseTEnt(void) switch (te.type) { case TE_BLOOD: // bullet hitting flesh if (!(cl_disable_particles->integer & NOPART_BLOOD)) - CL_ParticleEffect(te.pos1, te.dir, 0xe8, 60); + { + // CL_ParticleEffect(te.pos1, te.dir, 0xe8, 60); + CL_BloodParticleEffect(te.pos1, te.dir, 0xe8, 1000); + } break; case TE_GUNSHOT: // bullet hitting wall @@ -1003,6 +1006,9 @@ void CL_ParseTEnt(void) ex->ent.flags = RF_FULLBRIGHT | RF_TRANSLUCENT; switch (te.type) { case TE_BLASTER: +#if USE_REF == REF_GLPT + R_SetRayProbe(te.pos1, te.dir); +#endif CL_BlasterParticles(te.pos1, te.dir); ex->lightcolor[0] = 1; ex->lightcolor[1] = 1; diff --git a/src/client/ui/ui.c b/src/client/ui/ui.c index 21cb836..9a2ae4b 100644 --- a/src/client/ui/ui.c +++ b/src/client/ui/ui.c @@ -625,7 +625,7 @@ void UI_Init(void) Cmd_Register(c_ui); ui_debug = Cvar_Get("ui_debug", "0", 0); - ui_open = Cvar_Get("ui_open", "0", 0); + ui_open = Cvar_Get("ui_open", "1", 0); UI_ModeChanged(); diff --git a/src/common/bsp.c b/src/common/bsp.c index 804b98a..905af6e 100644 --- a/src/common/bsp.c +++ b/src/common/bsp.c @@ -123,6 +123,7 @@ LOAD(Texinfo) out->c.value = LittleLong(in->value); #if USE_REF + out->radiance = in->value; for (j = 0; j < 2; j++) { for (k = 0; k < 3; k++) { out->axis[j][k] = LittleFloat(in->vecs[j][k]); diff --git a/src/common/cmd.c b/src/common/cmd.c index 
65cfb43..6739f3b 100644 --- a/src/common/cmd.c +++ b/src/common/cmd.c @@ -1322,6 +1322,7 @@ char *Cmd_MacroExpandString(const char *text, qboolean aliasHack) if (len >= MAX_STRING_CHARS) { Com_Printf("Expanded line exceeded %i chars, discarded.\n", MAX_STRING_CHARS); + Com_Printf("was %s\n", text); return NULL; } diff --git a/src/common/common.c b/src/common/common.c index a136c43..5ce47eb 100644 --- a/src/common/common.c +++ b/src/common/common.c @@ -1041,7 +1041,7 @@ void Qcommon_Init(int argc, char **argv) Com_Printf("====== " PRODUCT " initialized ======\n\n"); Com_LPrintf(PRINT_NOTICE, APPLICATION " " VERSION ", " __DATE__ "\n"); - Com_Printf("http://skuller.net/q2pro/\n\n"); + Com_Printf("http://brechpunkt.de\n\n"); time(&com_startTime); diff --git a/src/common/files.c b/src/common/files.c index b166bd3..5bdeaaf 100644 --- a/src/common/files.c +++ b/src/common/files.c @@ -2092,7 +2092,7 @@ static pack_t *load_pak_file(const char *packfile) } header.dirlen = LittleLong(header.dirlen); - if (header.dirlen > LONG_MAX || header.dirlen % sizeof(dpackfile_t)) { + if (header.dirlen > INT_MAX || header.dirlen % sizeof(dpackfile_t)) { Com_Printf("%s has bad directory length\n", packfile); goto fail; } @@ -2125,7 +2125,7 @@ static pack_t *load_pak_file(const char *packfile) for (i = 0, dfile = info; i < num_files; i++, dfile++) { dfile->filepos = LittleLong(dfile->filepos); dfile->filelen = LittleLong(dfile->filelen); - if (dfile->filelen > LONG_MAX || dfile->filepos > LONG_MAX - dfile->filelen) { + if (dfile->filelen > INT_MAX || dfile->filepos > INT_MAX - dfile->filelen) { Com_Printf("%s has bad directory structure\n", packfile); goto fail; } diff --git a/src/common/math.c b/src/common/math.c index 5438b81..0e2a021 100644 --- a/src/common/math.c +++ b/src/common/math.c @@ -345,7 +345,16 @@ int BoxOnPlaneSide(vec3_t emins, vec3_t emaxs, cplane_t *p) return sides; } -#if USE_REF +void RotatePointAroundVector(vec3_t dst, const vec3_t dir, const vec3_t point, float degrees) +{ + vec3_t matrix[3]; + + SetupRotationMatrix(matrix, dir, degrees); + + dst[0] = DotProduct(matrix[0], point); + dst[1] = DotProduct(matrix[1], point); + dst[2] = DotProduct(matrix[2], point); +} /* ================== @@ -387,18 +396,10 @@ void SetupRotationMatrix(vec3_t matrix[3], const vec3_t dir, float degrees) matrix[2][2] = (one_c * zz) + c; } -#if USE_REF == REF_SOFT -void RotatePointAroundVector(vec3_t dst, const vec3_t dir, const vec3_t point, float degrees) -{ - vec3_t matrix[3]; - - SetupRotationMatrix(matrix, dir, degrees); +#if USE_REF - dst[0] = DotProduct(matrix[0], point); - dst[1] = DotProduct(matrix[1], point); - dst[2] = DotProduct(matrix[2], point); -} +#if USE_REF == REF_SOFT void ProjectPointOnPlane(vec3_t dst, const vec3_t p, const vec3_t normal) { diff --git a/src/common/msg.c b/src/common/msg.c index c7cfc1e..f972c22 100644 --- a/src/common/msg.c +++ b/src/common/msg.c @@ -1572,14 +1572,12 @@ size_t MSG_ReadStringLine(char *dest, size_t size) return len; } +#if USE_CLIENT static inline float MSG_ReadCoord(void) { return SHORT2COORD(MSG_ReadShort()); } -#if !USE_CLIENT -static inline -#endif void MSG_ReadPos(vec3_t pos) { pos[0] = MSG_ReadCoord(); @@ -1597,7 +1595,6 @@ static inline float MSG_ReadAngle16(void) return SHORT2ANGLE(MSG_ReadShort()); } -#if USE_CLIENT void MSG_ReadDir(vec3_t dir) { int b; diff --git a/src/common/qbvhmp.c b/src/common/qbvhmp.c new file mode 100644 index 0000000..8c84781 --- /dev/null +++ b/src/common/qbvhmp.c @@ -0,0 +1,1483 @@ +/* + This file is part of corona-13. 
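Before the new file continues, a minimal illustration (not engine code) of why the files.c hunk above swaps LONG_MAX for INT_MAX: on LP64 targets long is 64 bits wide, so a bound like dirlen > LONG_MAX can never trigger there and the sanity checks only had an effect on 32-bit builds, while comparing against INT_MAX behaves identically everywhere. The helper assumes the on-disk .pak fields are treated as unsigned 32-bit values, which is what the original bound checks suggest.

#include <limits.h>
#include <stdint.h>

/* Reject directory entries whose extent cannot be represented as a positive int. */
static int pak_entry_sane(uint32_t filepos, uint32_t filelen)
{
    return filelen <= INT_MAX && filepos <= INT_MAX - filelen;
}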
+ + copyright (c) 2016 johannes hanika. + + corona-13 is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + corona-13 is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with corona-13. If not, see . +*/ + +#include "common/qbvhmp.h" +#include "shared/shared.h" +#include "threads.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef ACCEL_NO_VLA +#define PER_THREAD_VLA(x) 1024 // ((assert(x<=1024)),1024) +#else +#define PER_THREAD_VLA(x) x +#endif + +// enforced during construction: +#define MAX_TREE_DEPTH 100 +// skip a couple of prims for SAH evaluation on the top levels: +#define SAH_LOG_STEP 3 +// number of primitives per leaf has to be < 32 +#define NUM_TRIS_PER_LEAF 2 +// minimum primitives per thread to parallelise split plane search: +#define NUM_PRIMS_PER_THREAD 100ul +// turn off to compute more honest bvh boxes. being honest slows everything down it seems +#define ACCEL_KD_SPLIT +// dump some stats at the end: +// #define ACCEL_STATS + +typedef struct +{ + int64_t num_tris; + int64_t max_tris_leaf; + int64_t num_nodes; + int64_t max_depth; + int64_t num_leaves; + int64_t sum_depth; + int64_t num_empties; + float parent_box; + float SA; +} +qbvh_stats_t; + +void accel_cleanup(accel_t *b) +{ + if(!b) return; +#ifdef ACCEL_DEBUG + for(int t=0;tthreads->num_threads;t++) + fprintf(stderr, "[accel stats] thread %d accel_intersect: %lu aabb_intersect %lu / %lu prims_intersect %lu\n", t, + b->debug[t].accel_intersect, b->debug[t].aabb_true, b->debug[t].aabb_intersect, b->debug[t].prims_intersect); + free(b->debug); +#endif + aligned_free(b->tree); + if(b->shadow_cache) free(b->shadow_cache); + for(int t=0;tthreads->num_threads;t++) + { + threads_mutex_destroy(&b->queue[t].mutex); + free(b->queue[t].jobs); + } + free(b->queue); + b->queue = 0; + threads_mutex_destroy(&b->mutex); + free(b); +} + +#ifdef ACCEL_STATS +static void checktree(accel_t *b, qbvh_node_t *node, qbvh_stats_t *stats, const int depth) +{ + if(depth == 0) stats->parent_box = (b->aabb[5] - b->aabb[2])*(b->aabb[4] - b->aabb[1]) + (b->aabb[3] - b->aabb[0])*(b->aabb[4] - b->aabb[1]) + (b->aabb[5] - b->aabb[2])*(b->aabb[3] - b->aabb[0]); + const float pbox = stats->parent_box; + int child_tris[4] = {0,0,0,0}; + float child_box[4]; + for(int c=0;c<4;c++) + child_box[c] = (node->aabb0[5].f[c] - node->aabb0[2].f[c])*(node->aabb0[4].f[c] - node->aabb0[1].f[c]) + + (node->aabb0[3].f[c] - node->aabb0[0].f[c])*(node->aabb0[4].f[c] - node->aabb0[1].f[c]) + + (node->aabb0[5].f[c] - node->aabb0[2].f[c])*(node->aabb0[3].f[c] - node->aabb0[0].f[c]); + stats->num_nodes++; + // recurse in all children, collect num tris etc. + int axis0 = (node->child[0] >> 29) & 3; + int axis00 = (node->child[1] >> 29) & 3; + int axis01 = (node->child[2] >> 29) & 3; + assert(axis0 >= 0 && axis0 < 3); + assert(axis00 >= 0 && axis00 < 3); + assert(axis01 >= 0 && axis01 < 3); + // assert(node - b->tree == 0 || (node->parent != -1 && node->parent < b->num_nodes)); + for(int c=0;c<4;c++) + { + uint64_t tris_before = stats->num_tris; + // leaf? 
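/* [editorial annotation, not in the original source] The checks below decode the
 * 32-bit child words written by node_job_work() further down; the layout implied
 * by the masks used in this file is:
 *   bit  31       leaf flag, tested as (1u<<31) here (a few other places in this
 *                 file still test a 64-bit 1ul<<63 mask)
 *   bits 29..30   split axis; child[0], child[1], child[2] carry axis0, axis00
 *                 and axis01 respectively, read back as (child >> 29) & 3
 *   inner node    bits 0..28 hold the index of the child qbvh_node_t
 *                 (child & 0x1fffffff)
 *   leaf node     bits 5..28 hold the first primitive index
 *                 ((child & 0x1fffffff) >> 5) and bits 0..4 the primitive count;
 *                 ~-(1<<5) is just 0x1f, and this 5-bit field is why
 *                 NUM_TRIS_PER_LEAF must stay below 32
 */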
+ if(node->child[c] & (1u<<31)) + { + stats->num_leaves++; + stats->sum_depth += depth; + if(depth > stats->max_depth) stats->max_depth = depth; + const int64_t num_prims = node->child[c] & ~-(1<<5); + const int64_t prims = (node->child[c] & 0x1fffffffu) >> 5; + stats->max_tris_leaf = MAX(num_prims, stats->max_tris_leaf); + if(num_prims > 0) + { + stats->num_tris += num_prims; + if(prims + num_prims > b->num_prims) fprintf(stderr, "[accel] ERR: prims index out of range!\n"); + } + else stats->num_empties++; + } + else + { + if(child_box[c] > 0) stats->parent_box = child_box[c]; + else stats->parent_box = 0.0f; + qbvh_node_t *child = b->tree + (node->child[c] & 0x1fffffffu); + checktree(b, child, stats, depth + 1); + } + child_tris[c] = stats->num_tris - tris_before; + if(isfinite(child_box[c]) && isfinite(pbox)) stats->SA += (child_tris[c]*child_box[c])/pbox; + } +} +#endif + +static inline void accel_refit(accel_t *b, qbvh_node_t *node) +{ +#ifndef ACCEL_STATIC + // ideally this would catch all `root node is a leaf' cases: + if(b->prims->num_prims == 0) return; + for(int c=0;c<4;c++) + { + // leaves are already done, process inner only: + if(!(node->child[c] & (1ul<<63))) + { + qbvh_node_t *child = b->tree + node->child[c]; + accel_refit(b, child); + // set aabb1[c] to fit the four boxes of child[c] + for(int d=0;d<6;d++) + node->aabb1[d].f[c] = child->aabb1[d].f[0]; + for(int k=1;k<4;k++) + for(int d=0;d<3;d++) + { + node->aabb1[d].f[c] = MIN(node->aabb1[d].f[c], child->aabb1[d].f[k]); + node->aabb1[3+d].f[c] = MAX(node->aabb1[3+d].f[c], child->aabb1[3+d].f[k]); + } + } + } +#endif +} + +accel_t* accel_init( + float *aabb, // bounding boxes of primitives + uint32_t *primid, // primitive id array + const int num_prims, // number of primitives + threads_t *t) // thread pool to use during construction +{ + accel_t *b = malloc(sizeof(accel_t)); +#ifdef ACCEL_DEBUG + b->debug = malloc(sizeof(accel_debug_t)*b->threads->num_threads); + memset(b->debug, 0, sizeof(accel_debug_t)*b->threads->num_threads); +#endif + b->shadow_cache = NULL; + b->aabb[0] = b->aabb[1] = b->aabb[2] = FLT_MAX; + b->aabb[3] = b->aabb[4] = b->aabb[5] = - FLT_MAX; + + b->threads = t; + b->num_prims = num_prims; + b->prim_aabb = aabb; + b->primid = primid; + + b->num_nodes = 0; + b->node_bufsize = b->num_prims > 100 ? 
4*b->num_prims: 100; + b->tree = aligned_alloc(128, b->node_bufsize * sizeof(qbvh_node_t)); + if(!b->tree) + fprintf(stderr, "qbvh could not allocate enough memory for node buffer!\n"); + + // init shadow cache: + b->shadow_cache_last = (1<<12) - 1; + b->shadow_cache = malloc(b->threads->num_threads * sizeof(unsigned int) * (b->shadow_cache_last+1)); + memset(b->shadow_cache, 0, b->threads->num_threads * sizeof(unsigned int) * (b->shadow_cache_last+1)); + + // init temp memory for scheduler + threads_mutex_init(&b->mutex, 0); + b->queue = malloc(sizeof(queue_t)*b->threads->num_threads); + for(int t=0;tthreads->num_threads;t++) + { + threads_mutex_init(&b->queue[t].mutex, 0); + b->queue[t].jobs = malloc(sizeof(job_t)*MAX_TREE_DEPTH * 3 + b->threads->num_threads); + b->queue[t].num_jobs = 0; + } + + return b; +} + +void split_job_work( + accel_t *b, + shared_split_job_t *const job, + const int64_t left, + const int64_t right) +{ + const float width = job->aabb1 - job->aabb0; + for(int64_t k=left;kstep) + { + const float min = b->prim_aabb[6 * k + job->d]; + const float max = b->prim_aabb[6 * k + 3 + job->d]; + const int imin = (int)CLAMP((SAH_TESTS+1)*(min - job->aabb0)/width, 0, SAH_TESTS); + const int imax = (int)CLAMP((SAH_TESTS+1)*(max - job->aabb0)/width, 0, SAH_TESTS); + int cost = 8; + // if(b->prims->primid[k].vcnt == 4) cost = 16; + // else if(b->prims->primid[k].vcnt == 2) cost = 1; + __sync_fetch_and_add(job->binmin+imin, cost); + __sync_fetch_and_add(job->binmax+imax, cost); +#ifndef ACCEL_KD_SPLIT + for(int i=imin;i<=imax;i++) + { + common_atomic_min(job->bound_m+i, min); + common_atomic_max(job->bound_M+i, max); + } +#endif + } + __sync_fetch_and_add(&job->done, 1); +} + +static uint64_t node_job_work( + accel_t *b, + qbvh_node_t *node, + const int64_t left, + const int64_t right, + float *paabb, + const int depth, + qbvh_node_t *parent, + const int child); +static void scan_job_work(accel_t *a, job_t *j); +static void swap_job_work(accel_t *a, job_t *j); + +static int do_one_job(accel_t *a, job_type_t mask) +{ + int t = threads_id; + int tries = 0; + job_t job; + while(1) + { + threads_mutex_lock(&a->queue[t].mutex); + int gotjob = a->queue[t].num_jobs > 0; + if(gotjob) + gotjob = (mask == s_job_all) || (a->queue[t].jobs[a->queue[t].num_jobs-1].type != s_job_node); + if(gotjob) + { + job = a->queue[t].jobs[--a->queue[t].num_jobs]; // take copy for sync + threads_mutex_unlock(&a->queue[t].mutex); + break; + } + else + { + threads_mutex_unlock(&a->queue[t].mutex); + if(a->built >= a->num_prims) return 1; // reading is atomic + // search the other queues: + t = (t+1)%a->threads->num_threads; + if(tries ++ >= a->threads->num_threads) + { + usleep(100); + return 0; + } + continue; + } + } + switch(job.type) + { + case s_job_split: + split_job_work(a, job.split.job, job.split.left, job.split.right); + break; + case s_job_node: + { + const uint64_t done = node_job_work(a, job.node.node, job.node.left, job.node.right, job.node.paabb, job.node.depth, job.node.parent, job.node.child); + __sync_fetch_and_add(&a->built, done); + break; + } + case s_job_scan: + scan_job_work(a, &job); + break; + case s_job_swap: + swap_job_work(a, &job); + break; + default: // to formally handle s_job_all + break; + } + return 0; +} + +static float accel_get_split_with_dim( + accel_t *b, + const int64_t left_in, + const int64_t right_in, + const float aabb[6], + const int d, + float *split) +{ + assert(right_in >= left_in); + //*split = 0.5f*(aabb[3+d] + aabb[d]); + //return 0.0f; + + float best = (aabb[d] 
+ aabb[3+d])*0.5f; + float best_score = FLT_MAX; + if(right_in == left_in) + { + *split = best; + return best_score; + } + + shared_split_job_t job; + memset(&job, 0, sizeof(job)); + + job.d = d; + job.p = d == 2 ? 0 : d+1; + job.q = d == 0 ? 2 : d-1; + job.left = left_in; + job.right = right_in; + job.step = (int)(log10f(SAH_LOG_STEP*(right_in - left_in) + 1.0f) + 1.0f); + job.aabb0 = aabb[d]; + job.aabb1 = aabb[3+d]; +#ifndef ACCEL_KD_SPLIT + for(int k=0;k b->threads->num_threads * NUM_PRIMS_PER_THREAD) + { // run in parallel + const int tid = threads_id; + queue_t *q = b->queue+tid; + threads_mutex_lock(&q->mutex); + for(int t=0;tthreads->num_threads;t++) + { + int j = q->num_jobs++; + q->jobs[j].type = s_job_split; + q->jobs[j].split.left = left_in + (int64_t)( t /(double)b->threads->num_threads * (right_in-left_in)); + q->jobs[j].split.right = left_in + (int64_t)((t+1)/(double)b->threads->num_threads * (right_in-left_in)); + q->jobs[j].split.job = &job; + assert(q->jobs[j].split.left < q->jobs[j].split.right); + assert(q->num_jobs <= 3*MAX_TREE_DEPTH + b->threads->num_threads); + } + threads_mutex_unlock(&q->mutex); + // // keep the t=0 one for us: + // split_job_work(b, &job, + // left_in + (int64_t)( 0 /(double)b->threads->num_threads * (right_in-left_in)), + // left_in + (int64_t)((0+1)/(double)b->threads->num_threads * (right_in-left_in))); + // work some more until done: + while(job.done < b->threads->num_threads) do_one_job(b, s_job_split); + } + else + { // do it all ourselves + split_job_work(b, &job, job.left, job.right); + } + + // this is kd-code ;) + int left = job.binmin[0]; + int right = 0; + const float com = (aabb[3+job.p] - aabb[job.p])*(aabb[3+job.q] - aabb[job.q]); + const float width = aabb[3+d] - aabb[d]; + for(int i=1;iprim_aabb[6 * i + axis]; + pmax = b->prim_aabb[6 * i + 3 + axis]; + if(.5f*(pmin + pmax) >= split) + { // prim is right + // update right bounding box: + for(int k=0;k<3;k++) aabbr[k] = MIN(aabbr[k], b->prim_aabb[6*i+k]); + for(int k=3;k<6;k++) aabbr[k] = MAX(aabbr[k], b->prim_aabb[6*i+k]); + // swap bufs: + uint32_t tmp = b->primid[i]; + b->primid[i] = b->primid[--right]; + b->primid[right] = tmp; + for(int k=0;k<6;k++) + { + float tmp = b->prim_aabb[6*i+k]; + b->prim_aabb[6*i+k] = b->prim_aabb[6*right+k]; + b->prim_aabb[6*right+k] = tmp; + } + } + else + { // primitive is left + // update left bounding box + for(int k=0;k<3;k++) aabbl[k] = MIN(aabbl[k], b->prim_aabb[6*i+k]); + for(int k=3;k<6;k++) aabbl[k] = MAX(aabbl[k], b->prim_aabb[6*i+k]); + i++; // buffers are all good already + } + } + return right; +} + +static void scan_job_work(accel_t *a, job_t *j) +{ + *j->scan.right = sort_primids_helper(a, j->scan.axis, j->scan.split, + j->scan.begin, j->scan.back, j->scan.aabbl, j->scan.aabbr); + __sync_fetch_and_add(j->scan.done, 1); +} + +static void swap_job_work(accel_t *a, job_t *j) +{ + uint64_t r = j->swap.read - 1; // will increment first thing below + uint64_t w = j->swap.write - 1; + uint64_t rb = j->swap.read_block; + uint64_t wb = j->swap.write_block; + uint64_t rB = j->swap.b_e[j->swap.read_block]; + uint64_t wB = j->swap.b_r[j->swap.write_block]; + // fprintf(stderr, "swapping %lu elements from blocks %lu (%lu) <-> %lu (%lu)\n", j->swap.num_read, rb, r+1, wb, w+1); + for(uint64_t i=0;iswap.num_read;i++) + { + if(++r >= rB) + { // move to next read block (read dislocated right prims) + while(++rbthreads->num_threads) + { // jump over empty blocks + assert(rb < a->threads->num_threads); + r = j->swap.b_r[rb]; + rB = j->swap.b_e[rb]; + // 
fprintf(stderr, "r=%lu moving read block to %lu\n", r, rb); + if(r < rB) break; + } + } + assert(rb < a->threads->num_threads); + assert(r < rB); + if(++w >= wB) + { // move to next write block (write dislocated left prims) + while(++wbthreads->num_threads) + { // jump over empty blocks + assert(wb < a->threads->num_threads); + w = j->swap.b_l[wb]; + wB = j->swap.b_r[wb]; + // fprintf(stderr, "w=%lu moving write block to %lu\n", w, wb); + if(w < wB) break; + } + } + assert(wb < a->threads->num_threads); + assert(w < wB); + + // swap primid and aabb + uint32_t tmp = a->primid[r]; + a->primid[r] = a->primid[w]; + a->primid[w] = tmp; + for(int k=0;k<6;k++) + { + float tmp = a->prim_aabb[6*r+k]; + a->prim_aabb[6*r+k] = a->prim_aabb[6*w+k]; + a->prim_aabb[6*w+k] = tmp; + } + } + __sync_fetch_and_add(j->swap.done, 1); +} + +// sort primid indices in parallel +static uint64_t sort_primids_par( + accel_t *b, + const int axis, // split plane dimension [0 1 2] + const float split, // split plane coordinate + const int64_t begin, // begin of buffer + const int64_t back, // element after buffer + float *aabblo, // will be bounding box of left child + float *aabbro) // will be bounding box of right child +{ + // sort_tris with couple of threads for couple of blocks between [begin..back] + // TODO: if we knew the max num threads it would be faster to alloc these arrays as such: + int64_t right[PER_THREAD_VLA(b->threads->num_threads)]; + float aabbl[PER_THREAD_VLA(b->threads->num_threads)][6]; + float aabbr[PER_THREAD_VLA(b->threads->num_threads)][6]; + uint64_t b_l[PER_THREAD_VLA(b->threads->num_threads)], b_r[PER_THREAD_VLA(b->threads->num_threads)], b_e[PER_THREAD_VLA(b->threads->num_threads)]; + + int done = 0; + queue_t *q = b->queue+threads_id; + threads_mutex_lock(&q->mutex); + for(int t=0;tthreads->num_threads;t++) + { + int j = q->num_jobs++; + assert(j < 3*MAX_TREE_DEPTH + b->threads->num_threads); + job_t *job = q->jobs + j; + job->type = s_job_scan; + job->scan.right = right+t; + job->scan.axis = axis; + job->scan.split = split; + job->scan.begin = begin + (uint64_t)( t /(double)b->threads->num_threads*(back-begin)); + job->scan.back = begin + (uint64_t)((t+1)/(double)b->threads->num_threads*(back-begin)); + b_l[t] = job->scan.begin; + b_e[t] = job->scan.back; + job->scan.aabbl = aabbl[t]; + job->scan.aabbr = aabbr[t]; + job->scan.done = &done; + } + threads_mutex_unlock(&q->mutex); + + // work while waiting + while(done < b->threads->num_threads) do_one_job(b, s_job_scan); + + // merge aabb into slot 0 + for(int k=0;k<6;k++) aabblo[k] = aabbl[0][k]; + for(int k=0;k<6;k++) aabbro[k] = aabbr[0][k]; + for(int t=1;tthreads->num_threads;t++) + { + for(int k=0;k<3;k++) aabblo[k] = MIN(aabblo[k], aabbl[t][k]); + for(int k=3;k<6;k++) aabblo[k] = MAX(aabblo[k], aabbl[t][k]); + for(int k=0;k<3;k++) aabbro[k] = MIN(aabbro[k], aabbr[t][k]); + for(int k=3;k<6;k++) aabbro[k] = MAX(aabbro[k], aabbr[t][k]); + } + + uint64_t num_left = 0, num_right = 0; + for(int t=0;tthreads->num_threads;t++) + { + num_left += right[t] - b_l[t]; + num_right += b_e[t] - right[t]; + b_r[t] = right[t]; + } + + // then merge the blocks by copying the data again :( + // list will look something like this (blocked by thread): + // [ l r | l r | l r | .. 
| l r ] + // then determine globally from reduction: + // [ sum(l) | sum (r) ] + // and swap all displaced r and l blocks: + // generate global swap lists and parallelise displaced (r,l) pair indices + uint64_t num_dislocated = 0; + for(int t=0;tthreads->num_threads;t++) + { // count all primitives classified `right' which are left of the pivot index: + if(b_r[t] - b_l[0] >= num_left) break; + num_dislocated += MIN(num_left, b_e[t] - b_l[0]) - (b_r[t] - b_l[0]); + } + + // XXX TODO: find out how often this really happens. + // XXX i think the morton order of geo verts may be upside down. + if(num_dislocated == 0) return begin + num_left; // yay. + // single threaded construction never gets here. + + // submit swap_job_t + done = 0; + threads_mutex_lock(&q->mutex); + uint64_t read = b_r[0]; + uint64_t write = -1; + uint64_t read_block = 0, write_block = 0; + for(int t=0;tthreads->num_threads;t++) + { + if(b_r[t] - b_l[0] > num_left) + { + write_block = t; + write = MAX(b_l[0] + num_left, b_l[t]); + break; + } + } + for(int t=0;tthreads->num_threads;t++) + { + int j = q->num_jobs++; + assert(j < 3*MAX_TREE_DEPTH + b->threads->num_threads); + job_t *job = q->jobs + j; + job->type = s_job_swap; + uint64_t beg = ( t *num_dislocated)/b->threads->num_threads; + uint64_t end = ((t+1ul)*num_dislocated)/b->threads->num_threads; + job->swap.read = read; + job->swap.write = write; + job->swap.read_block = read_block; + job->swap.write_block = write_block; + job->swap.num_read = end - beg; + job->swap.b_l = b_l; + job->swap.b_r = b_r; + job->swap.b_e = b_e; + job->swap.done = &done; + int64_t cnt = end - beg; + // skip for last iteration: + if(t == b->threads->num_threads-1) break; + do + { // jump over empty blocks + int64_t remaining = b_e[read_block] - read; + if(remaining > cnt) + { + read += cnt; + cnt = 0; + } + else + { + cnt -= remaining; + read_block++; + assert(read_block < b->threads->num_threads); + read = b_r[read_block]; + } + } + while(cnt > 0); + cnt = end - beg; + do + { // jump over empty blocks + int64_t remaining = b_r[write_block] - write; + if(remaining > cnt) + { + write += cnt; + cnt = 0; + } + else + { + cnt -= remaining; + write_block++; + assert(write_block < b->threads->num_threads); + write = b_l[write_block]; + } + } + while(cnt > 0); + } + threads_mutex_unlock(&q->mutex); + + // work while waiting + while(done < b->threads->num_threads) do_one_job(b, s_job_swap); + + return begin + num_left; +} + +// returns pivot element in sorted list where the split occurs beginthreads->num_threads > 1 && back - begin > b->threads->num_threads * 1000) + return sort_primids_par(b, axis, split, begin, back, aabblo, aabbro); + + return sort_primids_helper(b, axis, split, begin, back, aabblo, aabbro); +} + +static void bound_leaf_t1(accel_t *b, qbvh_node_t *node, int c) +{ +#ifndef ACCEL_STATIC + for(int k=0;k<3;k++) node->aabb1[k].f[c] = FLT_MAX; + for(int k=3;k<6;k++) node->aabb1[k].f[c] = -FLT_MAX; + const uint64_t num_prims = node->child[c] & ~-(1<<5); + const uint64_t prims = (node->child[c] ^ (1ul<<63)) >> 5; + for(uint64_t k=prims;kprims, b->prims->primid[k], d, &m, &M); + node->aabb1[d].f[c] = MIN(node->aabb1[d].f[c], m); + node->aabb1[3+d].f[c] = MAX(node->aabb1[3+d].f[c], M); + } + } +#endif +} + +static uint64_t node_job_work( + accel_t *b, + qbvh_node_t *node, + const int64_t left, + const int64_t right, + float *paabb, + const int depth, + qbvh_node_t *parent, + const int child) +{ + // node->parent = parent - b->tree; + // split axis0 + int axis0, axis00, axis01; + uint64_t 
part[5]; + float split0, split1l, split1r; + float aabb[4][6]; + part[0] = left; + part[4] = right; + axis0 = 0; + float score, split; + float best_score = accel_get_split_with_dim(b, left, right, paabb, 0, &split0); + if((score = accel_get_split_with_dim(b, left, right, paabb, 1, &split)) < best_score) { best_score = score; split0 = split; axis0 = 1; } + if((score = accel_get_split_with_dim(b, left, right, paabb, 2, &split)) < best_score) { best_score = score; split0 = split; axis0 = 2; } + +#if 1 + if(best_score < 0.0f && (right - left) < 32) + { + // terminate + // FIXME: wastes mem for already alloc'ed node :( + if(parent) + { + fprintf(stderr, "making parent a leaf.\n"); + // keep axis bits (0x6) intact: + parent->child[child] = (parent->child[child] & 0x60000000u) | (1u<<31) | (left<<5) | ((right - left) & ~-(1<<5)); + // parent->child[child] = (1ul<<63) | (left<<5) | ((right - left) & ~-(1<<5)); + bound_leaf_t1(b, parent, child); + return right - left; + } + } +#endif + + // TODO: avoid stupid splits one by one! + + // split triangle list at axis0/split0. + part[2] = sort_primids(b, axis0, split0, left, right, aabb[0], aabb[1]); + + // split axis1 (2x) + axis00 = 0; + best_score = accel_get_split_with_dim(b, left, part[2], aabb[0], 0, &split1l); + if((score = accel_get_split_with_dim(b, left, part[2], aabb[0], 1, &split)) < best_score) { best_score = score; split1l = split; axis00 = 1; } + if((score = accel_get_split_with_dim(b, left, part[2], aabb[0], 2, &split)) < best_score) { split1l = split; axis00 = 2; } + axis01 = 0; + best_score = accel_get_split_with_dim(b, part[2], right, aabb[1], 0, &split1r); + if((score = accel_get_split_with_dim(b, part[2], right, aabb[1], 1, &split)) < best_score) { best_score = score; split1r = split; axis01 = 1; } + if((score = accel_get_split_with_dim(b, part[2], right, aabb[1], 2, &split)) < best_score) { split1r = split; axis01 = 2; } + + part[1] = sort_primids(b, axis00, split1l, left, part[2], aabb[0], aabb[1]); + part[3] = sort_primids(b, axis01, split1r, part[2], right, aabb[2], aabb[3]); + + if((right - part[3] == (uint64_t)(right - left)) || + (part[3] - part[2] == (uint64_t)(right - left)) || + (part[2] - part[1] == (uint64_t)(right - left)) || + (part[1] - left == (uint64_t)(right - left))) + { + // if((parent && (right - left > ~-(1<<5))) || !parent) + if((parent && (right - left > NUM_TRIS_PER_LEAF)) || !parent) + { + // fprintf(stderr, "stupid split!\n"); + // split list in the middle, for 1tri/leaf case: + part[2] = (right + left)/2; + part[3] = (right + part[2])/2; + part[1] = (part[2] + left)/2; + // find aabbs + // start with inverted parent aabb, so we can quantize later + for(int k=0;k<3;k++) for(int i=0;i<4;i++) { aabb[i][k] = paabb[k+3]; aabb[i][k+3] = paabb[k]; } + for(int p=0;p<4;p++) + { + for(uint64_t k=part[p];kprim_aabb[6*k+i]; + const float max = b->prim_aabb[6*k+3+i]; + if(min < aabb[p][i ]) aabb[p][i ] = min; + if(max > aabb[p][i+3]) aabb[p][i+3] = max; + } + } + } + } + else + { // regular leaf node case: + // fprintf(stderr, "stupid leaf!\n"); + parent->child[child] = (parent->child[child] & 0x60000000u) | (1u<<31) | (left<<5) | ((right - left) & ~-(1<<5)); + // parent->child[child] = (1ul<<63) | (left<<5) | ((right - left) & ~-(1<<5)); + bound_leaf_t1(b, parent, child); + return right - left; + } + } + + node->child[0] = axis0 << 29; + node->child[1] = axis00 << 29; + node->child[2] = axis01 << 29; + node->child[3] = 0; + // node->axis0 = axis0; + // node->axis00 = axis00; + // node->axis01 = axis01; + + // quantize 
parent box: + uint32_t pbx = CLAMP((int)floorf(0xffffu * (paabb[0] - b->aabb[0])/(b->aabb[3+0]-b->aabb[0])), 0, 0xffffu); + uint32_t pby = CLAMP((int)floorf(0xffffu * (paabb[1] - b->aabb[1])/(b->aabb[3+1]-b->aabb[1])), 0, 0xffffu); + uint32_t pbz = CLAMP((int)floorf(0xffffu * (paabb[2] - b->aabb[2])/(b->aabb[3+2]-b->aabb[2])), 0, 0xffffu); + uint32_t pbX = CLAMP((int)ceilf (0xffffu * (paabb[3] - b->aabb[0])/(b->aabb[3+0]-b->aabb[0])), 0, 0xffffu); + uint32_t pbY = CLAMP((int)ceilf (0xffffu * (paabb[4] - b->aabb[1])/(b->aabb[3+1]-b->aabb[1])), 0, 0xffffu); + uint32_t pbZ = CLAMP((int)ceilf (0xffffu * (paabb[5] - b->aabb[2])/(b->aabb[3+2]-b->aabb[2])), 0, 0xffffu); + node->paabbx = pbx | (pbX<<16); + node->paabby = pby | (pbY<<16); + node->paabbz = pbz | (pbZ<<16); + + float box[6] = { + b->aabb[0] + pbx * (b->aabb[3]-b->aabb[0])/0xffffu, + b->aabb[1] + pby * (b->aabb[4]-b->aabb[1])/0xffffu, + b->aabb[2] + pbz * (b->aabb[5]-b->aabb[2])/0xffffu, + b->aabb[0] + pbX * (b->aabb[3]-b->aabb[0])/0xffffu, + b->aabb[1] + pbY * (b->aabb[4]-b->aabb[1])/0xffffu, + b->aabb[2] + pbZ * (b->aabb[5]-b->aabb[2])/0xffffu}; + + // quantize child boxes relative to quantized parent box: + node->aabb_mx = CLAMP((int)floorf(255*(aabb[0][0] - box[0])/(box[3+0]-box[0])), 0, 255); + node->aabb_mx |= CLAMP((int)floorf(255*(aabb[1][0] - box[0])/(box[3+0]-box[0])), 0, 255)<<8; + node->aabb_mx |= CLAMP((int)floorf(255*(aabb[2][0] - box[0])/(box[3+0]-box[0])), 0, 255)<<16; + node->aabb_mx |= CLAMP((int)floorf(255*(aabb[3][0] - box[0])/(box[3+0]-box[0])), 0, 255)<<24; + node->aabb_my = CLAMP((int)floorf(255*(aabb[0][1] - box[1])/(box[3+1]-box[1])), 0, 255); + node->aabb_my |= CLAMP((int)floorf(255*(aabb[1][1] - box[1])/(box[3+1]-box[1])), 0, 255)<<8; + node->aabb_my |= CLAMP((int)floorf(255*(aabb[2][1] - box[1])/(box[3+1]-box[1])), 0, 255)<<16; + node->aabb_my |= CLAMP((int)floorf(255*(aabb[3][1] - box[1])/(box[3+1]-box[1])), 0, 255)<<24; + node->aabb_mz = CLAMP((int)floorf(255*(aabb[0][2] - box[2])/(box[3+2]-box[2])), 0, 255); + node->aabb_mz |= CLAMP((int)floorf(255*(aabb[1][2] - box[2])/(box[3+2]-box[2])), 0, 255)<<8; + node->aabb_mz |= CLAMP((int)floorf(255*(aabb[2][2] - box[2])/(box[3+2]-box[2])), 0, 255)<<16; + node->aabb_mz |= CLAMP((int)floorf(255*(aabb[3][2] - box[2])/(box[3+2]-box[2])), 0, 255)<<24; + node->aabb_Mx = CLAMP((int)ceilf (255*(aabb[0][3] - box[0])/(box[3+0]-box[0])), 0, 255); + node->aabb_Mx |= CLAMP((int)ceilf (255*(aabb[1][3] - box[0])/(box[3+0]-box[0])), 0, 255)<<8; + node->aabb_Mx |= CLAMP((int)ceilf (255*(aabb[2][3] - box[0])/(box[3+0]-box[0])), 0, 255)<<16; + node->aabb_Mx |= CLAMP((int)ceilf (255*(aabb[3][3] - box[0])/(box[3+0]-box[0])), 0, 255)<<24; + node->aabb_My = CLAMP((int)ceilf (255*(aabb[0][4] - box[1])/(box[3+1]-box[1])), 0, 255); + node->aabb_My |= CLAMP((int)ceilf (255*(aabb[1][4] - box[1])/(box[3+1]-box[1])), 0, 255)<<8; + node->aabb_My |= CLAMP((int)ceilf (255*(aabb[2][4] - box[1])/(box[3+1]-box[1])), 0, 255)<<16; + node->aabb_My |= CLAMP((int)ceilf (255*(aabb[3][4] - box[1])/(box[3+1]-box[1])), 0, 255)<<24; + node->aabb_Mz = CLAMP((int)ceilf (255*(aabb[0][5] - box[2])/(box[3+2]-box[2])), 0, 255); + node->aabb_Mz |= CLAMP((int)ceilf (255*(aabb[1][5] - box[2])/(box[3+2]-box[2])), 0, 255)<<8; + node->aabb_Mz |= CLAMP((int)ceilf (255*(aabb[2][5] - box[2])/(box[3+2]-box[2])), 0, 255)<<16; + node->aabb_Mz |= CLAMP((int)ceilf (255*(aabb[3][5] - box[2])/(box[3+2]-box[2])), 0, 255)<<24; + + // for(int k=0;k<6;k++) for(int p=0;p<4;p++) node->aabb0[k].f[p] = aabb[p][k]; + + uint64_t done = 
0; + // block siblings together + int childcnt = 0; + for (int p=0;p<4;p++) + if (!(depth == MAX_TREE_DEPTH || part[p+1] - part[p] <= NUM_TRIS_PER_LEAF || b->num_nodes >= b->node_bufsize - 1)) + childcnt++; + uint64_t num_nodes = __sync_fetch_and_add(&b->num_nodes, childcnt); + queue_t *q = b->queue + threads_id; + for (int p=0;p<4;p++) + { + // TODO: quads shouldn't count as one but as two primitives here! + if (depth == MAX_TREE_DEPTH || part[p+1] - part[p] <= NUM_TRIS_PER_LEAF || b->num_nodes >= b->node_bufsize - 1) + { // make a leaf node + if(part[p+1]-part[p] > 10) + { + fprintf(stderr, "making extremely fat leaf at depth %d nodes %lu/%lu\n", depth, b->num_nodes, b->node_bufsize); + } + // node->child[p] = (1ul<<63) | (part[p]<<5) | ((part[p+1] - part[p]) & ~-(1<<5)); + node->child[p] = (node->child[p] & 0x60000000u) | (1u<<31) | (part[p]<<5) | ((part[p+1] - part[p]) & ~-(1<<5)); + bound_leaf_t1(b, node, p); + done += part[p+1]-part[p]; + } + else + { + node->child[p] |= 0x1fffffffu & (num_nodes ++); + assert(b->num_nodes < b->node_bufsize); + + if(q->num_jobs >= b->threads->num_threads) + { // in this thread: + done += node_job_work(b, b->tree + (0x1fffffffu & node->child[p]), part[p], part[p+1], aabb[p], depth + 1, node, p); + } + else + { // launch parallel threads: + threads_mutex_lock(&q->mutex); + int j = q->num_jobs++; + assert(j < 3*MAX_TREE_DEPTH + b->threads->num_threads); + job_t *job = q->jobs + j; + job->type = s_job_node; + job->node.node = b->tree + (0x1fffffffu & node->child[p]); + job->node.left = part[p]; + job->node.right = part[p+1]; + assert(job->node.left < job->node.right); + for(int k=0;k<6;k++) job->node.paabb[k] = aabb[p][k]; + job->node.depth = depth + 1; + job->node.parent = node; + job->node.child = p; + threads_mutex_unlock(&q->mutex); + } + } + } + return done; +} + +static void *accel_work(void *arg) +{ + accel_t *a = (accel_t *)arg; + while(1) + { + if(do_one_job(a, s_job_all) || (a->built >= a->num_prims)) + return 0; + } +} + +static void *compute_aabb(void *arg) +{ + accel_t *b = arg; + const uint64_t tid = threads_id; + const uint64_t beg = (b->num_prims* tid )/(uint64_t)b->threads->num_threads; + const uint64_t end = (b->num_prims*(tid+1))/(uint64_t)b->threads->num_threads; + float aabb[6]; + aabb[0] = aabb[1] = aabb[2] = FLT_MAX; + aabb[3] = aabb[4] = aabb[5] = - FLT_MAX; + for(uint64_t i=beg; iprim_aabb[6*i+k]; + const float max = b->prim_aabb[6*i+3+k]; + if(aabb[k] > min) aabb[k] = min; + if(aabb[k+3] < max) aabb[k+3] = max; + } + } + threads_mutex_lock(&b->mutex); + for(int k=0;k<3;k++) if(b->aabb[k] > aabb[k]) b->aabb[k] = aabb[k]; + for(int k=3;k<6;k++) if(b->aabb[k] < aabb[k]) b->aabb[k] = aabb[k]; + b->built ++; + threads_mutex_unlock(&b->mutex); + // make sure we don't pick another job in this thread (intervals depend on tid) + while(b->built < b->threads->num_threads) sched_yield(); + return 0; +} + +void accel_build_async(accel_t *b) +{ + b->num_nodes = 1; + b->aabb[0] = b->aabb[1] = b->aabb[2] = FLT_MAX; + b->aabb[3] = b->aabb[4] = b->aabb[5] = - FLT_MAX; + b->built = 0; + for(int k=0;kthreads->num_threads;k++) + pthread_pool_task_init(b->threads->task + k, &b->threads->pool, compute_aabb, b); + pthread_pool_wait(&b->threads->pool); + + for(int t=0;tthreads->num_threads;t++) + b->queue[t].num_jobs = 0; + b->built = 0; + + if(b->num_prims == 0) + { + qbvh_node_t *node = b->tree; + memset(node, 0, sizeof(qbvh_node_t)); + // fill with empty leaves + node->paabbx = 0xffffu; + node->paabby = 0xffffu; + node->paabbz = 0xffffu; +#ifndef 
ACCEL_STATIC +#error "deleted too much code" +#endif + for(int c=0;c<4;c++) + node->child[c] = (1u<<31); + node->child[1] |= 0x20000000u; + node->child[2] |= 0x20000000u; + // node->axis0 = 0; + // node->axis00 = 1; + // node->axis01 = 1; + // node->parent = -1; + // actually we're done now, no waiting needed. + } + else + { + // first job, root node: + threads_mutex_lock(&b->queue[0].mutex); + b->queue[0].jobs[0].type = s_job_node; + b->queue[0].jobs[0].node.node = b->tree; + b->queue[0].jobs[0].node.left = 0; + b->queue[0].jobs[0].node.right = b->num_prims; + for(int k=0;k<6;k++) b->queue[0].jobs[0].node.paabb[k] = b->aabb[k]; + b->queue[0].jobs[0].node.depth = 0; + b->queue[0].jobs[0].node.parent = 0; + b->queue[0].jobs[0].node.child = 0; + b->queue[0].num_jobs = 1; + threads_mutex_unlock(&b->queue[0].mutex); + + for(int k=0;kthreads->num_threads;k++) + pthread_pool_task_init(b->threads->task + k, &b->threads->pool, accel_work, b); + } +} + +void accel_build_wait(accel_t *b) +{ + if(b->num_prims == 0) return; // didn't build anything, don't wait. + + // wait for the worker threads + pthread_pool_wait(&b->threads->pool); + + // clean up parent + // b->tree[0].parent = -1; + // init motion boxes: + accel_refit(b, b->tree); + +#ifdef ACCEL_STATS + qbvh_stats_t stats; + memset(&stats, 0, sizeof(qbvh_stats_t)); + checktree(b, b->tree, &stats, 0); + printf("[accel] num nodes created: %ld\n", b->num_nodes); + printf("[accel] created qbvh with %ld nodes, %ld leaves (avg %.3f/%.3f depth/num prims, %ld empty), %ld prims, max %ld/%ld (depth/prims).\n", + stats.num_nodes, stats.num_leaves, stats.sum_depth/(float)stats.num_leaves, + stats.num_tris/(float)(stats.num_leaves-stats.num_empties), stats.num_empties, stats.num_tris, + stats.max_depth, stats.max_tris_leaf); + printf("[accel] overall SAV: %f\n", stats.SA); +#endif +} + +void accel_build(accel_t *b) +{ + accel_build_async(b); + accel_build_wait(b); +} + +// =================================================== +// below are the traversal routines for reference: +#if 0 +static inline void aabb_intersect( + const qbvh_node_t *n, + const __m128 *t0, + const __m128 *t1, + const __m128 *pos4, + const __m128 *invdir4, + qbvh_float4_t *tmin, + qbvh_float4_t *tmax) +{ + const qbvh_float4_t *aabb0 = n->aabb0; +#ifdef ACCEL_STATIC + for(int k=0;k<3;k++) + { + __m128 t[2]; + t[0] = _mm_mul_ps(_mm_sub_ps(aabb0[k ].m, pos4[k]), invdir4[k]); + t[1] = _mm_mul_ps(_mm_sub_ps(aabb0[k+3].m, pos4[k]), invdir4[k]); + tmin->m = _mm_max_ps(tmin->m, _mm_min_ps(t[0], t[1])); + tmax->m = _mm_min_ps(tmax->m, _mm_max_ps(t[0], t[1])); + } +#else + const qbvh_float4_t *aabb1 = n->aabb1; +#if 1 + for(int k=0;k<3;k++) + { + __m128 t[2]; + t[0] = _mm_mul_ps(_mm_sub_ps(_mm_add_ps( + _mm_mul_ps(aabb0[k].m, *t0), + _mm_mul_ps(aabb1[k].m, *t1)), pos4[k]), invdir4[k]); + t[1] = _mm_mul_ps(_mm_sub_ps(_mm_add_ps( + _mm_mul_ps(aabb0[k+3].m, *t0), + _mm_mul_ps(aabb1[k+3].m, *t1)), pos4[k]), invdir4[k]); + // unfortunately cannot use near and far bit masks to replace this last min/max instruction. + // since the bounding box interpolation for time comes with slight numeric jitter, the t[.] + // will have slightly different results even for zero-extent boxes and cause holes. 
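/* [editorial annotation, not in the original source] Scalar equivalent of the
 * 4-wide slab test performed by the two statements below, for one child box
 * [lo, hi] and one axis k:
 *
 *     tk0  = (lo[k] - pos[k]) * invdir[k];
 *     tk1  = (hi[k] - pos[k]) * invdir[k];
 *     tmin = MAX(tmin, MIN(tk0, tk1));    // latest entry over all axes
 *     tmax = MIN(tmax, MAX(tk0, tk1));    // earliest exit over all axes
 *
 * A ray hits the box iff tmin <= tmax, which the callers check with
 * _mm_cmple_ps(tmin4.m, tmax4.m). In this motion-blur path the box is first
 * interpolated, lo = (1-time)*aabb0 + time*aabb1, which is what the
 * _mm_add_ps(_mm_mul_ps(aabb0, *t0), _mm_mul_ps(aabb1, *t1)) terms compute for
 * all four children at once. */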
+ tmin->m = _mm_max_ps(tmin->m, _mm_min_ps(t[0], t[1])); + tmax->m = _mm_min_ps(tmax->m, _mm_max_ps(t[0], t[1])); + } +#else + for(int k=0;k<3;k++) + { + for(int j=0;j<4;j++) + { + // non-sse version: + const float tm0 = ((aabb0[k].f[j] * ((float*)t0)[j] + + aabb1[k].f[j] * ((float*)t0)[j]) - ((float*)(pos4+k))[j]) + * ((float*)(invdir4+k))[j]; + const float tM0 = ((aabb0[k+3].f[j] * ((float*)t0)[j] + + aabb1[k+3].f[j] * ((float*)t1)[j]) - ((float*)(pos4+k))[j]) + * ((float*)(invdir4+k))[j]; + const float tm = MIN(tm0, tM0); + const float tM = MAX(tm0, tM0); + + tmin->f[j] = tm > tmin->f[j] ? tm : tmin->f[j]; + tmax->f[j] = tM < tmax->f[j] ? tM : tmax->f[j]; + } + } +#endif +#endif +} + +#if 0 +static inline int accel_swap(const qbvh_float4_t tmin4, uint32_t *a, uint32_t *b) +{ + if(tmin4.f[*a] > tmin4.f[*b]) + { + uint32_t tmp=*b; + *b=*a; + *a=tmp; + return 1; + } + return 0; +} +#endif + +void accel_intersect(const accel_t *b, const ray_t *ray, hit_t *hit) +{ +#ifdef ACCEL_DEBUG + b->debug[threads_id].accel_intersect++; +#endif + int near[3]; + int far[3]; + for(int k=0;k<3;k++) + { + // need to take sign bit or test invdir for < 0 to catch +-inf cases in aabb intersection. + near[k] = (*(unsigned int*)(&(ray->dir[k])))>>31; + far[k] = 1 ^ near[k]; + } + + const qbvh_node_t *node = b->tree; + uint64_t stack[3*MAX_TREE_DEPTH]; + float stack_dist[3*MAX_TREE_DEPTH]; + int stackpos = 0; + uint64_t current; + stack[0] = 0; + stack_dist[0] = -FLT_MAX; + + __m128 invdir4[3], pos4[3]; + const __m128 t0 = _mm_set1_ps(1.0f-ray->time); + const __m128 t1 = _mm_set1_ps(ray->time); + + for(int k=0;k<3;k++) + { + invdir4[k] = _mm_set1_ps(1.0f/ray->dir[k]); + pos4[k] = _mm_load_ps1(ray->pos + k); + } + + while(1) + { + // 4x aabb test, sort and push selected index to stack, hold one reg + // push children according to volume intersection test. + qbvh_float4_t tmin4, tmax4; + tmin4.m = _mm_setzero_ps(); + tmax4.m = _mm_set_ps1(hit->dist); + aabb_intersect(node, &t0, &t1, pos4, invdir4, &tmin4, &tmax4); + const __m128 leq = _mm_cmple_ps(tmin4.m, tmax4.m); + // embree guys say this is a good idea, and on ryzen it helps some 3-4% together with tmin stack + if(__builtin_expect(_mm_movemask_ps(leq) == 0, 0)) goto pop; + const unsigned int *i = (unsigned int*)≤ +#ifdef ACCEL_DEBUG + b->debug[threads_id].aabb_intersect++; + for(int k=0;k<4;k++) if(i[k]) b->debug[threads_id].aabb_true++; +#endif + + //const unsigned int i[4] = {1, 1, 1, 1}; +#if 1 // topological sort + const int axis0 = node->axis0; + const int axis1n = near[axis0] ? node->axis01 : node->axis00; + const int axis1f = near[axis0] ? 
node->axis00 : node->axis01; + + const unsigned int n11 = (far [axis0]<<1) | far [axis1f]; + const unsigned int n10 = (far [axis0]<<1) | near[axis1f]; + const unsigned int n01 = (near[axis0]<<1) | far [axis1n]; + const unsigned int n00 = (near[axis0]<<1) | near[axis1n]; +#else // sort by entry point distance, 11% slower on emily + uint32_t n00 = 0, n01 = 1, n10 = 2, n11 = 3; + uint32_t swapped = accel_swap(tmin4, &n00, &n01); + swapped += accel_swap(tmin4, &n01, &n10); + swapped += accel_swap(tmin4, &n10, &n11); + // if(swapped) + { + swapped = accel_swap(tmin4, &n00, &n01); + swapped += accel_swap(tmin4, &n01, &n10); + // if(swapped) + accel_swap(tmin4, &n00, &n01); + } +#undef SWAP +#endif + + if(i[n00]) + { + current = node->child[n00]; + if(i[n11]) { stack_dist[stackpos] = tmin4.f[n11]; stack[stackpos++] = node->child[n11]; } + if(i[n10]) { stack_dist[stackpos] = tmin4.f[n10]; stack[stackpos++] = node->child[n10]; } + if(i[n01]) { stack_dist[stackpos] = tmin4.f[n01]; stack[stackpos++] = node->child[n01]; } + } + else if(i[n01]) + { + current = node->child[n01]; + if(i[n11]) { stack_dist[stackpos] = tmin4.f[n11]; stack[stackpos++] = node->child[n11]; } + if(i[n10]) { stack_dist[stackpos] = tmin4.f[n10]; stack[stackpos++] = node->child[n10]; } + } + else if(i[n10]) + { + current = node->child[n10]; + if(i[n11]) { stack_dist[stackpos] = tmin4.f[n11]; stack[stackpos++] = node->child[n11]; } + } + else if(i[n11]) current = node->child[n11]; + else + { + do pop: + { + if(stackpos == 0) return; + stackpos--; + current = stack[stackpos]; + } + while(__builtin_expect(stack_dist[stackpos] > hit->dist, 0)); + } + + const uint64_t leaf_mask = 1ul<<63; + while(current & leaf_mask) + { + // intersect primitives + uint64_t tmp_index = (current ^ leaf_mask) >> 5; + const uint64_t num = current & ~-(1<<5); + for (uint64_t i = 0; i < num; i++) + { +#ifdef ACCEL_DEBUG + b->debug[threads_id].prims_intersect++; +#endif + prims_intersect(b->prims, b->prims->primid[tmp_index], ray, hit); + tmp_index++; + } + do + { + if (stackpos == 0) return; + --stackpos; + current = stack[stackpos]; + } + while(__builtin_expect(stack_dist[stackpos] > hit->dist, 0)); + } + node = b->tree + current; + } +} + +int accel_visible(const accel_t *b, const ray_t *ray, const float max_dist) +{ +#ifdef ACCEL_DEBUG + b->debug[threads_id].accel_intersect++; +#endif + int near[3]; + int far[3]; + for(int k=0;k<3;k++) + { + near[k] = (*(unsigned int*)(&(ray->dir[k])))>>31; + far[k] = 1 ^ near[k]; + } + + const int hash = (b->shadow_cache_last+1)*threads_id + (((((int)ray->pos[0])*73856093) ^ (((int)ray->pos[1])*19349663) ^ (((int)ray->pos[2])*83492791)) & b->shadow_cache_last); + const qbvh_node_t *node = b->tree + b->shadow_cache[hash]; + const qbvh_node_t *ep = node; + uint64_t stack[3*MAX_TREE_DEPTH]; + int stackpos = 0; + uint64_t current; + stack[0] = 0; + + __m128 invdir4[3], pos4[3]; + const __m128 t0 = _mm_set1_ps(1.0f-ray->time); + const __m128 t1 = _mm_set1_ps(ray->time); + + for(int k=0;k<3;k++) + { + invdir4[k] = _mm_set1_ps(1.0f/ray->dir[k]); + pos4[k] = _mm_load_ps1(ray->pos + k); + } + + while(1) + { + qbvh_float4_t tmin4, tmax4; + tmin4.m = _mm_setzero_ps(); + tmax4.m = _mm_set_ps1(max_dist); + aabb_intersect(node, &t0, &t1, pos4, invdir4, &tmin4, &tmax4); + const __m128 leq = _mm_cmple_ps(tmin4.m, tmax4.m); + const int *i = (int*)≤ +#ifdef ACCEL_DEBUG + b->debug[threads_id].aabb_intersect++; + for(int k=0;k<4;k++) if(i[k]) b->debug[threads_id].aabb_true++; +#endif + for(int k=0;k<4;k++) + { + stack[stackpos] = 
node->child[k]; + stackpos -= i[k]; + } + while(1) + { + while (stackpos == 0) + { + // push children of node->parent + if(ep->parent == (uint64_t)-1) return 1; + node = b->tree + ep->parent; + // only push intersecting aabbs. + qbvh_float4_t tmin4, tmax4; + tmin4.m = _mm_setzero_ps(); + tmax4.m = _mm_set_ps1(max_dist); + aabb_intersect(node, &t0, &t1, pos4, invdir4, &tmin4, &tmax4); + const __m128 leq = _mm_cmple_ps(tmin4.m, tmax4.m); + const int *i = (int*)≤ +#ifdef ACCEL_DEBUG + b->debug[threads_id].aabb_intersect++; + for(int k=0;k<4;k++) if(i[k]) b->debug[threads_id].aabb_true++; +#endif + for(int k=0;k<4;k++) + { + if(node->child[k] != ep - b->tree) + { + stack[stackpos] = node->child[k]; + stackpos -= i[k]; + } + } + ep = node; + } + --stackpos; + current = stack[stackpos]; + if(!(current & (0x80000000ul<<32))) break; + // intersect primitives + uint64_t tmp_index = (current ^ (0x80000000ul<<32)) >> 5; + const uint64_t num = current & ~-(1<<5); + for (uint64_t i = 0; i < num; i++) + { +#ifdef ACCEL_DEBUG + b->debug[threads_id].prims_intersect++; +#endif + if(prims_intersect_visible(b->prims, b->prims->primid[tmp_index], ray, max_dist)) + { + // cache node: + b->shadow_cache[hash] = node - b->tree; + return 0; + } + tmp_index++; + } + } + node = b->tree + current; + } +} + +// XXX TODO: const ray and min dist on hit? +void accel_closest(const accel_t *b, ray_t *ray, hit_t *hit, const float centre) +{ + int near[3]; + int far[3]; + for(int k=0;k<3;k++) + { // need to take sign bit or test invdir for < 0 to catch +-inf cases in aabb intersection. + near[k] = (*(unsigned int*)(&(ray->dir[k])))>>31; + far[k] = 1 ^ near[k]; + } + + const qbvh_node_t *node = b->tree; + uint64_t stack[3*MAX_TREE_DEPTH]; + int stackpos = 0; + uint64_t current; + stack[0] = 0; + hit_t tent = *hit; + + __m128 invdir4[3], pos4[3]; + const __m128 t0 = _mm_set1_ps(1.0f-ray->time); + const __m128 t1 = _mm_set1_ps(ray->time); + + for(int k=0;k<3;k++) + { + invdir4[k] = _mm_set1_ps(1.0f/ray->dir[k]); + pos4[k] = _mm_load_ps1(ray->pos + k); + } + + while(1) + { + // 4x aabb test, sort and push selected index to stack, hold one reg + // push children according to volume intersection test. + qbvh_float4_t tmin4, tmax4; + tmin4.m = _mm_set_ps1(ray->min_dist); + tmax4.m = _mm_set_ps1(hit->dist); + aabb_intersect(node, &t0, &t1, pos4, invdir4, &tmin4, &tmax4); + const __m128 leq = _mm_cmple_ps(tmin4.m, tmax4.m); + const unsigned int *i = (unsigned int*)≤ + const int axis0 = node->axis0; + const int axis1n = near[axis0] ? node->axis01 : node->axis00; + const int axis1f = near[axis0] ? 
node->axis00 : node->axis01; + + const unsigned int n11 = (far [axis0]<<1) | far [axis1f]; + const unsigned int n10 = (far [axis0]<<1) | near[axis1f]; + const unsigned int n01 = (near[axis0]<<1) | far [axis1n]; + const unsigned int n00 = (near[axis0]<<1) | near[axis1n]; + + if(i[n00]) + { + current = node->child[n00]; + if(i[n11]) stack[stackpos++] = node->child[n11]; + if(i[n10]) stack[stackpos++] = node->child[n10]; + if(i[n01]) stack[stackpos++] = node->child[n01]; + } + else if(i[n01]) + { + current = node->child[n01]; + if(i[n11]) stack[stackpos++] = node->child[n11]; + if(i[n10]) stack[stackpos++] = node->child[n10]; + } + else if(i[n10]) + { + current = node->child[n10]; + if(i[n11]) stack[stackpos++] = node->child[n11]; + } + else if(i[n11]) current = node->child[n11]; + else + { + if(stackpos == 0) + { + *hit = tent; + return; + } + stackpos--; + current = stack[stackpos]; + } + + while(current & (0x80000000ul<<32)) + { + // intersect primitives + uint64_t tmp_index = (current ^ (0x80000000ul<<32)) >> 5; + const uint64_t num = current & ~-(1<<5); + for (uint64_t i = 0; i < num; i++) + { + prims_intersect(b->prims, b->prims->primid[tmp_index], ray, hit); + if(fabsf(hit->dist - centre) < fabsf(tent.dist - centre)) tent = *hit; + if(hit->dist > centre + 1e-6f) + { // move interval up from below + ray->min_dist = 2.0f*centre - hit->dist; + } + else if(hit->dist < centre - 1e-6f) + { // move interval up and clear far end to be two-sided around centre again + ray->min_dist = hit->dist; + hit->dist = 2.0f*centre - ray->min_dist; + } + else return; // hit->dist ~= centre, found a straight hit. + tmp_index++; + } + if (stackpos == 0) + { + *hit = tent; + return; + } + --stackpos; + current = stack[stackpos]; + } + node = b->tree + current; + } +} + +#if 0 +// TODO: callback to search for closeby points +// TODO: don't call into any primitive interface, just return a list of primids +// TODO: get fixed radius and hand out all overlapping boxes worth of data +// return the number of prims found +int accel_collect( + const accel_t *const a, + const float *const x, + const float radius, + primid_t *buf, + const int bufsize) +{ + int cnt = 0; + // TODO: intersect boxes with sphere around point + // jim arvo's version + bool BoxIntersectsSphere(Vec3 Bmin, Vec3 Bmax, Vec3 C, float r) { + float r2 = r * r; + dmin = 0; + for( i = 0; i < 3; i++ ) { + if( C[i] < Bmin[i] ) dmin += SQR( C[i] - Bmin[i] ); + else if( C[i] > Bmax[i] ) dmin += SQR( C[i] - Bmax[i] ); + } + return dmin <= r2; + } +} +#endif + +#endif diff --git a/src/common/utils.c b/src/common/utils.c index 1589e4f..28f0249 100644 --- a/src/common/utils.c +++ b/src/common/utils.c @@ -219,7 +219,7 @@ color_index_t Com_ParseColor(const char *s, color_index_t last) return COLOR_NONE; } -#if USE_REF == REF_GL +#if (USE_REF == REF_GL) || (USE_REF == REF_GLPT) /* ================ Com_ParseExtensionString diff --git a/src/refresh/gl/main.c b/src/refresh/gl/main.c index 2f5c6e2..6c1a9d3 100644 --- a/src/refresh/gl/main.c +++ b/src/refresh/gl/main.c @@ -1131,3 +1131,5 @@ void R_ModeChanged(int width, int height, int flags, int rowbytes, void *pixels) r_config.flags = flags; } +void R_AddDecal(decal_t *d) {} + diff --git a/src/refresh/images.c b/src/refresh/images.c index ce3e2f6..858dc60 100644 --- a/src/refresh/images.c +++ b/src/refresh/images.c @@ -1732,6 +1732,71 @@ static void r_texture_formats_changed(cvar_t *self) #endif // USE_PNG || USE_JPG || USE_TGA +qerror_t +load_img(const char *name, image_t *image) +{ + byte *pic; + imageformat_t fmt; 
+ qerror_t ret; + + size_t len = strlen(name); + + // must have an extension and at least 1 char of base name + if (len <= 4) { + return Q_ERR_NAMETOOSHORT; + } + if (name[len - 4] != '.') { + return Q_ERR_INVALID_PATH; + } + + // fill in some basic info + memcpy(image->name, name, len + 1); + image->baselen = len - 4; + image->type = 0; + image->flags = 0; + image->registration_sequence = 1; + + // find out original extension + for (fmt = 0; fmt < IM_MAX; fmt++) { + if (!Q_stricmp(image->name + image->baselen + 1, img_loaders[fmt].ext)) { + break; + } + } + + // load the pic from disk + pic = NULL; + +#if USE_PNG || USE_JPG || USE_TGA + // first try with original extension + ret = _try_image_format(fmt, image, &pic); + if (ret == Q_ERR_NOENT) { + // retry with remaining extensions + ret = try_other_formats(fmt, image, &pic); + } + + // if we are replacing 8-bit texture with a higher resolution 32-bit + // texture, we need to recover original image dimensions + if (fmt <= IM_WAL && ret > IM_WAL) { + get_image_dimensions(fmt, image); + } +#else + if (fmt == IM_MAX) { + ret = Q_ERR_INVALID_PATH; + } else { + ret = _try_image_format(fmt, image, &pic); + } +#endif + + if (ret < 0) { + memset(image, 0, sizeof(*image)); + return ret; + } + + image->pix_data = pic; + + return Q_ERR_SUCCESS; +} + // finds or loads the given image, adding it to the hash table. static qerror_t find_or_load_image(const char *name, size_t len, imagetype_t type, imageflags_t flags, diff --git a/src/refresh/light_lists.c.h b/src/refresh/light_lists.c.h new file mode 100644 index 0000000..e91315a --- /dev/null +++ b/src/refresh/light_lists.c.h @@ -0,0 +1,243 @@ +/* +Copyright (C) 2018 Tobias Zirr + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#include + +// this file extracts light lists for each cluster of the map +// light sampling algorithm in shaders/light_lists.h +// === +// NOTE FOR OTHER PROJECTS: +// while in this game, we conveniently leverage the PVS to +// aggregate coarse lists of potentially influencing lights, +// any other method of light culling could be used for that +// purpose, e.g. voxel grids, clipmaps of relevant nearfield/ +// farfield resp. local/global light sources. +// In principle, there are many parallels to deferred shading. +// The light sampling algorithm in shaders/light_lists.h takes +// a stochastic constant-size strided subsample of the lists +// emitted here in every frame, therefore it can handle decently +// long lists of light with constant computation time. Quality of +// lighting will ofc degrade with light lists longer than more +// than a few _dozens_ of light sources, but overall, temporal +// accumulation allows for quite a bit of lazyness/convenience/ +// measures of practicability in light culling. 
:-) +// It helps to make sure that every stochastic subset of the list is +// a somewhat good representation of lighting, striding hopefully does +// the trick for many scenarios + +static vec3_t* cluster_aabbs(bsp_mesh_t *wm, float dilation) { + vec3_t* aabbs = Z_Malloc(wm->num_clusters * 2 * sizeof(vec3_t)); + for (int i = 0; i < wm->num_clusters ; i++) { + vec3_t* aabb_min = aabbs[2 * i]; + vec3_t* aabb_max = aabb_min + 1; + + aabb_min[0][0] = 2.e32f; + aabb_min[0][1] = 2.e32f; + aabb_min[0][2] = 2.e32f; + + aabb_max[0][0] = -2.e32f; + aabb_max[0][1] = -2.e32f; + aabb_max[0][2] = -2.e32f; + } + for (int i = 0; i < wm->num_indices; i++) { + if (wm->clusters[i/3] < 0) continue; + vec3_t* aabb_min = aabbs[2 * wm->clusters[i/3]]; + vec3_t* aabb_max = aabb_min + 1; + + vec3_t v; + v[0] = wm->positions[wm->indices[i] * 3 + 0]; + v[1] = wm->positions[wm->indices[i] * 3 + 1]; + v[2] = wm->positions[wm->indices[i] * 3 + 2]; + + aabb_min[0][0] = MIN(aabb_min[0][0], v[0]); + aabb_min[0][1] = MIN(aabb_min[0][1], v[1]); + aabb_min[0][2] = MIN(aabb_min[0][2], v[2]); + + aabb_max[0][0] = MAX(aabb_max[0][0], v[0]); + aabb_max[0][1] = MAX(aabb_max[0][1], v[1]); + aabb_max[0][2] = MAX(aabb_max[0][2], v[2]); + } + // 'dilate', adjacency/overlap checked for light lists + for (int i = 0; i < wm->num_clusters ; i++) { + vec3_t* aabb_min = aabbs[2 * i]; + vec3_t* aabb_max = aabb_min + 1; + + aabb_min[0][0] -= dilation; + aabb_min[0][1] -= dilation; + aabb_min[0][2] -= dilation; + + aabb_max[0][0] += dilation; + aabb_max[0][1] += dilation; + aabb_max[0][2] += dilation; + } + return aabbs; +} + +static int aabb_overlap(vec3_t* aabbs, int i, int j) { + return MAX(aabbs[2*i][0], aabbs[2*j][0]) <= MIN(aabbs[2*i+1][0], aabbs[2*j+1][0]) + && MAX(aabbs[2*i][1], aabbs[2*j][1]) <= MIN(aabbs[2*i+1][1], aabbs[2*j+1][1]) + && MAX(aabbs[2*i][2], aabbs[2*j][2]) <= MIN(aabbs[2*i+1][2], aabbs[2*j+1][2]); +} + +static void cluster_vis_mask(bsp_t *bsp, byte mask[VIS_MAX_BYTES], int i, vec3_t* aabbs) { + byte imask[VIS_MAX_BYTES]; + BSP_ClusterVis(bsp, imask, i, DVIS_PVS); + assert(Q_IsBitSet(imask, i)); + memcpy(mask, imask, sizeof(imask)); + // dilate + for (int j = 0; j < bsp->visrowsize; j++) { + if (imask[j]) { + for (int k = 0; k < 8; ++k) { + if (imask[j] & (1 << k) && aabb_overlap(aabbs, i, 8 * j + k)) { + byte jmask[VIS_MAX_BYTES]; + BSP_ClusterVis(bsp, jmask, 8 * j + k, DVIS_PVS); + for (int l = 0; l < bsp->visrowsize; l++) { + mask[l] |= jmask[l]; + } + } + } + } + } +} + +static int* +collect_light_clusters(bsp_mesh_t *wm, bsp_t *bsp) +{ + mface_t *surfaces = bsp->faces; + int num_faces = bsp->numfaces; + + int *face_clusters = Z_Malloc(num_faces * sizeof(int)); + memset(face_clusters, -1, num_faces * sizeof(int)); + + int num_leafs = bsp->numleafs; + for (int i = 0; i < num_leafs; ++i) { + int cidx = bsp->leafs[i].cluster; + int nlf = bsp->leafs[i].numleaffaces; + mface_t** it = bsp->leafs[i].firstleafface; + for (int j = 0; j < nlf; ++j, ++it) { + int fidx = *it - surfaces; + assert(cidx >= 0); + face_clusters[fidx] = cidx; + } + } + + return face_clusters; +} + +static void +collect_cluster_lights(bsp_mesh_t *wm, bsp_t *bsp) +{ + int num_clusters = bsp->vis->numclusters; // bsp->visrowsize << 3; + int num_cluster_bytes = bsp->visrowsize; + + wm->num_clusters = num_clusters; + wm->cluster_light_offsets = Z_Malloc((num_clusters+1) * sizeof(int)); + + int *local_light_counts = Z_Malloc(num_clusters * sizeof(int)); + memset(local_light_counts, 0, num_clusters * sizeof(int)); + + int num_tris = wm->num_indices/3; + for 
(int i = 0; i < num_tris; i++) { + if (wm->materials[i] & BSP_FLAG_LIGHT || wm->clusters[i] & BSP_FLAG_LIGHT) { + int cidx = wm->clusters[i]; + if (cidx >= 0) { + cidx &= ~BSP_FLAG_LIGHT; + assert(cidx < num_clusters); + local_light_counts[cidx]++; + } + } + } + + int *local_light_offsets = Z_Malloc((num_clusters+1) * sizeof(int)); + int num_cluster_lights = 0; + for (int i = 0; i < num_clusters; i++) { + local_light_offsets[i] = num_cluster_lights; + num_cluster_lights += local_light_counts[i]; + } + local_light_offsets[num_clusters] = num_cluster_lights; + + int *local_cluster_lights = Z_Malloc(num_cluster_lights * sizeof(int)); + for (int i = 0; i < num_tris; i++) { + if (wm->materials[i] & BSP_FLAG_LIGHT || wm->clusters[i] & BSP_FLAG_LIGHT) { + int cidx = wm->clusters[i]; + if (cidx >= 0) { + cidx &= ~BSP_FLAG_LIGHT; + wm->clusters[i] = cidx; // flags no longer needed + local_cluster_lights[local_light_offsets[cidx]++] = i; + } + } + } + + // PVS seems slightly broken, try recovering by dilation step + // that requires AABBs of clusters! + vec3_t* aabbs = cluster_aabbs(wm, 8.f); // 8 taken from FatPVS + + int *cluster_light_counts = Z_Malloc(num_clusters * sizeof(int)); + memset(cluster_light_counts, 0, num_clusters * sizeof(int)); + + byte mask[VIS_MAX_BYTES]; + for (int i = 0; i < num_clusters; i++) { + local_light_offsets[i] -= local_light_counts[i]; // reset after prev loop + cluster_vis_mask(bsp, mask, i, aabbs); + for (int j = 0; j < num_cluster_bytes; j++) { + if (mask[j]) { + for (int k = 0; k < 8; ++k) { + if (mask[j] & (1 << k)) + cluster_light_counts[i] += local_light_counts[j * 8 + k]; + } + } + } + } + + num_cluster_lights = 0; + for (int i = 0; i < num_clusters; i++) { + wm->cluster_light_offsets[i] = num_cluster_lights; + num_cluster_lights += cluster_light_counts[i]; + } + wm->cluster_light_offsets[num_clusters] = num_cluster_lights; + + wm->num_cluster_lights = num_cluster_lights; + wm->cluster_lights = Z_Malloc(num_cluster_lights * sizeof(int)); + + for (int i = 0; i < num_clusters; i++) { + cluster_vis_mask(bsp, mask, i, aabbs); + for (int j = 0; j < num_cluster_bytes; j++) { + if (mask[j]) { + for (int k = 0; k < 8; ++k) { + if (mask[j] & (1 << k)) { + int ii = j * 8 + k; + memcpy(wm->cluster_lights + wm->cluster_light_offsets[i] + , local_cluster_lights + local_light_offsets[ii] + , sizeof(int) * local_light_counts[ii]); + wm->cluster_light_offsets[i] += local_light_counts[ii]; + } + } + } + } + } + + for (int i = 0; i < num_clusters; i++) { + wm->cluster_light_offsets[i] -= cluster_light_counts[i]; // reset after prev loop + } + + Z_Free(local_light_counts); + Z_Free(local_light_offsets); + Z_Free(local_cluster_lights); + Z_Free(cluster_light_counts); + Z_Free(aabbs); +} diff --git a/src/refresh/models.c b/src/refresh/models.c index e11cb59..562ee7d 100644 --- a/src/refresh/models.c +++ b/src/refresh/models.c @@ -35,8 +35,8 @@ with this program; if not, write to the Free Software Foundation, Inc., // we are sure we won't need it. 
#define MAX_RMODELS (MAX_MODELS * 2) -static model_t r_models[MAX_RMODELS]; -static int r_numModels; +model_t r_models[MAX_RMODELS]; +int r_numModels; static model_t *MOD_Alloc(void) { @@ -377,6 +377,10 @@ qhandle_t R_RegisterModel(const char *name) done: index = (model - r_models) + 1; +#if USE_REF == REF_VKPT + extern int register_model_dirty; + register_model_dirty = 1; +#endif return index; fail2: diff --git a/src/refresh/sw/main.c b/src/refresh/sw/main.c index d6060ec..3d2fa2c 100644 --- a/src/refresh/sw/main.c +++ b/src/refresh/sw/main.c @@ -920,3 +920,4 @@ void R_DrawBeam(entity_t *e) } } +void R_AddDecal(decal_t *d) {} diff --git a/src/refresh/vkpt/asvgf.c b/src/refresh/vkpt/asvgf.c new file mode 100644 index 0000000..526e464 --- /dev/null +++ b/src/refresh/vkpt/asvgf.c @@ -0,0 +1,319 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#include "vkpt.h" + +enum { + SEED_RNG, + FWD_PROJECT, + GRADIENT_IMAGE, + GRADIENT_ATROUS, + TEMPORAL, + ATROUS, + TAA, + ASVGF_NUM_PIPELINES +}; + +static VkPipeline pipeline_asvgf[ASVGF_NUM_PIPELINES]; +static VkPipelineLayout pipeline_layout_atrous; +static VkPipelineLayout pipeline_layout_general; +static VkPipelineLayout pipeline_layout_taa; + +VkResult +vkpt_asvgf_initialize() +{ + VkDescriptorSetLayout desc_set_layouts[] = { + qvk.desc_set_layout_ubo, qvk.desc_set_layout_textures, + qvk.desc_set_layout_vertex_buffer + }; + + VkPushConstantRange push_constant_range_atrous = { + .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT, + .offset = 0, + .size = sizeof(uint32_t) + }; + + CREATE_PIPELINE_LAYOUT(qvk.device, &pipeline_layout_atrous, + .setLayoutCount = LENGTH(desc_set_layouts), + .pSetLayouts = desc_set_layouts, + .pushConstantRangeCount = 1, + .pPushConstantRanges = &push_constant_range_atrous + ); + ATTACH_LABEL_VARIABLE(pipeline_layout_atrous, PIPELINE_LAYOUT); + + CREATE_PIPELINE_LAYOUT(qvk.device, &pipeline_layout_general, + .setLayoutCount = LENGTH(desc_set_layouts), + .pSetLayouts = desc_set_layouts, + ); + ATTACH_LABEL_VARIABLE(pipeline_layout_general, PIPELINE_LAYOUT); + + CREATE_PIPELINE_LAYOUT(qvk.device, &pipeline_layout_taa, + .setLayoutCount = LENGTH(desc_set_layouts), + .pSetLayouts = desc_set_layouts, + ); + ATTACH_LABEL_VARIABLE(pipeline_layout_taa, PIPELINE_LAYOUT); + return VK_SUCCESS; +} + +VkResult +vkpt_asvgf_destroy() +{ + vkDestroyPipelineLayout(qvk.device, pipeline_layout_atrous, NULL); + vkDestroyPipelineLayout(qvk.device, pipeline_layout_general, NULL); + vkDestroyPipelineLayout(qvk.device, pipeline_layout_taa, NULL); + return VK_SUCCESS; +} + +VkResult +vkpt_asvgf_create_pipelines() +{ + VkComputePipelineCreateInfo pipeline_info[ASVGF_NUM_PIPELINES] = { + [SEED_RNG] = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = SHADER_STAGE(QVK_MOD_ASVGF_SEED_RNG_COMP, VK_SHADER_STAGE_COMPUTE_BIT), + .layout = 
pipeline_layout_general, + }, + [FWD_PROJECT] = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = SHADER_STAGE(QVK_MOD_ASVGF_FWD_PROJECT_COMP, VK_SHADER_STAGE_COMPUTE_BIT), + .layout = pipeline_layout_general, + }, + [GRADIENT_IMAGE] = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = SHADER_STAGE(QVK_MOD_ASVGF_GRADIENT_IMG_COMP, VK_SHADER_STAGE_COMPUTE_BIT), + .layout = pipeline_layout_general, + }, + [GRADIENT_ATROUS] = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = SHADER_STAGE(QVK_MOD_ASVGF_GRADIENT_ATROUS_COMP, VK_SHADER_STAGE_COMPUTE_BIT), + .layout = pipeline_layout_atrous, + }, + [TEMPORAL] = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = SHADER_STAGE(QVK_MOD_ASVGF_TEMPORAL_COMP, VK_SHADER_STAGE_COMPUTE_BIT), + .layout = pipeline_layout_general, + }, + [ATROUS] = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = SHADER_STAGE(QVK_MOD_ASVGF_ATROUS_COMP, VK_SHADER_STAGE_COMPUTE_BIT), + .layout = pipeline_layout_atrous, + }, + [TAA] = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = SHADER_STAGE(QVK_MOD_ASVGF_TAA_COMP, VK_SHADER_STAGE_COMPUTE_BIT), + .layout = pipeline_layout_general, + }, + }; + + _VK(vkCreateComputePipelines(qvk.device, 0, LENGTH(pipeline_info), pipeline_info, 0, pipeline_asvgf)); + + return VK_SUCCESS; +} + +VkResult +vkpt_asvgf_destroy_pipelines() +{ + for(int i = 0; i < ASVGF_NUM_PIPELINES; i++) + vkDestroyPipeline(qvk.device, pipeline_asvgf[i], NULL); + return VK_SUCCESS; +} + +#define BARRIER_COMPUTE(img) \ + do { \ + VkImageSubresourceRange subresource_range = { \ + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, \ + .baseMipLevel = 0, \ + .levelCount = 1, \ + .baseArrayLayer = 0, \ + .layerCount = 1 \ + }; \ + IMAGE_BARRIER(qvk.cmd_buf_current, \ + .image = img, \ + .subresourceRange = subresource_range, \ + .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, \ + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, \ + .oldLayout = VK_IMAGE_LAYOUT_GENERAL, \ + .newLayout = VK_IMAGE_LAYOUT_GENERAL, \ + ); \ + } while(0) + + +VkResult +vkpt_asvgf_create_gradient_samples(VkCommandBuffer cmd_buf, uint32_t frame_num) +{ + VkDescriptorSet desc_sets[] = { + qvk.desc_set_ubo[qvk.current_image_index], + qvk.desc_set_textures, + qvk.desc_set_vertex_buffer + }; + VkClearColorValue clear_grd_smpl_pos = { + .uint32 = { 0, 0, 0, 0 } + }; + + VkImageSubresourceRange subresource_range = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + }; + + vkCmdClearColorImage(cmd_buf, qvk.images[VKPT_IMG_ASVGF_GRAD_SMPL_POS], + VK_IMAGE_LAYOUT_GENERAL, &clear_grd_smpl_pos, 1, &subresource_range); + + vkCmdClearColorImage(cmd_buf, qvk.images[VKPT_IMG_DEBUG], + VK_IMAGE_LAYOUT_GENERAL, &clear_grd_smpl_pos, 1, &subresource_range); + + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_RNG_SEED_A + (qvk.frame_counter & 1)]); + + vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_asvgf[SEED_RNG]); + vkCmdBindDescriptorSets(qvk.cmd_buf_current, VK_PIPELINE_BIND_POINT_COMPUTE, + pipeline_layout_general, 0, LENGTH(desc_sets), desc_sets, 0, 0); + vkCmdDispatch(cmd_buf, + (qvk.extent.width + 31) / 32, + (qvk.extent.height + 31) / 32, + 1); + + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_RNG_SEED_A + (qvk.frame_counter & 1)]); + + + vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_asvgf[FWD_PROJECT]); + vkCmdBindDescriptorSets(qvk.cmd_buf_current, VK_PIPELINE_BIND_POINT_COMPUTE, + 
pipeline_layout_general, 0, LENGTH(desc_sets), desc_sets, 0, 0); + vkCmdDispatch(cmd_buf, + (qvk.extent.width / GRAD_DWN + 31) / 32, + (qvk.extent.height / GRAD_DWN + 31) / 32, + 1); + + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_RNG_SEED_A + (qvk.frame_counter & 1)]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_GRAD_SMPL_POS]); + + return VK_SUCCESS; +} + +VkResult +vkpt_asvgf_record_cmd_buffer(VkCommandBuffer cmd_buf) +{ + VkDescriptorSet desc_sets[] = { + qvk.desc_set_ubo[qvk.current_image_index], + qvk.desc_set_textures, + qvk.desc_set_vertex_buffer + }; + BARRIER_COMPUTE(qvk.images[VKPT_IMG_PT_COLOR_A]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_PT_COLOR_B]); + + _VK(vkpt_profiler_query(PROFILER_ASVGF_RECONSTRUCT_GRADIENT, PROFILER_START)); + + /* create gradient image */ + vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_asvgf[GRADIENT_IMAGE]); + vkCmdBindDescriptorSets(qvk.cmd_buf_current, VK_PIPELINE_BIND_POINT_COMPUTE, + pipeline_layout_atrous, 0, LENGTH(desc_sets), desc_sets, 0, 0); + vkCmdDispatch(cmd_buf, + (qvk.extent.width / GRAD_DWN + 31) / 32, + (qvk.extent.height / GRAD_DWN + 31) / 32, + 1); + + // XXX BARRIERS!!! + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_GRAD_A]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_DEBUG]); + + vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_asvgf[GRADIENT_ATROUS]); + vkCmdBindDescriptorSets(qvk.cmd_buf_current, VK_PIPELINE_BIND_POINT_COMPUTE, + pipeline_layout_atrous, 0, LENGTH(desc_sets), desc_sets, 0, 0); + + /* reconstruct gradient image */ + const int num_atrous_iterations_gradient = 5; + for(int i = 0; i < num_atrous_iterations_gradient; i++) { + uint32_t push_constants[1] = { + i + }; + + vkCmdPushConstants(cmd_buf, pipeline_layout_atrous, + VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants), push_constants); + + vkCmdDispatch(cmd_buf, + (qvk.extent.width / GRAD_DWN + 31) / 32, + (qvk.extent.height / GRAD_DWN + 31) / 32, + 1); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_GRAD_A + !(i & 1)]); + + } + + _VK(vkpt_profiler_query(PROFILER_ASVGF_RECONSTRUCT_GRADIENT, PROFILER_STOP)); + + + /* temporal accumulation / filtering */ + _VK(vkpt_profiler_query(PROFILER_ASVGF_TEMPORAL, PROFILER_START)); + vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_asvgf[TEMPORAL]); + vkCmdBindDescriptorSets(qvk.cmd_buf_current, VK_PIPELINE_BIND_POINT_COMPUTE, + pipeline_layout_atrous, 0, LENGTH(desc_sets), desc_sets, 0, 0); + vkCmdDispatch(cmd_buf, + (qvk.extent.width + 31) / 32, + (qvk.extent.height + 31) / 32, + 1); + + + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_ATROUS_PING]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_HIST_MOMENTS_A]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_HIST_MOMENTS_B]); + _VK(vkpt_profiler_query(PROFILER_ASVGF_TEMPORAL, PROFILER_STOP)); + + _VK(vkpt_profiler_query(PROFILER_ASVGF_ATROUS, PROFILER_START)); + + vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_asvgf[ATROUS]); + vkCmdBindDescriptorSets(qvk.cmd_buf_current, VK_PIPELINE_BIND_POINT_COMPUTE, + pipeline_layout_atrous, 0, LENGTH(desc_sets), desc_sets, 0, 0); + + /* spatial reconstruction filtering */ + const int num_atrous_iterations = 5; + for(int i = 0; i < num_atrous_iterations; i++) { + uint32_t push_constants[1] = { + i + }; + + vkCmdPushConstants(cmd_buf, pipeline_layout_atrous, + VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants), push_constants); + + vkCmdDispatch(cmd_buf, + (qvk.extent.width + 31) / 32, + (qvk.extent.height + 31) / 32, + 1); + 
BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_ATROUS_PING]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_ATROUS_PONG]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_HIST_COLOR]); + } + + _VK(vkpt_profiler_query(PROFILER_ASVGF_ATROUS, PROFILER_STOP)); + + _VK(vkpt_profiler_query(PROFILER_ASVGF_TAA, PROFILER_START)); + + /* taa */ + vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_asvgf[TAA]); + vkCmdBindDescriptorSets(qvk.cmd_buf_current, VK_PIPELINE_BIND_POINT_COMPUTE, + pipeline_layout_taa, 0, LENGTH(desc_sets), desc_sets, 0, 0); + vkCmdDispatch(cmd_buf, + (qvk.extent.width + 31) / 32, + (qvk.extent.height + 31) / 32, + 1); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_TAA_A]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_ASVGF_TAA_B]); + + _VK(vkpt_profiler_query(PROFILER_ASVGF_TAA, PROFILER_STOP)); + + return VK_SUCCESS; +} diff --git a/src/refresh/vkpt/bsp_mesh.c b/src/refresh/vkpt/bsp_mesh.c new file mode 100644 index 0000000..5e60573 --- /dev/null +++ b/src/refresh/vkpt/bsp_mesh.c @@ -0,0 +1,325 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#include "vkpt.h" +#include "shader/global_textures.h" + +#include + +#include "../light_lists.c.h" + +static int +create_poly( + const mface_t *surf, + float *positions_out, + float *tex_coord_out, + uint32_t *material_out) +{ + static const int max_vertices = 32; + float positions [3 * /*max_vertices*/ 32]; + float tex_coords[2 * /*max_vertices*/ 32]; + mtexinfo_t *texinfo = surf->texinfo; + assert(surf->numsurfedges < max_vertices); + int flags = surf->drawflags; + flags |= (surf->texinfo ? surf->texinfo->c.flags : 0); + flags &= (surf->texinfo && surf->texinfo->radiance && !(flags & SURF_WARP) ? 
~0 : ~SURF_LIGHT); + if(surf->texinfo && surf->texinfo->image->is_light) + flags |= SURF_LIGHT; + float sc[2] = { 1.0f / texinfo->image->width, 1.0f / texinfo->image->height }; + + float pos_center[3] = { 0 }; + float tc_center[2]; + + for (int i = 0; i < surf->numsurfedges; i++) { + msurfedge_t *src_surfedge = surf->firstsurfedge + i; + medge_t *src_edge = src_surfedge->edge; + mvertex_t *src_vert = src_edge->v[src_surfedge->vert]; + + float *p = positions + i * 3; + float *t = tex_coords + i * 2; + + VectorCopy(src_vert->point, p); + + pos_center[0] += src_vert->point[0]; + pos_center[1] += src_vert->point[1]; + pos_center[2] += src_vert->point[2]; + + t[0] = (DotProduct(p, texinfo->axis[0]) + texinfo->offset[0]) * sc[0]; + t[1] = (DotProduct(p, texinfo->axis[1]) + texinfo->offset[1]) * sc[1]; + } + + pos_center[0] /= (float)surf->numsurfedges; + pos_center[1] /= (float)surf->numsurfedges; + pos_center[2] /= (float)surf->numsurfedges; + + tc_center[0] = (DotProduct(pos_center, texinfo->axis[0]) + texinfo->offset[0]) * sc[0]; + tc_center[1] = (DotProduct(pos_center, texinfo->axis[1]) + texinfo->offset[1]) * sc[1]; + +#define CP_V(idx, src) \ + do { \ + if(positions_out) { \ + memcpy(positions_out + (idx) * 3, src, sizeof(float) * 3); \ + } \ + } while(0) + +#define CP_T(idx, src) \ + do { \ + if(tex_coord_out) { \ + memcpy(tex_coord_out + (idx) * 2, src, sizeof(float) * 2); \ + } \ + } while(0) + +#define CP_M(idx) \ + do { \ + if(material_out) { \ + material_out[k] = (int)(texinfo->image - r_images); \ + if(flags & SURF_LIGHT) material_out[k] |= BSP_FLAG_LIGHT; \ + if(flags & SURF_WARP) material_out[k] |= BSP_FLAG_WATER; \ + if(flags & SURF_TRANS_MASK) material_out[k] |= BSP_FLAG_TRANSPARENT; \ + } \ + } while(0) + + int k = 0; + /* switch between triangle fan around center or first vertex */ + //int tess_center = 0; + int tess_center = surf->numsurfedges > 4; + + const int num_triangles = tess_center + ? surf->numsurfedges + : surf->numsurfedges - 2; + + for (int i = 0; i < num_triangles; i++) { + const int e = surf->numsurfedges; + + int i1 = (i + 2 - tess_center) % e; + int i2 = (i + 1 - tess_center) % e; + + CP_V(k, tess_center ? pos_center : positions); + CP_T(k, tess_center ? tc_center : tex_coords); + CP_M(k); + k++; + + CP_V(k, positions + i1 * 3); + CP_T(k, tex_coords + i1 * 2); + CP_M(k); + k++; + + CP_V(k, positions + i2 * 3); + CP_T(k, tex_coords + i2 * 2); + CP_M(k); + k++; + + } + +#undef CP_V +#undef CP_T +#undef CP_M + + assert(k % 3 == 0); + return k; +} + +static int +belongs_to_model(bsp_t *bsp, mface_t *surf) +{ + for (int i = 0; i < bsp->nummodels; i++) { + if (surf >= bsp->models[i].firstface + && surf < bsp->models[i].firstface + bsp->models[i].numfaces) + return 1; + } + return 0; +} + +static void +collect_surfaces(int *idx_ctr, bsp_mesh_t *wm, bsp_t *bsp, int model_idx, int skip_mask, int filter_mask) +{ + mface_t *surfaces = model_idx < 0 ? bsp->faces : bsp->models[model_idx].firstface; + int num_faces = model_idx < 0 ? bsp->numfaces : bsp->models[model_idx].numfaces; + + int *face_clusters = model_idx < 0 ? collect_light_clusters(wm, bsp) : NULL; + + for (int i = 0; i < num_faces; i++) { + mface_t *surf = surfaces + i; + int flags = surf->drawflags; + flags |= (surf->texinfo ? surf->texinfo->c.flags : 0); + flags &= (surf->texinfo && surf->texinfo->radiance ? 
~0 : ~SURF_LIGHT); + +#if 0 + if (surf->texinfo && surf->texinfo->image && (flags & SURF_LIGHT)) { + int material_slot = (int)(surf->texinfo->image - r_images); + if (material_slot >= 0 && material_slot < MAX_MATERIALS) { + material_buffer.material_data[material_slot].emission = 1.0; + material_buffer.dirty = 1; + } + } +#endif + + if ((flags & skip_mask)) + continue; + + if (filter_mask && !(flags & filter_mask)) + continue; + + if (model_idx < 0 && belongs_to_model(bsp, surf)) { + continue; + } + + if (*idx_ctr + create_poly(surf, NULL, NULL, NULL) >= WM_MAX_VERTICES) { + Com_Error(ERR_FATAL, "error: exceeding max vertex limit\n"); + } + + int cnt = create_poly(surf, + &wm->positions[*idx_ctr * 3], + &wm->tex_coords[*idx_ctr * 2], + &wm->materials[*idx_ctr / 3]); + + if(face_clusters) { + for (int it = *idx_ctr / 3, k = 0; k < cnt; k += 3, ++it) { + wm->clusters[it] = face_clusters[i]; + } + } + + *idx_ctr += cnt; + } +} + +void +bsp_mesh_create_from_bsp(bsp_mesh_t *wm, bsp_t *bsp) +{ + wm->models_idx_offset = Z_Malloc(bsp->nummodels * sizeof(int)); + wm->models_idx_count = Z_Malloc(bsp->nummodels * sizeof(int)); + memset(wm->models_idx_offset, 0, bsp->nummodels * sizeof(int)); + memset(wm->models_idx_count, 0, bsp->nummodels * sizeof(int)); + wm->model_centers = Z_Malloc(bsp->nummodels * 3 * sizeof(float)); + + wm->num_models = bsp->nummodels; + + wm->num_vertices = 0; + wm->num_indices = 0; + wm->positions = Z_Malloc(WM_MAX_VERTICES * 3 * sizeof(*wm->positions)); + wm->tex_coords = Z_Malloc(WM_MAX_VERTICES * 2 * sizeof(*wm->tex_coords)); + wm->materials = Z_Malloc(WM_MAX_VERTICES / 3 * sizeof(*wm->materials)); + wm->clusters = Z_Malloc(WM_MAX_VERTICES / 3 * sizeof(*wm->clusters)); + + int idx_ctr = 0; + + const int flags_static_world = SURF_NODRAW | SURF_SKY; + + collect_surfaces(&idx_ctr, wm, bsp, -1, flags_static_world, 0); + wm->world_idx_count = idx_ctr; + + wm->world_fluid_offset = idx_ctr; + collect_surfaces(&idx_ctr, wm, bsp, -1, 0, SURF_WARP); + wm->world_fluid_count = idx_ctr - wm->world_fluid_offset; + + wm->world_light_offset = idx_ctr; + collect_surfaces(&idx_ctr, wm, bsp, -1, flags_static_world, SURF_LIGHT); + wm->world_light_count = idx_ctr - wm->world_light_offset; + + for (int k = 0; k < bsp->nummodels; k++) { + wm->models_idx_offset[k] = idx_ctr; + collect_surfaces(&idx_ctr, wm, bsp, k, flags_static_world, 0); + wm->models_idx_count[k] = idx_ctr - wm->models_idx_offset[k]; + } + + wm->num_indices = idx_ctr; + wm->num_vertices = idx_ctr; + + wm->indices = Z_Malloc(idx_ctr * sizeof(int)); + for (int i = 0; i < wm->num_vertices; i++) + wm->indices[i] = i; + + if (wm->num_vertices >= WM_MAX_VERTICES) { + Com_Error(ERR_FATAL, "too many vertices\n"); + } + + for(int i = 0; i < wm->num_models; i++) { + vec3_t aabb_min = { 999999999.0f, 999999999.0f, 999999999.0f }; + vec3_t aabb_max = { -999999999.0f, -999999999.0f, -999999999.0f }; + + for(int j = 0; j < wm->models_idx_count[i]; j++) { + vec3_t v; + v[0] = wm->positions[(wm->models_idx_offset[i] + j) * 3 + 0]; + v[1] = wm->positions[(wm->models_idx_offset[i] + j) * 3 + 1]; + v[2] = wm->positions[(wm->models_idx_offset[i] + j) * 3 + 2]; + + aabb_min[0] = MIN(aabb_min[0], v[0]); + aabb_min[1] = MIN(aabb_min[1], v[1]); + aabb_min[2] = MIN(aabb_min[2], v[2]); + + aabb_max[0] = MAX(aabb_max[0], v[0]); + aabb_max[1] = MAX(aabb_max[1], v[1]); + aabb_max[2] = MAX(aabb_max[2], v[2]); + } + + wm->model_centers[i][0] = (aabb_min[0] + aabb_max[0]) * 0.5f; + wm->model_centers[i][1] = (aabb_min[1] + aabb_max[1]) * 0.5f; + 
wm->model_centers[i][2] = (aabb_min[2] + aabb_max[2]) * 0.5f; + } + + //FILE *f = fopen("/tmp/lights", "a+"); + for(int i = 0; i < wm->num_indices; i++) { + uint32_t m = wm->materials[i]; + m &= BSP_TEXTURE_MASK; + + if(m >= MAX_RIMAGES) + continue; + + if(r_images[m].is_light) + wm->materials[i] |= BSP_FLAG_LIGHT; + //fprintf(f, "%s\n", r_images[m].name); + + } + //fclose(f); + + collect_cluster_lights(wm, bsp); +} + +void +bsp_mesh_destroy(bsp_mesh_t *wm) +{ + Z_Free(wm->models_idx_offset); + Z_Free(wm->models_idx_count); + Z_Free(wm->model_centers); + + Z_Free(wm->positions); + Z_Free(wm->tex_coords); + Z_Free(wm->indices); + + memset(wm, 0, sizeof(*wm)); +} + +void +bsp_mesh_register_textures(bsp_t *bsp) +{ + for (int i = 0; i < bsp->numtexinfo; i++) { + mtexinfo_t *info = bsp->texinfo + i; + imageflags_t flags; + if (info->c.flags & SURF_WARP) + flags = IF_TURBULENT; + else + flags = IF_NONE; + + char buffer[MAX_QPATH]; + Q_concat(buffer, sizeof(buffer), "textures/", info->name, ".wal", NULL); + FS_NormalizePath(buffer, buffer); + info->image = IMG_Find(buffer, IT_WALL, flags); + } +} + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/draw.c b/src/refresh/vkpt/draw.c new file mode 100644 index 0000000..435fb37 --- /dev/null +++ b/src/refresh/vkpt/draw.c @@ -0,0 +1,565 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#include "shared/shared.h" +#include "refresh/refresh.h" +#include "client/client.h" +#include "refresh/images.h" + +#include + +#include "vkpt.h" +#include "shader/global_textures.h" + +#define TEXNUM_WHITE (~0) +#define MAX_STRETCH_PICS (1<<18) + +drawStatic_t draw = { + .scale = 1.0f, +}; + +static int num_stretch_pics = 0; +typedef struct { + float x, y, w, h; + float s, t, w_s, h_t; + uint32_t color, tex_handle; +} StretchPic_t; + +static StretchPic_t stretch_pic_queue[MAX_STRETCH_PICS]; + +static VkPipelineLayout pipeline_layout_stretch_pic; +static VkRenderPass render_pass_stretch_pic; +static VkPipeline pipeline_stretch_pic; +static VkFramebuffer framebuffer_stretch_pic[MAX_SWAPCHAIN_IMAGES]; +static BufferResource_t buf_stretch_pic_queue[MAX_SWAPCHAIN_IMAGES]; +static VkDescriptorSetLayout desc_set_layout_sbo; +static VkDescriptorPool desc_pool_sbo; +static VkDescriptorSet desc_set_sbo[MAX_SWAPCHAIN_IMAGES]; + + +static inline void enqueue_stretch_pic( + float x, float y, float w, float h, + float s1, float t1, float s2, float t2, + uint32_t color, int tex_handle) +{ + if(num_stretch_pics == MAX_STRETCH_PICS) { + Com_EPrintf("Error: stretch pic queue full!\n"); + assert(0); + return; + } + assert(tex_handle); + StretchPic_t *sp = stretch_pic_queue + num_stretch_pics++; + + float width = r_config.width * draw.scale; + float height = r_config.height * draw.scale; + + x = 2.0f * x / width - 1.0f; + y = 2.0f * y / height - 1.0f; + + w = 2.0f * w / width; + h = 2.0f * h / height; + + sp->x = x; + sp->y = y; + sp->w = w; + sp->h = h; + + sp->s = s1; + sp->t = t1; + sp->w_s = s2 - s1; + sp->h_t = t2 - t1; + + sp->color = color; + sp->tex_handle = tex_handle; + if(tex_handle >= 0 && tex_handle < MAX_RIMAGES + && !r_images[tex_handle].registration_sequence) { + sp->tex_handle = TEXNUM_WHITE; + } +} + +static void +create_render_pass() +{ + LOG_FUNC(); + VkAttachmentDescription color_attachment = { + .format = qvk.surf_format.format, + .samples = VK_SAMPLE_COUNT_1_BIT, + .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD, + //.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, + .storeOp = VK_ATTACHMENT_STORE_OP_STORE, + .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, + .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + }; + + VkAttachmentReference color_attachment_ref = { + .attachment = 0, /* index in fragment shader */ + .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + }; + + VkSubpassDescription subpass = { + .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS, + .colorAttachmentCount = 1, + .pColorAttachments = &color_attachment_ref, + }; + + VkSubpassDependency dependencies[] = { + { + .srcSubpass = VK_SUBPASS_EXTERNAL, + .dstSubpass = 0, /* index for own subpass */ + .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + .srcAccessMask = 0, /* XXX verify */ + .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT + | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + }, + }; + + VkRenderPassCreateInfo render_pass_info = { + .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, + .attachmentCount = 1, + .pAttachments = &color_attachment, + .subpassCount = 1, + .pSubpasses = &subpass, + .dependencyCount = LENGTH(dependencies), + .pDependencies = dependencies, + }; + + _VK(vkCreateRenderPass(qvk.device, &render_pass_info, NULL, &render_pass_stretch_pic)); + ATTACH_LABEL_VARIABLE(render_pass_stretch_pic, RENDER_PASS); +} + +VkResult 
+vkpt_draw_initialize() +{ + num_stretch_pics = 0; + LOG_FUNC(); + create_render_pass(); + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + _VK(buffer_create(buf_stretch_pic_queue + i, sizeof(StretchPic_t) * MAX_STRETCH_PICS, + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)); + } + + VkDescriptorSetLayoutBinding layout_bindings[] = { + { + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + .binding = 0, + .stageFlags = VK_SHADER_STAGE_VERTEX_BIT, + }, + }; + + VkDescriptorSetLayoutCreateInfo layout_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = LENGTH(layout_bindings), + .pBindings = layout_bindings, + }; + + _VK(vkCreateDescriptorSetLayout(qvk.device, &layout_info, NULL, &desc_set_layout_sbo)); + ATTACH_LABEL_VARIABLE(desc_set_layout_sbo, DESCRIPTOR_SET_LAYOUT); + + VkDescriptorPoolSize pool_size = { + .type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = qvk.num_swap_chain_images, + }; + + VkDescriptorPoolCreateInfo pool_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .poolSizeCount = 1, + .pPoolSizes = &pool_size, + .maxSets = qvk.num_swap_chain_images, + }; + + _VK(vkCreateDescriptorPool(qvk.device, &pool_info, NULL, &desc_pool_sbo)); + ATTACH_LABEL_VARIABLE(desc_pool_sbo, DESCRIPTOR_POOL); + + + VkDescriptorSetAllocateInfo descriptor_set_alloc_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + .descriptorPool = desc_pool_sbo, + .descriptorSetCount = 1, + .pSetLayouts = &desc_set_layout_sbo, + }; + + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + _VK(vkAllocateDescriptorSets(qvk.device, &descriptor_set_alloc_info, desc_set_sbo + i)); + BufferResource_t *sbo = buf_stretch_pic_queue + i; + + VkDescriptorBufferInfo buf_info = { + .buffer = sbo->buffer, + .offset = 0, + .range = sizeof(stretch_pic_queue), + }; + + VkWriteDescriptorSet output_buf_write = { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = desc_set_sbo[i], + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + .pBufferInfo = &buf_info, + }; + + vkUpdateDescriptorSets(qvk.device, 1, &output_buf_write, 0, NULL); + } + return VK_SUCCESS; +} + +VkResult +vkpt_draw_destroy() +{ + LOG_FUNC(); + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + buffer_destroy(buf_stretch_pic_queue + i); + } + vkDestroyRenderPass(qvk.device, render_pass_stretch_pic, NULL); + vkDestroyDescriptorPool(qvk.device, desc_pool_sbo, NULL); + vkDestroyDescriptorSetLayout(qvk.device, desc_set_layout_sbo, NULL); + + return VK_SUCCESS; +} + +VkResult +vkpt_draw_destroy_pipelines() +{ + LOG_FUNC(); + vkDestroyPipeline(qvk.device, pipeline_stretch_pic, NULL); + vkDestroyPipelineLayout(qvk.device, pipeline_layout_stretch_pic, NULL); + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + vkDestroyFramebuffer(qvk.device, framebuffer_stretch_pic[i], NULL); + } + return VK_SUCCESS; +} + +VkResult +vkpt_draw_create_pipelines() +{ + LOG_FUNC(); + + assert(desc_set_layout_sbo); + VkDescriptorSetLayout desc_set_layouts[] = { + desc_set_layout_sbo, qvk.desc_set_layout_textures + }; + CREATE_PIPELINE_LAYOUT(qvk.device, &pipeline_layout_stretch_pic, + .setLayoutCount = LENGTH(desc_set_layouts), + .pSetLayouts = desc_set_layouts + ); + + VkPipelineShaderStageCreateInfo shader_info[] = { + SHADER_STAGE(QVK_MOD_STRETCH_PIC_VERT, VK_SHADER_STAGE_VERTEX_BIT), + 
SHADER_STAGE(QVK_MOD_STRETCH_PIC_FRAG, VK_SHADER_STAGE_FRAGMENT_BIT) + }; + + VkPipelineVertexInputStateCreateInfo vertex_input_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + .vertexBindingDescriptionCount = 0, + .pVertexBindingDescriptions = NULL, + .vertexAttributeDescriptionCount = 0, + .pVertexAttributeDescriptions = NULL, + }; + + VkPipelineInputAssemblyStateCreateInfo input_assembly_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, + .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, + .primitiveRestartEnable = VK_FALSE, + }; + + VkViewport viewport = { + .x = 0.0f, + .y = 0.0f, + .width = (float) qvk.extent.width, + .height = (float) qvk.extent.height, + .minDepth = 0.0f, + .maxDepth = 1.0f, + }; + + VkRect2D scissor = { + .offset = { 0, 0 }, + .extent = qvk.extent, + }; + + VkPipelineViewportStateCreateInfo viewport_state = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, + .viewportCount = 1, + .pViewports = &viewport, + .scissorCount = 1, + .pScissors = &scissor, + }; + + VkPipelineRasterizationStateCreateInfo rasterizer_state = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, + .depthClampEnable = VK_FALSE, + .rasterizerDiscardEnable = VK_FALSE, /* skip rasterizer */ + .polygonMode = VK_POLYGON_MODE_FILL, + .lineWidth = 1.0f, + .cullMode = VK_CULL_MODE_BACK_BIT, + .frontFace = VK_FRONT_FACE_CLOCKWISE, + .depthBiasEnable = VK_FALSE, + .depthBiasConstantFactor = 0.0f, + .depthBiasClamp = 0.0f, + .depthBiasSlopeFactor = 0.0f, + }; + + VkPipelineMultisampleStateCreateInfo multisample_state = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + .sampleShadingEnable = VK_FALSE, + .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT, + .minSampleShading = 1.0f, + .pSampleMask = NULL, + .alphaToCoverageEnable = VK_FALSE, + .alphaToOneEnable = VK_FALSE, + }; + + VkPipelineColorBlendAttachmentState color_blend_attachment = { + .colorWriteMask = VK_COLOR_COMPONENT_R_BIT + | VK_COLOR_COMPONENT_G_BIT + | VK_COLOR_COMPONENT_B_BIT + | VK_COLOR_COMPONENT_A_BIT, + .blendEnable = VK_TRUE, + .srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA, + .dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, + .colorBlendOp = VK_BLEND_OP_ADD, + .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE, + .dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO, + .alphaBlendOp = VK_BLEND_OP_ADD, + }; + + VkPipelineColorBlendStateCreateInfo color_blend_state = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, + .logicOpEnable = VK_FALSE, + .logicOp = VK_LOGIC_OP_COPY, + .attachmentCount = 1, + .pAttachments = &color_blend_attachment, + .blendConstants = { 0.0f, 0.0f, 0.0f, 0.0f }, + }; + + VkGraphicsPipelineCreateInfo pipeline_info = { + .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, + .stageCount = LENGTH(shader_info), + .pStages = shader_info, + + .pVertexInputState = &vertex_input_info, + .pInputAssemblyState = &input_assembly_info, + .pViewportState = &viewport_state, + .pRasterizationState = &rasterizer_state, + .pMultisampleState = &multisample_state, + .pDepthStencilState = NULL, + .pColorBlendState = &color_blend_state, + .pDynamicState = NULL, + + .layout = pipeline_layout_stretch_pic, + .renderPass = render_pass_stretch_pic, + .subpass = 0, + + .basePipelineHandle = VK_NULL_HANDLE, + .basePipelineIndex = -1, + }; + + _VK(vkCreateGraphicsPipelines(qvk.device, VK_NULL_HANDLE, 1, &pipeline_info, NULL, &pipeline_stretch_pic)); + ATTACH_LABEL_VARIABLE(pipeline_stretch_pic, 
PIPELINE); + + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + VkImageView attachments[] = { + qvk.swap_chain_image_views[i] + }; + + VkFramebufferCreateInfo fb_create_info = { + .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, + .renderPass = render_pass_stretch_pic, + .attachmentCount = 1, + .pAttachments = attachments, + .width = qvk.extent.width, + .height = qvk.extent.height, + .layers = 1, + }; + + _VK(vkCreateFramebuffer(qvk.device, &fb_create_info, NULL, framebuffer_stretch_pic + i)); + ATTACH_LABEL_VARIABLE(framebuffer_stretch_pic[i], FRAMEBUFFER); + } + + return VK_SUCCESS; +} + +VkResult +vkpt_draw_clear_stretch_pics() +{ + num_stretch_pics = 0; + return VK_SUCCESS; +} + +VkResult +vkpt_draw_submit_stretch_pics(VkCommandBuffer *cmd_buf) +{ + BufferResource_t *buf_spq = buf_stretch_pic_queue + qvk.current_image_index; + StretchPic_t *spq_dev = (StretchPic_t *) buffer_map(buf_spq); + memcpy(spq_dev, stretch_pic_queue, sizeof(stretch_pic_queue)); + buffer_unmap(buf_spq); + spq_dev = NULL; + + VkClearValue clear_color = { .color = { .float32 = { 0.0f, 0.0f, 0.0f, 1.0f } } }; + + VkRenderPassBeginInfo render_pass_info = { + .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, + .renderPass = render_pass_stretch_pic, + .framebuffer = framebuffer_stretch_pic[qvk.current_image_index], + .renderArea.offset = { 0, 0 }, + .renderArea.extent = qvk.extent, + .clearValueCount = 1, + .pClearValues = &clear_color + }; + + VkDescriptorSet desc_sets[] = { + desc_set_sbo[qvk.current_image_index], + qvk.desc_set_textures + }; + + vkCmdBeginRenderPass(*cmd_buf, &render_pass_info, VK_SUBPASS_CONTENTS_INLINE); + vkCmdBindDescriptorSets(*cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, + pipeline_layout_stretch_pic, 0, LENGTH(desc_sets), desc_sets, 0, 0); + vkCmdBindPipeline(*cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_stretch_pic); + vkCmdDraw(*cmd_buf, 4, num_stretch_pics, 0, 0); + vkCmdEndRenderPass(*cmd_buf); + + num_stretch_pics = 0; + return VK_SUCCESS; +} + +void R_SetClipRect(const clipRect_t *clip) { FUNC_UNIMPLEMENTED(); } + +void +R_ClearColor(void) +{ + draw.colors[0].u32 = U32_WHITE; + draw.colors[1].u32 = U32_WHITE; +} + +void +R_SetAlpha(float alpha) +{ + draw.colors[0].u8[3] = draw.colors[1].u8[3] = alpha * 255; +} + +void +R_SetColor(uint32_t color) +{ + draw.colors[0].u32 = color; + draw.colors[1].u8[3] = draw.colors[0].u8[3]; +} + +void +R_LightPoint(vec3_t origin, vec3_t light) +{ + VectorSet(light, 1, 1, 1); +} + +void +R_SetScale(float scale) +{ + draw.scale = scale; +} + +void +R_DrawStretchPic(int x, int y, int w, int h, qhandle_t pic) +{ + enqueue_stretch_pic( + x, y, w, h, + 0.0f, 0.0f, 1.0f, 1.0f, + draw.colors[0].u32, pic); +} + +void +R_DrawPic(int x, int y, qhandle_t pic) +{ + image_t *image = IMG_ForHandle(pic); + R_DrawStretchPic(x, y, image->width, image->height, pic); +} + +#define DIV64 (1.0f / 64.0f) + +void +R_TileClear(int x, int y, int w, int h, qhandle_t pic) +{ + enqueue_stretch_pic(x, y, w, h, + x * DIV64, y * DIV64, (x + w) * DIV64, (y + h) * DIV64, + U32_WHITE, pic); +} + +void +R_DrawFill8(int x, int y, int w, int h, int c) +{ + if(!w || !h) + return; + enqueue_stretch_pic(x, y, w, h, 0.0f, 0.0f, 1.0f, 1.0f, + d_8to24table[c & 0xff], TEXNUM_WHITE); +} + +void +R_DrawFill32(int x, int y, int w, int h, uint32_t color) +{ + if(!w || !h) + return; + enqueue_stretch_pic(x, y, w, h, 0.0f, 0.0f, 1.0f, 1.0f, + color, TEXNUM_WHITE); +} + +static inline void +draw_char(int x, int y, int flags, int c, qhandle_t font) +{ + if ((c & 127) == 32) { + return; + } + 
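+ // UI_ALTCOLOR forces the high bit of the character and UI_XORCOLOR toggles it; the high bit + // selects a glyph from the second half of the font image and the alternate draw color (draw.colors[c >> 7] below).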
+ if (flags & UI_ALTCOLOR) { + c |= 0x80; + } + if (flags & UI_XORCOLOR) { + c ^= 0x80; + } + + float s = (c & 15) * 0.0625f; + float t = (c >> 4) * 0.0625f; + + float eps = 1e-5f; /* fixes some ugly artifacts */ + + enqueue_stretch_pic(x, y, CHAR_WIDTH, CHAR_HEIGHT, + s + eps, t + eps, s + 0.0625f - eps, t + 0.0625f - eps, + draw.colors[c >> 7].u32, font); +} + +void +R_DrawChar(int x, int y, int flags, int c, qhandle_t font) +{ + draw_char(x, y, flags, c & 255, font); +} + +int +R_DrawString(int x, int y, int flags, size_t maxlen, const char *s, qhandle_t font) +{ + while(maxlen-- && *s) { + byte c = *s++; + draw_char(x, y, flags, c, font); + x += CHAR_WIDTH; + } + + return x; +} + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/include/stb_image.h b/src/refresh/vkpt/include/stb_image.h new file mode 100644 index 0000000..d9c21bc --- /dev/null +++ b/src/refresh/vkpt/include/stb_image.h @@ -0,0 +1,7462 @@ +/* stb_image - v2.19 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. + And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. + +RECENT REVISION HISTORY: + + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. 
+ + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine + John-Mark Allen + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Phil Jordan + Dave Moore Roy Eltham Hayaki Saito Nathan Reed + Won Chun Luke Graham Johan Duparc Nick Verigakis + the Horde3D community Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Laurent Gomila Cort Stratton Sergio Gonzalez github:snagar + Aruelien Pocheville Thibault Reuille Cass Everitt github:Zelex + Ryamond Barbiero Paul Du Bois Engin Manap github:grim210 + Aldo Culquicondor Philipp Wiesemann Dale Weiler github:sammyhw + Oriol Ferrer Mesia Josh Tobin Matthew Gregan github:phprus + Julian Raschke Gregory Mullen Baldur Karlsson github:poppolopoppo + Christian Floisand Kevin Schmidt github:darealshinji + Blazej Dariusz Roszkowski github:Michaelangel007 +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. 
if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy to use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// make more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. 
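+//
+// For example, a NEON-enabled ARM build would typically put the toggle in
+// the single translation unit that creates the implementation:
+//
+//      #define STBI_NEON
+//      #define STB_IMAGE_IMPLEMENTATION
+//      #include "stb_image.h"
+//
+// whereas defining STBI_NO_SIMD instead keeps only the generic C paths.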
+// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image now supports loading HDR images in general, and currently +// the Radiance .HDR file format, although the support is provided +// generically. You can still load any file through the existing interface; +// if you attempt to load an HDR file, it will be automatically remapped to +// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). +// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. You can disable this conversion +// by by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. 
+//
+//        STBI_NO_JPEG
+//        STBI_NO_PNG
+//        STBI_NO_BMP
+//        STBI_NO_PSD
+//        STBI_NO_TGA
+//        STBI_NO_GIF
+//        STBI_NO_HDR
+//        STBI_NO_PIC
+//        STBI_NO_PNM   (.ppm and .pgm)
+//
+//   - You can request *only* certain decoders and suppress all other ones
+//     (this will be more forward-compatible, as addition of new decoders
+//     doesn't require you to disable them explicitly):
+//
+//        STBI_ONLY_JPEG
+//        STBI_ONLY_PNG
+//        STBI_ONLY_BMP
+//        STBI_ONLY_PSD
+//        STBI_ONLY_TGA
+//        STBI_ONLY_GIF
+//        STBI_ONLY_HDR
+//        STBI_ONLY_PIC
+//        STBI_ONLY_PNM   (.ppm and .pgm)
+//
+//   - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still
+//     want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB
+//
+
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif // STBI_NO_STDIO
+
+#define STBI_VERSION 1
+
+enum
+{
+   STBI_default = 0, // only used for desired_channels
+
+   STBI_grey       = 1,
+   STBI_grey_alpha = 2,
+   STBI_rgb        = 3,
+   STBI_rgb_alpha  = 4
+};
+
+typedef unsigned char stbi_uc;
+typedef unsigned short stbi_us;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef STB_IMAGE_STATIC
+#define STBIDEF static
+#else
+#define STBIDEF extern
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PRIMARY API - works on images of any type
+//
+
+//
+// load image by filename, open file, or memory buffer
+//
+
+typedef struct
+{
+   int      (*read)  (void *user,char *data,int size);   // fill 'data' with 'size' bytes.  return number of bytes actually read
+   void     (*skip)  (void *user,int n);                 // skip the next 'n' bytes, or 'unget' the last -n bytes if negative
+   int      (*eof)   (void *user);                       // returns nonzero if we are at end of file/data
+} stbi_io_callbacks;
+
+////////////////////////////////////
+//
+// 8-bits-per-channel interface
+//
+
+STBIDEF stbi_uc *stbi_load_from_memory   (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+#ifndef STBI_NO_GIF
+STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
+#endif
+
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_uc *stbi_load            (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_file  (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+// for stbi_load_from_file, file pointer is left pointing immediately after image
+#endif
+
+////////////////////////////////////
+//
+// 16-bits-per-channel interface
+//
+
+STBIDEF stbi_us *stbi_load_16_from_memory   (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_us *stbi_load_16          (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+#endif
+
+////////////////////////////////////
+//
+// float-per-channel interface
+//
+#ifndef STBI_NO_LINEAR
+   STBIDEF float *stbi_loadf_from_memory     (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+   STBIDEF float *stbi_loadf_from_callbacks  (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file,
int desired_channels); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// NOT THREADSAFE +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. 
+STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply);
+
+// indicate whether we should process iphone images back to canonical format,
+// or just pass them through "as-is"
+STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert);
+
+// flip the image vertically, so the first pixel in the output array is the bottom left
+STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip);
+
+// ZLIB client - used by PNG, available for other purposes
+
+STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen);
+STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header);
+STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen);
+STBIDEF int   stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
+
+STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen);
+STBIDEF int   stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+//
+//
+////   end header file   /////////////////////////////////////////////////////
+#endif // STBI_INCLUDE_STB_IMAGE_H
+
+#ifdef STB_IMAGE_IMPLEMENTATION
+
+#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \
+  || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \
+  || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \
+  || defined(STBI_ONLY_ZLIB)
+   #ifndef STBI_ONLY_JPEG
+   #define STBI_NO_JPEG
+   #endif
+   #ifndef STBI_ONLY_PNG
+   #define STBI_NO_PNG
+   #endif
+   #ifndef STBI_ONLY_BMP
+   #define STBI_NO_BMP
+   #endif
+   #ifndef STBI_ONLY_PSD
+   #define STBI_NO_PSD
+   #endif
+   #ifndef STBI_ONLY_TGA
+   #define STBI_NO_TGA
+   #endif
+   #ifndef STBI_ONLY_GIF
+   #define STBI_NO_GIF
+   #endif
+   #ifndef STBI_ONLY_HDR
+   #define STBI_NO_HDR
+   #endif
+   #ifndef STBI_ONLY_PIC
+   #define STBI_NO_PIC
+   #endif
+   #ifndef STBI_ONLY_PNM
+   #define STBI_NO_PNM
+   #endif
+#endif
+
+#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
+#define STBI_NO_ZLIB
+#endif
+
+
+#include <stdarg.h>
+#include <stddef.h> // ptrdiff_t on osx
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+#include <math.h>  // ldexp, pow
+#endif
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifndef STBI_ASSERT
+#include <assert.h>
+#define STBI_ASSERT(x) assert(x)
+#endif
+
+
+#ifndef _MSC_VER
+   #ifdef __cplusplus
+   #define stbi_inline inline
+   #else
+   #define stbi_inline
+   #endif
+#else
+   #define stbi_inline __forceinline
+#endif
+
+
+#ifdef _MSC_VER
+typedef unsigned short stbi__uint16;
+typedef   signed short stbi__int16;
+typedef unsigned int   stbi__uint32;
+typedef   signed int   stbi__int32;
+#else
+#include <stdint.h>
+typedef uint16_t stbi__uint16;
+typedef int16_t  stbi__int16;
+typedef uint32_t stbi__uint32;
+typedef int32_t  stbi__int32;
+#endif
+
+// should produce compiler error if size is wrong
+typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ?
1 : -1];
+
+#ifdef _MSC_VER
+#define STBI_NOTUSED(v)  (void)(v)
+#else
+#define STBI_NOTUSED(v)  (void)sizeof(v)
+#endif
+
+#ifdef _MSC_VER
+#define STBI_HAS_LROTL
+#endif
+
+#ifdef STBI_HAS_LROTL
+   #define stbi_lrot(x,y)  _lrotl(x,y)
+#else
+   #define stbi_lrot(x,y)  (((x) << (y)) | ((x) >> (32 - (y))))
+#endif
+
+#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
+// ok
+#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
+#endif
+
+#ifndef STBI_MALLOC
+#define STBI_MALLOC(sz)           malloc(sz)
+#define STBI_REALLOC(p,newsz)     realloc(p,newsz)
+#define STBI_FREE(p)              free(p)
+#endif
+
+#ifndef STBI_REALLOC_SIZED
+#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz)
+#endif
+
+// x86/x64 detection
+#if defined(__x86_64__) || defined(_M_X64)
+#define STBI__X64_TARGET
+#elif defined(__i386) || defined(_M_IX86)
+#define STBI__X86_TARGET
+#endif
+
+#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD)
+// gcc doesn't support sse2 intrinsics unless you compile with -msse2,
+// which in turn means it gets to use SSE2 everywhere. This is unfortunate,
+// but previous attempts to provide the SSE2 functions with runtime
+// detection caused numerous issues. The way architecture extensions are
+// exposed in GCC/Clang is, sadly, not really suited for one-file libs.
+// New behavior: if compiled with -msse2, we use SSE2 without any
+// detection; if not, we don't use it at all.
+#define STBI_NO_SIMD
+#endif
+
+#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
+// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET
+//
+// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the
+// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant.
+// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not
+// simultaneously enabling "-mstackrealign".
+//
+// See https://github.com/nothings/stb/issues/81 for more information.
+//
+// So default to no SSE2 on 32-bit MinGW. If you've read this far and added
+// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2.
+#define STBI_NO_SIMD
+#endif
+
+#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
+#define STBI_SSE2
+#include <emmintrin.h>
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1400  // not VC6
+#include <intrin.h> // __cpuid
+static int stbi__cpuid3(void)
+{
+   int info[4];
+   __cpuid(info,1);
+   return info[3];
+}
+#else
+static int stbi__cpuid3(void)
+{
+   int res;
+   __asm {
+      mov  eax,1
+      cpuid
+      mov  res,edx
+   }
+   return res;
+}
+#endif
+
+#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
+
+static int stbi__sse2_available(void)
+{
+   int info3 = stbi__cpuid3();
+   return ((info3 >> 26) & 1) != 0;
+}
+#else // assume GCC-style if not VC++
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+
+static int stbi__sse2_available(void)
+{
+   // If we're even attempting to compile this on GCC/Clang, that means
+   // -msse2 is on, which means the compiler is allowed to use SSE2
+   // instructions at will, and so are we.
+   return 1;
+}
+#endif
+#endif
+
+// ARM NEON
+#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
+#undef STBI_NEON
+#endif
+
+#ifdef STBI_NEON
+#include <arm_neon.h>
+// assume GCC or Clang on ARM targets
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+#endif
+
+#ifndef STBI_SIMD_ALIGN
+#define STBI_SIMD_ALIGN(type, name) type name
+#endif
+
+///////////////////////////////////////////////
+//
+//  stbi__context struct and start_xxx functions
+
+// stbi__context structure is our basic context used by all images, so it
+// contains all the IO context, plus some basic image information
+typedef struct
+{
+   stbi__uint32 img_x, img_y;
+   int img_n, img_out_n;
+
+   stbi_io_callbacks io;
+   void *io_user_data;
+
+   int read_from_callbacks;
+   int buflen;
+   stbi_uc buffer_start[128];
+
+   stbi_uc *img_buffer, *img_buffer_end;
+   stbi_uc *img_buffer_original, *img_buffer_original_end;
+} stbi__context;
+
+
+static void stbi__refill_buffer(stbi__context *s);
+
+// initialize a memory-decode context
+static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len)
+{
+   s->io.read = NULL;
+   s->read_from_callbacks = 0;
+   s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer;
+   s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len;
+}
+
+// initialize a callback-based context
+static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user)
+{
+   s->io = *c;
+   s->io_user_data = user;
+   s->buflen = sizeof(s->buffer_start);
+   s->read_from_callbacks = 1;
+   s->img_buffer_original = s->buffer_start;
+   stbi__refill_buffer(s);
+   s->img_buffer_original_end = s->img_buffer_end;
+}
+
+#ifndef STBI_NO_STDIO
+
+static int stbi__stdio_read(void *user, char *data, int size)
+{
+   return (int) fread(data,1,size,(FILE*) user);
+}
+
+static void stbi__stdio_skip(void *user, int n)
+{
+   fseek((FILE*) user, n, SEEK_CUR);
+}
+
+static int stbi__stdio_eof(void *user)
+{
+   return feof((FILE*) user);
+}
+
+static stbi_io_callbacks stbi__stdio_callbacks =
+{
+   stbi__stdio_read,
+   stbi__stdio_skip,
+   stbi__stdio_eof,
+};
+
+static void stbi__start_file(stbi__context *s, FILE *f)
+{
+   stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f);
+}
+
+//static void stop_file(stbi__context *s) { }
+
+#endif // !STBI_NO_STDIO
+
+static void stbi__rewind(stbi__context *s)
+{
+   // conceptually rewind SHOULD rewind to the beginning of the stream,
+   // but we just rewind to the beginning of the initial buffer, because
+   // we only use it after doing 'test', which only ever looks at at most 92 bytes
+   s->img_buffer = s->img_buffer_original;
+   s->img_buffer_end = s->img_buffer_original_end;
+}
+
+enum
+{
+   STBI_ORDER_RGB,
+   STBI_ORDER_BGR
+};
+
+typedef struct
+{
+   int bits_per_channel;
+   int num_channels;
+   int channel_order;
+} stbi__result_info;
+
+#ifndef STBI_NO_JPEG
+static int      stbi__jpeg_test(stbi__context *s);
+static void    *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int      stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_PNG
+static int      stbi__png_test(stbi__context *s);
+static void    *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int      stbi__png_info(stbi__context *s, int *x, int *y, int *comp);
+static int      stbi__png_is16(stbi__context *s);
+#endif
+
+#ifndef STBI_NO_BMP
+static int      stbi__bmp_test(stbi__context *s);
+static void    *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp,
stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +// this is not threadsafe +static const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. 
+static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load = flag_true_if_should_flip; +} + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return 
stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 8) { + STBI_ASSERT(ri.bits_per_channel == 16); + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? 
req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 16) { + STBI_ASSERT(ri.bits_per_channel == 8); + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) || !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return 
stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 
0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} + +static void stbi__skip(stbi__context *s, int n) +{ + if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} + +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} + +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} + +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + + +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). 
png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0], dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0], dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], 
dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + if (k < comp) output[i*comp + k] = data[i*comp+k]/255.0f; + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' 
+} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) + for (j=0; j < count[i]; ++j) + h->size[k++] = (stbi_uc) (i+1); + h->size[k] = 0; + + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<<n) + 1 +static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767}; + +// combined JPEG 'receive' and JPEG 'extend', since baseline +// always extends everything it receives. +stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) +{ + unsigned int k; + int sgn; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + + sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + k = stbi_lrot(j->code_buffer, n); + STBI_ASSERT(n >= 0 && n < (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & ~sgn); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major?
+static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + diff = t ? 
stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc << j->succ_low); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) << shift); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) << shift); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * 
stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. 
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = 
dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. + __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... + + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. 
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = 
vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. + dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! + + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. 
if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 
129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + 
STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + } else { + if (!stbi__process_marker(j, 
m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
+ __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
+ uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make 
stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4]; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } else { // YCbCr + alpha? 
Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255; + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + if (z->zbuffer >= z->zbuffer_end) return 0; + return *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + 
STBI_ASSERT(z->code_buffer < (1U << z->num_bits)); + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s == 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + STBI_ASSERT(z->size[b] == s); + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) stbi__fill_bits(a); + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (int) (z->zout - z->zout_start); + limit = old_limit = (int) (z->zout_end - z->zout_start); + while (cur + n > limit) + limit *= 2; + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. 
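/* Illustrative sketch (hypothetical helper, not part of this file): a DEFLATE
   back-reference copies `len` bytes starting `dist` bytes behind the output
   cursor.  When dist < len the source overlaps the bytes being written, so the
   copy must advance byte by byte -- which is why the dist == 1 case noted above
   degenerates into a run of a single byte and gets its own fast path. */
static void lz77_copy_backref(unsigned char *out, int dist, int len)
{
    const unsigned char *src = out - dist;   /* may overlap the destination */
    while (len--)
        *out++ = *src++;                     /* byte-by-byte copy handles the overlap */
}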
+ stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) + c = stbi__zreceive(a,3)+3; + else { + STBI_ASSERT(c == 18); + c = stbi__zreceive(a,7)+11; + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + STBI_ASSERT(a->num_bits == 0); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[288] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if 
(stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 
2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + STBI_ASSERT(img_width_bytes <= x); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
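/* Illustrative sketch (hypothetical helper, not part of this file): PNG
   reconstruction adds a per-filter predictor to each filtered byte x, using the
   already-reconstructed byte to the left (a), above (b) and above-left (c),
   modulo 256.  The STBI__CASE switch below performs the same per-filter math,
   unrolled over whole scanlines. */
static stbi_uc png_unfilter_byte(int filter, stbi_uc x, stbi_uc a, stbi_uc b, stbi_uc c)
{
    switch (filter) {
        case STBI__F_none: return x;
        case STBI__F_sub:  return STBI__BYTECAST(x + a);
        case STBI__F_up:   return STBI__BYTECAST(x + b);
        case STBI__F_avg:  return STBI__BYTECAST(x + ((a + b) >> 1));
        default:           return STBI__BYTECAST(x + stbi__paeth(a, b, c)); /* Paeth */
    }
}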
+ case STBI__F_none: memcpy(cur, raw, nk); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; + } + #undef STBI__CASE + raw += nk; + } else { + STBI_ASSERT(img_n+1 == out_n); + #define STBI__CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ + for (k=0; k < filter_bytes; ++k) + switch (filter) { + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; + } + #undef STBI__CASE + + // the loop above sets the high byte of the pixels' alpha, but for + // 16 bit png files we also need the low byte set. we'll do that here. + if (depth == 16) { + cur = a->out + stride*j; // start at the beginning of the row again + for (i=0; i < x; ++i,cur+=output_bytes) { + cur[filter_bytes+1] = 255; + } + } + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. + stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 
2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 
0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return 
stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. + s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? 
c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if (has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth < 8) + ri->bits_per_channel = 8; + else + ri->bits_per_channel = p->depth; + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; 
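/* Illustrative sketch (hypothetical helper, not part of this file): every PNG
   chunk is length / 4-byte type / data / CRC.  Bit 5 of the type's first byte
   (bit 29 once the type is packed big-endian, as stbi__get_chunk_header does)
   marks a chunk as ancillary; the default case above therefore errors out only
   when an unknown chunk has that bit clear, i.e. is critical. */
static int png_chunk_is_critical(stbi__uint32 type)   /* e.g. STBI__PNG_TYPE('I','H','D','R') */
{
    return (type & (1u << 29)) == 0;                   /* uppercase first letter => critical */
}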
+ if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) n += 16, z >>= 16; + if (z >= 0x00100) n += 8, z >>= 8; + if (z >= 0x00010) n += 4, z >>= 4; + if (z >= 0x00004) n += 2, z >>= 2; + if (z >= 0x00002) n += 1, z >>= 1; + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. 
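/* Illustrative sketch (hypothetical helper, not part of this file): extending
   an n-bit channel to 8 bits means replicating its bits so the maximum input
   maps to 255.  For a 5-bit BMP/TGA channel that is (v << 3) | (v >> 2);
   stbi__shiftsigned below computes the same result for any field width with
   one table-driven multiply and shift. */
static stbi_uc expand_5bit_to_8bit(unsigned int v)     /* v in 0..31 */
{
    return (stbi_uc)((v << 3) | (v >> 2));             /* 0 -> 0, 31 -> 255 */
}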
+static int stbi__shiftsigned(int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v >= 0 && v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? 
+ return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - 14 - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - 14 - info.hsz) >> 2; + } + + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? 
stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - 14 - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i], p1[i] = p2[i], p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz 
= stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB +static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) + // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + + // do a tiny bit of precessing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? + if ( tga_indexed) + { + // any data to skip? 
(offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. 
+ + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceeded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. 
+ do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? + + for(y=0; y<height; ++y) { + int packet_idx; + + for(packet_idx=0; packet_idx < num_packets; ++packet_idx) { + stbi__pic_packet *packet = &packets[packet_idx]; + stbi_uc *dest = result+y*width*4; + + switch (packet->type) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;x<width;++x, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; i<count; ++i,dest+=4) + stbi__copyval(packet->channel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;i<count;++i, dest+=4) + stbi__copyval(packet->channel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;i<count;++i, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); //skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + memset(result, 0xff, x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + 
stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = &g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) 
init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + g->out = (stbi_uc *) stbi__malloc(4 * g->w * g->h); + g->background = (stbi_uc *) stbi__malloc(4 * g->w * g->h); + g->history = (stbi_uc *) stbi__malloc(g->w * g->h); + if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "tranparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to teh color that was there the previous frame. + memset( g->out, 0x00, 4 * g->w * g->h ); + memset( g->background, 0x00, 4 * g->w * g->h ); // state of the background (starts transparent) + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispoase of the previous one? 
+ dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. + } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (o == NULL) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. 
+ + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + out = (stbi_uc*) STBI_REALLOC( out, layers * stride ); + if (delays) { + *delays = (int*) STBI_REALLOC( *delays, sizeof(int) * layers ); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. 
+ if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; + // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) 
strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind( s ); + if (p == NULL) + return 0; + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) *comp = info.ma ? 4 : 3; + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + (void) stbi__get32be(s); + (void) stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 
4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! + #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) 
change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 'skip' field of I/O + 1.37 
(2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/src/refresh/vkpt/include/stb_image_resize.h b/src/refresh/vkpt/include/stb_image_resize.h new file mode 100644 index 0000000..031ca99 --- /dev/null +++ b/src/refresh/vkpt/include/stb_image_resize.h @@ -0,0 +1,2627 @@ +/* stb_image_resize - v0.95 - public domain image resizing + by Jorge L Rodriguez (@VinoBS) - 2014 + http://github.com/nothings/stb + + Written with emphasis on usability, portability, and efficiency. (No + SIMD or threads, so it be easily outperformed by libs that use those.) + Only scaling and translation is supported, no rotations or shears. + Easy API downsamples w/Mitchell filter, upsamples w/cubic interpolation. + + COMPILING & LINKING + In one C/C++ file that #includes this file, do this: + #define STB_IMAGE_RESIZE_IMPLEMENTATION + before the #include. That will create the implementation in that file. + + QUICKSTART + stbir_resize_uint8( input_pixels , in_w , in_h , 0, + output_pixels, out_w, out_h, 0, num_channels) + stbir_resize_float(...) + stbir_resize_uint8_srgb( input_pixels , in_w , in_h , 0, + output_pixels, out_w, out_h, 0, + num_channels , alpha_chan , 0) + stbir_resize_uint8_srgb_edgemode( + input_pixels , in_w , in_h , 0, + output_pixels, out_w, out_h, 0, + num_channels , alpha_chan , 0, STBIR_EDGE_CLAMP) + // WRAP/REFLECT/ZERO + + FULL API + See the "header file" section of the source for API documentation. + + ADDITIONAL DOCUMENTATION + + SRGB & FLOATING POINT REPRESENTATION + The sRGB functions presume IEEE floating point. If you do not have + IEEE floating point, define STBIR_NON_IEEE_FLOAT. This will use + a slower implementation. + + MEMORY ALLOCATION + The resize functions here perform a single memory allocation using + malloc. To control the memory allocation, before the #include that + triggers the implementation, do: + + #define STBIR_MALLOC(size,context) ... + #define STBIR_FREE(ptr,context) ... + + Each resize function makes exactly one call to malloc/free, so to use + temp memory, store the temp memory in the context and return that. + + ASSERT + Define STBIR_ASSERT(boolval) to override assert() and not use assert.h + + OPTIMIZATION + Define STBIR_SATURATE_INT to compute clamp values in-range using + integer operations instead of float operations. This may be faster + on some platforms. + + DEFAULT FILTERS + For functions which don't provide explicit control over what filters + to use, you can change the compile-time defaults with + + #define STBIR_DEFAULT_FILTER_UPSAMPLE STBIR_FILTER_something + #define STBIR_DEFAULT_FILTER_DOWNSAMPLE STBIR_FILTER_something + + See stbir_filter in the header-file section for the list of filters. + + NEW FILTERS + A number of 1D filter kernels are used. For a list of + supported filters see the stbir_filter enum. To add a new filter, + write a filter function and add it to stbir__filter_info_table. 
+ + PROGRESS + For interactive use with slow resize operations, you can install + a progress-report callback: + + #define STBIR_PROGRESS_REPORT(val) some_func(val) + + The parameter val is a float which goes from 0 to 1 as progress is made. + + For example: + + static void my_progress_report(float progress); + #define STBIR_PROGRESS_REPORT(val) my_progress_report(val) + + #define STB_IMAGE_RESIZE_IMPLEMENTATION + #include "stb_image_resize.h" + + static void my_progress_report(float progress) + { + printf("Progress: %f%%\n", progress*100); + } + + MAX CHANNELS + If your image has more than 64 channels, define STBIR_MAX_CHANNELS + to the max you'll have. + + ALPHA CHANNEL + Most of the resizing functions provide the ability to control how + the alpha channel of an image is processed. The important things + to know about this: + + 1. The best mathematically-behaved version of alpha to use is + called "premultiplied alpha", in which the other color channels + have had the alpha value multiplied in. If you use premultiplied + alpha, linear filtering (such as image resampling done by this + library, or performed in texture units on GPUs) does the "right + thing". While premultiplied alpha is standard in the movie CGI + industry, it is still uncommon in the videogame/real-time world. + + If you linearly filter non-premultiplied alpha, strange effects + occur. (For example, the 50/50 average of 99% transparent bright green + and 1% transparent black produces 50% transparent dark green when + non-premultiplied, whereas premultiplied it produces 50% + transparent near-black. The former introduces green energy + that doesn't exist in the source image.) + + 2. Artists should not edit premultiplied-alpha images; artists + want non-premultiplied alpha images. Thus, art tools generally output + non-premultiplied alpha images. + + 3. You will get best results in most cases by converting images + to premultiplied alpha before processing them mathematically. + + 4. If you pass the flag STBIR_FLAG_ALPHA_PREMULTIPLIED, the + resizer does not do anything special for the alpha channel; + it is resampled identically to other channels. This produces + the correct results for premultiplied-alpha images, but produces + less-than-ideal results for non-premultiplied-alpha images. + + 5. If you do not pass the flag STBIR_FLAG_ALPHA_PREMULTIPLIED, + then the resizer weights the contribution of input pixels + based on their alpha values, or, equivalently, it multiplies + the alpha value into the color channels, resamples, then divides + by the resultant alpha value. Input pixels which have alpha=0 do + not contribute at all to output pixels unless _all_ of the input + pixels affecting that output pixel have alpha=0, in which case + the result for that pixel is the same as it would be without + STBIR_FLAG_ALPHA_PREMULTIPLIED. However, this is only true for + input images in integer formats. For input images in float format, + input pixels with alpha=0 have no effect, and output pixels + which have alpha=0 will be 0 in all channels. (For float images, + you can manually achieve the same result by adding a tiny epsilon + value to the alpha channel of every image, and then subtracting + or clamping it at the end.) + + 6. You can suppress the behavior described in #5 and make + all-0-alpha pixels have 0 in all channels by #defining + STBIR_NO_ALPHA_EPSILON. + + 7. You can separately control whether the alpha channel is + interpreted as linear or affected by the colorspace. 
By default
+ it is linear; you almost never want to apply the colorspace.
+ (For example, graphics hardware does not apply sRGB conversion
+ to the alpha channel.)
+
+ CONTRIBUTORS
+ Jorge L Rodriguez: Implementation
+ Sean Barrett: API design, optimizations
+ Aras Pranckevicius: bugfix
+ Nathan Reed: warning fixes
+
+ REVISIONS
+ 0.95 (2017-07-23) fixed warnings
+ 0.94 (2017-03-18) fixed warnings
+ 0.93 (2017-03-03) fixed bug with certain combinations of heights
+ 0.92 (2017-01-02) fix integer overflow on large (>2GB) images
+ 0.91 (2016-04-02) fix warnings; fix handling of subpixel regions
+ 0.90 (2014-09-17) first released version
+
+ LICENSE
+ See end of file for license information.
+
+ TODO
+ Don't decode all of the image data when only processing a partial tile
+ Don't use full-width decode buffers when only processing a partial tile
+ When processing wide images, break processing into tiles so data fits in L1 cache
+ Installable filters?
+ Resize that respects alpha test coverage
+ (Reference code: FloatImage::alphaTestCoverage and FloatImage::scaleAlphaToCoverage:
+ https://code.google.com/p/nvidia-texture-tools/source/browse/trunk/src/nvimage/FloatImage.cpp )
+*/
+
+#ifndef STBIR_INCLUDE_STB_IMAGE_RESIZE_H
+#define STBIR_INCLUDE_STB_IMAGE_RESIZE_H
+
+#ifdef _MSC_VER
+typedef unsigned char stbir_uint8;
+typedef unsigned short stbir_uint16;
+typedef unsigned int stbir_uint32;
+#else
+#include <stdint.h>
+typedef uint8_t stbir_uint8;
+typedef uint16_t stbir_uint16;
+typedef uint32_t stbir_uint32;
+#endif
+
+#ifdef STB_IMAGE_RESIZE_STATIC
+#define STBIRDEF static
+#else
+#ifdef __cplusplus
+#define STBIRDEF extern "C"
+#else
+#define STBIRDEF extern
+#endif
+#endif
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Easy-to-use API:
+//
+// * "input pixels" points to an array of image data with 'num_channels' channels (e.g. RGB=3, RGBA=4)
+// * input_w is input image width (x-axis), input_h is input image height (y-axis)
+// * stride is the offset between successive rows of image data in memory, in bytes. you can
+// specify 0 to mean packed continuously in memory
+// * alpha channel is treated identically to other channels.
+// * colorspace is linear or sRGB as specified by function name
+// * returned result is 1 for success or 0 in case of an error.
+// #define STBIR_ASSERT() to trigger an assert on parameter validation errors.
+// * Memory required grows approximately linearly with input and output size, but with
+// discontinuities at input_w == output_w and input_h == output_h.
+// * These functions use a "default" resampling filter defined at compile time. To change the filter,
+// you can change the compile-time defaults by #defining STBIR_DEFAULT_FILTER_UPSAMPLE
+// and STBIR_DEFAULT_FILTER_DOWNSAMPLE, or you can use the medium-complexity API.
+
+STBIRDEF int stbir_resize_uint8( const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+ unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+ int num_channels);
+
+STBIRDEF int stbir_resize_float( const float *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+ float *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+ int num_channels);
+
+
+// The following functions interpret image data as gamma-corrected sRGB.
+// Specify STBIR_ALPHA_CHANNEL_NONE if you have no alpha channel,
+// or otherwise provide the index of the alpha channel.
Flags value +// of 0 will probably do the right thing if you're not sure what +// the flags mean. + +#define STBIR_ALPHA_CHANNEL_NONE -1 + +// Set this flag if your texture has premultiplied alpha. Otherwise, stbir will +// use alpha-weighted resampling (effectively premultiplying, resampling, +// then unpremultiplying). +#define STBIR_FLAG_ALPHA_PREMULTIPLIED (1 << 0) +// The specified alpha channel should be handled as gamma-corrected value even +// when doing sRGB operations. +#define STBIR_FLAG_ALPHA_USES_COLORSPACE (1 << 1) + +STBIRDEF int stbir_resize_uint8_srgb(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + int num_channels, int alpha_channel, int flags); + + +typedef enum +{ + STBIR_EDGE_CLAMP = 1, + STBIR_EDGE_REFLECT = 2, + STBIR_EDGE_WRAP = 3, + STBIR_EDGE_ZERO = 4, +} stbir_edge; + +// This function adds the ability to specify how requests to sample off the edge of the image are handled. +STBIRDEF int stbir_resize_uint8_srgb_edgemode(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_wrap_mode); + +////////////////////////////////////////////////////////////////////////////// +// +// Medium-complexity API +// +// This extends the easy-to-use API as follows: +// +// * Alpha-channel can be processed separately +// * If alpha_channel is not STBIR_ALPHA_CHANNEL_NONE +// * Alpha channel will not be gamma corrected (unless flags&STBIR_FLAG_GAMMA_CORRECT) +// * Filters will be weighted by alpha channel (unless flags&STBIR_FLAG_ALPHA_PREMULTIPLIED) +// * Filter can be selected explicitly +// * uint16 image type +// * sRGB colorspace available for all types +// * context parameter for passing to STBIR_MALLOC + +typedef enum +{ + STBIR_FILTER_DEFAULT = 0, // use same filter type that easy-to-use API chooses + STBIR_FILTER_BOX = 1, // A trapezoid w/1-pixel wide ramps, same result as box for integer scale ratios + STBIR_FILTER_TRIANGLE = 2, // On upsampling, produces same results as bilinear texture filtering + STBIR_FILTER_CUBICBSPLINE = 3, // The cubic b-spline (aka Mitchell-Netrevalli with B=1,C=0), gaussian-esque + STBIR_FILTER_CATMULLROM = 4, // An interpolating cubic spline + STBIR_FILTER_MITCHELL = 5, // Mitchell-Netrevalli filter with B=1/3, C=1/3 +} stbir_filter; + +typedef enum +{ + STBIR_COLORSPACE_LINEAR, + STBIR_COLORSPACE_SRGB, + + STBIR_MAX_COLORSPACES, +} stbir_colorspace; + +// The following functions are all identical except for the type of the image data + +STBIRDEF int stbir_resize_uint8_generic( const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, + void *alloc_context); + +STBIRDEF int stbir_resize_uint16_generic(const stbir_uint16 *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + stbir_uint16 *output_pixels , int output_w, int output_h, int output_stride_in_bytes, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, + void *alloc_context); + +STBIRDEF int stbir_resize_float_generic( const float 
*input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+ float *output_pixels , int output_w, int output_h, int output_stride_in_bytes,
+ int num_channels, int alpha_channel, int flags,
+ stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space,
+ void *alloc_context);
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Full-complexity API
+//
+// This extends the medium API as follows:
+//
+// * uint32 image type
+// * not typesafe
+// * separate filter types for each axis
+// * separate edge modes for each axis
+// * can specify scale explicitly for subpixel correctness
+// * can specify image source tile using texture coordinates
+
+typedef enum
+{
+ STBIR_TYPE_UINT8 ,
+ STBIR_TYPE_UINT16,
+ STBIR_TYPE_UINT32,
+ STBIR_TYPE_FLOAT ,
+
+ STBIR_MAX_TYPES
+} stbir_datatype;
+
+STBIRDEF int stbir_resize( const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+ void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+ stbir_datatype datatype,
+ int num_channels, int alpha_channel, int flags,
+ stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
+ stbir_filter filter_horizontal, stbir_filter filter_vertical,
+ stbir_colorspace space, void *alloc_context);
+
+STBIRDEF int stbir_resize_subpixel(const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+ void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+ stbir_datatype datatype,
+ int num_channels, int alpha_channel, int flags,
+ stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
+ stbir_filter filter_horizontal, stbir_filter filter_vertical,
+ stbir_colorspace space, void *alloc_context,
+ float x_scale, float y_scale,
+ float x_offset, float y_offset);
+
+STBIRDEF int stbir_resize_region( const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+ void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+ stbir_datatype datatype,
+ int num_channels, int alpha_channel, int flags,
+ stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
+ stbir_filter filter_horizontal, stbir_filter filter_vertical,
+ stbir_colorspace space, void *alloc_context,
+ float s0, float t0, float s1, float t1);
+// (s0, t0) & (s1, t1) are the top-left and bottom right corner (uv addressing style: [0, 1]x[0, 1]) of a region of the input image to use.
+
+//
+//
+//// end header file /////////////////////////////////////////////////////
+#endif // STBIR_INCLUDE_STB_IMAGE_RESIZE_H
+
+
+
+
+
+#ifdef STB_IMAGE_RESIZE_IMPLEMENTATION
+
+#ifndef STBIR_ASSERT
+#include <assert.h>
+#define STBIR_ASSERT(x) assert(x)
+#endif
+
+// For memset
+#include <string.h>
+
+#include <math.h>
+
+#ifndef STBIR_MALLOC
+#include <stdlib.h>
+// use comma operator to evaluate c, to avoid "unused parameter" warnings
+#define STBIR_MALLOC(size,c) ((void)(c), malloc(size))
+#define STBIR_FREE(ptr,c) ((void)(c), free(ptr))
+#endif
+
+#ifndef _MSC_VER
+#ifdef __cplusplus
+#define stbir__inline inline
+#else
+#define stbir__inline
+#endif
+#else
+#define stbir__inline __forceinline
+#endif
+
+
+// should produce compiler error if size is wrong
+typedef unsigned char stbir__validate_uint32[sizeof(stbir_uint32) == 4 ?
1 : -1]; + +#ifdef _MSC_VER +#define STBIR__NOTUSED(v) (void)(v) +#else +#define STBIR__NOTUSED(v) (void)sizeof(v) +#endif + +#define STBIR__ARRAY_SIZE(a) (sizeof((a))/sizeof((a)[0])) + +#ifndef STBIR_DEFAULT_FILTER_UPSAMPLE +#define STBIR_DEFAULT_FILTER_UPSAMPLE STBIR_FILTER_CATMULLROM +#endif + +#ifndef STBIR_DEFAULT_FILTER_DOWNSAMPLE +#define STBIR_DEFAULT_FILTER_DOWNSAMPLE STBIR_FILTER_MITCHELL +#endif + +#ifndef STBIR_PROGRESS_REPORT +#define STBIR_PROGRESS_REPORT(float_0_to_1) +#endif + +#ifndef STBIR_MAX_CHANNELS +#define STBIR_MAX_CHANNELS 64 +#endif + +#if STBIR_MAX_CHANNELS > 65536 +#error "Too many channels; STBIR_MAX_CHANNELS must be no more than 65536." +// because we store the indices in 16-bit variables +#endif + +// This value is added to alpha just before premultiplication to avoid +// zeroing out color values. It is equivalent to 2^-80. If you don't want +// that behavior (it may interfere if you have floating point images with +// very small alpha values) then you can define STBIR_NO_ALPHA_EPSILON to +// disable it. +#ifndef STBIR_ALPHA_EPSILON +#define STBIR_ALPHA_EPSILON ((float)1 / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20)) +#endif + + + +#ifdef _MSC_VER +#define STBIR__UNUSED_PARAM(v) (void)(v) +#else +#define STBIR__UNUSED_PARAM(v) (void)sizeof(v) +#endif + +// must match stbir_datatype +static unsigned char stbir__type_size[] = { + 1, // STBIR_TYPE_UINT8 + 2, // STBIR_TYPE_UINT16 + 4, // STBIR_TYPE_UINT32 + 4, // STBIR_TYPE_FLOAT +}; + +// Kernel function centered at 0 +typedef float (stbir__kernel_fn)(float x, float scale); +typedef float (stbir__support_fn)(float scale); + +typedef struct +{ + stbir__kernel_fn* kernel; + stbir__support_fn* support; +} stbir__filter_info; + +// When upsampling, the contributors are which source pixels contribute. +// When downsampling, the contributors are which destination pixels are contributed to. +typedef struct +{ + int n0; // First contributing pixel + int n1; // Last contributing pixel +} stbir__contributors; + +typedef struct +{ + const void* input_data; + int input_w; + int input_h; + int input_stride_bytes; + + void* output_data; + int output_w; + int output_h; + int output_stride_bytes; + + float s0, t0, s1, t1; + + float horizontal_shift; // Units: output pixels + float vertical_shift; // Units: output pixels + float horizontal_scale; + float vertical_scale; + + int channels; + int alpha_channel; + stbir_uint32 flags; + stbir_datatype type; + stbir_filter horizontal_filter; + stbir_filter vertical_filter; + stbir_edge edge_horizontal; + stbir_edge edge_vertical; + stbir_colorspace colorspace; + + stbir__contributors* horizontal_contributors; + float* horizontal_coefficients; + + stbir__contributors* vertical_contributors; + float* vertical_coefficients; + + int decode_buffer_pixels; + float* decode_buffer; + + float* horizontal_buffer; + + // cache these because ceil/floor are inexplicably showing up in profile + int horizontal_coefficient_width; + int vertical_coefficient_width; + int horizontal_filter_pixel_width; + int vertical_filter_pixel_width; + int horizontal_filter_pixel_margin; + int vertical_filter_pixel_margin; + int horizontal_num_contributors; + int vertical_num_contributors; + + int ring_buffer_length_bytes; // The length of an individual entry in the ring buffer. The total number of ring buffers is stbir__get_filter_pixel_width(filter) + int ring_buffer_num_entries; // Total number of entries in the ring buffer. 
+ int ring_buffer_first_scanline; + int ring_buffer_last_scanline; + int ring_buffer_begin_index; // first_scanline is at this index in the ring buffer + float* ring_buffer; + + float* encode_buffer; // A temporary buffer to store floats so we don't lose precision while we do multiply-adds. + + int horizontal_contributors_size; + int horizontal_coefficients_size; + int vertical_contributors_size; + int vertical_coefficients_size; + int decode_buffer_size; + int horizontal_buffer_size; + int ring_buffer_size; + int encode_buffer_size; +} stbir__info; + + +static const float stbir__max_uint8_as_float = 255.0f; +static const float stbir__max_uint16_as_float = 65535.0f; +static const double stbir__max_uint32_as_float = 4294967295.0; + + +static stbir__inline int stbir__min(int a, int b) +{ + return a < b ? a : b; +} + +static stbir__inline float stbir__saturate(float x) +{ + if (x < 0) + return 0; + + if (x > 1) + return 1; + + return x; +} + +#ifdef STBIR_SATURATE_INT +static stbir__inline stbir_uint8 stbir__saturate8(int x) +{ + if ((unsigned int) x <= 255) + return x; + + if (x < 0) + return 0; + + return 255; +} + +static stbir__inline stbir_uint16 stbir__saturate16(int x) +{ + if ((unsigned int) x <= 65535) + return x; + + if (x < 0) + return 0; + + return 65535; +} +#endif + +static float stbir__srgb_uchar_to_linear_float[256] = { + 0.000000f, 0.000304f, 0.000607f, 0.000911f, 0.001214f, 0.001518f, 0.001821f, 0.002125f, 0.002428f, 0.002732f, 0.003035f, + 0.003347f, 0.003677f, 0.004025f, 0.004391f, 0.004777f, 0.005182f, 0.005605f, 0.006049f, 0.006512f, 0.006995f, 0.007499f, + 0.008023f, 0.008568f, 0.009134f, 0.009721f, 0.010330f, 0.010960f, 0.011612f, 0.012286f, 0.012983f, 0.013702f, 0.014444f, + 0.015209f, 0.015996f, 0.016807f, 0.017642f, 0.018500f, 0.019382f, 0.020289f, 0.021219f, 0.022174f, 0.023153f, 0.024158f, + 0.025187f, 0.026241f, 0.027321f, 0.028426f, 0.029557f, 0.030713f, 0.031896f, 0.033105f, 0.034340f, 0.035601f, 0.036889f, + 0.038204f, 0.039546f, 0.040915f, 0.042311f, 0.043735f, 0.045186f, 0.046665f, 0.048172f, 0.049707f, 0.051269f, 0.052861f, + 0.054480f, 0.056128f, 0.057805f, 0.059511f, 0.061246f, 0.063010f, 0.064803f, 0.066626f, 0.068478f, 0.070360f, 0.072272f, + 0.074214f, 0.076185f, 0.078187f, 0.080220f, 0.082283f, 0.084376f, 0.086500f, 0.088656f, 0.090842f, 0.093059f, 0.095307f, + 0.097587f, 0.099899f, 0.102242f, 0.104616f, 0.107023f, 0.109462f, 0.111932f, 0.114435f, 0.116971f, 0.119538f, 0.122139f, + 0.124772f, 0.127438f, 0.130136f, 0.132868f, 0.135633f, 0.138432f, 0.141263f, 0.144128f, 0.147027f, 0.149960f, 0.152926f, + 0.155926f, 0.158961f, 0.162029f, 0.165132f, 0.168269f, 0.171441f, 0.174647f, 0.177888f, 0.181164f, 0.184475f, 0.187821f, + 0.191202f, 0.194618f, 0.198069f, 0.201556f, 0.205079f, 0.208637f, 0.212231f, 0.215861f, 0.219526f, 0.223228f, 0.226966f, + 0.230740f, 0.234551f, 0.238398f, 0.242281f, 0.246201f, 0.250158f, 0.254152f, 0.258183f, 0.262251f, 0.266356f, 0.270498f, + 0.274677f, 0.278894f, 0.283149f, 0.287441f, 0.291771f, 0.296138f, 0.300544f, 0.304987f, 0.309469f, 0.313989f, 0.318547f, + 0.323143f, 0.327778f, 0.332452f, 0.337164f, 0.341914f, 0.346704f, 0.351533f, 0.356400f, 0.361307f, 0.366253f, 0.371238f, + 0.376262f, 0.381326f, 0.386430f, 0.391573f, 0.396755f, 0.401978f, 0.407240f, 0.412543f, 0.417885f, 0.423268f, 0.428691f, + 0.434154f, 0.439657f, 0.445201f, 0.450786f, 0.456411f, 0.462077f, 0.467784f, 0.473532f, 0.479320f, 0.485150f, 0.491021f, + 0.496933f, 0.502887f, 0.508881f, 0.514918f, 0.520996f, 0.527115f, 0.533276f, 0.539480f, 0.545725f, 
0.552011f, 0.558340f, + 0.564712f, 0.571125f, 0.577581f, 0.584078f, 0.590619f, 0.597202f, 0.603827f, 0.610496f, 0.617207f, 0.623960f, 0.630757f, + 0.637597f, 0.644480f, 0.651406f, 0.658375f, 0.665387f, 0.672443f, 0.679543f, 0.686685f, 0.693872f, 0.701102f, 0.708376f, + 0.715694f, 0.723055f, 0.730461f, 0.737911f, 0.745404f, 0.752942f, 0.760525f, 0.768151f, 0.775822f, 0.783538f, 0.791298f, + 0.799103f, 0.806952f, 0.814847f, 0.822786f, 0.830770f, 0.838799f, 0.846873f, 0.854993f, 0.863157f, 0.871367f, 0.879622f, + 0.887923f, 0.896269f, 0.904661f, 0.913099f, 0.921582f, 0.930111f, 0.938686f, 0.947307f, 0.955974f, 0.964686f, 0.973445f, + 0.982251f, 0.991102f, 1.0f +}; + +static float stbir__srgb_to_linear(float f) +{ + if (f <= 0.04045f) + return f / 12.92f; + else + return (float)pow((f + 0.055f) / 1.055f, 2.4f); +} + +static float stbir__linear_to_srgb(float f) +{ + if (f <= 0.0031308f) + return f * 12.92f; + else + return 1.055f * (float)pow(f, 1 / 2.4f) - 0.055f; +} + +#ifndef STBIR_NON_IEEE_FLOAT +// From https://gist.github.com/rygorous/2203834 + +typedef union +{ + stbir_uint32 u; + float f; +} stbir__FP32; + +static const stbir_uint32 fp32_to_srgb8_tab4[104] = { + 0x0073000d, 0x007a000d, 0x0080000d, 0x0087000d, 0x008d000d, 0x0094000d, 0x009a000d, 0x00a1000d, + 0x00a7001a, 0x00b4001a, 0x00c1001a, 0x00ce001a, 0x00da001a, 0x00e7001a, 0x00f4001a, 0x0101001a, + 0x010e0033, 0x01280033, 0x01410033, 0x015b0033, 0x01750033, 0x018f0033, 0x01a80033, 0x01c20033, + 0x01dc0067, 0x020f0067, 0x02430067, 0x02760067, 0x02aa0067, 0x02dd0067, 0x03110067, 0x03440067, + 0x037800ce, 0x03df00ce, 0x044600ce, 0x04ad00ce, 0x051400ce, 0x057b00c5, 0x05dd00bc, 0x063b00b5, + 0x06970158, 0x07420142, 0x07e30130, 0x087b0120, 0x090b0112, 0x09940106, 0x0a1700fc, 0x0a9500f2, + 0x0b0f01cb, 0x0bf401ae, 0x0ccb0195, 0x0d950180, 0x0e56016e, 0x0f0d015e, 0x0fbc0150, 0x10630143, + 0x11070264, 0x1238023e, 0x1357021d, 0x14660201, 0x156601e9, 0x165a01d3, 0x174401c0, 0x182401af, + 0x18fe0331, 0x1a9602fe, 0x1c1502d2, 0x1d7e02ad, 0x1ed4028d, 0x201a0270, 0x21520256, 0x227d0240, + 0x239f0443, 0x25c003fe, 0x27bf03c4, 0x29a10392, 0x2b6a0367, 0x2d1d0341, 0x2ebe031f, 0x304d0300, + 0x31d105b0, 0x34a80555, 0x37520507, 0x39d504c5, 0x3c37048b, 0x3e7c0458, 0x40a8042a, 0x42bd0401, + 0x44c20798, 0x488e071e, 0x4c1c06b6, 0x4f76065d, 0x52a50610, 0x55ac05cc, 0x5892058f, 0x5b590559, + 0x5e0c0a23, 0x631c0980, 0x67db08f6, 0x6c55087f, 0x70940818, 0x74a007bd, 0x787d076c, 0x7c330723, +}; + +static stbir_uint8 stbir__linear_to_srgb_uchar(float in) +{ + static const stbir__FP32 almostone = { 0x3f7fffff }; // 1-eps + static const stbir__FP32 minval = { (127-13) << 23 }; + stbir_uint32 tab,bias,scale,t; + stbir__FP32 f; + + // Clamp to [2^(-13), 1-eps]; these two values map to 0 and 1, respectively. + // The tests are carefully written so that NaNs map to 0, same as in the reference + // implementation. 
+ if (!(in > minval.f)) // written this way to catch NaNs + in = minval.f; + if (in > almostone.f) + in = almostone.f; + + // Do the table lookup and unpack bias, scale + f.f = in; + tab = fp32_to_srgb8_tab4[(f.u - minval.u) >> 20]; + bias = (tab >> 16) << 9; + scale = tab & 0xffff; + + // Grab next-highest mantissa bits and perform linear interpolation + t = (f.u >> 12) & 0xff; + return (unsigned char) ((bias + scale*t) >> 16); +} + +#else +// sRGB transition values, scaled by 1<<28 +static int stbir__srgb_offset_to_linear_scaled[256] = +{ + 0, 40738, 122216, 203693, 285170, 366648, 448125, 529603, + 611080, 692557, 774035, 855852, 942009, 1033024, 1128971, 1229926, + 1335959, 1447142, 1563542, 1685229, 1812268, 1944725, 2082664, 2226148, + 2375238, 2529996, 2690481, 2856753, 3028870, 3206888, 3390865, 3580856, + 3776916, 3979100, 4187460, 4402049, 4622919, 4850123, 5083710, 5323731, + 5570236, 5823273, 6082892, 6349140, 6622065, 6901714, 7188133, 7481369, + 7781466, 8088471, 8402427, 8723380, 9051372, 9386448, 9728650, 10078021, + 10434603, 10798439, 11169569, 11548036, 11933879, 12327139, 12727857, 13136073, + 13551826, 13975156, 14406100, 14844697, 15290987, 15745007, 16206795, 16676389, + 17153826, 17639142, 18132374, 18633560, 19142734, 19659934, 20185196, 20718552, + 21260042, 21809696, 22367554, 22933648, 23508010, 24090680, 24681686, 25281066, + 25888850, 26505076, 27129772, 27762974, 28404716, 29055026, 29713942, 30381490, + 31057708, 31742624, 32436272, 33138682, 33849884, 34569912, 35298800, 36036568, + 36783260, 37538896, 38303512, 39077136, 39859796, 40651528, 41452360, 42262316, + 43081432, 43909732, 44747252, 45594016, 46450052, 47315392, 48190064, 49074096, + 49967516, 50870356, 51782636, 52704392, 53635648, 54576432, 55526772, 56486700, + 57456236, 58435408, 59424248, 60422780, 61431036, 62449032, 63476804, 64514376, + 65561776, 66619028, 67686160, 68763192, 69850160, 70947088, 72053992, 73170912, + 74297864, 75434880, 76581976, 77739184, 78906536, 80084040, 81271736, 82469648, + 83677792, 84896192, 86124888, 87363888, 88613232, 89872928, 91143016, 92423512, + 93714432, 95015816, 96327688, 97650056, 98982952, 100326408, 101680440, 103045072, + 104420320, 105806224, 107202800, 108610064, 110028048, 111456776, 112896264, 114346544, + 115807632, 117279552, 118762328, 120255976, 121760536, 123276016, 124802440, 126339832, + 127888216, 129447616, 131018048, 132599544, 134192112, 135795792, 137410592, 139036528, + 140673648, 142321952, 143981456, 145652208, 147334208, 149027488, 150732064, 152447968, + 154175200, 155913792, 157663776, 159425168, 161197984, 162982240, 164777968, 166585184, + 168403904, 170234160, 172075968, 173929344, 175794320, 177670896, 179559120, 181458992, + 183370528, 185293776, 187228736, 189175424, 191133888, 193104112, 195086128, 197079968, + 199085648, 201103184, 203132592, 205173888, 207227120, 209292272, 211369392, 213458480, + 215559568, 217672656, 219797792, 221934976, 224084240, 226245600, 228419056, 230604656, + 232802400, 235012320, 237234432, 239468736, 241715280, 243974080, 246245120, 248528464, + 250824112, 253132064, 255452368, 257785040, 260130080, 262487520, 264857376, 267239664, +}; + +static stbir_uint8 stbir__linear_to_srgb_uchar(float f) +{ + int x = (int) (f * (1 << 28)); // has headroom so you don't need to clamp + int v = 0; + int i; + + // Refine the guess with a short binary search. 
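+    // (Each step below fixes one bit of the 8-bit result, most significant bit
+    // first, so after all eight steps v is the largest index whose entry in
+    // stbir__srgb_offset_to_linear_scaled does not exceed x; negative inputs
+    // simply fall through to 0.)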
+ i = v + 128; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; + i = v + 64; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; + i = v + 32; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; + i = v + 16; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; + i = v + 8; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; + i = v + 4; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; + i = v + 2; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; + i = v + 1; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i; + + return (stbir_uint8) v; +} +#endif + +static float stbir__filter_trapezoid(float x, float scale) +{ + float halfscale = scale / 2; + float t = 0.5f + halfscale; + STBIR_ASSERT(scale <= 1); + + x = (float)fabs(x); + + if (x >= t) + return 0; + else + { + float r = 0.5f - halfscale; + if (x <= r) + return 1; + else + return (t - x) / scale; + } +} + +static float stbir__support_trapezoid(float scale) +{ + STBIR_ASSERT(scale <= 1); + return 0.5f + scale / 2; +} + +static float stbir__filter_triangle(float x, float s) +{ + STBIR__UNUSED_PARAM(s); + + x = (float)fabs(x); + + if (x <= 1.0f) + return 1 - x; + else + return 0; +} + +static float stbir__filter_cubic(float x, float s) +{ + STBIR__UNUSED_PARAM(s); + + x = (float)fabs(x); + + if (x < 1.0f) + return (4 + x*x*(3*x - 6))/6; + else if (x < 2.0f) + return (8 + x*(-12 + x*(6 - x)))/6; + + return (0.0f); +} + +static float stbir__filter_catmullrom(float x, float s) +{ + STBIR__UNUSED_PARAM(s); + + x = (float)fabs(x); + + if (x < 1.0f) + return 1 - x*x*(2.5f - 1.5f*x); + else if (x < 2.0f) + return 2 - x*(4 + x*(0.5f*x - 2.5f)); + + return (0.0f); +} + +static float stbir__filter_mitchell(float x, float s) +{ + STBIR__UNUSED_PARAM(s); + + x = (float)fabs(x); + + if (x < 1.0f) + return (16 + x*x*(21 * x - 36))/18; + else if (x < 2.0f) + return (32 + x*(-60 + x*(36 - 7*x)))/18; + + return (0.0f); +} + +static float stbir__support_zero(float s) +{ + STBIR__UNUSED_PARAM(s); + return 0; +} + +static float stbir__support_one(float s) +{ + STBIR__UNUSED_PARAM(s); + return 1; +} + +static float stbir__support_two(float s) +{ + STBIR__UNUSED_PARAM(s); + return 2; +} + +static stbir__filter_info stbir__filter_info_table[] = { + { NULL, stbir__support_zero }, + { stbir__filter_trapezoid, stbir__support_trapezoid }, + { stbir__filter_triangle, stbir__support_one }, + { stbir__filter_cubic, stbir__support_two }, + { stbir__filter_catmullrom, stbir__support_two }, + { stbir__filter_mitchell, stbir__support_two }, +}; + +stbir__inline static int stbir__use_upsampling(float ratio) +{ + return ratio > 1; +} + +stbir__inline static int stbir__use_width_upsampling(stbir__info* stbir_info) +{ + return stbir__use_upsampling(stbir_info->horizontal_scale); +} + +stbir__inline static int stbir__use_height_upsampling(stbir__info* stbir_info) +{ + return stbir__use_upsampling(stbir_info->vertical_scale); +} + +// This is the maximum number of input samples that can affect an output sample +// with the given filter +static int stbir__get_filter_pixel_width(stbir_filter filter, float scale) +{ + STBIR_ASSERT(filter != 0); + STBIR_ASSERT(filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); + + if (stbir__use_upsampling(scale)) + return (int)ceil(stbir__filter_info_table[filter].support(1/scale) * 2); + else + return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2 / scale); +} + +// This is how much to expand buffers to account for filters seeking outside +// the image boundaries. 
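+// (Half the filter's total pixel width: the decode buffers are padded by this
+// many pixels on each side so the kernel can sample past the image edge.)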
+static int stbir__get_filter_pixel_margin(stbir_filter filter, float scale) +{ + return stbir__get_filter_pixel_width(filter, scale) / 2; +} + +static int stbir__get_coefficient_width(stbir_filter filter, float scale) +{ + if (stbir__use_upsampling(scale)) + return (int)ceil(stbir__filter_info_table[filter].support(1 / scale) * 2); + else + return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2); +} + +static int stbir__get_contributors(float scale, stbir_filter filter, int input_size, int output_size) +{ + if (stbir__use_upsampling(scale)) + return output_size; + else + return (input_size + stbir__get_filter_pixel_margin(filter, scale) * 2); +} + +static int stbir__get_total_horizontal_coefficients(stbir__info* info) +{ + return info->horizontal_num_contributors + * stbir__get_coefficient_width (info->horizontal_filter, info->horizontal_scale); +} + +static int stbir__get_total_vertical_coefficients(stbir__info* info) +{ + return info->vertical_num_contributors + * stbir__get_coefficient_width (info->vertical_filter, info->vertical_scale); +} + +static stbir__contributors* stbir__get_contributor(stbir__contributors* contributors, int n) +{ + return &contributors[n]; +} + +// For perf reasons this code is duplicated in stbir__resample_horizontal_upsample/downsample, +// if you change it here change it there too. +static float* stbir__get_coefficient(float* coefficients, stbir_filter filter, float scale, int n, int c) +{ + int width = stbir__get_coefficient_width(filter, scale); + return &coefficients[width*n + c]; +} + +static int stbir__edge_wrap_slow(stbir_edge edge, int n, int max) +{ + switch (edge) + { + case STBIR_EDGE_ZERO: + return 0; // we'll decode the wrong pixel here, and then overwrite with 0s later + + case STBIR_EDGE_CLAMP: + if (n < 0) + return 0; + + if (n >= max) + return max - 1; + + return n; // NOTREACHED + + case STBIR_EDGE_REFLECT: + { + if (n < 0) + { + if (n < max) + return -n; + else + return max - 1; + } + + if (n >= max) + { + int max2 = max * 2; + if (n >= max2) + return 0; + else + return max2 - n - 1; + } + + return n; // NOTREACHED + } + + case STBIR_EDGE_WRAP: + if (n >= 0) + return (n % max); + else + { + int m = (-n) % max; + + if (m != 0) + m = max - m; + + return (m); + } + // NOTREACHED + + default: + STBIR_ASSERT(!"Unimplemented edge type"); + return 0; + } +} + +stbir__inline static int stbir__edge_wrap(stbir_edge edge, int n, int max) +{ + // avoid per-pixel switch + if (n >= 0 && n < max) + return n; + return stbir__edge_wrap_slow(edge, n, max); +} + +// What input pixels contribute to this output pixel? +static void stbir__calculate_sample_range_upsample(int n, float out_filter_radius, float scale_ratio, float out_shift, int* in_first_pixel, int* in_last_pixel, float* in_center_of_out) +{ + float out_pixel_center = (float)n + 0.5f; + float out_pixel_influence_lowerbound = out_pixel_center - out_filter_radius; + float out_pixel_influence_upperbound = out_pixel_center + out_filter_radius; + + float in_pixel_influence_lowerbound = (out_pixel_influence_lowerbound + out_shift) / scale_ratio; + float in_pixel_influence_upperbound = (out_pixel_influence_upperbound + out_shift) / scale_ratio; + + *in_center_of_out = (out_pixel_center + out_shift) / scale_ratio; + *in_first_pixel = (int)(floor(in_pixel_influence_lowerbound + 0.5)); + *in_last_pixel = (int)(floor(in_pixel_influence_upperbound - 0.5)); +} + +// What output pixels does this input pixel contribute to? 
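+// (Mirror of the upsample case above: the input pixel's center and filter radius
+// are mapped into output space via out = in * scale_ratio - out_shift, and the
+// first/last output pixels are those whose centers fall inside that interval.)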
+static void stbir__calculate_sample_range_downsample(int n, float in_pixels_radius, float scale_ratio, float out_shift, int* out_first_pixel, int* out_last_pixel, float* out_center_of_in) +{ + float in_pixel_center = (float)n + 0.5f; + float in_pixel_influence_lowerbound = in_pixel_center - in_pixels_radius; + float in_pixel_influence_upperbound = in_pixel_center + in_pixels_radius; + + float out_pixel_influence_lowerbound = in_pixel_influence_lowerbound * scale_ratio - out_shift; + float out_pixel_influence_upperbound = in_pixel_influence_upperbound * scale_ratio - out_shift; + + *out_center_of_in = in_pixel_center * scale_ratio - out_shift; + *out_first_pixel = (int)(floor(out_pixel_influence_lowerbound + 0.5)); + *out_last_pixel = (int)(floor(out_pixel_influence_upperbound - 0.5)); +} + +static void stbir__calculate_coefficients_upsample(stbir_filter filter, float scale, int in_first_pixel, int in_last_pixel, float in_center_of_out, stbir__contributors* contributor, float* coefficient_group) +{ + int i; + float total_filter = 0; + float filter_scale; + + STBIR_ASSERT(in_last_pixel - in_first_pixel <= (int)ceil(stbir__filter_info_table[filter].support(1/scale) * 2)); // Taken directly from stbir__get_coefficient_width() which we can't call because we don't know if we're horizontal or vertical. + + contributor->n0 = in_first_pixel; + contributor->n1 = in_last_pixel; + + STBIR_ASSERT(contributor->n1 >= contributor->n0); + + for (i = 0; i <= in_last_pixel - in_first_pixel; i++) + { + float in_pixel_center = (float)(i + in_first_pixel) + 0.5f; + coefficient_group[i] = stbir__filter_info_table[filter].kernel(in_center_of_out - in_pixel_center, 1 / scale); + + // If the coefficient is zero, skip it. (Don't do the <0 check here, we want the influence of those outside pixels.) + if (i == 0 && !coefficient_group[i]) + { + contributor->n0 = ++in_first_pixel; + i--; + continue; + } + + total_filter += coefficient_group[i]; + } + + STBIR_ASSERT(stbir__filter_info_table[filter].kernel((float)(in_last_pixel + 1) + 0.5f - in_center_of_out, 1/scale) == 0); + + STBIR_ASSERT(total_filter > 0.9); + STBIR_ASSERT(total_filter < 1.1f); // Make sure it's not way off. + + // Make sure the sum of all coefficients is 1. + filter_scale = 1 / total_filter; + + for (i = 0; i <= in_last_pixel - in_first_pixel; i++) + coefficient_group[i] *= filter_scale; + + for (i = in_last_pixel - in_first_pixel; i >= 0; i--) + { + if (coefficient_group[i]) + break; + + // This line has no weight. We can skip it. + contributor->n1 = contributor->n0 + i - 1; + } +} + +static void stbir__calculate_coefficients_downsample(stbir_filter filter, float scale_ratio, int out_first_pixel, int out_last_pixel, float out_center_of_in, stbir__contributors* contributor, float* coefficient_group) +{ + int i; + + STBIR_ASSERT(out_last_pixel - out_first_pixel <= (int)ceil(stbir__filter_info_table[filter].support(scale_ratio) * 2)); // Taken directly from stbir__get_coefficient_width() which we can't call because we don't know if we're horizontal or vertical. 
+ + contributor->n0 = out_first_pixel; + contributor->n1 = out_last_pixel; + + STBIR_ASSERT(contributor->n1 >= contributor->n0); + + for (i = 0; i <= out_last_pixel - out_first_pixel; i++) + { + float out_pixel_center = (float)(i + out_first_pixel) + 0.5f; + float x = out_pixel_center - out_center_of_in; + coefficient_group[i] = stbir__filter_info_table[filter].kernel(x, scale_ratio) * scale_ratio; + } + + STBIR_ASSERT(stbir__filter_info_table[filter].kernel((float)(out_last_pixel + 1) + 0.5f - out_center_of_in, scale_ratio) == 0); + + for (i = out_last_pixel - out_first_pixel; i >= 0; i--) + { + if (coefficient_group[i]) + break; + + // This line has no weight. We can skip it. + contributor->n1 = contributor->n0 + i - 1; + } +} + +static void stbir__normalize_downsample_coefficients(stbir__contributors* contributors, float* coefficients, stbir_filter filter, float scale_ratio, int input_size, int output_size) +{ + int num_contributors = stbir__get_contributors(scale_ratio, filter, input_size, output_size); + int num_coefficients = stbir__get_coefficient_width(filter, scale_ratio); + int i, j; + int skip; + + for (i = 0; i < output_size; i++) + { + float scale; + float total = 0; + + for (j = 0; j < num_contributors; j++) + { + if (i >= contributors[j].n0 && i <= contributors[j].n1) + { + float coefficient = *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i - contributors[j].n0); + total += coefficient; + } + else if (i < contributors[j].n0) + break; + } + + STBIR_ASSERT(total > 0.9f); + STBIR_ASSERT(total < 1.1f); + + scale = 1 / total; + + for (j = 0; j < num_contributors; j++) + { + if (i >= contributors[j].n0 && i <= contributors[j].n1) + *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i - contributors[j].n0) *= scale; + else if (i < contributors[j].n0) + break; + } + } + + // Optimize: Skip zero coefficients and contributions outside of image bounds. + // Do this after normalizing because normalization depends on the n0/n1 values. + for (j = 0; j < num_contributors; j++) + { + int range, max, width; + + skip = 0; + while (*stbir__get_coefficient(coefficients, filter, scale_ratio, j, skip) == 0) + skip++; + + contributors[j].n0 += skip; + + while (contributors[j].n0 < 0) + { + contributors[j].n0++; + skip++; + } + + range = contributors[j].n1 - contributors[j].n0 + 1; + max = stbir__min(num_coefficients, range); + + width = stbir__get_coefficient_width(filter, scale_ratio); + for (i = 0; i < max; i++) + { + if (i + skip >= width) + break; + + *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i) = *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i + skip); + } + + continue; + } + + // Using min to avoid writing into invalid pixels. + for (i = 0; i < num_contributors; i++) + contributors[i].n1 = stbir__min(contributors[i].n1, output_size - 1); +} + +// Each scan line uses the same kernel values so we should calculate the kernel +// values once and then we can use them for every scan line. 
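+// (When upsampling this walks the output pixels and records which input pixels
+// feed each one; when downsampling it walks the input pixels plus the filter
+// margin, records which output pixels each one feeds, and then renormalizes the
+// weights so every output pixel's coefficients sum to 1.)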
+static void stbir__calculate_filters(stbir__contributors* contributors, float* coefficients, stbir_filter filter, float scale_ratio, float shift, int input_size, int output_size) +{ + int n; + int total_contributors = stbir__get_contributors(scale_ratio, filter, input_size, output_size); + + if (stbir__use_upsampling(scale_ratio)) + { + float out_pixels_radius = stbir__filter_info_table[filter].support(1 / scale_ratio) * scale_ratio; + + // Looping through out pixels + for (n = 0; n < total_contributors; n++) + { + float in_center_of_out; // Center of the current out pixel in the in pixel space + int in_first_pixel, in_last_pixel; + + stbir__calculate_sample_range_upsample(n, out_pixels_radius, scale_ratio, shift, &in_first_pixel, &in_last_pixel, &in_center_of_out); + + stbir__calculate_coefficients_upsample(filter, scale_ratio, in_first_pixel, in_last_pixel, in_center_of_out, stbir__get_contributor(contributors, n), stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0)); + } + } + else + { + float in_pixels_radius = stbir__filter_info_table[filter].support(scale_ratio) / scale_ratio; + + // Looping through in pixels + for (n = 0; n < total_contributors; n++) + { + float out_center_of_in; // Center of the current out pixel in the in pixel space + int out_first_pixel, out_last_pixel; + int n_adjusted = n - stbir__get_filter_pixel_margin(filter, scale_ratio); + + stbir__calculate_sample_range_downsample(n_adjusted, in_pixels_radius, scale_ratio, shift, &out_first_pixel, &out_last_pixel, &out_center_of_in); + + stbir__calculate_coefficients_downsample(filter, scale_ratio, out_first_pixel, out_last_pixel, out_center_of_in, stbir__get_contributor(contributors, n), stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0)); + } + + stbir__normalize_downsample_coefficients(contributors, coefficients, filter, scale_ratio, input_size, output_size); + } +} + +static float* stbir__get_decode_buffer(stbir__info* stbir_info) +{ + // The 0 index of the decode buffer starts after the margin. This makes + // it okay to use negative indexes on the decode buffer. 
+ return &stbir_info->decode_buffer[stbir_info->horizontal_filter_pixel_margin * stbir_info->channels]; +} + +#define STBIR__DECODE(type, colorspace) ((type) * (STBIR_MAX_COLORSPACES) + (colorspace)) + +static void stbir__decode_scanline(stbir__info* stbir_info, int n) +{ + int c; + int channels = stbir_info->channels; + int alpha_channel = stbir_info->alpha_channel; + int type = stbir_info->type; + int colorspace = stbir_info->colorspace; + int input_w = stbir_info->input_w; + size_t input_stride_bytes = stbir_info->input_stride_bytes; + float* decode_buffer = stbir__get_decode_buffer(stbir_info); + stbir_edge edge_horizontal = stbir_info->edge_horizontal; + stbir_edge edge_vertical = stbir_info->edge_vertical; + size_t in_buffer_row_offset = stbir__edge_wrap(edge_vertical, n, stbir_info->input_h) * input_stride_bytes; + const void* input_data = (char *) stbir_info->input_data + in_buffer_row_offset; + int max_x = input_w + stbir_info->horizontal_filter_pixel_margin; + int decode = STBIR__DECODE(type, colorspace); + + int x = -stbir_info->horizontal_filter_pixel_margin; + + // special handling for STBIR_EDGE_ZERO because it needs to return an item that doesn't appear in the input, + // and we want to avoid paying overhead on every pixel if not STBIR_EDGE_ZERO + if (edge_vertical == STBIR_EDGE_ZERO && (n < 0 || n >= stbir_info->input_h)) + { + for (; x < max_x; x++) + for (c = 0; c < channels; c++) + decode_buffer[x*channels + c] = 0; + return; + } + + switch (decode) + { + case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR): + for (; x < max_x; x++) + { + int decode_pixel_index = x * channels; + int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; + for (c = 0; c < channels; c++) + decode_buffer[decode_pixel_index + c] = ((float)((const unsigned char*)input_data)[input_pixel_index + c]) / stbir__max_uint8_as_float; + } + break; + + case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB): + for (; x < max_x; x++) + { + int decode_pixel_index = x * channels; + int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; + for (c = 0; c < channels; c++) + decode_buffer[decode_pixel_index + c] = stbir__srgb_uchar_to_linear_float[((const unsigned char*)input_data)[input_pixel_index + c]]; + + if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) + decode_buffer[decode_pixel_index + alpha_channel] = ((float)((const unsigned char*)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint8_as_float; + } + break; + + case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR): + for (; x < max_x; x++) + { + int decode_pixel_index = x * channels; + int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; + for (c = 0; c < channels; c++) + decode_buffer[decode_pixel_index + c] = ((float)((const unsigned short*)input_data)[input_pixel_index + c]) / stbir__max_uint16_as_float; + } + break; + + case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB): + for (; x < max_x; x++) + { + int decode_pixel_index = x * channels; + int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; + for (c = 0; c < channels; c++) + decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(((float)((const unsigned short*)input_data)[input_pixel_index + c]) / stbir__max_uint16_as_float); + + if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) + decode_buffer[decode_pixel_index + alpha_channel] = ((float)((const unsigned short*)input_data)[input_pixel_index + alpha_channel]) / 
stbir__max_uint16_as_float; + } + break; + + case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR): + for (; x < max_x; x++) + { + int decode_pixel_index = x * channels; + int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; + for (c = 0; c < channels; c++) + decode_buffer[decode_pixel_index + c] = (float)(((double)((const unsigned int*)input_data)[input_pixel_index + c]) / stbir__max_uint32_as_float); + } + break; + + case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB): + for (; x < max_x; x++) + { + int decode_pixel_index = x * channels; + int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; + for (c = 0; c < channels; c++) + decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear((float)(((double)((const unsigned int*)input_data)[input_pixel_index + c]) / stbir__max_uint32_as_float)); + + if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) + decode_buffer[decode_pixel_index + alpha_channel] = (float)(((double)((const unsigned int*)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint32_as_float); + } + break; + + case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR): + for (; x < max_x; x++) + { + int decode_pixel_index = x * channels; + int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; + for (c = 0; c < channels; c++) + decode_buffer[decode_pixel_index + c] = ((const float*)input_data)[input_pixel_index + c]; + } + break; + + case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB): + for (; x < max_x; x++) + { + int decode_pixel_index = x * channels; + int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; + for (c = 0; c < channels; c++) + decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(((const float*)input_data)[input_pixel_index + c]); + + if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) + decode_buffer[decode_pixel_index + alpha_channel] = ((const float*)input_data)[input_pixel_index + alpha_channel]; + } + + break; + + default: + STBIR_ASSERT(!"Unknown type/colorspace/channels combination."); + break; + } + + if (!(stbir_info->flags & STBIR_FLAG_ALPHA_PREMULTIPLIED)) + { + for (x = -stbir_info->horizontal_filter_pixel_margin; x < max_x; x++) + { + int decode_pixel_index = x * channels; + + // If the alpha value is 0 it will clobber the color values. Make sure it's not. 
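+            // (STBIR_ALPHA_EPSILON is roughly 2^-80, far below the quantization step
+            // of any integer format, so the bias vanishes again on encode.)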
+ float alpha = decode_buffer[decode_pixel_index + alpha_channel]; +#ifndef STBIR_NO_ALPHA_EPSILON + if (stbir_info->type != STBIR_TYPE_FLOAT) { + alpha += STBIR_ALPHA_EPSILON; + decode_buffer[decode_pixel_index + alpha_channel] = alpha; + } +#endif + for (c = 0; c < channels; c++) + { + if (c == alpha_channel) + continue; + + decode_buffer[decode_pixel_index + c] *= alpha; + } + } + } + + if (edge_horizontal == STBIR_EDGE_ZERO) + { + for (x = -stbir_info->horizontal_filter_pixel_margin; x < 0; x++) + { + for (c = 0; c < channels; c++) + decode_buffer[x*channels + c] = 0; + } + for (x = input_w; x < max_x; x++) + { + for (c = 0; c < channels; c++) + decode_buffer[x*channels + c] = 0; + } + } +} + +static float* stbir__get_ring_buffer_entry(float* ring_buffer, int index, int ring_buffer_length) +{ + return &ring_buffer[index * ring_buffer_length]; +} + +static float* stbir__add_empty_ring_buffer_entry(stbir__info* stbir_info, int n) +{ + int ring_buffer_index; + float* ring_buffer; + + stbir_info->ring_buffer_last_scanline = n; + + if (stbir_info->ring_buffer_begin_index < 0) + { + ring_buffer_index = stbir_info->ring_buffer_begin_index = 0; + stbir_info->ring_buffer_first_scanline = n; + } + else + { + ring_buffer_index = (stbir_info->ring_buffer_begin_index + (stbir_info->ring_buffer_last_scanline - stbir_info->ring_buffer_first_scanline)) % stbir_info->ring_buffer_num_entries; + STBIR_ASSERT(ring_buffer_index != stbir_info->ring_buffer_begin_index); + } + + ring_buffer = stbir__get_ring_buffer_entry(stbir_info->ring_buffer, ring_buffer_index, stbir_info->ring_buffer_length_bytes / sizeof(float)); + memset(ring_buffer, 0, stbir_info->ring_buffer_length_bytes); + + return ring_buffer; +} + + +static void stbir__resample_horizontal_upsample(stbir__info* stbir_info, float* output_buffer) +{ + int x, k; + int output_w = stbir_info->output_w; + int channels = stbir_info->channels; + float* decode_buffer = stbir__get_decode_buffer(stbir_info); + stbir__contributors* horizontal_contributors = stbir_info->horizontal_contributors; + float* horizontal_coefficients = stbir_info->horizontal_coefficients; + int coefficient_width = stbir_info->horizontal_coefficient_width; + + for (x = 0; x < output_w; x++) + { + int n0 = horizontal_contributors[x].n0; + int n1 = horizontal_contributors[x].n1; + + int out_pixel_index = x * channels; + int coefficient_group = coefficient_width * x; + int coefficient_counter = 0; + + STBIR_ASSERT(n1 >= n0); + STBIR_ASSERT(n0 >= -stbir_info->horizontal_filter_pixel_margin); + STBIR_ASSERT(n1 >= -stbir_info->horizontal_filter_pixel_margin); + STBIR_ASSERT(n0 < stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin); + STBIR_ASSERT(n1 < stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin); + + switch (channels) { + case 1: + for (k = n0; k <= n1; k++) + { + int in_pixel_index = k * 1; + float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; + STBIR_ASSERT(coefficient != 0); + output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; + } + break; + case 2: + for (k = n0; k <= n1; k++) + { + int in_pixel_index = k * 2; + float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; + STBIR_ASSERT(coefficient != 0); + output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; + output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; + } + break; + case 3: + for (k = n0; k <= n1; k++) + { + int in_pixel_index = 
k * 3; + float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; + STBIR_ASSERT(coefficient != 0); + output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; + output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; + output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; + } + break; + case 4: + for (k = n0; k <= n1; k++) + { + int in_pixel_index = k * 4; + float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; + STBIR_ASSERT(coefficient != 0); + output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; + output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; + output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; + output_buffer[out_pixel_index + 3] += decode_buffer[in_pixel_index + 3] * coefficient; + } + break; + default: + for (k = n0; k <= n1; k++) + { + int in_pixel_index = k * channels; + float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; + int c; + STBIR_ASSERT(coefficient != 0); + for (c = 0; c < channels; c++) + output_buffer[out_pixel_index + c] += decode_buffer[in_pixel_index + c] * coefficient; + } + break; + } + } +} + +static void stbir__resample_horizontal_downsample(stbir__info* stbir_info, float* output_buffer) +{ + int x, k; + int input_w = stbir_info->input_w; + int channels = stbir_info->channels; + float* decode_buffer = stbir__get_decode_buffer(stbir_info); + stbir__contributors* horizontal_contributors = stbir_info->horizontal_contributors; + float* horizontal_coefficients = stbir_info->horizontal_coefficients; + int coefficient_width = stbir_info->horizontal_coefficient_width; + int filter_pixel_margin = stbir_info->horizontal_filter_pixel_margin; + int max_x = input_w + filter_pixel_margin * 2; + + STBIR_ASSERT(!stbir__use_width_upsampling(stbir_info)); + + switch (channels) { + case 1: + for (x = 0; x < max_x; x++) + { + int n0 = horizontal_contributors[x].n0; + int n1 = horizontal_contributors[x].n1; + + int in_x = x - filter_pixel_margin; + int in_pixel_index = in_x * 1; + int max_n = n1; + int coefficient_group = coefficient_width * x; + + for (k = n0; k <= max_n; k++) + { + int out_pixel_index = k * 1; + float coefficient = horizontal_coefficients[coefficient_group + k - n0]; + STBIR_ASSERT(coefficient != 0); + output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; + } + } + break; + + case 2: + for (x = 0; x < max_x; x++) + { + int n0 = horizontal_contributors[x].n0; + int n1 = horizontal_contributors[x].n1; + + int in_x = x - filter_pixel_margin; + int in_pixel_index = in_x * 2; + int max_n = n1; + int coefficient_group = coefficient_width * x; + + for (k = n0; k <= max_n; k++) + { + int out_pixel_index = k * 2; + float coefficient = horizontal_coefficients[coefficient_group + k - n0]; + STBIR_ASSERT(coefficient != 0); + output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; + output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; + } + } + break; + + case 3: + for (x = 0; x < max_x; x++) + { + int n0 = horizontal_contributors[x].n0; + int n1 = horizontal_contributors[x].n1; + + int in_x = x - filter_pixel_margin; + int in_pixel_index = in_x * 3; + int max_n = n1; + int coefficient_group = coefficient_width * x; + + for (k = n0; k <= max_n; k++) + { + int out_pixel_index = k * 
3; + float coefficient = horizontal_coefficients[coefficient_group + k - n0]; + STBIR_ASSERT(coefficient != 0); + output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; + output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; + output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; + } + } + break; + + case 4: + for (x = 0; x < max_x; x++) + { + int n0 = horizontal_contributors[x].n0; + int n1 = horizontal_contributors[x].n1; + + int in_x = x - filter_pixel_margin; + int in_pixel_index = in_x * 4; + int max_n = n1; + int coefficient_group = coefficient_width * x; + + for (k = n0; k <= max_n; k++) + { + int out_pixel_index = k * 4; + float coefficient = horizontal_coefficients[coefficient_group + k - n0]; + STBIR_ASSERT(coefficient != 0); + output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; + output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; + output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; + output_buffer[out_pixel_index + 3] += decode_buffer[in_pixel_index + 3] * coefficient; + } + } + break; + + default: + for (x = 0; x < max_x; x++) + { + int n0 = horizontal_contributors[x].n0; + int n1 = horizontal_contributors[x].n1; + + int in_x = x - filter_pixel_margin; + int in_pixel_index = in_x * channels; + int max_n = n1; + int coefficient_group = coefficient_width * x; + + for (k = n0; k <= max_n; k++) + { + int c; + int out_pixel_index = k * channels; + float coefficient = horizontal_coefficients[coefficient_group + k - n0]; + STBIR_ASSERT(coefficient != 0); + for (c = 0; c < channels; c++) + output_buffer[out_pixel_index + c] += decode_buffer[in_pixel_index + c] * coefficient; + } + } + break; + } +} + +static void stbir__decode_and_resample_upsample(stbir__info* stbir_info, int n) +{ + // Decode the nth scanline from the source image into the decode buffer. + stbir__decode_scanline(stbir_info, n); + + // Now resample it into the ring buffer. + if (stbir__use_width_upsampling(stbir_info)) + stbir__resample_horizontal_upsample(stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n)); + else + stbir__resample_horizontal_downsample(stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n)); + + // Now it's sitting in the ring buffer ready to be used as source for the vertical sampling. +} + +static void stbir__decode_and_resample_downsample(stbir__info* stbir_info, int n) +{ + // Decode the nth scanline from the source image into the decode buffer. + stbir__decode_scanline(stbir_info, n); + + memset(stbir_info->horizontal_buffer, 0, stbir_info->output_w * stbir_info->channels * sizeof(float)); + + // Now resample it into the horizontal buffer. + if (stbir__use_width_upsampling(stbir_info)) + stbir__resample_horizontal_upsample(stbir_info, stbir_info->horizontal_buffer); + else + stbir__resample_horizontal_downsample(stbir_info, stbir_info->horizontal_buffer); + + // Now it's sitting in the horizontal buffer ready to be distributed into the ring buffers. +} + +// Get the specified scan line from the ring buffer. 
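+// (Scanlines are stored in arrival order starting at begin_index, so the slot
+// for get_scanline is (get_scanline - first_scanline) entries past it, wrapped
+// modulo ring_buffer_num_entries.)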
+static float* stbir__get_ring_buffer_scanline(int get_scanline, float* ring_buffer, int begin_index, int first_scanline, int ring_buffer_num_entries, int ring_buffer_length) +{ + int ring_buffer_index = (begin_index + (get_scanline - first_scanline)) % ring_buffer_num_entries; + return stbir__get_ring_buffer_entry(ring_buffer, ring_buffer_index, ring_buffer_length); +} + + +static void stbir__encode_scanline(stbir__info* stbir_info, int num_pixels, void *output_buffer, float *encode_buffer, int channels, int alpha_channel, int decode) +{ + int x; + int n; + int num_nonalpha; + stbir_uint16 nonalpha[STBIR_MAX_CHANNELS]; + + if (!(stbir_info->flags&STBIR_FLAG_ALPHA_PREMULTIPLIED)) + { + for (x=0; x < num_pixels; ++x) + { + int pixel_index = x*channels; + + float alpha = encode_buffer[pixel_index + alpha_channel]; + float reciprocal_alpha = alpha ? 1.0f / alpha : 0; + + // unrolling this produced a 1% slowdown upscaling a large RGBA linear-space image on my machine - stb + for (n = 0; n < channels; n++) + if (n != alpha_channel) + encode_buffer[pixel_index + n] *= reciprocal_alpha; + + // We added in a small epsilon to prevent the color channel from being deleted with zero alpha. + // Because we only add it for integer types, it will automatically be discarded on integer + // conversion, so we don't need to subtract it back out (which would be problematic for + // numeric precision reasons). + } + } + + // build a table of all channels that need colorspace correction, so + // we don't perform colorspace correction on channels that don't need it. + for (x = 0, num_nonalpha = 0; x < channels; ++x) + { + if (x != alpha_channel || (stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) + { + nonalpha[num_nonalpha++] = (stbir_uint16)x; + } + } + + #define STBIR__ROUND_INT(f) ((int) ((f)+0.5)) + #define STBIR__ROUND_UINT(f) ((stbir_uint32) ((f)+0.5)) + + #ifdef STBIR__SATURATE_INT + #define STBIR__ENCODE_LINEAR8(f) stbir__saturate8 (STBIR__ROUND_INT((f) * stbir__max_uint8_as_float )) + #define STBIR__ENCODE_LINEAR16(f) stbir__saturate16(STBIR__ROUND_INT((f) * stbir__max_uint16_as_float)) + #else + #define STBIR__ENCODE_LINEAR8(f) (unsigned char ) STBIR__ROUND_INT(stbir__saturate(f) * stbir__max_uint8_as_float ) + #define STBIR__ENCODE_LINEAR16(f) (unsigned short) STBIR__ROUND_INT(stbir__saturate(f) * stbir__max_uint16_as_float) + #endif + + switch (decode) + { + case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR): + for (x=0; x < num_pixels; ++x) + { + int pixel_index = x*channels; + + for (n = 0; n < channels; n++) + { + int index = pixel_index + n; + ((unsigned char*)output_buffer)[index] = STBIR__ENCODE_LINEAR8(encode_buffer[index]); + } + } + break; + + case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB): + for (x=0; x < num_pixels; ++x) + { + int pixel_index = x*channels; + + for (n = 0; n < num_nonalpha; n++) + { + int index = pixel_index + nonalpha[n]; + ((unsigned char*)output_buffer)[index] = stbir__linear_to_srgb_uchar(encode_buffer[index]); + } + + if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) + ((unsigned char *)output_buffer)[pixel_index + alpha_channel] = STBIR__ENCODE_LINEAR8(encode_buffer[pixel_index+alpha_channel]); + } + break; + + case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR): + for (x=0; x < num_pixels; ++x) + { + int pixel_index = x*channels; + + for (n = 0; n < channels; n++) + { + int index = pixel_index + n; + ((unsigned short*)output_buffer)[index] = STBIR__ENCODE_LINEAR16(encode_buffer[index]); + } + } + break; + + case 
STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB): + for (x=0; x < num_pixels; ++x) + { + int pixel_index = x*channels; + + for (n = 0; n < num_nonalpha; n++) + { + int index = pixel_index + nonalpha[n]; + ((unsigned short*)output_buffer)[index] = (unsigned short)STBIR__ROUND_INT(stbir__linear_to_srgb(stbir__saturate(encode_buffer[index])) * stbir__max_uint16_as_float); + } + + if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) + ((unsigned short*)output_buffer)[pixel_index + alpha_channel] = STBIR__ENCODE_LINEAR16(encode_buffer[pixel_index + alpha_channel]); + } + + break; + + case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR): + for (x=0; x < num_pixels; ++x) + { + int pixel_index = x*channels; + + for (n = 0; n < channels; n++) + { + int index = pixel_index + n; + ((unsigned int*)output_buffer)[index] = (unsigned int)STBIR__ROUND_UINT(((double)stbir__saturate(encode_buffer[index])) * stbir__max_uint32_as_float); + } + } + break; + + case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB): + for (x=0; x < num_pixels; ++x) + { + int pixel_index = x*channels; + + for (n = 0; n < num_nonalpha; n++) + { + int index = pixel_index + nonalpha[n]; + ((unsigned int*)output_buffer)[index] = (unsigned int)STBIR__ROUND_UINT(((double)stbir__linear_to_srgb(stbir__saturate(encode_buffer[index]))) * stbir__max_uint32_as_float); + } + + if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) + ((unsigned int*)output_buffer)[pixel_index + alpha_channel] = (unsigned int)STBIR__ROUND_INT(((double)stbir__saturate(encode_buffer[pixel_index + alpha_channel])) * stbir__max_uint32_as_float); + } + break; + + case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR): + for (x=0; x < num_pixels; ++x) + { + int pixel_index = x*channels; + + for (n = 0; n < channels; n++) + { + int index = pixel_index + n; + ((float*)output_buffer)[index] = encode_buffer[index]; + } + } + break; + + case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB): + for (x=0; x < num_pixels; ++x) + { + int pixel_index = x*channels; + + for (n = 0; n < num_nonalpha; n++) + { + int index = pixel_index + nonalpha[n]; + ((float*)output_buffer)[index] = stbir__linear_to_srgb(encode_buffer[index]); + } + + if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE)) + ((float*)output_buffer)[pixel_index + alpha_channel] = encode_buffer[pixel_index + alpha_channel]; + } + break; + + default: + STBIR_ASSERT(!"Unknown type/colorspace/channels combination."); + break; + } +} + +static void stbir__resample_vertical_upsample(stbir__info* stbir_info, int n) +{ + int x, k; + int output_w = stbir_info->output_w; + stbir__contributors* vertical_contributors = stbir_info->vertical_contributors; + float* vertical_coefficients = stbir_info->vertical_coefficients; + int channels = stbir_info->channels; + int alpha_channel = stbir_info->alpha_channel; + int type = stbir_info->type; + int colorspace = stbir_info->colorspace; + int ring_buffer_entries = stbir_info->ring_buffer_num_entries; + void* output_data = stbir_info->output_data; + float* encode_buffer = stbir_info->encode_buffer; + int decode = STBIR__DECODE(type, colorspace); + int coefficient_width = stbir_info->vertical_coefficient_width; + int coefficient_counter; + int contributor = n; + + float* ring_buffer = stbir_info->ring_buffer; + int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index; + int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline; + int ring_buffer_length = stbir_info->ring_buffer_length_bytes/sizeof(float); + + int 
n0,n1, output_row_start; + int coefficient_group = coefficient_width * contributor; + + n0 = vertical_contributors[contributor].n0; + n1 = vertical_contributors[contributor].n1; + + output_row_start = n * stbir_info->output_stride_bytes; + + STBIR_ASSERT(stbir__use_height_upsampling(stbir_info)); + + memset(encode_buffer, 0, output_w * sizeof(float) * channels); + + // I tried reblocking this for better cache usage of encode_buffer + // (using x_outer, k, x_inner), but it lost speed. -- stb + + coefficient_counter = 0; + switch (channels) { + case 1: + for (k = n0; k <= n1; k++) + { + int coefficient_index = coefficient_counter++; + float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); + float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; + for (x = 0; x < output_w; ++x) + { + int in_pixel_index = x * 1; + encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; + } + } + break; + case 2: + for (k = n0; k <= n1; k++) + { + int coefficient_index = coefficient_counter++; + float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); + float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; + for (x = 0; x < output_w; ++x) + { + int in_pixel_index = x * 2; + encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; + encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient; + } + } + break; + case 3: + for (k = n0; k <= n1; k++) + { + int coefficient_index = coefficient_counter++; + float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); + float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; + for (x = 0; x < output_w; ++x) + { + int in_pixel_index = x * 3; + encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; + encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient; + encode_buffer[in_pixel_index + 2] += ring_buffer_entry[in_pixel_index + 2] * coefficient; + } + } + break; + case 4: + for (k = n0; k <= n1; k++) + { + int coefficient_index = coefficient_counter++; + float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); + float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; + for (x = 0; x < output_w; ++x) + { + int in_pixel_index = x * 4; + encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; + encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient; + encode_buffer[in_pixel_index + 2] += ring_buffer_entry[in_pixel_index + 2] * coefficient; + encode_buffer[in_pixel_index + 3] += ring_buffer_entry[in_pixel_index + 3] * coefficient; + } + } + break; + default: + for (k = n0; k <= n1; k++) + { + int coefficient_index = coefficient_counter++; + float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); + float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; + for (x = 
0; x < output_w; ++x) + { + int in_pixel_index = x * channels; + int c; + for (c = 0; c < channels; c++) + encode_buffer[in_pixel_index + c] += ring_buffer_entry[in_pixel_index + c] * coefficient; + } + } + break; + } + stbir__encode_scanline(stbir_info, output_w, (char *) output_data + output_row_start, encode_buffer, channels, alpha_channel, decode); +} + +static void stbir__resample_vertical_downsample(stbir__info* stbir_info, int n) +{ + int x, k; + int output_w = stbir_info->output_w; + stbir__contributors* vertical_contributors = stbir_info->vertical_contributors; + float* vertical_coefficients = stbir_info->vertical_coefficients; + int channels = stbir_info->channels; + int ring_buffer_entries = stbir_info->ring_buffer_num_entries; + float* horizontal_buffer = stbir_info->horizontal_buffer; + int coefficient_width = stbir_info->vertical_coefficient_width; + int contributor = n + stbir_info->vertical_filter_pixel_margin; + + float* ring_buffer = stbir_info->ring_buffer; + int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index; + int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline; + int ring_buffer_length = stbir_info->ring_buffer_length_bytes/sizeof(float); + int n0,n1; + + n0 = vertical_contributors[contributor].n0; + n1 = vertical_contributors[contributor].n1; + + STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info)); + + for (k = n0; k <= n1; k++) + { + int coefficient_index = k - n0; + int coefficient_group = coefficient_width * contributor; + float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; + + float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); + + switch (channels) { + case 1: + for (x = 0; x < output_w; x++) + { + int in_pixel_index = x * 1; + ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; + } + break; + case 2: + for (x = 0; x < output_w; x++) + { + int in_pixel_index = x * 2; + ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; + ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient; + } + break; + case 3: + for (x = 0; x < output_w; x++) + { + int in_pixel_index = x * 3; + ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; + ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient; + ring_buffer_entry[in_pixel_index + 2] += horizontal_buffer[in_pixel_index + 2] * coefficient; + } + break; + case 4: + for (x = 0; x < output_w; x++) + { + int in_pixel_index = x * 4; + ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; + ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient; + ring_buffer_entry[in_pixel_index + 2] += horizontal_buffer[in_pixel_index + 2] * coefficient; + ring_buffer_entry[in_pixel_index + 3] += horizontal_buffer[in_pixel_index + 3] * coefficient; + } + break; + default: + for (x = 0; x < output_w; x++) + { + int in_pixel_index = x * channels; + + int c; + for (c = 0; c < channels; c++) + ring_buffer_entry[in_pixel_index + c] += horizontal_buffer[in_pixel_index + c] * coefficient; + } + break; + } + } +} + +static void stbir__buffer_loop_upsample(stbir__info* stbir_info) +{ + int y; + float scale_ratio = stbir_info->vertical_scale; + float out_scanlines_radius = 
stbir__filter_info_table[stbir_info->vertical_filter].support(1/scale_ratio) * scale_ratio; + + STBIR_ASSERT(stbir__use_height_upsampling(stbir_info)); + + for (y = 0; y < stbir_info->output_h; y++) + { + float in_center_of_out = 0; // Center of the current out scanline in the in scanline space + int in_first_scanline = 0, in_last_scanline = 0; + + stbir__calculate_sample_range_upsample(y, out_scanlines_radius, scale_ratio, stbir_info->vertical_shift, &in_first_scanline, &in_last_scanline, &in_center_of_out); + + STBIR_ASSERT(in_last_scanline - in_first_scanline + 1 <= stbir_info->ring_buffer_num_entries); + + if (stbir_info->ring_buffer_begin_index >= 0) + { + // Get rid of whatever we don't need anymore. + while (in_first_scanline > stbir_info->ring_buffer_first_scanline) + { + if (stbir_info->ring_buffer_first_scanline == stbir_info->ring_buffer_last_scanline) + { + // We just popped the last scanline off the ring buffer. + // Reset it to the empty state. + stbir_info->ring_buffer_begin_index = -1; + stbir_info->ring_buffer_first_scanline = 0; + stbir_info->ring_buffer_last_scanline = 0; + break; + } + else + { + stbir_info->ring_buffer_first_scanline++; + stbir_info->ring_buffer_begin_index = (stbir_info->ring_buffer_begin_index + 1) % stbir_info->ring_buffer_num_entries; + } + } + } + + // Load in new ones. + if (stbir_info->ring_buffer_begin_index < 0) + stbir__decode_and_resample_upsample(stbir_info, in_first_scanline); + + while (in_last_scanline > stbir_info->ring_buffer_last_scanline) + stbir__decode_and_resample_upsample(stbir_info, stbir_info->ring_buffer_last_scanline + 1); + + // Now all buffers should be ready to write a row of vertical sampling. + stbir__resample_vertical_upsample(stbir_info, y); + + STBIR_PROGRESS_REPORT((float)y / stbir_info->output_h); + } +} + +static void stbir__empty_ring_buffer(stbir__info* stbir_info, int first_necessary_scanline) +{ + int output_stride_bytes = stbir_info->output_stride_bytes; + int channels = stbir_info->channels; + int alpha_channel = stbir_info->alpha_channel; + int type = stbir_info->type; + int colorspace = stbir_info->colorspace; + int output_w = stbir_info->output_w; + void* output_data = stbir_info->output_data; + int decode = STBIR__DECODE(type, colorspace); + + float* ring_buffer = stbir_info->ring_buffer; + int ring_buffer_length = stbir_info->ring_buffer_length_bytes/sizeof(float); + + if (stbir_info->ring_buffer_begin_index >= 0) + { + // Get rid of whatever we don't need anymore. + while (first_necessary_scanline > stbir_info->ring_buffer_first_scanline) + { + if (stbir_info->ring_buffer_first_scanline >= 0 && stbir_info->ring_buffer_first_scanline < stbir_info->output_h) + { + int output_row_start = stbir_info->ring_buffer_first_scanline * output_stride_bytes; + float* ring_buffer_entry = stbir__get_ring_buffer_entry(ring_buffer, stbir_info->ring_buffer_begin_index, ring_buffer_length); + stbir__encode_scanline(stbir_info, output_w, (char *) output_data + output_row_start, ring_buffer_entry, channels, alpha_channel, decode); + STBIR_PROGRESS_REPORT((float)stbir_info->ring_buffer_first_scanline / stbir_info->output_h); + } + + if (stbir_info->ring_buffer_first_scanline == stbir_info->ring_buffer_last_scanline) + { + // We just popped the last scanline off the ring buffer. + // Reset it to the empty state. 
+ stbir_info->ring_buffer_begin_index = -1; + stbir_info->ring_buffer_first_scanline = 0; + stbir_info->ring_buffer_last_scanline = 0; + break; + } + else + { + stbir_info->ring_buffer_first_scanline++; + stbir_info->ring_buffer_begin_index = (stbir_info->ring_buffer_begin_index + 1) % stbir_info->ring_buffer_num_entries; + } + } + } +} + +static void stbir__buffer_loop_downsample(stbir__info* stbir_info) +{ + int y; + float scale_ratio = stbir_info->vertical_scale; + int output_h = stbir_info->output_h; + float in_pixels_radius = stbir__filter_info_table[stbir_info->vertical_filter].support(scale_ratio) / scale_ratio; + int pixel_margin = stbir_info->vertical_filter_pixel_margin; + int max_y = stbir_info->input_h + pixel_margin; + + STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info)); + + for (y = -pixel_margin; y < max_y; y++) + { + float out_center_of_in; // Center of the current out scanline in the in scanline space + int out_first_scanline, out_last_scanline; + + stbir__calculate_sample_range_downsample(y, in_pixels_radius, scale_ratio, stbir_info->vertical_shift, &out_first_scanline, &out_last_scanline, &out_center_of_in); + + STBIR_ASSERT(out_last_scanline - out_first_scanline + 1 <= stbir_info->ring_buffer_num_entries); + + if (out_last_scanline < 0 || out_first_scanline >= output_h) + continue; + + stbir__empty_ring_buffer(stbir_info, out_first_scanline); + + stbir__decode_and_resample_downsample(stbir_info, y); + + // Load in new ones. + if (stbir_info->ring_buffer_begin_index < 0) + stbir__add_empty_ring_buffer_entry(stbir_info, out_first_scanline); + + while (out_last_scanline > stbir_info->ring_buffer_last_scanline) + stbir__add_empty_ring_buffer_entry(stbir_info, stbir_info->ring_buffer_last_scanline + 1); + + // Now the horizontal buffer is ready to write to all ring buffer rows. + stbir__resample_vertical_downsample(stbir_info, y); + } + + stbir__empty_ring_buffer(stbir_info, stbir_info->output_h); +} + +static void stbir__setup(stbir__info *info, int input_w, int input_h, int output_w, int output_h, int channels) +{ + info->input_w = input_w; + info->input_h = input_h; + info->output_w = output_w; + info->output_h = output_h; + info->channels = channels; +} + +static void stbir__calculate_transform(stbir__info *info, float s0, float t0, float s1, float t1, float *transform) +{ + info->s0 = s0; + info->t0 = t0; + info->s1 = s1; + info->t1 = t1; + + if (transform) + { + info->horizontal_scale = transform[0]; + info->vertical_scale = transform[1]; + info->horizontal_shift = transform[2]; + info->vertical_shift = transform[3]; + } + else + { + info->horizontal_scale = ((float)info->output_w / info->input_w) / (s1 - s0); + info->vertical_scale = ((float)info->output_h / info->input_h) / (t1 - t0); + + info->horizontal_shift = s0 * info->output_w / (s1 - s0); + info->vertical_shift = t0 * info->output_h / (t1 - t0); + } +} + +static void stbir__choose_filter(stbir__info *info, stbir_filter h_filter, stbir_filter v_filter) +{ + if (h_filter == 0) + h_filter = stbir__use_upsampling(info->horizontal_scale) ? STBIR_DEFAULT_FILTER_UPSAMPLE : STBIR_DEFAULT_FILTER_DOWNSAMPLE; + if (v_filter == 0) + v_filter = stbir__use_upsampling(info->vertical_scale) ? 
STBIR_DEFAULT_FILTER_UPSAMPLE : STBIR_DEFAULT_FILTER_DOWNSAMPLE; + info->horizontal_filter = h_filter; + info->vertical_filter = v_filter; +} + +static stbir_uint32 stbir__calculate_memory(stbir__info *info) +{ + int pixel_margin = stbir__get_filter_pixel_margin(info->horizontal_filter, info->horizontal_scale); + int filter_height = stbir__get_filter_pixel_width(info->vertical_filter, info->vertical_scale); + + info->horizontal_num_contributors = stbir__get_contributors(info->horizontal_scale, info->horizontal_filter, info->input_w, info->output_w); + info->vertical_num_contributors = stbir__get_contributors(info->vertical_scale , info->vertical_filter , info->input_h, info->output_h); + + // One extra entry because floating point precision problems sometimes cause an extra to be necessary. + info->ring_buffer_num_entries = filter_height + 1; + + info->horizontal_contributors_size = info->horizontal_num_contributors * sizeof(stbir__contributors); + info->horizontal_coefficients_size = stbir__get_total_horizontal_coefficients(info) * sizeof(float); + info->vertical_contributors_size = info->vertical_num_contributors * sizeof(stbir__contributors); + info->vertical_coefficients_size = stbir__get_total_vertical_coefficients(info) * sizeof(float); + info->decode_buffer_size = (info->input_w + pixel_margin * 2) * info->channels * sizeof(float); + info->horizontal_buffer_size = info->output_w * info->channels * sizeof(float); + info->ring_buffer_size = info->output_w * info->channels * info->ring_buffer_num_entries * sizeof(float); + info->encode_buffer_size = info->output_w * info->channels * sizeof(float); + + STBIR_ASSERT(info->horizontal_filter != 0); + STBIR_ASSERT(info->horizontal_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); // this now happens too late + STBIR_ASSERT(info->vertical_filter != 0); + STBIR_ASSERT(info->vertical_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); // this now happens too late + + if (stbir__use_height_upsampling(info)) + // The horizontal buffer is for when we're downsampling the height and we + // can't output the result of sampling the decode buffer directly into the + // ring buffers. + info->horizontal_buffer_size = 0; + else + // The encode buffer is to retain precision in the height upsampling method + // and isn't used when height downsampling. + info->encode_buffer_size = 0; + + return info->horizontal_contributors_size + info->horizontal_coefficients_size + + info->vertical_contributors_size + info->vertical_coefficients_size + + info->decode_buffer_size + info->horizontal_buffer_size + + info->ring_buffer_size + info->encode_buffer_size; +} + +static int stbir__resize_allocated(stbir__info *info, + const void* input_data, int input_stride_in_bytes, + void* output_data, int output_stride_in_bytes, + int alpha_channel, stbir_uint32 flags, stbir_datatype type, + stbir_edge edge_horizontal, stbir_edge edge_vertical, stbir_colorspace colorspace, + void* tempmem, size_t tempmem_size_in_bytes) +{ + size_t memory_required = stbir__calculate_memory(info); + + int width_stride_input = input_stride_in_bytes ? input_stride_in_bytes : info->channels * info->input_w * stbir__type_size[type]; + int width_stride_output = output_stride_in_bytes ? 
output_stride_in_bytes : info->channels * info->output_w * stbir__type_size[type]; + +#ifdef STBIR_DEBUG_OVERWRITE_TEST +#define OVERWRITE_ARRAY_SIZE 8 + unsigned char overwrite_output_before_pre[OVERWRITE_ARRAY_SIZE]; + unsigned char overwrite_tempmem_before_pre[OVERWRITE_ARRAY_SIZE]; + unsigned char overwrite_output_after_pre[OVERWRITE_ARRAY_SIZE]; + unsigned char overwrite_tempmem_after_pre[OVERWRITE_ARRAY_SIZE]; + + size_t begin_forbidden = width_stride_output * (info->output_h - 1) + info->output_w * info->channels * stbir__type_size[type]; + memcpy(overwrite_output_before_pre, &((unsigned char*)output_data)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE); + memcpy(overwrite_output_after_pre, &((unsigned char*)output_data)[begin_forbidden], OVERWRITE_ARRAY_SIZE); + memcpy(overwrite_tempmem_before_pre, &((unsigned char*)tempmem)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE); + memcpy(overwrite_tempmem_after_pre, &((unsigned char*)tempmem)[tempmem_size_in_bytes], OVERWRITE_ARRAY_SIZE); +#endif + + STBIR_ASSERT(info->channels >= 0); + STBIR_ASSERT(info->channels <= STBIR_MAX_CHANNELS); + + if (info->channels < 0 || info->channels > STBIR_MAX_CHANNELS) + return 0; + + STBIR_ASSERT(info->horizontal_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); + STBIR_ASSERT(info->vertical_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); + + if (info->horizontal_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table)) + return 0; + if (info->vertical_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table)) + return 0; + + if (alpha_channel < 0) + flags |= STBIR_FLAG_ALPHA_USES_COLORSPACE | STBIR_FLAG_ALPHA_PREMULTIPLIED; + + if (!(flags&STBIR_FLAG_ALPHA_USES_COLORSPACE) || !(flags&STBIR_FLAG_ALPHA_PREMULTIPLIED)) + STBIR_ASSERT(alpha_channel >= 0 && alpha_channel < info->channels); + + if (alpha_channel >= info->channels) + return 0; + + STBIR_ASSERT(tempmem); + + if (!tempmem) + return 0; + + STBIR_ASSERT(tempmem_size_in_bytes >= memory_required); + + if (tempmem_size_in_bytes < memory_required) + return 0; + + memset(tempmem, 0, tempmem_size_in_bytes); + + info->input_data = input_data; + info->input_stride_bytes = width_stride_input; + + info->output_data = output_data; + info->output_stride_bytes = width_stride_output; + + info->alpha_channel = alpha_channel; + info->flags = flags; + info->type = type; + info->edge_horizontal = edge_horizontal; + info->edge_vertical = edge_vertical; + info->colorspace = colorspace; + + info->horizontal_coefficient_width = stbir__get_coefficient_width (info->horizontal_filter, info->horizontal_scale); + info->vertical_coefficient_width = stbir__get_coefficient_width (info->vertical_filter , info->vertical_scale ); + info->horizontal_filter_pixel_width = stbir__get_filter_pixel_width (info->horizontal_filter, info->horizontal_scale); + info->vertical_filter_pixel_width = stbir__get_filter_pixel_width (info->vertical_filter , info->vertical_scale ); + info->horizontal_filter_pixel_margin = stbir__get_filter_pixel_margin(info->horizontal_filter, info->horizontal_scale); + info->vertical_filter_pixel_margin = stbir__get_filter_pixel_margin(info->vertical_filter , info->vertical_scale ); + + info->ring_buffer_length_bytes = info->output_w * info->channels * sizeof(float); + info->decode_buffer_pixels = info->input_w + info->horizontal_filter_pixel_margin * 2; + +#define STBIR__NEXT_MEMPTR(current, newtype) (newtype*)(((unsigned char*)current) + current##_size) + + info->horizontal_contributors = (stbir__contributors *) tempmem; + info->horizontal_coefficients = 
STBIR__NEXT_MEMPTR(info->horizontal_contributors, float); + info->vertical_contributors = STBIR__NEXT_MEMPTR(info->horizontal_coefficients, stbir__contributors); + info->vertical_coefficients = STBIR__NEXT_MEMPTR(info->vertical_contributors, float); + info->decode_buffer = STBIR__NEXT_MEMPTR(info->vertical_coefficients, float); + + if (stbir__use_height_upsampling(info)) + { + info->horizontal_buffer = NULL; + info->ring_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float); + info->encode_buffer = STBIR__NEXT_MEMPTR(info->ring_buffer, float); + + STBIR_ASSERT((size_t)STBIR__NEXT_MEMPTR(info->encode_buffer, unsigned char) == (size_t)tempmem + tempmem_size_in_bytes); + } + else + { + info->horizontal_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float); + info->ring_buffer = STBIR__NEXT_MEMPTR(info->horizontal_buffer, float); + info->encode_buffer = NULL; + + STBIR_ASSERT((size_t)STBIR__NEXT_MEMPTR(info->ring_buffer, unsigned char) == (size_t)tempmem + tempmem_size_in_bytes); + } + +#undef STBIR__NEXT_MEMPTR + + // This signals that the ring buffer is empty + info->ring_buffer_begin_index = -1; + + stbir__calculate_filters(info->horizontal_contributors, info->horizontal_coefficients, info->horizontal_filter, info->horizontal_scale, info->horizontal_shift, info->input_w, info->output_w); + stbir__calculate_filters(info->vertical_contributors, info->vertical_coefficients, info->vertical_filter, info->vertical_scale, info->vertical_shift, info->input_h, info->output_h); + + STBIR_PROGRESS_REPORT(0); + + if (stbir__use_height_upsampling(info)) + stbir__buffer_loop_upsample(info); + else + stbir__buffer_loop_downsample(info); + + STBIR_PROGRESS_REPORT(1); + +#ifdef STBIR_DEBUG_OVERWRITE_TEST + STBIR_ASSERT(memcmp(overwrite_output_before_pre, &((unsigned char*)output_data)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE) == 0); + STBIR_ASSERT(memcmp(overwrite_output_after_pre, &((unsigned char*)output_data)[begin_forbidden], OVERWRITE_ARRAY_SIZE) == 0); + STBIR_ASSERT(memcmp(overwrite_tempmem_before_pre, &((unsigned char*)tempmem)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE) == 0); + STBIR_ASSERT(memcmp(overwrite_tempmem_after_pre, &((unsigned char*)tempmem)[tempmem_size_in_bytes], OVERWRITE_ARRAY_SIZE) == 0); +#endif + + return 1; +} + + +static int stbir__resize_arbitrary( + void *alloc_context, + const void* input_data, int input_w, int input_h, int input_stride_in_bytes, + void* output_data, int output_w, int output_h, int output_stride_in_bytes, + float s0, float t0, float s1, float t1, float *transform, + int channels, int alpha_channel, stbir_uint32 flags, stbir_datatype type, + stbir_filter h_filter, stbir_filter v_filter, + stbir_edge edge_horizontal, stbir_edge edge_vertical, stbir_colorspace colorspace) +{ + stbir__info info; + int result; + size_t memory_required; + void* extra_memory; + + stbir__setup(&info, input_w, input_h, output_w, output_h, channels); + stbir__calculate_transform(&info, s0,t0,s1,t1,transform); + stbir__choose_filter(&info, h_filter, v_filter); + memory_required = stbir__calculate_memory(&info); + extra_memory = STBIR_MALLOC(memory_required, alloc_context); + + if (!extra_memory) + return 0; + + result = stbir__resize_allocated(&info, input_data, input_stride_in_bytes, + output_data, output_stride_in_bytes, + alpha_channel, flags, type, + edge_horizontal, edge_vertical, + colorspace, extra_memory, memory_required); + + STBIR_FREE(extra_memory, alloc_context); + + return result; +} + +STBIRDEF int stbir_resize_uint8( const unsigned char *input_pixels , int 
input_w , int input_h , int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + int num_channels) +{ + return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + 0,0,1,1,NULL,num_channels,-1,0, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, + STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_LINEAR); +} + +STBIRDEF int stbir_resize_float( const float *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + float *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + int num_channels) +{ + return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + 0,0,1,1,NULL,num_channels,-1,0, STBIR_TYPE_FLOAT, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, + STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_LINEAR); +} + +STBIRDEF int stbir_resize_uint8_srgb(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + int num_channels, int alpha_channel, int flags) +{ + return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, + STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_SRGB); +} + +STBIRDEF int stbir_resize_uint8_srgb_edgemode(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_wrap_mode) +{ + return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, + edge_wrap_mode, edge_wrap_mode, STBIR_COLORSPACE_SRGB); +} + +STBIRDEF int stbir_resize_uint8_generic( const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, + void *alloc_context) +{ + return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT8, filter, filter, + edge_wrap_mode, edge_wrap_mode, space); +} + +STBIRDEF int stbir_resize_uint16_generic(const stbir_uint16 *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + stbir_uint16 *output_pixels , int output_w, int output_h, int output_stride_in_bytes, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, + void *alloc_context) +{ + return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT16, filter, filter, + 
edge_wrap_mode, edge_wrap_mode, space); +} + + +STBIRDEF int stbir_resize_float_generic( const float *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + float *output_pixels , int output_w, int output_h, int output_stride_in_bytes, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, + void *alloc_context) +{ + return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + 0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_FLOAT, filter, filter, + edge_wrap_mode, edge_wrap_mode, space); +} + + +STBIRDEF int stbir_resize( const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + stbir_datatype datatype, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, + stbir_filter filter_horizontal, stbir_filter filter_vertical, + stbir_colorspace space, void *alloc_context) +{ + return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + 0,0,1,1,NULL,num_channels,alpha_channel,flags, datatype, filter_horizontal, filter_vertical, + edge_mode_horizontal, edge_mode_vertical, space); +} + + +STBIRDEF int stbir_resize_subpixel(const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + stbir_datatype datatype, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, + stbir_filter filter_horizontal, stbir_filter filter_vertical, + stbir_colorspace space, void *alloc_context, + float x_scale, float y_scale, + float x_offset, float y_offset) +{ + float transform[4]; + transform[0] = x_scale; + transform[1] = y_scale; + transform[2] = x_offset; + transform[3] = y_offset; + return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + 0,0,1,1,transform,num_channels,alpha_channel,flags, datatype, filter_horizontal, filter_vertical, + edge_mode_horizontal, edge_mode_vertical, space); +} + +STBIRDEF int stbir_resize_region( const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes, + void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + stbir_datatype datatype, + int num_channels, int alpha_channel, int flags, + stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, + stbir_filter filter_horizontal, stbir_filter filter_vertical, + stbir_colorspace space, void *alloc_context, + float s0, float t0, float s1, float t1) +{ + return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + s0,t0,s1,t1,NULL,num_channels,alpha_channel,flags, datatype, filter_horizontal, filter_vertical, + edge_mode_horizontal, edge_mode_vertical, space); +} + +#endif // STB_IMAGE_RESIZE_IMPLEMENTATION + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/src/refresh/vkpt/include/stb_image_write.h b/src/refresh/vkpt/include/stb_image_write.h new file mode 100644 index 0000000..c05e958 --- /dev/null +++ b/src/refresh/vkpt/include/stb_image_write.h @@ -0,0 +1,1568 @@ +/* stb_image_write - v1.09 - public domain - http://nothings.org/stb/stb_image_write.h + writes out PNG/BMP/TGA/JPEG/HDR images to C stdio - Sean Barrett 2010-2015 + no warranty implied; use at your own risk + + Before #including, + + #define STB_IMAGE_WRITE_IMPLEMENTATION + + in the file that you want to have the implementation. + + Will probably not work correctly with strict-aliasing optimizations. + + If using a modern Microsoft Compiler, non-safe versions of CRT calls may cause + compilation warnings or even errors. To avoid this, also before #including, + + #define STBI_MSC_SECURE_CRT + +ABOUT: + + This header file is a library for writing images to C stdio. It could be + adapted to write to memory or a general streaming interface; let me know. 
+ + The PNG output is not optimal; it is 20-50% larger than the file + written by a decent optimizing implementation; though providing a custom + zlib compress function (see STBIW_ZLIB_COMPRESS) can mitigate that. + This library is designed for source code compactness and simplicity, + not optimal image file size or run-time performance. + +BUILDING: + + You can #define STBIW_ASSERT(x) before the #include to avoid using assert.h. + You can #define STBIW_MALLOC(), STBIW_REALLOC(), and STBIW_FREE() to replace + malloc,realloc,free. + You can #define STBIW_MEMMOVE() to replace memmove() + You can #define STBIW_ZLIB_COMPRESS to use a custom zlib-style compress function + for PNG compression (instead of the builtin one), it must have the following signature: + unsigned char * my_compress(unsigned char *data, int data_len, int *out_len, int quality); + The returned data will be freed with STBIW_FREE() (free() by default), + so it must be heap allocated with STBIW_MALLOC() (malloc() by default), + +USAGE: + + There are five functions, one for each image file format: + + int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); + int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data); + int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data); + int stbi_write_jpg(char const *filename, int w, int h, int comp, const void *data, int quality); + int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data); + + void stbi_flip_vertically_on_write(int flag); // flag is non-zero to flip data vertically + + There are also five equivalent functions that use an arbitrary write function. You are + expected to open/close your file-equivalent before and after calling these: + + int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes); + int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); + int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); + int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data); + int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality); + + where the callback is: + void stbi_write_func(void *context, void *data, int size); + + You can configure it with these global variables: + int stbi_write_tga_with_rle; // defaults to true; set to 0 to disable RLE + int stbi_write_png_compression_level; // defaults to 8; set to higher for more compression + int stbi_write_force_png_filter; // defaults to -1; set to 0..5 to force a filter mode + + + You can define STBI_WRITE_NO_STDIO to disable the file variant of these + functions, so the library will not use stdio.h at all. However, this will + also disable HDR writing, because it requires stdio for formatted output. + + Each function returns 0 on failure and non-0 on success. + + The functions create an image file defined by the parameters. The image + is a rectangle of pixels stored from left-to-right, top-to-bottom. + Each pixel contains 'comp' channels of data stored interleaved with 8-bits + per channel, in the following order: 1=Y, 2=YA, 3=RGB, 4=RGBA. (Y is + monochrome color.) The rectangle is 'w' pixels wide and 'h' pixels tall. + The *data pointer points to the first byte of the top-left-most pixel. 
+ For PNG, "stride_in_bytes" is the distance in bytes from the first byte of + a row of pixels to the first byte of the next row of pixels. + + PNG creates output files with the same number of components as the input. + The BMP format expands Y to RGB in the file format and does not + output alpha. + + PNG supports writing rectangles of data even when the bytes storing rows of + data are not consecutive in memory (e.g. sub-rectangles of a larger image), + by supplying the stride between the beginning of adjacent rows. The other + formats do not. (Thus you cannot write a native-format BMP through the BMP + writer, both because it is in BGR order and because it may have padding + at the end of the line.) + + PNG allows you to set the deflate compression level by setting the global + variable 'stbi_write_png_compression_level' (it defaults to 8). + + HDR expects linear float data. Since the format is always 32-bit rgb(e) + data, alpha (if provided) is discarded, and for monochrome data it is + replicated across all three channels. + + TGA supports RLE or non-RLE compressed data. To use non-RLE-compressed + data, set the global variable 'stbi_write_tga_with_rle' to 0. + + JPEG does ignore alpha channels in input data; quality is between 1 and 100. + Higher quality looks better but results in a bigger image. + JPEG baseline (no JPEG progressive). + +CREDITS: + + + Sean Barrett - PNG/BMP/TGA + Baldur Karlsson - HDR + Jean-Sebastien Guay - TGA monochrome + Tim Kelsey - misc enhancements + Alan Hickman - TGA RLE + Emmanuel Julien - initial file IO callback implementation + Jon Olick - original jo_jpeg.cpp code + Daniel Gibson - integrate JPEG, allow external zlib + Aarni Koskela - allow choosing PNG filter + + bugfixes: + github:Chribba + Guillaume Chereau + github:jry2 + github:romigrou + Sergio Gonzalez + Jonas Karlsson + Filip Wasil + Thatcher Ulrich + github:poppolopoppo + Patrick Boettcher + github:xeekworx + Cap Petschulat + Simon Rodriguez + Ivan Tikhonov + github:ignotion + Adam Schackart + +LICENSE + + See end of file for license information. 
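+
+EXAMPLE:
+
+   A minimal sketch of writing one PNG, assuming the caller already has an
+   8-bit RGBA image in memory (WIDTH, HEIGHT and the pixels array below are
+   placeholders for your own data, not names provided by this library):
+
+      #define STB_IMAGE_WRITE_IMPLEMENTATION
+      #include "stb_image_write.h"
+
+      enum { WIDTH = 256, HEIGHT = 128, COMP = 4 };   // COMP 4 == RGBA
+
+      int main(void)
+      {
+         static unsigned char pixels[WIDTH * HEIGHT * COMP];
+         // ... fill pixels left-to-right, top-to-bottom ...
+
+         // last argument is stride_in_bytes; rows here are tightly packed
+         if (!stbi_write_png("out.png", WIDTH, HEIGHT, COMP, pixels, WIDTH * COMP))
+            return 1;   // the writers return 0 on failure, non-0 on success
+         return 0;
+      }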
+
+*/
+
+#ifndef INCLUDE_STB_IMAGE_WRITE_H
+#define INCLUDE_STB_IMAGE_WRITE_H
+
+// if STB_IMAGE_WRITE_STATIC causes problems, try defining STBIWDEF to 'inline' or 'static inline'
+#ifndef STBIWDEF
+#ifdef STB_IMAGE_WRITE_STATIC
+#define STBIWDEF static
+#else
+#ifdef __cplusplus
+#define STBIWDEF extern "C"
+#else
+#define STBIWDEF extern
+#endif
+#endif
+#endif
+
+#ifndef STB_IMAGE_WRITE_STATIC // C++ forbids static forward declarations
+extern int stbi_write_tga_with_rle;
+extern int stbi_write_png_compression_level;
+extern int stbi_write_force_png_filter;
+#endif
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes);
+STBIWDEF int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data);
+STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality);
+#endif
+
+typedef void stbi_write_func(void *context, void *data, int size);
+
+STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes);
+STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data);
+STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality);
+
+STBIWDEF void stbi_flip_vertically_on_write(int flip_boolean);
+
+#endif//INCLUDE_STB_IMAGE_WRITE_H
+
+#ifdef STB_IMAGE_WRITE_IMPLEMENTATION
+
+#ifdef _WIN32
+   #ifndef _CRT_SECURE_NO_WARNINGS
+   #define _CRT_SECURE_NO_WARNINGS
+   #endif
+   #ifndef _CRT_NONSTDC_NO_DEPRECATE
+   #define _CRT_NONSTDC_NO_DEPRECATE
+   #endif
+#endif
+
+#ifndef STBI_WRITE_NO_STDIO
+#include <stdio.h>
+#endif // STBI_WRITE_NO_STDIO
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#if defined(STBIW_MALLOC) && defined(STBIW_FREE) && (defined(STBIW_REALLOC) || defined(STBIW_REALLOC_SIZED))
+// ok
+#elif !defined(STBIW_MALLOC) && !defined(STBIW_FREE) && !defined(STBIW_REALLOC) && !defined(STBIW_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBIW_MALLOC, STBIW_FREE, and STBIW_REALLOC (or STBIW_REALLOC_SIZED)."
+#endif
+
+#ifndef STBIW_MALLOC
+#define STBIW_MALLOC(sz) malloc(sz)
+#define STBIW_REALLOC(p,newsz) realloc(p,newsz)
+#define STBIW_FREE(p) free(p)
+#endif
+
+#ifndef STBIW_REALLOC_SIZED
+#define STBIW_REALLOC_SIZED(p,oldsz,newsz) STBIW_REALLOC(p,newsz)
+#endif
+
+
+#ifndef STBIW_MEMMOVE
+#define STBIW_MEMMOVE(a,b,sz) memmove(a,b,sz)
+#endif
+
+
+#ifndef STBIW_ASSERT
+#include <assert.h>
+#define STBIW_ASSERT(x) assert(x)
+#endif
+
+#define STBIW_UCHAR(x) (unsigned char) ((x) & 0xff)
+
+#ifdef STB_IMAGE_WRITE_STATIC
+static int stbi__flip_vertically_on_write=0;
+static int stbi_write_png_compression_level = 8;
+static int stbi_write_tga_with_rle = 1;
+static int stbi_write_force_png_filter = -1;
+#else
+int stbi_write_png_compression_level = 8;
+int stbi__flip_vertically_on_write=0;
+int stbi_write_tga_with_rle = 1;
+int stbi_write_force_png_filter = -1;
+#endif
+
+STBIWDEF void stbi_flip_vertically_on_write(int flag)
+{
+   stbi__flip_vertically_on_write = flag;
+}
+
+typedef struct
+{
+   stbi_write_func *func;
+   void *context;
+} stbi__write_context;
+
+// initialize a callback-based context
+static void stbi__start_write_callbacks(stbi__write_context *s, stbi_write_func *c, void *context)
+{
+   s->func = c;
+   s->context = context;
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+
+static void stbi__stdio_write(void *context, void *data, int size)
+{
+   fwrite(data,1,size,(FILE*) context);
+}
+
+static int stbi__start_write_file(stbi__write_context *s, const char *filename)
+{
+   FILE *f;
+#ifdef STBI_MSC_SECURE_CRT
+   if (fopen_s(&f, filename, "wb"))
+      f = NULL;
+#else
+   f = fopen(filename, "wb");
+#endif
+   stbi__start_write_callbacks(s, stbi__stdio_write, (void *) f);
+   return f != NULL;
+}
+
+static void stbi__end_write_file(stbi__write_context *s)
+{
+   fclose((FILE *)s->context);
+}
+
+#endif // !STBI_WRITE_NO_STDIO
+
+typedef unsigned int stbiw_uint32;
+typedef int stb_image_write_test[sizeof(stbiw_uint32)==4 ? 1 : -1];
+
+static void stbiw__writefv(stbi__write_context *s, const char *fmt, va_list v)
+{
+   while (*fmt) {
+      switch (*fmt++) {
+         case ' ': break;
+         case '1': { unsigned char x = STBIW_UCHAR(va_arg(v, int));
+                     s->func(s->context,&x,1);
+                     break; }
+         case '2': { int x = va_arg(v,int);
+                     unsigned char b[2];
+                     b[0] = STBIW_UCHAR(x);
+                     b[1] = STBIW_UCHAR(x>>8);
+                     s->func(s->context,b,2);
+                     break; }
+         case '4': { stbiw_uint32 x = va_arg(v,int);
+                     unsigned char b[4];
+                     b[0]=STBIW_UCHAR(x);
+                     b[1]=STBIW_UCHAR(x>>8);
+                     b[2]=STBIW_UCHAR(x>>16);
+                     b[3]=STBIW_UCHAR(x>>24);
+                     s->func(s->context,b,4);
+                     break; }
+         default:
+            STBIW_ASSERT(0);
+            return;
+      }
+   }
+}
+
+static void stbiw__writef(stbi__write_context *s, const char *fmt, ...)
+{ + va_list v; + va_start(v, fmt); + stbiw__writefv(s, fmt, v); + va_end(v); +} + +static void stbiw__putc(stbi__write_context *s, unsigned char c) +{ + s->func(s->context, &c, 1); +} + +static void stbiw__write3(stbi__write_context *s, unsigned char a, unsigned char b, unsigned char c) +{ + unsigned char arr[3]; + arr[0] = a, arr[1] = b, arr[2] = c; + s->func(s->context, arr, 3); +} + +static void stbiw__write_pixel(stbi__write_context *s, int rgb_dir, int comp, int write_alpha, int expand_mono, unsigned char *d) +{ + unsigned char bg[3] = { 255, 0, 255}, px[3]; + int k; + + if (write_alpha < 0) + s->func(s->context, &d[comp - 1], 1); + + switch (comp) { + case 2: // 2 pixels = mono + alpha, alpha is written separately, so same as 1-channel case + case 1: + if (expand_mono) + stbiw__write3(s, d[0], d[0], d[0]); // monochrome bmp + else + s->func(s->context, d, 1); // monochrome TGA + break; + case 4: + if (!write_alpha) { + // composite against pink background + for (k = 0; k < 3; ++k) + px[k] = bg[k] + ((d[k] - bg[k]) * d[3]) / 255; + stbiw__write3(s, px[1 - rgb_dir], px[1], px[1 + rgb_dir]); + break; + } + /* FALLTHROUGH */ + case 3: + stbiw__write3(s, d[1 - rgb_dir], d[1], d[1 + rgb_dir]); + break; + } + if (write_alpha > 0) + s->func(s->context, &d[comp - 1], 1); +} + +static void stbiw__write_pixels(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, void *data, int write_alpha, int scanline_pad, int expand_mono) +{ + stbiw_uint32 zero = 0; + int i,j, j_end; + + if (y <= 0) + return; + + if (stbi__flip_vertically_on_write) + vdir *= -1; + + if (vdir < 0) + j_end = -1, j = y-1; + else + j_end = y, j = 0; + + for (; j != j_end; j += vdir) { + for (i=0; i < x; ++i) { + unsigned char *d = (unsigned char *) data + (j*x+i)*comp; + stbiw__write_pixel(s, rgb_dir, comp, write_alpha, expand_mono, d); + } + s->func(s->context, &zero, scanline_pad); + } +} + +static int stbiw__outfile(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, int expand_mono, void *data, int alpha, int pad, const char *fmt, ...) +{ + if (y < 0 || x < 0) { + return 0; + } else { + va_list v; + va_start(v, fmt); + stbiw__writefv(s, fmt, v); + va_end(v); + stbiw__write_pixels(s,rgb_dir,vdir,x,y,comp,data,alpha,pad, expand_mono); + return 1; + } +} + +static int stbi_write_bmp_core(stbi__write_context *s, int x, int y, int comp, const void *data) +{ + int pad = (-x*3) & 3; + return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *) data,0,pad, + "11 4 22 4" "4 44 22 444444", + 'B', 'M', 14+40+(x*3+pad)*y, 0,0, 14+40, // file header + 40, x,y, 1,24, 0,0,0,0,0,0); // bitmap header +} + +STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) +{ + stbi__write_context s; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_bmp_core(&s, x, y, comp, data); +} + +#ifndef STBI_WRITE_NO_STDIO +STBIWDEF int stbi_write_bmp(char const *filename, int x, int y, int comp, const void *data) +{ + stbi__write_context s; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_bmp_core(&s, x, y, comp, data); + stbi__end_write_file(&s); + return r; + } else + return 0; +} +#endif //!STBI_WRITE_NO_STDIO + +static int stbi_write_tga_core(stbi__write_context *s, int x, int y, int comp, void *data) +{ + int has_alpha = (comp == 2 || comp == 4); + int colorbytes = has_alpha ? comp-1 : comp; + int format = colorbytes < 2 ? 
3 : 2; // 3 color channels (RGB/RGBA) = 2, 1 color channel (Y/YA) = 3 + + if (y < 0 || x < 0) + return 0; + + if (!stbi_write_tga_with_rle) { + return stbiw__outfile(s, -1, -1, x, y, comp, 0, (void *) data, has_alpha, 0, + "111 221 2222 11", 0, 0, format, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8); + } else { + int i,j,k; + int jend, jdir; + + stbiw__writef(s, "111 221 2222 11", 0,0,format+8, 0,0,0, 0,0,x,y, (colorbytes + has_alpha) * 8, has_alpha * 8); + + if (stbi__flip_vertically_on_write) { + j = 0; + jend = y; + jdir = 1; + } else { + j = y-1; + jend = -1; + jdir = -1; + } + for (; j != jend; j += jdir) { + unsigned char *row = (unsigned char *) data + j * x * comp; + int len; + + for (i = 0; i < x; i += len) { + unsigned char *begin = row + i * comp; + int diff = 1; + len = 1; + + if (i < x - 1) { + ++len; + diff = memcmp(begin, row + (i + 1) * comp, comp); + if (diff) { + const unsigned char *prev = begin; + for (k = i + 2; k < x && len < 128; ++k) { + if (memcmp(prev, row + k * comp, comp)) { + prev += comp; + ++len; + } else { + --len; + break; + } + } + } else { + for (k = i + 2; k < x && len < 128; ++k) { + if (!memcmp(begin, row + k * comp, comp)) { + ++len; + } else { + break; + } + } + } + } + + if (diff) { + unsigned char header = STBIW_UCHAR(len - 1); + s->func(s->context, &header, 1); + for (k = 0; k < len; ++k) { + stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin + k * comp); + } + } else { + unsigned char header = STBIW_UCHAR(len - 129); + s->func(s->context, &header, 1); + stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin); + } + } + } + } + return 1; +} + +STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) +{ + stbi__write_context s; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_tga_core(&s, x, y, comp, (void *) data); +} + +#ifndef STBI_WRITE_NO_STDIO +STBIWDEF int stbi_write_tga(char const *filename, int x, int y, int comp, const void *data) +{ + stbi__write_context s; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_tga_core(&s, x, y, comp, (void *) data); + stbi__end_write_file(&s); + return r; + } else + return 0; +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR writer +// by Baldur Karlsson + +#define stbiw__max(a, b) ((a) > (b) ? 
(a) : (b)) + +void stbiw__linear_to_rgbe(unsigned char *rgbe, float *linear) +{ + int exponent; + float maxcomp = stbiw__max(linear[0], stbiw__max(linear[1], linear[2])); + + if (maxcomp < 1e-32f) { + rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0; + } else { + float normalize = (float) frexp(maxcomp, &exponent) * 256.0f/maxcomp; + + rgbe[0] = (unsigned char)(linear[0] * normalize); + rgbe[1] = (unsigned char)(linear[1] * normalize); + rgbe[2] = (unsigned char)(linear[2] * normalize); + rgbe[3] = (unsigned char)(exponent + 128); + } +} + +void stbiw__write_run_data(stbi__write_context *s, int length, unsigned char databyte) +{ + unsigned char lengthbyte = STBIW_UCHAR(length+128); + STBIW_ASSERT(length+128 <= 255); + s->func(s->context, &lengthbyte, 1); + s->func(s->context, &databyte, 1); +} + +void stbiw__write_dump_data(stbi__write_context *s, int length, unsigned char *data) +{ + unsigned char lengthbyte = STBIW_UCHAR(length); + STBIW_ASSERT(length <= 128); // inconsistent with spec but consistent with official code + s->func(s->context, &lengthbyte, 1); + s->func(s->context, data, length); +} + +void stbiw__write_hdr_scanline(stbi__write_context *s, int width, int ncomp, unsigned char *scratch, float *scanline) +{ + unsigned char scanlineheader[4] = { 2, 2, 0, 0 }; + unsigned char rgbe[4]; + float linear[3]; + int x; + + scanlineheader[2] = (width&0xff00)>>8; + scanlineheader[3] = (width&0x00ff); + + /* skip RLE for images too small or large */ + if (width < 8 || width >= 32768) { + for (x=0; x < width; x++) { + switch (ncomp) { + case 4: /* fallthrough */ + case 3: linear[2] = scanline[x*ncomp + 2]; + linear[1] = scanline[x*ncomp + 1]; + linear[0] = scanline[x*ncomp + 0]; + break; + default: + linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; + break; + } + stbiw__linear_to_rgbe(rgbe, linear); + s->func(s->context, rgbe, 4); + } + } else { + int c,r; + /* encode into scratch buffer */ + for (x=0; x < width; x++) { + switch(ncomp) { + case 4: /* fallthrough */ + case 3: linear[2] = scanline[x*ncomp + 2]; + linear[1] = scanline[x*ncomp + 1]; + linear[0] = scanline[x*ncomp + 0]; + break; + default: + linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; + break; + } + stbiw__linear_to_rgbe(rgbe, linear); + scratch[x + width*0] = rgbe[0]; + scratch[x + width*1] = rgbe[1]; + scratch[x + width*2] = rgbe[2]; + scratch[x + width*3] = rgbe[3]; + } + + s->func(s->context, scanlineheader, 4); + + /* RLE each component separately */ + for (c=0; c < 4; c++) { + unsigned char *comp = &scratch[width*c]; + + x = 0; + while (x < width) { + // find first run + r = x; + while (r+2 < width) { + if (comp[r] == comp[r+1] && comp[r] == comp[r+2]) + break; + ++r; + } + if (r+2 >= width) + r = width; + // dump up to first run + while (x < r) { + int len = r-x; + if (len > 128) len = 128; + stbiw__write_dump_data(s, len, &comp[x]); + x += len; + } + // if there's a run, output it + if (r+2 < width) { // same test as what we break out of in search loop, so only true if we break'd + // find next byte after run + while (r < width && comp[r] == comp[x]) + ++r; + // output run up to r + while (x < r) { + int len = r-x; + if (len > 127) len = 127; + stbiw__write_run_data(s, len, comp[x]); + x += len; + } + } + } + } + } +} + +static int stbi_write_hdr_core(stbi__write_context *s, int x, int y, int comp, float *data) +{ + if (y <= 0 || x <= 0 || data == NULL) + return 0; + else { + // Each component is stored separately. Allocate scratch space for full output scanline. 
+ unsigned char *scratch = (unsigned char *) STBIW_MALLOC(x*4);
+ int i, len;
+ char buffer[128];
+ char header[] = "#?RADIANCE\n# Written by stb_image_write.h\nFORMAT=32-bit_rle_rgbe\n";
+ s->func(s->context, header, sizeof(header)-1);
+
+#ifdef STBI_MSC_SECURE_CRT
+ len = sprintf_s(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x);
+#else
+ len = sprintf(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x);
+#endif
+ s->func(s->context, buffer, len);
+
+ for(i=0; i < y; i++)
+ stbiw__write_hdr_scanline(s, x, comp, scratch, data + comp*x*(stbi__flip_vertically_on_write ? y-1-i : i));
+ STBIW_FREE(scratch);
+ return 1;
+ }
+}
+
+STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const float *data)
+{
+ stbi__write_context s;
+ stbi__start_write_callbacks(&s, func, context);
+ return stbi_write_hdr_core(&s, x, y, comp, (float *) data);
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_hdr(char const *filename, int x, int y, int comp, const float *data)
+{
+ stbi__write_context s;
+ if (stbi__start_write_file(&s,filename)) {
+ int r = stbi_write_hdr_core(&s, x, y, comp, (float *) data);
+ stbi__end_write_file(&s);
+ return r;
+ } else
+ return 0;
+}
+#endif // STBI_WRITE_NO_STDIO
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PNG writer
+//
+
+#ifndef STBIW_ZLIB_COMPRESS
+// stretchy buffer; stbiw__sbpush() == vector<>::push_back() -- stbiw__sbcount() == vector<>::size()
+#define stbiw__sbraw(a) ((int *) (a) - 2)
+#define stbiw__sbm(a) stbiw__sbraw(a)[0]
+#define stbiw__sbn(a) stbiw__sbraw(a)[1]
+
+#define stbiw__sbneedgrow(a,n) ((a)==0 || stbiw__sbn(a)+n >= stbiw__sbm(a))
+#define stbiw__sbmaybegrow(a,n) (stbiw__sbneedgrow(a,(n)) ? stbiw__sbgrow(a,n) : 0)
+#define stbiw__sbgrow(a,n) stbiw__sbgrowf((void **) &(a), (n), sizeof(*(a)))
+
+#define stbiw__sbpush(a, v) (stbiw__sbmaybegrow(a,1), (a)[stbiw__sbn(a)++] = (v))
+#define stbiw__sbcount(a) ((a) ? stbiw__sbn(a) : 0)
+#define stbiw__sbfree(a) ((a) ? STBIW_FREE(stbiw__sbraw(a)),0 : 0)
+
+static void *stbiw__sbgrowf(void **arr, int increment, int itemsize)
+{
+ int m = *arr ? 2*stbiw__sbm(*arr)+increment : increment+1;
+ void *p = STBIW_REALLOC_SIZED(*arr ? stbiw__sbraw(*arr) : 0, *arr ?
(stbiw__sbm(*arr)*itemsize + sizeof(int)*2) : 0, itemsize * m + sizeof(int)*2); + STBIW_ASSERT(p); + if (p) { + if (!*arr) ((int *) p)[1] = 0; + *arr = (void *) ((int *) p + 2); + stbiw__sbm(*arr) = m; + } + return *arr; +} + +static unsigned char *stbiw__zlib_flushf(unsigned char *data, unsigned int *bitbuffer, int *bitcount) +{ + while (*bitcount >= 8) { + stbiw__sbpush(data, STBIW_UCHAR(*bitbuffer)); + *bitbuffer >>= 8; + *bitcount -= 8; + } + return data; +} + +static int stbiw__zlib_bitrev(int code, int codebits) +{ + int res=0; + while (codebits--) { + res = (res << 1) | (code & 1); + code >>= 1; + } + return res; +} + +static unsigned int stbiw__zlib_countm(unsigned char *a, unsigned char *b, int limit) +{ + int i; + for (i=0; i < limit && i < 258; ++i) + if (a[i] != b[i]) break; + return i; +} + +static unsigned int stbiw__zhash(unsigned char *data) +{ + stbiw_uint32 hash = data[0] + (data[1] << 8) + (data[2] << 16); + hash ^= hash << 3; + hash += hash >> 5; + hash ^= hash << 4; + hash += hash >> 17; + hash ^= hash << 25; + hash += hash >> 6; + return hash; +} + +#define stbiw__zlib_flush() (out = stbiw__zlib_flushf(out, &bitbuf, &bitcount)) +#define stbiw__zlib_add(code,codebits) \ + (bitbuf |= (code) << bitcount, bitcount += (codebits), stbiw__zlib_flush()) +#define stbiw__zlib_huffa(b,c) stbiw__zlib_add(stbiw__zlib_bitrev(b,c),c) +// default huffman tables +#define stbiw__zlib_huff1(n) stbiw__zlib_huffa(0x30 + (n), 8) +#define stbiw__zlib_huff2(n) stbiw__zlib_huffa(0x190 + (n)-144, 9) +#define stbiw__zlib_huff3(n) stbiw__zlib_huffa(0 + (n)-256,7) +#define stbiw__zlib_huff4(n) stbiw__zlib_huffa(0xc0 + (n)-280,8) +#define stbiw__zlib_huff(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : (n) <= 255 ? stbiw__zlib_huff2(n) : (n) <= 279 ? stbiw__zlib_huff3(n) : stbiw__zlib_huff4(n)) +#define stbiw__zlib_huffb(n) ((n) <= 143 ? 
stbiw__zlib_huff1(n) : stbiw__zlib_huff2(n)) + +#define stbiw__ZHASH 16384 + +#endif // STBIW_ZLIB_COMPRESS + +unsigned char * stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality) +{ +#ifdef STBIW_ZLIB_COMPRESS + // user provided a zlib compress implementation, use that + return STBIW_ZLIB_COMPRESS(data, data_len, out_len, quality); +#else // use builtin + static unsigned short lengthc[] = { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258, 259 }; + static unsigned char lengtheb[]= { 0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 }; + static unsigned short distc[] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 32768 }; + static unsigned char disteb[] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 }; + unsigned int bitbuf=0; + int i,j, bitcount=0; + unsigned char *out = NULL; + unsigned char ***hash_table = (unsigned char***) STBIW_MALLOC(stbiw__ZHASH * sizeof(char**)); + if (hash_table == NULL) + return NULL; + if (quality < 5) quality = 5; + + stbiw__sbpush(out, 0x78); // DEFLATE 32K window + stbiw__sbpush(out, 0x5e); // FLEVEL = 1 + stbiw__zlib_add(1,1); // BFINAL = 1 + stbiw__zlib_add(1,2); // BTYPE = 1 -- fixed huffman + + for (i=0; i < stbiw__ZHASH; ++i) + hash_table[i] = NULL; + + i=0; + while (i < data_len-3) { + // hash next 3 bytes of data to be compressed + int h = stbiw__zhash(data+i)&(stbiw__ZHASH-1), best=3; + unsigned char *bestloc = 0; + unsigned char **hlist = hash_table[h]; + int n = stbiw__sbcount(hlist); + for (j=0; j < n; ++j) { + if (hlist[j]-data > i-32768) { // if entry lies within window + int d = stbiw__zlib_countm(hlist[j], data+i, data_len-i); + if (d >= best) best=d,bestloc=hlist[j]; + } + } + // when hash table entry is too long, delete half the entries + if (hash_table[h] && stbiw__sbn(hash_table[h]) == 2*quality) { + STBIW_MEMMOVE(hash_table[h], hash_table[h]+quality, sizeof(hash_table[h][0])*quality); + stbiw__sbn(hash_table[h]) = quality; + } + stbiw__sbpush(hash_table[h],data+i); + + if (bestloc) { + // "lazy matching" - check match at *next* byte, and if it's better, do cur byte as literal + h = stbiw__zhash(data+i+1)&(stbiw__ZHASH-1); + hlist = hash_table[h]; + n = stbiw__sbcount(hlist); + for (j=0; j < n; ++j) { + if (hlist[j]-data > i-32767) { + int e = stbiw__zlib_countm(hlist[j], data+i+1, data_len-i-1); + if (e > best) { // if next match is better, bail on current match + bestloc = NULL; + break; + } + } + } + } + + if (bestloc) { + int d = (int) (data+i - bestloc); // distance back + STBIW_ASSERT(d <= 32767 && best <= 258); + for (j=0; best > lengthc[j+1]-1; ++j); + stbiw__zlib_huff(j+257); + if (lengtheb[j]) stbiw__zlib_add(best - lengthc[j], lengtheb[j]); + for (j=0; d > distc[j+1]-1; ++j); + stbiw__zlib_add(stbiw__zlib_bitrev(j,5),5); + if (disteb[j]) stbiw__zlib_add(d - distc[j], disteb[j]); + i += best; + } else { + stbiw__zlib_huffb(data[i]); + ++i; + } + } + // write out final bytes + for (;i < data_len; ++i) + stbiw__zlib_huffb(data[i]); + stbiw__zlib_huff(256); // end of block + // pad with 0 bits to byte boundary + while (bitcount) + stbiw__zlib_add(0,1); + + for (i=0; i < stbiw__ZHASH; ++i) + (void) stbiw__sbfree(hash_table[i]); + STBIW_FREE(hash_table); + + { + // compute adler32 on input + unsigned int s1=1, s2=0; + int blocklen = (int) (data_len % 5552); + j=0; + while (j < data_len) { + for (i=0; i < blocklen; ++i) s1 += data[j+i], s2 += s1; + 
s1 %= 65521, s2 %= 65521; + j += blocklen; + blocklen = 5552; + } + stbiw__sbpush(out, STBIW_UCHAR(s2 >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(s2)); + stbiw__sbpush(out, STBIW_UCHAR(s1 >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(s1)); + } + *out_len = stbiw__sbn(out); + // make returned pointer freeable + STBIW_MEMMOVE(stbiw__sbraw(out), out, *out_len); + return (unsigned char *) stbiw__sbraw(out); +#endif // STBIW_ZLIB_COMPRESS +} + +static unsigned int stbiw__crc32(unsigned char *buffer, int len) +{ + static unsigned int crc_table[256] = + { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0eDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 
0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D + }; + + unsigned int crc = ~0u; + int i; + for (i=0; i < len; ++i) + crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)]; + return ~crc; +} + +#define stbiw__wpng4(o,a,b,c,d) ((o)[0]=STBIW_UCHAR(a),(o)[1]=STBIW_UCHAR(b),(o)[2]=STBIW_UCHAR(c),(o)[3]=STBIW_UCHAR(d),(o)+=4) +#define stbiw__wp32(data,v) stbiw__wpng4(data, (v)>>24,(v)>>16,(v)>>8,(v)); +#define stbiw__wptag(data,s) stbiw__wpng4(data, s[0],s[1],s[2],s[3]) + +static void stbiw__wpcrc(unsigned char **data, int len) +{ + unsigned int crc = stbiw__crc32(*data - len - 4, len+4); + stbiw__wp32(*data, crc); +} + +static unsigned char stbiw__paeth(int a, int b, int c) +{ + int p = a + b - c, pa = abs(p-a), pb = abs(p-b), pc = abs(p-c); + if (pa <= pb && pa <= pc) return STBIW_UCHAR(a); + if (pb <= pc) return STBIW_UCHAR(b); + return STBIW_UCHAR(c); +} + +// @OPTIMIZE: provide an option that always forces left-predict or paeth predict +static void stbiw__encode_png_line(unsigned char *pixels, int stride_bytes, int width, int height, int y, int n, int filter_type, signed char *line_buffer) +{ + static int mapping[] = { 0,1,2,3,4 }; + static int firstmap[] = { 0,1,0,5,6 }; + int *mymap = (y != 0) ? mapping : firstmap; + int i; + int type = mymap[filter_type]; + unsigned char *z = pixels + stride_bytes * (stbi__flip_vertically_on_write ? height-1-y : y); + int signed_stride = stbi__flip_vertically_on_write ? -stride_bytes : stride_bytes; + for (i = 0; i < n; ++i) { + switch (type) { + case 0: line_buffer[i] = z[i]; break; + case 1: line_buffer[i] = z[i]; break; + case 2: line_buffer[i] = z[i] - z[i-signed_stride]; break; + case 3: line_buffer[i] = z[i] - (z[i-signed_stride]>>1); break; + case 4: line_buffer[i] = (signed char) (z[i] - stbiw__paeth(0,z[i-signed_stride],0)); break; + case 5: line_buffer[i] = z[i]; break; + case 6: line_buffer[i] = z[i]; break; + } + } + for (i=n; i < width*n; ++i) { + switch (type) { + case 0: line_buffer[i] = z[i]; break; + case 1: line_buffer[i] = z[i] - z[i-n]; break; + case 2: line_buffer[i] = z[i] - z[i-signed_stride]; break; + case 3: line_buffer[i] = z[i] - ((z[i-n] + z[i-signed_stride])>>1); break; + case 4: line_buffer[i] = z[i] - stbiw__paeth(z[i-n], z[i-signed_stride], z[i-signed_stride-n]); break; + case 5: line_buffer[i] = z[i] - (z[i-n]>>1); break; + case 6: line_buffer[i] = z[i] - stbiw__paeth(z[i-n], 0,0); break; + } + } +} + +unsigned char *stbi_write_png_to_mem(unsigned char *pixels, int stride_bytes, int x, int y, int n, int *out_len) +{ + int force_filter = stbi_write_force_png_filter; + int ctype[5] = { -1, 0, 4, 2, 6 }; + unsigned char sig[8] = { 137,80,78,71,13,10,26,10 }; + unsigned char *out,*o, *filt, *zlib; + signed char *line_buffer; + int j,zlen; + + if (stride_bytes == 0) + stride_bytes = x * n; + + if (force_filter >= 5) { + force_filter = -1; + } + + filt = (unsigned char *) STBIW_MALLOC((x*n+1) * y); if (!filt) return 0; + line_buffer = (signed char *) STBIW_MALLOC(x * n); if (!line_buffer) { STBIW_FREE(filt); return 0; } + for (j=0; j < y; ++j) { + int filter_type; + if (force_filter > -1) { + filter_type = force_filter; + stbiw__encode_png_line(pixels, stride_bytes, x, y, j, n, force_filter, line_buffer); + } else { // Estimate the best filter by running through all of them: + int best_filter = 0, best_filter_val = 0x7fffffff, est, i; + for (filter_type = 0; filter_type < 5; filter_type++) { + stbiw__encode_png_line(pixels, stride_bytes, x, 
y, j, n, filter_type, line_buffer); + + // Estimate the entropy of the line using this filter; the less, the better. + est = 0; + for (i = 0; i < x*n; ++i) { + est += abs((signed char) line_buffer[i]); + } + if (est < best_filter_val) { + best_filter_val = est; + best_filter = filter_type; + } + } + if (filter_type != best_filter) { // If the last iteration already got us the best filter, don't redo it + stbiw__encode_png_line(pixels, stride_bytes, x, y, j, n, best_filter, line_buffer); + filter_type = best_filter; + } + } + // when we get here, filter_type contains the filter type, and line_buffer contains the data + filt[j*(x*n+1)] = (unsigned char) filter_type; + STBIW_MEMMOVE(filt+j*(x*n+1)+1, line_buffer, x*n); + } + STBIW_FREE(line_buffer); + zlib = stbi_zlib_compress(filt, y*( x*n+1), &zlen, stbi_write_png_compression_level); + STBIW_FREE(filt); + if (!zlib) return 0; + + // each tag requires 12 bytes of overhead + out = (unsigned char *) STBIW_MALLOC(8 + 12+13 + 12+zlen + 12); + if (!out) return 0; + *out_len = 8 + 12+13 + 12+zlen + 12; + + o=out; + STBIW_MEMMOVE(o,sig,8); o+= 8; + stbiw__wp32(o, 13); // header length + stbiw__wptag(o, "IHDR"); + stbiw__wp32(o, x); + stbiw__wp32(o, y); + *o++ = 8; + *o++ = STBIW_UCHAR(ctype[n]); + *o++ = 0; + *o++ = 0; + *o++ = 0; + stbiw__wpcrc(&o,13); + + stbiw__wp32(o, zlen); + stbiw__wptag(o, "IDAT"); + STBIW_MEMMOVE(o, zlib, zlen); + o += zlen; + STBIW_FREE(zlib); + stbiw__wpcrc(&o, zlen); + + stbiw__wp32(o,0); + stbiw__wptag(o, "IEND"); + stbiw__wpcrc(&o,0); + + STBIW_ASSERT(o == out + *out_len); + + return out; +} + +#ifndef STBI_WRITE_NO_STDIO +STBIWDEF int stbi_write_png(char const *filename, int x, int y, int comp, const void *data, int stride_bytes) +{ + FILE *f; + int len; + unsigned char *png = stbi_write_png_to_mem((unsigned char *) data, stride_bytes, x, y, comp, &len); + if (png == NULL) return 0; +#ifdef STBI_MSC_SECURE_CRT + if (fopen_s(&f, filename, "wb")) + f = NULL; +#else + f = fopen(filename, "wb"); +#endif + if (!f) { STBIW_FREE(png); return 0; } + fwrite(png, 1, len, f); + fclose(f); + STBIW_FREE(png); + return 1; +} +#endif + +STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int stride_bytes) +{ + int len; + unsigned char *png = stbi_write_png_to_mem((unsigned char *) data, stride_bytes, x, y, comp, &len); + if (png == NULL) return 0; + func(context, png, len); + STBIW_FREE(png); + return 1; +} + + +/* *************************************************************************** + * + * JPEG writer + * + * This is based on Jon Olick's jo_jpeg.cpp: + * public domain Simple, Minimalistic JPEG writer - http://www.jonolick.com/code.html + */ + +static const unsigned char stbiw__jpg_ZigZag[] = { 0,1,5,6,14,15,27,28,2,4,7,13,16,26,29,42,3,8,12,17,25,30,41,43,9,11,18, + 24,31,40,44,53,10,19,23,32,39,45,52,54,20,22,33,38,46,51,55,60,21,34,37,47,50,56,59,61,35,36,48,49,57,58,62,63 }; + +static void stbiw__jpg_writeBits(stbi__write_context *s, int *bitBufP, int *bitCntP, const unsigned short *bs) { + int bitBuf = *bitBufP, bitCnt = *bitCntP; + bitCnt += bs[1]; + bitBuf |= bs[0] << (24 - bitCnt); + while(bitCnt >= 8) { + unsigned char c = (bitBuf >> 16) & 255; + stbiw__putc(s, c); + if(c == 255) { + stbiw__putc(s, 0); + } + bitBuf <<= 8; + bitCnt -= 8; + } + *bitBufP = bitBuf; + *bitCntP = bitCnt; +} + +static void stbiw__jpg_DCT(float *d0p, float *d1p, float *d2p, float *d3p, float *d4p, float *d5p, float *d6p, float *d7p) { + float d0 = *d0p, d1 = *d1p, d2 = *d2p, d3 
= *d3p, d4 = *d4p, d5 = *d5p, d6 = *d6p, d7 = *d7p;
+ float z1, z2, z3, z4, z5, z11, z13;
+
+ float tmp0 = d0 + d7;
+ float tmp7 = d0 - d7;
+ float tmp1 = d1 + d6;
+ float tmp6 = d1 - d6;
+ float tmp2 = d2 + d5;
+ float tmp5 = d2 - d5;
+ float tmp3 = d3 + d4;
+ float tmp4 = d3 - d4;
+
+ // Even part
+ float tmp10 = tmp0 + tmp3; // phase 2
+ float tmp13 = tmp0 - tmp3;
+ float tmp11 = tmp1 + tmp2;
+ float tmp12 = tmp1 - tmp2;
+
+ d0 = tmp10 + tmp11; // phase 3
+ d4 = tmp10 - tmp11;
+
+ z1 = (tmp12 + tmp13) * 0.707106781f; // c4
+ d2 = tmp13 + z1; // phase 5
+ d6 = tmp13 - z1;
+
+ // Odd part
+ tmp10 = tmp4 + tmp5; // phase 2
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp6 + tmp7;
+
+ // The rotator is modified from fig 4-8 to avoid extra negations.
+ z5 = (tmp10 - tmp12) * 0.382683433f; // c6
+ z2 = tmp10 * 0.541196100f + z5; // c2-c6
+ z4 = tmp12 * 1.306562965f + z5; // c2+c6
+ z3 = tmp11 * 0.707106781f; // c4
+
+ z11 = tmp7 + z3; // phase 5
+ z13 = tmp7 - z3;
+
+ *d5p = z13 + z2; // phase 6
+ *d3p = z13 - z2;
+ *d1p = z11 + z4;
+ *d7p = z11 - z4;
+
+ *d0p = d0; *d2p = d2; *d4p = d4; *d6p = d6;
+}
+
+static void stbiw__jpg_calcBits(int val, unsigned short bits[2]) {
+ int tmp1 = val < 0 ? -val : val;
+ val = val < 0 ? val-1 : val;
+ bits[1] = 1;
+ while(tmp1 >>= 1) {
+ ++bits[1];
+ }
+ bits[0] = val & ((1<<bits[1])-1);
+}
+
+static int stbiw__jpg_processDU(stbi__write_context *s, int *bitBuf, int *bitCnt, float *CDU, float *fdtbl, int DC, const unsigned short HTDC[256][2], const unsigned short HTAC[256][2]) {
+ const unsigned short EOB[2] = { HTAC[0x00][0], HTAC[0x00][1] };
+ const unsigned short M16zeroes[2] = { HTAC[0xF0][0], HTAC[0xF0][1] };
+ int dataOff, i, diff, end0pos;
+ int DU[64];
+
+ // DCT rows
+ for(dataOff=0; dataOff<64; dataOff+=8) {
+ stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff+1], &CDU[dataOff+2], &CDU[dataOff+3], &CDU[dataOff+4], &CDU[dataOff+5], &CDU[dataOff+6], &CDU[dataOff+7]);
+ }
+ // DCT columns
+ for(dataOff=0; dataOff<8; ++dataOff) {
+ stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff+8], &CDU[dataOff+16], &CDU[dataOff+24], &CDU[dataOff+32], &CDU[dataOff+40], &CDU[dataOff+48], &CDU[dataOff+56]);
+ }
+ // Quantize/descale/zigzag the coefficients
+ for(i=0; i<64; ++i) {
+ float v = CDU[i]*fdtbl[i];
+ DU[stbiw__jpg_ZigZag[i]] = (int)(v < 0 ? v - 0.5f : v + 0.5f);
+ }
+
+ // Encode DC
+ diff = DU[0] - DC;
+ if (diff == 0) {
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[0]);
+ } else {
+ unsigned short bits[2];
+ stbiw__jpg_calcBits(diff, bits);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[bits[1]]);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
+ }
+ // Encode ACs
+ end0pos = 63;
+ for(; (end0pos>0)&&(DU[end0pos]==0); --end0pos) {
+ }
+ // end0pos = first element in reverse order !=0
+ if(end0pos == 0) {
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
+ return DU[0];
+ }
+ for(i = 1; i <= end0pos; ++i) {
+ int startpos = i;
+ int nrzeroes;
+ unsigned short bits[2];
+ for (; DU[i]==0 && i<=end0pos; ++i) {
+ }
+ nrzeroes = i-startpos;
+ if ( nrzeroes >= 16 ) {
+ int lng = nrzeroes>>4;
+ int nrmarker;
+ for (nrmarker=1; nrmarker <= lng; ++nrmarker)
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, M16zeroes);
+ nrzeroes &= 15;
+ }
+ stbiw__jpg_calcBits(DU[i], bits);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTAC[(nrzeroes<<4)+bits[1]]);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
+ }
+ if(end0pos != 63) {
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
+ }
+ return DU[0];
+}
+
+static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, int comp, const void* data, int quality) {
+ // Constants that don't pollute global namespace
+ static const unsigned char std_dc_luminance_nrcodes[] = {0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0};
+ static const unsigned char std_dc_luminance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11};
+ static const unsigned char std_ac_luminance_nrcodes[] = {0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d};
+ static const unsigned char std_ac_luminance_values[] = {
+ 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,
+ 0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,
+ 0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,
+ 0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
+ 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,
+ 0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,
+ 0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa
+ };
+ static const unsigned char std_dc_chrominance_nrcodes[] = {0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0};
+
static const unsigned char std_dc_chrominance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11}; + static const unsigned char std_ac_chrominance_nrcodes[] = {0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77}; + static const unsigned char std_ac_chrominance_values[] = { + 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91, + 0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26, + 0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58, + 0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87, + 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4, + 0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda, + 0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa + }; + // Huffman tables + static const unsigned short YDC_HT[256][2] = { {0,2},{2,3},{3,3},{4,3},{5,3},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9}}; + static const unsigned short UVDC_HT[256][2] = { {0,2},{1,2},{2,2},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9},{1022,10},{2046,11}}; + static const unsigned short YAC_HT[256][2] = { + {10,4},{0,2},{1,2},{4,3},{11,4},{26,5},{120,7},{248,8},{1014,10},{65410,16},{65411,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {12,4},{27,5},{121,7},{502,9},{2038,11},{65412,16},{65413,16},{65414,16},{65415,16},{65416,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {28,5},{249,8},{1015,10},{4084,12},{65417,16},{65418,16},{65419,16},{65420,16},{65421,16},{65422,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {58,6},{503,9},{4085,12},{65423,16},{65424,16},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {59,6},{1016,10},{65430,16},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {122,7},{2039,11},{65438,16},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {123,7},{4086,12},{65446,16},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {250,8},{4087,12},{65454,16},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {504,9},{32704,15},{65462,16},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {505,9},{65470,16},{65471,16},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {506,9},{65479,16},{65480,16},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1017,10},{65488,16},{65489,16},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1018,10},{65497,16},{65498,16},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2040,11},{65506,16},{65507,16},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + 
{65515,16},{65516,16},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2041,11},{65525,16},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} + }; + static const unsigned short UVAC_HT[256][2] = { + {0,2},{1,2},{4,3},{10,4},{24,5},{25,5},{56,6},{120,7},{500,9},{1014,10},{4084,12},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {11,4},{57,6},{246,8},{501,9},{2038,11},{4085,12},{65416,16},{65417,16},{65418,16},{65419,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {26,5},{247,8},{1015,10},{4086,12},{32706,15},{65420,16},{65421,16},{65422,16},{65423,16},{65424,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {27,5},{248,8},{1016,10},{4087,12},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{65430,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {58,6},{502,9},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{65438,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {59,6},{1017,10},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{65446,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {121,7},{2039,11},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{65454,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {122,7},{2040,11},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{65462,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {249,8},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{65470,16},{65471,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {503,9},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{65479,16},{65480,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {504,9},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{65488,16},{65489,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {505,9},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{65497,16},{65498,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {506,9},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{65506,16},{65507,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2041,11},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{65515,16},{65516,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {16352,14},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{65525,16},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1018,10},{32707,15},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} + }; + static const int YQT[] = {16,11,10,16,24,40,51,61,12,12,14,19,26,58,60,55,14,13,16,24,40,57,69,56,14,17,22,29,51,87,80,62,18,22, + 37,56,68,109,103,77,24,35,55,64,81,104,113,92,49,64,78,87,103,121,120,101,72,92,95,98,112,100,103,99}; + static const int UVQT[] = {17,18,24,47,99,99,99,99,18,21,26,66,99,99,99,99,24,26,56,99,99,99,99,99,47,66,99,99,99,99,99,99, + 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99}; + static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f, + 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f }; + + int row, col, i, k; + float fdtbl_Y[64], fdtbl_UV[64]; + unsigned char YTable[64], UVTable[64]; + + if(!data || !width || !height || comp > 4 || comp < 1) { + return 0; + } + + quality = quality ? 
quality : 90; + quality = quality < 1 ? 1 : quality > 100 ? 100 : quality; + quality = quality < 50 ? 5000 / quality : 200 - quality * 2; + + for(i = 0; i < 64; ++i) { + int uvti, yti = (YQT[i]*quality+50)/100; + YTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (yti < 1 ? 1 : yti > 255 ? 255 : yti); + uvti = (UVQT[i]*quality+50)/100; + UVTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (uvti < 1 ? 1 : uvti > 255 ? 255 : uvti); + } + + for(row = 0, k = 0; row < 8; ++row) { + for(col = 0; col < 8; ++col, ++k) { + fdtbl_Y[k] = 1 / (YTable [stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); + fdtbl_UV[k] = 1 / (UVTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); + } + } + + // Write Headers + { + static const unsigned char head0[] = { 0xFF,0xD8,0xFF,0xE0,0,0x10,'J','F','I','F',0,1,1,0,0,1,0,1,0,0,0xFF,0xDB,0,0x84,0 }; + static const unsigned char head2[] = { 0xFF,0xDA,0,0xC,3,1,0,2,0x11,3,0x11,0,0x3F,0 }; + const unsigned char head1[] = { 0xFF,0xC0,0,0x11,8,(unsigned char)(height>>8),STBIW_UCHAR(height),(unsigned char)(width>>8),STBIW_UCHAR(width), + 3,1,0x11,0,2,0x11,1,3,0x11,1,0xFF,0xC4,0x01,0xA2,0 }; + s->func(s->context, (void*)head0, sizeof(head0)); + s->func(s->context, (void*)YTable, sizeof(YTable)); + stbiw__putc(s, 1); + s->func(s->context, UVTable, sizeof(UVTable)); + s->func(s->context, (void*)head1, sizeof(head1)); + s->func(s->context, (void*)(std_dc_luminance_nrcodes+1), sizeof(std_dc_luminance_nrcodes)-1); + s->func(s->context, (void*)std_dc_luminance_values, sizeof(std_dc_luminance_values)); + stbiw__putc(s, 0x10); // HTYACinfo + s->func(s->context, (void*)(std_ac_luminance_nrcodes+1), sizeof(std_ac_luminance_nrcodes)-1); + s->func(s->context, (void*)std_ac_luminance_values, sizeof(std_ac_luminance_values)); + stbiw__putc(s, 1); // HTUDCinfo + s->func(s->context, (void*)(std_dc_chrominance_nrcodes+1), sizeof(std_dc_chrominance_nrcodes)-1); + s->func(s->context, (void*)std_dc_chrominance_values, sizeof(std_dc_chrominance_values)); + stbiw__putc(s, 0x11); // HTUACinfo + s->func(s->context, (void*)(std_ac_chrominance_nrcodes+1), sizeof(std_ac_chrominance_nrcodes)-1); + s->func(s->context, (void*)std_ac_chrominance_values, sizeof(std_ac_chrominance_values)); + s->func(s->context, (void*)head2, sizeof(head2)); + } + + // Encode 8x8 macroblocks + { + static const unsigned short fillBits[] = {0x7F, 7}; + const unsigned char *imageData = (const unsigned char *)data; + int DCY=0, DCU=0, DCV=0; + int bitBuf=0, bitCnt=0; + // comp == 2 is grey+alpha (alpha is ignored) + int ofsG = comp > 2 ? 1 : 0, ofsB = comp > 2 ? 2 : 0; + int x, y, pos; + for(y = 0; y < height; y += 8) { + for(x = 0; x < width; x += 8) { + float YDU[64], UDU[64], VDU[64]; + for(row = y, pos = 0; row < y+8; ++row) { + for(col = x; col < x+8; ++col, ++pos) { + int p = (stbi__flip_vertically_on_write ? 
height-1-row : row)*width*comp + col*comp; + float r, g, b; + if(row >= height) { + p -= width*comp*(row+1 - height); + } + if(col >= width) { + p -= comp*(col+1 - width); + } + + r = imageData[p+0]; + g = imageData[p+ofsG]; + b = imageData[p+ofsB]; + YDU[pos]=+0.29900f*r+0.58700f*g+0.11400f*b-128; + UDU[pos]=-0.16874f*r-0.33126f*g+0.50000f*b; + VDU[pos]=+0.50000f*r-0.41869f*g-0.08131f*b; + } + } + + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, YDU, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, UDU, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); + DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, VDU, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); + } + } + + // Do the bit alignment of the EOI marker + stbiw__jpg_writeBits(s, &bitBuf, &bitCnt, fillBits); + } + + // EOI + stbiw__putc(s, 0xFF); + stbiw__putc(s, 0xD9); + + return 1; +} + +STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality) +{ + stbi__write_context s; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_jpg_core(&s, x, y, comp, (void *) data, quality); +} + + +#ifndef STBI_WRITE_NO_STDIO +STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality) +{ + stbi__write_context s; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_jpg_core(&s, x, y, comp, data, quality); + stbi__end_write_file(&s); + return r; + } else + return 0; +} +#endif + +#endif // STB_IMAGE_WRITE_IMPLEMENTATION + +/* Revision history + 1.09 (2018-02-11) + fix typo in zlib quality API, improve STB_I_W_STATIC in C++ + 1.08 (2018-01-29) + add stbi__flip_vertically_on_write, external zlib, zlib quality, choose PNG filter + 1.07 (2017-07-24) + doc fix + 1.06 (2017-07-23) + writing JPEG (using Jon Olick's code) + 1.05 ??? + 1.04 (2017-03-03) + monochrome BMP expansion + 1.03 ??? + 1.02 (2016-04-02) + avoid allocating large structures on the stack + 1.01 (2016-01-16) + STBIW_REALLOC_SIZED: support allocators with no realloc support + avoid race-condition in crc initialization + minor compile issues + 1.00 (2015-09-14) + installable file IO function + 0.99 (2015-09-13) + warning fixes; TGA rle support + 0.98 (2015-04-08) + added STBIW_MALLOC, STBIW_ASSERT etc + 0.97 (2015-01-18) + fixed HDR asserts, rewrote HDR rle logic + 0.96 (2015-01-17) + add HDR output + fix monochrome BMP + 0.95 (2014-08-17) + add monochrome TGA output + 0.94 (2014-05-31) + rename private functions to avoid conflicts with stb_image.h + 0.93 (2014-05-27) + warning fixes + 0.92 (2010-08-01) + casts to unsigned char to fix warnings + 0.91 (2010-07-17) + first public release + 0.90 first internal release +*/ + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/src/refresh/vkpt/include/vulkan/GLSL.std.450.h b/src/refresh/vkpt/include/vulkan/GLSL.std.450.h new file mode 100644 index 0000000..4364002 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/GLSL.std.450.h @@ -0,0 +1,131 @@ +/* +** Copyright (c) 2014-2016 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. 
+** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +#ifndef GLSLstd450_H +#define GLSLstd450_H + +static const int GLSLstd450Version = 100; +static const int GLSLstd450Revision = 3; + +enum GLSLstd450 { + GLSLstd450Bad = 0, // Don't use + + GLSLstd450Round = 1, + GLSLstd450RoundEven = 2, + GLSLstd450Trunc = 3, + GLSLstd450FAbs = 4, + GLSLstd450SAbs = 5, + GLSLstd450FSign = 6, + GLSLstd450SSign = 7, + GLSLstd450Floor = 8, + GLSLstd450Ceil = 9, + GLSLstd450Fract = 10, + + GLSLstd450Radians = 11, + GLSLstd450Degrees = 12, + GLSLstd450Sin = 13, + GLSLstd450Cos = 14, + GLSLstd450Tan = 15, + GLSLstd450Asin = 16, + GLSLstd450Acos = 17, + GLSLstd450Atan = 18, + GLSLstd450Sinh = 19, + GLSLstd450Cosh = 20, + GLSLstd450Tanh = 21, + GLSLstd450Asinh = 22, + GLSLstd450Acosh = 23, + GLSLstd450Atanh = 24, + GLSLstd450Atan2 = 25, + + GLSLstd450Pow = 26, + GLSLstd450Exp = 27, + GLSLstd450Log = 28, + GLSLstd450Exp2 = 29, + GLSLstd450Log2 = 30, + GLSLstd450Sqrt = 31, + GLSLstd450InverseSqrt = 32, + + GLSLstd450Determinant = 33, + GLSLstd450MatrixInverse = 34, + + GLSLstd450Modf = 35, // second operand needs an OpVariable to write to + GLSLstd450ModfStruct = 36, // no OpVariable operand + GLSLstd450FMin = 37, + GLSLstd450UMin = 38, + GLSLstd450SMin = 39, + GLSLstd450FMax = 40, + GLSLstd450UMax = 41, + GLSLstd450SMax = 42, + GLSLstd450FClamp = 43, + GLSLstd450UClamp = 44, + GLSLstd450SClamp = 45, + GLSLstd450FMix = 46, + GLSLstd450IMix = 47, // Reserved + GLSLstd450Step = 48, + GLSLstd450SmoothStep = 49, + + GLSLstd450Fma = 50, + GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to + GLSLstd450FrexpStruct = 52, // no OpVariable operand + GLSLstd450Ldexp = 53, + + GLSLstd450PackSnorm4x8 = 54, + GLSLstd450PackUnorm4x8 = 55, + GLSLstd450PackSnorm2x16 = 56, + GLSLstd450PackUnorm2x16 = 57, + GLSLstd450PackHalf2x16 = 58, + GLSLstd450PackDouble2x32 = 59, + GLSLstd450UnpackSnorm2x16 = 60, + GLSLstd450UnpackUnorm2x16 = 61, + GLSLstd450UnpackHalf2x16 = 62, + GLSLstd450UnpackSnorm4x8 = 63, + GLSLstd450UnpackUnorm4x8 = 64, + GLSLstd450UnpackDouble2x32 = 65, + + GLSLstd450Length = 66, + GLSLstd450Distance = 67, + GLSLstd450Cross = 68, + GLSLstd450Normalize = 69, + GLSLstd450FaceForward = 70, + GLSLstd450Reflect = 71, + GLSLstd450Refract = 72, + + GLSLstd450FindILsb = 73, + GLSLstd450FindSMsb = 74, + GLSLstd450FindUMsb = 75, + + GLSLstd450InterpolateAtCentroid = 76, + GLSLstd450InterpolateAtSample = 77, + GLSLstd450InterpolateAtOffset = 78, + + GLSLstd450NMin = 79, + GLSLstd450NMax = 80, + GLSLstd450NClamp = 81, + + GLSLstd450Count +}; + +#endif // #ifndef GLSLstd450_H diff --git a/src/refresh/vkpt/include/vulkan/spirv.h b/src/refresh/vkpt/include/vulkan/spirv.h new file mode 100644 index 0000000..137c1ea --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/spirv.h @@ -0,0 +1,1177 @@ +/* +** Copyright (c) 2014-2018 The Khronos Group 
Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +/* +** This header is automatically generated by the same tool that creates +** the Binary Section of the SPIR-V specification. +*/ + +/* +** Enumeration tokens for SPIR-V, in various styles: +** C, C++, C++11, JSON, Lua, Python +** +** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +** - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +** +** Some tokens act like mask values, which can be OR'd together, +** while others are mutually exclusive. The mask-like ones have +** "Mask" in their name, and a parallel enum that has the shift +** amount (1 << x) for each corresponding enumerant. 
+*/ + +#ifndef spirv_H +#define spirv_H + +typedef unsigned int SpvId; + +#define SPV_VERSION 0x10300 +#define SPV_REVISION 1 + +static const unsigned int SpvMagicNumber = 0x07230203; +static const unsigned int SpvVersion = 0x00010300; +static const unsigned int SpvRevision = 1; +static const unsigned int SpvOpCodeMask = 0xffff; +static const unsigned int SpvWordCountShift = 16; + +typedef enum SpvSourceLanguage_ { + SpvSourceLanguageUnknown = 0, + SpvSourceLanguageESSL = 1, + SpvSourceLanguageGLSL = 2, + SpvSourceLanguageOpenCL_C = 3, + SpvSourceLanguageOpenCL_CPP = 4, + SpvSourceLanguageHLSL = 5, + SpvSourceLanguageMax = 0x7fffffff, +} SpvSourceLanguage; + +typedef enum SpvExecutionModel_ { + SpvExecutionModelVertex = 0, + SpvExecutionModelTessellationControl = 1, + SpvExecutionModelTessellationEvaluation = 2, + SpvExecutionModelGeometry = 3, + SpvExecutionModelFragment = 4, + SpvExecutionModelGLCompute = 5, + SpvExecutionModelKernel = 6, + SpvExecutionModelTaskNV = 5267, + SpvExecutionModelMeshNV = 5268, + SpvExecutionModelRayGenerationNVX = 5313, + SpvExecutionModelIntersectionNVX = 5314, + SpvExecutionModelAnyHitNVX = 5315, + SpvExecutionModelClosestHitNVX = 5316, + SpvExecutionModelMissNVX = 5317, + SpvExecutionModelCallableNVX = 5318, + SpvExecutionModelMax = 0x7fffffff, +} SpvExecutionModel; + +typedef enum SpvAddressingModel_ { + SpvAddressingModelLogical = 0, + SpvAddressingModelPhysical32 = 1, + SpvAddressingModelPhysical64 = 2, + SpvAddressingModelMax = 0x7fffffff, +} SpvAddressingModel; + +typedef enum SpvMemoryModel_ { + SpvMemoryModelSimple = 0, + SpvMemoryModelGLSL450 = 1, + SpvMemoryModelOpenCL = 2, + SpvMemoryModelVulkanKHR = 3, + SpvMemoryModelMax = 0x7fffffff, +} SpvMemoryModel; + +typedef enum SpvExecutionMode_ { + SpvExecutionModeInvocations = 0, + SpvExecutionModeSpacingEqual = 1, + SpvExecutionModeSpacingFractionalEven = 2, + SpvExecutionModeSpacingFractionalOdd = 3, + SpvExecutionModeVertexOrderCw = 4, + SpvExecutionModeVertexOrderCcw = 5, + SpvExecutionModePixelCenterInteger = 6, + SpvExecutionModeOriginUpperLeft = 7, + SpvExecutionModeOriginLowerLeft = 8, + SpvExecutionModeEarlyFragmentTests = 9, + SpvExecutionModePointMode = 10, + SpvExecutionModeXfb = 11, + SpvExecutionModeDepthReplacing = 12, + SpvExecutionModeDepthGreater = 14, + SpvExecutionModeDepthLess = 15, + SpvExecutionModeDepthUnchanged = 16, + SpvExecutionModeLocalSize = 17, + SpvExecutionModeLocalSizeHint = 18, + SpvExecutionModeInputPoints = 19, + SpvExecutionModeInputLines = 20, + SpvExecutionModeInputLinesAdjacency = 21, + SpvExecutionModeTriangles = 22, + SpvExecutionModeInputTrianglesAdjacency = 23, + SpvExecutionModeQuads = 24, + SpvExecutionModeIsolines = 25, + SpvExecutionModeOutputVertices = 26, + SpvExecutionModeOutputPoints = 27, + SpvExecutionModeOutputLineStrip = 28, + SpvExecutionModeOutputTriangleStrip = 29, + SpvExecutionModeVecTypeHint = 30, + SpvExecutionModeContractionOff = 31, + SpvExecutionModeInitializer = 33, + SpvExecutionModeFinalizer = 34, + SpvExecutionModeSubgroupSize = 35, + SpvExecutionModeSubgroupsPerWorkgroup = 36, + SpvExecutionModeSubgroupsPerWorkgroupId = 37, + SpvExecutionModeLocalSizeId = 38, + SpvExecutionModeLocalSizeHintId = 39, + SpvExecutionModePostDepthCoverage = 4446, + SpvExecutionModeStencilRefReplacingEXT = 5027, + SpvExecutionModeOutputLinesNV = 5269, + SpvExecutionModeOutputPrimitivesNV = 5270, + SpvExecutionModeDerivativeGroupQuadsNV = 5289, + SpvExecutionModeDerivativeGroupLinearNV = 5290, + SpvExecutionModeOutputTrianglesNV = 5298, + 
SpvExecutionModeMax = 0x7fffffff, +} SpvExecutionMode; + +typedef enum SpvStorageClass_ { + SpvStorageClassUniformConstant = 0, + SpvStorageClassInput = 1, + SpvStorageClassUniform = 2, + SpvStorageClassOutput = 3, + SpvStorageClassWorkgroup = 4, + SpvStorageClassCrossWorkgroup = 5, + SpvStorageClassPrivate = 6, + SpvStorageClassFunction = 7, + SpvStorageClassGeneric = 8, + SpvStorageClassPushConstant = 9, + SpvStorageClassAtomicCounter = 10, + SpvStorageClassImage = 11, + SpvStorageClassStorageBuffer = 12, + SpvStorageClassRayPayloadNVX = 5338, + SpvStorageClassHitAttributeNVX = 5339, + SpvStorageClassIncomingRayPayloadNVX = 5342, + SpvStorageClassShaderRecordBufferNVX = 5343, + SpvStorageClassMax = 0x7fffffff, +} SpvStorageClass; + +typedef enum SpvDim_ { + SpvDim1D = 0, + SpvDim2D = 1, + SpvDim3D = 2, + SpvDimCube = 3, + SpvDimRect = 4, + SpvDimBuffer = 5, + SpvDimSubpassData = 6, + SpvDimMax = 0x7fffffff, +} SpvDim; + +typedef enum SpvSamplerAddressingMode_ { + SpvSamplerAddressingModeNone = 0, + SpvSamplerAddressingModeClampToEdge = 1, + SpvSamplerAddressingModeClamp = 2, + SpvSamplerAddressingModeRepeat = 3, + SpvSamplerAddressingModeRepeatMirrored = 4, + SpvSamplerAddressingModeMax = 0x7fffffff, +} SpvSamplerAddressingMode; + +typedef enum SpvSamplerFilterMode_ { + SpvSamplerFilterModeNearest = 0, + SpvSamplerFilterModeLinear = 1, + SpvSamplerFilterModeMax = 0x7fffffff, +} SpvSamplerFilterMode; + +typedef enum SpvImageFormat_ { + SpvImageFormatUnknown = 0, + SpvImageFormatRgba32f = 1, + SpvImageFormatRgba16f = 2, + SpvImageFormatR32f = 3, + SpvImageFormatRgba8 = 4, + SpvImageFormatRgba8Snorm = 5, + SpvImageFormatRg32f = 6, + SpvImageFormatRg16f = 7, + SpvImageFormatR11fG11fB10f = 8, + SpvImageFormatR16f = 9, + SpvImageFormatRgba16 = 10, + SpvImageFormatRgb10A2 = 11, + SpvImageFormatRg16 = 12, + SpvImageFormatRg8 = 13, + SpvImageFormatR16 = 14, + SpvImageFormatR8 = 15, + SpvImageFormatRgba16Snorm = 16, + SpvImageFormatRg16Snorm = 17, + SpvImageFormatRg8Snorm = 18, + SpvImageFormatR16Snorm = 19, + SpvImageFormatR8Snorm = 20, + SpvImageFormatRgba32i = 21, + SpvImageFormatRgba16i = 22, + SpvImageFormatRgba8i = 23, + SpvImageFormatR32i = 24, + SpvImageFormatRg32i = 25, + SpvImageFormatRg16i = 26, + SpvImageFormatRg8i = 27, + SpvImageFormatR16i = 28, + SpvImageFormatR8i = 29, + SpvImageFormatRgba32ui = 30, + SpvImageFormatRgba16ui = 31, + SpvImageFormatRgba8ui = 32, + SpvImageFormatR32ui = 33, + SpvImageFormatRgb10a2ui = 34, + SpvImageFormatRg32ui = 35, + SpvImageFormatRg16ui = 36, + SpvImageFormatRg8ui = 37, + SpvImageFormatR16ui = 38, + SpvImageFormatR8ui = 39, + SpvImageFormatMax = 0x7fffffff, +} SpvImageFormat; + +typedef enum SpvImageChannelOrder_ { + SpvImageChannelOrderR = 0, + SpvImageChannelOrderA = 1, + SpvImageChannelOrderRG = 2, + SpvImageChannelOrderRA = 3, + SpvImageChannelOrderRGB = 4, + SpvImageChannelOrderRGBA = 5, + SpvImageChannelOrderBGRA = 6, + SpvImageChannelOrderARGB = 7, + SpvImageChannelOrderIntensity = 8, + SpvImageChannelOrderLuminance = 9, + SpvImageChannelOrderRx = 10, + SpvImageChannelOrderRGx = 11, + SpvImageChannelOrderRGBx = 12, + SpvImageChannelOrderDepth = 13, + SpvImageChannelOrderDepthStencil = 14, + SpvImageChannelOrdersRGB = 15, + SpvImageChannelOrdersRGBx = 16, + SpvImageChannelOrdersRGBA = 17, + SpvImageChannelOrdersBGRA = 18, + SpvImageChannelOrderABGR = 19, + SpvImageChannelOrderMax = 0x7fffffff, +} SpvImageChannelOrder; + +typedef enum SpvImageChannelDataType_ { + SpvImageChannelDataTypeSnormInt8 = 0, + SpvImageChannelDataTypeSnormInt16 = 1, + 
SpvImageChannelDataTypeUnormInt8 = 2, + SpvImageChannelDataTypeUnormInt16 = 3, + SpvImageChannelDataTypeUnormShort565 = 4, + SpvImageChannelDataTypeUnormShort555 = 5, + SpvImageChannelDataTypeUnormInt101010 = 6, + SpvImageChannelDataTypeSignedInt8 = 7, + SpvImageChannelDataTypeSignedInt16 = 8, + SpvImageChannelDataTypeSignedInt32 = 9, + SpvImageChannelDataTypeUnsignedInt8 = 10, + SpvImageChannelDataTypeUnsignedInt16 = 11, + SpvImageChannelDataTypeUnsignedInt32 = 12, + SpvImageChannelDataTypeHalfFloat = 13, + SpvImageChannelDataTypeFloat = 14, + SpvImageChannelDataTypeUnormInt24 = 15, + SpvImageChannelDataTypeUnormInt101010_2 = 16, + SpvImageChannelDataTypeMax = 0x7fffffff, +} SpvImageChannelDataType; + +typedef enum SpvImageOperandsShift_ { + SpvImageOperandsBiasShift = 0, + SpvImageOperandsLodShift = 1, + SpvImageOperandsGradShift = 2, + SpvImageOperandsConstOffsetShift = 3, + SpvImageOperandsOffsetShift = 4, + SpvImageOperandsConstOffsetsShift = 5, + SpvImageOperandsSampleShift = 6, + SpvImageOperandsMinLodShift = 7, + SpvImageOperandsMakeTexelAvailableKHRShift = 8, + SpvImageOperandsMakeTexelVisibleKHRShift = 9, + SpvImageOperandsNonPrivateTexelKHRShift = 10, + SpvImageOperandsVolatileTexelKHRShift = 11, + SpvImageOperandsMax = 0x7fffffff, +} SpvImageOperandsShift; + +typedef enum SpvImageOperandsMask_ { + SpvImageOperandsMaskNone = 0, + SpvImageOperandsBiasMask = 0x00000001, + SpvImageOperandsLodMask = 0x00000002, + SpvImageOperandsGradMask = 0x00000004, + SpvImageOperandsConstOffsetMask = 0x00000008, + SpvImageOperandsOffsetMask = 0x00000010, + SpvImageOperandsConstOffsetsMask = 0x00000020, + SpvImageOperandsSampleMask = 0x00000040, + SpvImageOperandsMinLodMask = 0x00000080, + SpvImageOperandsMakeTexelAvailableKHRMask = 0x00000100, + SpvImageOperandsMakeTexelVisibleKHRMask = 0x00000200, + SpvImageOperandsNonPrivateTexelKHRMask = 0x00000400, + SpvImageOperandsVolatileTexelKHRMask = 0x00000800, +} SpvImageOperandsMask; + +typedef enum SpvFPFastMathModeShift_ { + SpvFPFastMathModeNotNaNShift = 0, + SpvFPFastMathModeNotInfShift = 1, + SpvFPFastMathModeNSZShift = 2, + SpvFPFastMathModeAllowRecipShift = 3, + SpvFPFastMathModeFastShift = 4, + SpvFPFastMathModeMax = 0x7fffffff, +} SpvFPFastMathModeShift; + +typedef enum SpvFPFastMathModeMask_ { + SpvFPFastMathModeMaskNone = 0, + SpvFPFastMathModeNotNaNMask = 0x00000001, + SpvFPFastMathModeNotInfMask = 0x00000002, + SpvFPFastMathModeNSZMask = 0x00000004, + SpvFPFastMathModeAllowRecipMask = 0x00000008, + SpvFPFastMathModeFastMask = 0x00000010, +} SpvFPFastMathModeMask; + +typedef enum SpvFPRoundingMode_ { + SpvFPRoundingModeRTE = 0, + SpvFPRoundingModeRTZ = 1, + SpvFPRoundingModeRTP = 2, + SpvFPRoundingModeRTN = 3, + SpvFPRoundingModeMax = 0x7fffffff, +} SpvFPRoundingMode; + +typedef enum SpvLinkageType_ { + SpvLinkageTypeExport = 0, + SpvLinkageTypeImport = 1, + SpvLinkageTypeMax = 0x7fffffff, +} SpvLinkageType; + +typedef enum SpvAccessQualifier_ { + SpvAccessQualifierReadOnly = 0, + SpvAccessQualifierWriteOnly = 1, + SpvAccessQualifierReadWrite = 2, + SpvAccessQualifierMax = 0x7fffffff, +} SpvAccessQualifier; + +typedef enum SpvFunctionParameterAttribute_ { + SpvFunctionParameterAttributeZext = 0, + SpvFunctionParameterAttributeSext = 1, + SpvFunctionParameterAttributeByVal = 2, + SpvFunctionParameterAttributeSret = 3, + SpvFunctionParameterAttributeNoAlias = 4, + SpvFunctionParameterAttributeNoCapture = 5, + SpvFunctionParameterAttributeNoWrite = 6, + SpvFunctionParameterAttributeNoReadWrite = 7, + SpvFunctionParameterAttributeMax = 
0x7fffffff, +} SpvFunctionParameterAttribute; + +typedef enum SpvDecoration_ { + SpvDecorationRelaxedPrecision = 0, + SpvDecorationSpecId = 1, + SpvDecorationBlock = 2, + SpvDecorationBufferBlock = 3, + SpvDecorationRowMajor = 4, + SpvDecorationColMajor = 5, + SpvDecorationArrayStride = 6, + SpvDecorationMatrixStride = 7, + SpvDecorationGLSLShared = 8, + SpvDecorationGLSLPacked = 9, + SpvDecorationCPacked = 10, + SpvDecorationBuiltIn = 11, + SpvDecorationNoPerspective = 13, + SpvDecorationFlat = 14, + SpvDecorationPatch = 15, + SpvDecorationCentroid = 16, + SpvDecorationSample = 17, + SpvDecorationInvariant = 18, + SpvDecorationRestrict = 19, + SpvDecorationAliased = 20, + SpvDecorationVolatile = 21, + SpvDecorationConstant = 22, + SpvDecorationCoherent = 23, + SpvDecorationNonWritable = 24, + SpvDecorationNonReadable = 25, + SpvDecorationUniform = 26, + SpvDecorationSaturatedConversion = 28, + SpvDecorationStream = 29, + SpvDecorationLocation = 30, + SpvDecorationComponent = 31, + SpvDecorationIndex = 32, + SpvDecorationBinding = 33, + SpvDecorationDescriptorSet = 34, + SpvDecorationOffset = 35, + SpvDecorationXfbBuffer = 36, + SpvDecorationXfbStride = 37, + SpvDecorationFuncParamAttr = 38, + SpvDecorationFPRoundingMode = 39, + SpvDecorationFPFastMathMode = 40, + SpvDecorationLinkageAttributes = 41, + SpvDecorationNoContraction = 42, + SpvDecorationInputAttachmentIndex = 43, + SpvDecorationAlignment = 44, + SpvDecorationMaxByteOffset = 45, + SpvDecorationAlignmentId = 46, + SpvDecorationMaxByteOffsetId = 47, + SpvDecorationExplicitInterpAMD = 4999, + SpvDecorationOverrideCoverageNV = 5248, + SpvDecorationPassthroughNV = 5250, + SpvDecorationViewportRelativeNV = 5252, + SpvDecorationSecondaryViewportRelativeNV = 5256, + SpvDecorationPerPrimitiveNV = 5271, + SpvDecorationPerViewNV = 5272, + SpvDecorationPerTaskNV = 5273, + SpvDecorationPerVertexNV = 5285, + SpvDecorationNonUniformEXT = 5300, + SpvDecorationHlslCounterBufferGOOGLE = 5634, + SpvDecorationHlslSemanticGOOGLE = 5635, + SpvDecorationMax = 0x7fffffff, +} SpvDecoration; + +typedef enum SpvBuiltIn_ { + SpvBuiltInPosition = 0, + SpvBuiltInPointSize = 1, + SpvBuiltInClipDistance = 3, + SpvBuiltInCullDistance = 4, + SpvBuiltInVertexId = 5, + SpvBuiltInInstanceId = 6, + SpvBuiltInPrimitiveId = 7, + SpvBuiltInInvocationId = 8, + SpvBuiltInLayer = 9, + SpvBuiltInViewportIndex = 10, + SpvBuiltInTessLevelOuter = 11, + SpvBuiltInTessLevelInner = 12, + SpvBuiltInTessCoord = 13, + SpvBuiltInPatchVertices = 14, + SpvBuiltInFragCoord = 15, + SpvBuiltInPointCoord = 16, + SpvBuiltInFrontFacing = 17, + SpvBuiltInSampleId = 18, + SpvBuiltInSamplePosition = 19, + SpvBuiltInSampleMask = 20, + SpvBuiltInFragDepth = 22, + SpvBuiltInHelperInvocation = 23, + SpvBuiltInNumWorkgroups = 24, + SpvBuiltInWorkgroupSize = 25, + SpvBuiltInWorkgroupId = 26, + SpvBuiltInLocalInvocationId = 27, + SpvBuiltInGlobalInvocationId = 28, + SpvBuiltInLocalInvocationIndex = 29, + SpvBuiltInWorkDim = 30, + SpvBuiltInGlobalSize = 31, + SpvBuiltInEnqueuedWorkgroupSize = 32, + SpvBuiltInGlobalOffset = 33, + SpvBuiltInGlobalLinearId = 34, + SpvBuiltInSubgroupSize = 36, + SpvBuiltInSubgroupMaxSize = 37, + SpvBuiltInNumSubgroups = 38, + SpvBuiltInNumEnqueuedSubgroups = 39, + SpvBuiltInSubgroupId = 40, + SpvBuiltInSubgroupLocalInvocationId = 41, + SpvBuiltInVertexIndex = 42, + SpvBuiltInInstanceIndex = 43, + SpvBuiltInSubgroupEqMask = 4416, + SpvBuiltInSubgroupEqMaskKHR = 4416, + SpvBuiltInSubgroupGeMask = 4417, + SpvBuiltInSubgroupGeMaskKHR = 4417, + SpvBuiltInSubgroupGtMask = 
4418, + SpvBuiltInSubgroupGtMaskKHR = 4418, + SpvBuiltInSubgroupLeMask = 4419, + SpvBuiltInSubgroupLeMaskKHR = 4419, + SpvBuiltInSubgroupLtMask = 4420, + SpvBuiltInSubgroupLtMaskKHR = 4420, + SpvBuiltInBaseVertex = 4424, + SpvBuiltInBaseInstance = 4425, + SpvBuiltInDrawIndex = 4426, + SpvBuiltInDeviceIndex = 4438, + SpvBuiltInViewIndex = 4440, + SpvBuiltInBaryCoordNoPerspAMD = 4992, + SpvBuiltInBaryCoordNoPerspCentroidAMD = 4993, + SpvBuiltInBaryCoordNoPerspSampleAMD = 4994, + SpvBuiltInBaryCoordSmoothAMD = 4995, + SpvBuiltInBaryCoordSmoothCentroidAMD = 4996, + SpvBuiltInBaryCoordSmoothSampleAMD = 4997, + SpvBuiltInBaryCoordPullModelAMD = 4998, + SpvBuiltInFragStencilRefEXT = 5014, + SpvBuiltInViewportMaskNV = 5253, + SpvBuiltInSecondaryPositionNV = 5257, + SpvBuiltInSecondaryViewportMaskNV = 5258, + SpvBuiltInPositionPerViewNV = 5261, + SpvBuiltInViewportMaskPerViewNV = 5262, + SpvBuiltInFullyCoveredEXT = 5264, + SpvBuiltInTaskCountNV = 5274, + SpvBuiltInPrimitiveCountNV = 5275, + SpvBuiltInPrimitiveIndicesNV = 5276, + SpvBuiltInClipDistancePerViewNV = 5277, + SpvBuiltInCullDistancePerViewNV = 5278, + SpvBuiltInLayerPerViewNV = 5279, + SpvBuiltInMeshViewCountNV = 5280, + SpvBuiltInMeshViewIndicesNV = 5281, + SpvBuiltInBaryCoordNV = 5286, + SpvBuiltInBaryCoordNoPerspNV = 5287, + SpvBuiltInFragmentSizeNV = 5292, + SpvBuiltInInvocationsPerPixelNV = 5293, + SpvBuiltInLaunchIdNVX = 5319, + SpvBuiltInLaunchSizeNVX = 5320, + SpvBuiltInWorldRayOriginNVX = 5321, + SpvBuiltInWorldRayDirectionNVX = 5322, + SpvBuiltInObjectRayOriginNVX = 5323, + SpvBuiltInObjectRayDirectionNVX = 5324, + SpvBuiltInRayTminNVX = 5325, + SpvBuiltInRayTmaxNVX = 5326, + SpvBuiltInInstanceCustomIndexNVX = 5327, + SpvBuiltInObjectToWorldNVX = 5330, + SpvBuiltInWorldToObjectNVX = 5331, + SpvBuiltInHitTNVX = 5332, + SpvBuiltInHitKindNVX = 5333, + SpvBuiltInMax = 0x7fffffff, +} SpvBuiltIn; + +typedef enum SpvSelectionControlShift_ { + SpvSelectionControlFlattenShift = 0, + SpvSelectionControlDontFlattenShift = 1, + SpvSelectionControlMax = 0x7fffffff, +} SpvSelectionControlShift; + +typedef enum SpvSelectionControlMask_ { + SpvSelectionControlMaskNone = 0, + SpvSelectionControlFlattenMask = 0x00000001, + SpvSelectionControlDontFlattenMask = 0x00000002, +} SpvSelectionControlMask; + +typedef enum SpvLoopControlShift_ { + SpvLoopControlUnrollShift = 0, + SpvLoopControlDontUnrollShift = 1, + SpvLoopControlDependencyInfiniteShift = 2, + SpvLoopControlDependencyLengthShift = 3, + SpvLoopControlMax = 0x7fffffff, +} SpvLoopControlShift; + +typedef enum SpvLoopControlMask_ { + SpvLoopControlMaskNone = 0, + SpvLoopControlUnrollMask = 0x00000001, + SpvLoopControlDontUnrollMask = 0x00000002, + SpvLoopControlDependencyInfiniteMask = 0x00000004, + SpvLoopControlDependencyLengthMask = 0x00000008, +} SpvLoopControlMask; + +typedef enum SpvFunctionControlShift_ { + SpvFunctionControlInlineShift = 0, + SpvFunctionControlDontInlineShift = 1, + SpvFunctionControlPureShift = 2, + SpvFunctionControlConstShift = 3, + SpvFunctionControlMax = 0x7fffffff, +} SpvFunctionControlShift; + +typedef enum SpvFunctionControlMask_ { + SpvFunctionControlMaskNone = 0, + SpvFunctionControlInlineMask = 0x00000001, + SpvFunctionControlDontInlineMask = 0x00000002, + SpvFunctionControlPureMask = 0x00000004, + SpvFunctionControlConstMask = 0x00000008, +} SpvFunctionControlMask; + +typedef enum SpvMemorySemanticsShift_ { + SpvMemorySemanticsAcquireShift = 1, + SpvMemorySemanticsReleaseShift = 2, + SpvMemorySemanticsAcquireReleaseShift = 3, + 
SpvMemorySemanticsSequentiallyConsistentShift = 4, + SpvMemorySemanticsUniformMemoryShift = 6, + SpvMemorySemanticsSubgroupMemoryShift = 7, + SpvMemorySemanticsWorkgroupMemoryShift = 8, + SpvMemorySemanticsCrossWorkgroupMemoryShift = 9, + SpvMemorySemanticsAtomicCounterMemoryShift = 10, + SpvMemorySemanticsImageMemoryShift = 11, + SpvMemorySemanticsOutputMemoryKHRShift = 12, + SpvMemorySemanticsMakeAvailableKHRShift = 13, + SpvMemorySemanticsMakeVisibleKHRShift = 14, + SpvMemorySemanticsMax = 0x7fffffff, +} SpvMemorySemanticsShift; + +typedef enum SpvMemorySemanticsMask_ { + SpvMemorySemanticsMaskNone = 0, + SpvMemorySemanticsAcquireMask = 0x00000002, + SpvMemorySemanticsReleaseMask = 0x00000004, + SpvMemorySemanticsAcquireReleaseMask = 0x00000008, + SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010, + SpvMemorySemanticsUniformMemoryMask = 0x00000040, + SpvMemorySemanticsSubgroupMemoryMask = 0x00000080, + SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100, + SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400, + SpvMemorySemanticsImageMemoryMask = 0x00000800, + SpvMemorySemanticsOutputMemoryKHRMask = 0x00001000, + SpvMemorySemanticsMakeAvailableKHRMask = 0x00002000, + SpvMemorySemanticsMakeVisibleKHRMask = 0x00004000, +} SpvMemorySemanticsMask; + +typedef enum SpvMemoryAccessShift_ { + SpvMemoryAccessVolatileShift = 0, + SpvMemoryAccessAlignedShift = 1, + SpvMemoryAccessNontemporalShift = 2, + SpvMemoryAccessMakePointerAvailableKHRShift = 3, + SpvMemoryAccessMakePointerVisibleKHRShift = 4, + SpvMemoryAccessNonPrivatePointerKHRShift = 5, + SpvMemoryAccessMax = 0x7fffffff, +} SpvMemoryAccessShift; + +typedef enum SpvMemoryAccessMask_ { + SpvMemoryAccessMaskNone = 0, + SpvMemoryAccessVolatileMask = 0x00000001, + SpvMemoryAccessAlignedMask = 0x00000002, + SpvMemoryAccessNontemporalMask = 0x00000004, + SpvMemoryAccessMakePointerAvailableKHRMask = 0x00000008, + SpvMemoryAccessMakePointerVisibleKHRMask = 0x00000010, + SpvMemoryAccessNonPrivatePointerKHRMask = 0x00000020, +} SpvMemoryAccessMask; + +typedef enum SpvScope_ { + SpvScopeCrossDevice = 0, + SpvScopeDevice = 1, + SpvScopeWorkgroup = 2, + SpvScopeSubgroup = 3, + SpvScopeInvocation = 4, + SpvScopeQueueFamilyKHR = 5, + SpvScopeMax = 0x7fffffff, +} SpvScope; + +typedef enum SpvGroupOperation_ { + SpvGroupOperationReduce = 0, + SpvGroupOperationInclusiveScan = 1, + SpvGroupOperationExclusiveScan = 2, + SpvGroupOperationClusteredReduce = 3, + SpvGroupOperationPartitionedReduceNV = 6, + SpvGroupOperationPartitionedInclusiveScanNV = 7, + SpvGroupOperationPartitionedExclusiveScanNV = 8, + SpvGroupOperationMax = 0x7fffffff, +} SpvGroupOperation; + +typedef enum SpvKernelEnqueueFlags_ { + SpvKernelEnqueueFlagsNoWait = 0, + SpvKernelEnqueueFlagsWaitKernel = 1, + SpvKernelEnqueueFlagsWaitWorkGroup = 2, + SpvKernelEnqueueFlagsMax = 0x7fffffff, +} SpvKernelEnqueueFlags; + +typedef enum SpvKernelProfilingInfoShift_ { + SpvKernelProfilingInfoCmdExecTimeShift = 0, + SpvKernelProfilingInfoMax = 0x7fffffff, +} SpvKernelProfilingInfoShift; + +typedef enum SpvKernelProfilingInfoMask_ { + SpvKernelProfilingInfoMaskNone = 0, + SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001, +} SpvKernelProfilingInfoMask; + +typedef enum SpvCapability_ { + SpvCapabilityMatrix = 0, + SpvCapabilityShader = 1, + SpvCapabilityGeometry = 2, + SpvCapabilityTessellation = 3, + SpvCapabilityAddresses = 4, + SpvCapabilityLinkage = 5, + SpvCapabilityKernel = 6, + SpvCapabilityVector16 = 7, + 
SpvCapabilityFloat16Buffer = 8, + SpvCapabilityFloat16 = 9, + SpvCapabilityFloat64 = 10, + SpvCapabilityInt64 = 11, + SpvCapabilityInt64Atomics = 12, + SpvCapabilityImageBasic = 13, + SpvCapabilityImageReadWrite = 14, + SpvCapabilityImageMipmap = 15, + SpvCapabilityPipes = 17, + SpvCapabilityGroups = 18, + SpvCapabilityDeviceEnqueue = 19, + SpvCapabilityLiteralSampler = 20, + SpvCapabilityAtomicStorage = 21, + SpvCapabilityInt16 = 22, + SpvCapabilityTessellationPointSize = 23, + SpvCapabilityGeometryPointSize = 24, + SpvCapabilityImageGatherExtended = 25, + SpvCapabilityStorageImageMultisample = 27, + SpvCapabilityUniformBufferArrayDynamicIndexing = 28, + SpvCapabilitySampledImageArrayDynamicIndexing = 29, + SpvCapabilityStorageBufferArrayDynamicIndexing = 30, + SpvCapabilityStorageImageArrayDynamicIndexing = 31, + SpvCapabilityClipDistance = 32, + SpvCapabilityCullDistance = 33, + SpvCapabilityImageCubeArray = 34, + SpvCapabilitySampleRateShading = 35, + SpvCapabilityImageRect = 36, + SpvCapabilitySampledRect = 37, + SpvCapabilityGenericPointer = 38, + SpvCapabilityInt8 = 39, + SpvCapabilityInputAttachment = 40, + SpvCapabilitySparseResidency = 41, + SpvCapabilityMinLod = 42, + SpvCapabilitySampled1D = 43, + SpvCapabilityImage1D = 44, + SpvCapabilitySampledCubeArray = 45, + SpvCapabilitySampledBuffer = 46, + SpvCapabilityImageBuffer = 47, + SpvCapabilityImageMSArray = 48, + SpvCapabilityStorageImageExtendedFormats = 49, + SpvCapabilityImageQuery = 50, + SpvCapabilityDerivativeControl = 51, + SpvCapabilityInterpolationFunction = 52, + SpvCapabilityTransformFeedback = 53, + SpvCapabilityGeometryStreams = 54, + SpvCapabilityStorageImageReadWithoutFormat = 55, + SpvCapabilityStorageImageWriteWithoutFormat = 56, + SpvCapabilityMultiViewport = 57, + SpvCapabilitySubgroupDispatch = 58, + SpvCapabilityNamedBarrier = 59, + SpvCapabilityPipeStorage = 60, + SpvCapabilityGroupNonUniform = 61, + SpvCapabilityGroupNonUniformVote = 62, + SpvCapabilityGroupNonUniformArithmetic = 63, + SpvCapabilityGroupNonUniformBallot = 64, + SpvCapabilityGroupNonUniformShuffle = 65, + SpvCapabilityGroupNonUniformShuffleRelative = 66, + SpvCapabilityGroupNonUniformClustered = 67, + SpvCapabilityGroupNonUniformQuad = 68, + SpvCapabilitySubgroupBallotKHR = 4423, + SpvCapabilityDrawParameters = 4427, + SpvCapabilitySubgroupVoteKHR = 4431, + SpvCapabilityStorageBuffer16BitAccess = 4433, + SpvCapabilityStorageUniformBufferBlock16 = 4433, + SpvCapabilityStorageUniform16 = 4434, + SpvCapabilityUniformAndStorageBuffer16BitAccess = 4434, + SpvCapabilityStoragePushConstant16 = 4435, + SpvCapabilityStorageInputOutput16 = 4436, + SpvCapabilityDeviceGroup = 4437, + SpvCapabilityMultiView = 4439, + SpvCapabilityVariablePointersStorageBuffer = 4441, + SpvCapabilityVariablePointers = 4442, + SpvCapabilityAtomicStorageOps = 4445, + SpvCapabilitySampleMaskPostDepthCoverage = 4447, + SpvCapabilityStorageBuffer8BitAccess = 4448, + SpvCapabilityUniformAndStorageBuffer8BitAccess = 4449, + SpvCapabilityStoragePushConstant8 = 4450, + SpvCapabilityFloat16ImageAMD = 5008, + SpvCapabilityImageGatherBiasLodAMD = 5009, + SpvCapabilityFragmentMaskAMD = 5010, + SpvCapabilityStencilExportEXT = 5013, + SpvCapabilityImageReadWriteLodAMD = 5015, + SpvCapabilitySampleMaskOverrideCoverageNV = 5249, + SpvCapabilityGeometryShaderPassthroughNV = 5251, + SpvCapabilityShaderViewportIndexLayerEXT = 5254, + SpvCapabilityShaderViewportIndexLayerNV = 5254, + SpvCapabilityShaderViewportMaskNV = 5255, + SpvCapabilityShaderStereoViewNV = 5259, + 
SpvCapabilityPerViewAttributesNV = 5260, + SpvCapabilityFragmentFullyCoveredEXT = 5265, + SpvCapabilityMeshShadingNV = 5266, + SpvCapabilityImageFootprintNV = 5282, + SpvCapabilityFragmentBarycentricNV = 5284, + SpvCapabilityComputeDerivativeGroupQuadsNV = 5288, + SpvCapabilityShadingRateNV = 5291, + SpvCapabilityGroupNonUniformPartitionedNV = 5297, + SpvCapabilityShaderNonUniformEXT = 5301, + SpvCapabilityRuntimeDescriptorArrayEXT = 5302, + SpvCapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + SpvCapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + SpvCapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + SpvCapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + SpvCapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + SpvCapabilityRaytracingNVX = 5340, + SpvCapabilityVulkanMemoryModelKHR = 5345, + SpvCapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + SpvCapabilityComputeDerivativeGroupLinearNV = 5350, + SpvCapabilitySubgroupShuffleINTEL = 5568, + SpvCapabilitySubgroupBufferBlockIOINTEL = 5569, + SpvCapabilitySubgroupImageBlockIOINTEL = 5570, + SpvCapabilityMax = 0x7fffffff, +} SpvCapability; + +typedef enum SpvOp_ { + SpvOpNop = 0, + SpvOpUndef = 1, + SpvOpSourceContinued = 2, + SpvOpSource = 3, + SpvOpSourceExtension = 4, + SpvOpName = 5, + SpvOpMemberName = 6, + SpvOpString = 7, + SpvOpLine = 8, + SpvOpExtension = 10, + SpvOpExtInstImport = 11, + SpvOpExtInst = 12, + SpvOpMemoryModel = 14, + SpvOpEntryPoint = 15, + SpvOpExecutionMode = 16, + SpvOpCapability = 17, + SpvOpTypeVoid = 19, + SpvOpTypeBool = 20, + SpvOpTypeInt = 21, + SpvOpTypeFloat = 22, + SpvOpTypeVector = 23, + SpvOpTypeMatrix = 24, + SpvOpTypeImage = 25, + SpvOpTypeSampler = 26, + SpvOpTypeSampledImage = 27, + SpvOpTypeArray = 28, + SpvOpTypeRuntimeArray = 29, + SpvOpTypeStruct = 30, + SpvOpTypeOpaque = 31, + SpvOpTypePointer = 32, + SpvOpTypeFunction = 33, + SpvOpTypeEvent = 34, + SpvOpTypeDeviceEvent = 35, + SpvOpTypeReserveId = 36, + SpvOpTypeQueue = 37, + SpvOpTypePipe = 38, + SpvOpTypeForwardPointer = 39, + SpvOpConstantTrue = 41, + SpvOpConstantFalse = 42, + SpvOpConstant = 43, + SpvOpConstantComposite = 44, + SpvOpConstantSampler = 45, + SpvOpConstantNull = 46, + SpvOpSpecConstantTrue = 48, + SpvOpSpecConstantFalse = 49, + SpvOpSpecConstant = 50, + SpvOpSpecConstantComposite = 51, + SpvOpSpecConstantOp = 52, + SpvOpFunction = 54, + SpvOpFunctionParameter = 55, + SpvOpFunctionEnd = 56, + SpvOpFunctionCall = 57, + SpvOpVariable = 59, + SpvOpImageTexelPointer = 60, + SpvOpLoad = 61, + SpvOpStore = 62, + SpvOpCopyMemory = 63, + SpvOpCopyMemorySized = 64, + SpvOpAccessChain = 65, + SpvOpInBoundsAccessChain = 66, + SpvOpPtrAccessChain = 67, + SpvOpArrayLength = 68, + SpvOpGenericPtrMemSemantics = 69, + SpvOpInBoundsPtrAccessChain = 70, + SpvOpDecorate = 71, + SpvOpMemberDecorate = 72, + SpvOpDecorationGroup = 73, + SpvOpGroupDecorate = 74, + SpvOpGroupMemberDecorate = 75, + SpvOpVectorExtractDynamic = 77, + SpvOpVectorInsertDynamic = 78, + SpvOpVectorShuffle = 79, + SpvOpCompositeConstruct = 80, + SpvOpCompositeExtract = 81, + SpvOpCompositeInsert = 82, + SpvOpCopyObject = 83, + SpvOpTranspose = 84, + SpvOpSampledImage = 86, + SpvOpImageSampleImplicitLod = 87, + 
SpvOpImageSampleExplicitLod = 88, + SpvOpImageSampleDrefImplicitLod = 89, + SpvOpImageSampleDrefExplicitLod = 90, + SpvOpImageSampleProjImplicitLod = 91, + SpvOpImageSampleProjExplicitLod = 92, + SpvOpImageSampleProjDrefImplicitLod = 93, + SpvOpImageSampleProjDrefExplicitLod = 94, + SpvOpImageFetch = 95, + SpvOpImageGather = 96, + SpvOpImageDrefGather = 97, + SpvOpImageRead = 98, + SpvOpImageWrite = 99, + SpvOpImage = 100, + SpvOpImageQueryFormat = 101, + SpvOpImageQueryOrder = 102, + SpvOpImageQuerySizeLod = 103, + SpvOpImageQuerySize = 104, + SpvOpImageQueryLod = 105, + SpvOpImageQueryLevels = 106, + SpvOpImageQuerySamples = 107, + SpvOpConvertFToU = 109, + SpvOpConvertFToS = 110, + SpvOpConvertSToF = 111, + SpvOpConvertUToF = 112, + SpvOpUConvert = 113, + SpvOpSConvert = 114, + SpvOpFConvert = 115, + SpvOpQuantizeToF16 = 116, + SpvOpConvertPtrToU = 117, + SpvOpSatConvertSToU = 118, + SpvOpSatConvertUToS = 119, + SpvOpConvertUToPtr = 120, + SpvOpPtrCastToGeneric = 121, + SpvOpGenericCastToPtr = 122, + SpvOpGenericCastToPtrExplicit = 123, + SpvOpBitcast = 124, + SpvOpSNegate = 126, + SpvOpFNegate = 127, + SpvOpIAdd = 128, + SpvOpFAdd = 129, + SpvOpISub = 130, + SpvOpFSub = 131, + SpvOpIMul = 132, + SpvOpFMul = 133, + SpvOpUDiv = 134, + SpvOpSDiv = 135, + SpvOpFDiv = 136, + SpvOpUMod = 137, + SpvOpSRem = 138, + SpvOpSMod = 139, + SpvOpFRem = 140, + SpvOpFMod = 141, + SpvOpVectorTimesScalar = 142, + SpvOpMatrixTimesScalar = 143, + SpvOpVectorTimesMatrix = 144, + SpvOpMatrixTimesVector = 145, + SpvOpMatrixTimesMatrix = 146, + SpvOpOuterProduct = 147, + SpvOpDot = 148, + SpvOpIAddCarry = 149, + SpvOpISubBorrow = 150, + SpvOpUMulExtended = 151, + SpvOpSMulExtended = 152, + SpvOpAny = 154, + SpvOpAll = 155, + SpvOpIsNan = 156, + SpvOpIsInf = 157, + SpvOpIsFinite = 158, + SpvOpIsNormal = 159, + SpvOpSignBitSet = 160, + SpvOpLessOrGreater = 161, + SpvOpOrdered = 162, + SpvOpUnordered = 163, + SpvOpLogicalEqual = 164, + SpvOpLogicalNotEqual = 165, + SpvOpLogicalOr = 166, + SpvOpLogicalAnd = 167, + SpvOpLogicalNot = 168, + SpvOpSelect = 169, + SpvOpIEqual = 170, + SpvOpINotEqual = 171, + SpvOpUGreaterThan = 172, + SpvOpSGreaterThan = 173, + SpvOpUGreaterThanEqual = 174, + SpvOpSGreaterThanEqual = 175, + SpvOpULessThan = 176, + SpvOpSLessThan = 177, + SpvOpULessThanEqual = 178, + SpvOpSLessThanEqual = 179, + SpvOpFOrdEqual = 180, + SpvOpFUnordEqual = 181, + SpvOpFOrdNotEqual = 182, + SpvOpFUnordNotEqual = 183, + SpvOpFOrdLessThan = 184, + SpvOpFUnordLessThan = 185, + SpvOpFOrdGreaterThan = 186, + SpvOpFUnordGreaterThan = 187, + SpvOpFOrdLessThanEqual = 188, + SpvOpFUnordLessThanEqual = 189, + SpvOpFOrdGreaterThanEqual = 190, + SpvOpFUnordGreaterThanEqual = 191, + SpvOpShiftRightLogical = 194, + SpvOpShiftRightArithmetic = 195, + SpvOpShiftLeftLogical = 196, + SpvOpBitwiseOr = 197, + SpvOpBitwiseXor = 198, + SpvOpBitwiseAnd = 199, + SpvOpNot = 200, + SpvOpBitFieldInsert = 201, + SpvOpBitFieldSExtract = 202, + SpvOpBitFieldUExtract = 203, + SpvOpBitReverse = 204, + SpvOpBitCount = 205, + SpvOpDPdx = 207, + SpvOpDPdy = 208, + SpvOpFwidth = 209, + SpvOpDPdxFine = 210, + SpvOpDPdyFine = 211, + SpvOpFwidthFine = 212, + SpvOpDPdxCoarse = 213, + SpvOpDPdyCoarse = 214, + SpvOpFwidthCoarse = 215, + SpvOpEmitVertex = 218, + SpvOpEndPrimitive = 219, + SpvOpEmitStreamVertex = 220, + SpvOpEndStreamPrimitive = 221, + SpvOpControlBarrier = 224, + SpvOpMemoryBarrier = 225, + SpvOpAtomicLoad = 227, + SpvOpAtomicStore = 228, + SpvOpAtomicExchange = 229, + SpvOpAtomicCompareExchange = 230, + 
SpvOpAtomicCompareExchangeWeak = 231, + SpvOpAtomicIIncrement = 232, + SpvOpAtomicIDecrement = 233, + SpvOpAtomicIAdd = 234, + SpvOpAtomicISub = 235, + SpvOpAtomicSMin = 236, + SpvOpAtomicUMin = 237, + SpvOpAtomicSMax = 238, + SpvOpAtomicUMax = 239, + SpvOpAtomicAnd = 240, + SpvOpAtomicOr = 241, + SpvOpAtomicXor = 242, + SpvOpPhi = 245, + SpvOpLoopMerge = 246, + SpvOpSelectionMerge = 247, + SpvOpLabel = 248, + SpvOpBranch = 249, + SpvOpBranchConditional = 250, + SpvOpSwitch = 251, + SpvOpKill = 252, + SpvOpReturn = 253, + SpvOpReturnValue = 254, + SpvOpUnreachable = 255, + SpvOpLifetimeStart = 256, + SpvOpLifetimeStop = 257, + SpvOpGroupAsyncCopy = 259, + SpvOpGroupWaitEvents = 260, + SpvOpGroupAll = 261, + SpvOpGroupAny = 262, + SpvOpGroupBroadcast = 263, + SpvOpGroupIAdd = 264, + SpvOpGroupFAdd = 265, + SpvOpGroupFMin = 266, + SpvOpGroupUMin = 267, + SpvOpGroupSMin = 268, + SpvOpGroupFMax = 269, + SpvOpGroupUMax = 270, + SpvOpGroupSMax = 271, + SpvOpReadPipe = 274, + SpvOpWritePipe = 275, + SpvOpReservedReadPipe = 276, + SpvOpReservedWritePipe = 277, + SpvOpReserveReadPipePackets = 278, + SpvOpReserveWritePipePackets = 279, + SpvOpCommitReadPipe = 280, + SpvOpCommitWritePipe = 281, + SpvOpIsValidReserveId = 282, + SpvOpGetNumPipePackets = 283, + SpvOpGetMaxPipePackets = 284, + SpvOpGroupReserveReadPipePackets = 285, + SpvOpGroupReserveWritePipePackets = 286, + SpvOpGroupCommitReadPipe = 287, + SpvOpGroupCommitWritePipe = 288, + SpvOpEnqueueMarker = 291, + SpvOpEnqueueKernel = 292, + SpvOpGetKernelNDrangeSubGroupCount = 293, + SpvOpGetKernelNDrangeMaxSubGroupSize = 294, + SpvOpGetKernelWorkGroupSize = 295, + SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296, + SpvOpRetainEvent = 297, + SpvOpReleaseEvent = 298, + SpvOpCreateUserEvent = 299, + SpvOpIsValidEvent = 300, + SpvOpSetUserEventStatus = 301, + SpvOpCaptureEventProfilingInfo = 302, + SpvOpGetDefaultQueue = 303, + SpvOpBuildNDRange = 304, + SpvOpImageSparseSampleImplicitLod = 305, + SpvOpImageSparseSampleExplicitLod = 306, + SpvOpImageSparseSampleDrefImplicitLod = 307, + SpvOpImageSparseSampleDrefExplicitLod = 308, + SpvOpImageSparseSampleProjImplicitLod = 309, + SpvOpImageSparseSampleProjExplicitLod = 310, + SpvOpImageSparseSampleProjDrefImplicitLod = 311, + SpvOpImageSparseSampleProjDrefExplicitLod = 312, + SpvOpImageSparseFetch = 313, + SpvOpImageSparseGather = 314, + SpvOpImageSparseDrefGather = 315, + SpvOpImageSparseTexelsResident = 316, + SpvOpNoLine = 317, + SpvOpAtomicFlagTestAndSet = 318, + SpvOpAtomicFlagClear = 319, + SpvOpImageSparseRead = 320, + SpvOpSizeOf = 321, + SpvOpTypePipeStorage = 322, + SpvOpConstantPipeStorage = 323, + SpvOpCreatePipeFromPipeStorage = 324, + SpvOpGetKernelLocalSizeForSubgroupCount = 325, + SpvOpGetKernelMaxNumSubgroups = 326, + SpvOpTypeNamedBarrier = 327, + SpvOpNamedBarrierInitialize = 328, + SpvOpMemoryNamedBarrier = 329, + SpvOpModuleProcessed = 330, + SpvOpExecutionModeId = 331, + SpvOpDecorateId = 332, + SpvOpGroupNonUniformElect = 333, + SpvOpGroupNonUniformAll = 334, + SpvOpGroupNonUniformAny = 335, + SpvOpGroupNonUniformAllEqual = 336, + SpvOpGroupNonUniformBroadcast = 337, + SpvOpGroupNonUniformBroadcastFirst = 338, + SpvOpGroupNonUniformBallot = 339, + SpvOpGroupNonUniformInverseBallot = 340, + SpvOpGroupNonUniformBallotBitExtract = 341, + SpvOpGroupNonUniformBallotBitCount = 342, + SpvOpGroupNonUniformBallotFindLSB = 343, + SpvOpGroupNonUniformBallotFindMSB = 344, + SpvOpGroupNonUniformShuffle = 345, + SpvOpGroupNonUniformShuffleXor = 346, + SpvOpGroupNonUniformShuffleUp = 347, + 
SpvOpGroupNonUniformShuffleDown = 348, + SpvOpGroupNonUniformIAdd = 349, + SpvOpGroupNonUniformFAdd = 350, + SpvOpGroupNonUniformIMul = 351, + SpvOpGroupNonUniformFMul = 352, + SpvOpGroupNonUniformSMin = 353, + SpvOpGroupNonUniformUMin = 354, + SpvOpGroupNonUniformFMin = 355, + SpvOpGroupNonUniformSMax = 356, + SpvOpGroupNonUniformUMax = 357, + SpvOpGroupNonUniformFMax = 358, + SpvOpGroupNonUniformBitwiseAnd = 359, + SpvOpGroupNonUniformBitwiseOr = 360, + SpvOpGroupNonUniformBitwiseXor = 361, + SpvOpGroupNonUniformLogicalAnd = 362, + SpvOpGroupNonUniformLogicalOr = 363, + SpvOpGroupNonUniformLogicalXor = 364, + SpvOpGroupNonUniformQuadBroadcast = 365, + SpvOpGroupNonUniformQuadSwap = 366, + SpvOpSubgroupBallotKHR = 4421, + SpvOpSubgroupFirstInvocationKHR = 4422, + SpvOpSubgroupAllKHR = 4428, + SpvOpSubgroupAnyKHR = 4429, + SpvOpSubgroupAllEqualKHR = 4430, + SpvOpSubgroupReadInvocationKHR = 4432, + SpvOpGroupIAddNonUniformAMD = 5000, + SpvOpGroupFAddNonUniformAMD = 5001, + SpvOpGroupFMinNonUniformAMD = 5002, + SpvOpGroupUMinNonUniformAMD = 5003, + SpvOpGroupSMinNonUniformAMD = 5004, + SpvOpGroupFMaxNonUniformAMD = 5005, + SpvOpGroupUMaxNonUniformAMD = 5006, + SpvOpGroupSMaxNonUniformAMD = 5007, + SpvOpFragmentMaskFetchAMD = 5011, + SpvOpFragmentFetchAMD = 5012, + SpvOpImageSampleFootprintNV = 5283, + SpvOpGroupNonUniformPartitionNV = 5296, + SpvOpWritePackedPrimitiveIndices4x8NV = 5299, + SpvOpReportIntersectionNVX = 5334, + SpvOpIgnoreIntersectionNVX = 5335, + SpvOpTerminateRayNVX = 5336, + SpvOpTraceNVX = 5337, + SpvOpTypeAccelerationStructureNVX = 5341, + SpvOpSubgroupShuffleINTEL = 5571, + SpvOpSubgroupShuffleDownINTEL = 5572, + SpvOpSubgroupShuffleUpINTEL = 5573, + SpvOpSubgroupShuffleXorINTEL = 5574, + SpvOpSubgroupBlockReadINTEL = 5575, + SpvOpSubgroupBlockWriteINTEL = 5576, + SpvOpSubgroupImageBlockReadINTEL = 5577, + SpvOpSubgroupImageBlockWriteINTEL = 5578, + SpvOpDecorateStringGOOGLE = 5632, + SpvOpMemberDecorateStringGOOGLE = 5633, + SpvOpMax = 0x7fffffff, +} SpvOp; + +#endif // #ifndef spirv_H + diff --git a/src/refresh/vkpt/include/vulkan/spirv.hpp b/src/refresh/vkpt/include/vulkan/spirv.hpp new file mode 100644 index 0000000..7c8f033 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/spirv.hpp @@ -0,0 +1,1186 @@ +// Copyright (c) 2014-2018 The Khronos Group Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and/or associated documentation files (the "Materials"), +// to deal in the Materials without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Materials, and to permit persons to whom the +// Materials are furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Materials. +// +// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +// +// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +// IN THE MATERIALS. + +// This header is automatically generated by the same tool that creates +// the Binary Section of the SPIR-V specification. + +// Enumeration tokens for SPIR-V, in various styles: +// C, C++, C++11, JSON, Lua, Python +// +// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +// +// Some tokens act like mask values, which can be OR'd together, +// while others are mutually exclusive. The mask-like ones have +// "Mask" in their name, and a parallel enum that has the shift +// amount (1 << x) for each corresponding enumerant. + +#ifndef spirv_HPP +#define spirv_HPP + +namespace spv { + +typedef unsigned int Id; + +#define SPV_VERSION 0x10300 +#define SPV_REVISION 1 + +static const unsigned int MagicNumber = 0x07230203; +static const unsigned int Version = 0x00010300; +static const unsigned int Revision = 1; +static const unsigned int OpCodeMask = 0xffff; +static const unsigned int WordCountShift = 16; + +enum SourceLanguage { + SourceLanguageUnknown = 0, + SourceLanguageESSL = 1, + SourceLanguageGLSL = 2, + SourceLanguageOpenCL_C = 3, + SourceLanguageOpenCL_CPP = 4, + SourceLanguageHLSL = 5, + SourceLanguageMax = 0x7fffffff, +}; + +enum ExecutionModel { + ExecutionModelVertex = 0, + ExecutionModelTessellationControl = 1, + ExecutionModelTessellationEvaluation = 2, + ExecutionModelGeometry = 3, + ExecutionModelFragment = 4, + ExecutionModelGLCompute = 5, + ExecutionModelKernel = 6, + ExecutionModelTaskNV = 5267, + ExecutionModelMeshNV = 5268, + ExecutionModelRayGenerationNVX = 5313, + ExecutionModelIntersectionNVX = 5314, + ExecutionModelAnyHitNVX = 5315, + ExecutionModelClosestHitNVX = 5316, + ExecutionModelMissNVX = 5317, + ExecutionModelCallableNVX = 5318, + ExecutionModelMax = 0x7fffffff, +}; + +enum AddressingModel { + AddressingModelLogical = 0, + AddressingModelPhysical32 = 1, + AddressingModelPhysical64 = 2, + AddressingModelMax = 0x7fffffff, +}; + +enum MemoryModel { + MemoryModelSimple = 0, + MemoryModelGLSL450 = 1, + MemoryModelOpenCL = 2, + MemoryModelVulkanKHR = 3, + MemoryModelMax = 0x7fffffff, +}; + +enum ExecutionMode { + ExecutionModeInvocations = 0, + ExecutionModeSpacingEqual = 1, + ExecutionModeSpacingFractionalEven = 2, + ExecutionModeSpacingFractionalOdd = 3, + ExecutionModeVertexOrderCw = 4, + ExecutionModeVertexOrderCcw = 5, + ExecutionModePixelCenterInteger = 6, + ExecutionModeOriginUpperLeft = 7, + ExecutionModeOriginLowerLeft = 8, + ExecutionModeEarlyFragmentTests = 9, + ExecutionModePointMode = 10, + ExecutionModeXfb = 11, + ExecutionModeDepthReplacing = 12, + ExecutionModeDepthGreater = 14, + ExecutionModeDepthLess = 15, + ExecutionModeDepthUnchanged = 16, + ExecutionModeLocalSize = 17, + ExecutionModeLocalSizeHint = 18, + ExecutionModeInputPoints = 19, + ExecutionModeInputLines = 20, + ExecutionModeInputLinesAdjacency = 21, + ExecutionModeTriangles = 22, + ExecutionModeInputTrianglesAdjacency = 23, + ExecutionModeQuads = 24, + ExecutionModeIsolines = 25, + 
ExecutionModeOutputVertices = 26, + ExecutionModeOutputPoints = 27, + ExecutionModeOutputLineStrip = 28, + ExecutionModeOutputTriangleStrip = 29, + ExecutionModeVecTypeHint = 30, + ExecutionModeContractionOff = 31, + ExecutionModeInitializer = 33, + ExecutionModeFinalizer = 34, + ExecutionModeSubgroupSize = 35, + ExecutionModeSubgroupsPerWorkgroup = 36, + ExecutionModeSubgroupsPerWorkgroupId = 37, + ExecutionModeLocalSizeId = 38, + ExecutionModeLocalSizeHintId = 39, + ExecutionModePostDepthCoverage = 4446, + ExecutionModeStencilRefReplacingEXT = 5027, + ExecutionModeOutputLinesNV = 5269, + ExecutionModeOutputPrimitivesNV = 5270, + ExecutionModeDerivativeGroupQuadsNV = 5289, + ExecutionModeDerivativeGroupLinearNV = 5290, + ExecutionModeOutputTrianglesNV = 5298, + ExecutionModeMax = 0x7fffffff, +}; + +enum StorageClass { + StorageClassUniformConstant = 0, + StorageClassInput = 1, + StorageClassUniform = 2, + StorageClassOutput = 3, + StorageClassWorkgroup = 4, + StorageClassCrossWorkgroup = 5, + StorageClassPrivate = 6, + StorageClassFunction = 7, + StorageClassGeneric = 8, + StorageClassPushConstant = 9, + StorageClassAtomicCounter = 10, + StorageClassImage = 11, + StorageClassStorageBuffer = 12, + StorageClassRayPayloadNVX = 5338, + StorageClassHitAttributeNVX = 5339, + StorageClassIncomingRayPayloadNVX = 5342, + StorageClassShaderRecordBufferNVX = 5343, + StorageClassMax = 0x7fffffff, +}; + +enum Dim { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + DimCube = 3, + DimRect = 4, + DimBuffer = 5, + DimSubpassData = 6, + DimMax = 0x7fffffff, +}; + +enum SamplerAddressingMode { + SamplerAddressingModeNone = 0, + SamplerAddressingModeClampToEdge = 1, + SamplerAddressingModeClamp = 2, + SamplerAddressingModeRepeat = 3, + SamplerAddressingModeRepeatMirrored = 4, + SamplerAddressingModeMax = 0x7fffffff, +}; + +enum SamplerFilterMode { + SamplerFilterModeNearest = 0, + SamplerFilterModeLinear = 1, + SamplerFilterModeMax = 0x7fffffff, +}; + +enum ImageFormat { + ImageFormatUnknown = 0, + ImageFormatRgba32f = 1, + ImageFormatRgba16f = 2, + ImageFormatR32f = 3, + ImageFormatRgba8 = 4, + ImageFormatRgba8Snorm = 5, + ImageFormatRg32f = 6, + ImageFormatRg16f = 7, + ImageFormatR11fG11fB10f = 8, + ImageFormatR16f = 9, + ImageFormatRgba16 = 10, + ImageFormatRgb10A2 = 11, + ImageFormatRg16 = 12, + ImageFormatRg8 = 13, + ImageFormatR16 = 14, + ImageFormatR8 = 15, + ImageFormatRgba16Snorm = 16, + ImageFormatRg16Snorm = 17, + ImageFormatRg8Snorm = 18, + ImageFormatR16Snorm = 19, + ImageFormatR8Snorm = 20, + ImageFormatRgba32i = 21, + ImageFormatRgba16i = 22, + ImageFormatRgba8i = 23, + ImageFormatR32i = 24, + ImageFormatRg32i = 25, + ImageFormatRg16i = 26, + ImageFormatRg8i = 27, + ImageFormatR16i = 28, + ImageFormatR8i = 29, + ImageFormatRgba32ui = 30, + ImageFormatRgba16ui = 31, + ImageFormatRgba8ui = 32, + ImageFormatR32ui = 33, + ImageFormatRgb10a2ui = 34, + ImageFormatRg32ui = 35, + ImageFormatRg16ui = 36, + ImageFormatRg8ui = 37, + ImageFormatR16ui = 38, + ImageFormatR8ui = 39, + ImageFormatMax = 0x7fffffff, +}; + +enum ImageChannelOrder { + ImageChannelOrderR = 0, + ImageChannelOrderA = 1, + ImageChannelOrderRG = 2, + ImageChannelOrderRA = 3, + ImageChannelOrderRGB = 4, + ImageChannelOrderRGBA = 5, + ImageChannelOrderBGRA = 6, + ImageChannelOrderARGB = 7, + ImageChannelOrderIntensity = 8, + ImageChannelOrderLuminance = 9, + ImageChannelOrderRx = 10, + ImageChannelOrderRGx = 11, + ImageChannelOrderRGBx = 12, + ImageChannelOrderDepth = 13, + ImageChannelOrderDepthStencil = 14, + ImageChannelOrdersRGB = 15, + 
ImageChannelOrdersRGBx = 16, + ImageChannelOrdersRGBA = 17, + ImageChannelOrdersBGRA = 18, + ImageChannelOrderABGR = 19, + ImageChannelOrderMax = 0x7fffffff, +}; + +enum ImageChannelDataType { + ImageChannelDataTypeSnormInt8 = 0, + ImageChannelDataTypeSnormInt16 = 1, + ImageChannelDataTypeUnormInt8 = 2, + ImageChannelDataTypeUnormInt16 = 3, + ImageChannelDataTypeUnormShort565 = 4, + ImageChannelDataTypeUnormShort555 = 5, + ImageChannelDataTypeUnormInt101010 = 6, + ImageChannelDataTypeSignedInt8 = 7, + ImageChannelDataTypeSignedInt16 = 8, + ImageChannelDataTypeSignedInt32 = 9, + ImageChannelDataTypeUnsignedInt8 = 10, + ImageChannelDataTypeUnsignedInt16 = 11, + ImageChannelDataTypeUnsignedInt32 = 12, + ImageChannelDataTypeHalfFloat = 13, + ImageChannelDataTypeFloat = 14, + ImageChannelDataTypeUnormInt24 = 15, + ImageChannelDataTypeUnormInt101010_2 = 16, + ImageChannelDataTypeMax = 0x7fffffff, +}; + +enum ImageOperandsShift { + ImageOperandsBiasShift = 0, + ImageOperandsLodShift = 1, + ImageOperandsGradShift = 2, + ImageOperandsConstOffsetShift = 3, + ImageOperandsOffsetShift = 4, + ImageOperandsConstOffsetsShift = 5, + ImageOperandsSampleShift = 6, + ImageOperandsMinLodShift = 7, + ImageOperandsMakeTexelAvailableKHRShift = 8, + ImageOperandsMakeTexelVisibleKHRShift = 9, + ImageOperandsNonPrivateTexelKHRShift = 10, + ImageOperandsVolatileTexelKHRShift = 11, + ImageOperandsMax = 0x7fffffff, +}; + +enum ImageOperandsMask { + ImageOperandsMaskNone = 0, + ImageOperandsBiasMask = 0x00000001, + ImageOperandsLodMask = 0x00000002, + ImageOperandsGradMask = 0x00000004, + ImageOperandsConstOffsetMask = 0x00000008, + ImageOperandsOffsetMask = 0x00000010, + ImageOperandsConstOffsetsMask = 0x00000020, + ImageOperandsSampleMask = 0x00000040, + ImageOperandsMinLodMask = 0x00000080, + ImageOperandsMakeTexelAvailableKHRMask = 0x00000100, + ImageOperandsMakeTexelVisibleKHRMask = 0x00000200, + ImageOperandsNonPrivateTexelKHRMask = 0x00000400, + ImageOperandsVolatileTexelKHRMask = 0x00000800, +}; + +enum FPFastMathModeShift { + FPFastMathModeNotNaNShift = 0, + FPFastMathModeNotInfShift = 1, + FPFastMathModeNSZShift = 2, + FPFastMathModeAllowRecipShift = 3, + FPFastMathModeFastShift = 4, + FPFastMathModeMax = 0x7fffffff, +}; + +enum FPFastMathModeMask { + FPFastMathModeMaskNone = 0, + FPFastMathModeNotNaNMask = 0x00000001, + FPFastMathModeNotInfMask = 0x00000002, + FPFastMathModeNSZMask = 0x00000004, + FPFastMathModeAllowRecipMask = 0x00000008, + FPFastMathModeFastMask = 0x00000010, +}; + +enum FPRoundingMode { + FPRoundingModeRTE = 0, + FPRoundingModeRTZ = 1, + FPRoundingModeRTP = 2, + FPRoundingModeRTN = 3, + FPRoundingModeMax = 0x7fffffff, +}; + +enum LinkageType { + LinkageTypeExport = 0, + LinkageTypeImport = 1, + LinkageTypeMax = 0x7fffffff, +}; + +enum AccessQualifier { + AccessQualifierReadOnly = 0, + AccessQualifierWriteOnly = 1, + AccessQualifierReadWrite = 2, + AccessQualifierMax = 0x7fffffff, +}; + +enum FunctionParameterAttribute { + FunctionParameterAttributeZext = 0, + FunctionParameterAttributeSext = 1, + FunctionParameterAttributeByVal = 2, + FunctionParameterAttributeSret = 3, + FunctionParameterAttributeNoAlias = 4, + FunctionParameterAttributeNoCapture = 5, + FunctionParameterAttributeNoWrite = 6, + FunctionParameterAttributeNoReadWrite = 7, + FunctionParameterAttributeMax = 0x7fffffff, +}; + +enum Decoration { + DecorationRelaxedPrecision = 0, + DecorationSpecId = 1, + DecorationBlock = 2, + DecorationBufferBlock = 3, + DecorationRowMajor = 4, + DecorationColMajor = 5, + DecorationArrayStride 
= 6, + DecorationMatrixStride = 7, + DecorationGLSLShared = 8, + DecorationGLSLPacked = 9, + DecorationCPacked = 10, + DecorationBuiltIn = 11, + DecorationNoPerspective = 13, + DecorationFlat = 14, + DecorationPatch = 15, + DecorationCentroid = 16, + DecorationSample = 17, + DecorationInvariant = 18, + DecorationRestrict = 19, + DecorationAliased = 20, + DecorationVolatile = 21, + DecorationConstant = 22, + DecorationCoherent = 23, + DecorationNonWritable = 24, + DecorationNonReadable = 25, + DecorationUniform = 26, + DecorationSaturatedConversion = 28, + DecorationStream = 29, + DecorationLocation = 30, + DecorationComponent = 31, + DecorationIndex = 32, + DecorationBinding = 33, + DecorationDescriptorSet = 34, + DecorationOffset = 35, + DecorationXfbBuffer = 36, + DecorationXfbStride = 37, + DecorationFuncParamAttr = 38, + DecorationFPRoundingMode = 39, + DecorationFPFastMathMode = 40, + DecorationLinkageAttributes = 41, + DecorationNoContraction = 42, + DecorationInputAttachmentIndex = 43, + DecorationAlignment = 44, + DecorationMaxByteOffset = 45, + DecorationAlignmentId = 46, + DecorationMaxByteOffsetId = 47, + DecorationExplicitInterpAMD = 4999, + DecorationOverrideCoverageNV = 5248, + DecorationPassthroughNV = 5250, + DecorationViewportRelativeNV = 5252, + DecorationSecondaryViewportRelativeNV = 5256, + DecorationPerPrimitiveNV = 5271, + DecorationPerViewNV = 5272, + DecorationPerTaskNV = 5273, + DecorationPerVertexNV = 5285, + DecorationNonUniformEXT = 5300, + DecorationHlslCounterBufferGOOGLE = 5634, + DecorationHlslSemanticGOOGLE = 5635, + DecorationMax = 0x7fffffff, +}; + +enum BuiltIn { + BuiltInPosition = 0, + BuiltInPointSize = 1, + BuiltInClipDistance = 3, + BuiltInCullDistance = 4, + BuiltInVertexId = 5, + BuiltInInstanceId = 6, + BuiltInPrimitiveId = 7, + BuiltInInvocationId = 8, + BuiltInLayer = 9, + BuiltInViewportIndex = 10, + BuiltInTessLevelOuter = 11, + BuiltInTessLevelInner = 12, + BuiltInTessCoord = 13, + BuiltInPatchVertices = 14, + BuiltInFragCoord = 15, + BuiltInPointCoord = 16, + BuiltInFrontFacing = 17, + BuiltInSampleId = 18, + BuiltInSamplePosition = 19, + BuiltInSampleMask = 20, + BuiltInFragDepth = 22, + BuiltInHelperInvocation = 23, + BuiltInNumWorkgroups = 24, + BuiltInWorkgroupSize = 25, + BuiltInWorkgroupId = 26, + BuiltInLocalInvocationId = 27, + BuiltInGlobalInvocationId = 28, + BuiltInLocalInvocationIndex = 29, + BuiltInWorkDim = 30, + BuiltInGlobalSize = 31, + BuiltInEnqueuedWorkgroupSize = 32, + BuiltInGlobalOffset = 33, + BuiltInGlobalLinearId = 34, + BuiltInSubgroupSize = 36, + BuiltInSubgroupMaxSize = 37, + BuiltInNumSubgroups = 38, + BuiltInNumEnqueuedSubgroups = 39, + BuiltInSubgroupId = 40, + BuiltInSubgroupLocalInvocationId = 41, + BuiltInVertexIndex = 42, + BuiltInInstanceIndex = 43, + BuiltInSubgroupEqMask = 4416, + BuiltInSubgroupEqMaskKHR = 4416, + BuiltInSubgroupGeMask = 4417, + BuiltInSubgroupGeMaskKHR = 4417, + BuiltInSubgroupGtMask = 4418, + BuiltInSubgroupGtMaskKHR = 4418, + BuiltInSubgroupLeMask = 4419, + BuiltInSubgroupLeMaskKHR = 4419, + BuiltInSubgroupLtMask = 4420, + BuiltInSubgroupLtMaskKHR = 4420, + BuiltInBaseVertex = 4424, + BuiltInBaseInstance = 4425, + BuiltInDrawIndex = 4426, + BuiltInDeviceIndex = 4438, + BuiltInViewIndex = 4440, + BuiltInBaryCoordNoPerspAMD = 4992, + BuiltInBaryCoordNoPerspCentroidAMD = 4993, + BuiltInBaryCoordNoPerspSampleAMD = 4994, + BuiltInBaryCoordSmoothAMD = 4995, + BuiltInBaryCoordSmoothCentroidAMD = 4996, + BuiltInBaryCoordSmoothSampleAMD = 4997, + BuiltInBaryCoordPullModelAMD = 4998, + 
BuiltInFragStencilRefEXT = 5014, + BuiltInViewportMaskNV = 5253, + BuiltInSecondaryPositionNV = 5257, + BuiltInSecondaryViewportMaskNV = 5258, + BuiltInPositionPerViewNV = 5261, + BuiltInViewportMaskPerViewNV = 5262, + BuiltInFullyCoveredEXT = 5264, + BuiltInTaskCountNV = 5274, + BuiltInPrimitiveCountNV = 5275, + BuiltInPrimitiveIndicesNV = 5276, + BuiltInClipDistancePerViewNV = 5277, + BuiltInCullDistancePerViewNV = 5278, + BuiltInLayerPerViewNV = 5279, + BuiltInMeshViewCountNV = 5280, + BuiltInMeshViewIndicesNV = 5281, + BuiltInBaryCoordNV = 5286, + BuiltInBaryCoordNoPerspNV = 5287, + BuiltInFragmentSizeNV = 5292, + BuiltInInvocationsPerPixelNV = 5293, + BuiltInLaunchIdNVX = 5319, + BuiltInLaunchSizeNVX = 5320, + BuiltInWorldRayOriginNVX = 5321, + BuiltInWorldRayDirectionNVX = 5322, + BuiltInObjectRayOriginNVX = 5323, + BuiltInObjectRayDirectionNVX = 5324, + BuiltInRayTminNVX = 5325, + BuiltInRayTmaxNVX = 5326, + BuiltInInstanceCustomIndexNVX = 5327, + BuiltInObjectToWorldNVX = 5330, + BuiltInWorldToObjectNVX = 5331, + BuiltInHitTNVX = 5332, + BuiltInHitKindNVX = 5333, + BuiltInMax = 0x7fffffff, +}; + +enum SelectionControlShift { + SelectionControlFlattenShift = 0, + SelectionControlDontFlattenShift = 1, + SelectionControlMax = 0x7fffffff, +}; + +enum SelectionControlMask { + SelectionControlMaskNone = 0, + SelectionControlFlattenMask = 0x00000001, + SelectionControlDontFlattenMask = 0x00000002, +}; + +enum LoopControlShift { + LoopControlUnrollShift = 0, + LoopControlDontUnrollShift = 1, + LoopControlDependencyInfiniteShift = 2, + LoopControlDependencyLengthShift = 3, + LoopControlMax = 0x7fffffff, +}; + +enum LoopControlMask { + LoopControlMaskNone = 0, + LoopControlUnrollMask = 0x00000001, + LoopControlDontUnrollMask = 0x00000002, + LoopControlDependencyInfiniteMask = 0x00000004, + LoopControlDependencyLengthMask = 0x00000008, +}; + +enum FunctionControlShift { + FunctionControlInlineShift = 0, + FunctionControlDontInlineShift = 1, + FunctionControlPureShift = 2, + FunctionControlConstShift = 3, + FunctionControlMax = 0x7fffffff, +}; + +enum FunctionControlMask { + FunctionControlMaskNone = 0, + FunctionControlInlineMask = 0x00000001, + FunctionControlDontInlineMask = 0x00000002, + FunctionControlPureMask = 0x00000004, + FunctionControlConstMask = 0x00000008, +}; + +enum MemorySemanticsShift { + MemorySemanticsAcquireShift = 1, + MemorySemanticsReleaseShift = 2, + MemorySemanticsAcquireReleaseShift = 3, + MemorySemanticsSequentiallyConsistentShift = 4, + MemorySemanticsUniformMemoryShift = 6, + MemorySemanticsSubgroupMemoryShift = 7, + MemorySemanticsWorkgroupMemoryShift = 8, + MemorySemanticsCrossWorkgroupMemoryShift = 9, + MemorySemanticsAtomicCounterMemoryShift = 10, + MemorySemanticsImageMemoryShift = 11, + MemorySemanticsOutputMemoryKHRShift = 12, + MemorySemanticsMakeAvailableKHRShift = 13, + MemorySemanticsMakeVisibleKHRShift = 14, + MemorySemanticsMax = 0x7fffffff, +}; + +enum MemorySemanticsMask { + MemorySemanticsMaskNone = 0, + MemorySemanticsAcquireMask = 0x00000002, + MemorySemanticsReleaseMask = 0x00000004, + MemorySemanticsAcquireReleaseMask = 0x00000008, + MemorySemanticsSequentiallyConsistentMask = 0x00000010, + MemorySemanticsUniformMemoryMask = 0x00000040, + MemorySemanticsSubgroupMemoryMask = 0x00000080, + MemorySemanticsWorkgroupMemoryMask = 0x00000100, + MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + MemorySemanticsAtomicCounterMemoryMask = 0x00000400, + MemorySemanticsImageMemoryMask = 0x00000800, + MemorySemanticsOutputMemoryKHRMask = 0x00001000, + 
MemorySemanticsMakeAvailableKHRMask = 0x00002000, + MemorySemanticsMakeVisibleKHRMask = 0x00004000, +}; + +enum MemoryAccessShift { + MemoryAccessVolatileShift = 0, + MemoryAccessAlignedShift = 1, + MemoryAccessNontemporalShift = 2, + MemoryAccessMakePointerAvailableKHRShift = 3, + MemoryAccessMakePointerVisibleKHRShift = 4, + MemoryAccessNonPrivatePointerKHRShift = 5, + MemoryAccessMax = 0x7fffffff, +}; + +enum MemoryAccessMask { + MemoryAccessMaskNone = 0, + MemoryAccessVolatileMask = 0x00000001, + MemoryAccessAlignedMask = 0x00000002, + MemoryAccessNontemporalMask = 0x00000004, + MemoryAccessMakePointerAvailableKHRMask = 0x00000008, + MemoryAccessMakePointerVisibleKHRMask = 0x00000010, + MemoryAccessNonPrivatePointerKHRMask = 0x00000020, +}; + +enum Scope { + ScopeCrossDevice = 0, + ScopeDevice = 1, + ScopeWorkgroup = 2, + ScopeSubgroup = 3, + ScopeInvocation = 4, + ScopeQueueFamilyKHR = 5, + ScopeMax = 0x7fffffff, +}; + +enum GroupOperation { + GroupOperationReduce = 0, + GroupOperationInclusiveScan = 1, + GroupOperationExclusiveScan = 2, + GroupOperationClusteredReduce = 3, + GroupOperationPartitionedReduceNV = 6, + GroupOperationPartitionedInclusiveScanNV = 7, + GroupOperationPartitionedExclusiveScanNV = 8, + GroupOperationMax = 0x7fffffff, +}; + +enum KernelEnqueueFlags { + KernelEnqueueFlagsNoWait = 0, + KernelEnqueueFlagsWaitKernel = 1, + KernelEnqueueFlagsWaitWorkGroup = 2, + KernelEnqueueFlagsMax = 0x7fffffff, +}; + +enum KernelProfilingInfoShift { + KernelProfilingInfoCmdExecTimeShift = 0, + KernelProfilingInfoMax = 0x7fffffff, +}; + +enum KernelProfilingInfoMask { + KernelProfilingInfoMaskNone = 0, + KernelProfilingInfoCmdExecTimeMask = 0x00000001, +}; + +enum Capability { + CapabilityMatrix = 0, + CapabilityShader = 1, + CapabilityGeometry = 2, + CapabilityTessellation = 3, + CapabilityAddresses = 4, + CapabilityLinkage = 5, + CapabilityKernel = 6, + CapabilityVector16 = 7, + CapabilityFloat16Buffer = 8, + CapabilityFloat16 = 9, + CapabilityFloat64 = 10, + CapabilityInt64 = 11, + CapabilityInt64Atomics = 12, + CapabilityImageBasic = 13, + CapabilityImageReadWrite = 14, + CapabilityImageMipmap = 15, + CapabilityPipes = 17, + CapabilityGroups = 18, + CapabilityDeviceEnqueue = 19, + CapabilityLiteralSampler = 20, + CapabilityAtomicStorage = 21, + CapabilityInt16 = 22, + CapabilityTessellationPointSize = 23, + CapabilityGeometryPointSize = 24, + CapabilityImageGatherExtended = 25, + CapabilityStorageImageMultisample = 27, + CapabilityUniformBufferArrayDynamicIndexing = 28, + CapabilitySampledImageArrayDynamicIndexing = 29, + CapabilityStorageBufferArrayDynamicIndexing = 30, + CapabilityStorageImageArrayDynamicIndexing = 31, + CapabilityClipDistance = 32, + CapabilityCullDistance = 33, + CapabilityImageCubeArray = 34, + CapabilitySampleRateShading = 35, + CapabilityImageRect = 36, + CapabilitySampledRect = 37, + CapabilityGenericPointer = 38, + CapabilityInt8 = 39, + CapabilityInputAttachment = 40, + CapabilitySparseResidency = 41, + CapabilityMinLod = 42, + CapabilitySampled1D = 43, + CapabilityImage1D = 44, + CapabilitySampledCubeArray = 45, + CapabilitySampledBuffer = 46, + CapabilityImageBuffer = 47, + CapabilityImageMSArray = 48, + CapabilityStorageImageExtendedFormats = 49, + CapabilityImageQuery = 50, + CapabilityDerivativeControl = 51, + CapabilityInterpolationFunction = 52, + CapabilityTransformFeedback = 53, + CapabilityGeometryStreams = 54, + CapabilityStorageImageReadWithoutFormat = 55, + CapabilityStorageImageWriteWithoutFormat = 56, + CapabilityMultiViewport = 57, + 
CapabilitySubgroupDispatch = 58, + CapabilityNamedBarrier = 59, + CapabilityPipeStorage = 60, + CapabilityGroupNonUniform = 61, + CapabilityGroupNonUniformVote = 62, + CapabilityGroupNonUniformArithmetic = 63, + CapabilityGroupNonUniformBallot = 64, + CapabilityGroupNonUniformShuffle = 65, + CapabilityGroupNonUniformShuffleRelative = 66, + CapabilityGroupNonUniformClustered = 67, + CapabilityGroupNonUniformQuad = 68, + CapabilitySubgroupBallotKHR = 4423, + CapabilityDrawParameters = 4427, + CapabilitySubgroupVoteKHR = 4431, + CapabilityStorageBuffer16BitAccess = 4433, + CapabilityStorageUniformBufferBlock16 = 4433, + CapabilityStorageUniform16 = 4434, + CapabilityUniformAndStorageBuffer16BitAccess = 4434, + CapabilityStoragePushConstant16 = 4435, + CapabilityStorageInputOutput16 = 4436, + CapabilityDeviceGroup = 4437, + CapabilityMultiView = 4439, + CapabilityVariablePointersStorageBuffer = 4441, + CapabilityVariablePointers = 4442, + CapabilityAtomicStorageOps = 4445, + CapabilitySampleMaskPostDepthCoverage = 4447, + CapabilityStorageBuffer8BitAccess = 4448, + CapabilityUniformAndStorageBuffer8BitAccess = 4449, + CapabilityStoragePushConstant8 = 4450, + CapabilityFloat16ImageAMD = 5008, + CapabilityImageGatherBiasLodAMD = 5009, + CapabilityFragmentMaskAMD = 5010, + CapabilityStencilExportEXT = 5013, + CapabilityImageReadWriteLodAMD = 5015, + CapabilitySampleMaskOverrideCoverageNV = 5249, + CapabilityGeometryShaderPassthroughNV = 5251, + CapabilityShaderViewportIndexLayerEXT = 5254, + CapabilityShaderViewportIndexLayerNV = 5254, + CapabilityShaderViewportMaskNV = 5255, + CapabilityShaderStereoViewNV = 5259, + CapabilityPerViewAttributesNV = 5260, + CapabilityFragmentFullyCoveredEXT = 5265, + CapabilityMeshShadingNV = 5266, + CapabilityImageFootprintNV = 5282, + CapabilityFragmentBarycentricNV = 5284, + CapabilityComputeDerivativeGroupQuadsNV = 5288, + CapabilityShadingRateNV = 5291, + CapabilityGroupNonUniformPartitionedNV = 5297, + CapabilityShaderNonUniformEXT = 5301, + CapabilityRuntimeDescriptorArrayEXT = 5302, + CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + CapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + CapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + CapabilityRaytracingNVX = 5340, + CapabilityVulkanMemoryModelKHR = 5345, + CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + CapabilityComputeDerivativeGroupLinearNV = 5350, + CapabilitySubgroupShuffleINTEL = 5568, + CapabilitySubgroupBufferBlockIOINTEL = 5569, + CapabilitySubgroupImageBlockIOINTEL = 5570, + CapabilityMax = 0x7fffffff, +}; + +enum Op { + OpNop = 0, + OpUndef = 1, + OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + 
OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + OpImageSampleProjExplicitLod = 92, + OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + 
OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + OpGroupAsyncCopy = 259, + OpGroupWaitEvents = 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId 
= 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + OpSubgroupReadInvocationKHR = 4432, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpImageSampleFootprintNV = 5283, + OpGroupNonUniformPartitionNV = 5296, + OpWritePackedPrimitiveIndices4x8NV = 5299, + OpReportIntersectionNVX = 5334, + OpIgnoreIntersectionNVX = 5335, + OpTerminateRayNVX = 5336, + OpTraceNVX = 5337, + OpTypeAccelerationStructureNVX = 5341, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateStringGOOGLE = 5633, + OpMax = 0x7fffffff, +}; + +// Overload operator| for mask bit combining + +inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); } +inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); } +inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); } +inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); } +inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); } +inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); } +inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); } +inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return 
KernelProfilingInfoMask(unsigned(a) | unsigned(b)); } + +} // end namespace spv + +#endif // #ifndef spirv_HPP + diff --git a/src/refresh/vkpt/include/vulkan/spirv.hpp11 b/src/refresh/vkpt/include/vulkan/spirv.hpp11 new file mode 100644 index 0000000..7fbacb9 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/spirv.hpp11 @@ -0,0 +1,1186 @@ +// Copyright (c) 2014-2018 The Khronos Group Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and/or associated documentation files (the "Materials"), +// to deal in the Materials without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Materials, and to permit persons to whom the +// Materials are furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Materials. +// +// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +// +// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +// IN THE MATERIALS. + +// This header is automatically generated by the same tool that creates +// the Binary Section of the SPIR-V specification. + +// Enumeration tokens for SPIR-V, in various styles: +// C, C++, C++11, JSON, Lua, Python +// +// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +// +// Some tokens act like mask values, which can be OR'd together, +// while others are mutually exclusive. The mask-like ones have +// "Mask" in their name, and a parallel enum that has the shift +// amount (1 << x) for each corresponding enumerant. 
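
[Editorial sketch, not part of the imported Khronos header.] The preamble above describes the Mask/Shift pairing used throughout this file: mask-style enums can be OR'd together, and each has a parallel *Shift enum giving the bit position (1 << x). A minimal C++ illustration of that convention follows, assuming the spv::ImageOperandsMask / spv::ImageOperandsShift enums and the operator| overload declared later in this header; the "spirv.hpp11" include path is an assumption and may need adjusting to the project's include layout.

    // Illustrative only -- exercises the Mask/Shift convention described above.
    #include <cassert>
    #include "spirv.hpp11"   // assumed include path

    int main() {
        using namespace spv;

        // Mask-style enums combine via the operator| overload provided at
        // the end of this header.
        ImageOperandsMask ops =
            ImageOperandsMask::Lod | ImageOperandsMask::ConstOffset;

        // The parallel Shift enum holds the bit position, so shifting 1
        // by it reproduces the corresponding mask bit.
        assert(unsigned(ImageOperandsMask::Lod) ==
               (1u << unsigned(ImageOperandsShift::Lod)));

        // Masks are plain enum classes, so testing a bit needs an explicit
        // cast to unsigned (no operator& is defined in the header).
        bool hasLod = (unsigned(ops) & unsigned(ImageOperandsMask::Lod)) != 0;
        return hasLod ? 0 : 1;
    }

The same pairing (e.g. MemorySemanticsMask / MemorySemanticsShift, LoopControlMask / LoopControlShift) applies to every mask-like enum defined below.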
+ +#ifndef spirv_HPP +#define spirv_HPP + +namespace spv { + +typedef unsigned int Id; + +#define SPV_VERSION 0x10300 +#define SPV_REVISION 1 + +static const unsigned int MagicNumber = 0x07230203; +static const unsigned int Version = 0x00010300; +static const unsigned int Revision = 1; +static const unsigned int OpCodeMask = 0xffff; +static const unsigned int WordCountShift = 16; + +enum class SourceLanguage : unsigned { + Unknown = 0, + ESSL = 1, + GLSL = 2, + OpenCL_C = 3, + OpenCL_CPP = 4, + HLSL = 5, + Max = 0x7fffffff, +}; + +enum class ExecutionModel : unsigned { + Vertex = 0, + TessellationControl = 1, + TessellationEvaluation = 2, + Geometry = 3, + Fragment = 4, + GLCompute = 5, + Kernel = 6, + TaskNV = 5267, + MeshNV = 5268, + RayGenerationNVX = 5313, + IntersectionNVX = 5314, + AnyHitNVX = 5315, + ClosestHitNVX = 5316, + MissNVX = 5317, + CallableNVX = 5318, + Max = 0x7fffffff, +}; + +enum class AddressingModel : unsigned { + Logical = 0, + Physical32 = 1, + Physical64 = 2, + Max = 0x7fffffff, +}; + +enum class MemoryModel : unsigned { + Simple = 0, + GLSL450 = 1, + OpenCL = 2, + VulkanKHR = 3, + Max = 0x7fffffff, +}; + +enum class ExecutionMode : unsigned { + Invocations = 0, + SpacingEqual = 1, + SpacingFractionalEven = 2, + SpacingFractionalOdd = 3, + VertexOrderCw = 4, + VertexOrderCcw = 5, + PixelCenterInteger = 6, + OriginUpperLeft = 7, + OriginLowerLeft = 8, + EarlyFragmentTests = 9, + PointMode = 10, + Xfb = 11, + DepthReplacing = 12, + DepthGreater = 14, + DepthLess = 15, + DepthUnchanged = 16, + LocalSize = 17, + LocalSizeHint = 18, + InputPoints = 19, + InputLines = 20, + InputLinesAdjacency = 21, + Triangles = 22, + InputTrianglesAdjacency = 23, + Quads = 24, + Isolines = 25, + OutputVertices = 26, + OutputPoints = 27, + OutputLineStrip = 28, + OutputTriangleStrip = 29, + VecTypeHint = 30, + ContractionOff = 31, + Initializer = 33, + Finalizer = 34, + SubgroupSize = 35, + SubgroupsPerWorkgroup = 36, + SubgroupsPerWorkgroupId = 37, + LocalSizeId = 38, + LocalSizeHintId = 39, + PostDepthCoverage = 4446, + StencilRefReplacingEXT = 5027, + OutputLinesNV = 5269, + OutputPrimitivesNV = 5270, + DerivativeGroupQuadsNV = 5289, + DerivativeGroupLinearNV = 5290, + OutputTrianglesNV = 5298, + Max = 0x7fffffff, +}; + +enum class StorageClass : unsigned { + UniformConstant = 0, + Input = 1, + Uniform = 2, + Output = 3, + Workgroup = 4, + CrossWorkgroup = 5, + Private = 6, + Function = 7, + Generic = 8, + PushConstant = 9, + AtomicCounter = 10, + Image = 11, + StorageBuffer = 12, + RayPayloadNVX = 5338, + HitAttributeNVX = 5339, + IncomingRayPayloadNVX = 5342, + ShaderRecordBufferNVX = 5343, + Max = 0x7fffffff, +}; + +enum class Dim : unsigned { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + Cube = 3, + Rect = 4, + Buffer = 5, + SubpassData = 6, + Max = 0x7fffffff, +}; + +enum class SamplerAddressingMode : unsigned { + None = 0, + ClampToEdge = 1, + Clamp = 2, + Repeat = 3, + RepeatMirrored = 4, + Max = 0x7fffffff, +}; + +enum class SamplerFilterMode : unsigned { + Nearest = 0, + Linear = 1, + Max = 0x7fffffff, +}; + +enum class ImageFormat : unsigned { + Unknown = 0, + Rgba32f = 1, + Rgba16f = 2, + R32f = 3, + Rgba8 = 4, + Rgba8Snorm = 5, + Rg32f = 6, + Rg16f = 7, + R11fG11fB10f = 8, + R16f = 9, + Rgba16 = 10, + Rgb10A2 = 11, + Rg16 = 12, + Rg8 = 13, + R16 = 14, + R8 = 15, + Rgba16Snorm = 16, + Rg16Snorm = 17, + Rg8Snorm = 18, + R16Snorm = 19, + R8Snorm = 20, + Rgba32i = 21, + Rgba16i = 22, + Rgba8i = 23, + R32i = 24, + Rg32i = 25, + Rg16i = 26, + Rg8i = 27, + R16i = 28, + R8i = 29, + 
Rgba32ui = 30, + Rgba16ui = 31, + Rgba8ui = 32, + R32ui = 33, + Rgb10a2ui = 34, + Rg32ui = 35, + Rg16ui = 36, + Rg8ui = 37, + R16ui = 38, + R8ui = 39, + Max = 0x7fffffff, +}; + +enum class ImageChannelOrder : unsigned { + R = 0, + A = 1, + RG = 2, + RA = 3, + RGB = 4, + RGBA = 5, + BGRA = 6, + ARGB = 7, + Intensity = 8, + Luminance = 9, + Rx = 10, + RGx = 11, + RGBx = 12, + Depth = 13, + DepthStencil = 14, + sRGB = 15, + sRGBx = 16, + sRGBA = 17, + sBGRA = 18, + ABGR = 19, + Max = 0x7fffffff, +}; + +enum class ImageChannelDataType : unsigned { + SnormInt8 = 0, + SnormInt16 = 1, + UnormInt8 = 2, + UnormInt16 = 3, + UnormShort565 = 4, + UnormShort555 = 5, + UnormInt101010 = 6, + SignedInt8 = 7, + SignedInt16 = 8, + SignedInt32 = 9, + UnsignedInt8 = 10, + UnsignedInt16 = 11, + UnsignedInt32 = 12, + HalfFloat = 13, + Float = 14, + UnormInt24 = 15, + UnormInt101010_2 = 16, + Max = 0x7fffffff, +}; + +enum class ImageOperandsShift : unsigned { + Bias = 0, + Lod = 1, + Grad = 2, + ConstOffset = 3, + Offset = 4, + ConstOffsets = 5, + Sample = 6, + MinLod = 7, + MakeTexelAvailableKHR = 8, + MakeTexelVisibleKHR = 9, + NonPrivateTexelKHR = 10, + VolatileTexelKHR = 11, + Max = 0x7fffffff, +}; + +enum class ImageOperandsMask : unsigned { + MaskNone = 0, + Bias = 0x00000001, + Lod = 0x00000002, + Grad = 0x00000004, + ConstOffset = 0x00000008, + Offset = 0x00000010, + ConstOffsets = 0x00000020, + Sample = 0x00000040, + MinLod = 0x00000080, + MakeTexelAvailableKHR = 0x00000100, + MakeTexelVisibleKHR = 0x00000200, + NonPrivateTexelKHR = 0x00000400, + VolatileTexelKHR = 0x00000800, +}; + +enum class FPFastMathModeShift : unsigned { + NotNaN = 0, + NotInf = 1, + NSZ = 2, + AllowRecip = 3, + Fast = 4, + Max = 0x7fffffff, +}; + +enum class FPFastMathModeMask : unsigned { + MaskNone = 0, + NotNaN = 0x00000001, + NotInf = 0x00000002, + NSZ = 0x00000004, + AllowRecip = 0x00000008, + Fast = 0x00000010, +}; + +enum class FPRoundingMode : unsigned { + RTE = 0, + RTZ = 1, + RTP = 2, + RTN = 3, + Max = 0x7fffffff, +}; + +enum class LinkageType : unsigned { + Export = 0, + Import = 1, + Max = 0x7fffffff, +}; + +enum class AccessQualifier : unsigned { + ReadOnly = 0, + WriteOnly = 1, + ReadWrite = 2, + Max = 0x7fffffff, +}; + +enum class FunctionParameterAttribute : unsigned { + Zext = 0, + Sext = 1, + ByVal = 2, + Sret = 3, + NoAlias = 4, + NoCapture = 5, + NoWrite = 6, + NoReadWrite = 7, + Max = 0x7fffffff, +}; + +enum class Decoration : unsigned { + RelaxedPrecision = 0, + SpecId = 1, + Block = 2, + BufferBlock = 3, + RowMajor = 4, + ColMajor = 5, + ArrayStride = 6, + MatrixStride = 7, + GLSLShared = 8, + GLSLPacked = 9, + CPacked = 10, + BuiltIn = 11, + NoPerspective = 13, + Flat = 14, + Patch = 15, + Centroid = 16, + Sample = 17, + Invariant = 18, + Restrict = 19, + Aliased = 20, + Volatile = 21, + Constant = 22, + Coherent = 23, + NonWritable = 24, + NonReadable = 25, + Uniform = 26, + SaturatedConversion = 28, + Stream = 29, + Location = 30, + Component = 31, + Index = 32, + Binding = 33, + DescriptorSet = 34, + Offset = 35, + XfbBuffer = 36, + XfbStride = 37, + FuncParamAttr = 38, + FPRoundingMode = 39, + FPFastMathMode = 40, + LinkageAttributes = 41, + NoContraction = 42, + InputAttachmentIndex = 43, + Alignment = 44, + MaxByteOffset = 45, + AlignmentId = 46, + MaxByteOffsetId = 47, + ExplicitInterpAMD = 4999, + OverrideCoverageNV = 5248, + PassthroughNV = 5250, + ViewportRelativeNV = 5252, + SecondaryViewportRelativeNV = 5256, + PerPrimitiveNV = 5271, + PerViewNV = 5272, + PerTaskNV = 5273, + PerVertexNV = 
5285, + NonUniformEXT = 5300, + HlslCounterBufferGOOGLE = 5634, + HlslSemanticGOOGLE = 5635, + Max = 0x7fffffff, +}; + +enum class BuiltIn : unsigned { + Position = 0, + PointSize = 1, + ClipDistance = 3, + CullDistance = 4, + VertexId = 5, + InstanceId = 6, + PrimitiveId = 7, + InvocationId = 8, + Layer = 9, + ViewportIndex = 10, + TessLevelOuter = 11, + TessLevelInner = 12, + TessCoord = 13, + PatchVertices = 14, + FragCoord = 15, + PointCoord = 16, + FrontFacing = 17, + SampleId = 18, + SamplePosition = 19, + SampleMask = 20, + FragDepth = 22, + HelperInvocation = 23, + NumWorkgroups = 24, + WorkgroupSize = 25, + WorkgroupId = 26, + LocalInvocationId = 27, + GlobalInvocationId = 28, + LocalInvocationIndex = 29, + WorkDim = 30, + GlobalSize = 31, + EnqueuedWorkgroupSize = 32, + GlobalOffset = 33, + GlobalLinearId = 34, + SubgroupSize = 36, + SubgroupMaxSize = 37, + NumSubgroups = 38, + NumEnqueuedSubgroups = 39, + SubgroupId = 40, + SubgroupLocalInvocationId = 41, + VertexIndex = 42, + InstanceIndex = 43, + SubgroupEqMask = 4416, + SubgroupEqMaskKHR = 4416, + SubgroupGeMask = 4417, + SubgroupGeMaskKHR = 4417, + SubgroupGtMask = 4418, + SubgroupGtMaskKHR = 4418, + SubgroupLeMask = 4419, + SubgroupLeMaskKHR = 4419, + SubgroupLtMask = 4420, + SubgroupLtMaskKHR = 4420, + BaseVertex = 4424, + BaseInstance = 4425, + DrawIndex = 4426, + DeviceIndex = 4438, + ViewIndex = 4440, + BaryCoordNoPerspAMD = 4992, + BaryCoordNoPerspCentroidAMD = 4993, + BaryCoordNoPerspSampleAMD = 4994, + BaryCoordSmoothAMD = 4995, + BaryCoordSmoothCentroidAMD = 4996, + BaryCoordSmoothSampleAMD = 4997, + BaryCoordPullModelAMD = 4998, + FragStencilRefEXT = 5014, + ViewportMaskNV = 5253, + SecondaryPositionNV = 5257, + SecondaryViewportMaskNV = 5258, + PositionPerViewNV = 5261, + ViewportMaskPerViewNV = 5262, + FullyCoveredEXT = 5264, + TaskCountNV = 5274, + PrimitiveCountNV = 5275, + PrimitiveIndicesNV = 5276, + ClipDistancePerViewNV = 5277, + CullDistancePerViewNV = 5278, + LayerPerViewNV = 5279, + MeshViewCountNV = 5280, + MeshViewIndicesNV = 5281, + BaryCoordNV = 5286, + BaryCoordNoPerspNV = 5287, + FragmentSizeNV = 5292, + InvocationsPerPixelNV = 5293, + LaunchIdNVX = 5319, + LaunchSizeNVX = 5320, + WorldRayOriginNVX = 5321, + WorldRayDirectionNVX = 5322, + ObjectRayOriginNVX = 5323, + ObjectRayDirectionNVX = 5324, + RayTminNVX = 5325, + RayTmaxNVX = 5326, + InstanceCustomIndexNVX = 5327, + ObjectToWorldNVX = 5330, + WorldToObjectNVX = 5331, + HitTNVX = 5332, + HitKindNVX = 5333, + Max = 0x7fffffff, +}; + +enum class SelectionControlShift : unsigned { + Flatten = 0, + DontFlatten = 1, + Max = 0x7fffffff, +}; + +enum class SelectionControlMask : unsigned { + MaskNone = 0, + Flatten = 0x00000001, + DontFlatten = 0x00000002, +}; + +enum class LoopControlShift : unsigned { + Unroll = 0, + DontUnroll = 1, + DependencyInfinite = 2, + DependencyLength = 3, + Max = 0x7fffffff, +}; + +enum class LoopControlMask : unsigned { + MaskNone = 0, + Unroll = 0x00000001, + DontUnroll = 0x00000002, + DependencyInfinite = 0x00000004, + DependencyLength = 0x00000008, +}; + +enum class FunctionControlShift : unsigned { + Inline = 0, + DontInline = 1, + Pure = 2, + Const = 3, + Max = 0x7fffffff, +}; + +enum class FunctionControlMask : unsigned { + MaskNone = 0, + Inline = 0x00000001, + DontInline = 0x00000002, + Pure = 0x00000004, + Const = 0x00000008, +}; + +enum class MemorySemanticsShift : unsigned { + Acquire = 1, + Release = 2, + AcquireRelease = 3, + SequentiallyConsistent = 4, + UniformMemory = 6, + SubgroupMemory = 7, + 
WorkgroupMemory = 8, + CrossWorkgroupMemory = 9, + AtomicCounterMemory = 10, + ImageMemory = 11, + OutputMemoryKHR = 12, + MakeAvailableKHR = 13, + MakeVisibleKHR = 14, + Max = 0x7fffffff, +}; + +enum class MemorySemanticsMask : unsigned { + MaskNone = 0, + Acquire = 0x00000002, + Release = 0x00000004, + AcquireRelease = 0x00000008, + SequentiallyConsistent = 0x00000010, + UniformMemory = 0x00000040, + SubgroupMemory = 0x00000080, + WorkgroupMemory = 0x00000100, + CrossWorkgroupMemory = 0x00000200, + AtomicCounterMemory = 0x00000400, + ImageMemory = 0x00000800, + OutputMemoryKHR = 0x00001000, + MakeAvailableKHR = 0x00002000, + MakeVisibleKHR = 0x00004000, +}; + +enum class MemoryAccessShift : unsigned { + Volatile = 0, + Aligned = 1, + Nontemporal = 2, + MakePointerAvailableKHR = 3, + MakePointerVisibleKHR = 4, + NonPrivatePointerKHR = 5, + Max = 0x7fffffff, +}; + +enum class MemoryAccessMask : unsigned { + MaskNone = 0, + Volatile = 0x00000001, + Aligned = 0x00000002, + Nontemporal = 0x00000004, + MakePointerAvailableKHR = 0x00000008, + MakePointerVisibleKHR = 0x00000010, + NonPrivatePointerKHR = 0x00000020, +}; + +enum class Scope : unsigned { + CrossDevice = 0, + Device = 1, + Workgroup = 2, + Subgroup = 3, + Invocation = 4, + QueueFamilyKHR = 5, + Max = 0x7fffffff, +}; + +enum class GroupOperation : unsigned { + Reduce = 0, + InclusiveScan = 1, + ExclusiveScan = 2, + ClusteredReduce = 3, + PartitionedReduceNV = 6, + PartitionedInclusiveScanNV = 7, + PartitionedExclusiveScanNV = 8, + Max = 0x7fffffff, +}; + +enum class KernelEnqueueFlags : unsigned { + NoWait = 0, + WaitKernel = 1, + WaitWorkGroup = 2, + Max = 0x7fffffff, +}; + +enum class KernelProfilingInfoShift : unsigned { + CmdExecTime = 0, + Max = 0x7fffffff, +}; + +enum class KernelProfilingInfoMask : unsigned { + MaskNone = 0, + CmdExecTime = 0x00000001, +}; + +enum class Capability : unsigned { + Matrix = 0, + Shader = 1, + Geometry = 2, + Tessellation = 3, + Addresses = 4, + Linkage = 5, + Kernel = 6, + Vector16 = 7, + Float16Buffer = 8, + Float16 = 9, + Float64 = 10, + Int64 = 11, + Int64Atomics = 12, + ImageBasic = 13, + ImageReadWrite = 14, + ImageMipmap = 15, + Pipes = 17, + Groups = 18, + DeviceEnqueue = 19, + LiteralSampler = 20, + AtomicStorage = 21, + Int16 = 22, + TessellationPointSize = 23, + GeometryPointSize = 24, + ImageGatherExtended = 25, + StorageImageMultisample = 27, + UniformBufferArrayDynamicIndexing = 28, + SampledImageArrayDynamicIndexing = 29, + StorageBufferArrayDynamicIndexing = 30, + StorageImageArrayDynamicIndexing = 31, + ClipDistance = 32, + CullDistance = 33, + ImageCubeArray = 34, + SampleRateShading = 35, + ImageRect = 36, + SampledRect = 37, + GenericPointer = 38, + Int8 = 39, + InputAttachment = 40, + SparseResidency = 41, + MinLod = 42, + Sampled1D = 43, + Image1D = 44, + SampledCubeArray = 45, + SampledBuffer = 46, + ImageBuffer = 47, + ImageMSArray = 48, + StorageImageExtendedFormats = 49, + ImageQuery = 50, + DerivativeControl = 51, + InterpolationFunction = 52, + TransformFeedback = 53, + GeometryStreams = 54, + StorageImageReadWithoutFormat = 55, + StorageImageWriteWithoutFormat = 56, + MultiViewport = 57, + SubgroupDispatch = 58, + NamedBarrier = 59, + PipeStorage = 60, + GroupNonUniform = 61, + GroupNonUniformVote = 62, + GroupNonUniformArithmetic = 63, + GroupNonUniformBallot = 64, + GroupNonUniformShuffle = 65, + GroupNonUniformShuffleRelative = 66, + GroupNonUniformClustered = 67, + GroupNonUniformQuad = 68, + SubgroupBallotKHR = 4423, + DrawParameters = 4427, + SubgroupVoteKHR = 
4431, + StorageBuffer16BitAccess = 4433, + StorageUniformBufferBlock16 = 4433, + StorageUniform16 = 4434, + UniformAndStorageBuffer16BitAccess = 4434, + StoragePushConstant16 = 4435, + StorageInputOutput16 = 4436, + DeviceGroup = 4437, + MultiView = 4439, + VariablePointersStorageBuffer = 4441, + VariablePointers = 4442, + AtomicStorageOps = 4445, + SampleMaskPostDepthCoverage = 4447, + StorageBuffer8BitAccess = 4448, + UniformAndStorageBuffer8BitAccess = 4449, + StoragePushConstant8 = 4450, + Float16ImageAMD = 5008, + ImageGatherBiasLodAMD = 5009, + FragmentMaskAMD = 5010, + StencilExportEXT = 5013, + ImageReadWriteLodAMD = 5015, + SampleMaskOverrideCoverageNV = 5249, + GeometryShaderPassthroughNV = 5251, + ShaderViewportIndexLayerEXT = 5254, + ShaderViewportIndexLayerNV = 5254, + ShaderViewportMaskNV = 5255, + ShaderStereoViewNV = 5259, + PerViewAttributesNV = 5260, + FragmentFullyCoveredEXT = 5265, + MeshShadingNV = 5266, + ImageFootprintNV = 5282, + FragmentBarycentricNV = 5284, + ComputeDerivativeGroupQuadsNV = 5288, + ShadingRateNV = 5291, + GroupNonUniformPartitionedNV = 5297, + ShaderNonUniformEXT = 5301, + RuntimeDescriptorArrayEXT = 5302, + InputAttachmentArrayDynamicIndexingEXT = 5303, + UniformTexelBufferArrayDynamicIndexingEXT = 5304, + StorageTexelBufferArrayDynamicIndexingEXT = 5305, + UniformBufferArrayNonUniformIndexingEXT = 5306, + SampledImageArrayNonUniformIndexingEXT = 5307, + StorageBufferArrayNonUniformIndexingEXT = 5308, + StorageImageArrayNonUniformIndexingEXT = 5309, + InputAttachmentArrayNonUniformIndexingEXT = 5310, + UniformTexelBufferArrayNonUniformIndexingEXT = 5311, + StorageTexelBufferArrayNonUniformIndexingEXT = 5312, + RaytracingNVX = 5340, + VulkanMemoryModelKHR = 5345, + VulkanMemoryModelDeviceScopeKHR = 5346, + ComputeDerivativeGroupLinearNV = 5350, + SubgroupShuffleINTEL = 5568, + SubgroupBufferBlockIOINTEL = 5569, + SubgroupImageBlockIOINTEL = 5570, + Max = 0x7fffffff, +}; + +enum class Op : unsigned { + OpNop = 0, + OpUndef = 1, + OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 
77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + OpImageSampleProjExplicitLod = 92, + OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + 
OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + OpGroupAsyncCopy = 259, + OpGroupWaitEvents = 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId = 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + 
OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + OpSubgroupReadInvocationKHR = 4432, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpImageSampleFootprintNV = 5283, + OpGroupNonUniformPartitionNV = 5296, + OpWritePackedPrimitiveIndices4x8NV = 5299, + OpReportIntersectionNVX = 5334, + OpIgnoreIntersectionNVX = 5335, + OpTerminateRayNVX = 5336, + OpTraceNVX = 5337, + OpTypeAccelerationStructureNVX = 5341, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateStringGOOGLE = 5633, + Max = 0x7fffffff, +}; + +// Overload operator| for mask bit combining + +inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); } +inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); } +inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); } +inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); } +inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); } +inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); } +inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); } +inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); } + +} // end namespace spv + +#endif // #ifndef spirv_HPP + diff --git a/src/refresh/vkpt/include/vulkan/spirv.json b/src/refresh/vkpt/include/vulkan/spirv.json new file mode 100644 index 0000000..3531ae7 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/spirv.json @@ -0,0 +1,1192 @@ +{ + "spv": + { + "meta": + { + "Comment": + [ + [ + "Copyright (c) 2014-2018 The Khronos Group Inc.", + "", + "Permission is hereby granted, free of charge, to any person obtaining a copy", + "of this software and/or associated documentation files (the \"Materials\"),", + "to deal in the Materials without restriction, including without limitation", + "the rights to use, copy, modify, merge, publish, distribute, sublicense,", + "and/or sell copies of the Materials, and to permit persons to whom the", + "Materials are furnished to do so, subject to the following conditions:", + "", + "The above copyright notice and this permission notice shall be included in", + "all copies or substantial portions of the Materials.", + "", + 
"MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS", + "STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND", + "HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ ", + "", + "THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", + "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", + "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", + "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", + "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", + "FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS", + "IN THE MATERIALS." + ], + [ + "This header is automatically generated by the same tool that creates", + "the Binary Section of the SPIR-V specification." + ], + [ + "Enumeration tokens for SPIR-V, in various styles:", + " C, C++, C++11, JSON, Lua, Python", + "", + "- C will have tokens with a \"Spv\" prefix, e.g.: SpvSourceLanguageGLSL", + "- C++ will have tokens in the \"spv\" name space, e.g.: spv::SourceLanguageGLSL", + "- C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL", + "- Lua will use tables, e.g.: spv.SourceLanguage.GLSL", + "- Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']", + "", + "Some tokens act like mask values, which can be OR'd together,", + "while others are mutually exclusive. The mask-like ones have", + "\"Mask\" in their name, and a parallel enum that has the shift", + "amount (1 << x) for each corresponding enumerant." + ] + ], + "MagicNumber": 119734787, + "Version": 66304, + "Revision": 1, + "OpCodeMask": 65535, + "WordCountShift": 16 + }, + "enum": + [ + { + "Name": "SourceLanguage", + "Type": "Value", + "Values": + { + "Unknown": 0, + "ESSL": 1, + "GLSL": 2, + "OpenCL_C": 3, + "OpenCL_CPP": 4, + "HLSL": 5 + } + }, + { + "Name": "ExecutionModel", + "Type": "Value", + "Values": + { + "Vertex": 0, + "TessellationControl": 1, + "TessellationEvaluation": 2, + "Geometry": 3, + "Fragment": 4, + "GLCompute": 5, + "Kernel": 6, + "TaskNV": 5267, + "MeshNV": 5268, + "RayGenerationNVX": 5313, + "IntersectionNVX": 5314, + "AnyHitNVX": 5315, + "ClosestHitNVX": 5316, + "MissNVX": 5317, + "CallableNVX": 5318 + } + }, + { + "Name": "AddressingModel", + "Type": "Value", + "Values": + { + "Logical": 0, + "Physical32": 1, + "Physical64": 2 + } + }, + { + "Name": "MemoryModel", + "Type": "Value", + "Values": + { + "Simple": 0, + "GLSL450": 1, + "OpenCL": 2, + "VulkanKHR": 3 + } + }, + { + "Name": "ExecutionMode", + "Type": "Value", + "Values": + { + "Invocations": 0, + "SpacingEqual": 1, + "SpacingFractionalEven": 2, + "SpacingFractionalOdd": 3, + "VertexOrderCw": 4, + "VertexOrderCcw": 5, + "PixelCenterInteger": 6, + "OriginUpperLeft": 7, + "OriginLowerLeft": 8, + "EarlyFragmentTests": 9, + "PointMode": 10, + "Xfb": 11, + "DepthReplacing": 12, + "DepthGreater": 14, + "DepthLess": 15, + "DepthUnchanged": 16, + "LocalSize": 17, + "LocalSizeHint": 18, + "InputPoints": 19, + "InputLines": 20, + "InputLinesAdjacency": 21, + "Triangles": 22, + "InputTrianglesAdjacency": 23, + "Quads": 24, + "Isolines": 25, + "OutputVertices": 26, + "OutputPoints": 27, + "OutputLineStrip": 28, + "OutputTriangleStrip": 29, + "VecTypeHint": 30, + "ContractionOff": 31, + "Initializer": 33, + "Finalizer": 34, + "SubgroupSize": 35, + "SubgroupsPerWorkgroup": 36, + "SubgroupsPerWorkgroupId": 37, + "LocalSizeId": 38, + "LocalSizeHintId": 39, + 
"PostDepthCoverage": 4446, + "StencilRefReplacingEXT": 5027, + "OutputLinesNV": 5269, + "OutputPrimitivesNV": 5270, + "DerivativeGroupQuadsNV": 5289, + "DerivativeGroupLinearNV": 5290, + "OutputTrianglesNV": 5298 + } + }, + { + "Name": "StorageClass", + "Type": "Value", + "Values": + { + "UniformConstant": 0, + "Input": 1, + "Uniform": 2, + "Output": 3, + "Workgroup": 4, + "CrossWorkgroup": 5, + "Private": 6, + "Function": 7, + "Generic": 8, + "PushConstant": 9, + "AtomicCounter": 10, + "Image": 11, + "StorageBuffer": 12, + "RayPayloadNVX": 5338, + "HitAttributeNVX": 5339, + "IncomingRayPayloadNVX": 5342, + "ShaderRecordBufferNVX": 5343 + } + }, + { + "Name": "Dim", + "Type": "Value", + "Values": + { + "Dim1D": 0, + "Dim2D": 1, + "Dim3D": 2, + "Cube": 3, + "Rect": 4, + "Buffer": 5, + "SubpassData": 6 + } + }, + { + "Name": "SamplerAddressingMode", + "Type": "Value", + "Values": + { + "None": 0, + "ClampToEdge": 1, + "Clamp": 2, + "Repeat": 3, + "RepeatMirrored": 4 + } + }, + { + "Name": "SamplerFilterMode", + "Type": "Value", + "Values": + { + "Nearest": 0, + "Linear": 1 + } + }, + { + "Name": "ImageFormat", + "Type": "Value", + "Values": + { + "Unknown": 0, + "Rgba32f": 1, + "Rgba16f": 2, + "R32f": 3, + "Rgba8": 4, + "Rgba8Snorm": 5, + "Rg32f": 6, + "Rg16f": 7, + "R11fG11fB10f": 8, + "R16f": 9, + "Rgba16": 10, + "Rgb10A2": 11, + "Rg16": 12, + "Rg8": 13, + "R16": 14, + "R8": 15, + "Rgba16Snorm": 16, + "Rg16Snorm": 17, + "Rg8Snorm": 18, + "R16Snorm": 19, + "R8Snorm": 20, + "Rgba32i": 21, + "Rgba16i": 22, + "Rgba8i": 23, + "R32i": 24, + "Rg32i": 25, + "Rg16i": 26, + "Rg8i": 27, + "R16i": 28, + "R8i": 29, + "Rgba32ui": 30, + "Rgba16ui": 31, + "Rgba8ui": 32, + "R32ui": 33, + "Rgb10a2ui": 34, + "Rg32ui": 35, + "Rg16ui": 36, + "Rg8ui": 37, + "R16ui": 38, + "R8ui": 39 + } + }, + { + "Name": "ImageChannelOrder", + "Type": "Value", + "Values": + { + "R": 0, + "A": 1, + "RG": 2, + "RA": 3, + "RGB": 4, + "RGBA": 5, + "BGRA": 6, + "ARGB": 7, + "Intensity": 8, + "Luminance": 9, + "Rx": 10, + "RGx": 11, + "RGBx": 12, + "Depth": 13, + "DepthStencil": 14, + "sRGB": 15, + "sRGBx": 16, + "sRGBA": 17, + "sBGRA": 18, + "ABGR": 19 + } + }, + { + "Name": "ImageChannelDataType", + "Type": "Value", + "Values": + { + "SnormInt8": 0, + "SnormInt16": 1, + "UnormInt8": 2, + "UnormInt16": 3, + "UnormShort565": 4, + "UnormShort555": 5, + "UnormInt101010": 6, + "SignedInt8": 7, + "SignedInt16": 8, + "SignedInt32": 9, + "UnsignedInt8": 10, + "UnsignedInt16": 11, + "UnsignedInt32": 12, + "HalfFloat": 13, + "Float": 14, + "UnormInt24": 15, + "UnormInt101010_2": 16 + } + }, + { + "Name": "ImageOperands", + "Type": "Bit", + "Values": + { + "Bias": 0, + "Lod": 1, + "Grad": 2, + "ConstOffset": 3, + "Offset": 4, + "ConstOffsets": 5, + "Sample": 6, + "MinLod": 7, + "MakeTexelAvailableKHR": 8, + "MakeTexelVisibleKHR": 9, + "NonPrivateTexelKHR": 10, + "VolatileTexelKHR": 11 + } + }, + { + "Name": "FPFastMathMode", + "Type": "Bit", + "Values": + { + "NotNaN": 0, + "NotInf": 1, + "NSZ": 2, + "AllowRecip": 3, + "Fast": 4 + } + }, + { + "Name": "FPRoundingMode", + "Type": "Value", + "Values": + { + "RTE": 0, + "RTZ": 1, + "RTP": 2, + "RTN": 3 + } + }, + { + "Name": "LinkageType", + "Type": "Value", + "Values": + { + "Export": 0, + "Import": 1 + } + }, + { + "Name": "AccessQualifier", + "Type": "Value", + "Values": + { + "ReadOnly": 0, + "WriteOnly": 1, + "ReadWrite": 2 + } + }, + { + "Name": "FunctionParameterAttribute", + "Type": "Value", + "Values": + { + "Zext": 0, + "Sext": 1, + "ByVal": 2, + "Sret": 3, + "NoAlias": 4, + 
"NoCapture": 5, + "NoWrite": 6, + "NoReadWrite": 7 + } + }, + { + "Name": "Decoration", + "Type": "Value", + "Values": + { + "RelaxedPrecision": 0, + "SpecId": 1, + "Block": 2, + "BufferBlock": 3, + "RowMajor": 4, + "ColMajor": 5, + "ArrayStride": 6, + "MatrixStride": 7, + "GLSLShared": 8, + "GLSLPacked": 9, + "CPacked": 10, + "BuiltIn": 11, + "NoPerspective": 13, + "Flat": 14, + "Patch": 15, + "Centroid": 16, + "Sample": 17, + "Invariant": 18, + "Restrict": 19, + "Aliased": 20, + "Volatile": 21, + "Constant": 22, + "Coherent": 23, + "NonWritable": 24, + "NonReadable": 25, + "Uniform": 26, + "SaturatedConversion": 28, + "Stream": 29, + "Location": 30, + "Component": 31, + "Index": 32, + "Binding": 33, + "DescriptorSet": 34, + "Offset": 35, + "XfbBuffer": 36, + "XfbStride": 37, + "FuncParamAttr": 38, + "FPRoundingMode": 39, + "FPFastMathMode": 40, + "LinkageAttributes": 41, + "NoContraction": 42, + "InputAttachmentIndex": 43, + "Alignment": 44, + "MaxByteOffset": 45, + "AlignmentId": 46, + "MaxByteOffsetId": 47, + "ExplicitInterpAMD": 4999, + "OverrideCoverageNV": 5248, + "PassthroughNV": 5250, + "ViewportRelativeNV": 5252, + "SecondaryViewportRelativeNV": 5256, + "PerPrimitiveNV": 5271, + "PerViewNV": 5272, + "PerTaskNV": 5273, + "PerVertexNV": 5285, + "NonUniformEXT": 5300, + "HlslCounterBufferGOOGLE": 5634, + "HlslSemanticGOOGLE": 5635 + } + }, + { + "Name": "BuiltIn", + "Type": "Value", + "Values": + { + "Position": 0, + "PointSize": 1, + "ClipDistance": 3, + "CullDistance": 4, + "VertexId": 5, + "InstanceId": 6, + "PrimitiveId": 7, + "InvocationId": 8, + "Layer": 9, + "ViewportIndex": 10, + "TessLevelOuter": 11, + "TessLevelInner": 12, + "TessCoord": 13, + "PatchVertices": 14, + "FragCoord": 15, + "PointCoord": 16, + "FrontFacing": 17, + "SampleId": 18, + "SamplePosition": 19, + "SampleMask": 20, + "FragDepth": 22, + "HelperInvocation": 23, + "NumWorkgroups": 24, + "WorkgroupSize": 25, + "WorkgroupId": 26, + "LocalInvocationId": 27, + "GlobalInvocationId": 28, + "LocalInvocationIndex": 29, + "WorkDim": 30, + "GlobalSize": 31, + "EnqueuedWorkgroupSize": 32, + "GlobalOffset": 33, + "GlobalLinearId": 34, + "SubgroupSize": 36, + "SubgroupMaxSize": 37, + "NumSubgroups": 38, + "NumEnqueuedSubgroups": 39, + "SubgroupId": 40, + "SubgroupLocalInvocationId": 41, + "VertexIndex": 42, + "InstanceIndex": 43, + "SubgroupEqMask": 4416, + "SubgroupEqMaskKHR": 4416, + "SubgroupGeMask": 4417, + "SubgroupGeMaskKHR": 4417, + "SubgroupGtMask": 4418, + "SubgroupGtMaskKHR": 4418, + "SubgroupLeMask": 4419, + "SubgroupLeMaskKHR": 4419, + "SubgroupLtMask": 4420, + "SubgroupLtMaskKHR": 4420, + "BaseVertex": 4424, + "BaseInstance": 4425, + "DrawIndex": 4426, + "DeviceIndex": 4438, + "ViewIndex": 4440, + "BaryCoordNoPerspAMD": 4992, + "BaryCoordNoPerspCentroidAMD": 4993, + "BaryCoordNoPerspSampleAMD": 4994, + "BaryCoordSmoothAMD": 4995, + "BaryCoordSmoothCentroidAMD": 4996, + "BaryCoordSmoothSampleAMD": 4997, + "BaryCoordPullModelAMD": 4998, + "FragStencilRefEXT": 5014, + "ViewportMaskNV": 5253, + "SecondaryPositionNV": 5257, + "SecondaryViewportMaskNV": 5258, + "PositionPerViewNV": 5261, + "ViewportMaskPerViewNV": 5262, + "FullyCoveredEXT": 5264, + "TaskCountNV": 5274, + "PrimitiveCountNV": 5275, + "PrimitiveIndicesNV": 5276, + "ClipDistancePerViewNV": 5277, + "CullDistancePerViewNV": 5278, + "LayerPerViewNV": 5279, + "MeshViewCountNV": 5280, + "MeshViewIndicesNV": 5281, + "BaryCoordNV": 5286, + "BaryCoordNoPerspNV": 5287, + "FragmentSizeNV": 5292, + "InvocationsPerPixelNV": 5293, + "LaunchIdNVX": 5319, + 
"LaunchSizeNVX": 5320, + "WorldRayOriginNVX": 5321, + "WorldRayDirectionNVX": 5322, + "ObjectRayOriginNVX": 5323, + "ObjectRayDirectionNVX": 5324, + "RayTminNVX": 5325, + "RayTmaxNVX": 5326, + "InstanceCustomIndexNVX": 5327, + "ObjectToWorldNVX": 5330, + "WorldToObjectNVX": 5331, + "HitTNVX": 5332, + "HitKindNVX": 5333 + } + }, + { + "Name": "SelectionControl", + "Type": "Bit", + "Values": + { + "Flatten": 0, + "DontFlatten": 1 + } + }, + { + "Name": "LoopControl", + "Type": "Bit", + "Values": + { + "Unroll": 0, + "DontUnroll": 1, + "DependencyInfinite": 2, + "DependencyLength": 3 + } + }, + { + "Name": "FunctionControl", + "Type": "Bit", + "Values": + { + "Inline": 0, + "DontInline": 1, + "Pure": 2, + "Const": 3 + } + }, + { + "Name": "MemorySemantics", + "Type": "Bit", + "Values": + { + "Acquire": 1, + "Release": 2, + "AcquireRelease": 3, + "SequentiallyConsistent": 4, + "UniformMemory": 6, + "SubgroupMemory": 7, + "WorkgroupMemory": 8, + "CrossWorkgroupMemory": 9, + "AtomicCounterMemory": 10, + "ImageMemory": 11, + "OutputMemoryKHR": 12, + "MakeAvailableKHR": 13, + "MakeVisibleKHR": 14 + } + }, + { + "Name": "MemoryAccess", + "Type": "Bit", + "Values": + { + "Volatile": 0, + "Aligned": 1, + "Nontemporal": 2, + "MakePointerAvailableKHR": 3, + "MakePointerVisibleKHR": 4, + "NonPrivatePointerKHR": 5 + } + }, + { + "Name": "Scope", + "Type": "Value", + "Values": + { + "CrossDevice": 0, + "Device": 1, + "Workgroup": 2, + "Subgroup": 3, + "Invocation": 4, + "QueueFamilyKHR": 5 + } + }, + { + "Name": "GroupOperation", + "Type": "Value", + "Values": + { + "Reduce": 0, + "InclusiveScan": 1, + "ExclusiveScan": 2, + "ClusteredReduce": 3, + "PartitionedReduceNV": 6, + "PartitionedInclusiveScanNV": 7, + "PartitionedExclusiveScanNV": 8 + } + }, + { + "Name": "KernelEnqueueFlags", + "Type": "Value", + "Values": + { + "NoWait": 0, + "WaitKernel": 1, + "WaitWorkGroup": 2 + } + }, + { + "Name": "KernelProfilingInfo", + "Type": "Bit", + "Values": + { + "CmdExecTime": 0 + } + }, + { + "Name": "Capability", + "Type": "Value", + "Values": + { + "Matrix": 0, + "Shader": 1, + "Geometry": 2, + "Tessellation": 3, + "Addresses": 4, + "Linkage": 5, + "Kernel": 6, + "Vector16": 7, + "Float16Buffer": 8, + "Float16": 9, + "Float64": 10, + "Int64": 11, + "Int64Atomics": 12, + "ImageBasic": 13, + "ImageReadWrite": 14, + "ImageMipmap": 15, + "Pipes": 17, + "Groups": 18, + "DeviceEnqueue": 19, + "LiteralSampler": 20, + "AtomicStorage": 21, + "Int16": 22, + "TessellationPointSize": 23, + "GeometryPointSize": 24, + "ImageGatherExtended": 25, + "StorageImageMultisample": 27, + "UniformBufferArrayDynamicIndexing": 28, + "SampledImageArrayDynamicIndexing": 29, + "StorageBufferArrayDynamicIndexing": 30, + "StorageImageArrayDynamicIndexing": 31, + "ClipDistance": 32, + "CullDistance": 33, + "ImageCubeArray": 34, + "SampleRateShading": 35, + "ImageRect": 36, + "SampledRect": 37, + "GenericPointer": 38, + "Int8": 39, + "InputAttachment": 40, + "SparseResidency": 41, + "MinLod": 42, + "Sampled1D": 43, + "Image1D": 44, + "SampledCubeArray": 45, + "SampledBuffer": 46, + "ImageBuffer": 47, + "ImageMSArray": 48, + "StorageImageExtendedFormats": 49, + "ImageQuery": 50, + "DerivativeControl": 51, + "InterpolationFunction": 52, + "TransformFeedback": 53, + "GeometryStreams": 54, + "StorageImageReadWithoutFormat": 55, + "StorageImageWriteWithoutFormat": 56, + "MultiViewport": 57, + "SubgroupDispatch": 58, + "NamedBarrier": 59, + "PipeStorage": 60, + "GroupNonUniform": 61, + "GroupNonUniformVote": 62, + "GroupNonUniformArithmetic": 63, + 
"GroupNonUniformBallot": 64, + "GroupNonUniformShuffle": 65, + "GroupNonUniformShuffleRelative": 66, + "GroupNonUniformClustered": 67, + "GroupNonUniformQuad": 68, + "SubgroupBallotKHR": 4423, + "DrawParameters": 4427, + "SubgroupVoteKHR": 4431, + "StorageBuffer16BitAccess": 4433, + "StorageUniformBufferBlock16": 4433, + "StorageUniform16": 4434, + "UniformAndStorageBuffer16BitAccess": 4434, + "StoragePushConstant16": 4435, + "StorageInputOutput16": 4436, + "DeviceGroup": 4437, + "MultiView": 4439, + "VariablePointersStorageBuffer": 4441, + "VariablePointers": 4442, + "AtomicStorageOps": 4445, + "SampleMaskPostDepthCoverage": 4447, + "StorageBuffer8BitAccess": 4448, + "UniformAndStorageBuffer8BitAccess": 4449, + "StoragePushConstant8": 4450, + "Float16ImageAMD": 5008, + "ImageGatherBiasLodAMD": 5009, + "FragmentMaskAMD": 5010, + "StencilExportEXT": 5013, + "ImageReadWriteLodAMD": 5015, + "SampleMaskOverrideCoverageNV": 5249, + "GeometryShaderPassthroughNV": 5251, + "ShaderViewportIndexLayerEXT": 5254, + "ShaderViewportIndexLayerNV": 5254, + "ShaderViewportMaskNV": 5255, + "ShaderStereoViewNV": 5259, + "PerViewAttributesNV": 5260, + "FragmentFullyCoveredEXT": 5265, + "MeshShadingNV": 5266, + "ImageFootprintNV": 5282, + "FragmentBarycentricNV": 5284, + "ComputeDerivativeGroupQuadsNV": 5288, + "ShadingRateNV": 5291, + "GroupNonUniformPartitionedNV": 5297, + "ShaderNonUniformEXT": 5301, + "RuntimeDescriptorArrayEXT": 5302, + "InputAttachmentArrayDynamicIndexingEXT": 5303, + "UniformTexelBufferArrayDynamicIndexingEXT": 5304, + "StorageTexelBufferArrayDynamicIndexingEXT": 5305, + "UniformBufferArrayNonUniformIndexingEXT": 5306, + "SampledImageArrayNonUniformIndexingEXT": 5307, + "StorageBufferArrayNonUniformIndexingEXT": 5308, + "StorageImageArrayNonUniformIndexingEXT": 5309, + "InputAttachmentArrayNonUniformIndexingEXT": 5310, + "UniformTexelBufferArrayNonUniformIndexingEXT": 5311, + "StorageTexelBufferArrayNonUniformIndexingEXT": 5312, + "RaytracingNVX": 5340, + "VulkanMemoryModelKHR": 5345, + "VulkanMemoryModelDeviceScopeKHR": 5346, + "ComputeDerivativeGroupLinearNV": 5350, + "SubgroupShuffleINTEL": 5568, + "SubgroupBufferBlockIOINTEL": 5569, + "SubgroupImageBlockIOINTEL": 5570 + } + }, + { + "Name": "Op", + "Type": "Value", + "Values": + { + "OpNop": 0, + "OpUndef": 1, + "OpSourceContinued": 2, + "OpSource": 3, + "OpSourceExtension": 4, + "OpName": 5, + "OpMemberName": 6, + "OpString": 7, + "OpLine": 8, + "OpExtension": 10, + "OpExtInstImport": 11, + "OpExtInst": 12, + "OpMemoryModel": 14, + "OpEntryPoint": 15, + "OpExecutionMode": 16, + "OpCapability": 17, + "OpTypeVoid": 19, + "OpTypeBool": 20, + "OpTypeInt": 21, + "OpTypeFloat": 22, + "OpTypeVector": 23, + "OpTypeMatrix": 24, + "OpTypeImage": 25, + "OpTypeSampler": 26, + "OpTypeSampledImage": 27, + "OpTypeArray": 28, + "OpTypeRuntimeArray": 29, + "OpTypeStruct": 30, + "OpTypeOpaque": 31, + "OpTypePointer": 32, + "OpTypeFunction": 33, + "OpTypeEvent": 34, + "OpTypeDeviceEvent": 35, + "OpTypeReserveId": 36, + "OpTypeQueue": 37, + "OpTypePipe": 38, + "OpTypeForwardPointer": 39, + "OpConstantTrue": 41, + "OpConstantFalse": 42, + "OpConstant": 43, + "OpConstantComposite": 44, + "OpConstantSampler": 45, + "OpConstantNull": 46, + "OpSpecConstantTrue": 48, + "OpSpecConstantFalse": 49, + "OpSpecConstant": 50, + "OpSpecConstantComposite": 51, + "OpSpecConstantOp": 52, + "OpFunction": 54, + "OpFunctionParameter": 55, + "OpFunctionEnd": 56, + "OpFunctionCall": 57, + "OpVariable": 59, + "OpImageTexelPointer": 60, + "OpLoad": 61, + "OpStore": 62, + 
"OpCopyMemory": 63, + "OpCopyMemorySized": 64, + "OpAccessChain": 65, + "OpInBoundsAccessChain": 66, + "OpPtrAccessChain": 67, + "OpArrayLength": 68, + "OpGenericPtrMemSemantics": 69, + "OpInBoundsPtrAccessChain": 70, + "OpDecorate": 71, + "OpMemberDecorate": 72, + "OpDecorationGroup": 73, + "OpGroupDecorate": 74, + "OpGroupMemberDecorate": 75, + "OpVectorExtractDynamic": 77, + "OpVectorInsertDynamic": 78, + "OpVectorShuffle": 79, + "OpCompositeConstruct": 80, + "OpCompositeExtract": 81, + "OpCompositeInsert": 82, + "OpCopyObject": 83, + "OpTranspose": 84, + "OpSampledImage": 86, + "OpImageSampleImplicitLod": 87, + "OpImageSampleExplicitLod": 88, + "OpImageSampleDrefImplicitLod": 89, + "OpImageSampleDrefExplicitLod": 90, + "OpImageSampleProjImplicitLod": 91, + "OpImageSampleProjExplicitLod": 92, + "OpImageSampleProjDrefImplicitLod": 93, + "OpImageSampleProjDrefExplicitLod": 94, + "OpImageFetch": 95, + "OpImageGather": 96, + "OpImageDrefGather": 97, + "OpImageRead": 98, + "OpImageWrite": 99, + "OpImage": 100, + "OpImageQueryFormat": 101, + "OpImageQueryOrder": 102, + "OpImageQuerySizeLod": 103, + "OpImageQuerySize": 104, + "OpImageQueryLod": 105, + "OpImageQueryLevels": 106, + "OpImageQuerySamples": 107, + "OpConvertFToU": 109, + "OpConvertFToS": 110, + "OpConvertSToF": 111, + "OpConvertUToF": 112, + "OpUConvert": 113, + "OpSConvert": 114, + "OpFConvert": 115, + "OpQuantizeToF16": 116, + "OpConvertPtrToU": 117, + "OpSatConvertSToU": 118, + "OpSatConvertUToS": 119, + "OpConvertUToPtr": 120, + "OpPtrCastToGeneric": 121, + "OpGenericCastToPtr": 122, + "OpGenericCastToPtrExplicit": 123, + "OpBitcast": 124, + "OpSNegate": 126, + "OpFNegate": 127, + "OpIAdd": 128, + "OpFAdd": 129, + "OpISub": 130, + "OpFSub": 131, + "OpIMul": 132, + "OpFMul": 133, + "OpUDiv": 134, + "OpSDiv": 135, + "OpFDiv": 136, + "OpUMod": 137, + "OpSRem": 138, + "OpSMod": 139, + "OpFRem": 140, + "OpFMod": 141, + "OpVectorTimesScalar": 142, + "OpMatrixTimesScalar": 143, + "OpVectorTimesMatrix": 144, + "OpMatrixTimesVector": 145, + "OpMatrixTimesMatrix": 146, + "OpOuterProduct": 147, + "OpDot": 148, + "OpIAddCarry": 149, + "OpISubBorrow": 150, + "OpUMulExtended": 151, + "OpSMulExtended": 152, + "OpAny": 154, + "OpAll": 155, + "OpIsNan": 156, + "OpIsInf": 157, + "OpIsFinite": 158, + "OpIsNormal": 159, + "OpSignBitSet": 160, + "OpLessOrGreater": 161, + "OpOrdered": 162, + "OpUnordered": 163, + "OpLogicalEqual": 164, + "OpLogicalNotEqual": 165, + "OpLogicalOr": 166, + "OpLogicalAnd": 167, + "OpLogicalNot": 168, + "OpSelect": 169, + "OpIEqual": 170, + "OpINotEqual": 171, + "OpUGreaterThan": 172, + "OpSGreaterThan": 173, + "OpUGreaterThanEqual": 174, + "OpSGreaterThanEqual": 175, + "OpULessThan": 176, + "OpSLessThan": 177, + "OpULessThanEqual": 178, + "OpSLessThanEqual": 179, + "OpFOrdEqual": 180, + "OpFUnordEqual": 181, + "OpFOrdNotEqual": 182, + "OpFUnordNotEqual": 183, + "OpFOrdLessThan": 184, + "OpFUnordLessThan": 185, + "OpFOrdGreaterThan": 186, + "OpFUnordGreaterThan": 187, + "OpFOrdLessThanEqual": 188, + "OpFUnordLessThanEqual": 189, + "OpFOrdGreaterThanEqual": 190, + "OpFUnordGreaterThanEqual": 191, + "OpShiftRightLogical": 194, + "OpShiftRightArithmetic": 195, + "OpShiftLeftLogical": 196, + "OpBitwiseOr": 197, + "OpBitwiseXor": 198, + "OpBitwiseAnd": 199, + "OpNot": 200, + "OpBitFieldInsert": 201, + "OpBitFieldSExtract": 202, + "OpBitFieldUExtract": 203, + "OpBitReverse": 204, + "OpBitCount": 205, + "OpDPdx": 207, + "OpDPdy": 208, + "OpFwidth": 209, + "OpDPdxFine": 210, + "OpDPdyFine": 211, + "OpFwidthFine": 212, + 
"OpDPdxCoarse": 213, + "OpDPdyCoarse": 214, + "OpFwidthCoarse": 215, + "OpEmitVertex": 218, + "OpEndPrimitive": 219, + "OpEmitStreamVertex": 220, + "OpEndStreamPrimitive": 221, + "OpControlBarrier": 224, + "OpMemoryBarrier": 225, + "OpAtomicLoad": 227, + "OpAtomicStore": 228, + "OpAtomicExchange": 229, + "OpAtomicCompareExchange": 230, + "OpAtomicCompareExchangeWeak": 231, + "OpAtomicIIncrement": 232, + "OpAtomicIDecrement": 233, + "OpAtomicIAdd": 234, + "OpAtomicISub": 235, + "OpAtomicSMin": 236, + "OpAtomicUMin": 237, + "OpAtomicSMax": 238, + "OpAtomicUMax": 239, + "OpAtomicAnd": 240, + "OpAtomicOr": 241, + "OpAtomicXor": 242, + "OpPhi": 245, + "OpLoopMerge": 246, + "OpSelectionMerge": 247, + "OpLabel": 248, + "OpBranch": 249, + "OpBranchConditional": 250, + "OpSwitch": 251, + "OpKill": 252, + "OpReturn": 253, + "OpReturnValue": 254, + "OpUnreachable": 255, + "OpLifetimeStart": 256, + "OpLifetimeStop": 257, + "OpGroupAsyncCopy": 259, + "OpGroupWaitEvents": 260, + "OpGroupAll": 261, + "OpGroupAny": 262, + "OpGroupBroadcast": 263, + "OpGroupIAdd": 264, + "OpGroupFAdd": 265, + "OpGroupFMin": 266, + "OpGroupUMin": 267, + "OpGroupSMin": 268, + "OpGroupFMax": 269, + "OpGroupUMax": 270, + "OpGroupSMax": 271, + "OpReadPipe": 274, + "OpWritePipe": 275, + "OpReservedReadPipe": 276, + "OpReservedWritePipe": 277, + "OpReserveReadPipePackets": 278, + "OpReserveWritePipePackets": 279, + "OpCommitReadPipe": 280, + "OpCommitWritePipe": 281, + "OpIsValidReserveId": 282, + "OpGetNumPipePackets": 283, + "OpGetMaxPipePackets": 284, + "OpGroupReserveReadPipePackets": 285, + "OpGroupReserveWritePipePackets": 286, + "OpGroupCommitReadPipe": 287, + "OpGroupCommitWritePipe": 288, + "OpEnqueueMarker": 291, + "OpEnqueueKernel": 292, + "OpGetKernelNDrangeSubGroupCount": 293, + "OpGetKernelNDrangeMaxSubGroupSize": 294, + "OpGetKernelWorkGroupSize": 295, + "OpGetKernelPreferredWorkGroupSizeMultiple": 296, + "OpRetainEvent": 297, + "OpReleaseEvent": 298, + "OpCreateUserEvent": 299, + "OpIsValidEvent": 300, + "OpSetUserEventStatus": 301, + "OpCaptureEventProfilingInfo": 302, + "OpGetDefaultQueue": 303, + "OpBuildNDRange": 304, + "OpImageSparseSampleImplicitLod": 305, + "OpImageSparseSampleExplicitLod": 306, + "OpImageSparseSampleDrefImplicitLod": 307, + "OpImageSparseSampleDrefExplicitLod": 308, + "OpImageSparseSampleProjImplicitLod": 309, + "OpImageSparseSampleProjExplicitLod": 310, + "OpImageSparseSampleProjDrefImplicitLod": 311, + "OpImageSparseSampleProjDrefExplicitLod": 312, + "OpImageSparseFetch": 313, + "OpImageSparseGather": 314, + "OpImageSparseDrefGather": 315, + "OpImageSparseTexelsResident": 316, + "OpNoLine": 317, + "OpAtomicFlagTestAndSet": 318, + "OpAtomicFlagClear": 319, + "OpImageSparseRead": 320, + "OpSizeOf": 321, + "OpTypePipeStorage": 322, + "OpConstantPipeStorage": 323, + "OpCreatePipeFromPipeStorage": 324, + "OpGetKernelLocalSizeForSubgroupCount": 325, + "OpGetKernelMaxNumSubgroups": 326, + "OpTypeNamedBarrier": 327, + "OpNamedBarrierInitialize": 328, + "OpMemoryNamedBarrier": 329, + "OpModuleProcessed": 330, + "OpExecutionModeId": 331, + "OpDecorateId": 332, + "OpGroupNonUniformElect": 333, + "OpGroupNonUniformAll": 334, + "OpGroupNonUniformAny": 335, + "OpGroupNonUniformAllEqual": 336, + "OpGroupNonUniformBroadcast": 337, + "OpGroupNonUniformBroadcastFirst": 338, + "OpGroupNonUniformBallot": 339, + "OpGroupNonUniformInverseBallot": 340, + "OpGroupNonUniformBallotBitExtract": 341, + "OpGroupNonUniformBallotBitCount": 342, + "OpGroupNonUniformBallotFindLSB": 343, + 
"OpGroupNonUniformBallotFindMSB": 344, + "OpGroupNonUniformShuffle": 345, + "OpGroupNonUniformShuffleXor": 346, + "OpGroupNonUniformShuffleUp": 347, + "OpGroupNonUniformShuffleDown": 348, + "OpGroupNonUniformIAdd": 349, + "OpGroupNonUniformFAdd": 350, + "OpGroupNonUniformIMul": 351, + "OpGroupNonUniformFMul": 352, + "OpGroupNonUniformSMin": 353, + "OpGroupNonUniformUMin": 354, + "OpGroupNonUniformFMin": 355, + "OpGroupNonUniformSMax": 356, + "OpGroupNonUniformUMax": 357, + "OpGroupNonUniformFMax": 358, + "OpGroupNonUniformBitwiseAnd": 359, + "OpGroupNonUniformBitwiseOr": 360, + "OpGroupNonUniformBitwiseXor": 361, + "OpGroupNonUniformLogicalAnd": 362, + "OpGroupNonUniformLogicalOr": 363, + "OpGroupNonUniformLogicalXor": 364, + "OpGroupNonUniformQuadBroadcast": 365, + "OpGroupNonUniformQuadSwap": 366, + "OpSubgroupBallotKHR": 4421, + "OpSubgroupFirstInvocationKHR": 4422, + "OpSubgroupAllKHR": 4428, + "OpSubgroupAnyKHR": 4429, + "OpSubgroupAllEqualKHR": 4430, + "OpSubgroupReadInvocationKHR": 4432, + "OpGroupIAddNonUniformAMD": 5000, + "OpGroupFAddNonUniformAMD": 5001, + "OpGroupFMinNonUniformAMD": 5002, + "OpGroupUMinNonUniformAMD": 5003, + "OpGroupSMinNonUniformAMD": 5004, + "OpGroupFMaxNonUniformAMD": 5005, + "OpGroupUMaxNonUniformAMD": 5006, + "OpGroupSMaxNonUniformAMD": 5007, + "OpFragmentMaskFetchAMD": 5011, + "OpFragmentFetchAMD": 5012, + "OpImageSampleFootprintNV": 5283, + "OpGroupNonUniformPartitionNV": 5296, + "OpWritePackedPrimitiveIndices4x8NV": 5299, + "OpReportIntersectionNVX": 5334, + "OpIgnoreIntersectionNVX": 5335, + "OpTerminateRayNVX": 5336, + "OpTraceNVX": 5337, + "OpTypeAccelerationStructureNVX": 5341, + "OpSubgroupShuffleINTEL": 5571, + "OpSubgroupShuffleDownINTEL": 5572, + "OpSubgroupShuffleUpINTEL": 5573, + "OpSubgroupShuffleXorINTEL": 5574, + "OpSubgroupBlockReadINTEL": 5575, + "OpSubgroupBlockWriteINTEL": 5576, + "OpSubgroupImageBlockReadINTEL": 5577, + "OpSubgroupImageBlockWriteINTEL": 5578, + "OpDecorateStringGOOGLE": 5632, + "OpMemberDecorateStringGOOGLE": 5633 + } + } + ] + } +} + diff --git a/src/refresh/vkpt/include/vulkan/spirv.lua b/src/refresh/vkpt/include/vulkan/spirv.lua new file mode 100644 index 0000000..6cc236e --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/spirv.lua @@ -0,0 +1,1133 @@ +-- Copyright (c) 2014-2018 The Khronos Group Inc. +-- +-- Permission is hereby granted, free of charge, to any person obtaining a copy +-- of this software and/or associated documentation files (the "Materials"), +-- to deal in the Materials without restriction, including without limitation +-- the rights to use, copy, modify, merge, publish, distribute, sublicense, +-- and/or sell copies of the Materials, and to permit persons to whom the +-- Materials are furnished to do so, subject to the following conditions: +-- +-- The above copyright notice and this permission notice shall be included in +-- all copies or substantial portions of the Materials. +-- +-- MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +-- STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +-- HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +-- +-- THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +-- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +-- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +-- THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +-- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +-- FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +-- IN THE MATERIALS. + +-- This header is automatically generated by the same tool that creates +-- the Binary Section of the SPIR-V specification. + +-- Enumeration tokens for SPIR-V, in various styles: +-- C, C++, C++11, JSON, Lua, Python +-- +-- - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +-- - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +-- - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +-- - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +-- - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +-- +-- Some tokens act like mask values, which can be OR'd together, +-- while others are mutually exclusive. The mask-like ones have +-- "Mask" in their name, and a parallel enum that has the shift +-- amount (1 << x) for each corresponding enumerant. + +spv = { + MagicNumber = 0x07230203, + Version = 0x00010300, + Revision = 1, + OpCodeMask = 0xffff, + WordCountShift = 16, + + SourceLanguage = { + Unknown = 0, + ESSL = 1, + GLSL = 2, + OpenCL_C = 3, + OpenCL_CPP = 4, + HLSL = 5, + }, + + ExecutionModel = { + Vertex = 0, + TessellationControl = 1, + TessellationEvaluation = 2, + Geometry = 3, + Fragment = 4, + GLCompute = 5, + Kernel = 6, + TaskNV = 5267, + MeshNV = 5268, + RayGenerationNVX = 5313, + IntersectionNVX = 5314, + AnyHitNVX = 5315, + ClosestHitNVX = 5316, + MissNVX = 5317, + CallableNVX = 5318, + }, + + AddressingModel = { + Logical = 0, + Physical32 = 1, + Physical64 = 2, + }, + + MemoryModel = { + Simple = 0, + GLSL450 = 1, + OpenCL = 2, + VulkanKHR = 3, + }, + + ExecutionMode = { + Invocations = 0, + SpacingEqual = 1, + SpacingFractionalEven = 2, + SpacingFractionalOdd = 3, + VertexOrderCw = 4, + VertexOrderCcw = 5, + PixelCenterInteger = 6, + OriginUpperLeft = 7, + OriginLowerLeft = 8, + EarlyFragmentTests = 9, + PointMode = 10, + Xfb = 11, + DepthReplacing = 12, + DepthGreater = 14, + DepthLess = 15, + DepthUnchanged = 16, + LocalSize = 17, + LocalSizeHint = 18, + InputPoints = 19, + InputLines = 20, + InputLinesAdjacency = 21, + Triangles = 22, + InputTrianglesAdjacency = 23, + Quads = 24, + Isolines = 25, + OutputVertices = 26, + OutputPoints = 27, + OutputLineStrip = 28, + OutputTriangleStrip = 29, + VecTypeHint = 30, + ContractionOff = 31, + Initializer = 33, + Finalizer = 34, + SubgroupSize = 35, + SubgroupsPerWorkgroup = 36, + SubgroupsPerWorkgroupId = 37, + LocalSizeId = 38, + LocalSizeHintId = 39, + PostDepthCoverage = 4446, + StencilRefReplacingEXT = 5027, + OutputLinesNV = 5269, + OutputPrimitivesNV = 5270, + DerivativeGroupQuadsNV = 5289, + DerivativeGroupLinearNV = 5290, + OutputTrianglesNV = 5298, + }, + + StorageClass = { + UniformConstant = 0, + Input = 1, + Uniform = 2, + Output = 3, + Workgroup = 4, + CrossWorkgroup = 5, + Private = 6, + Function = 7, + Generic = 8, + PushConstant = 9, + AtomicCounter = 10, + Image = 11, + StorageBuffer = 12, + RayPayloadNVX = 5338, + HitAttributeNVX = 5339, + IncomingRayPayloadNVX = 5342, + ShaderRecordBufferNVX = 5343, + }, + + Dim = { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + Cube = 3, + Rect = 4, + Buffer = 5, + SubpassData = 6, + }, + + SamplerAddressingMode = { + None = 0, + ClampToEdge = 1, + Clamp = 2, + Repeat = 3, + RepeatMirrored = 4, + }, + + 
SamplerFilterMode = { + Nearest = 0, + Linear = 1, + }, + + ImageFormat = { + Unknown = 0, + Rgba32f = 1, + Rgba16f = 2, + R32f = 3, + Rgba8 = 4, + Rgba8Snorm = 5, + Rg32f = 6, + Rg16f = 7, + R11fG11fB10f = 8, + R16f = 9, + Rgba16 = 10, + Rgb10A2 = 11, + Rg16 = 12, + Rg8 = 13, + R16 = 14, + R8 = 15, + Rgba16Snorm = 16, + Rg16Snorm = 17, + Rg8Snorm = 18, + R16Snorm = 19, + R8Snorm = 20, + Rgba32i = 21, + Rgba16i = 22, + Rgba8i = 23, + R32i = 24, + Rg32i = 25, + Rg16i = 26, + Rg8i = 27, + R16i = 28, + R8i = 29, + Rgba32ui = 30, + Rgba16ui = 31, + Rgba8ui = 32, + R32ui = 33, + Rgb10a2ui = 34, + Rg32ui = 35, + Rg16ui = 36, + Rg8ui = 37, + R16ui = 38, + R8ui = 39, + }, + + ImageChannelOrder = { + R = 0, + A = 1, + RG = 2, + RA = 3, + RGB = 4, + RGBA = 5, + BGRA = 6, + ARGB = 7, + Intensity = 8, + Luminance = 9, + Rx = 10, + RGx = 11, + RGBx = 12, + Depth = 13, + DepthStencil = 14, + sRGB = 15, + sRGBx = 16, + sRGBA = 17, + sBGRA = 18, + ABGR = 19, + }, + + ImageChannelDataType = { + SnormInt8 = 0, + SnormInt16 = 1, + UnormInt8 = 2, + UnormInt16 = 3, + UnormShort565 = 4, + UnormShort555 = 5, + UnormInt101010 = 6, + SignedInt8 = 7, + SignedInt16 = 8, + SignedInt32 = 9, + UnsignedInt8 = 10, + UnsignedInt16 = 11, + UnsignedInt32 = 12, + HalfFloat = 13, + Float = 14, + UnormInt24 = 15, + UnormInt101010_2 = 16, + }, + + ImageOperandsShift = { + Bias = 0, + Lod = 1, + Grad = 2, + ConstOffset = 3, + Offset = 4, + ConstOffsets = 5, + Sample = 6, + MinLod = 7, + MakeTexelAvailableKHR = 8, + MakeTexelVisibleKHR = 9, + NonPrivateTexelKHR = 10, + VolatileTexelKHR = 11, + }, + + ImageOperandsMask = { + MaskNone = 0, + Bias = 0x00000001, + Lod = 0x00000002, + Grad = 0x00000004, + ConstOffset = 0x00000008, + Offset = 0x00000010, + ConstOffsets = 0x00000020, + Sample = 0x00000040, + MinLod = 0x00000080, + MakeTexelAvailableKHR = 0x00000100, + MakeTexelVisibleKHR = 0x00000200, + NonPrivateTexelKHR = 0x00000400, + VolatileTexelKHR = 0x00000800, + }, + + FPFastMathModeShift = { + NotNaN = 0, + NotInf = 1, + NSZ = 2, + AllowRecip = 3, + Fast = 4, + }, + + FPFastMathModeMask = { + MaskNone = 0, + NotNaN = 0x00000001, + NotInf = 0x00000002, + NSZ = 0x00000004, + AllowRecip = 0x00000008, + Fast = 0x00000010, + }, + + FPRoundingMode = { + RTE = 0, + RTZ = 1, + RTP = 2, + RTN = 3, + }, + + LinkageType = { + Export = 0, + Import = 1, + }, + + AccessQualifier = { + ReadOnly = 0, + WriteOnly = 1, + ReadWrite = 2, + }, + + FunctionParameterAttribute = { + Zext = 0, + Sext = 1, + ByVal = 2, + Sret = 3, + NoAlias = 4, + NoCapture = 5, + NoWrite = 6, + NoReadWrite = 7, + }, + + Decoration = { + RelaxedPrecision = 0, + SpecId = 1, + Block = 2, + BufferBlock = 3, + RowMajor = 4, + ColMajor = 5, + ArrayStride = 6, + MatrixStride = 7, + GLSLShared = 8, + GLSLPacked = 9, + CPacked = 10, + BuiltIn = 11, + NoPerspective = 13, + Flat = 14, + Patch = 15, + Centroid = 16, + Sample = 17, + Invariant = 18, + Restrict = 19, + Aliased = 20, + Volatile = 21, + Constant = 22, + Coherent = 23, + NonWritable = 24, + NonReadable = 25, + Uniform = 26, + SaturatedConversion = 28, + Stream = 29, + Location = 30, + Component = 31, + Index = 32, + Binding = 33, + DescriptorSet = 34, + Offset = 35, + XfbBuffer = 36, + XfbStride = 37, + FuncParamAttr = 38, + FPRoundingMode = 39, + FPFastMathMode = 40, + LinkageAttributes = 41, + NoContraction = 42, + InputAttachmentIndex = 43, + Alignment = 44, + MaxByteOffset = 45, + AlignmentId = 46, + MaxByteOffsetId = 47, + ExplicitInterpAMD = 4999, + OverrideCoverageNV = 5248, + PassthroughNV = 5250, + 
ViewportRelativeNV = 5252, + SecondaryViewportRelativeNV = 5256, + PerPrimitiveNV = 5271, + PerViewNV = 5272, + PerTaskNV = 5273, + PerVertexNV = 5285, + NonUniformEXT = 5300, + HlslCounterBufferGOOGLE = 5634, + HlslSemanticGOOGLE = 5635, + }, + + BuiltIn = { + Position = 0, + PointSize = 1, + ClipDistance = 3, + CullDistance = 4, + VertexId = 5, + InstanceId = 6, + PrimitiveId = 7, + InvocationId = 8, + Layer = 9, + ViewportIndex = 10, + TessLevelOuter = 11, + TessLevelInner = 12, + TessCoord = 13, + PatchVertices = 14, + FragCoord = 15, + PointCoord = 16, + FrontFacing = 17, + SampleId = 18, + SamplePosition = 19, + SampleMask = 20, + FragDepth = 22, + HelperInvocation = 23, + NumWorkgroups = 24, + WorkgroupSize = 25, + WorkgroupId = 26, + LocalInvocationId = 27, + GlobalInvocationId = 28, + LocalInvocationIndex = 29, + WorkDim = 30, + GlobalSize = 31, + EnqueuedWorkgroupSize = 32, + GlobalOffset = 33, + GlobalLinearId = 34, + SubgroupSize = 36, + SubgroupMaxSize = 37, + NumSubgroups = 38, + NumEnqueuedSubgroups = 39, + SubgroupId = 40, + SubgroupLocalInvocationId = 41, + VertexIndex = 42, + InstanceIndex = 43, + SubgroupEqMask = 4416, + SubgroupEqMaskKHR = 4416, + SubgroupGeMask = 4417, + SubgroupGeMaskKHR = 4417, + SubgroupGtMask = 4418, + SubgroupGtMaskKHR = 4418, + SubgroupLeMask = 4419, + SubgroupLeMaskKHR = 4419, + SubgroupLtMask = 4420, + SubgroupLtMaskKHR = 4420, + BaseVertex = 4424, + BaseInstance = 4425, + DrawIndex = 4426, + DeviceIndex = 4438, + ViewIndex = 4440, + BaryCoordNoPerspAMD = 4992, + BaryCoordNoPerspCentroidAMD = 4993, + BaryCoordNoPerspSampleAMD = 4994, + BaryCoordSmoothAMD = 4995, + BaryCoordSmoothCentroidAMD = 4996, + BaryCoordSmoothSampleAMD = 4997, + BaryCoordPullModelAMD = 4998, + FragStencilRefEXT = 5014, + ViewportMaskNV = 5253, + SecondaryPositionNV = 5257, + SecondaryViewportMaskNV = 5258, + PositionPerViewNV = 5261, + ViewportMaskPerViewNV = 5262, + FullyCoveredEXT = 5264, + TaskCountNV = 5274, + PrimitiveCountNV = 5275, + PrimitiveIndicesNV = 5276, + ClipDistancePerViewNV = 5277, + CullDistancePerViewNV = 5278, + LayerPerViewNV = 5279, + MeshViewCountNV = 5280, + MeshViewIndicesNV = 5281, + BaryCoordNV = 5286, + BaryCoordNoPerspNV = 5287, + FragmentSizeNV = 5292, + InvocationsPerPixelNV = 5293, + LaunchIdNVX = 5319, + LaunchSizeNVX = 5320, + WorldRayOriginNVX = 5321, + WorldRayDirectionNVX = 5322, + ObjectRayOriginNVX = 5323, + ObjectRayDirectionNVX = 5324, + RayTminNVX = 5325, + RayTmaxNVX = 5326, + InstanceCustomIndexNVX = 5327, + ObjectToWorldNVX = 5330, + WorldToObjectNVX = 5331, + HitTNVX = 5332, + HitKindNVX = 5333, + }, + + SelectionControlShift = { + Flatten = 0, + DontFlatten = 1, + }, + + SelectionControlMask = { + MaskNone = 0, + Flatten = 0x00000001, + DontFlatten = 0x00000002, + }, + + LoopControlShift = { + Unroll = 0, + DontUnroll = 1, + DependencyInfinite = 2, + DependencyLength = 3, + }, + + LoopControlMask = { + MaskNone = 0, + Unroll = 0x00000001, + DontUnroll = 0x00000002, + DependencyInfinite = 0x00000004, + DependencyLength = 0x00000008, + }, + + FunctionControlShift = { + Inline = 0, + DontInline = 1, + Pure = 2, + Const = 3, + }, + + FunctionControlMask = { + MaskNone = 0, + Inline = 0x00000001, + DontInline = 0x00000002, + Pure = 0x00000004, + Const = 0x00000008, + }, + + MemorySemanticsShift = { + Acquire = 1, + Release = 2, + AcquireRelease = 3, + SequentiallyConsistent = 4, + UniformMemory = 6, + SubgroupMemory = 7, + WorkgroupMemory = 8, + CrossWorkgroupMemory = 9, + AtomicCounterMemory = 10, + ImageMemory = 11, + 
OutputMemoryKHR = 12, + MakeAvailableKHR = 13, + MakeVisibleKHR = 14, + }, + + MemorySemanticsMask = { + MaskNone = 0, + Acquire = 0x00000002, + Release = 0x00000004, + AcquireRelease = 0x00000008, + SequentiallyConsistent = 0x00000010, + UniformMemory = 0x00000040, + SubgroupMemory = 0x00000080, + WorkgroupMemory = 0x00000100, + CrossWorkgroupMemory = 0x00000200, + AtomicCounterMemory = 0x00000400, + ImageMemory = 0x00000800, + OutputMemoryKHR = 0x00001000, + MakeAvailableKHR = 0x00002000, + MakeVisibleKHR = 0x00004000, + }, + + MemoryAccessShift = { + Volatile = 0, + Aligned = 1, + Nontemporal = 2, + MakePointerAvailableKHR = 3, + MakePointerVisibleKHR = 4, + NonPrivatePointerKHR = 5, + }, + + MemoryAccessMask = { + MaskNone = 0, + Volatile = 0x00000001, + Aligned = 0x00000002, + Nontemporal = 0x00000004, + MakePointerAvailableKHR = 0x00000008, + MakePointerVisibleKHR = 0x00000010, + NonPrivatePointerKHR = 0x00000020, + }, + + Scope = { + CrossDevice = 0, + Device = 1, + Workgroup = 2, + Subgroup = 3, + Invocation = 4, + QueueFamilyKHR = 5, + }, + + GroupOperation = { + Reduce = 0, + InclusiveScan = 1, + ExclusiveScan = 2, + ClusteredReduce = 3, + PartitionedReduceNV = 6, + PartitionedInclusiveScanNV = 7, + PartitionedExclusiveScanNV = 8, + }, + + KernelEnqueueFlags = { + NoWait = 0, + WaitKernel = 1, + WaitWorkGroup = 2, + }, + + KernelProfilingInfoShift = { + CmdExecTime = 0, + }, + + KernelProfilingInfoMask = { + MaskNone = 0, + CmdExecTime = 0x00000001, + }, + + Capability = { + Matrix = 0, + Shader = 1, + Geometry = 2, + Tessellation = 3, + Addresses = 4, + Linkage = 5, + Kernel = 6, + Vector16 = 7, + Float16Buffer = 8, + Float16 = 9, + Float64 = 10, + Int64 = 11, + Int64Atomics = 12, + ImageBasic = 13, + ImageReadWrite = 14, + ImageMipmap = 15, + Pipes = 17, + Groups = 18, + DeviceEnqueue = 19, + LiteralSampler = 20, + AtomicStorage = 21, + Int16 = 22, + TessellationPointSize = 23, + GeometryPointSize = 24, + ImageGatherExtended = 25, + StorageImageMultisample = 27, + UniformBufferArrayDynamicIndexing = 28, + SampledImageArrayDynamicIndexing = 29, + StorageBufferArrayDynamicIndexing = 30, + StorageImageArrayDynamicIndexing = 31, + ClipDistance = 32, + CullDistance = 33, + ImageCubeArray = 34, + SampleRateShading = 35, + ImageRect = 36, + SampledRect = 37, + GenericPointer = 38, + Int8 = 39, + InputAttachment = 40, + SparseResidency = 41, + MinLod = 42, + Sampled1D = 43, + Image1D = 44, + SampledCubeArray = 45, + SampledBuffer = 46, + ImageBuffer = 47, + ImageMSArray = 48, + StorageImageExtendedFormats = 49, + ImageQuery = 50, + DerivativeControl = 51, + InterpolationFunction = 52, + TransformFeedback = 53, + GeometryStreams = 54, + StorageImageReadWithoutFormat = 55, + StorageImageWriteWithoutFormat = 56, + MultiViewport = 57, + SubgroupDispatch = 58, + NamedBarrier = 59, + PipeStorage = 60, + GroupNonUniform = 61, + GroupNonUniformVote = 62, + GroupNonUniformArithmetic = 63, + GroupNonUniformBallot = 64, + GroupNonUniformShuffle = 65, + GroupNonUniformShuffleRelative = 66, + GroupNonUniformClustered = 67, + GroupNonUniformQuad = 68, + SubgroupBallotKHR = 4423, + DrawParameters = 4427, + SubgroupVoteKHR = 4431, + StorageBuffer16BitAccess = 4433, + StorageUniformBufferBlock16 = 4433, + StorageUniform16 = 4434, + UniformAndStorageBuffer16BitAccess = 4434, + StoragePushConstant16 = 4435, + StorageInputOutput16 = 4436, + DeviceGroup = 4437, + MultiView = 4439, + VariablePointersStorageBuffer = 4441, + VariablePointers = 4442, + AtomicStorageOps = 4445, + SampleMaskPostDepthCoverage = 
4447, + StorageBuffer8BitAccess = 4448, + UniformAndStorageBuffer8BitAccess = 4449, + StoragePushConstant8 = 4450, + Float16ImageAMD = 5008, + ImageGatherBiasLodAMD = 5009, + FragmentMaskAMD = 5010, + StencilExportEXT = 5013, + ImageReadWriteLodAMD = 5015, + SampleMaskOverrideCoverageNV = 5249, + GeometryShaderPassthroughNV = 5251, + ShaderViewportIndexLayerEXT = 5254, + ShaderViewportIndexLayerNV = 5254, + ShaderViewportMaskNV = 5255, + ShaderStereoViewNV = 5259, + PerViewAttributesNV = 5260, + FragmentFullyCoveredEXT = 5265, + MeshShadingNV = 5266, + ImageFootprintNV = 5282, + FragmentBarycentricNV = 5284, + ComputeDerivativeGroupQuadsNV = 5288, + ShadingRateNV = 5291, + GroupNonUniformPartitionedNV = 5297, + ShaderNonUniformEXT = 5301, + RuntimeDescriptorArrayEXT = 5302, + InputAttachmentArrayDynamicIndexingEXT = 5303, + UniformTexelBufferArrayDynamicIndexingEXT = 5304, + StorageTexelBufferArrayDynamicIndexingEXT = 5305, + UniformBufferArrayNonUniformIndexingEXT = 5306, + SampledImageArrayNonUniformIndexingEXT = 5307, + StorageBufferArrayNonUniformIndexingEXT = 5308, + StorageImageArrayNonUniformIndexingEXT = 5309, + InputAttachmentArrayNonUniformIndexingEXT = 5310, + UniformTexelBufferArrayNonUniformIndexingEXT = 5311, + StorageTexelBufferArrayNonUniformIndexingEXT = 5312, + RaytracingNVX = 5340, + VulkanMemoryModelKHR = 5345, + VulkanMemoryModelDeviceScopeKHR = 5346, + ComputeDerivativeGroupLinearNV = 5350, + SubgroupShuffleINTEL = 5568, + SubgroupBufferBlockIOINTEL = 5569, + SubgroupImageBlockIOINTEL = 5570, + }, + + Op = { + OpNop = 0, + OpUndef = 1, + OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + OpImageSampleProjExplicitLod = 92, + 
OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + OpGroupAsyncCopy = 259, + OpGroupWaitEvents 
= 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId = 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + OpSubgroupReadInvocationKHR = 4432, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 
5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpImageSampleFootprintNV = 5283, + OpGroupNonUniformPartitionNV = 5296, + OpWritePackedPrimitiveIndices4x8NV = 5299, + OpReportIntersectionNVX = 5334, + OpIgnoreIntersectionNVX = 5335, + OpTerminateRayNVX = 5336, + OpTraceNVX = 5337, + OpTypeAccelerationStructureNVX = 5341, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateStringGOOGLE = 5633, + }, + +} + diff --git a/src/refresh/vkpt/include/vulkan/spirv.py b/src/refresh/vkpt/include/vulkan/spirv.py new file mode 100644 index 0000000..aa7d576 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/spirv.py @@ -0,0 +1,1133 @@ +# Copyright (c) 2014-2018 The Khronos Group Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and/or associated documentation files (the "Materials"), +# to deal in the Materials without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Materials, and to permit persons to whom the +# Materials are furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Materials. +# +# MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +# STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +# HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +# +# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +# IN THE MATERIALS. + +# This header is automatically generated by the same tool that creates +# the Binary Section of the SPIR-V specification. + +# Enumeration tokens for SPIR-V, in various styles: +# C, C++, C++11, JSON, Lua, Python +# +# - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +# - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +# - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +# - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +# - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +# +# Some tokens act like mask values, which can be OR'd together, +# while others are mutually exclusive. The mask-like ones have +# "Mask" in their name, and a parallel enum that has the shift +# amount (1 << x) for each corresponding enumerant. 
+ +spv = { + 'MagicNumber' : 0x07230203, + 'Version' : 0x00010300, + 'Revision' : 1, + 'OpCodeMask' : 0xffff, + 'WordCountShift' : 16, + + 'SourceLanguage' : { + 'Unknown' : 0, + 'ESSL' : 1, + 'GLSL' : 2, + 'OpenCL_C' : 3, + 'OpenCL_CPP' : 4, + 'HLSL' : 5, + }, + + 'ExecutionModel' : { + 'Vertex' : 0, + 'TessellationControl' : 1, + 'TessellationEvaluation' : 2, + 'Geometry' : 3, + 'Fragment' : 4, + 'GLCompute' : 5, + 'Kernel' : 6, + 'TaskNV' : 5267, + 'MeshNV' : 5268, + 'RayGenerationNVX' : 5313, + 'IntersectionNVX' : 5314, + 'AnyHitNVX' : 5315, + 'ClosestHitNVX' : 5316, + 'MissNVX' : 5317, + 'CallableNVX' : 5318, + }, + + 'AddressingModel' : { + 'Logical' : 0, + 'Physical32' : 1, + 'Physical64' : 2, + }, + + 'MemoryModel' : { + 'Simple' : 0, + 'GLSL450' : 1, + 'OpenCL' : 2, + 'VulkanKHR' : 3, + }, + + 'ExecutionMode' : { + 'Invocations' : 0, + 'SpacingEqual' : 1, + 'SpacingFractionalEven' : 2, + 'SpacingFractionalOdd' : 3, + 'VertexOrderCw' : 4, + 'VertexOrderCcw' : 5, + 'PixelCenterInteger' : 6, + 'OriginUpperLeft' : 7, + 'OriginLowerLeft' : 8, + 'EarlyFragmentTests' : 9, + 'PointMode' : 10, + 'Xfb' : 11, + 'DepthReplacing' : 12, + 'DepthGreater' : 14, + 'DepthLess' : 15, + 'DepthUnchanged' : 16, + 'LocalSize' : 17, + 'LocalSizeHint' : 18, + 'InputPoints' : 19, + 'InputLines' : 20, + 'InputLinesAdjacency' : 21, + 'Triangles' : 22, + 'InputTrianglesAdjacency' : 23, + 'Quads' : 24, + 'Isolines' : 25, + 'OutputVertices' : 26, + 'OutputPoints' : 27, + 'OutputLineStrip' : 28, + 'OutputTriangleStrip' : 29, + 'VecTypeHint' : 30, + 'ContractionOff' : 31, + 'Initializer' : 33, + 'Finalizer' : 34, + 'SubgroupSize' : 35, + 'SubgroupsPerWorkgroup' : 36, + 'SubgroupsPerWorkgroupId' : 37, + 'LocalSizeId' : 38, + 'LocalSizeHintId' : 39, + 'PostDepthCoverage' : 4446, + 'StencilRefReplacingEXT' : 5027, + 'OutputLinesNV' : 5269, + 'OutputPrimitivesNV' : 5270, + 'DerivativeGroupQuadsNV' : 5289, + 'DerivativeGroupLinearNV' : 5290, + 'OutputTrianglesNV' : 5298, + }, + + 'StorageClass' : { + 'UniformConstant' : 0, + 'Input' : 1, + 'Uniform' : 2, + 'Output' : 3, + 'Workgroup' : 4, + 'CrossWorkgroup' : 5, + 'Private' : 6, + 'Function' : 7, + 'Generic' : 8, + 'PushConstant' : 9, + 'AtomicCounter' : 10, + 'Image' : 11, + 'StorageBuffer' : 12, + 'RayPayloadNVX' : 5338, + 'HitAttributeNVX' : 5339, + 'IncomingRayPayloadNVX' : 5342, + 'ShaderRecordBufferNVX' : 5343, + }, + + 'Dim' : { + 'Dim1D' : 0, + 'Dim2D' : 1, + 'Dim3D' : 2, + 'Cube' : 3, + 'Rect' : 4, + 'Buffer' : 5, + 'SubpassData' : 6, + }, + + 'SamplerAddressingMode' : { + 'None' : 0, + 'ClampToEdge' : 1, + 'Clamp' : 2, + 'Repeat' : 3, + 'RepeatMirrored' : 4, + }, + + 'SamplerFilterMode' : { + 'Nearest' : 0, + 'Linear' : 1, + }, + + 'ImageFormat' : { + 'Unknown' : 0, + 'Rgba32f' : 1, + 'Rgba16f' : 2, + 'R32f' : 3, + 'Rgba8' : 4, + 'Rgba8Snorm' : 5, + 'Rg32f' : 6, + 'Rg16f' : 7, + 'R11fG11fB10f' : 8, + 'R16f' : 9, + 'Rgba16' : 10, + 'Rgb10A2' : 11, + 'Rg16' : 12, + 'Rg8' : 13, + 'R16' : 14, + 'R8' : 15, + 'Rgba16Snorm' : 16, + 'Rg16Snorm' : 17, + 'Rg8Snorm' : 18, + 'R16Snorm' : 19, + 'R8Snorm' : 20, + 'Rgba32i' : 21, + 'Rgba16i' : 22, + 'Rgba8i' : 23, + 'R32i' : 24, + 'Rg32i' : 25, + 'Rg16i' : 26, + 'Rg8i' : 27, + 'R16i' : 28, + 'R8i' : 29, + 'Rgba32ui' : 30, + 'Rgba16ui' : 31, + 'Rgba8ui' : 32, + 'R32ui' : 33, + 'Rgb10a2ui' : 34, + 'Rg32ui' : 35, + 'Rg16ui' : 36, + 'Rg8ui' : 37, + 'R16ui' : 38, + 'R8ui' : 39, + }, + + 'ImageChannelOrder' : { + 'R' : 0, + 'A' : 1, + 'RG' : 2, + 'RA' : 3, + 'RGB' : 4, + 'RGBA' : 5, + 'BGRA' : 6, + 'ARGB' : 7, + 'Intensity' : 8, 
+ 'Luminance' : 9, + 'Rx' : 10, + 'RGx' : 11, + 'RGBx' : 12, + 'Depth' : 13, + 'DepthStencil' : 14, + 'sRGB' : 15, + 'sRGBx' : 16, + 'sRGBA' : 17, + 'sBGRA' : 18, + 'ABGR' : 19, + }, + + 'ImageChannelDataType' : { + 'SnormInt8' : 0, + 'SnormInt16' : 1, + 'UnormInt8' : 2, + 'UnormInt16' : 3, + 'UnormShort565' : 4, + 'UnormShort555' : 5, + 'UnormInt101010' : 6, + 'SignedInt8' : 7, + 'SignedInt16' : 8, + 'SignedInt32' : 9, + 'UnsignedInt8' : 10, + 'UnsignedInt16' : 11, + 'UnsignedInt32' : 12, + 'HalfFloat' : 13, + 'Float' : 14, + 'UnormInt24' : 15, + 'UnormInt101010_2' : 16, + }, + + 'ImageOperandsShift' : { + 'Bias' : 0, + 'Lod' : 1, + 'Grad' : 2, + 'ConstOffset' : 3, + 'Offset' : 4, + 'ConstOffsets' : 5, + 'Sample' : 6, + 'MinLod' : 7, + 'MakeTexelAvailableKHR' : 8, + 'MakeTexelVisibleKHR' : 9, + 'NonPrivateTexelKHR' : 10, + 'VolatileTexelKHR' : 11, + }, + + 'ImageOperandsMask' : { + 'MaskNone' : 0, + 'Bias' : 0x00000001, + 'Lod' : 0x00000002, + 'Grad' : 0x00000004, + 'ConstOffset' : 0x00000008, + 'Offset' : 0x00000010, + 'ConstOffsets' : 0x00000020, + 'Sample' : 0x00000040, + 'MinLod' : 0x00000080, + 'MakeTexelAvailableKHR' : 0x00000100, + 'MakeTexelVisibleKHR' : 0x00000200, + 'NonPrivateTexelKHR' : 0x00000400, + 'VolatileTexelKHR' : 0x00000800, + }, + + 'FPFastMathModeShift' : { + 'NotNaN' : 0, + 'NotInf' : 1, + 'NSZ' : 2, + 'AllowRecip' : 3, + 'Fast' : 4, + }, + + 'FPFastMathModeMask' : { + 'MaskNone' : 0, + 'NotNaN' : 0x00000001, + 'NotInf' : 0x00000002, + 'NSZ' : 0x00000004, + 'AllowRecip' : 0x00000008, + 'Fast' : 0x00000010, + }, + + 'FPRoundingMode' : { + 'RTE' : 0, + 'RTZ' : 1, + 'RTP' : 2, + 'RTN' : 3, + }, + + 'LinkageType' : { + 'Export' : 0, + 'Import' : 1, + }, + + 'AccessQualifier' : { + 'ReadOnly' : 0, + 'WriteOnly' : 1, + 'ReadWrite' : 2, + }, + + 'FunctionParameterAttribute' : { + 'Zext' : 0, + 'Sext' : 1, + 'ByVal' : 2, + 'Sret' : 3, + 'NoAlias' : 4, + 'NoCapture' : 5, + 'NoWrite' : 6, + 'NoReadWrite' : 7, + }, + + 'Decoration' : { + 'RelaxedPrecision' : 0, + 'SpecId' : 1, + 'Block' : 2, + 'BufferBlock' : 3, + 'RowMajor' : 4, + 'ColMajor' : 5, + 'ArrayStride' : 6, + 'MatrixStride' : 7, + 'GLSLShared' : 8, + 'GLSLPacked' : 9, + 'CPacked' : 10, + 'BuiltIn' : 11, + 'NoPerspective' : 13, + 'Flat' : 14, + 'Patch' : 15, + 'Centroid' : 16, + 'Sample' : 17, + 'Invariant' : 18, + 'Restrict' : 19, + 'Aliased' : 20, + 'Volatile' : 21, + 'Constant' : 22, + 'Coherent' : 23, + 'NonWritable' : 24, + 'NonReadable' : 25, + 'Uniform' : 26, + 'SaturatedConversion' : 28, + 'Stream' : 29, + 'Location' : 30, + 'Component' : 31, + 'Index' : 32, + 'Binding' : 33, + 'DescriptorSet' : 34, + 'Offset' : 35, + 'XfbBuffer' : 36, + 'XfbStride' : 37, + 'FuncParamAttr' : 38, + 'FPRoundingMode' : 39, + 'FPFastMathMode' : 40, + 'LinkageAttributes' : 41, + 'NoContraction' : 42, + 'InputAttachmentIndex' : 43, + 'Alignment' : 44, + 'MaxByteOffset' : 45, + 'AlignmentId' : 46, + 'MaxByteOffsetId' : 47, + 'ExplicitInterpAMD' : 4999, + 'OverrideCoverageNV' : 5248, + 'PassthroughNV' : 5250, + 'ViewportRelativeNV' : 5252, + 'SecondaryViewportRelativeNV' : 5256, + 'PerPrimitiveNV' : 5271, + 'PerViewNV' : 5272, + 'PerTaskNV' : 5273, + 'PerVertexNV' : 5285, + 'NonUniformEXT' : 5300, + 'HlslCounterBufferGOOGLE' : 5634, + 'HlslSemanticGOOGLE' : 5635, + }, + + 'BuiltIn' : { + 'Position' : 0, + 'PointSize' : 1, + 'ClipDistance' : 3, + 'CullDistance' : 4, + 'VertexId' : 5, + 'InstanceId' : 6, + 'PrimitiveId' : 7, + 'InvocationId' : 8, + 'Layer' : 9, + 'ViewportIndex' : 10, + 'TessLevelOuter' : 11, + 'TessLevelInner' : 12, + 
'TessCoord' : 13, + 'PatchVertices' : 14, + 'FragCoord' : 15, + 'PointCoord' : 16, + 'FrontFacing' : 17, + 'SampleId' : 18, + 'SamplePosition' : 19, + 'SampleMask' : 20, + 'FragDepth' : 22, + 'HelperInvocation' : 23, + 'NumWorkgroups' : 24, + 'WorkgroupSize' : 25, + 'WorkgroupId' : 26, + 'LocalInvocationId' : 27, + 'GlobalInvocationId' : 28, + 'LocalInvocationIndex' : 29, + 'WorkDim' : 30, + 'GlobalSize' : 31, + 'EnqueuedWorkgroupSize' : 32, + 'GlobalOffset' : 33, + 'GlobalLinearId' : 34, + 'SubgroupSize' : 36, + 'SubgroupMaxSize' : 37, + 'NumSubgroups' : 38, + 'NumEnqueuedSubgroups' : 39, + 'SubgroupId' : 40, + 'SubgroupLocalInvocationId' : 41, + 'VertexIndex' : 42, + 'InstanceIndex' : 43, + 'SubgroupEqMask' : 4416, + 'SubgroupEqMaskKHR' : 4416, + 'SubgroupGeMask' : 4417, + 'SubgroupGeMaskKHR' : 4417, + 'SubgroupGtMask' : 4418, + 'SubgroupGtMaskKHR' : 4418, + 'SubgroupLeMask' : 4419, + 'SubgroupLeMaskKHR' : 4419, + 'SubgroupLtMask' : 4420, + 'SubgroupLtMaskKHR' : 4420, + 'BaseVertex' : 4424, + 'BaseInstance' : 4425, + 'DrawIndex' : 4426, + 'DeviceIndex' : 4438, + 'ViewIndex' : 4440, + 'BaryCoordNoPerspAMD' : 4992, + 'BaryCoordNoPerspCentroidAMD' : 4993, + 'BaryCoordNoPerspSampleAMD' : 4994, + 'BaryCoordSmoothAMD' : 4995, + 'BaryCoordSmoothCentroidAMD' : 4996, + 'BaryCoordSmoothSampleAMD' : 4997, + 'BaryCoordPullModelAMD' : 4998, + 'FragStencilRefEXT' : 5014, + 'ViewportMaskNV' : 5253, + 'SecondaryPositionNV' : 5257, + 'SecondaryViewportMaskNV' : 5258, + 'PositionPerViewNV' : 5261, + 'ViewportMaskPerViewNV' : 5262, + 'FullyCoveredEXT' : 5264, + 'TaskCountNV' : 5274, + 'PrimitiveCountNV' : 5275, + 'PrimitiveIndicesNV' : 5276, + 'ClipDistancePerViewNV' : 5277, + 'CullDistancePerViewNV' : 5278, + 'LayerPerViewNV' : 5279, + 'MeshViewCountNV' : 5280, + 'MeshViewIndicesNV' : 5281, + 'BaryCoordNV' : 5286, + 'BaryCoordNoPerspNV' : 5287, + 'FragmentSizeNV' : 5292, + 'InvocationsPerPixelNV' : 5293, + 'LaunchIdNVX' : 5319, + 'LaunchSizeNVX' : 5320, + 'WorldRayOriginNVX' : 5321, + 'WorldRayDirectionNVX' : 5322, + 'ObjectRayOriginNVX' : 5323, + 'ObjectRayDirectionNVX' : 5324, + 'RayTminNVX' : 5325, + 'RayTmaxNVX' : 5326, + 'InstanceCustomIndexNVX' : 5327, + 'ObjectToWorldNVX' : 5330, + 'WorldToObjectNVX' : 5331, + 'HitTNVX' : 5332, + 'HitKindNVX' : 5333, + }, + + 'SelectionControlShift' : { + 'Flatten' : 0, + 'DontFlatten' : 1, + }, + + 'SelectionControlMask' : { + 'MaskNone' : 0, + 'Flatten' : 0x00000001, + 'DontFlatten' : 0x00000002, + }, + + 'LoopControlShift' : { + 'Unroll' : 0, + 'DontUnroll' : 1, + 'DependencyInfinite' : 2, + 'DependencyLength' : 3, + }, + + 'LoopControlMask' : { + 'MaskNone' : 0, + 'Unroll' : 0x00000001, + 'DontUnroll' : 0x00000002, + 'DependencyInfinite' : 0x00000004, + 'DependencyLength' : 0x00000008, + }, + + 'FunctionControlShift' : { + 'Inline' : 0, + 'DontInline' : 1, + 'Pure' : 2, + 'Const' : 3, + }, + + 'FunctionControlMask' : { + 'MaskNone' : 0, + 'Inline' : 0x00000001, + 'DontInline' : 0x00000002, + 'Pure' : 0x00000004, + 'Const' : 0x00000008, + }, + + 'MemorySemanticsShift' : { + 'Acquire' : 1, + 'Release' : 2, + 'AcquireRelease' : 3, + 'SequentiallyConsistent' : 4, + 'UniformMemory' : 6, + 'SubgroupMemory' : 7, + 'WorkgroupMemory' : 8, + 'CrossWorkgroupMemory' : 9, + 'AtomicCounterMemory' : 10, + 'ImageMemory' : 11, + 'OutputMemoryKHR' : 12, + 'MakeAvailableKHR' : 13, + 'MakeVisibleKHR' : 14, + }, + + 'MemorySemanticsMask' : { + 'MaskNone' : 0, + 'Acquire' : 0x00000002, + 'Release' : 0x00000004, + 'AcquireRelease' : 0x00000008, + 'SequentiallyConsistent' : 
0x00000010, + 'UniformMemory' : 0x00000040, + 'SubgroupMemory' : 0x00000080, + 'WorkgroupMemory' : 0x00000100, + 'CrossWorkgroupMemory' : 0x00000200, + 'AtomicCounterMemory' : 0x00000400, + 'ImageMemory' : 0x00000800, + 'OutputMemoryKHR' : 0x00001000, + 'MakeAvailableKHR' : 0x00002000, + 'MakeVisibleKHR' : 0x00004000, + }, + + 'MemoryAccessShift' : { + 'Volatile' : 0, + 'Aligned' : 1, + 'Nontemporal' : 2, + 'MakePointerAvailableKHR' : 3, + 'MakePointerVisibleKHR' : 4, + 'NonPrivatePointerKHR' : 5, + }, + + 'MemoryAccessMask' : { + 'MaskNone' : 0, + 'Volatile' : 0x00000001, + 'Aligned' : 0x00000002, + 'Nontemporal' : 0x00000004, + 'MakePointerAvailableKHR' : 0x00000008, + 'MakePointerVisibleKHR' : 0x00000010, + 'NonPrivatePointerKHR' : 0x00000020, + }, + + 'Scope' : { + 'CrossDevice' : 0, + 'Device' : 1, + 'Workgroup' : 2, + 'Subgroup' : 3, + 'Invocation' : 4, + 'QueueFamilyKHR' : 5, + }, + + 'GroupOperation' : { + 'Reduce' : 0, + 'InclusiveScan' : 1, + 'ExclusiveScan' : 2, + 'ClusteredReduce' : 3, + 'PartitionedReduceNV' : 6, + 'PartitionedInclusiveScanNV' : 7, + 'PartitionedExclusiveScanNV' : 8, + }, + + 'KernelEnqueueFlags' : { + 'NoWait' : 0, + 'WaitKernel' : 1, + 'WaitWorkGroup' : 2, + }, + + 'KernelProfilingInfoShift' : { + 'CmdExecTime' : 0, + }, + + 'KernelProfilingInfoMask' : { + 'MaskNone' : 0, + 'CmdExecTime' : 0x00000001, + }, + + 'Capability' : { + 'Matrix' : 0, + 'Shader' : 1, + 'Geometry' : 2, + 'Tessellation' : 3, + 'Addresses' : 4, + 'Linkage' : 5, + 'Kernel' : 6, + 'Vector16' : 7, + 'Float16Buffer' : 8, + 'Float16' : 9, + 'Float64' : 10, + 'Int64' : 11, + 'Int64Atomics' : 12, + 'ImageBasic' : 13, + 'ImageReadWrite' : 14, + 'ImageMipmap' : 15, + 'Pipes' : 17, + 'Groups' : 18, + 'DeviceEnqueue' : 19, + 'LiteralSampler' : 20, + 'AtomicStorage' : 21, + 'Int16' : 22, + 'TessellationPointSize' : 23, + 'GeometryPointSize' : 24, + 'ImageGatherExtended' : 25, + 'StorageImageMultisample' : 27, + 'UniformBufferArrayDynamicIndexing' : 28, + 'SampledImageArrayDynamicIndexing' : 29, + 'StorageBufferArrayDynamicIndexing' : 30, + 'StorageImageArrayDynamicIndexing' : 31, + 'ClipDistance' : 32, + 'CullDistance' : 33, + 'ImageCubeArray' : 34, + 'SampleRateShading' : 35, + 'ImageRect' : 36, + 'SampledRect' : 37, + 'GenericPointer' : 38, + 'Int8' : 39, + 'InputAttachment' : 40, + 'SparseResidency' : 41, + 'MinLod' : 42, + 'Sampled1D' : 43, + 'Image1D' : 44, + 'SampledCubeArray' : 45, + 'SampledBuffer' : 46, + 'ImageBuffer' : 47, + 'ImageMSArray' : 48, + 'StorageImageExtendedFormats' : 49, + 'ImageQuery' : 50, + 'DerivativeControl' : 51, + 'InterpolationFunction' : 52, + 'TransformFeedback' : 53, + 'GeometryStreams' : 54, + 'StorageImageReadWithoutFormat' : 55, + 'StorageImageWriteWithoutFormat' : 56, + 'MultiViewport' : 57, + 'SubgroupDispatch' : 58, + 'NamedBarrier' : 59, + 'PipeStorage' : 60, + 'GroupNonUniform' : 61, + 'GroupNonUniformVote' : 62, + 'GroupNonUniformArithmetic' : 63, + 'GroupNonUniformBallot' : 64, + 'GroupNonUniformShuffle' : 65, + 'GroupNonUniformShuffleRelative' : 66, + 'GroupNonUniformClustered' : 67, + 'GroupNonUniformQuad' : 68, + 'SubgroupBallotKHR' : 4423, + 'DrawParameters' : 4427, + 'SubgroupVoteKHR' : 4431, + 'StorageBuffer16BitAccess' : 4433, + 'StorageUniformBufferBlock16' : 4433, + 'StorageUniform16' : 4434, + 'UniformAndStorageBuffer16BitAccess' : 4434, + 'StoragePushConstant16' : 4435, + 'StorageInputOutput16' : 4436, + 'DeviceGroup' : 4437, + 'MultiView' : 4439, + 'VariablePointersStorageBuffer' : 4441, + 'VariablePointers' : 4442, + 'AtomicStorageOps' : 
4445, + 'SampleMaskPostDepthCoverage' : 4447, + 'StorageBuffer8BitAccess' : 4448, + 'UniformAndStorageBuffer8BitAccess' : 4449, + 'StoragePushConstant8' : 4450, + 'Float16ImageAMD' : 5008, + 'ImageGatherBiasLodAMD' : 5009, + 'FragmentMaskAMD' : 5010, + 'StencilExportEXT' : 5013, + 'ImageReadWriteLodAMD' : 5015, + 'SampleMaskOverrideCoverageNV' : 5249, + 'GeometryShaderPassthroughNV' : 5251, + 'ShaderViewportIndexLayerEXT' : 5254, + 'ShaderViewportIndexLayerNV' : 5254, + 'ShaderViewportMaskNV' : 5255, + 'ShaderStereoViewNV' : 5259, + 'PerViewAttributesNV' : 5260, + 'FragmentFullyCoveredEXT' : 5265, + 'MeshShadingNV' : 5266, + 'ImageFootprintNV' : 5282, + 'FragmentBarycentricNV' : 5284, + 'ComputeDerivativeGroupQuadsNV' : 5288, + 'ShadingRateNV' : 5291, + 'GroupNonUniformPartitionedNV' : 5297, + 'ShaderNonUniformEXT' : 5301, + 'RuntimeDescriptorArrayEXT' : 5302, + 'InputAttachmentArrayDynamicIndexingEXT' : 5303, + 'UniformTexelBufferArrayDynamicIndexingEXT' : 5304, + 'StorageTexelBufferArrayDynamicIndexingEXT' : 5305, + 'UniformBufferArrayNonUniformIndexingEXT' : 5306, + 'SampledImageArrayNonUniformIndexingEXT' : 5307, + 'StorageBufferArrayNonUniformIndexingEXT' : 5308, + 'StorageImageArrayNonUniformIndexingEXT' : 5309, + 'InputAttachmentArrayNonUniformIndexingEXT' : 5310, + 'UniformTexelBufferArrayNonUniformIndexingEXT' : 5311, + 'StorageTexelBufferArrayNonUniformIndexingEXT' : 5312, + 'RaytracingNVX' : 5340, + 'VulkanMemoryModelKHR' : 5345, + 'VulkanMemoryModelDeviceScopeKHR' : 5346, + 'ComputeDerivativeGroupLinearNV' : 5350, + 'SubgroupShuffleINTEL' : 5568, + 'SubgroupBufferBlockIOINTEL' : 5569, + 'SubgroupImageBlockIOINTEL' : 5570, + }, + + 'Op' : { + 'OpNop' : 0, + 'OpUndef' : 1, + 'OpSourceContinued' : 2, + 'OpSource' : 3, + 'OpSourceExtension' : 4, + 'OpName' : 5, + 'OpMemberName' : 6, + 'OpString' : 7, + 'OpLine' : 8, + 'OpExtension' : 10, + 'OpExtInstImport' : 11, + 'OpExtInst' : 12, + 'OpMemoryModel' : 14, + 'OpEntryPoint' : 15, + 'OpExecutionMode' : 16, + 'OpCapability' : 17, + 'OpTypeVoid' : 19, + 'OpTypeBool' : 20, + 'OpTypeInt' : 21, + 'OpTypeFloat' : 22, + 'OpTypeVector' : 23, + 'OpTypeMatrix' : 24, + 'OpTypeImage' : 25, + 'OpTypeSampler' : 26, + 'OpTypeSampledImage' : 27, + 'OpTypeArray' : 28, + 'OpTypeRuntimeArray' : 29, + 'OpTypeStruct' : 30, + 'OpTypeOpaque' : 31, + 'OpTypePointer' : 32, + 'OpTypeFunction' : 33, + 'OpTypeEvent' : 34, + 'OpTypeDeviceEvent' : 35, + 'OpTypeReserveId' : 36, + 'OpTypeQueue' : 37, + 'OpTypePipe' : 38, + 'OpTypeForwardPointer' : 39, + 'OpConstantTrue' : 41, + 'OpConstantFalse' : 42, + 'OpConstant' : 43, + 'OpConstantComposite' : 44, + 'OpConstantSampler' : 45, + 'OpConstantNull' : 46, + 'OpSpecConstantTrue' : 48, + 'OpSpecConstantFalse' : 49, + 'OpSpecConstant' : 50, + 'OpSpecConstantComposite' : 51, + 'OpSpecConstantOp' : 52, + 'OpFunction' : 54, + 'OpFunctionParameter' : 55, + 'OpFunctionEnd' : 56, + 'OpFunctionCall' : 57, + 'OpVariable' : 59, + 'OpImageTexelPointer' : 60, + 'OpLoad' : 61, + 'OpStore' : 62, + 'OpCopyMemory' : 63, + 'OpCopyMemorySized' : 64, + 'OpAccessChain' : 65, + 'OpInBoundsAccessChain' : 66, + 'OpPtrAccessChain' : 67, + 'OpArrayLength' : 68, + 'OpGenericPtrMemSemantics' : 69, + 'OpInBoundsPtrAccessChain' : 70, + 'OpDecorate' : 71, + 'OpMemberDecorate' : 72, + 'OpDecorationGroup' : 73, + 'OpGroupDecorate' : 74, + 'OpGroupMemberDecorate' : 75, + 'OpVectorExtractDynamic' : 77, + 'OpVectorInsertDynamic' : 78, + 'OpVectorShuffle' : 79, + 'OpCompositeConstruct' : 80, + 'OpCompositeExtract' : 81, + 'OpCompositeInsert' : 82, + 
'OpCopyObject' : 83, + 'OpTranspose' : 84, + 'OpSampledImage' : 86, + 'OpImageSampleImplicitLod' : 87, + 'OpImageSampleExplicitLod' : 88, + 'OpImageSampleDrefImplicitLod' : 89, + 'OpImageSampleDrefExplicitLod' : 90, + 'OpImageSampleProjImplicitLod' : 91, + 'OpImageSampleProjExplicitLod' : 92, + 'OpImageSampleProjDrefImplicitLod' : 93, + 'OpImageSampleProjDrefExplicitLod' : 94, + 'OpImageFetch' : 95, + 'OpImageGather' : 96, + 'OpImageDrefGather' : 97, + 'OpImageRead' : 98, + 'OpImageWrite' : 99, + 'OpImage' : 100, + 'OpImageQueryFormat' : 101, + 'OpImageQueryOrder' : 102, + 'OpImageQuerySizeLod' : 103, + 'OpImageQuerySize' : 104, + 'OpImageQueryLod' : 105, + 'OpImageQueryLevels' : 106, + 'OpImageQuerySamples' : 107, + 'OpConvertFToU' : 109, + 'OpConvertFToS' : 110, + 'OpConvertSToF' : 111, + 'OpConvertUToF' : 112, + 'OpUConvert' : 113, + 'OpSConvert' : 114, + 'OpFConvert' : 115, + 'OpQuantizeToF16' : 116, + 'OpConvertPtrToU' : 117, + 'OpSatConvertSToU' : 118, + 'OpSatConvertUToS' : 119, + 'OpConvertUToPtr' : 120, + 'OpPtrCastToGeneric' : 121, + 'OpGenericCastToPtr' : 122, + 'OpGenericCastToPtrExplicit' : 123, + 'OpBitcast' : 124, + 'OpSNegate' : 126, + 'OpFNegate' : 127, + 'OpIAdd' : 128, + 'OpFAdd' : 129, + 'OpISub' : 130, + 'OpFSub' : 131, + 'OpIMul' : 132, + 'OpFMul' : 133, + 'OpUDiv' : 134, + 'OpSDiv' : 135, + 'OpFDiv' : 136, + 'OpUMod' : 137, + 'OpSRem' : 138, + 'OpSMod' : 139, + 'OpFRem' : 140, + 'OpFMod' : 141, + 'OpVectorTimesScalar' : 142, + 'OpMatrixTimesScalar' : 143, + 'OpVectorTimesMatrix' : 144, + 'OpMatrixTimesVector' : 145, + 'OpMatrixTimesMatrix' : 146, + 'OpOuterProduct' : 147, + 'OpDot' : 148, + 'OpIAddCarry' : 149, + 'OpISubBorrow' : 150, + 'OpUMulExtended' : 151, + 'OpSMulExtended' : 152, + 'OpAny' : 154, + 'OpAll' : 155, + 'OpIsNan' : 156, + 'OpIsInf' : 157, + 'OpIsFinite' : 158, + 'OpIsNormal' : 159, + 'OpSignBitSet' : 160, + 'OpLessOrGreater' : 161, + 'OpOrdered' : 162, + 'OpUnordered' : 163, + 'OpLogicalEqual' : 164, + 'OpLogicalNotEqual' : 165, + 'OpLogicalOr' : 166, + 'OpLogicalAnd' : 167, + 'OpLogicalNot' : 168, + 'OpSelect' : 169, + 'OpIEqual' : 170, + 'OpINotEqual' : 171, + 'OpUGreaterThan' : 172, + 'OpSGreaterThan' : 173, + 'OpUGreaterThanEqual' : 174, + 'OpSGreaterThanEqual' : 175, + 'OpULessThan' : 176, + 'OpSLessThan' : 177, + 'OpULessThanEqual' : 178, + 'OpSLessThanEqual' : 179, + 'OpFOrdEqual' : 180, + 'OpFUnordEqual' : 181, + 'OpFOrdNotEqual' : 182, + 'OpFUnordNotEqual' : 183, + 'OpFOrdLessThan' : 184, + 'OpFUnordLessThan' : 185, + 'OpFOrdGreaterThan' : 186, + 'OpFUnordGreaterThan' : 187, + 'OpFOrdLessThanEqual' : 188, + 'OpFUnordLessThanEqual' : 189, + 'OpFOrdGreaterThanEqual' : 190, + 'OpFUnordGreaterThanEqual' : 191, + 'OpShiftRightLogical' : 194, + 'OpShiftRightArithmetic' : 195, + 'OpShiftLeftLogical' : 196, + 'OpBitwiseOr' : 197, + 'OpBitwiseXor' : 198, + 'OpBitwiseAnd' : 199, + 'OpNot' : 200, + 'OpBitFieldInsert' : 201, + 'OpBitFieldSExtract' : 202, + 'OpBitFieldUExtract' : 203, + 'OpBitReverse' : 204, + 'OpBitCount' : 205, + 'OpDPdx' : 207, + 'OpDPdy' : 208, + 'OpFwidth' : 209, + 'OpDPdxFine' : 210, + 'OpDPdyFine' : 211, + 'OpFwidthFine' : 212, + 'OpDPdxCoarse' : 213, + 'OpDPdyCoarse' : 214, + 'OpFwidthCoarse' : 215, + 'OpEmitVertex' : 218, + 'OpEndPrimitive' : 219, + 'OpEmitStreamVertex' : 220, + 'OpEndStreamPrimitive' : 221, + 'OpControlBarrier' : 224, + 'OpMemoryBarrier' : 225, + 'OpAtomicLoad' : 227, + 'OpAtomicStore' : 228, + 'OpAtomicExchange' : 229, + 'OpAtomicCompareExchange' : 230, + 'OpAtomicCompareExchangeWeak' : 231, + 
'OpAtomicIIncrement' : 232, + 'OpAtomicIDecrement' : 233, + 'OpAtomicIAdd' : 234, + 'OpAtomicISub' : 235, + 'OpAtomicSMin' : 236, + 'OpAtomicUMin' : 237, + 'OpAtomicSMax' : 238, + 'OpAtomicUMax' : 239, + 'OpAtomicAnd' : 240, + 'OpAtomicOr' : 241, + 'OpAtomicXor' : 242, + 'OpPhi' : 245, + 'OpLoopMerge' : 246, + 'OpSelectionMerge' : 247, + 'OpLabel' : 248, + 'OpBranch' : 249, + 'OpBranchConditional' : 250, + 'OpSwitch' : 251, + 'OpKill' : 252, + 'OpReturn' : 253, + 'OpReturnValue' : 254, + 'OpUnreachable' : 255, + 'OpLifetimeStart' : 256, + 'OpLifetimeStop' : 257, + 'OpGroupAsyncCopy' : 259, + 'OpGroupWaitEvents' : 260, + 'OpGroupAll' : 261, + 'OpGroupAny' : 262, + 'OpGroupBroadcast' : 263, + 'OpGroupIAdd' : 264, + 'OpGroupFAdd' : 265, + 'OpGroupFMin' : 266, + 'OpGroupUMin' : 267, + 'OpGroupSMin' : 268, + 'OpGroupFMax' : 269, + 'OpGroupUMax' : 270, + 'OpGroupSMax' : 271, + 'OpReadPipe' : 274, + 'OpWritePipe' : 275, + 'OpReservedReadPipe' : 276, + 'OpReservedWritePipe' : 277, + 'OpReserveReadPipePackets' : 278, + 'OpReserveWritePipePackets' : 279, + 'OpCommitReadPipe' : 280, + 'OpCommitWritePipe' : 281, + 'OpIsValidReserveId' : 282, + 'OpGetNumPipePackets' : 283, + 'OpGetMaxPipePackets' : 284, + 'OpGroupReserveReadPipePackets' : 285, + 'OpGroupReserveWritePipePackets' : 286, + 'OpGroupCommitReadPipe' : 287, + 'OpGroupCommitWritePipe' : 288, + 'OpEnqueueMarker' : 291, + 'OpEnqueueKernel' : 292, + 'OpGetKernelNDrangeSubGroupCount' : 293, + 'OpGetKernelNDrangeMaxSubGroupSize' : 294, + 'OpGetKernelWorkGroupSize' : 295, + 'OpGetKernelPreferredWorkGroupSizeMultiple' : 296, + 'OpRetainEvent' : 297, + 'OpReleaseEvent' : 298, + 'OpCreateUserEvent' : 299, + 'OpIsValidEvent' : 300, + 'OpSetUserEventStatus' : 301, + 'OpCaptureEventProfilingInfo' : 302, + 'OpGetDefaultQueue' : 303, + 'OpBuildNDRange' : 304, + 'OpImageSparseSampleImplicitLod' : 305, + 'OpImageSparseSampleExplicitLod' : 306, + 'OpImageSparseSampleDrefImplicitLod' : 307, + 'OpImageSparseSampleDrefExplicitLod' : 308, + 'OpImageSparseSampleProjImplicitLod' : 309, + 'OpImageSparseSampleProjExplicitLod' : 310, + 'OpImageSparseSampleProjDrefImplicitLod' : 311, + 'OpImageSparseSampleProjDrefExplicitLod' : 312, + 'OpImageSparseFetch' : 313, + 'OpImageSparseGather' : 314, + 'OpImageSparseDrefGather' : 315, + 'OpImageSparseTexelsResident' : 316, + 'OpNoLine' : 317, + 'OpAtomicFlagTestAndSet' : 318, + 'OpAtomicFlagClear' : 319, + 'OpImageSparseRead' : 320, + 'OpSizeOf' : 321, + 'OpTypePipeStorage' : 322, + 'OpConstantPipeStorage' : 323, + 'OpCreatePipeFromPipeStorage' : 324, + 'OpGetKernelLocalSizeForSubgroupCount' : 325, + 'OpGetKernelMaxNumSubgroups' : 326, + 'OpTypeNamedBarrier' : 327, + 'OpNamedBarrierInitialize' : 328, + 'OpMemoryNamedBarrier' : 329, + 'OpModuleProcessed' : 330, + 'OpExecutionModeId' : 331, + 'OpDecorateId' : 332, + 'OpGroupNonUniformElect' : 333, + 'OpGroupNonUniformAll' : 334, + 'OpGroupNonUniformAny' : 335, + 'OpGroupNonUniformAllEqual' : 336, + 'OpGroupNonUniformBroadcast' : 337, + 'OpGroupNonUniformBroadcastFirst' : 338, + 'OpGroupNonUniformBallot' : 339, + 'OpGroupNonUniformInverseBallot' : 340, + 'OpGroupNonUniformBallotBitExtract' : 341, + 'OpGroupNonUniformBallotBitCount' : 342, + 'OpGroupNonUniformBallotFindLSB' : 343, + 'OpGroupNonUniformBallotFindMSB' : 344, + 'OpGroupNonUniformShuffle' : 345, + 'OpGroupNonUniformShuffleXor' : 346, + 'OpGroupNonUniformShuffleUp' : 347, + 'OpGroupNonUniformShuffleDown' : 348, + 'OpGroupNonUniformIAdd' : 349, + 'OpGroupNonUniformFAdd' : 350, + 'OpGroupNonUniformIMul' : 351, + 
'OpGroupNonUniformFMul' : 352, + 'OpGroupNonUniformSMin' : 353, + 'OpGroupNonUniformUMin' : 354, + 'OpGroupNonUniformFMin' : 355, + 'OpGroupNonUniformSMax' : 356, + 'OpGroupNonUniformUMax' : 357, + 'OpGroupNonUniformFMax' : 358, + 'OpGroupNonUniformBitwiseAnd' : 359, + 'OpGroupNonUniformBitwiseOr' : 360, + 'OpGroupNonUniformBitwiseXor' : 361, + 'OpGroupNonUniformLogicalAnd' : 362, + 'OpGroupNonUniformLogicalOr' : 363, + 'OpGroupNonUniformLogicalXor' : 364, + 'OpGroupNonUniformQuadBroadcast' : 365, + 'OpGroupNonUniformQuadSwap' : 366, + 'OpSubgroupBallotKHR' : 4421, + 'OpSubgroupFirstInvocationKHR' : 4422, + 'OpSubgroupAllKHR' : 4428, + 'OpSubgroupAnyKHR' : 4429, + 'OpSubgroupAllEqualKHR' : 4430, + 'OpSubgroupReadInvocationKHR' : 4432, + 'OpGroupIAddNonUniformAMD' : 5000, + 'OpGroupFAddNonUniformAMD' : 5001, + 'OpGroupFMinNonUniformAMD' : 5002, + 'OpGroupUMinNonUniformAMD' : 5003, + 'OpGroupSMinNonUniformAMD' : 5004, + 'OpGroupFMaxNonUniformAMD' : 5005, + 'OpGroupUMaxNonUniformAMD' : 5006, + 'OpGroupSMaxNonUniformAMD' : 5007, + 'OpFragmentMaskFetchAMD' : 5011, + 'OpFragmentFetchAMD' : 5012, + 'OpImageSampleFootprintNV' : 5283, + 'OpGroupNonUniformPartitionNV' : 5296, + 'OpWritePackedPrimitiveIndices4x8NV' : 5299, + 'OpReportIntersectionNVX' : 5334, + 'OpIgnoreIntersectionNVX' : 5335, + 'OpTerminateRayNVX' : 5336, + 'OpTraceNVX' : 5337, + 'OpTypeAccelerationStructureNVX' : 5341, + 'OpSubgroupShuffleINTEL' : 5571, + 'OpSubgroupShuffleDownINTEL' : 5572, + 'OpSubgroupShuffleUpINTEL' : 5573, + 'OpSubgroupShuffleXorINTEL' : 5574, + 'OpSubgroupBlockReadINTEL' : 5575, + 'OpSubgroupBlockWriteINTEL' : 5576, + 'OpSubgroupImageBlockReadINTEL' : 5577, + 'OpSubgroupImageBlockWriteINTEL' : 5578, + 'OpDecorateStringGOOGLE' : 5632, + 'OpMemberDecorateStringGOOGLE' : 5633, + }, + +} + diff --git a/src/refresh/vkpt/include/vulkan/vk_icd.h b/src/refresh/vkpt/include/vulkan/vk_icd.h new file mode 100644 index 0000000..dbd4855 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vk_icd.h @@ -0,0 +1,170 @@ +// +// File: vk_icd.h +// +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef VKICD_H +#define VKICD_H + +#include "vulkan.h" +#include + +// Loader-ICD version negotiation API. Versions add the following features: +// Version 0 - Initial. Doesn't support vk_icdGetInstanceProcAddr +// or vk_icdNegotiateLoaderICDInterfaceVersion. +// Version 1 - Add support for vk_icdGetInstanceProcAddr. +// Version 2 - Add Loader/ICD Interface version negotiation +// via vk_icdNegotiateLoaderICDInterfaceVersion. +// Version 3 - Add ICD creation/destruction of KHR_surface objects. +// Version 4 - Add unknown physical device extension qyering via +// vk_icdGetPhysicalDeviceProcAddr. +// Version 5 - Tells ICDs that the loader is now paying attention to the +// application version of Vulkan passed into the ApplicationInfo +// structure during vkCreateInstance. 
This will tell the ICD +// that if the loader is older, it should automatically fail a +// call for any API version > 1.0. Otherwise, the loader will +// manually determine if it can support the expected version. +#define CURRENT_LOADER_ICD_INTERFACE_VERSION 5 +#define MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION 0 +#define MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION 4 +typedef VkResult(VKAPI_PTR *PFN_vkNegotiateLoaderICDInterfaceVersion)(uint32_t *pVersion); + +// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this +// file directly, it won't be found. +#ifndef PFN_GetPhysicalDeviceProcAddr +typedef PFN_vkVoidFunction(VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char *pName); +#endif + +/* + * The ICD must reserve space for a pointer for the loader's dispatch + * table, at the start of . + * The ICD must initialize this variable using the SET_LOADER_MAGIC_VALUE macro. + */ + +#define ICD_LOADER_MAGIC 0x01CDC0DE + +typedef union { + uintptr_t loaderMagic; + void *loaderData; +} VK_LOADER_DATA; + +static inline void set_loader_magic_value(void *pNewObject) { + VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject; + loader_info->loaderMagic = ICD_LOADER_MAGIC; +} + +static inline bool valid_loader_magic_value(void *pNewObject) { + const VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject; + return (loader_info->loaderMagic & 0xffffffff) == ICD_LOADER_MAGIC; +} + +/* + * Windows and Linux ICDs will treat VkSurfaceKHR as a pointer to a struct that + * contains the platform-specific connection and surface information. + */ +typedef enum { + VK_ICD_WSI_PLATFORM_MIR, + VK_ICD_WSI_PLATFORM_WAYLAND, + VK_ICD_WSI_PLATFORM_WIN32, + VK_ICD_WSI_PLATFORM_XCB, + VK_ICD_WSI_PLATFORM_XLIB, + VK_ICD_WSI_PLATFORM_ANDROID, + VK_ICD_WSI_PLATFORM_MACOS, + VK_ICD_WSI_PLATFORM_IOS, + VK_ICD_WSI_PLATFORM_DISPLAY +} VkIcdWsiPlatform; + +typedef struct { + VkIcdWsiPlatform platform; +} VkIcdSurfaceBase; + +#ifdef VK_USE_PLATFORM_MIR_KHR +typedef struct { + VkIcdSurfaceBase base; + MirConnection *connection; + MirSurface *mirSurface; +} VkIcdSurfaceMir; +#endif // VK_USE_PLATFORM_MIR_KHR + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +typedef struct { + VkIcdSurfaceBase base; + struct wl_display *display; + struct wl_surface *surface; +} VkIcdSurfaceWayland; +#endif // VK_USE_PLATFORM_WAYLAND_KHR + +#ifdef VK_USE_PLATFORM_WIN32_KHR +typedef struct { + VkIcdSurfaceBase base; + HINSTANCE hinstance; + HWND hwnd; +} VkIcdSurfaceWin32; +#endif // VK_USE_PLATFORM_WIN32_KHR + +#ifdef VK_USE_PLATFORM_XCB_KHR +typedef struct { + VkIcdSurfaceBase base; + xcb_connection_t *connection; + xcb_window_t window; +} VkIcdSurfaceXcb; +#endif // VK_USE_PLATFORM_XCB_KHR + +#ifdef VK_USE_PLATFORM_XLIB_KHR +typedef struct { + VkIcdSurfaceBase base; + Display *dpy; + Window window; +} VkIcdSurfaceXlib; +#endif // VK_USE_PLATFORM_XLIB_KHR + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +typedef struct { + VkIcdSurfaceBase base; + struct ANativeWindow *window; +} VkIcdSurfaceAndroid; +#endif // VK_USE_PLATFORM_ANDROID_KHR + +#ifdef VK_USE_PLATFORM_MACOS_MVK +typedef struct { + VkIcdSurfaceBase base; + const void *pView; +} VkIcdSurfaceMacOS; +#endif // VK_USE_PLATFORM_MACOS_MVK + +#ifdef VK_USE_PLATFORM_IOS_MVK +typedef struct { + VkIcdSurfaceBase base; + const void *pView; +} VkIcdSurfaceIOS; +#endif // VK_USE_PLATFORM_IOS_MVK + +typedef struct { + VkIcdSurfaceBase base; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + 
VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkIcdSurfaceDisplay; + +#endif // VKICD_H diff --git a/src/refresh/vkpt/include/vulkan/vk_layer.h b/src/refresh/vkpt/include/vulkan/vk_layer.h new file mode 100644 index 0000000..ce366d5 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vk_layer.h @@ -0,0 +1,195 @@ +// +// File: vk_layer.h +// +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Need to define dispatch table + * Core struct can then have ptr to dispatch table at the top + * Along with object ptrs for current and next OBJ + */ +#pragma once + +#include "vulkan.h" +#if defined(__GNUC__) && __GNUC__ >= 4 +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#else +#define VK_LAYER_EXPORT +#endif + +#define MAX_NUM_UNKNOWN_EXTS 250 + + // Loader-Layer version negotiation API. Versions add the following features: + // Versions 0/1 - Initial. Doesn't support vk_layerGetPhysicalDeviceProcAddr + // or vk_icdNegotiateLoaderLayerInterfaceVersion. + // Version 2 - Add support for vk_layerGetPhysicalDeviceProcAddr and + // vk_icdNegotiateLoaderLayerInterfaceVersion. +#define CURRENT_LOADER_LAYER_INTERFACE_VERSION 2 +#define MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION 1 + +#define VK_CURRENT_CHAIN_VERSION 1 + +// Typedef for use in the interfaces below +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName); + +// Version negotiation values +typedef enum VkNegotiateLayerStructType { + LAYER_NEGOTIATE_UNINTIALIZED = 0, + LAYER_NEGOTIATE_INTERFACE_STRUCT = 1, +} VkNegotiateLayerStructType; + +// Version negotiation structures +typedef struct VkNegotiateLayerInterface { + VkNegotiateLayerStructType sType; + void *pNext; + uint32_t loaderLayerInterfaceVersion; + PFN_vkGetInstanceProcAddr pfnGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnGetDeviceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnGetPhysicalDeviceProcAddr; +} VkNegotiateLayerInterface; + +// Version negotiation functions +typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderLayerInterfaceVersion)(VkNegotiateLayerInterface *pVersionStruct); + +// Function prototype for unknown physical device extension command +typedef VkResult(VKAPI_PTR *PFN_PhysDevExt)(VkPhysicalDevice phys_device); + +// ------------------------------------------------------------------------------------------------ +// CreateInstance and CreateDevice support structures + +/* Sub type of structure for instance and device loader ext of CreateInfo. 
+ * When sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + * or sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + * then VkLayerFunction indicates struct type pointed to by pNext + */ +typedef enum VkLayerFunction_ { + VK_LAYER_LINK_INFO = 0, + VK_LOADER_DATA_CALLBACK = 1 +} VkLayerFunction; + +typedef struct VkLayerInstanceLink_ { + struct VkLayerInstanceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnNextGetPhysicalDeviceProcAddr; +} VkLayerInstanceLink; + +/* + * When creating the device chain the loader needs to pass + * down information about it's device structure needed at + * the end of the chain. Passing the data via the + * VkLayerDeviceInfo avoids issues with finding the + * exact instance being used. + */ +typedef struct VkLayerDeviceInfo_ { + void *device_info; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; +} VkLayerDeviceInfo; + +typedef VkResult (VKAPI_PTR *PFN_vkSetInstanceLoaderData)(VkInstance instance, + void *object); +typedef VkResult (VKAPI_PTR *PFN_vkSetDeviceLoaderData)(VkDevice device, + void *object); + +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerInstanceLink *pLayerInfo; + PFN_vkSetInstanceLoaderData pfnSetInstanceLoaderData; + } u; +} VkLayerInstanceCreateInfo; + +typedef struct VkLayerDeviceLink_ { + struct VkLayerDeviceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnNextGetDeviceProcAddr; +} VkLayerDeviceLink; + +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerDeviceLink *pLayerInfo; + PFN_vkSetDeviceLoaderData pfnSetDeviceLoaderData; + } u; +} VkLayerDeviceCreateInfo; + +#ifdef __cplusplus +extern "C" { +#endif + +VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct); + +typedef enum VkChainType { + VK_CHAIN_TYPE_UNKNOWN = 0, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES = 1, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES = 2, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_VERSION = 3, +} VkChainType; + +typedef struct VkChainHeader { + VkChainType type; + uint32_t version; + uint32_t size; +} VkChainHeader; + +typedef struct VkEnumerateInstanceExtensionPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceExtensionPropertiesChain *, const char *, uint32_t *, + VkExtensionProperties *); + const struct VkEnumerateInstanceExtensionPropertiesChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties) const { + return pfnNextLayer(pNextLink, pLayerName, pPropertyCount, pProperties); + } +#endif +} VkEnumerateInstanceExtensionPropertiesChain; + +typedef struct VkEnumerateInstanceLayerPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceLayerPropertiesChain *, uint32_t *, VkLayerProperties *); + const struct VkEnumerateInstanceLayerPropertiesChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pPropertyCount, VkLayerProperties *pProperties) const { + return pfnNextLayer(pNextLink, pPropertyCount, pProperties); + } +#endif +} VkEnumerateInstanceLayerPropertiesChain; + +typedef struct VkEnumerateInstanceVersionChain { + 
VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceVersionChain *, uint32_t *); + const struct VkEnumerateInstanceVersionChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pApiVersion) const { + return pfnNextLayer(pNextLink, pApiVersion); + } +#endif +} VkEnumerateInstanceVersionChain; + +#ifdef __cplusplus +} +#endif diff --git a/src/refresh/vkpt/include/vulkan/vk_platform.h b/src/refresh/vkpt/include/vulkan/vk_platform.h new file mode 100644 index 0000000..8e21a17 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vk_platform.h @@ -0,0 +1,92 @@ +// +// File: vk_platform.h +// +/* +** Copyright (c) 2014-2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + + +#ifndef VK_PLATFORM_H_ +#define VK_PLATFORM_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif // __cplusplus + +/* +*************************************************************************************************** +* Platform-specific directives and type declarations +*************************************************************************************************** +*/ + +/* Platform-specific calling convention macros. + * + * Platforms should define these so that Vulkan clients call Vulkan commands + * with the same calling conventions that the Vulkan implementation expects. + * + * VKAPI_ATTR - Placed before the return type in function declarations. + * Useful for C++11 and GCC/Clang-style function attribute syntax. + * VKAPI_CALL - Placed after the return type in function declarations. + * Useful for MSVC-style calling convention syntax. + * VKAPI_PTR - Placed between the '(' and '*' in function pointer types. + * + * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void); + * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void); + */ +#if defined(_WIN32) + // On Windows, Vulkan commands use the stdcall convention + #define VKAPI_ATTR + #define VKAPI_CALL __stdcall + #define VKAPI_PTR VKAPI_CALL +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7 + #error "Vulkan isn't supported for the 'armeabi' NDK ABI" +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE) + // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" + // calling convention, i.e. float parameters are passed in registers. This + // is true even if the rest of the application passes floats on the stack, + // as it does by default when compiling for the armeabi-v7a NDK ABI. 
+ #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp"))) + #define VKAPI_CALL + #define VKAPI_PTR VKAPI_ATTR +#else + // On other platforms, use the default calling convention + #define VKAPI_ATTR + #define VKAPI_CALL + #define VKAPI_PTR +#endif + +#include + +#if !defined(VK_NO_STDINT_H) + #if defined(_MSC_VER) && (_MSC_VER < 1600) + typedef signed __int8 int8_t; + typedef unsigned __int8 uint8_t; + typedef signed __int16 int16_t; + typedef unsigned __int16 uint16_t; + typedef signed __int32 int32_t; + typedef unsigned __int32 uint32_t; + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; + #else + #include + #endif +#endif // !defined(VK_NO_STDINT_H) + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vk_sdk_platform.h b/src/refresh/vkpt/include/vulkan/vk_sdk_platform.h new file mode 100644 index 0000000..9fd0765 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vk_sdk_platform.h @@ -0,0 +1,69 @@ +// +// File: vk_sdk_platform.h +// +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VK_SDK_PLATFORM_H +#define VK_SDK_PLATFORM_H + +#if defined(_WIN32) +#define NOMINMAX +#ifndef __cplusplus +#undef inline +#define inline __inline +#endif // __cplusplus + +#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) +// C99: +// Microsoft didn't implement C99 in Visual Studio; but started adding it with +// VS2013. However, VS2013 still didn't have snprintf(). The following is a +// work-around (Note: The _CRT_SECURE_NO_WARNINGS macro must be set in the +// "CMakeLists.txt" file). +// NOTE: This is fixed in Visual Studio 2015. +#define snprintf _snprintf +#endif + +#define strdup _strdup + +#endif // _WIN32 + +// Check for noexcept support using clang, with fallback to Windows or GCC version numbers +#ifndef NOEXCEPT +#if defined(__clang__) +#if __has_feature(cxx_noexcept) +#define HAS_NOEXCEPT +#endif +#else +#if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46 +#define HAS_NOEXCEPT +#else +#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 && defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS +#define HAS_NOEXCEPT +#endif +#endif +#endif + +#ifdef HAS_NOEXCEPT +#define NOEXCEPT noexcept +#else +#define NOEXCEPT +#endif +#endif + +#endif // VK_SDK_PLATFORM_H diff --git a/src/refresh/vkpt/include/vulkan/vulkan-1.lib b/src/refresh/vkpt/include/vulkan/vulkan-1.lib new file mode 100644 index 0000000..89f2cd3 Binary files /dev/null and b/src/refresh/vkpt/include/vulkan/vulkan-1.lib differ diff --git a/src/refresh/vkpt/include/vulkan/vulkan.h b/src/refresh/vkpt/include/vulkan/vulkan.h new file mode 100644 index 0000000..bf35a3a --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan.h @@ -0,0 +1,79 @@ +#ifndef VULKAN_H_ +#define VULKAN_H_ 1 + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. 
+** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#include "vk_platform.h" +#include "vulkan_core.h" + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +#include "vulkan_android.h" +#endif + + +#ifdef VK_USE_PLATFORM_IOS_MVK +#include "vulkan_ios.h" +#endif + + +#ifdef VK_USE_PLATFORM_MACOS_MVK +#include "vulkan_macos.h" +#endif + + +#ifdef VK_USE_PLATFORM_MIR_KHR +#include +#include "vulkan_mir.h" +#endif + + +#ifdef VK_USE_PLATFORM_VI_NN +#include "vulkan_vi.h" +#endif + + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +#include +#include "vulkan_wayland.h" +#endif + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#include +#include "vulkan_win32.h" +#endif + + +#ifdef VK_USE_PLATFORM_XCB_KHR +#include +#include "vulkan_xcb.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_KHR +#include +#include "vulkan_xlib.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT +#include +#include +#include "vulkan_xlib_xrandr.h" +#endif + +#endif // VULKAN_H_ diff --git a/src/refresh/vkpt/include/vulkan/vulkan.hpp b/src/refresh/vkpt/include/vulkan/vulkan.hpp new file mode 100644 index 0000000..af5e795 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan.hpp @@ -0,0 +1,50675 @@ +// Copyright (c) 2015-2018 The Khronos Group Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ---- Exceptions to the Apache 2.0 License: ---- +// +// As an exception, if you use this Software to generate code and portions of +// this Software are embedded into the generated code as a result, you may +// redistribute such product without providing attribution as would otherwise +// be required by Sections 4(a), 4(b) and 4(d) of the License. +// +// In addition, if you combine or link code generated by this Software with +// software that is licensed under the GPLv2 or the LGPL v2.0 or 2.1 +// ("`Combined Software`") and if a court of competent jurisdiction determines +// that the patent provision (Section 3), the indemnity provision (Section 9) +// or other Section of the License conflicts with the conditions of the +// applicable GPL or LGPL license, you may retroactively and prospectively +// choose to deem waived or otherwise exclude such Section(s) of the License, +// but only in their entirety and only with respect to the Combined Software. +// + +// This header is generated from the Khronos Vulkan XML API Registry. 
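// Illustration only -- not part of the vendored Khronos headers. The umbrella
// vulkan.h above pulls in a platform's WSI declarations only when the matching
// VK_USE_PLATFORM_* macro is defined before inclusion. A minimal sketch,
// assuming an XCB connection and window supplied by the caller; the helper
// name make_xcb_surface is hypothetical.

#define VK_USE_PLATFORM_XCB_KHR
#include <vulkan/vulkan.h>

static VkSurfaceKHR make_xcb_surface(VkInstance instance,
                                     xcb_connection_t *connection,
                                     xcb_window_t window)
{
    VkXcbSurfaceCreateInfoKHR info = {};
    info.sType      = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
    info.connection = connection;   // same handles the loader mirrors in VkIcdSurfaceXcb above
    info.window     = window;
    VkSurfaceKHR surface = VK_NULL_HANDLE;
    if (vkCreateXcbSurfaceKHR(instance, &info, NULL, &surface) != VK_SUCCESS)
        return VK_NULL_HANDLE;
    return surface;
}

// A similarly minimal sketch of the generated C++ bindings that follow: with
// exceptions enabled (the default), the enhanced vulkan.hpp API throws the
// vk::SystemError subclasses defined further down instead of returning raw
// VkResult codes. The application name and version values are placeholders.

#include <vulkan/vulkan.hpp>
#include <iostream>

static int run_vulkan_hpp_example()
{
    try {
        vk::ApplicationInfo app("q2vkpt", 1, nullptr, 0, VK_API_VERSION_1_1);
        vk::InstanceCreateInfo ci({}, &app);
        vk::Instance instance = vk::createInstance(ci);  // throws on failure
        instance.destroy();
        return 0;
    } catch (vk::SystemError const &err) {
        std::cerr << "vk::SystemError: " << err.what() << std::endl;
        return 1;
    }
}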
+ +#ifndef VULKAN_HPP +#define VULKAN_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE +# include +# include +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#if !defined(VULKAN_HPP_ASSERT) +# include +# define VULKAN_HPP_ASSERT assert +#endif + +// includes through some other header +// this results in major(x) being resolved to gnu_dev_major(x) +// which is an expression in a constructor initializer list. +#if defined(major) + #undef major +#endif +#if defined(minor) + #undef minor +#endif + +// Windows defines MemoryBarrier which is deprecated and collides +// with the vk::MemoryBarrier struct. +#ifdef MemoryBarrier + #undef MemoryBarrier +#endif + +static_assert( VK_HEADER_VERSION == 85 , "Wrong VK_HEADER_VERSION!" ); + +// 32-bit vulkan is not typesafe for handles, so don't allow copy constructors on this platform by default. +// To enable this feature on 32-bit platforms please define VULKAN_HPP_TYPESAFE_CONVERSION +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) +# if !defined( VULKAN_HPP_TYPESAFE_CONVERSION ) +# define VULKAN_HPP_TYPESAFE_CONVERSION +# endif +#endif + +#if !defined(VULKAN_HPP_HAS_UNRESTRICTED_UNIONS) +# if defined(__clang__) +# if __has_feature(cxx_unrestricted_unions) +# define VULKAN_HPP_HAS_UNRESTRICTED_UNIONS +# endif +# elif defined(__GNUC__) +# define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +# if 40600 <= GCC_VERSION +# define VULKAN_HPP_HAS_UNRESTRICTED_UNIONS +# endif +# elif defined(_MSC_VER) +# if 1900 <= _MSC_VER +# define VULKAN_HPP_HAS_UNRESTRICTED_UNIONS +# endif +# endif +#endif + +#if !defined(VULKAN_HPP_INLINE) +# if defined(__clang___) +# if __has_attribute(always_inline) +# define VULKAN_HPP_INLINE __attribute__((always_inline)) __inline__ +# else +# define VULKAN_HPP_INLINE inline +# endif +# elif defined(__GNUC__) +# define VULKAN_HPP_INLINE __attribute__((always_inline)) __inline__ +# elif defined(_MSC_VER) +# define VULKAN_HPP_INLINE inline +# else +# define VULKAN_HPP_INLINE inline +# endif +#endif + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) +# define VULKAN_HPP_TYPESAFE_EXPLICIT +#else +# define VULKAN_HPP_TYPESAFE_EXPLICIT explicit +#endif + +#if defined(_MSC_VER) && (_MSC_VER <= 1800) +# define VULKAN_HPP_CONSTEXPR +#else +# define VULKAN_HPP_CONSTEXPR constexpr +#endif + + +#if !defined(VULKAN_HPP_NAMESPACE) +#define VULKAN_HPP_NAMESPACE vk +#endif + +#define VULKAN_HPP_STRINGIFY2(text) #text +#define VULKAN_HPP_STRINGIFY(text) VULKAN_HPP_STRINGIFY2(text) +#define VULKAN_HPP_NAMESPACE_STRING VULKAN_HPP_STRINGIFY(VULKAN_HPP_NAMESPACE) + +namespace VULKAN_HPP_NAMESPACE +{ + + template struct FlagTraits + { + enum { allFlags = 0 }; + }; + + template + class Flags + { + public: + VULKAN_HPP_CONSTEXPR Flags() + : m_mask(0) + { + } + + Flags(BitType bit) + : m_mask(static_cast(bit)) + { + } + + Flags(Flags const& rhs) + : m_mask(rhs.m_mask) + { + } + + explicit Flags(MaskType flags) + : m_mask(flags) + { + } + + Flags & operator=(Flags const& rhs) + { + m_mask = rhs.m_mask; + return *this; + } + + Flags & operator|=(Flags const& rhs) + { + m_mask |= rhs.m_mask; + return *this; + } + + Flags & operator&=(Flags const& rhs) + { + m_mask &= rhs.m_mask; + return *this; + } + + Flags & operator^=(Flags const& rhs) + { + m_mask ^= rhs.m_mask; + return *this; + 
} + + Flags operator|(Flags const& rhs) const + { + Flags result(*this); + result |= rhs; + return result; + } + + Flags operator&(Flags const& rhs) const + { + Flags result(*this); + result &= rhs; + return result; + } + + Flags operator^(Flags const& rhs) const + { + Flags result(*this); + result ^= rhs; + return result; + } + + bool operator!() const + { + return !m_mask; + } + + Flags operator~() const + { + Flags result(*this); + result.m_mask ^= FlagTraits::allFlags; + return result; + } + + bool operator==(Flags const& rhs) const + { + return m_mask == rhs.m_mask; + } + + bool operator!=(Flags const& rhs) const + { + return m_mask != rhs.m_mask; + } + + explicit operator bool() const + { + return !!m_mask; + } + + explicit operator MaskType() const + { + return m_mask; + } + + private: + MaskType m_mask; + }; + + template + Flags operator|(BitType bit, Flags const& flags) + { + return flags | bit; + } + + template + Flags operator&(BitType bit, Flags const& flags) + { + return flags & bit; + } + + template + Flags operator^(BitType bit, Flags const& flags) + { + return flags ^ bit; + } + + + template + class Optional + { + public: + Optional(RefType & reference) { m_ptr = &reference; } + Optional(RefType * ptr) { m_ptr = ptr; } + Optional(std::nullptr_t) { m_ptr = nullptr; } + + operator RefType*() const { return m_ptr; } + RefType const* operator->() const { return m_ptr; } + explicit operator bool() const { return !!m_ptr; } + + private: + RefType *m_ptr; + }; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + class ArrayProxy + { + public: + VULKAN_HPP_CONSTEXPR ArrayProxy(std::nullptr_t) + : m_count(0) + , m_ptr(nullptr) + {} + + ArrayProxy(T & ptr) + : m_count(1) + , m_ptr(&ptr) + {} + + ArrayProxy(uint32_t count, T * ptr) + : m_count(count) + , m_ptr(ptr) + {} + + template + ArrayProxy(std::array::type, N> & data) + : m_count(N) + , m_ptr(data.data()) + {} + + template + ArrayProxy(std::array::type, N> const& data) + : m_count(N) + , m_ptr(data.data()) + {} + + template ::type>> + ArrayProxy(std::vector::type, Allocator> & data) + : m_count(static_cast(data.size())) + , m_ptr(data.data()) + {} + + template ::type>> + ArrayProxy(std::vector::type, Allocator> const& data) + : m_count(static_cast(data.size())) + , m_ptr(data.data()) + {} + + ArrayProxy(std::initializer_list const& data) + : m_count(static_cast(data.end() - data.begin())) + , m_ptr(data.begin()) + {} + + const T * begin() const + { + return m_ptr; + } + + const T * end() const + { + return m_ptr + m_count; + } + + const T & front() const + { + VULKAN_HPP_ASSERT(m_count && m_ptr); + return *m_ptr; + } + + const T & back() const + { + VULKAN_HPP_ASSERT(m_count && m_ptr); + return *(m_ptr + m_count - 1); + } + + bool empty() const + { + return (m_count == 0); + } + + uint32_t size() const + { + return m_count; + } + + T * data() const + { + return m_ptr; + } + + private: + uint32_t m_count; + T * m_ptr; + }; +#endif + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + + template class UniqueHandleTraits; + + template + class UniqueHandle : public UniqueHandleTraits::deleter + { + private: + using Deleter = typename UniqueHandleTraits::deleter; + public: + explicit UniqueHandle( Type const& value = Type(), Deleter const& deleter = Deleter() ) + : Deleter( deleter) + , m_value( value ) + {} + + UniqueHandle( UniqueHandle const& ) = delete; + + UniqueHandle( UniqueHandle && other ) + : Deleter( std::move( static_cast( other ) ) ) + , m_value( other.release() ) + {} + + ~UniqueHandle() + { + if ( m_value ) this->destroy( 
m_value ); + } + + UniqueHandle & operator=( UniqueHandle const& ) = delete; + + UniqueHandle & operator=( UniqueHandle && other ) + { + reset( other.release() ); + *static_cast(this) = std::move( static_cast(other) ); + return *this; + } + + explicit operator bool() const + { + return m_value.operator bool(); + } + + Type const* operator->() const + { + return &m_value; + } + + Type * operator->() + { + return &m_value; + } + + Type const& operator*() const + { + return m_value; + } + + Type & operator*() + { + return m_value; + } + + const Type & get() const + { + return m_value; + } + + Type & get() + { + return m_value; + } + + void reset( Type const& value = Type() ) + { + if ( m_value != value ) + { + if ( m_value ) this->destroy( m_value ); + m_value = value; + } + } + + Type release() + { + Type value = m_value; + m_value = nullptr; + return value; + } + + void swap( UniqueHandle & rhs ) + { + std::swap(m_value, rhs.m_value); + std::swap(static_cast(*this), static_cast(rhs)); + } + + private: + Type m_value; + }; + + template + VULKAN_HPP_INLINE void swap( UniqueHandle & lhs, UniqueHandle & rhs ) + { + lhs.swap( rhs ); + } +#endif + + + + template struct isStructureChainValid { enum { value = false }; }; + + template + struct TypeList + { + using list = P; + using last = T; + }; + + template + struct extendCheck + { + static const bool valid = isStructureChainValid::value || extendCheck::valid; + }; + + template + struct extendCheck,X> + { + static const bool valid = isStructureChainValid::value; + }; + + template + struct extendCheck + { + static const bool valid = true; + }; + + template + class StructureChainElement + { + public: + explicit operator Element&() { return value; } + explicit operator const Element&() const { return value; } + private: + Element value; + }; + + template + class StructureChain : private StructureChainElement... + { + public: + StructureChain() + { + link(); + } + + StructureChain(StructureChain const &rhs) + { + linkAndCopy(rhs); + } + + StructureChain(StructureElements const &... elems) + { + linkAndCopyElements(elems...); + } + + StructureChain& operator=(StructureChain const &rhs) + { + linkAndCopy(rhs); + return *this; + } + + template ClassType& get() { return static_cast(*this);} + + private: + template + void link() + { + static_assert(extendCheck::valid, "The structure chain is not valid!"); + } + + template + void link() + { + static_assert(extendCheck::valid, "The structure chain is not valid!"); + X& x = static_cast(*this); + Y& y = static_cast(*this); + x.pNext = &y; + link, Y, Z...>(); + } + + template + void linkAndCopy(StructureChain const &rhs) + { + static_assert(extendCheck::valid, "The structure chain is not valid!"); + static_cast(*this) = static_cast(rhs); + } + + template + void linkAndCopy(StructureChain const &rhs) + { + static_assert(extendCheck::valid, "The structure chain is not valid!"); + X& x = static_cast(*this); + Y& y = static_cast(*this); + x = static_cast(rhs); + x.pNext = &y; + linkAndCopy, Y, Z...>(rhs); + } + + template + void linkAndCopyElements(X const &xelem) + { + static_assert(extendCheck::valid, "The structure chain is not valid!"); + static_cast(*this) = xelem; + } + + template + void linkAndCopyElements(X const &xelem, Y const &yelem, Z const &... 
zelem) + { + static_assert(extendCheck::valid, "The structure chain is not valid!"); + X& x = static_cast(*this); + Y& y = static_cast(*this); + x = xelem; + x.pNext = &y; + linkAndCopyElements, Y, Z...>(yelem, zelem...); + } + }; + + enum class Result + { + eSuccess = VK_SUCCESS, + eNotReady = VK_NOT_READY, + eTimeout = VK_TIMEOUT, + eEventSet = VK_EVENT_SET, + eEventReset = VK_EVENT_RESET, + eIncomplete = VK_INCOMPLETE, + eErrorOutOfHostMemory = VK_ERROR_OUT_OF_HOST_MEMORY, + eErrorOutOfDeviceMemory = VK_ERROR_OUT_OF_DEVICE_MEMORY, + eErrorInitializationFailed = VK_ERROR_INITIALIZATION_FAILED, + eErrorDeviceLost = VK_ERROR_DEVICE_LOST, + eErrorMemoryMapFailed = VK_ERROR_MEMORY_MAP_FAILED, + eErrorLayerNotPresent = VK_ERROR_LAYER_NOT_PRESENT, + eErrorExtensionNotPresent = VK_ERROR_EXTENSION_NOT_PRESENT, + eErrorFeatureNotPresent = VK_ERROR_FEATURE_NOT_PRESENT, + eErrorIncompatibleDriver = VK_ERROR_INCOMPATIBLE_DRIVER, + eErrorTooManyObjects = VK_ERROR_TOO_MANY_OBJECTS, + eErrorFormatNotSupported = VK_ERROR_FORMAT_NOT_SUPPORTED, + eErrorFragmentedPool = VK_ERROR_FRAGMENTED_POOL, + eErrorOutOfPoolMemory = VK_ERROR_OUT_OF_POOL_MEMORY, + eErrorOutOfPoolMemoryKHR = VK_ERROR_OUT_OF_POOL_MEMORY, + eErrorInvalidExternalHandle = VK_ERROR_INVALID_EXTERNAL_HANDLE, + eErrorInvalidExternalHandleKHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, + eErrorSurfaceLostKHR = VK_ERROR_SURFACE_LOST_KHR, + eErrorNativeWindowInUseKHR = VK_ERROR_NATIVE_WINDOW_IN_USE_KHR, + eSuboptimalKHR = VK_SUBOPTIMAL_KHR, + eErrorOutOfDateKHR = VK_ERROR_OUT_OF_DATE_KHR, + eErrorIncompatibleDisplayKHR = VK_ERROR_INCOMPATIBLE_DISPLAY_KHR, + eErrorValidationFailedEXT = VK_ERROR_VALIDATION_FAILED_EXT, + eErrorInvalidShaderNV = VK_ERROR_INVALID_SHADER_NV, + eErrorFragmentationEXT = VK_ERROR_FRAGMENTATION_EXT, + eErrorNotPermittedEXT = VK_ERROR_NOT_PERMITTED_EXT + }; + + VULKAN_HPP_INLINE std::string to_string(Result value) + { + switch (value) + { + case Result::eSuccess: return "Success"; + case Result::eNotReady: return "NotReady"; + case Result::eTimeout: return "Timeout"; + case Result::eEventSet: return "EventSet"; + case Result::eEventReset: return "EventReset"; + case Result::eIncomplete: return "Incomplete"; + case Result::eErrorOutOfHostMemory: return "ErrorOutOfHostMemory"; + case Result::eErrorOutOfDeviceMemory: return "ErrorOutOfDeviceMemory"; + case Result::eErrorInitializationFailed: return "ErrorInitializationFailed"; + case Result::eErrorDeviceLost: return "ErrorDeviceLost"; + case Result::eErrorMemoryMapFailed: return "ErrorMemoryMapFailed"; + case Result::eErrorLayerNotPresent: return "ErrorLayerNotPresent"; + case Result::eErrorExtensionNotPresent: return "ErrorExtensionNotPresent"; + case Result::eErrorFeatureNotPresent: return "ErrorFeatureNotPresent"; + case Result::eErrorIncompatibleDriver: return "ErrorIncompatibleDriver"; + case Result::eErrorTooManyObjects: return "ErrorTooManyObjects"; + case Result::eErrorFormatNotSupported: return "ErrorFormatNotSupported"; + case Result::eErrorFragmentedPool: return "ErrorFragmentedPool"; + case Result::eErrorOutOfPoolMemory: return "ErrorOutOfPoolMemory"; + case Result::eErrorInvalidExternalHandle: return "ErrorInvalidExternalHandle"; + case Result::eErrorSurfaceLostKHR: return "ErrorSurfaceLostKHR"; + case Result::eErrorNativeWindowInUseKHR: return "ErrorNativeWindowInUseKHR"; + case Result::eSuboptimalKHR: return "SuboptimalKHR"; + case Result::eErrorOutOfDateKHR: return "ErrorOutOfDateKHR"; + case Result::eErrorIncompatibleDisplayKHR: return "ErrorIncompatibleDisplayKHR"; + 
case Result::eErrorValidationFailedEXT: return "ErrorValidationFailedEXT"; + case Result::eErrorInvalidShaderNV: return "ErrorInvalidShaderNV"; + case Result::eErrorFragmentationEXT: return "ErrorFragmentationEXT"; + case Result::eErrorNotPermittedEXT: return "ErrorNotPermittedEXT"; + default: return "invalid"; + } + } + +#ifndef VULKAN_HPP_NO_EXCEPTIONS +#if defined(_MSC_VER) && (_MSC_VER == 1800) +# define noexcept _NOEXCEPT +#endif + + class ErrorCategoryImpl : public std::error_category + { + public: + virtual const char* name() const noexcept override { return VULKAN_HPP_NAMESPACE_STRING"::Result"; } + virtual std::string message(int ev) const override { return to_string(static_cast(ev)); } + }; + +#if defined(_MSC_VER) && (_MSC_VER == 1800) +# undef noexcept +#endif + + VULKAN_HPP_INLINE const std::error_category& errorCategory() + { + static ErrorCategoryImpl instance; + return instance; + } + + VULKAN_HPP_INLINE std::error_code make_error_code(Result e) + { + return std::error_code(static_cast(e), errorCategory()); + } + + VULKAN_HPP_INLINE std::error_condition make_error_condition(Result e) + { + return std::error_condition(static_cast(e), errorCategory()); + } + +#if defined(_MSC_VER) && (_MSC_VER == 1800) +# define noexcept _NOEXCEPT +#endif + + class Error + { + public: + virtual ~Error() = default; + + virtual const char* what() const noexcept = 0; + }; + + class LogicError : public Error, public std::logic_error + { + public: + explicit LogicError( const std::string& what ) + : Error(), std::logic_error(what) {} + explicit LogicError( char const * what ) + : Error(), std::logic_error(what) {} + virtual ~LogicError() = default; + + virtual const char* what() const noexcept { return std::logic_error::what(); } + }; + + class SystemError : public Error, public std::system_error + { + public: + SystemError( std::error_code ec ) + : Error(), std::system_error(ec) {} + SystemError( std::error_code ec, std::string const& what ) + : Error(), std::system_error(ec, what) {} + SystemError( std::error_code ec, char const * what ) + : Error(), std::system_error(ec, what) {} + SystemError( int ev, std::error_category const& ecat ) + : Error(), std::system_error(ev, ecat) {} + SystemError( int ev, std::error_category const& ecat, std::string const& what) + : Error(), std::system_error(ev, ecat, what) {} + SystemError( int ev, std::error_category const& ecat, char const * what) + : Error(), std::system_error(ev, ecat, what) {} + virtual ~SystemError() = default; + + virtual const char* what() const noexcept { return std::system_error::what(); } + }; + +#if defined(_MSC_VER) && (_MSC_VER == 1800) +# undef noexcept +#endif + + class OutOfHostMemoryError : public SystemError + { + public: + OutOfHostMemoryError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorOutOfHostMemory ), message ) {} + OutOfHostMemoryError( char const * message ) + : SystemError( make_error_code( Result::eErrorOutOfHostMemory ), message ) {} + }; + class OutOfDeviceMemoryError : public SystemError + { + public: + OutOfDeviceMemoryError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorOutOfDeviceMemory ), message ) {} + OutOfDeviceMemoryError( char const * message ) + : SystemError( make_error_code( Result::eErrorOutOfDeviceMemory ), message ) {} + }; + class InitializationFailedError : public SystemError + { + public: + InitializationFailedError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorInitializationFailed ), message ) {} + 
InitializationFailedError( char const * message ) + : SystemError( make_error_code( Result::eErrorInitializationFailed ), message ) {} + }; + class DeviceLostError : public SystemError + { + public: + DeviceLostError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorDeviceLost ), message ) {} + DeviceLostError( char const * message ) + : SystemError( make_error_code( Result::eErrorDeviceLost ), message ) {} + }; + class MemoryMapFailedError : public SystemError + { + public: + MemoryMapFailedError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorMemoryMapFailed ), message ) {} + MemoryMapFailedError( char const * message ) + : SystemError( make_error_code( Result::eErrorMemoryMapFailed ), message ) {} + }; + class LayerNotPresentError : public SystemError + { + public: + LayerNotPresentError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorLayerNotPresent ), message ) {} + LayerNotPresentError( char const * message ) + : SystemError( make_error_code( Result::eErrorLayerNotPresent ), message ) {} + }; + class ExtensionNotPresentError : public SystemError + { + public: + ExtensionNotPresentError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorExtensionNotPresent ), message ) {} + ExtensionNotPresentError( char const * message ) + : SystemError( make_error_code( Result::eErrorExtensionNotPresent ), message ) {} + }; + class FeatureNotPresentError : public SystemError + { + public: + FeatureNotPresentError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorFeatureNotPresent ), message ) {} + FeatureNotPresentError( char const * message ) + : SystemError( make_error_code( Result::eErrorFeatureNotPresent ), message ) {} + }; + class IncompatibleDriverError : public SystemError + { + public: + IncompatibleDriverError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorIncompatibleDriver ), message ) {} + IncompatibleDriverError( char const * message ) + : SystemError( make_error_code( Result::eErrorIncompatibleDriver ), message ) {} + }; + class TooManyObjectsError : public SystemError + { + public: + TooManyObjectsError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorTooManyObjects ), message ) {} + TooManyObjectsError( char const * message ) + : SystemError( make_error_code( Result::eErrorTooManyObjects ), message ) {} + }; + class FormatNotSupportedError : public SystemError + { + public: + FormatNotSupportedError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorFormatNotSupported ), message ) {} + FormatNotSupportedError( char const * message ) + : SystemError( make_error_code( Result::eErrorFormatNotSupported ), message ) {} + }; + class FragmentedPoolError : public SystemError + { + public: + FragmentedPoolError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorFragmentedPool ), message ) {} + FragmentedPoolError( char const * message ) + : SystemError( make_error_code( Result::eErrorFragmentedPool ), message ) {} + }; + class OutOfPoolMemoryError : public SystemError + { + public: + OutOfPoolMemoryError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorOutOfPoolMemory ), message ) {} + OutOfPoolMemoryError( char const * message ) + : SystemError( make_error_code( Result::eErrorOutOfPoolMemory ), message ) {} + }; + class InvalidExternalHandleError : public SystemError + { + public: + InvalidExternalHandleError( 
std::string const& message ) + : SystemError( make_error_code( Result::eErrorInvalidExternalHandle ), message ) {} + InvalidExternalHandleError( char const * message ) + : SystemError( make_error_code( Result::eErrorInvalidExternalHandle ), message ) {} + }; + class SurfaceLostKHRError : public SystemError + { + public: + SurfaceLostKHRError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorSurfaceLostKHR ), message ) {} + SurfaceLostKHRError( char const * message ) + : SystemError( make_error_code( Result::eErrorSurfaceLostKHR ), message ) {} + }; + class NativeWindowInUseKHRError : public SystemError + { + public: + NativeWindowInUseKHRError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorNativeWindowInUseKHR ), message ) {} + NativeWindowInUseKHRError( char const * message ) + : SystemError( make_error_code( Result::eErrorNativeWindowInUseKHR ), message ) {} + }; + class OutOfDateKHRError : public SystemError + { + public: + OutOfDateKHRError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorOutOfDateKHR ), message ) {} + OutOfDateKHRError( char const * message ) + : SystemError( make_error_code( Result::eErrorOutOfDateKHR ), message ) {} + }; + class IncompatibleDisplayKHRError : public SystemError + { + public: + IncompatibleDisplayKHRError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorIncompatibleDisplayKHR ), message ) {} + IncompatibleDisplayKHRError( char const * message ) + : SystemError( make_error_code( Result::eErrorIncompatibleDisplayKHR ), message ) {} + }; + class ValidationFailedEXTError : public SystemError + { + public: + ValidationFailedEXTError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorValidationFailedEXT ), message ) {} + ValidationFailedEXTError( char const * message ) + : SystemError( make_error_code( Result::eErrorValidationFailedEXT ), message ) {} + }; + class InvalidShaderNVError : public SystemError + { + public: + InvalidShaderNVError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorInvalidShaderNV ), message ) {} + InvalidShaderNVError( char const * message ) + : SystemError( make_error_code( Result::eErrorInvalidShaderNV ), message ) {} + }; + class FragmentationEXTError : public SystemError + { + public: + FragmentationEXTError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorFragmentationEXT ), message ) {} + FragmentationEXTError( char const * message ) + : SystemError( make_error_code( Result::eErrorFragmentationEXT ), message ) {} + }; + class NotPermittedEXTError : public SystemError + { + public: + NotPermittedEXTError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorNotPermittedEXT ), message ) {} + NotPermittedEXTError( char const * message ) + : SystemError( make_error_code( Result::eErrorNotPermittedEXT ), message ) {} + }; + + VULKAN_HPP_INLINE void throwResultException( Result result, char const * message ) + { + switch ( result ) + { + case Result::eErrorOutOfHostMemory: throw OutOfHostMemoryError ( message ); + case Result::eErrorOutOfDeviceMemory: throw OutOfDeviceMemoryError ( message ); + case Result::eErrorInitializationFailed: throw InitializationFailedError ( message ); + case Result::eErrorDeviceLost: throw DeviceLostError ( message ); + case Result::eErrorMemoryMapFailed: throw MemoryMapFailedError ( message ); + case Result::eErrorLayerNotPresent: throw LayerNotPresentError ( message ); + case 
Result::eErrorExtensionNotPresent: throw ExtensionNotPresentError ( message ); + case Result::eErrorFeatureNotPresent: throw FeatureNotPresentError ( message ); + case Result::eErrorIncompatibleDriver: throw IncompatibleDriverError ( message ); + case Result::eErrorTooManyObjects: throw TooManyObjectsError ( message ); + case Result::eErrorFormatNotSupported: throw FormatNotSupportedError ( message ); + case Result::eErrorFragmentedPool: throw FragmentedPoolError ( message ); + case Result::eErrorOutOfPoolMemory: throw OutOfPoolMemoryError ( message ); + case Result::eErrorInvalidExternalHandle: throw InvalidExternalHandleError ( message ); + case Result::eErrorSurfaceLostKHR: throw SurfaceLostKHRError ( message ); + case Result::eErrorNativeWindowInUseKHR: throw NativeWindowInUseKHRError ( message ); + case Result::eErrorOutOfDateKHR: throw OutOfDateKHRError ( message ); + case Result::eErrorIncompatibleDisplayKHR: throw IncompatibleDisplayKHRError ( message ); + case Result::eErrorValidationFailedEXT: throw ValidationFailedEXTError ( message ); + case Result::eErrorInvalidShaderNV: throw InvalidShaderNVError ( message ); + case Result::eErrorFragmentationEXT: throw FragmentationEXTError ( message ); + case Result::eErrorNotPermittedEXT: throw NotPermittedEXTError ( message ); + default: throw SystemError( make_error_code( result ) ); + } + } +#endif +} // namespace VULKAN_HPP_NAMESPACE + +namespace std +{ + template <> + struct is_error_code_enum : public true_type + {}; +} + +namespace VULKAN_HPP_NAMESPACE +{ + + template + struct ResultValue + { + ResultValue( Result r, T & v ) + : result( r ) + , value( v ) + {} + + ResultValue( Result r, T && v ) + : result( r ) + , value( std::move( v ) ) + {} + + Result result; + T value; + + operator std::tuple() { return std::tuple(result, value); } + }; + + template + struct ResultValueType + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + typedef ResultValue type; +#else + typedef T type; +#endif + }; + + template <> + struct ResultValueType + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + typedef Result type; +#else + typedef void type; +#endif + }; + + VULKAN_HPP_INLINE ResultValueType::type createResultValue( Result result, char const * message ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( result == Result::eSuccess ); + return result; +#else + if ( result != Result::eSuccess ) + { + throwResultException( result, message ); + } +#endif + } + + template + VULKAN_HPP_INLINE typename ResultValueType::type createResultValue( Result result, T & data, char const * message ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( result == Result::eSuccess ); + return ResultValue( result, data ); +#else + if ( result != Result::eSuccess ) + { + throwResultException( result, message ); + } + return std::move( data ); +#endif + } + + VULKAN_HPP_INLINE Result createResultValue( Result result, char const * message, std::initializer_list successCodes ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); +#else + if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) + { + throwResultException( result, message ); + } +#endif + return result; + } + + template + VULKAN_HPP_INLINE ResultValue createResultValue( Result result, T & data, char const * message, std::initializer_list successCodes ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); 
+#else + if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) + { + throwResultException( result, message ); + } +#endif + return ResultValue( result, data ); + } + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type createResultValue( Result result, T & data, char const * message, typename UniqueHandleTraits::deleter const& deleter ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( result == Result::eSuccess ); + return ResultValue>( result, UniqueHandle(data, deleter) ); +#else + if ( result != Result::eSuccess ) + { + throwResultException( result, message ); + } + return UniqueHandle(data, deleter); +#endif + } +#endif + +class DispatchLoaderStatic +{ +public: + VkResult vkAcquireNextImage2KHR( VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex ) const + { + return ::vkAcquireNextImage2KHR( device, pAcquireInfo, pImageIndex); + } + VkResult vkAcquireNextImageKHR( VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex ) const + { + return ::vkAcquireNextImageKHR( device, swapchain, timeout, semaphore, fence, pImageIndex); + } +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + VkResult vkAcquireXlibDisplayEXT( VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display ) const + { + return ::vkAcquireXlibDisplayEXT( physicalDevice, dpy, display); + } +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + VkResult vkAllocateCommandBuffers( VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers ) const + { + return ::vkAllocateCommandBuffers( device, pAllocateInfo, pCommandBuffers); + } + VkResult vkAllocateDescriptorSets( VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets ) const + { + return ::vkAllocateDescriptorSets( device, pAllocateInfo, pDescriptorSets); + } + VkResult vkAllocateMemory( VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory ) const + { + return ::vkAllocateMemory( device, pAllocateInfo, pAllocator, pMemory); + } + VkResult vkBeginCommandBuffer( VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo ) const + { + return ::vkBeginCommandBuffer( commandBuffer, pBeginInfo); + } + VkResult vkBindAccelerationStructureMemoryNVX( VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNVX* pBindInfos ) const + { + return ::vkBindAccelerationStructureMemoryNVX( device, bindInfoCount, pBindInfos); + } + VkResult vkBindBufferMemory( VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset ) const + { + return ::vkBindBufferMemory( device, buffer, memory, memoryOffset); + } + VkResult vkBindBufferMemory2( VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos ) const + { + return ::vkBindBufferMemory2( device, bindInfoCount, pBindInfos); + } + VkResult vkBindBufferMemory2KHR( VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos ) const + { + return ::vkBindBufferMemory2KHR( device, bindInfoCount, pBindInfos); + } + VkResult vkBindImageMemory( VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset ) const + { + return ::vkBindImageMemory( device, image, memory, memoryOffset); + } + VkResult vkBindImageMemory2( VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* 
pBindInfos ) const + { + return ::vkBindImageMemory2( device, bindInfoCount, pBindInfos); + } + VkResult vkBindImageMemory2KHR( VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos ) const + { + return ::vkBindImageMemory2KHR( device, bindInfoCount, pBindInfos); + } + void vkCmdBeginConditionalRenderingEXT( VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin ) const + { + return ::vkCmdBeginConditionalRenderingEXT( commandBuffer, pConditionalRenderingBegin); + } + void vkCmdBeginDebugUtilsLabelEXT( VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo ) const + { + return ::vkCmdBeginDebugUtilsLabelEXT( commandBuffer, pLabelInfo); + } + void vkCmdBeginQuery( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags ) const + { + return ::vkCmdBeginQuery( commandBuffer, queryPool, query, flags); + } + void vkCmdBeginRenderPass( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents ) const + { + return ::vkCmdBeginRenderPass( commandBuffer, pRenderPassBegin, contents); + } + void vkCmdBeginRenderPass2KHR( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfoKHR* pSubpassBeginInfo ) const + { + return ::vkCmdBeginRenderPass2KHR( commandBuffer, pRenderPassBegin, pSubpassBeginInfo); + } + void vkCmdBindDescriptorSets( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets ) const + { + return ::vkCmdBindDescriptorSets( commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets); + } + void vkCmdBindIndexBuffer( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType ) const + { + return ::vkCmdBindIndexBuffer( commandBuffer, buffer, offset, indexType); + } + void vkCmdBindPipeline( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline ) const + { + return ::vkCmdBindPipeline( commandBuffer, pipelineBindPoint, pipeline); + } + void vkCmdBindShadingRateImageNV( VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout ) const + { + return ::vkCmdBindShadingRateImageNV( commandBuffer, imageView, imageLayout); + } + void vkCmdBindVertexBuffers( VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets ) const + { + return ::vkCmdBindVertexBuffers( commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets); + } + void vkCmdBlitImage( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter ) const + { + return ::vkCmdBlitImage( commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter); + } + void vkCmdBuildAccelerationStructureNVX( VkCommandBuffer cmdBuf, VkAccelerationStructureTypeNVX type, uint32_t instanceCount, VkBuffer instanceData, VkDeviceSize instanceOffset, uint32_t geometryCount, const VkGeometryNVX* pGeometries, VkBuildAccelerationStructureFlagsNVX flags, VkBool32 update, VkAccelerationStructureNVX dst, VkAccelerationStructureNVX src, VkBuffer scratch, VkDeviceSize 
scratchOffset ) const + { + return ::vkCmdBuildAccelerationStructureNVX( cmdBuf, type, instanceCount, instanceData, instanceOffset, geometryCount, pGeometries, flags, update, dst, src, scratch, scratchOffset); + } + void vkCmdClearAttachments( VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects ) const + { + return ::vkCmdClearAttachments( commandBuffer, attachmentCount, pAttachments, rectCount, pRects); + } + void vkCmdClearColorImage( VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges ) const + { + return ::vkCmdClearColorImage( commandBuffer, image, imageLayout, pColor, rangeCount, pRanges); + } + void vkCmdClearDepthStencilImage( VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges ) const + { + return ::vkCmdClearDepthStencilImage( commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges); + } + void vkCmdCopyAccelerationStructureNVX( VkCommandBuffer cmdBuf, VkAccelerationStructureNVX dst, VkAccelerationStructureNVX src, VkCopyAccelerationStructureModeNVX mode ) const + { + return ::vkCmdCopyAccelerationStructureNVX( cmdBuf, dst, src, mode); + } + void vkCmdCopyBuffer( VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions ) const + { + return ::vkCmdCopyBuffer( commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions); + } + void vkCmdCopyBufferToImage( VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions ) const + { + return ::vkCmdCopyBufferToImage( commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions); + } + void vkCmdCopyImage( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions ) const + { + return ::vkCmdCopyImage( commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); + } + void vkCmdCopyImageToBuffer( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions ) const + { + return ::vkCmdCopyImageToBuffer( commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions); + } + void vkCmdCopyQueryPoolResults( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags ) const + { + return ::vkCmdCopyQueryPoolResults( commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags); + } + void vkCmdDebugMarkerBeginEXT( VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo ) const + { + return ::vkCmdDebugMarkerBeginEXT( commandBuffer, pMarkerInfo); + } + void vkCmdDebugMarkerEndEXT( VkCommandBuffer commandBuffer ) const + { + return ::vkCmdDebugMarkerEndEXT( commandBuffer); + } + void vkCmdDebugMarkerInsertEXT( VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo ) const + { + return ::vkCmdDebugMarkerInsertEXT( commandBuffer, pMarkerInfo); + } + void vkCmdDispatch( VkCommandBuffer commandBuffer, 
uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const + { + return ::vkCmdDispatch( commandBuffer, groupCountX, groupCountY, groupCountZ); + } + void vkCmdDispatchBase( VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const + { + return ::vkCmdDispatchBase( commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ); + } + void vkCmdDispatchBaseKHR( VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const + { + return ::vkCmdDispatchBaseKHR( commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ); + } + void vkCmdDispatchIndirect( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset ) const + { + return ::vkCmdDispatchIndirect( commandBuffer, buffer, offset); + } + void vkCmdDraw( VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance ) const + { + return ::vkCmdDraw( commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance); + } + void vkCmdDrawIndexed( VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance ) const + { + return ::vkCmdDrawIndexed( commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance); + } + void vkCmdDrawIndexedIndirect( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride ) const + { + return ::vkCmdDrawIndexedIndirect( commandBuffer, buffer, offset, drawCount, stride); + } + void vkCmdDrawIndexedIndirectCountAMD( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const + { + return ::vkCmdDrawIndexedIndirectCountAMD( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } + void vkCmdDrawIndexedIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const + { + return ::vkCmdDrawIndexedIndirectCountKHR( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } + void vkCmdDrawIndirect( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride ) const + { + return ::vkCmdDrawIndirect( commandBuffer, buffer, offset, drawCount, stride); + } + void vkCmdDrawIndirectCountAMD( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const + { + return ::vkCmdDrawIndirectCountAMD( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } + void vkCmdDrawIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const + { + return ::vkCmdDrawIndirectCountKHR( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } + void vkCmdDrawMeshTasksIndirectCountNV( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t 
maxDrawCount, uint32_t stride ) const + { + return ::vkCmdDrawMeshTasksIndirectCountNV( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } + void vkCmdDrawMeshTasksIndirectNV( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride ) const + { + return ::vkCmdDrawMeshTasksIndirectNV( commandBuffer, buffer, offset, drawCount, stride); + } + void vkCmdDrawMeshTasksNV( VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask ) const + { + return ::vkCmdDrawMeshTasksNV( commandBuffer, taskCount, firstTask); + } + void vkCmdEndConditionalRenderingEXT( VkCommandBuffer commandBuffer ) const + { + return ::vkCmdEndConditionalRenderingEXT( commandBuffer); + } + void vkCmdEndDebugUtilsLabelEXT( VkCommandBuffer commandBuffer ) const + { + return ::vkCmdEndDebugUtilsLabelEXT( commandBuffer); + } + void vkCmdEndQuery( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query ) const + { + return ::vkCmdEndQuery( commandBuffer, queryPool, query); + } + void vkCmdEndRenderPass( VkCommandBuffer commandBuffer ) const + { + return ::vkCmdEndRenderPass( commandBuffer); + } + void vkCmdEndRenderPass2KHR( VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo ) const + { + return ::vkCmdEndRenderPass2KHR( commandBuffer, pSubpassEndInfo); + } + void vkCmdExecuteCommands( VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers ) const + { + return ::vkCmdExecuteCommands( commandBuffer, commandBufferCount, pCommandBuffers); + } + void vkCmdFillBuffer( VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data ) const + { + return ::vkCmdFillBuffer( commandBuffer, dstBuffer, dstOffset, size, data); + } + void vkCmdInsertDebugUtilsLabelEXT( VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo ) const + { + return ::vkCmdInsertDebugUtilsLabelEXT( commandBuffer, pLabelInfo); + } + void vkCmdNextSubpass( VkCommandBuffer commandBuffer, VkSubpassContents contents ) const + { + return ::vkCmdNextSubpass( commandBuffer, contents); + } + void vkCmdNextSubpass2KHR( VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo, const VkSubpassEndInfoKHR* pSubpassEndInfo ) const + { + return ::vkCmdNextSubpass2KHR( commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); + } + void vkCmdPipelineBarrier( VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers ) const + { + return ::vkCmdPipelineBarrier( commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); + } + void vkCmdProcessCommandsNVX( VkCommandBuffer commandBuffer, const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo ) const + { + return ::vkCmdProcessCommandsNVX( commandBuffer, pProcessCommandsInfo); + } + void vkCmdPushConstants( VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues ) const + { + return ::vkCmdPushConstants( commandBuffer, layout, stageFlags, offset, size, pValues); + 
} + void vkCmdPushDescriptorSetKHR( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites ) const + { + return ::vkCmdPushDescriptorSetKHR( commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites); + } + void vkCmdPushDescriptorSetWithTemplateKHR( VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData ) const + { + return ::vkCmdPushDescriptorSetWithTemplateKHR( commandBuffer, descriptorUpdateTemplate, layout, set, pData); + } + void vkCmdReserveSpaceForCommandsNVX( VkCommandBuffer commandBuffer, const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo ) const + { + return ::vkCmdReserveSpaceForCommandsNVX( commandBuffer, pReserveSpaceInfo); + } + void vkCmdResetEvent( VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask ) const + { + return ::vkCmdResetEvent( commandBuffer, event, stageMask); + } + void vkCmdResetQueryPool( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount ) const + { + return ::vkCmdResetQueryPool( commandBuffer, queryPool, firstQuery, queryCount); + } + void vkCmdResolveImage( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions ) const + { + return ::vkCmdResolveImage( commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); + } + void vkCmdSetBlendConstants( VkCommandBuffer commandBuffer, const float blendConstants[4] ) const + { + return ::vkCmdSetBlendConstants( commandBuffer, blendConstants); + } + void vkCmdSetCheckpointNV( VkCommandBuffer commandBuffer, const void* pCheckpointMarker ) const + { + return ::vkCmdSetCheckpointNV( commandBuffer, pCheckpointMarker); + } + void vkCmdSetCoarseSampleOrderNV( VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders ) const + { + return ::vkCmdSetCoarseSampleOrderNV( commandBuffer, sampleOrderType, customSampleOrderCount, pCustomSampleOrders); + } + void vkCmdSetDepthBias( VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor ) const + { + return ::vkCmdSetDepthBias( commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor); + } + void vkCmdSetDepthBounds( VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds ) const + { + return ::vkCmdSetDepthBounds( commandBuffer, minDepthBounds, maxDepthBounds); + } + void vkCmdSetDeviceMask( VkCommandBuffer commandBuffer, uint32_t deviceMask ) const + { + return ::vkCmdSetDeviceMask( commandBuffer, deviceMask); + } + void vkCmdSetDeviceMaskKHR( VkCommandBuffer commandBuffer, uint32_t deviceMask ) const + { + return ::vkCmdSetDeviceMaskKHR( commandBuffer, deviceMask); + } + void vkCmdSetDiscardRectangleEXT( VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles ) const + { + return ::vkCmdSetDiscardRectangleEXT( commandBuffer, firstDiscardRectangle, discardRectangleCount, pDiscardRectangles); + } + void vkCmdSetEvent( VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask ) const + { + return ::vkCmdSetEvent( 
commandBuffer, event, stageMask); + } + void vkCmdSetExclusiveScissorNV( VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors ) const + { + return ::vkCmdSetExclusiveScissorNV( commandBuffer, firstExclusiveScissor, exclusiveScissorCount, pExclusiveScissors); + } + void vkCmdSetLineWidth( VkCommandBuffer commandBuffer, float lineWidth ) const + { + return ::vkCmdSetLineWidth( commandBuffer, lineWidth); + } + void vkCmdSetSampleLocationsEXT( VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo ) const + { + return ::vkCmdSetSampleLocationsEXT( commandBuffer, pSampleLocationsInfo); + } + void vkCmdSetScissor( VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors ) const + { + return ::vkCmdSetScissor( commandBuffer, firstScissor, scissorCount, pScissors); + } + void vkCmdSetStencilCompareMask( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask ) const + { + return ::vkCmdSetStencilCompareMask( commandBuffer, faceMask, compareMask); + } + void vkCmdSetStencilReference( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference ) const + { + return ::vkCmdSetStencilReference( commandBuffer, faceMask, reference); + } + void vkCmdSetStencilWriteMask( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask ) const + { + return ::vkCmdSetStencilWriteMask( commandBuffer, faceMask, writeMask); + } + void vkCmdSetViewport( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports ) const + { + return ::vkCmdSetViewport( commandBuffer, firstViewport, viewportCount, pViewports); + } + void vkCmdSetViewportShadingRatePaletteNV( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes ) const + { + return ::vkCmdSetViewportShadingRatePaletteNV( commandBuffer, firstViewport, viewportCount, pShadingRatePalettes); + } + void vkCmdSetViewportWScalingNV( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings ) const + { + return ::vkCmdSetViewportWScalingNV( commandBuffer, firstViewport, viewportCount, pViewportWScalings); + } + void vkCmdTraceRaysNVX( VkCommandBuffer cmdBuf, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, uint32_t width, uint32_t height ) const + { + return ::vkCmdTraceRaysNVX( cmdBuf, raygenShaderBindingTableBuffer, raygenShaderBindingOffset, missShaderBindingTableBuffer, missShaderBindingOffset, missShaderBindingStride, hitShaderBindingTableBuffer, hitShaderBindingOffset, hitShaderBindingStride, width, height); + } + void vkCmdUpdateBuffer( VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData ) const + { + return ::vkCmdUpdateBuffer( commandBuffer, dstBuffer, dstOffset, dataSize, pData); + } + void vkCmdWaitEvents( VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t 
bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers ) const + { + return ::vkCmdWaitEvents( commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); + } + void vkCmdWriteAccelerationStructurePropertiesNVX( VkCommandBuffer cmdBuf, VkAccelerationStructureNVX accelerationStructure, VkQueryType queryType, VkQueryPool queryPool, uint32_t query ) const + { + return ::vkCmdWriteAccelerationStructurePropertiesNVX( cmdBuf, accelerationStructure, queryType, queryPool, query); + } + void vkCmdWriteBufferMarkerAMD( VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker ) const + { + return ::vkCmdWriteBufferMarkerAMD( commandBuffer, pipelineStage, dstBuffer, dstOffset, marker); + } + void vkCmdWriteTimestamp( VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query ) const + { + return ::vkCmdWriteTimestamp( commandBuffer, pipelineStage, queryPool, query); + } + VkResult vkCompileDeferredNVX( VkDevice device, VkPipeline pipeline, uint32_t shader ) const + { + return ::vkCompileDeferredNVX( device, pipeline, shader); + } + VkResult vkCreateAccelerationStructureNVX( VkDevice device, const VkAccelerationStructureCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNVX* pAccelerationStructure ) const + { + return ::vkCreateAccelerationStructureNVX( device, pCreateInfo, pAllocator, pAccelerationStructure); + } +#ifdef VK_USE_PLATFORM_ANDROID_KHR + VkResult vkCreateAndroidSurfaceKHR( VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const + { + return ::vkCreateAndroidSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface); + } +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + VkResult vkCreateBuffer( VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer ) const + { + return ::vkCreateBuffer( device, pCreateInfo, pAllocator, pBuffer); + } + VkResult vkCreateBufferView( VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView ) const + { + return ::vkCreateBufferView( device, pCreateInfo, pAllocator, pView); + } + VkResult vkCreateCommandPool( VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool ) const + { + return ::vkCreateCommandPool( device, pCreateInfo, pAllocator, pCommandPool); + } + VkResult vkCreateComputePipelines( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const + { + return ::vkCreateComputePipelines( device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); + } + VkResult vkCreateDebugReportCallbackEXT( VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback ) const + { + return ::vkCreateDebugReportCallbackEXT( instance, pCreateInfo, pAllocator, pCallback); + } + VkResult vkCreateDebugUtilsMessengerEXT( VkInstance instance, const 
VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger ) const + { + return ::vkCreateDebugUtilsMessengerEXT( instance, pCreateInfo, pAllocator, pMessenger); + } + VkResult vkCreateDescriptorPool( VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool ) const + { + return ::vkCreateDescriptorPool( device, pCreateInfo, pAllocator, pDescriptorPool); + } + VkResult vkCreateDescriptorSetLayout( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout ) const + { + return ::vkCreateDescriptorSetLayout( device, pCreateInfo, pAllocator, pSetLayout); + } + VkResult vkCreateDescriptorUpdateTemplate( VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate ) const + { + return ::vkCreateDescriptorUpdateTemplate( device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate); + } + VkResult vkCreateDescriptorUpdateTemplateKHR( VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate ) const + { + return ::vkCreateDescriptorUpdateTemplateKHR( device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate); + } + VkResult vkCreateDevice( VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice ) const + { + return ::vkCreateDevice( physicalDevice, pCreateInfo, pAllocator, pDevice); + } + VkResult vkCreateDisplayModeKHR( VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode ) const + { + return ::vkCreateDisplayModeKHR( physicalDevice, display, pCreateInfo, pAllocator, pMode); + } + VkResult vkCreateDisplayPlaneSurfaceKHR( VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const + { + return ::vkCreateDisplayPlaneSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface); + } + VkResult vkCreateEvent( VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent ) const + { + return ::vkCreateEvent( device, pCreateInfo, pAllocator, pEvent); + } + VkResult vkCreateFence( VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence ) const + { + return ::vkCreateFence( device, pCreateInfo, pAllocator, pFence); + } + VkResult vkCreateFramebuffer( VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer ) const + { + return ::vkCreateFramebuffer( device, pCreateInfo, pAllocator, pFramebuffer); + } + VkResult vkCreateGraphicsPipelines( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const + { + return ::vkCreateGraphicsPipelines( device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); + } +#ifdef VK_USE_PLATFORM_IOS_MVK + VkResult vkCreateIOSSurfaceMVK( VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, 
VkSurfaceKHR* pSurface ) const + { + return ::vkCreateIOSSurfaceMVK( instance, pCreateInfo, pAllocator, pSurface); + } +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + VkResult vkCreateImage( VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage ) const + { + return ::vkCreateImage( device, pCreateInfo, pAllocator, pImage); + } + VkResult vkCreateImageView( VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView ) const + { + return ::vkCreateImageView( device, pCreateInfo, pAllocator, pView); + } + VkResult vkCreateIndirectCommandsLayoutNVX( VkDevice device, const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout ) const + { + return ::vkCreateIndirectCommandsLayoutNVX( device, pCreateInfo, pAllocator, pIndirectCommandsLayout); + } + VkResult vkCreateInstance( const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance ) const + { + return ::vkCreateInstance( pCreateInfo, pAllocator, pInstance); + } +#ifdef VK_USE_PLATFORM_MACOS_MVK + VkResult vkCreateMacOSSurfaceMVK( VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const + { + return ::vkCreateMacOSSurfaceMVK( instance, pCreateInfo, pAllocator, pSurface); + } +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ +#ifdef VK_USE_PLATFORM_MIR_KHR + VkResult vkCreateMirSurfaceKHR( VkInstance instance, const VkMirSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const + { + return ::vkCreateMirSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface); + } +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + VkResult vkCreateObjectTableNVX( VkDevice device, const VkObjectTableCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkObjectTableNVX* pObjectTable ) const + { + return ::vkCreateObjectTableNVX( device, pCreateInfo, pAllocator, pObjectTable); + } + VkResult vkCreatePipelineCache( VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache ) const + { + return ::vkCreatePipelineCache( device, pCreateInfo, pAllocator, pPipelineCache); + } + VkResult vkCreatePipelineLayout( VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout ) const + { + return ::vkCreatePipelineLayout( device, pCreateInfo, pAllocator, pPipelineLayout); + } + VkResult vkCreateQueryPool( VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool ) const + { + return ::vkCreateQueryPool( device, pCreateInfo, pAllocator, pQueryPool); + } + VkResult vkCreateRaytracingPipelinesNVX( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRaytracingPipelineCreateInfoNVX* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const + { + return ::vkCreateRaytracingPipelinesNVX( device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); + } + VkResult vkCreateRenderPass( VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass ) const + { + return ::vkCreateRenderPass( device, pCreateInfo, pAllocator, pRenderPass); + } + VkResult vkCreateRenderPass2KHR( 
VkDevice device, const VkRenderPassCreateInfo2KHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass ) const + { + return ::vkCreateRenderPass2KHR( device, pCreateInfo, pAllocator, pRenderPass); + } + VkResult vkCreateSampler( VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler ) const + { + return ::vkCreateSampler( device, pCreateInfo, pAllocator, pSampler); + } + VkResult vkCreateSamplerYcbcrConversion( VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion ) const + { + return ::vkCreateSamplerYcbcrConversion( device, pCreateInfo, pAllocator, pYcbcrConversion); + } + VkResult vkCreateSamplerYcbcrConversionKHR( VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion ) const + { + return ::vkCreateSamplerYcbcrConversionKHR( device, pCreateInfo, pAllocator, pYcbcrConversion); + } + VkResult vkCreateSemaphore( VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore ) const + { + return ::vkCreateSemaphore( device, pCreateInfo, pAllocator, pSemaphore); + } + VkResult vkCreateShaderModule( VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule ) const + { + return ::vkCreateShaderModule( device, pCreateInfo, pAllocator, pShaderModule); + } + VkResult vkCreateSharedSwapchainsKHR( VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains ) const + { + return ::vkCreateSharedSwapchainsKHR( device, swapchainCount, pCreateInfos, pAllocator, pSwapchains); + } + VkResult vkCreateSwapchainKHR( VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain ) const + { + return ::vkCreateSwapchainKHR( device, pCreateInfo, pAllocator, pSwapchain); + } + VkResult vkCreateValidationCacheEXT( VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache ) const + { + return ::vkCreateValidationCacheEXT( device, pCreateInfo, pAllocator, pValidationCache); + } +#ifdef VK_USE_PLATFORM_VI_NN + VkResult vkCreateViSurfaceNN( VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const + { + return ::vkCreateViSurfaceNN( instance, pCreateInfo, pAllocator, pSurface); + } +#endif /*VK_USE_PLATFORM_VI_NN*/ +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + VkResult vkCreateWaylandSurfaceKHR( VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const + { + return ::vkCreateWaylandSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface); + } +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkCreateWin32SurfaceKHR( VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const + { + return ::vkCreateWin32SurfaceKHR( instance, pCreateInfo, pAllocator, pSurface); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + VkResult vkCreateXcbSurfaceKHR( VkInstance 
instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const + { + return ::vkCreateXcbSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface); + } +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + VkResult vkCreateXlibSurfaceKHR( VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const + { + return ::vkCreateXlibSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface); + } +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + VkResult vkDebugMarkerSetObjectNameEXT( VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo ) const + { + return ::vkDebugMarkerSetObjectNameEXT( device, pNameInfo); + } + VkResult vkDebugMarkerSetObjectTagEXT( VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo ) const + { + return ::vkDebugMarkerSetObjectTagEXT( device, pTagInfo); + } + void vkDebugReportMessageEXT( VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage ) const + { + return ::vkDebugReportMessageEXT( instance, flags, objectType, object, location, messageCode, pLayerPrefix, pMessage); + } + void vkDestroyAccelerationStructureNVX( VkDevice device, VkAccelerationStructureNVX accelerationStructure, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyAccelerationStructureNVX( device, accelerationStructure, pAllocator); + } + void vkDestroyBuffer( VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyBuffer( device, buffer, pAllocator); + } + void vkDestroyBufferView( VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyBufferView( device, bufferView, pAllocator); + } + void vkDestroyCommandPool( VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyCommandPool( device, commandPool, pAllocator); + } + void vkDestroyDebugReportCallbackEXT( VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyDebugReportCallbackEXT( instance, callback, pAllocator); + } + void vkDestroyDebugUtilsMessengerEXT( VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyDebugUtilsMessengerEXT( instance, messenger, pAllocator); + } + void vkDestroyDescriptorPool( VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyDescriptorPool( device, descriptorPool, pAllocator); + } + void vkDestroyDescriptorSetLayout( VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyDescriptorSetLayout( device, descriptorSetLayout, pAllocator); + } + void vkDestroyDescriptorUpdateTemplate( VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyDescriptorUpdateTemplate( device, descriptorUpdateTemplate, pAllocator); + } + void vkDestroyDescriptorUpdateTemplateKHR( VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyDescriptorUpdateTemplateKHR( device, descriptorUpdateTemplate, pAllocator); 
+ } + void vkDestroyDevice( VkDevice device, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyDevice( device, pAllocator); + } + void vkDestroyEvent( VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyEvent( device, event, pAllocator); + } + void vkDestroyFence( VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyFence( device, fence, pAllocator); + } + void vkDestroyFramebuffer( VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyFramebuffer( device, framebuffer, pAllocator); + } + void vkDestroyImage( VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyImage( device, image, pAllocator); + } + void vkDestroyImageView( VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyImageView( device, imageView, pAllocator); + } + void vkDestroyIndirectCommandsLayoutNVX( VkDevice device, VkIndirectCommandsLayoutNVX indirectCommandsLayout, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyIndirectCommandsLayoutNVX( device, indirectCommandsLayout, pAllocator); + } + void vkDestroyInstance( VkInstance instance, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyInstance( instance, pAllocator); + } + void vkDestroyObjectTableNVX( VkDevice device, VkObjectTableNVX objectTable, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyObjectTableNVX( device, objectTable, pAllocator); + } + void vkDestroyPipeline( VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyPipeline( device, pipeline, pAllocator); + } + void vkDestroyPipelineCache( VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyPipelineCache( device, pipelineCache, pAllocator); + } + void vkDestroyPipelineLayout( VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyPipelineLayout( device, pipelineLayout, pAllocator); + } + void vkDestroyQueryPool( VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyQueryPool( device, queryPool, pAllocator); + } + void vkDestroyRenderPass( VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyRenderPass( device, renderPass, pAllocator); + } + void vkDestroySampler( VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroySampler( device, sampler, pAllocator); + } + void vkDestroySamplerYcbcrConversion( VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroySamplerYcbcrConversion( device, ycbcrConversion, pAllocator); + } + void vkDestroySamplerYcbcrConversionKHR( VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroySamplerYcbcrConversionKHR( device, ycbcrConversion, pAllocator); + } + void vkDestroySemaphore( VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroySemaphore( device, semaphore, pAllocator); + } + void vkDestroyShaderModule( VkDevice device, VkShaderModule shaderModule, const 
VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyShaderModule( device, shaderModule, pAllocator); + } + void vkDestroySurfaceKHR( VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroySurfaceKHR( instance, surface, pAllocator); + } + void vkDestroySwapchainKHR( VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroySwapchainKHR( device, swapchain, pAllocator); + } + void vkDestroyValidationCacheEXT( VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkDestroyValidationCacheEXT( device, validationCache, pAllocator); + } + VkResult vkDeviceWaitIdle( VkDevice device ) const + { + return ::vkDeviceWaitIdle( device); + } + VkResult vkDisplayPowerControlEXT( VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo ) const + { + return ::vkDisplayPowerControlEXT( device, display, pDisplayPowerInfo); + } + VkResult vkEndCommandBuffer( VkCommandBuffer commandBuffer ) const + { + return ::vkEndCommandBuffer( commandBuffer); + } + VkResult vkEnumerateDeviceExtensionProperties( VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties ) const + { + return ::vkEnumerateDeviceExtensionProperties( physicalDevice, pLayerName, pPropertyCount, pProperties); + } + VkResult vkEnumerateDeviceLayerProperties( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties ) const + { + return ::vkEnumerateDeviceLayerProperties( physicalDevice, pPropertyCount, pProperties); + } + VkResult vkEnumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties ) const + { + return ::vkEnumerateInstanceExtensionProperties( pLayerName, pPropertyCount, pProperties); + } + VkResult vkEnumerateInstanceLayerProperties( uint32_t* pPropertyCount, VkLayerProperties* pProperties ) const + { + return ::vkEnumerateInstanceLayerProperties( pPropertyCount, pProperties); + } + VkResult vkEnumerateInstanceVersion( uint32_t* pApiVersion ) const + { + return ::vkEnumerateInstanceVersion( pApiVersion); + } + VkResult vkEnumeratePhysicalDeviceGroups( VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties ) const + { + return ::vkEnumeratePhysicalDeviceGroups( instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties); + } + VkResult vkEnumeratePhysicalDeviceGroupsKHR( VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties ) const + { + return ::vkEnumeratePhysicalDeviceGroupsKHR( instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties); + } + VkResult vkEnumeratePhysicalDevices( VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices ) const + { + return ::vkEnumeratePhysicalDevices( instance, pPhysicalDeviceCount, pPhysicalDevices); + } + VkResult vkFlushMappedMemoryRanges( VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges ) const + { + return ::vkFlushMappedMemoryRanges( device, memoryRangeCount, pMemoryRanges); + } + void vkFreeCommandBuffers( VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers ) const + { + return ::vkFreeCommandBuffers( device, commandPool, commandBufferCount, 
pCommandBuffers); + } + VkResult vkFreeDescriptorSets( VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets ) const + { + return ::vkFreeDescriptorSets( device, descriptorPool, descriptorSetCount, pDescriptorSets); + } + void vkFreeMemory( VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator ) const + { + return ::vkFreeMemory( device, memory, pAllocator); + } + VkResult vkGetAccelerationStructureHandleNVX( VkDevice device, VkAccelerationStructureNVX accelerationStructure, size_t dataSize, void* pData ) const + { + return ::vkGetAccelerationStructureHandleNVX( device, accelerationStructure, dataSize, pData); + } + void vkGetAccelerationStructureMemoryRequirementsNVX( VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements ) const + { + return ::vkGetAccelerationStructureMemoryRequirementsNVX( device, pInfo, pMemoryRequirements); + } + void vkGetAccelerationStructureScratchMemoryRequirementsNVX( VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements ) const + { + return ::vkGetAccelerationStructureScratchMemoryRequirementsNVX( device, pInfo, pMemoryRequirements); + } +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + VkResult vkGetAndroidHardwareBufferPropertiesANDROID( VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties ) const + { + return ::vkGetAndroidHardwareBufferPropertiesANDROID( device, buffer, pProperties); + } +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + void vkGetBufferMemoryRequirements( VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements ) const + { + return ::vkGetBufferMemoryRequirements( device, buffer, pMemoryRequirements); + } + void vkGetBufferMemoryRequirements2( VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements ) const + { + return ::vkGetBufferMemoryRequirements2( device, pInfo, pMemoryRequirements); + } + void vkGetBufferMemoryRequirements2KHR( VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements ) const + { + return ::vkGetBufferMemoryRequirements2KHR( device, pInfo, pMemoryRequirements); + } + void vkGetDescriptorSetLayoutSupport( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport ) const + { + return ::vkGetDescriptorSetLayoutSupport( device, pCreateInfo, pSupport); + } + void vkGetDescriptorSetLayoutSupportKHR( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport ) const + { + return ::vkGetDescriptorSetLayoutSupportKHR( device, pCreateInfo, pSupport); + } + void vkGetDeviceGroupPeerMemoryFeatures( VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures ) const + { + return ::vkGetDeviceGroupPeerMemoryFeatures( device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures); + } + void vkGetDeviceGroupPeerMemoryFeaturesKHR( VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures ) const + { + return ::vkGetDeviceGroupPeerMemoryFeaturesKHR( device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures); + } + VkResult vkGetDeviceGroupPresentCapabilitiesKHR( 
VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities ) const + { + return ::vkGetDeviceGroupPresentCapabilitiesKHR( device, pDeviceGroupPresentCapabilities); + } + VkResult vkGetDeviceGroupSurfacePresentModesKHR( VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes ) const + { + return ::vkGetDeviceGroupSurfacePresentModesKHR( device, surface, pModes); + } + void vkGetDeviceMemoryCommitment( VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes ) const + { + return ::vkGetDeviceMemoryCommitment( device, memory, pCommittedMemoryInBytes); + } + PFN_vkVoidFunction vkGetDeviceProcAddr( VkDevice device, const char* pName ) const + { + return ::vkGetDeviceProcAddr( device, pName); + } + void vkGetDeviceQueue( VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue ) const + { + return ::vkGetDeviceQueue( device, queueFamilyIndex, queueIndex, pQueue); + } + void vkGetDeviceQueue2( VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue ) const + { + return ::vkGetDeviceQueue2( device, pQueueInfo, pQueue); + } + VkResult vkGetDisplayModeProperties2KHR( VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties ) const + { + return ::vkGetDisplayModeProperties2KHR( physicalDevice, display, pPropertyCount, pProperties); + } + VkResult vkGetDisplayModePropertiesKHR( VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties ) const + { + return ::vkGetDisplayModePropertiesKHR( physicalDevice, display, pPropertyCount, pProperties); + } + VkResult vkGetDisplayPlaneCapabilities2KHR( VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities ) const + { + return ::vkGetDisplayPlaneCapabilities2KHR( physicalDevice, pDisplayPlaneInfo, pCapabilities); + } + VkResult vkGetDisplayPlaneCapabilitiesKHR( VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities ) const + { + return ::vkGetDisplayPlaneCapabilitiesKHR( physicalDevice, mode, planeIndex, pCapabilities); + } + VkResult vkGetDisplayPlaneSupportedDisplaysKHR( VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays ) const + { + return ::vkGetDisplayPlaneSupportedDisplaysKHR( physicalDevice, planeIndex, pDisplayCount, pDisplays); + } + VkResult vkGetEventStatus( VkDevice device, VkEvent event ) const + { + return ::vkGetEventStatus( device, event); + } + VkResult vkGetFenceFdKHR( VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd ) const + { + return ::vkGetFenceFdKHR( device, pGetFdInfo, pFd); + } + VkResult vkGetFenceStatus( VkDevice device, VkFence fence ) const + { + return ::vkGetFenceStatus( device, fence); + } +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetFenceWin32HandleKHR( VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle ) const + { + return ::vkGetFenceWin32HandleKHR( device, pGetWin32HandleInfo, pHandle); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + void vkGetImageMemoryRequirements( VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements ) const + { + return ::vkGetImageMemoryRequirements( device, image, pMemoryRequirements); + } + void vkGetImageMemoryRequirements2( VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, 
VkMemoryRequirements2* pMemoryRequirements ) const + { + return ::vkGetImageMemoryRequirements2( device, pInfo, pMemoryRequirements); + } + void vkGetImageMemoryRequirements2KHR( VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements ) const + { + return ::vkGetImageMemoryRequirements2KHR( device, pInfo, pMemoryRequirements); + } + void vkGetImageSparseMemoryRequirements( VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements ) const + { + return ::vkGetImageSparseMemoryRequirements( device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements); + } + void vkGetImageSparseMemoryRequirements2( VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements ) const + { + return ::vkGetImageSparseMemoryRequirements2( device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements); + } + void vkGetImageSparseMemoryRequirements2KHR( VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements ) const + { + return ::vkGetImageSparseMemoryRequirements2KHR( device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements); + } + void vkGetImageSubresourceLayout( VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout ) const + { + return ::vkGetImageSubresourceLayout( device, image, pSubresource, pLayout); + } + PFN_vkVoidFunction vkGetInstanceProcAddr( VkInstance instance, const char* pName ) const + { + return ::vkGetInstanceProcAddr( instance, pName); + } +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + VkResult vkGetMemoryAndroidHardwareBufferANDROID( VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer ) const + { + return ::vkGetMemoryAndroidHardwareBufferANDROID( device, pInfo, pBuffer); + } +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + VkResult vkGetMemoryFdKHR( VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd ) const + { + return ::vkGetMemoryFdKHR( device, pGetFdInfo, pFd); + } + VkResult vkGetMemoryFdPropertiesKHR( VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties ) const + { + return ::vkGetMemoryFdPropertiesKHR( device, handleType, fd, pMemoryFdProperties); + } + VkResult vkGetMemoryHostPointerPropertiesEXT( VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties ) const + { + return ::vkGetMemoryHostPointerPropertiesEXT( device, handleType, pHostPointer, pMemoryHostPointerProperties); + } +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetMemoryWin32HandleKHR( VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle ) const + { + return ::vkGetMemoryWin32HandleKHR( device, pGetWin32HandleInfo, pHandle); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_NV + VkResult vkGetMemoryWin32HandleNV( VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle ) const + { + return ::vkGetMemoryWin32HandleNV( device, memory, handleType, pHandle); + } +#endif /*VK_USE_PLATFORM_WIN32_NV*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetMemoryWin32HandlePropertiesKHR( 
VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties ) const + { + return ::vkGetMemoryWin32HandlePropertiesKHR( device, handleType, handle, pMemoryWin32HandleProperties); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + VkResult vkGetPastPresentationTimingGOOGLE( VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings ) const + { + return ::vkGetPastPresentationTimingGOOGLE( device, swapchain, pPresentationTimingCount, pPresentationTimings); + } + VkResult vkGetPhysicalDeviceDisplayPlaneProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties ) const + { + return ::vkGetPhysicalDeviceDisplayPlaneProperties2KHR( physicalDevice, pPropertyCount, pProperties); + } + VkResult vkGetPhysicalDeviceDisplayPlanePropertiesKHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties ) const + { + return ::vkGetPhysicalDeviceDisplayPlanePropertiesKHR( physicalDevice, pPropertyCount, pProperties); + } + VkResult vkGetPhysicalDeviceDisplayProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties ) const + { + return ::vkGetPhysicalDeviceDisplayProperties2KHR( physicalDevice, pPropertyCount, pProperties); + } + VkResult vkGetPhysicalDeviceDisplayPropertiesKHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties ) const + { + return ::vkGetPhysicalDeviceDisplayPropertiesKHR( physicalDevice, pPropertyCount, pProperties); + } + void vkGetPhysicalDeviceExternalBufferProperties( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties ) const + { + return ::vkGetPhysicalDeviceExternalBufferProperties( physicalDevice, pExternalBufferInfo, pExternalBufferProperties); + } + void vkGetPhysicalDeviceExternalBufferPropertiesKHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties ) const + { + return ::vkGetPhysicalDeviceExternalBufferPropertiesKHR( physicalDevice, pExternalBufferInfo, pExternalBufferProperties); + } + void vkGetPhysicalDeviceExternalFenceProperties( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties ) const + { + return ::vkGetPhysicalDeviceExternalFenceProperties( physicalDevice, pExternalFenceInfo, pExternalFenceProperties); + } + void vkGetPhysicalDeviceExternalFencePropertiesKHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties ) const + { + return ::vkGetPhysicalDeviceExternalFencePropertiesKHR( physicalDevice, pExternalFenceInfo, pExternalFenceProperties); + } + VkResult vkGetPhysicalDeviceExternalImageFormatPropertiesNV( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties ) const + { + return ::vkGetPhysicalDeviceExternalImageFormatPropertiesNV( physicalDevice, format, type, tiling, usage, flags, externalHandleType, pExternalImageFormatProperties); + } + 
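+ // Each wrapper in this block is a thin pass-through: it forwards its arguments unchanged
+ // to the identically named global Vulkan entry point (::vk*) and returns that call's result.
+ // Platform-specific entries are compiled only when the corresponding VK_USE_PLATFORM_*
+ // macro is defined.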
void vkGetPhysicalDeviceExternalSemaphoreProperties( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties ) const + { + return ::vkGetPhysicalDeviceExternalSemaphoreProperties( physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties); + } + void vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties ) const + { + return ::vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties); + } + void vkGetPhysicalDeviceFeatures( VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures ) const + { + return ::vkGetPhysicalDeviceFeatures( physicalDevice, pFeatures); + } + void vkGetPhysicalDeviceFeatures2( VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures ) const + { + return ::vkGetPhysicalDeviceFeatures2( physicalDevice, pFeatures); + } + void vkGetPhysicalDeviceFeatures2KHR( VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures ) const + { + return ::vkGetPhysicalDeviceFeatures2KHR( physicalDevice, pFeatures); + } + void vkGetPhysicalDeviceFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties ) const + { + return ::vkGetPhysicalDeviceFormatProperties( physicalDevice, format, pFormatProperties); + } + void vkGetPhysicalDeviceFormatProperties2( VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties ) const + { + return ::vkGetPhysicalDeviceFormatProperties2( physicalDevice, format, pFormatProperties); + } + void vkGetPhysicalDeviceFormatProperties2KHR( VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties ) const + { + return ::vkGetPhysicalDeviceFormatProperties2KHR( physicalDevice, format, pFormatProperties); + } + void vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX( VkPhysicalDevice physicalDevice, VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, VkDeviceGeneratedCommandsLimitsNVX* pLimits ) const + { + return ::vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX( physicalDevice, pFeatures, pLimits); + } + VkResult vkGetPhysicalDeviceImageFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties ) const + { + return ::vkGetPhysicalDeviceImageFormatProperties( physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties); + } + VkResult vkGetPhysicalDeviceImageFormatProperties2( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties ) const + { + return ::vkGetPhysicalDeviceImageFormatProperties2( physicalDevice, pImageFormatInfo, pImageFormatProperties); + } + VkResult vkGetPhysicalDeviceImageFormatProperties2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties ) const + { + return ::vkGetPhysicalDeviceImageFormatProperties2KHR( physicalDevice, pImageFormatInfo, pImageFormatProperties); + } + void vkGetPhysicalDeviceMemoryProperties( VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties ) const + { + return 
::vkGetPhysicalDeviceMemoryProperties( physicalDevice, pMemoryProperties); + } + void vkGetPhysicalDeviceMemoryProperties2( VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties ) const + { + return ::vkGetPhysicalDeviceMemoryProperties2( physicalDevice, pMemoryProperties); + } + void vkGetPhysicalDeviceMemoryProperties2KHR( VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties ) const + { + return ::vkGetPhysicalDeviceMemoryProperties2KHR( physicalDevice, pMemoryProperties); + } +#ifdef VK_USE_PLATFORM_MIR_KHR + VkBool32 vkGetPhysicalDeviceMirPresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, MirConnection* connection ) const + { + return ::vkGetPhysicalDeviceMirPresentationSupportKHR( physicalDevice, queueFamilyIndex, connection); + } +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + void vkGetPhysicalDeviceMultisamplePropertiesEXT( VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties ) const + { + return ::vkGetPhysicalDeviceMultisamplePropertiesEXT( physicalDevice, samples, pMultisampleProperties); + } + VkResult vkGetPhysicalDevicePresentRectanglesKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects ) const + { + return ::vkGetPhysicalDevicePresentRectanglesKHR( physicalDevice, surface, pRectCount, pRects); + } + void vkGetPhysicalDeviceProperties( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties ) const + { + return ::vkGetPhysicalDeviceProperties( physicalDevice, pProperties); + } + void vkGetPhysicalDeviceProperties2( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties ) const + { + return ::vkGetPhysicalDeviceProperties2( physicalDevice, pProperties); + } + void vkGetPhysicalDeviceProperties2KHR( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties ) const + { + return ::vkGetPhysicalDeviceProperties2KHR( physicalDevice, pProperties); + } + void vkGetPhysicalDeviceQueueFamilyProperties( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties ) const + { + return ::vkGetPhysicalDeviceQueueFamilyProperties( physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties); + } + void vkGetPhysicalDeviceQueueFamilyProperties2( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties ) const + { + return ::vkGetPhysicalDeviceQueueFamilyProperties2( physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties); + } + void vkGetPhysicalDeviceQueueFamilyProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties ) const + { + return ::vkGetPhysicalDeviceQueueFamilyProperties2KHR( physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties); + } + void vkGetPhysicalDeviceSparseImageFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties ) const + { + return ::vkGetPhysicalDeviceSparseImageFormatProperties( physicalDevice, format, type, samples, usage, tiling, pPropertyCount, pProperties); + } + void vkGetPhysicalDeviceSparseImageFormatProperties2( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* 
pPropertyCount, VkSparseImageFormatProperties2* pProperties ) const + { + return ::vkGetPhysicalDeviceSparseImageFormatProperties2( physicalDevice, pFormatInfo, pPropertyCount, pProperties); + } + void vkGetPhysicalDeviceSparseImageFormatProperties2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties ) const + { + return ::vkGetPhysicalDeviceSparseImageFormatProperties2KHR( physicalDevice, pFormatInfo, pPropertyCount, pProperties); + } + VkResult vkGetPhysicalDeviceSurfaceCapabilities2EXT( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities ) const + { + return ::vkGetPhysicalDeviceSurfaceCapabilities2EXT( physicalDevice, surface, pSurfaceCapabilities); + } + VkResult vkGetPhysicalDeviceSurfaceCapabilities2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities ) const + { + return ::vkGetPhysicalDeviceSurfaceCapabilities2KHR( physicalDevice, pSurfaceInfo, pSurfaceCapabilities); + } + VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities ) const + { + return ::vkGetPhysicalDeviceSurfaceCapabilitiesKHR( physicalDevice, surface, pSurfaceCapabilities); + } + VkResult vkGetPhysicalDeviceSurfaceFormats2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats ) const + { + return ::vkGetPhysicalDeviceSurfaceFormats2KHR( physicalDevice, pSurfaceInfo, pSurfaceFormatCount, pSurfaceFormats); + } + VkResult vkGetPhysicalDeviceSurfaceFormatsKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats ) const + { + return ::vkGetPhysicalDeviceSurfaceFormatsKHR( physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats); + } + VkResult vkGetPhysicalDeviceSurfacePresentModesKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes ) const + { + return ::vkGetPhysicalDeviceSurfacePresentModesKHR( physicalDevice, surface, pPresentModeCount, pPresentModes); + } + VkResult vkGetPhysicalDeviceSurfaceSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported ) const + { + return ::vkGetPhysicalDeviceSurfaceSupportKHR( physicalDevice, queueFamilyIndex, surface, pSupported); + } +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + VkBool32 vkGetPhysicalDeviceWaylandPresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display ) const + { + return ::vkGetPhysicalDeviceWaylandPresentationSupportKHR( physicalDevice, queueFamilyIndex, display); + } +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkBool32 vkGetPhysicalDeviceWin32PresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex ) const + { + return ::vkGetPhysicalDeviceWin32PresentationSupportKHR( physicalDevice, queueFamilyIndex); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + VkBool32 vkGetPhysicalDeviceXcbPresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id ) const + { + return ::vkGetPhysicalDeviceXcbPresentationSupportKHR( 
physicalDevice, queueFamilyIndex, connection, visual_id); + } +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + VkBool32 vkGetPhysicalDeviceXlibPresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID ) const + { + return ::vkGetPhysicalDeviceXlibPresentationSupportKHR( physicalDevice, queueFamilyIndex, dpy, visualID); + } +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + VkResult vkGetPipelineCacheData( VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData ) const + { + return ::vkGetPipelineCacheData( device, pipelineCache, pDataSize, pData); + } + VkResult vkGetQueryPoolResults( VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags ) const + { + return ::vkGetQueryPoolResults( device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags); + } + void vkGetQueueCheckpointDataNV( VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData ) const + { + return ::vkGetQueueCheckpointDataNV( queue, pCheckpointDataCount, pCheckpointData); + } +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + VkResult vkGetRandROutputDisplayEXT( VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay ) const + { + return ::vkGetRandROutputDisplayEXT( physicalDevice, dpy, rrOutput, pDisplay); + } +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + VkResult vkGetRaytracingShaderHandlesNVX( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData ) const + { + return ::vkGetRaytracingShaderHandlesNVX( device, pipeline, firstGroup, groupCount, dataSize, pData); + } + VkResult vkGetRefreshCycleDurationGOOGLE( VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties ) const + { + return ::vkGetRefreshCycleDurationGOOGLE( device, swapchain, pDisplayTimingProperties); + } + void vkGetRenderAreaGranularity( VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity ) const + { + return ::vkGetRenderAreaGranularity( device, renderPass, pGranularity); + } + VkResult vkGetSemaphoreFdKHR( VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd ) const + { + return ::vkGetSemaphoreFdKHR( device, pGetFdInfo, pFd); + } +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetSemaphoreWin32HandleKHR( VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle ) const + { + return ::vkGetSemaphoreWin32HandleKHR( device, pGetWin32HandleInfo, pHandle); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + VkResult vkGetShaderInfoAMD( VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo ) const + { + return ::vkGetShaderInfoAMD( device, pipeline, shaderStage, infoType, pInfoSize, pInfo); + } + VkResult vkGetSwapchainCounterEXT( VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue ) const + { + return ::vkGetSwapchainCounterEXT( device, swapchain, counter, pCounterValue); + } + VkResult vkGetSwapchainImagesKHR( VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages ) const + { + return ::vkGetSwapchainImagesKHR( device, swapchain, pSwapchainImageCount, pSwapchainImages); + } + VkResult vkGetSwapchainStatusKHR( VkDevice device, VkSwapchainKHR swapchain ) const + { + return 
::vkGetSwapchainStatusKHR( device, swapchain); + } + VkResult vkGetValidationCacheDataEXT( VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData ) const + { + return ::vkGetValidationCacheDataEXT( device, validationCache, pDataSize, pData); + } + VkResult vkImportFenceFdKHR( VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo ) const + { + return ::vkImportFenceFdKHR( device, pImportFenceFdInfo); + } +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkImportFenceWin32HandleKHR( VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo ) const + { + return ::vkImportFenceWin32HandleKHR( device, pImportFenceWin32HandleInfo); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + VkResult vkImportSemaphoreFdKHR( VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo ) const + { + return ::vkImportSemaphoreFdKHR( device, pImportSemaphoreFdInfo); + } +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkImportSemaphoreWin32HandleKHR( VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo ) const + { + return ::vkImportSemaphoreWin32HandleKHR( device, pImportSemaphoreWin32HandleInfo); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + VkResult vkInvalidateMappedMemoryRanges( VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges ) const + { + return ::vkInvalidateMappedMemoryRanges( device, memoryRangeCount, pMemoryRanges); + } + VkResult vkMapMemory( VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData ) const + { + return ::vkMapMemory( device, memory, offset, size, flags, ppData); + } + VkResult vkMergePipelineCaches( VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches ) const + { + return ::vkMergePipelineCaches( device, dstCache, srcCacheCount, pSrcCaches); + } + VkResult vkMergeValidationCachesEXT( VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches ) const + { + return ::vkMergeValidationCachesEXT( device, dstCache, srcCacheCount, pSrcCaches); + } + void vkQueueBeginDebugUtilsLabelEXT( VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo ) const + { + return ::vkQueueBeginDebugUtilsLabelEXT( queue, pLabelInfo); + } + VkResult vkQueueBindSparse( VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence ) const + { + return ::vkQueueBindSparse( queue, bindInfoCount, pBindInfo, fence); + } + void vkQueueEndDebugUtilsLabelEXT( VkQueue queue ) const + { + return ::vkQueueEndDebugUtilsLabelEXT( queue); + } + void vkQueueInsertDebugUtilsLabelEXT( VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo ) const + { + return ::vkQueueInsertDebugUtilsLabelEXT( queue, pLabelInfo); + } + VkResult vkQueuePresentKHR( VkQueue queue, const VkPresentInfoKHR* pPresentInfo ) const + { + return ::vkQueuePresentKHR( queue, pPresentInfo); + } + VkResult vkQueueSubmit( VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence ) const + { + return ::vkQueueSubmit( queue, submitCount, pSubmits, fence); + } + VkResult vkQueueWaitIdle( VkQueue queue ) const + { + return ::vkQueueWaitIdle( queue); + } + VkResult vkRegisterDeviceEventEXT( VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence ) const + { + return ::vkRegisterDeviceEventEXT( device, pDeviceEventInfo, pAllocator, pFence); + } + VkResult 
vkRegisterDisplayEventEXT( VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence ) const + { + return ::vkRegisterDisplayEventEXT( device, display, pDisplayEventInfo, pAllocator, pFence); + } + VkResult vkRegisterObjectsNVX( VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices ) const + { + return ::vkRegisterObjectsNVX( device, objectTable, objectCount, ppObjectTableEntries, pObjectIndices); + } + VkResult vkReleaseDisplayEXT( VkPhysicalDevice physicalDevice, VkDisplayKHR display ) const + { + return ::vkReleaseDisplayEXT( physicalDevice, display); + } + VkResult vkResetCommandBuffer( VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags ) const + { + return ::vkResetCommandBuffer( commandBuffer, flags); + } + VkResult vkResetCommandPool( VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags ) const + { + return ::vkResetCommandPool( device, commandPool, flags); + } + VkResult vkResetDescriptorPool( VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags ) const + { + return ::vkResetDescriptorPool( device, descriptorPool, flags); + } + VkResult vkResetEvent( VkDevice device, VkEvent event ) const + { + return ::vkResetEvent( device, event); + } + VkResult vkResetFences( VkDevice device, uint32_t fenceCount, const VkFence* pFences ) const + { + return ::vkResetFences( device, fenceCount, pFences); + } + VkResult vkSetDebugUtilsObjectNameEXT( VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo ) const + { + return ::vkSetDebugUtilsObjectNameEXT( device, pNameInfo); + } + VkResult vkSetDebugUtilsObjectTagEXT( VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo ) const + { + return ::vkSetDebugUtilsObjectTagEXT( device, pTagInfo); + } + VkResult vkSetEvent( VkDevice device, VkEvent event ) const + { + return ::vkSetEvent( device, event); + } + void vkSetHdrMetadataEXT( VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata ) const + { + return ::vkSetHdrMetadataEXT( device, swapchainCount, pSwapchains, pMetadata); + } + void vkSubmitDebugUtilsMessageEXT( VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData ) const + { + return ::vkSubmitDebugUtilsMessageEXT( instance, messageSeverity, messageTypes, pCallbackData); + } + void vkTrimCommandPool( VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags ) const + { + return ::vkTrimCommandPool( device, commandPool, flags); + } + void vkTrimCommandPoolKHR( VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags ) const + { + return ::vkTrimCommandPoolKHR( device, commandPool, flags); + } + void vkUnmapMemory( VkDevice device, VkDeviceMemory memory ) const + { + return ::vkUnmapMemory( device, memory); + } + VkResult vkUnregisterObjectsNVX( VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices ) const + { + return ::vkUnregisterObjectsNVX( device, objectTable, objectCount, pObjectEntryTypes, pObjectIndices); + } + void vkUpdateDescriptorSetWithTemplate( VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData ) 
const
+    {
+      return ::vkUpdateDescriptorSetWithTemplate( device, descriptorSet, descriptorUpdateTemplate, pData);
+    }
+    void vkUpdateDescriptorSetWithTemplateKHR( VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData ) const
+    {
+      return ::vkUpdateDescriptorSetWithTemplateKHR( device, descriptorSet, descriptorUpdateTemplate, pData);
+    }
+    void vkUpdateDescriptorSets( VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies ) const
+    {
+      return ::vkUpdateDescriptorSets( device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
+    }
+    VkResult vkWaitForFences( VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout ) const
+    {
+      return ::vkWaitForFences( device, fenceCount, pFences, waitAll, timeout);
+    }
+  };
+
+  struct AllocationCallbacks;
+
+  template <typename OwnerType, typename Dispatch>
+  class ObjectDestroy
+  {
+  public:
+    ObjectDestroy( OwnerType owner = OwnerType(), Optional<const AllocationCallbacks> allocator = nullptr, Dispatch const &dispatch = Dispatch() )
+      : m_owner( owner )
+      , m_allocator( allocator )
+      , m_dispatch( &dispatch )
+    {}
+
+    OwnerType getOwner() const { return m_owner; }
+    Optional<const AllocationCallbacks> getAllocator() const { return m_allocator; }
+
+  protected:
+    template <typename T>
+    void destroy(T t)
+    {
+      m_owner.destroy( t, m_allocator, *m_dispatch );
+    }
+
+  private:
+    OwnerType m_owner;
+    Optional<const AllocationCallbacks> m_allocator;
+    Dispatch const* m_dispatch;
+  };
+
+  class NoParent;
+
+  template <typename Dispatch>
+  class ObjectDestroy<NoParent, Dispatch>
+  {
+  public:
+    ObjectDestroy( Optional<const AllocationCallbacks> allocator = nullptr, Dispatch const &dispatch = Dispatch() )
+      : m_allocator( allocator )
+      , m_dispatch( &dispatch )
+    {}
+
+    Optional<const AllocationCallbacks> getAllocator() const { return m_allocator; }
+
+  protected:
+    template <typename T>
+    void destroy(T t)
+    {
+      t.destroy( m_allocator, *m_dispatch );
+    }
+
+  private:
+    Optional<const AllocationCallbacks> m_allocator;
+    Dispatch const* m_dispatch;
+  };
+
+  template <typename OwnerType, typename Dispatch>
+  class ObjectFree
+  {
+  public:
+    ObjectFree( OwnerType owner = OwnerType(), Optional<const AllocationCallbacks> allocator = nullptr, Dispatch const &dispatch = Dispatch() )
+      : m_owner( owner )
+      , m_allocator( allocator )
+      , m_dispatch( &dispatch )
+    {}
+
+    OwnerType getOwner() const { return m_owner; }
+    Optional<const AllocationCallbacks> getAllocator() const { return m_allocator; }
+
+  protected:
+    template <typename T>
+    void destroy(T t)
+    {
+      m_owner.free( t, m_allocator, *m_dispatch );
+    }
+
+  private:
+    OwnerType m_owner;
+    Optional<const AllocationCallbacks> m_allocator;
+    Dispatch const* m_dispatch;
+  };
+
+  template <typename OwnerType, typename PoolType, typename Dispatch>
+  class PoolFree
+  {
+  public:
+    PoolFree( OwnerType owner = OwnerType(), PoolType pool = PoolType(), Dispatch const &dispatch = Dispatch() )
+      : m_owner( owner )
+      , m_pool( pool )
+      , m_dispatch( &dispatch )
+    {}
+
+    OwnerType getOwner() const { return m_owner; }
+    PoolType getPool() const { return m_pool; }
+
+  protected:
+    template <typename T>
+    void destroy(T t)
+    {
+      m_owner.free( m_pool, t, *m_dispatch );
+    }
+
+  private:
+    OwnerType m_owner;
+    PoolType m_pool;
+    Dispatch const* m_dispatch;
+  };
+
+  using SampleMask = uint32_t;
+
+  using Bool32 = uint32_t;
+
+  using DeviceSize = uint64_t;
+
+  enum class FramebufferCreateFlagBits
+  {
+  };
+
+  using FramebufferCreateFlags = Flags<FramebufferCreateFlagBits, VkFramebufferCreateFlags>;
+
+  enum class QueryPoolCreateFlagBits
+  {
+  };
+
+  using QueryPoolCreateFlags = Flags<QueryPoolCreateFlagBits, VkQueryPoolCreateFlags>;
+
+  enum class RenderPassCreateFlagBits
+  {
+  };
+
+  using RenderPassCreateFlags = Flags<RenderPassCreateFlagBits, VkRenderPassCreateFlags>;
+
+  enum class SamplerCreateFlagBits
+  {
+  };
+
+  using SamplerCreateFlags = Flags<SamplerCreateFlagBits, VkSamplerCreateFlags>;
+
+  enum class PipelineLayoutCreateFlagBits
+  {
+  };
+
using PipelineLayoutCreateFlags = Flags; + + enum class PipelineCacheCreateFlagBits + { + }; + + using PipelineCacheCreateFlags = Flags; + + enum class PipelineDepthStencilStateCreateFlagBits + { + }; + + using PipelineDepthStencilStateCreateFlags = Flags; + + enum class PipelineDynamicStateCreateFlagBits + { + }; + + using PipelineDynamicStateCreateFlags = Flags; + + enum class PipelineColorBlendStateCreateFlagBits + { + }; + + using PipelineColorBlendStateCreateFlags = Flags; + + enum class PipelineMultisampleStateCreateFlagBits + { + }; + + using PipelineMultisampleStateCreateFlags = Flags; + + enum class PipelineRasterizationStateCreateFlagBits + { + }; + + using PipelineRasterizationStateCreateFlags = Flags; + + enum class PipelineViewportStateCreateFlagBits + { + }; + + using PipelineViewportStateCreateFlags = Flags; + + enum class PipelineTessellationStateCreateFlagBits + { + }; + + using PipelineTessellationStateCreateFlags = Flags; + + enum class PipelineInputAssemblyStateCreateFlagBits + { + }; + + using PipelineInputAssemblyStateCreateFlags = Flags; + + enum class PipelineVertexInputStateCreateFlagBits + { + }; + + using PipelineVertexInputStateCreateFlags = Flags; + + enum class PipelineShaderStageCreateFlagBits + { + }; + + using PipelineShaderStageCreateFlags = Flags; + + enum class BufferViewCreateFlagBits + { + }; + + using BufferViewCreateFlags = Flags; + + enum class InstanceCreateFlagBits + { + }; + + using InstanceCreateFlags = Flags; + + enum class DeviceCreateFlagBits + { + }; + + using DeviceCreateFlags = Flags; + + enum class ImageViewCreateFlagBits + { + }; + + using ImageViewCreateFlags = Flags; + + enum class SemaphoreCreateFlagBits + { + }; + + using SemaphoreCreateFlags = Flags; + + enum class ShaderModuleCreateFlagBits + { + }; + + using ShaderModuleCreateFlags = Flags; + + enum class EventCreateFlagBits + { + }; + + using EventCreateFlags = Flags; + + enum class MemoryMapFlagBits + { + }; + + using MemoryMapFlags = Flags; + + enum class DescriptorPoolResetFlagBits + { + }; + + using DescriptorPoolResetFlags = Flags; + + enum class DescriptorUpdateTemplateCreateFlagBits + { + }; + + using DescriptorUpdateTemplateCreateFlags = Flags; + + using DescriptorUpdateTemplateCreateFlagsKHR = DescriptorUpdateTemplateCreateFlags; + + enum class DisplayModeCreateFlagBitsKHR + { + }; + + using DisplayModeCreateFlagsKHR = Flags; + + enum class DisplaySurfaceCreateFlagBitsKHR + { + }; + + using DisplaySurfaceCreateFlagsKHR = Flags; + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + enum class AndroidSurfaceCreateFlagBitsKHR + { + }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + using AndroidSurfaceCreateFlagsKHR = Flags; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#ifdef VK_USE_PLATFORM_MIR_KHR + enum class MirSurfaceCreateFlagBitsKHR + { + }; +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + +#ifdef VK_USE_PLATFORM_MIR_KHR + using MirSurfaceCreateFlagsKHR = Flags; +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + +#ifdef VK_USE_PLATFORM_VI_NN + enum class ViSurfaceCreateFlagBitsNN + { + }; +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_VI_NN + using ViSurfaceCreateFlagsNN = Flags; +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + enum class WaylandSurfaceCreateFlagBitsKHR + { + }; +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + using WaylandSurfaceCreateFlagsKHR = Flags; +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + enum class Win32SurfaceCreateFlagBitsKHR + { + }; +#endif 
/*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + using Win32SurfaceCreateFlagsKHR = Flags; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + enum class XlibSurfaceCreateFlagBitsKHR + { + }; +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + using XlibSurfaceCreateFlagsKHR = Flags; +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + enum class XcbSurfaceCreateFlagBitsKHR + { + }; +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + using XcbSurfaceCreateFlagsKHR = Flags; +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + +#ifdef VK_USE_PLATFORM_IOS_MVK + enum class IOSSurfaceCreateFlagBitsMVK + { + }; +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + +#ifdef VK_USE_PLATFORM_IOS_MVK + using IOSSurfaceCreateFlagsMVK = Flags; +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + +#ifdef VK_USE_PLATFORM_MACOS_MVK + enum class MacOSSurfaceCreateFlagBitsMVK + { + }; +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + +#ifdef VK_USE_PLATFORM_MACOS_MVK + using MacOSSurfaceCreateFlagsMVK = Flags; +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + enum class CommandPoolTrimFlagBits + { + }; + + using CommandPoolTrimFlags = Flags; + + using CommandPoolTrimFlagsKHR = CommandPoolTrimFlags; + + enum class PipelineViewportSwizzleStateCreateFlagBitsNV + { + }; + + using PipelineViewportSwizzleStateCreateFlagsNV = Flags; + + enum class PipelineDiscardRectangleStateCreateFlagBitsEXT + { + }; + + using PipelineDiscardRectangleStateCreateFlagsEXT = Flags; + + enum class PipelineCoverageToColorStateCreateFlagBitsNV + { + }; + + using PipelineCoverageToColorStateCreateFlagsNV = Flags; + + enum class PipelineCoverageModulationStateCreateFlagBitsNV + { + }; + + using PipelineCoverageModulationStateCreateFlagsNV = Flags; + + enum class ValidationCacheCreateFlagBitsEXT + { + }; + + using ValidationCacheCreateFlagsEXT = Flags; + + enum class DebugUtilsMessengerCreateFlagBitsEXT + { + }; + + using DebugUtilsMessengerCreateFlagsEXT = Flags; + + enum class DebugUtilsMessengerCallbackDataFlagBitsEXT + { + }; + + using DebugUtilsMessengerCallbackDataFlagsEXT = Flags; + + enum class PipelineRasterizationConservativeStateCreateFlagBitsEXT + { + }; + + using PipelineRasterizationConservativeStateCreateFlagsEXT = Flags; + + class DeviceMemory + { + public: + VULKAN_HPP_CONSTEXPR DeviceMemory() + : m_deviceMemory(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DeviceMemory( std::nullptr_t ) + : m_deviceMemory(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DeviceMemory( VkDeviceMemory deviceMemory ) + : m_deviceMemory( deviceMemory ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DeviceMemory & operator=(VkDeviceMemory deviceMemory) + { + m_deviceMemory = deviceMemory; + return *this; + } +#endif + + DeviceMemory & operator=( std::nullptr_t ) + { + m_deviceMemory = VK_NULL_HANDLE; + return *this; + } + + bool operator==( DeviceMemory const & rhs ) const + { + return m_deviceMemory == rhs.m_deviceMemory; + } + + bool operator!=(DeviceMemory const & rhs ) const + { + return m_deviceMemory != rhs.m_deviceMemory; + } + + bool operator<(DeviceMemory const & rhs ) const + { + return m_deviceMemory < rhs.m_deviceMemory; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDeviceMemory() const + { + return m_deviceMemory; + } + + explicit operator bool() const + { + return m_deviceMemory != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_deviceMemory == VK_NULL_HANDLE; + } + + private: + VkDeviceMemory m_deviceMemory; + }; + + static_assert( sizeof( DeviceMemory ) == 
sizeof( VkDeviceMemory ), "handle and wrapper have different size!" ); + + class CommandPool + { + public: + VULKAN_HPP_CONSTEXPR CommandPool() + : m_commandPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR CommandPool( std::nullptr_t ) + : m_commandPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT CommandPool( VkCommandPool commandPool ) + : m_commandPool( commandPool ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + CommandPool & operator=(VkCommandPool commandPool) + { + m_commandPool = commandPool; + return *this; + } +#endif + + CommandPool & operator=( std::nullptr_t ) + { + m_commandPool = VK_NULL_HANDLE; + return *this; + } + + bool operator==( CommandPool const & rhs ) const + { + return m_commandPool == rhs.m_commandPool; + } + + bool operator!=(CommandPool const & rhs ) const + { + return m_commandPool != rhs.m_commandPool; + } + + bool operator<(CommandPool const & rhs ) const + { + return m_commandPool < rhs.m_commandPool; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCommandPool() const + { + return m_commandPool; + } + + explicit operator bool() const + { + return m_commandPool != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_commandPool == VK_NULL_HANDLE; + } + + private: + VkCommandPool m_commandPool; + }; + + static_assert( sizeof( CommandPool ) == sizeof( VkCommandPool ), "handle and wrapper have different size!" ); + + class Buffer + { + public: + VULKAN_HPP_CONSTEXPR Buffer() + : m_buffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Buffer( std::nullptr_t ) + : m_buffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Buffer( VkBuffer buffer ) + : m_buffer( buffer ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Buffer & operator=(VkBuffer buffer) + { + m_buffer = buffer; + return *this; + } +#endif + + Buffer & operator=( std::nullptr_t ) + { + m_buffer = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Buffer const & rhs ) const + { + return m_buffer == rhs.m_buffer; + } + + bool operator!=(Buffer const & rhs ) const + { + return m_buffer != rhs.m_buffer; + } + + bool operator<(Buffer const & rhs ) const + { + return m_buffer < rhs.m_buffer; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBuffer() const + { + return m_buffer; + } + + explicit operator bool() const + { + return m_buffer != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_buffer == VK_NULL_HANDLE; + } + + private: + VkBuffer m_buffer; + }; + + static_assert( sizeof( Buffer ) == sizeof( VkBuffer ), "handle and wrapper have different size!" 
); + + class BufferView + { + public: + VULKAN_HPP_CONSTEXPR BufferView() + : m_bufferView(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR BufferView( std::nullptr_t ) + : m_bufferView(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT BufferView( VkBufferView bufferView ) + : m_bufferView( bufferView ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + BufferView & operator=(VkBufferView bufferView) + { + m_bufferView = bufferView; + return *this; + } +#endif + + BufferView & operator=( std::nullptr_t ) + { + m_bufferView = VK_NULL_HANDLE; + return *this; + } + + bool operator==( BufferView const & rhs ) const + { + return m_bufferView == rhs.m_bufferView; + } + + bool operator!=(BufferView const & rhs ) const + { + return m_bufferView != rhs.m_bufferView; + } + + bool operator<(BufferView const & rhs ) const + { + return m_bufferView < rhs.m_bufferView; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBufferView() const + { + return m_bufferView; + } + + explicit operator bool() const + { + return m_bufferView != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_bufferView == VK_NULL_HANDLE; + } + + private: + VkBufferView m_bufferView; + }; + + static_assert( sizeof( BufferView ) == sizeof( VkBufferView ), "handle and wrapper have different size!" ); + + class Image + { + public: + VULKAN_HPP_CONSTEXPR Image() + : m_image(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Image( std::nullptr_t ) + : m_image(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Image( VkImage image ) + : m_image( image ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Image & operator=(VkImage image) + { + m_image = image; + return *this; + } +#endif + + Image & operator=( std::nullptr_t ) + { + m_image = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Image const & rhs ) const + { + return m_image == rhs.m_image; + } + + bool operator!=(Image const & rhs ) const + { + return m_image != rhs.m_image; + } + + bool operator<(Image const & rhs ) const + { + return m_image < rhs.m_image; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkImage() const + { + return m_image; + } + + explicit operator bool() const + { + return m_image != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_image == VK_NULL_HANDLE; + } + + private: + VkImage m_image; + }; + + static_assert( sizeof( Image ) == sizeof( VkImage ), "handle and wrapper have different size!" 
); + + class ImageView + { + public: + VULKAN_HPP_CONSTEXPR ImageView() + : m_imageView(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR ImageView( std::nullptr_t ) + : m_imageView(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT ImageView( VkImageView imageView ) + : m_imageView( imageView ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + ImageView & operator=(VkImageView imageView) + { + m_imageView = imageView; + return *this; + } +#endif + + ImageView & operator=( std::nullptr_t ) + { + m_imageView = VK_NULL_HANDLE; + return *this; + } + + bool operator==( ImageView const & rhs ) const + { + return m_imageView == rhs.m_imageView; + } + + bool operator!=(ImageView const & rhs ) const + { + return m_imageView != rhs.m_imageView; + } + + bool operator<(ImageView const & rhs ) const + { + return m_imageView < rhs.m_imageView; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkImageView() const + { + return m_imageView; + } + + explicit operator bool() const + { + return m_imageView != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_imageView == VK_NULL_HANDLE; + } + + private: + VkImageView m_imageView; + }; + + static_assert( sizeof( ImageView ) == sizeof( VkImageView ), "handle and wrapper have different size!" ); + + class ShaderModule + { + public: + VULKAN_HPP_CONSTEXPR ShaderModule() + : m_shaderModule(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR ShaderModule( std::nullptr_t ) + : m_shaderModule(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT ShaderModule( VkShaderModule shaderModule ) + : m_shaderModule( shaderModule ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + ShaderModule & operator=(VkShaderModule shaderModule) + { + m_shaderModule = shaderModule; + return *this; + } +#endif + + ShaderModule & operator=( std::nullptr_t ) + { + m_shaderModule = VK_NULL_HANDLE; + return *this; + } + + bool operator==( ShaderModule const & rhs ) const + { + return m_shaderModule == rhs.m_shaderModule; + } + + bool operator!=(ShaderModule const & rhs ) const + { + return m_shaderModule != rhs.m_shaderModule; + } + + bool operator<(ShaderModule const & rhs ) const + { + return m_shaderModule < rhs.m_shaderModule; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkShaderModule() const + { + return m_shaderModule; + } + + explicit operator bool() const + { + return m_shaderModule != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_shaderModule == VK_NULL_HANDLE; + } + + private: + VkShaderModule m_shaderModule; + }; + + static_assert( sizeof( ShaderModule ) == sizeof( VkShaderModule ), "handle and wrapper have different size!" 
); + + class Pipeline + { + public: + VULKAN_HPP_CONSTEXPR Pipeline() + : m_pipeline(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Pipeline( std::nullptr_t ) + : m_pipeline(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Pipeline( VkPipeline pipeline ) + : m_pipeline( pipeline ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Pipeline & operator=(VkPipeline pipeline) + { + m_pipeline = pipeline; + return *this; + } +#endif + + Pipeline & operator=( std::nullptr_t ) + { + m_pipeline = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Pipeline const & rhs ) const + { + return m_pipeline == rhs.m_pipeline; + } + + bool operator!=(Pipeline const & rhs ) const + { + return m_pipeline != rhs.m_pipeline; + } + + bool operator<(Pipeline const & rhs ) const + { + return m_pipeline < rhs.m_pipeline; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipeline() const + { + return m_pipeline; + } + + explicit operator bool() const + { + return m_pipeline != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_pipeline == VK_NULL_HANDLE; + } + + private: + VkPipeline m_pipeline; + }; + + static_assert( sizeof( Pipeline ) == sizeof( VkPipeline ), "handle and wrapper have different size!" ); + + class PipelineLayout + { + public: + VULKAN_HPP_CONSTEXPR PipelineLayout() + : m_pipelineLayout(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR PipelineLayout( std::nullptr_t ) + : m_pipelineLayout(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT PipelineLayout( VkPipelineLayout pipelineLayout ) + : m_pipelineLayout( pipelineLayout ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + PipelineLayout & operator=(VkPipelineLayout pipelineLayout) + { + m_pipelineLayout = pipelineLayout; + return *this; + } +#endif + + PipelineLayout & operator=( std::nullptr_t ) + { + m_pipelineLayout = VK_NULL_HANDLE; + return *this; + } + + bool operator==( PipelineLayout const & rhs ) const + { + return m_pipelineLayout == rhs.m_pipelineLayout; + } + + bool operator!=(PipelineLayout const & rhs ) const + { + return m_pipelineLayout != rhs.m_pipelineLayout; + } + + bool operator<(PipelineLayout const & rhs ) const + { + return m_pipelineLayout < rhs.m_pipelineLayout; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipelineLayout() const + { + return m_pipelineLayout; + } + + explicit operator bool() const + { + return m_pipelineLayout != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_pipelineLayout == VK_NULL_HANDLE; + } + + private: + VkPipelineLayout m_pipelineLayout; + }; + + static_assert( sizeof( PipelineLayout ) == sizeof( VkPipelineLayout ), "handle and wrapper have different size!" 
); + + class Sampler + { + public: + VULKAN_HPP_CONSTEXPR Sampler() + : m_sampler(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Sampler( std::nullptr_t ) + : m_sampler(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Sampler( VkSampler sampler ) + : m_sampler( sampler ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Sampler & operator=(VkSampler sampler) + { + m_sampler = sampler; + return *this; + } +#endif + + Sampler & operator=( std::nullptr_t ) + { + m_sampler = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Sampler const & rhs ) const + { + return m_sampler == rhs.m_sampler; + } + + bool operator!=(Sampler const & rhs ) const + { + return m_sampler != rhs.m_sampler; + } + + bool operator<(Sampler const & rhs ) const + { + return m_sampler < rhs.m_sampler; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSampler() const + { + return m_sampler; + } + + explicit operator bool() const + { + return m_sampler != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_sampler == VK_NULL_HANDLE; + } + + private: + VkSampler m_sampler; + }; + + static_assert( sizeof( Sampler ) == sizeof( VkSampler ), "handle and wrapper have different size!" ); + + class DescriptorSet + { + public: + VULKAN_HPP_CONSTEXPR DescriptorSet() + : m_descriptorSet(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSet( std::nullptr_t ) + : m_descriptorSet(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorSet( VkDescriptorSet descriptorSet ) + : m_descriptorSet( descriptorSet ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DescriptorSet & operator=(VkDescriptorSet descriptorSet) + { + m_descriptorSet = descriptorSet; + return *this; + } +#endif + + DescriptorSet & operator=( std::nullptr_t ) + { + m_descriptorSet = VK_NULL_HANDLE; + return *this; + } + + bool operator==( DescriptorSet const & rhs ) const + { + return m_descriptorSet == rhs.m_descriptorSet; + } + + bool operator!=(DescriptorSet const & rhs ) const + { + return m_descriptorSet != rhs.m_descriptorSet; + } + + bool operator<(DescriptorSet const & rhs ) const + { + return m_descriptorSet < rhs.m_descriptorSet; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorSet() const + { + return m_descriptorSet; + } + + explicit operator bool() const + { + return m_descriptorSet != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_descriptorSet == VK_NULL_HANDLE; + } + + private: + VkDescriptorSet m_descriptorSet; + }; + + static_assert( sizeof( DescriptorSet ) == sizeof( VkDescriptorSet ), "handle and wrapper have different size!" 
); + + class DescriptorSetLayout + { + public: + VULKAN_HPP_CONSTEXPR DescriptorSetLayout() + : m_descriptorSetLayout(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSetLayout( std::nullptr_t ) + : m_descriptorSetLayout(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorSetLayout( VkDescriptorSetLayout descriptorSetLayout ) + : m_descriptorSetLayout( descriptorSetLayout ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DescriptorSetLayout & operator=(VkDescriptorSetLayout descriptorSetLayout) + { + m_descriptorSetLayout = descriptorSetLayout; + return *this; + } +#endif + + DescriptorSetLayout & operator=( std::nullptr_t ) + { + m_descriptorSetLayout = VK_NULL_HANDLE; + return *this; + } + + bool operator==( DescriptorSetLayout const & rhs ) const + { + return m_descriptorSetLayout == rhs.m_descriptorSetLayout; + } + + bool operator!=(DescriptorSetLayout const & rhs ) const + { + return m_descriptorSetLayout != rhs.m_descriptorSetLayout; + } + + bool operator<(DescriptorSetLayout const & rhs ) const + { + return m_descriptorSetLayout < rhs.m_descriptorSetLayout; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorSetLayout() const + { + return m_descriptorSetLayout; + } + + explicit operator bool() const + { + return m_descriptorSetLayout != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_descriptorSetLayout == VK_NULL_HANDLE; + } + + private: + VkDescriptorSetLayout m_descriptorSetLayout; + }; + + static_assert( sizeof( DescriptorSetLayout ) == sizeof( VkDescriptorSetLayout ), "handle and wrapper have different size!" ); + + class DescriptorPool + { + public: + VULKAN_HPP_CONSTEXPR DescriptorPool() + : m_descriptorPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DescriptorPool( std::nullptr_t ) + : m_descriptorPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorPool( VkDescriptorPool descriptorPool ) + : m_descriptorPool( descriptorPool ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DescriptorPool & operator=(VkDescriptorPool descriptorPool) + { + m_descriptorPool = descriptorPool; + return *this; + } +#endif + + DescriptorPool & operator=( std::nullptr_t ) + { + m_descriptorPool = VK_NULL_HANDLE; + return *this; + } + + bool operator==( DescriptorPool const & rhs ) const + { + return m_descriptorPool == rhs.m_descriptorPool; + } + + bool operator!=(DescriptorPool const & rhs ) const + { + return m_descriptorPool != rhs.m_descriptorPool; + } + + bool operator<(DescriptorPool const & rhs ) const + { + return m_descriptorPool < rhs.m_descriptorPool; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorPool() const + { + return m_descriptorPool; + } + + explicit operator bool() const + { + return m_descriptorPool != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_descriptorPool == VK_NULL_HANDLE; + } + + private: + VkDescriptorPool m_descriptorPool; + }; + + static_assert( sizeof( DescriptorPool ) == sizeof( VkDescriptorPool ), "handle and wrapper have different size!" 
); + + class Fence + { + public: + VULKAN_HPP_CONSTEXPR Fence() + : m_fence(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Fence( std::nullptr_t ) + : m_fence(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Fence( VkFence fence ) + : m_fence( fence ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Fence & operator=(VkFence fence) + { + m_fence = fence; + return *this; + } +#endif + + Fence & operator=( std::nullptr_t ) + { + m_fence = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Fence const & rhs ) const + { + return m_fence == rhs.m_fence; + } + + bool operator!=(Fence const & rhs ) const + { + return m_fence != rhs.m_fence; + } + + bool operator<(Fence const & rhs ) const + { + return m_fence < rhs.m_fence; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkFence() const + { + return m_fence; + } + + explicit operator bool() const + { + return m_fence != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_fence == VK_NULL_HANDLE; + } + + private: + VkFence m_fence; + }; + + static_assert( sizeof( Fence ) == sizeof( VkFence ), "handle and wrapper have different size!" ); + + class Semaphore + { + public: + VULKAN_HPP_CONSTEXPR Semaphore() + : m_semaphore(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Semaphore( std::nullptr_t ) + : m_semaphore(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Semaphore( VkSemaphore semaphore ) + : m_semaphore( semaphore ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Semaphore & operator=(VkSemaphore semaphore) + { + m_semaphore = semaphore; + return *this; + } +#endif + + Semaphore & operator=( std::nullptr_t ) + { + m_semaphore = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Semaphore const & rhs ) const + { + return m_semaphore == rhs.m_semaphore; + } + + bool operator!=(Semaphore const & rhs ) const + { + return m_semaphore != rhs.m_semaphore; + } + + bool operator<(Semaphore const & rhs ) const + { + return m_semaphore < rhs.m_semaphore; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSemaphore() const + { + return m_semaphore; + } + + explicit operator bool() const + { + return m_semaphore != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_semaphore == VK_NULL_HANDLE; + } + + private: + VkSemaphore m_semaphore; + }; + + static_assert( sizeof( Semaphore ) == sizeof( VkSemaphore ), "handle and wrapper have different size!" ); + + class Event + { + public: + VULKAN_HPP_CONSTEXPR Event() + : m_event(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Event( std::nullptr_t ) + : m_event(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Event( VkEvent event ) + : m_event( event ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Event & operator=(VkEvent event) + { + m_event = event; + return *this; + } +#endif + + Event & operator=( std::nullptr_t ) + { + m_event = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Event const & rhs ) const + { + return m_event == rhs.m_event; + } + + bool operator!=(Event const & rhs ) const + { + return m_event != rhs.m_event; + } + + bool operator<(Event const & rhs ) const + { + return m_event < rhs.m_event; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkEvent() const + { + return m_event; + } + + explicit operator bool() const + { + return m_event != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_event == VK_NULL_HANDLE; + } + + private: + VkEvent m_event; + }; + + static_assert( sizeof( Event ) == sizeof( VkEvent ), "handle and wrapper have different size!" 
); + + class QueryPool + { + public: + VULKAN_HPP_CONSTEXPR QueryPool() + : m_queryPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR QueryPool( std::nullptr_t ) + : m_queryPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT QueryPool( VkQueryPool queryPool ) + : m_queryPool( queryPool ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + QueryPool & operator=(VkQueryPool queryPool) + { + m_queryPool = queryPool; + return *this; + } +#endif + + QueryPool & operator=( std::nullptr_t ) + { + m_queryPool = VK_NULL_HANDLE; + return *this; + } + + bool operator==( QueryPool const & rhs ) const + { + return m_queryPool == rhs.m_queryPool; + } + + bool operator!=(QueryPool const & rhs ) const + { + return m_queryPool != rhs.m_queryPool; + } + + bool operator<(QueryPool const & rhs ) const + { + return m_queryPool < rhs.m_queryPool; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkQueryPool() const + { + return m_queryPool; + } + + explicit operator bool() const + { + return m_queryPool != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_queryPool == VK_NULL_HANDLE; + } + + private: + VkQueryPool m_queryPool; + }; + + static_assert( sizeof( QueryPool ) == sizeof( VkQueryPool ), "handle and wrapper have different size!" ); + + class Framebuffer + { + public: + VULKAN_HPP_CONSTEXPR Framebuffer() + : m_framebuffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Framebuffer( std::nullptr_t ) + : m_framebuffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Framebuffer( VkFramebuffer framebuffer ) + : m_framebuffer( framebuffer ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Framebuffer & operator=(VkFramebuffer framebuffer) + { + m_framebuffer = framebuffer; + return *this; + } +#endif + + Framebuffer & operator=( std::nullptr_t ) + { + m_framebuffer = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Framebuffer const & rhs ) const + { + return m_framebuffer == rhs.m_framebuffer; + } + + bool operator!=(Framebuffer const & rhs ) const + { + return m_framebuffer != rhs.m_framebuffer; + } + + bool operator<(Framebuffer const & rhs ) const + { + return m_framebuffer < rhs.m_framebuffer; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkFramebuffer() const + { + return m_framebuffer; + } + + explicit operator bool() const + { + return m_framebuffer != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_framebuffer == VK_NULL_HANDLE; + } + + private: + VkFramebuffer m_framebuffer; + }; + + static_assert( sizeof( Framebuffer ) == sizeof( VkFramebuffer ), "handle and wrapper have different size!" 
); + + class RenderPass + { + public: + VULKAN_HPP_CONSTEXPR RenderPass() + : m_renderPass(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR RenderPass( std::nullptr_t ) + : m_renderPass(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT RenderPass( VkRenderPass renderPass ) + : m_renderPass( renderPass ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + RenderPass & operator=(VkRenderPass renderPass) + { + m_renderPass = renderPass; + return *this; + } +#endif + + RenderPass & operator=( std::nullptr_t ) + { + m_renderPass = VK_NULL_HANDLE; + return *this; + } + + bool operator==( RenderPass const & rhs ) const + { + return m_renderPass == rhs.m_renderPass; + } + + bool operator!=(RenderPass const & rhs ) const + { + return m_renderPass != rhs.m_renderPass; + } + + bool operator<(RenderPass const & rhs ) const + { + return m_renderPass < rhs.m_renderPass; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkRenderPass() const + { + return m_renderPass; + } + + explicit operator bool() const + { + return m_renderPass != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_renderPass == VK_NULL_HANDLE; + } + + private: + VkRenderPass m_renderPass; + }; + + static_assert( sizeof( RenderPass ) == sizeof( VkRenderPass ), "handle and wrapper have different size!" ); + + class PipelineCache + { + public: + VULKAN_HPP_CONSTEXPR PipelineCache() + : m_pipelineCache(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR PipelineCache( std::nullptr_t ) + : m_pipelineCache(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT PipelineCache( VkPipelineCache pipelineCache ) + : m_pipelineCache( pipelineCache ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + PipelineCache & operator=(VkPipelineCache pipelineCache) + { + m_pipelineCache = pipelineCache; + return *this; + } +#endif + + PipelineCache & operator=( std::nullptr_t ) + { + m_pipelineCache = VK_NULL_HANDLE; + return *this; + } + + bool operator==( PipelineCache const & rhs ) const + { + return m_pipelineCache == rhs.m_pipelineCache; + } + + bool operator!=(PipelineCache const & rhs ) const + { + return m_pipelineCache != rhs.m_pipelineCache; + } + + bool operator<(PipelineCache const & rhs ) const + { + return m_pipelineCache < rhs.m_pipelineCache; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipelineCache() const + { + return m_pipelineCache; + } + + explicit operator bool() const + { + return m_pipelineCache != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_pipelineCache == VK_NULL_HANDLE; + } + + private: + VkPipelineCache m_pipelineCache; + }; + + static_assert( sizeof( PipelineCache ) == sizeof( VkPipelineCache ), "handle and wrapper have different size!" 
); + + class ObjectTableNVX + { + public: + VULKAN_HPP_CONSTEXPR ObjectTableNVX() + : m_objectTableNVX(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR ObjectTableNVX( std::nullptr_t ) + : m_objectTableNVX(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT ObjectTableNVX( VkObjectTableNVX objectTableNVX ) + : m_objectTableNVX( objectTableNVX ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + ObjectTableNVX & operator=(VkObjectTableNVX objectTableNVX) + { + m_objectTableNVX = objectTableNVX; + return *this; + } +#endif + + ObjectTableNVX & operator=( std::nullptr_t ) + { + m_objectTableNVX = VK_NULL_HANDLE; + return *this; + } + + bool operator==( ObjectTableNVX const & rhs ) const + { + return m_objectTableNVX == rhs.m_objectTableNVX; + } + + bool operator!=(ObjectTableNVX const & rhs ) const + { + return m_objectTableNVX != rhs.m_objectTableNVX; + } + + bool operator<(ObjectTableNVX const & rhs ) const + { + return m_objectTableNVX < rhs.m_objectTableNVX; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkObjectTableNVX() const + { + return m_objectTableNVX; + } + + explicit operator bool() const + { + return m_objectTableNVX != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_objectTableNVX == VK_NULL_HANDLE; + } + + private: + VkObjectTableNVX m_objectTableNVX; + }; + + static_assert( sizeof( ObjectTableNVX ) == sizeof( VkObjectTableNVX ), "handle and wrapper have different size!" ); + + class IndirectCommandsLayoutNVX + { + public: + VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutNVX() + : m_indirectCommandsLayoutNVX(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutNVX( std::nullptr_t ) + : m_indirectCommandsLayoutNVX(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT IndirectCommandsLayoutNVX( VkIndirectCommandsLayoutNVX indirectCommandsLayoutNVX ) + : m_indirectCommandsLayoutNVX( indirectCommandsLayoutNVX ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + IndirectCommandsLayoutNVX & operator=(VkIndirectCommandsLayoutNVX indirectCommandsLayoutNVX) + { + m_indirectCommandsLayoutNVX = indirectCommandsLayoutNVX; + return *this; + } +#endif + + IndirectCommandsLayoutNVX & operator=( std::nullptr_t ) + { + m_indirectCommandsLayoutNVX = VK_NULL_HANDLE; + return *this; + } + + bool operator==( IndirectCommandsLayoutNVX const & rhs ) const + { + return m_indirectCommandsLayoutNVX == rhs.m_indirectCommandsLayoutNVX; + } + + bool operator!=(IndirectCommandsLayoutNVX const & rhs ) const + { + return m_indirectCommandsLayoutNVX != rhs.m_indirectCommandsLayoutNVX; + } + + bool operator<(IndirectCommandsLayoutNVX const & rhs ) const + { + return m_indirectCommandsLayoutNVX < rhs.m_indirectCommandsLayoutNVX; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkIndirectCommandsLayoutNVX() const + { + return m_indirectCommandsLayoutNVX; + } + + explicit operator bool() const + { + return m_indirectCommandsLayoutNVX != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_indirectCommandsLayoutNVX == VK_NULL_HANDLE; + } + + private: + VkIndirectCommandsLayoutNVX m_indirectCommandsLayoutNVX; + }; + + static_assert( sizeof( IndirectCommandsLayoutNVX ) == sizeof( VkIndirectCommandsLayoutNVX ), "handle and wrapper have different size!" 
); + + class DescriptorUpdateTemplate + { + public: + VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplate() + : m_descriptorUpdateTemplate(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplate( std::nullptr_t ) + : m_descriptorUpdateTemplate(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorUpdateTemplate( VkDescriptorUpdateTemplate descriptorUpdateTemplate ) + : m_descriptorUpdateTemplate( descriptorUpdateTemplate ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DescriptorUpdateTemplate & operator=(VkDescriptorUpdateTemplate descriptorUpdateTemplate) + { + m_descriptorUpdateTemplate = descriptorUpdateTemplate; + return *this; + } +#endif + + DescriptorUpdateTemplate & operator=( std::nullptr_t ) + { + m_descriptorUpdateTemplate = VK_NULL_HANDLE; + return *this; + } + + bool operator==( DescriptorUpdateTemplate const & rhs ) const + { + return m_descriptorUpdateTemplate == rhs.m_descriptorUpdateTemplate; + } + + bool operator!=(DescriptorUpdateTemplate const & rhs ) const + { + return m_descriptorUpdateTemplate != rhs.m_descriptorUpdateTemplate; + } + + bool operator<(DescriptorUpdateTemplate const & rhs ) const + { + return m_descriptorUpdateTemplate < rhs.m_descriptorUpdateTemplate; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorUpdateTemplate() const + { + return m_descriptorUpdateTemplate; + } + + explicit operator bool() const + { + return m_descriptorUpdateTemplate != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_descriptorUpdateTemplate == VK_NULL_HANDLE; + } + + private: + VkDescriptorUpdateTemplate m_descriptorUpdateTemplate; + }; + + static_assert( sizeof( DescriptorUpdateTemplate ) == sizeof( VkDescriptorUpdateTemplate ), "handle and wrapper have different size!" ); + + using DescriptorUpdateTemplateKHR = DescriptorUpdateTemplate; + + class SamplerYcbcrConversion + { + public: + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversion() + : m_samplerYcbcrConversion(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversion( std::nullptr_t ) + : m_samplerYcbcrConversion(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT SamplerYcbcrConversion( VkSamplerYcbcrConversion samplerYcbcrConversion ) + : m_samplerYcbcrConversion( samplerYcbcrConversion ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + SamplerYcbcrConversion & operator=(VkSamplerYcbcrConversion samplerYcbcrConversion) + { + m_samplerYcbcrConversion = samplerYcbcrConversion; + return *this; + } +#endif + + SamplerYcbcrConversion & operator=( std::nullptr_t ) + { + m_samplerYcbcrConversion = VK_NULL_HANDLE; + return *this; + } + + bool operator==( SamplerYcbcrConversion const & rhs ) const + { + return m_samplerYcbcrConversion == rhs.m_samplerYcbcrConversion; + } + + bool operator!=(SamplerYcbcrConversion const & rhs ) const + { + return m_samplerYcbcrConversion != rhs.m_samplerYcbcrConversion; + } + + bool operator<(SamplerYcbcrConversion const & rhs ) const + { + return m_samplerYcbcrConversion < rhs.m_samplerYcbcrConversion; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSamplerYcbcrConversion() const + { + return m_samplerYcbcrConversion; + } + + explicit operator bool() const + { + return m_samplerYcbcrConversion != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_samplerYcbcrConversion == VK_NULL_HANDLE; + } + + private: + VkSamplerYcbcrConversion m_samplerYcbcrConversion; + }; + + static_assert( sizeof( SamplerYcbcrConversion ) == sizeof( VkSamplerYcbcrConversion ), "handle and wrapper have different size!" 
); + + using SamplerYcbcrConversionKHR = SamplerYcbcrConversion; + + class ValidationCacheEXT + { + public: + VULKAN_HPP_CONSTEXPR ValidationCacheEXT() + : m_validationCacheEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR ValidationCacheEXT( std::nullptr_t ) + : m_validationCacheEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT ValidationCacheEXT( VkValidationCacheEXT validationCacheEXT ) + : m_validationCacheEXT( validationCacheEXT ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + ValidationCacheEXT & operator=(VkValidationCacheEXT validationCacheEXT) + { + m_validationCacheEXT = validationCacheEXT; + return *this; + } +#endif + + ValidationCacheEXT & operator=( std::nullptr_t ) + { + m_validationCacheEXT = VK_NULL_HANDLE; + return *this; + } + + bool operator==( ValidationCacheEXT const & rhs ) const + { + return m_validationCacheEXT == rhs.m_validationCacheEXT; + } + + bool operator!=(ValidationCacheEXT const & rhs ) const + { + return m_validationCacheEXT != rhs.m_validationCacheEXT; + } + + bool operator<(ValidationCacheEXT const & rhs ) const + { + return m_validationCacheEXT < rhs.m_validationCacheEXT; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkValidationCacheEXT() const + { + return m_validationCacheEXT; + } + + explicit operator bool() const + { + return m_validationCacheEXT != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_validationCacheEXT == VK_NULL_HANDLE; + } + + private: + VkValidationCacheEXT m_validationCacheEXT; + }; + + static_assert( sizeof( ValidationCacheEXT ) == sizeof( VkValidationCacheEXT ), "handle and wrapper have different size!" ); + + class AccelerationStructureNVX + { + public: + VULKAN_HPP_CONSTEXPR AccelerationStructureNVX() + : m_accelerationStructureNVX(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureNVX( std::nullptr_t ) + : m_accelerationStructureNVX(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT AccelerationStructureNVX( VkAccelerationStructureNVX accelerationStructureNVX ) + : m_accelerationStructureNVX( accelerationStructureNVX ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + AccelerationStructureNVX & operator=(VkAccelerationStructureNVX accelerationStructureNVX) + { + m_accelerationStructureNVX = accelerationStructureNVX; + return *this; + } +#endif + + AccelerationStructureNVX & operator=( std::nullptr_t ) + { + m_accelerationStructureNVX = VK_NULL_HANDLE; + return *this; + } + + bool operator==( AccelerationStructureNVX const & rhs ) const + { + return m_accelerationStructureNVX == rhs.m_accelerationStructureNVX; + } + + bool operator!=(AccelerationStructureNVX const & rhs ) const + { + return m_accelerationStructureNVX != rhs.m_accelerationStructureNVX; + } + + bool operator<(AccelerationStructureNVX const & rhs ) const + { + return m_accelerationStructureNVX < rhs.m_accelerationStructureNVX; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkAccelerationStructureNVX() const + { + return m_accelerationStructureNVX; + } + + explicit operator bool() const + { + return m_accelerationStructureNVX != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_accelerationStructureNVX == VK_NULL_HANDLE; + } + + private: + VkAccelerationStructureNVX m_accelerationStructureNVX; + }; + + static_assert( sizeof( AccelerationStructureNVX ) == sizeof( VkAccelerationStructureNVX ), "handle and wrapper have different size!" 
); + + class DisplayKHR + { + public: + VULKAN_HPP_CONSTEXPR DisplayKHR() + : m_displayKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DisplayKHR( std::nullptr_t ) + : m_displayKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DisplayKHR( VkDisplayKHR displayKHR ) + : m_displayKHR( displayKHR ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DisplayKHR & operator=(VkDisplayKHR displayKHR) + { + m_displayKHR = displayKHR; + return *this; + } +#endif + + DisplayKHR & operator=( std::nullptr_t ) + { + m_displayKHR = VK_NULL_HANDLE; + return *this; + } + + bool operator==( DisplayKHR const & rhs ) const + { + return m_displayKHR == rhs.m_displayKHR; + } + + bool operator!=(DisplayKHR const & rhs ) const + { + return m_displayKHR != rhs.m_displayKHR; + } + + bool operator<(DisplayKHR const & rhs ) const + { + return m_displayKHR < rhs.m_displayKHR; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDisplayKHR() const + { + return m_displayKHR; + } + + explicit operator bool() const + { + return m_displayKHR != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_displayKHR == VK_NULL_HANDLE; + } + + private: + VkDisplayKHR m_displayKHR; + }; + + static_assert( sizeof( DisplayKHR ) == sizeof( VkDisplayKHR ), "handle and wrapper have different size!" ); + + class DisplayModeKHR + { + public: + VULKAN_HPP_CONSTEXPR DisplayModeKHR() + : m_displayModeKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DisplayModeKHR( std::nullptr_t ) + : m_displayModeKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DisplayModeKHR( VkDisplayModeKHR displayModeKHR ) + : m_displayModeKHR( displayModeKHR ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DisplayModeKHR & operator=(VkDisplayModeKHR displayModeKHR) + { + m_displayModeKHR = displayModeKHR; + return *this; + } +#endif + + DisplayModeKHR & operator=( std::nullptr_t ) + { + m_displayModeKHR = VK_NULL_HANDLE; + return *this; + } + + bool operator==( DisplayModeKHR const & rhs ) const + { + return m_displayModeKHR == rhs.m_displayModeKHR; + } + + bool operator!=(DisplayModeKHR const & rhs ) const + { + return m_displayModeKHR != rhs.m_displayModeKHR; + } + + bool operator<(DisplayModeKHR const & rhs ) const + { + return m_displayModeKHR < rhs.m_displayModeKHR; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDisplayModeKHR() const + { + return m_displayModeKHR; + } + + explicit operator bool() const + { + return m_displayModeKHR != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_displayModeKHR == VK_NULL_HANDLE; + } + + private: + VkDisplayModeKHR m_displayModeKHR; + }; + + static_assert( sizeof( DisplayModeKHR ) == sizeof( VkDisplayModeKHR ), "handle and wrapper have different size!" 
); + + class SurfaceKHR + { + public: + VULKAN_HPP_CONSTEXPR SurfaceKHR() + : m_surfaceKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR SurfaceKHR( std::nullptr_t ) + : m_surfaceKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT SurfaceKHR( VkSurfaceKHR surfaceKHR ) + : m_surfaceKHR( surfaceKHR ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + SurfaceKHR & operator=(VkSurfaceKHR surfaceKHR) + { + m_surfaceKHR = surfaceKHR; + return *this; + } +#endif + + SurfaceKHR & operator=( std::nullptr_t ) + { + m_surfaceKHR = VK_NULL_HANDLE; + return *this; + } + + bool operator==( SurfaceKHR const & rhs ) const + { + return m_surfaceKHR == rhs.m_surfaceKHR; + } + + bool operator!=(SurfaceKHR const & rhs ) const + { + return m_surfaceKHR != rhs.m_surfaceKHR; + } + + bool operator<(SurfaceKHR const & rhs ) const + { + return m_surfaceKHR < rhs.m_surfaceKHR; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSurfaceKHR() const + { + return m_surfaceKHR; + } + + explicit operator bool() const + { + return m_surfaceKHR != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_surfaceKHR == VK_NULL_HANDLE; + } + + private: + VkSurfaceKHR m_surfaceKHR; + }; + + static_assert( sizeof( SurfaceKHR ) == sizeof( VkSurfaceKHR ), "handle and wrapper have different size!" ); + + class SwapchainKHR + { + public: + VULKAN_HPP_CONSTEXPR SwapchainKHR() + : m_swapchainKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR SwapchainKHR( std::nullptr_t ) + : m_swapchainKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT SwapchainKHR( VkSwapchainKHR swapchainKHR ) + : m_swapchainKHR( swapchainKHR ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + SwapchainKHR & operator=(VkSwapchainKHR swapchainKHR) + { + m_swapchainKHR = swapchainKHR; + return *this; + } +#endif + + SwapchainKHR & operator=( std::nullptr_t ) + { + m_swapchainKHR = VK_NULL_HANDLE; + return *this; + } + + bool operator==( SwapchainKHR const & rhs ) const + { + return m_swapchainKHR == rhs.m_swapchainKHR; + } + + bool operator!=(SwapchainKHR const & rhs ) const + { + return m_swapchainKHR != rhs.m_swapchainKHR; + } + + bool operator<(SwapchainKHR const & rhs ) const + { + return m_swapchainKHR < rhs.m_swapchainKHR; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSwapchainKHR() const + { + return m_swapchainKHR; + } + + explicit operator bool() const + { + return m_swapchainKHR != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_swapchainKHR == VK_NULL_HANDLE; + } + + private: + VkSwapchainKHR m_swapchainKHR; + }; + + static_assert( sizeof( SwapchainKHR ) == sizeof( VkSwapchainKHR ), "handle and wrapper have different size!" 
); + + class DebugReportCallbackEXT + { + public: + VULKAN_HPP_CONSTEXPR DebugReportCallbackEXT() + : m_debugReportCallbackEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DebugReportCallbackEXT( std::nullptr_t ) + : m_debugReportCallbackEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DebugReportCallbackEXT( VkDebugReportCallbackEXT debugReportCallbackEXT ) + : m_debugReportCallbackEXT( debugReportCallbackEXT ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DebugReportCallbackEXT & operator=(VkDebugReportCallbackEXT debugReportCallbackEXT) + { + m_debugReportCallbackEXT = debugReportCallbackEXT; + return *this; + } +#endif + + DebugReportCallbackEXT & operator=( std::nullptr_t ) + { + m_debugReportCallbackEXT = VK_NULL_HANDLE; + return *this; + } + + bool operator==( DebugReportCallbackEXT const & rhs ) const + { + return m_debugReportCallbackEXT == rhs.m_debugReportCallbackEXT; + } + + bool operator!=(DebugReportCallbackEXT const & rhs ) const + { + return m_debugReportCallbackEXT != rhs.m_debugReportCallbackEXT; + } + + bool operator<(DebugReportCallbackEXT const & rhs ) const + { + return m_debugReportCallbackEXT < rhs.m_debugReportCallbackEXT; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDebugReportCallbackEXT() const + { + return m_debugReportCallbackEXT; + } + + explicit operator bool() const + { + return m_debugReportCallbackEXT != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_debugReportCallbackEXT == VK_NULL_HANDLE; + } + + private: + VkDebugReportCallbackEXT m_debugReportCallbackEXT; + }; + + static_assert( sizeof( DebugReportCallbackEXT ) == sizeof( VkDebugReportCallbackEXT ), "handle and wrapper have different size!" ); + + class DebugUtilsMessengerEXT + { + public: + VULKAN_HPP_CONSTEXPR DebugUtilsMessengerEXT() + : m_debugUtilsMessengerEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DebugUtilsMessengerEXT( std::nullptr_t ) + : m_debugUtilsMessengerEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DebugUtilsMessengerEXT( VkDebugUtilsMessengerEXT debugUtilsMessengerEXT ) + : m_debugUtilsMessengerEXT( debugUtilsMessengerEXT ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DebugUtilsMessengerEXT & operator=(VkDebugUtilsMessengerEXT debugUtilsMessengerEXT) + { + m_debugUtilsMessengerEXT = debugUtilsMessengerEXT; + return *this; + } +#endif + + DebugUtilsMessengerEXT & operator=( std::nullptr_t ) + { + m_debugUtilsMessengerEXT = VK_NULL_HANDLE; + return *this; + } + + bool operator==( DebugUtilsMessengerEXT const & rhs ) const + { + return m_debugUtilsMessengerEXT == rhs.m_debugUtilsMessengerEXT; + } + + bool operator!=(DebugUtilsMessengerEXT const & rhs ) const + { + return m_debugUtilsMessengerEXT != rhs.m_debugUtilsMessengerEXT; + } + + bool operator<(DebugUtilsMessengerEXT const & rhs ) const + { + return m_debugUtilsMessengerEXT < rhs.m_debugUtilsMessengerEXT; + } + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDebugUtilsMessengerEXT() const + { + return m_debugUtilsMessengerEXT; + } + + explicit operator bool() const + { + return m_debugUtilsMessengerEXT != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_debugUtilsMessengerEXT == VK_NULL_HANDLE; + } + + private: + VkDebugUtilsMessengerEXT m_debugUtilsMessengerEXT; + }; + + static_assert( sizeof( DebugUtilsMessengerEXT ) == sizeof( VkDebugUtilsMessengerEXT ), "handle and wrapper have different size!" 
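+
+  // Usage sketch (illustrative only; `swapchain` is a placeholder and the vk namespace is
+  // assumed as elsewhere in this header). The handle classes above are thin value wrappers,
+  // the same size as the raw Vk* handle, default-initialized to VK_NULL_HANDLE, comparable,
+  // and testable in boolean context:
+  //
+  //   vk::SwapchainKHR swapchain;                                      // holds VK_NULL_HANDLE
+  //   if ( !swapchain ) { /* not created yet */ }
+  //   VkSwapchainKHR raw = static_cast<VkSwapchainKHR>( swapchain );   // unwrap for C-API calls
+  //   swapchain = nullptr;                                             // reset to VK_NULL_HANDLE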
); + + struct Offset2D + { + Offset2D( int32_t x_ = 0, + int32_t y_ = 0 ) + : x( x_ ) + , y( y_ ) + { + } + + Offset2D( VkOffset2D const & rhs ) + { + memcpy( this, &rhs, sizeof( Offset2D ) ); + } + + Offset2D& operator=( VkOffset2D const & rhs ) + { + memcpy( this, &rhs, sizeof( Offset2D ) ); + return *this; + } + Offset2D& setX( int32_t x_ ) + { + x = x_; + return *this; + } + + Offset2D& setY( int32_t y_ ) + { + y = y_; + return *this; + } + + operator VkOffset2D const&() const + { + return *reinterpret_cast(this); + } + + operator VkOffset2D &() + { + return *reinterpret_cast(this); + } + + bool operator==( Offset2D const& rhs ) const + { + return ( x == rhs.x ) + && ( y == rhs.y ); + } + + bool operator!=( Offset2D const& rhs ) const + { + return !operator==( rhs ); + } + + int32_t x; + int32_t y; + }; + static_assert( sizeof( Offset2D ) == sizeof( VkOffset2D ), "struct and wrapper have different size!" ); + + struct Offset3D + { + Offset3D( int32_t x_ = 0, + int32_t y_ = 0, + int32_t z_ = 0 ) + : x( x_ ) + , y( y_ ) + , z( z_ ) + { + } + + explicit Offset3D( Offset2D const& offset2D, + int32_t z_ = 0 ) + : x( offset2D.x ) + , y( offset2D.y ) + , z( z_ ) + {} + + Offset3D( VkOffset3D const & rhs ) + { + memcpy( this, &rhs, sizeof( Offset3D ) ); + } + + Offset3D& operator=( VkOffset3D const & rhs ) + { + memcpy( this, &rhs, sizeof( Offset3D ) ); + return *this; + } + Offset3D& setX( int32_t x_ ) + { + x = x_; + return *this; + } + + Offset3D& setY( int32_t y_ ) + { + y = y_; + return *this; + } + + Offset3D& setZ( int32_t z_ ) + { + z = z_; + return *this; + } + + operator VkOffset3D const&() const + { + return *reinterpret_cast(this); + } + + operator VkOffset3D &() + { + return *reinterpret_cast(this); + } + + bool operator==( Offset3D const& rhs ) const + { + return ( x == rhs.x ) + && ( y == rhs.y ) + && ( z == rhs.z ); + } + + bool operator!=( Offset3D const& rhs ) const + { + return !operator==( rhs ); + } + + int32_t x; + int32_t y; + int32_t z; + }; + static_assert( sizeof( Offset3D ) == sizeof( VkOffset3D ), "struct and wrapper have different size!" ); + + struct Extent2D + { + Extent2D( uint32_t width_ = 0, + uint32_t height_ = 0 ) + : width( width_ ) + , height( height_ ) + { + } + + Extent2D( VkExtent2D const & rhs ) + { + memcpy( this, &rhs, sizeof( Extent2D ) ); + } + + Extent2D& operator=( VkExtent2D const & rhs ) + { + memcpy( this, &rhs, sizeof( Extent2D ) ); + return *this; + } + Extent2D& setWidth( uint32_t width_ ) + { + width = width_; + return *this; + } + + Extent2D& setHeight( uint32_t height_ ) + { + height = height_; + return *this; + } + + operator VkExtent2D const&() const + { + return *reinterpret_cast(this); + } + + operator VkExtent2D &() + { + return *reinterpret_cast(this); + } + + bool operator==( Extent2D const& rhs ) const + { + return ( width == rhs.width ) + && ( height == rhs.height ); + } + + bool operator!=( Extent2D const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t width; + uint32_t height; + }; + static_assert( sizeof( Extent2D ) == sizeof( VkExtent2D ), "struct and wrapper have different size!" 
); + + struct Extent3D + { + Extent3D( uint32_t width_ = 0, + uint32_t height_ = 0, + uint32_t depth_ = 0 ) + : width( width_ ) + , height( height_ ) + , depth( depth_ ) + { + } + + explicit Extent3D( Extent2D const& extent2D, + uint32_t depth_ = 0 ) + : width( extent2D.width ) + , height( extent2D.height ) + , depth( depth_ ) + {} + + Extent3D( VkExtent3D const & rhs ) + { + memcpy( this, &rhs, sizeof( Extent3D ) ); + } + + Extent3D& operator=( VkExtent3D const & rhs ) + { + memcpy( this, &rhs, sizeof( Extent3D ) ); + return *this; + } + Extent3D& setWidth( uint32_t width_ ) + { + width = width_; + return *this; + } + + Extent3D& setHeight( uint32_t height_ ) + { + height = height_; + return *this; + } + + Extent3D& setDepth( uint32_t depth_ ) + { + depth = depth_; + return *this; + } + + operator VkExtent3D const&() const + { + return *reinterpret_cast(this); + } + + operator VkExtent3D &() + { + return *reinterpret_cast(this); + } + + bool operator==( Extent3D const& rhs ) const + { + return ( width == rhs.width ) + && ( height == rhs.height ) + && ( depth == rhs.depth ); + } + + bool operator!=( Extent3D const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t width; + uint32_t height; + uint32_t depth; + }; + static_assert( sizeof( Extent3D ) == sizeof( VkExtent3D ), "struct and wrapper have different size!" ); + + struct Viewport + { + Viewport( float x_ = 0, + float y_ = 0, + float width_ = 0, + float height_ = 0, + float minDepth_ = 0, + float maxDepth_ = 0 ) + : x( x_ ) + , y( y_ ) + , width( width_ ) + , height( height_ ) + , minDepth( minDepth_ ) + , maxDepth( maxDepth_ ) + { + } + + Viewport( VkViewport const & rhs ) + { + memcpy( this, &rhs, sizeof( Viewport ) ); + } + + Viewport& operator=( VkViewport const & rhs ) + { + memcpy( this, &rhs, sizeof( Viewport ) ); + return *this; + } + Viewport& setX( float x_ ) + { + x = x_; + return *this; + } + + Viewport& setY( float y_ ) + { + y = y_; + return *this; + } + + Viewport& setWidth( float width_ ) + { + width = width_; + return *this; + } + + Viewport& setHeight( float height_ ) + { + height = height_; + return *this; + } + + Viewport& setMinDepth( float minDepth_ ) + { + minDepth = minDepth_; + return *this; + } + + Viewport& setMaxDepth( float maxDepth_ ) + { + maxDepth = maxDepth_; + return *this; + } + + operator VkViewport const&() const + { + return *reinterpret_cast(this); + } + + operator VkViewport &() + { + return *reinterpret_cast(this); + } + + bool operator==( Viewport const& rhs ) const + { + return ( x == rhs.x ) + && ( y == rhs.y ) + && ( width == rhs.width ) + && ( height == rhs.height ) + && ( minDepth == rhs.minDepth ) + && ( maxDepth == rhs.maxDepth ); + } + + bool operator!=( Viewport const& rhs ) const + { + return !operator==( rhs ); + } + + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; + }; + static_assert( sizeof( Viewport ) == sizeof( VkViewport ), "struct and wrapper have different size!" 
); + + struct Rect2D + { + Rect2D( Offset2D offset_ = Offset2D(), + Extent2D extent_ = Extent2D() ) + : offset( offset_ ) + , extent( extent_ ) + { + } + + Rect2D( VkRect2D const & rhs ) + { + memcpy( this, &rhs, sizeof( Rect2D ) ); + } + + Rect2D& operator=( VkRect2D const & rhs ) + { + memcpy( this, &rhs, sizeof( Rect2D ) ); + return *this; + } + Rect2D& setOffset( Offset2D offset_ ) + { + offset = offset_; + return *this; + } + + Rect2D& setExtent( Extent2D extent_ ) + { + extent = extent_; + return *this; + } + + operator VkRect2D const&() const + { + return *reinterpret_cast(this); + } + + operator VkRect2D &() + { + return *reinterpret_cast(this); + } + + bool operator==( Rect2D const& rhs ) const + { + return ( offset == rhs.offset ) + && ( extent == rhs.extent ); + } + + bool operator!=( Rect2D const& rhs ) const + { + return !operator==( rhs ); + } + + Offset2D offset; + Extent2D extent; + }; + static_assert( sizeof( Rect2D ) == sizeof( VkRect2D ), "struct and wrapper have different size!" ); + + struct ClearRect + { + ClearRect( Rect2D rect_ = Rect2D(), + uint32_t baseArrayLayer_ = 0, + uint32_t layerCount_ = 0 ) + : rect( rect_ ) + , baseArrayLayer( baseArrayLayer_ ) + , layerCount( layerCount_ ) + { + } + + ClearRect( VkClearRect const & rhs ) + { + memcpy( this, &rhs, sizeof( ClearRect ) ); + } + + ClearRect& operator=( VkClearRect const & rhs ) + { + memcpy( this, &rhs, sizeof( ClearRect ) ); + return *this; + } + ClearRect& setRect( Rect2D rect_ ) + { + rect = rect_; + return *this; + } + + ClearRect& setBaseArrayLayer( uint32_t baseArrayLayer_ ) + { + baseArrayLayer = baseArrayLayer_; + return *this; + } + + ClearRect& setLayerCount( uint32_t layerCount_ ) + { + layerCount = layerCount_; + return *this; + } + + operator VkClearRect const&() const + { + return *reinterpret_cast(this); + } + + operator VkClearRect &() + { + return *reinterpret_cast(this); + } + + bool operator==( ClearRect const& rhs ) const + { + return ( rect == rhs.rect ) + && ( baseArrayLayer == rhs.baseArrayLayer ) + && ( layerCount == rhs.layerCount ); + } + + bool operator!=( ClearRect const& rhs ) const + { + return !operator==( rhs ); + } + + Rect2D rect; + uint32_t baseArrayLayer; + uint32_t layerCount; + }; + static_assert( sizeof( ClearRect ) == sizeof( VkClearRect ), "struct and wrapper have different size!" ); + + struct ExtensionProperties + { + operator VkExtensionProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkExtensionProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExtensionProperties const& rhs ) const + { + return ( memcmp( extensionName, rhs.extensionName, VK_MAX_EXTENSION_NAME_SIZE * sizeof( char ) ) == 0 ) + && ( specVersion == rhs.specVersion ); + } + + bool operator!=( ExtensionProperties const& rhs ) const + { + return !operator==( rhs ); + } + + char extensionName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; + }; + static_assert( sizeof( ExtensionProperties ) == sizeof( VkExtensionProperties ), "struct and wrapper have different size!" 
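+
+  // Usage sketch (illustrative values; vk namespace assumed): Offset2D, Extent2D, Viewport and
+  // Rect2D above can be filled through their constructors or by chaining the set*() members,
+  // each of which returns *this:
+  //
+  //   vk::Viewport viewport = vk::Viewport()
+  //       .setX( 0.0f ).setY( 0.0f )
+  //       .setWidth( 640.0f ).setHeight( 480.0f )
+  //       .setMinDepth( 0.0f ).setMaxDepth( 1.0f );
+  //   vk::Rect2D scissor( vk::Offset2D( 0, 0 ), vk::Extent2D( 640, 480 ) );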
); + + struct LayerProperties + { + operator VkLayerProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkLayerProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( LayerProperties const& rhs ) const + { + return ( memcmp( layerName, rhs.layerName, VK_MAX_EXTENSION_NAME_SIZE * sizeof( char ) ) == 0 ) + && ( specVersion == rhs.specVersion ) + && ( implementationVersion == rhs.implementationVersion ) + && ( memcmp( description, rhs.description, VK_MAX_DESCRIPTION_SIZE * sizeof( char ) ) == 0 ); + } + + bool operator!=( LayerProperties const& rhs ) const + { + return !operator==( rhs ); + } + + char layerName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; + uint32_t implementationVersion; + char description[VK_MAX_DESCRIPTION_SIZE]; + }; + static_assert( sizeof( LayerProperties ) == sizeof( VkLayerProperties ), "struct and wrapper have different size!" ); + + struct AllocationCallbacks + { + AllocationCallbacks( void* pUserData_ = nullptr, + PFN_vkAllocationFunction pfnAllocation_ = nullptr, + PFN_vkReallocationFunction pfnReallocation_ = nullptr, + PFN_vkFreeFunction pfnFree_ = nullptr, + PFN_vkInternalAllocationNotification pfnInternalAllocation_ = nullptr, + PFN_vkInternalFreeNotification pfnInternalFree_ = nullptr ) + : pUserData( pUserData_ ) + , pfnAllocation( pfnAllocation_ ) + , pfnReallocation( pfnReallocation_ ) + , pfnFree( pfnFree_ ) + , pfnInternalAllocation( pfnInternalAllocation_ ) + , pfnInternalFree( pfnInternalFree_ ) + { + } + + AllocationCallbacks( VkAllocationCallbacks const & rhs ) + { + memcpy( this, &rhs, sizeof( AllocationCallbacks ) ); + } + + AllocationCallbacks& operator=( VkAllocationCallbacks const & rhs ) + { + memcpy( this, &rhs, sizeof( AllocationCallbacks ) ); + return *this; + } + AllocationCallbacks& setPUserData( void* pUserData_ ) + { + pUserData = pUserData_; + return *this; + } + + AllocationCallbacks& setPfnAllocation( PFN_vkAllocationFunction pfnAllocation_ ) + { + pfnAllocation = pfnAllocation_; + return *this; + } + + AllocationCallbacks& setPfnReallocation( PFN_vkReallocationFunction pfnReallocation_ ) + { + pfnReallocation = pfnReallocation_; + return *this; + } + + AllocationCallbacks& setPfnFree( PFN_vkFreeFunction pfnFree_ ) + { + pfnFree = pfnFree_; + return *this; + } + + AllocationCallbacks& setPfnInternalAllocation( PFN_vkInternalAllocationNotification pfnInternalAllocation_ ) + { + pfnInternalAllocation = pfnInternalAllocation_; + return *this; + } + + AllocationCallbacks& setPfnInternalFree( PFN_vkInternalFreeNotification pfnInternalFree_ ) + { + pfnInternalFree = pfnInternalFree_; + return *this; + } + + operator VkAllocationCallbacks const&() const + { + return *reinterpret_cast(this); + } + + operator VkAllocationCallbacks &() + { + return *reinterpret_cast(this); + } + + bool operator==( AllocationCallbacks const& rhs ) const + { + return ( pUserData == rhs.pUserData ) + && ( pfnAllocation == rhs.pfnAllocation ) + && ( pfnReallocation == rhs.pfnReallocation ) + && ( pfnFree == rhs.pfnFree ) + && ( pfnInternalAllocation == rhs.pfnInternalAllocation ) + && ( pfnInternalFree == rhs.pfnInternalFree ); + } + + bool operator!=( AllocationCallbacks const& rhs ) const + { + return !operator==( rhs ); + } + + void* pUserData; + PFN_vkAllocationFunction pfnAllocation; + PFN_vkReallocationFunction pfnReallocation; + PFN_vkFreeFunction pfnFree; + PFN_vkInternalAllocationNotification pfnInternalAllocation; + PFN_vkInternalFreeNotification pfnInternalFree; + }; + static_assert( sizeof( 
AllocationCallbacks ) == sizeof( VkAllocationCallbacks ), "struct and wrapper have different size!" ); + + struct MemoryRequirements + { + operator VkMemoryRequirements const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryRequirements &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryRequirements const& rhs ) const + { + return ( size == rhs.size ) + && ( alignment == rhs.alignment ) + && ( memoryTypeBits == rhs.memoryTypeBits ); + } + + bool operator!=( MemoryRequirements const& rhs ) const + { + return !operator==( rhs ); + } + + DeviceSize size; + DeviceSize alignment; + uint32_t memoryTypeBits; + }; + static_assert( sizeof( MemoryRequirements ) == sizeof( VkMemoryRequirements ), "struct and wrapper have different size!" ); + + struct DescriptorBufferInfo + { + DescriptorBufferInfo( Buffer buffer_ = Buffer(), + DeviceSize offset_ = 0, + DeviceSize range_ = 0 ) + : buffer( buffer_ ) + , offset( offset_ ) + , range( range_ ) + { + } + + DescriptorBufferInfo( VkDescriptorBufferInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorBufferInfo ) ); + } + + DescriptorBufferInfo& operator=( VkDescriptorBufferInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorBufferInfo ) ); + return *this; + } + DescriptorBufferInfo& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + DescriptorBufferInfo& setOffset( DeviceSize offset_ ) + { + offset = offset_; + return *this; + } + + DescriptorBufferInfo& setRange( DeviceSize range_ ) + { + range = range_; + return *this; + } + + operator VkDescriptorBufferInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorBufferInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorBufferInfo const& rhs ) const + { + return ( buffer == rhs.buffer ) + && ( offset == rhs.offset ) + && ( range == rhs.range ); + } + + bool operator!=( DescriptorBufferInfo const& rhs ) const + { + return !operator==( rhs ); + } + + Buffer buffer; + DeviceSize offset; + DeviceSize range; + }; + static_assert( sizeof( DescriptorBufferInfo ) == sizeof( VkDescriptorBufferInfo ), "struct and wrapper have different size!" ); + + struct SubresourceLayout + { + operator VkSubresourceLayout const&() const + { + return *reinterpret_cast(this); + } + + operator VkSubresourceLayout &() + { + return *reinterpret_cast(this); + } + + bool operator==( SubresourceLayout const& rhs ) const + { + return ( offset == rhs.offset ) + && ( size == rhs.size ) + && ( rowPitch == rhs.rowPitch ) + && ( arrayPitch == rhs.arrayPitch ) + && ( depthPitch == rhs.depthPitch ); + } + + bool operator!=( SubresourceLayout const& rhs ) const + { + return !operator==( rhs ); + } + + DeviceSize offset; + DeviceSize size; + DeviceSize rowPitch; + DeviceSize arrayPitch; + DeviceSize depthPitch; + }; + static_assert( sizeof( SubresourceLayout ) == sizeof( VkSubresourceLayout ), "struct and wrapper have different size!" 
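+
+  // Usage sketch (illustrative; `uniformBuffer` and the 64-byte range are placeholders):
+  // DescriptorBufferInfo above names the buffer slice a descriptor refers to and is typically
+  // consumed later through a WriteDescriptorSet:
+  //
+  //   vk::DescriptorBufferInfo bufferInfo = vk::DescriptorBufferInfo()
+  //       .setBuffer( uniformBuffer )
+  //       .setOffset( 0 )
+  //       .setRange( 64 );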
); + + struct BufferCopy + { + BufferCopy( DeviceSize srcOffset_ = 0, + DeviceSize dstOffset_ = 0, + DeviceSize size_ = 0 ) + : srcOffset( srcOffset_ ) + , dstOffset( dstOffset_ ) + , size( size_ ) + { + } + + BufferCopy( VkBufferCopy const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferCopy ) ); + } + + BufferCopy& operator=( VkBufferCopy const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferCopy ) ); + return *this; + } + BufferCopy& setSrcOffset( DeviceSize srcOffset_ ) + { + srcOffset = srcOffset_; + return *this; + } + + BufferCopy& setDstOffset( DeviceSize dstOffset_ ) + { + dstOffset = dstOffset_; + return *this; + } + + BufferCopy& setSize( DeviceSize size_ ) + { + size = size_; + return *this; + } + + operator VkBufferCopy const&() const + { + return *reinterpret_cast(this); + } + + operator VkBufferCopy &() + { + return *reinterpret_cast(this); + } + + bool operator==( BufferCopy const& rhs ) const + { + return ( srcOffset == rhs.srcOffset ) + && ( dstOffset == rhs.dstOffset ) + && ( size == rhs.size ); + } + + bool operator!=( BufferCopy const& rhs ) const + { + return !operator==( rhs ); + } + + DeviceSize srcOffset; + DeviceSize dstOffset; + DeviceSize size; + }; + static_assert( sizeof( BufferCopy ) == sizeof( VkBufferCopy ), "struct and wrapper have different size!" ); + + struct SpecializationMapEntry + { + SpecializationMapEntry( uint32_t constantID_ = 0, + uint32_t offset_ = 0, + size_t size_ = 0 ) + : constantID( constantID_ ) + , offset( offset_ ) + , size( size_ ) + { + } + + SpecializationMapEntry( VkSpecializationMapEntry const & rhs ) + { + memcpy( this, &rhs, sizeof( SpecializationMapEntry ) ); + } + + SpecializationMapEntry& operator=( VkSpecializationMapEntry const & rhs ) + { + memcpy( this, &rhs, sizeof( SpecializationMapEntry ) ); + return *this; + } + SpecializationMapEntry& setConstantID( uint32_t constantID_ ) + { + constantID = constantID_; + return *this; + } + + SpecializationMapEntry& setOffset( uint32_t offset_ ) + { + offset = offset_; + return *this; + } + + SpecializationMapEntry& setSize( size_t size_ ) + { + size = size_; + return *this; + } + + operator VkSpecializationMapEntry const&() const + { + return *reinterpret_cast(this); + } + + operator VkSpecializationMapEntry &() + { + return *reinterpret_cast(this); + } + + bool operator==( SpecializationMapEntry const& rhs ) const + { + return ( constantID == rhs.constantID ) + && ( offset == rhs.offset ) + && ( size == rhs.size ); + } + + bool operator!=( SpecializationMapEntry const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t constantID; + uint32_t offset; + size_t size; + }; + static_assert( sizeof( SpecializationMapEntry ) == sizeof( VkSpecializationMapEntry ), "struct and wrapper have different size!" 
); + + struct SpecializationInfo + { + SpecializationInfo( uint32_t mapEntryCount_ = 0, + const SpecializationMapEntry* pMapEntries_ = nullptr, + size_t dataSize_ = 0, + const void* pData_ = nullptr ) + : mapEntryCount( mapEntryCount_ ) + , pMapEntries( pMapEntries_ ) + , dataSize( dataSize_ ) + , pData( pData_ ) + { + } + + SpecializationInfo( VkSpecializationInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SpecializationInfo ) ); + } + + SpecializationInfo& operator=( VkSpecializationInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SpecializationInfo ) ); + return *this; + } + SpecializationInfo& setMapEntryCount( uint32_t mapEntryCount_ ) + { + mapEntryCount = mapEntryCount_; + return *this; + } + + SpecializationInfo& setPMapEntries( const SpecializationMapEntry* pMapEntries_ ) + { + pMapEntries = pMapEntries_; + return *this; + } + + SpecializationInfo& setDataSize( size_t dataSize_ ) + { + dataSize = dataSize_; + return *this; + } + + SpecializationInfo& setPData( const void* pData_ ) + { + pData = pData_; + return *this; + } + + operator VkSpecializationInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkSpecializationInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( SpecializationInfo const& rhs ) const + { + return ( mapEntryCount == rhs.mapEntryCount ) + && ( pMapEntries == rhs.pMapEntries ) + && ( dataSize == rhs.dataSize ) + && ( pData == rhs.pData ); + } + + bool operator!=( SpecializationInfo const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t mapEntryCount; + const SpecializationMapEntry* pMapEntries; + size_t dataSize; + const void* pData; + }; + static_assert( sizeof( SpecializationInfo ) == sizeof( VkSpecializationInfo ), "struct and wrapper have different size!" 
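+
+  // Usage sketch (illustrative; constant id 0 and `useFilter` are placeholders): a specialization
+  // constant is described by a SpecializationMapEntry pointing into a raw data blob, and
+  // SpecializationInfo ties the entries and the blob together for pipeline creation:
+  //
+  //   const uint32_t useFilter = 1;
+  //   vk::SpecializationMapEntry entry( 0 /*constantID*/, 0 /*offset*/, sizeof( uint32_t ) );
+  //   vk::SpecializationInfo specInfo( 1, &entry, sizeof( useFilter ), &useFilter );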
);
+
+  union ClearColorValue
+  {
+    ClearColorValue( const std::array<float,4>& float32_ = { {0} } )
+    {
+      memcpy( &float32, float32_.data(), 4 * sizeof( float ) );
+    }
+
+    ClearColorValue( const std::array<int32_t,4>& int32_ )
+    {
+      memcpy( &int32, int32_.data(), 4 * sizeof( int32_t ) );
+    }
+
+    ClearColorValue( const std::array<uint32_t,4>& uint32_ )
+    {
+      memcpy( &uint32, uint32_.data(), 4 * sizeof( uint32_t ) );
+    }
+
+    ClearColorValue& setFloat32( std::array<float,4> float32_ )
+    {
+      memcpy( &float32, float32_.data(), 4 * sizeof( float ) );
+      return *this;
+    }
+
+    ClearColorValue& setInt32( std::array<int32_t,4> int32_ )
+    {
+      memcpy( &int32, int32_.data(), 4 * sizeof( int32_t ) );
+      return *this;
+    }
+
+    ClearColorValue& setUint32( std::array<uint32_t,4> uint32_ )
+    {
+      memcpy( &uint32, uint32_.data(), 4 * sizeof( uint32_t ) );
+      return *this;
+    }
+
+    operator VkClearColorValue const&() const
+    {
+      return *reinterpret_cast<const VkClearColorValue*>(this);
+    }
+
+    operator VkClearColorValue &()
+    {
+      return *reinterpret_cast<VkClearColorValue*>(this);
+    }
+
+    float float32[4];
+    int32_t int32[4];
+    uint32_t uint32[4];
+  };
+
+  struct ClearDepthStencilValue
+  {
+    ClearDepthStencilValue( float depth_ = 0,
+                            uint32_t stencil_ = 0 )
+      : depth( depth_ )
+      , stencil( stencil_ )
+    {
+    }
+
+    ClearDepthStencilValue( VkClearDepthStencilValue const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( ClearDepthStencilValue ) );
+    }
+
+    ClearDepthStencilValue& operator=( VkClearDepthStencilValue const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( ClearDepthStencilValue ) );
+      return *this;
+    }
+    ClearDepthStencilValue& setDepth( float depth_ )
+    {
+      depth = depth_;
+      return *this;
+    }
+
+    ClearDepthStencilValue& setStencil( uint32_t stencil_ )
+    {
+      stencil = stencil_;
+      return *this;
+    }
+
+    operator VkClearDepthStencilValue const&() const
+    {
+      return *reinterpret_cast<const VkClearDepthStencilValue*>(this);
+    }
+
+    operator VkClearDepthStencilValue &()
+    {
+      return *reinterpret_cast<VkClearDepthStencilValue*>(this);
+    }
+
+    bool operator==( ClearDepthStencilValue const& rhs ) const
+    {
+      return ( depth == rhs.depth )
+          && ( stencil == rhs.stencil );
+    }
+
+    bool operator!=( ClearDepthStencilValue const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+    float depth;
+    uint32_t stencil;
+  };
+  static_assert( sizeof( ClearDepthStencilValue ) == sizeof( VkClearDepthStencilValue ), "struct and wrapper have different size!"
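+
+  // Usage sketch (illustrative values): ClearColorValue and ClearDepthStencilValue are the two
+  // alternatives carried by the ClearValue union defined just below, e.g. when clearing a color
+  // and a depth attachment at render pass begin:
+  //
+  //   vk::ClearValue clearValues[2];
+  //   clearValues[0].setColor( vk::ClearColorValue( std::array<float,4>{ { 0.f, 0.f, 0.f, 1.f } } ) );
+  //   clearValues[1].setDepthStencil( vk::ClearDepthStencilValue( 1.0f, 0 ) );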
); + + union ClearValue + { + ClearValue( ClearColorValue color_ = ClearColorValue() ) + { + color = color_; + } + + ClearValue( ClearDepthStencilValue depthStencil_ ) + { + depthStencil = depthStencil_; + } + + ClearValue& setColor( ClearColorValue color_ ) + { + color = color_; + return *this; + } + + ClearValue& setDepthStencil( ClearDepthStencilValue depthStencil_ ) + { + depthStencil = depthStencil_; + return *this; + } + + operator VkClearValue const&() const + { + return *reinterpret_cast(this); + } + + operator VkClearValue &() + { + return *reinterpret_cast(this); + } + +#ifdef VULKAN_HPP_HAS_UNRESTRICTED_UNIONS + ClearColorValue color; + ClearDepthStencilValue depthStencil; +#else + VkClearColorValue color; + VkClearDepthStencilValue depthStencil; +#endif // VULKAN_HPP_HAS_UNRESTRICTED_UNIONS + }; + + struct PhysicalDeviceFeatures + { + PhysicalDeviceFeatures( Bool32 robustBufferAccess_ = 0, + Bool32 fullDrawIndexUint32_ = 0, + Bool32 imageCubeArray_ = 0, + Bool32 independentBlend_ = 0, + Bool32 geometryShader_ = 0, + Bool32 tessellationShader_ = 0, + Bool32 sampleRateShading_ = 0, + Bool32 dualSrcBlend_ = 0, + Bool32 logicOp_ = 0, + Bool32 multiDrawIndirect_ = 0, + Bool32 drawIndirectFirstInstance_ = 0, + Bool32 depthClamp_ = 0, + Bool32 depthBiasClamp_ = 0, + Bool32 fillModeNonSolid_ = 0, + Bool32 depthBounds_ = 0, + Bool32 wideLines_ = 0, + Bool32 largePoints_ = 0, + Bool32 alphaToOne_ = 0, + Bool32 multiViewport_ = 0, + Bool32 samplerAnisotropy_ = 0, + Bool32 textureCompressionETC2_ = 0, + Bool32 textureCompressionASTC_LDR_ = 0, + Bool32 textureCompressionBC_ = 0, + Bool32 occlusionQueryPrecise_ = 0, + Bool32 pipelineStatisticsQuery_ = 0, + Bool32 vertexPipelineStoresAndAtomics_ = 0, + Bool32 fragmentStoresAndAtomics_ = 0, + Bool32 shaderTessellationAndGeometryPointSize_ = 0, + Bool32 shaderImageGatherExtended_ = 0, + Bool32 shaderStorageImageExtendedFormats_ = 0, + Bool32 shaderStorageImageMultisample_ = 0, + Bool32 shaderStorageImageReadWithoutFormat_ = 0, + Bool32 shaderStorageImageWriteWithoutFormat_ = 0, + Bool32 shaderUniformBufferArrayDynamicIndexing_ = 0, + Bool32 shaderSampledImageArrayDynamicIndexing_ = 0, + Bool32 shaderStorageBufferArrayDynamicIndexing_ = 0, + Bool32 shaderStorageImageArrayDynamicIndexing_ = 0, + Bool32 shaderClipDistance_ = 0, + Bool32 shaderCullDistance_ = 0, + Bool32 shaderFloat64_ = 0, + Bool32 shaderInt64_ = 0, + Bool32 shaderInt16_ = 0, + Bool32 shaderResourceResidency_ = 0, + Bool32 shaderResourceMinLod_ = 0, + Bool32 sparseBinding_ = 0, + Bool32 sparseResidencyBuffer_ = 0, + Bool32 sparseResidencyImage2D_ = 0, + Bool32 sparseResidencyImage3D_ = 0, + Bool32 sparseResidency2Samples_ = 0, + Bool32 sparseResidency4Samples_ = 0, + Bool32 sparseResidency8Samples_ = 0, + Bool32 sparseResidency16Samples_ = 0, + Bool32 sparseResidencyAliased_ = 0, + Bool32 variableMultisampleRate_ = 0, + Bool32 inheritedQueries_ = 0 ) + : robustBufferAccess( robustBufferAccess_ ) + , fullDrawIndexUint32( fullDrawIndexUint32_ ) + , imageCubeArray( imageCubeArray_ ) + , independentBlend( independentBlend_ ) + , geometryShader( geometryShader_ ) + , tessellationShader( tessellationShader_ ) + , sampleRateShading( sampleRateShading_ ) + , dualSrcBlend( dualSrcBlend_ ) + , logicOp( logicOp_ ) + , multiDrawIndirect( multiDrawIndirect_ ) + , drawIndirectFirstInstance( drawIndirectFirstInstance_ ) + , depthClamp( depthClamp_ ) + , depthBiasClamp( depthBiasClamp_ ) + , fillModeNonSolid( fillModeNonSolid_ ) + , depthBounds( depthBounds_ ) + , wideLines( wideLines_ ) + , 
largePoints( largePoints_ ) + , alphaToOne( alphaToOne_ ) + , multiViewport( multiViewport_ ) + , samplerAnisotropy( samplerAnisotropy_ ) + , textureCompressionETC2( textureCompressionETC2_ ) + , textureCompressionASTC_LDR( textureCompressionASTC_LDR_ ) + , textureCompressionBC( textureCompressionBC_ ) + , occlusionQueryPrecise( occlusionQueryPrecise_ ) + , pipelineStatisticsQuery( pipelineStatisticsQuery_ ) + , vertexPipelineStoresAndAtomics( vertexPipelineStoresAndAtomics_ ) + , fragmentStoresAndAtomics( fragmentStoresAndAtomics_ ) + , shaderTessellationAndGeometryPointSize( shaderTessellationAndGeometryPointSize_ ) + , shaderImageGatherExtended( shaderImageGatherExtended_ ) + , shaderStorageImageExtendedFormats( shaderStorageImageExtendedFormats_ ) + , shaderStorageImageMultisample( shaderStorageImageMultisample_ ) + , shaderStorageImageReadWithoutFormat( shaderStorageImageReadWithoutFormat_ ) + , shaderStorageImageWriteWithoutFormat( shaderStorageImageWriteWithoutFormat_ ) + , shaderUniformBufferArrayDynamicIndexing( shaderUniformBufferArrayDynamicIndexing_ ) + , shaderSampledImageArrayDynamicIndexing( shaderSampledImageArrayDynamicIndexing_ ) + , shaderStorageBufferArrayDynamicIndexing( shaderStorageBufferArrayDynamicIndexing_ ) + , shaderStorageImageArrayDynamicIndexing( shaderStorageImageArrayDynamicIndexing_ ) + , shaderClipDistance( shaderClipDistance_ ) + , shaderCullDistance( shaderCullDistance_ ) + , shaderFloat64( shaderFloat64_ ) + , shaderInt64( shaderInt64_ ) + , shaderInt16( shaderInt16_ ) + , shaderResourceResidency( shaderResourceResidency_ ) + , shaderResourceMinLod( shaderResourceMinLod_ ) + , sparseBinding( sparseBinding_ ) + , sparseResidencyBuffer( sparseResidencyBuffer_ ) + , sparseResidencyImage2D( sparseResidencyImage2D_ ) + , sparseResidencyImage3D( sparseResidencyImage3D_ ) + , sparseResidency2Samples( sparseResidency2Samples_ ) + , sparseResidency4Samples( sparseResidency4Samples_ ) + , sparseResidency8Samples( sparseResidency8Samples_ ) + , sparseResidency16Samples( sparseResidency16Samples_ ) + , sparseResidencyAliased( sparseResidencyAliased_ ) + , variableMultisampleRate( variableMultisampleRate_ ) + , inheritedQueries( inheritedQueries_ ) + { + } + + PhysicalDeviceFeatures( VkPhysicalDeviceFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceFeatures ) ); + } + + PhysicalDeviceFeatures& operator=( VkPhysicalDeviceFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceFeatures ) ); + return *this; + } + PhysicalDeviceFeatures& setRobustBufferAccess( Bool32 robustBufferAccess_ ) + { + robustBufferAccess = robustBufferAccess_; + return *this; + } + + PhysicalDeviceFeatures& setFullDrawIndexUint32( Bool32 fullDrawIndexUint32_ ) + { + fullDrawIndexUint32 = fullDrawIndexUint32_; + return *this; + } + + PhysicalDeviceFeatures& setImageCubeArray( Bool32 imageCubeArray_ ) + { + imageCubeArray = imageCubeArray_; + return *this; + } + + PhysicalDeviceFeatures& setIndependentBlend( Bool32 independentBlend_ ) + { + independentBlend = independentBlend_; + return *this; + } + + PhysicalDeviceFeatures& setGeometryShader( Bool32 geometryShader_ ) + { + geometryShader = geometryShader_; + return *this; + } + + PhysicalDeviceFeatures& setTessellationShader( Bool32 tessellationShader_ ) + { + tessellationShader = tessellationShader_; + return *this; + } + + PhysicalDeviceFeatures& setSampleRateShading( Bool32 sampleRateShading_ ) + { + sampleRateShading = sampleRateShading_; + return *this; + } + + PhysicalDeviceFeatures& setDualSrcBlend( 
Bool32 dualSrcBlend_ ) + { + dualSrcBlend = dualSrcBlend_; + return *this; + } + + PhysicalDeviceFeatures& setLogicOp( Bool32 logicOp_ ) + { + logicOp = logicOp_; + return *this; + } + + PhysicalDeviceFeatures& setMultiDrawIndirect( Bool32 multiDrawIndirect_ ) + { + multiDrawIndirect = multiDrawIndirect_; + return *this; + } + + PhysicalDeviceFeatures& setDrawIndirectFirstInstance( Bool32 drawIndirectFirstInstance_ ) + { + drawIndirectFirstInstance = drawIndirectFirstInstance_; + return *this; + } + + PhysicalDeviceFeatures& setDepthClamp( Bool32 depthClamp_ ) + { + depthClamp = depthClamp_; + return *this; + } + + PhysicalDeviceFeatures& setDepthBiasClamp( Bool32 depthBiasClamp_ ) + { + depthBiasClamp = depthBiasClamp_; + return *this; + } + + PhysicalDeviceFeatures& setFillModeNonSolid( Bool32 fillModeNonSolid_ ) + { + fillModeNonSolid = fillModeNonSolid_; + return *this; + } + + PhysicalDeviceFeatures& setDepthBounds( Bool32 depthBounds_ ) + { + depthBounds = depthBounds_; + return *this; + } + + PhysicalDeviceFeatures& setWideLines( Bool32 wideLines_ ) + { + wideLines = wideLines_; + return *this; + } + + PhysicalDeviceFeatures& setLargePoints( Bool32 largePoints_ ) + { + largePoints = largePoints_; + return *this; + } + + PhysicalDeviceFeatures& setAlphaToOne( Bool32 alphaToOne_ ) + { + alphaToOne = alphaToOne_; + return *this; + } + + PhysicalDeviceFeatures& setMultiViewport( Bool32 multiViewport_ ) + { + multiViewport = multiViewport_; + return *this; + } + + PhysicalDeviceFeatures& setSamplerAnisotropy( Bool32 samplerAnisotropy_ ) + { + samplerAnisotropy = samplerAnisotropy_; + return *this; + } + + PhysicalDeviceFeatures& setTextureCompressionETC2( Bool32 textureCompressionETC2_ ) + { + textureCompressionETC2 = textureCompressionETC2_; + return *this; + } + + PhysicalDeviceFeatures& setTextureCompressionASTC_LDR( Bool32 textureCompressionASTC_LDR_ ) + { + textureCompressionASTC_LDR = textureCompressionASTC_LDR_; + return *this; + } + + PhysicalDeviceFeatures& setTextureCompressionBC( Bool32 textureCompressionBC_ ) + { + textureCompressionBC = textureCompressionBC_; + return *this; + } + + PhysicalDeviceFeatures& setOcclusionQueryPrecise( Bool32 occlusionQueryPrecise_ ) + { + occlusionQueryPrecise = occlusionQueryPrecise_; + return *this; + } + + PhysicalDeviceFeatures& setPipelineStatisticsQuery( Bool32 pipelineStatisticsQuery_ ) + { + pipelineStatisticsQuery = pipelineStatisticsQuery_; + return *this; + } + + PhysicalDeviceFeatures& setVertexPipelineStoresAndAtomics( Bool32 vertexPipelineStoresAndAtomics_ ) + { + vertexPipelineStoresAndAtomics = vertexPipelineStoresAndAtomics_; + return *this; + } + + PhysicalDeviceFeatures& setFragmentStoresAndAtomics( Bool32 fragmentStoresAndAtomics_ ) + { + fragmentStoresAndAtomics = fragmentStoresAndAtomics_; + return *this; + } + + PhysicalDeviceFeatures& setShaderTessellationAndGeometryPointSize( Bool32 shaderTessellationAndGeometryPointSize_ ) + { + shaderTessellationAndGeometryPointSize = shaderTessellationAndGeometryPointSize_; + return *this; + } + + PhysicalDeviceFeatures& setShaderImageGatherExtended( Bool32 shaderImageGatherExtended_ ) + { + shaderImageGatherExtended = shaderImageGatherExtended_; + return *this; + } + + PhysicalDeviceFeatures& setShaderStorageImageExtendedFormats( Bool32 shaderStorageImageExtendedFormats_ ) + { + shaderStorageImageExtendedFormats = shaderStorageImageExtendedFormats_; + return *this; + } + + PhysicalDeviceFeatures& setShaderStorageImageMultisample( Bool32 shaderStorageImageMultisample_ ) + { + 
shaderStorageImageMultisample = shaderStorageImageMultisample_; + return *this; + } + + PhysicalDeviceFeatures& setShaderStorageImageReadWithoutFormat( Bool32 shaderStorageImageReadWithoutFormat_ ) + { + shaderStorageImageReadWithoutFormat = shaderStorageImageReadWithoutFormat_; + return *this; + } + + PhysicalDeviceFeatures& setShaderStorageImageWriteWithoutFormat( Bool32 shaderStorageImageWriteWithoutFormat_ ) + { + shaderStorageImageWriteWithoutFormat = shaderStorageImageWriteWithoutFormat_; + return *this; + } + + PhysicalDeviceFeatures& setShaderUniformBufferArrayDynamicIndexing( Bool32 shaderUniformBufferArrayDynamicIndexing_ ) + { + shaderUniformBufferArrayDynamicIndexing = shaderUniformBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceFeatures& setShaderSampledImageArrayDynamicIndexing( Bool32 shaderSampledImageArrayDynamicIndexing_ ) + { + shaderSampledImageArrayDynamicIndexing = shaderSampledImageArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceFeatures& setShaderStorageBufferArrayDynamicIndexing( Bool32 shaderStorageBufferArrayDynamicIndexing_ ) + { + shaderStorageBufferArrayDynamicIndexing = shaderStorageBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceFeatures& setShaderStorageImageArrayDynamicIndexing( Bool32 shaderStorageImageArrayDynamicIndexing_ ) + { + shaderStorageImageArrayDynamicIndexing = shaderStorageImageArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceFeatures& setShaderClipDistance( Bool32 shaderClipDistance_ ) + { + shaderClipDistance = shaderClipDistance_; + return *this; + } + + PhysicalDeviceFeatures& setShaderCullDistance( Bool32 shaderCullDistance_ ) + { + shaderCullDistance = shaderCullDistance_; + return *this; + } + + PhysicalDeviceFeatures& setShaderFloat64( Bool32 shaderFloat64_ ) + { + shaderFloat64 = shaderFloat64_; + return *this; + } + + PhysicalDeviceFeatures& setShaderInt64( Bool32 shaderInt64_ ) + { + shaderInt64 = shaderInt64_; + return *this; + } + + PhysicalDeviceFeatures& setShaderInt16( Bool32 shaderInt16_ ) + { + shaderInt16 = shaderInt16_; + return *this; + } + + PhysicalDeviceFeatures& setShaderResourceResidency( Bool32 shaderResourceResidency_ ) + { + shaderResourceResidency = shaderResourceResidency_; + return *this; + } + + PhysicalDeviceFeatures& setShaderResourceMinLod( Bool32 shaderResourceMinLod_ ) + { + shaderResourceMinLod = shaderResourceMinLod_; + return *this; + } + + PhysicalDeviceFeatures& setSparseBinding( Bool32 sparseBinding_ ) + { + sparseBinding = sparseBinding_; + return *this; + } + + PhysicalDeviceFeatures& setSparseResidencyBuffer( Bool32 sparseResidencyBuffer_ ) + { + sparseResidencyBuffer = sparseResidencyBuffer_; + return *this; + } + + PhysicalDeviceFeatures& setSparseResidencyImage2D( Bool32 sparseResidencyImage2D_ ) + { + sparseResidencyImage2D = sparseResidencyImage2D_; + return *this; + } + + PhysicalDeviceFeatures& setSparseResidencyImage3D( Bool32 sparseResidencyImage3D_ ) + { + sparseResidencyImage3D = sparseResidencyImage3D_; + return *this; + } + + PhysicalDeviceFeatures& setSparseResidency2Samples( Bool32 sparseResidency2Samples_ ) + { + sparseResidency2Samples = sparseResidency2Samples_; + return *this; + } + + PhysicalDeviceFeatures& setSparseResidency4Samples( Bool32 sparseResidency4Samples_ ) + { + sparseResidency4Samples = sparseResidency4Samples_; + return *this; + } + + PhysicalDeviceFeatures& setSparseResidency8Samples( Bool32 sparseResidency8Samples_ ) + { + sparseResidency8Samples = sparseResidency8Samples_; + return *this; + } + + 
PhysicalDeviceFeatures& setSparseResidency16Samples( Bool32 sparseResidency16Samples_ ) + { + sparseResidency16Samples = sparseResidency16Samples_; + return *this; + } + + PhysicalDeviceFeatures& setSparseResidencyAliased( Bool32 sparseResidencyAliased_ ) + { + sparseResidencyAliased = sparseResidencyAliased_; + return *this; + } + + PhysicalDeviceFeatures& setVariableMultisampleRate( Bool32 variableMultisampleRate_ ) + { + variableMultisampleRate = variableMultisampleRate_; + return *this; + } + + PhysicalDeviceFeatures& setInheritedQueries( Bool32 inheritedQueries_ ) + { + inheritedQueries = inheritedQueries_; + return *this; + } + + operator VkPhysicalDeviceFeatures const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceFeatures &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceFeatures const& rhs ) const + { + return ( robustBufferAccess == rhs.robustBufferAccess ) + && ( fullDrawIndexUint32 == rhs.fullDrawIndexUint32 ) + && ( imageCubeArray == rhs.imageCubeArray ) + && ( independentBlend == rhs.independentBlend ) + && ( geometryShader == rhs.geometryShader ) + && ( tessellationShader == rhs.tessellationShader ) + && ( sampleRateShading == rhs.sampleRateShading ) + && ( dualSrcBlend == rhs.dualSrcBlend ) + && ( logicOp == rhs.logicOp ) + && ( multiDrawIndirect == rhs.multiDrawIndirect ) + && ( drawIndirectFirstInstance == rhs.drawIndirectFirstInstance ) + && ( depthClamp == rhs.depthClamp ) + && ( depthBiasClamp == rhs.depthBiasClamp ) + && ( fillModeNonSolid == rhs.fillModeNonSolid ) + && ( depthBounds == rhs.depthBounds ) + && ( wideLines == rhs.wideLines ) + && ( largePoints == rhs.largePoints ) + && ( alphaToOne == rhs.alphaToOne ) + && ( multiViewport == rhs.multiViewport ) + && ( samplerAnisotropy == rhs.samplerAnisotropy ) + && ( textureCompressionETC2 == rhs.textureCompressionETC2 ) + && ( textureCompressionASTC_LDR == rhs.textureCompressionASTC_LDR ) + && ( textureCompressionBC == rhs.textureCompressionBC ) + && ( occlusionQueryPrecise == rhs.occlusionQueryPrecise ) + && ( pipelineStatisticsQuery == rhs.pipelineStatisticsQuery ) + && ( vertexPipelineStoresAndAtomics == rhs.vertexPipelineStoresAndAtomics ) + && ( fragmentStoresAndAtomics == rhs.fragmentStoresAndAtomics ) + && ( shaderTessellationAndGeometryPointSize == rhs.shaderTessellationAndGeometryPointSize ) + && ( shaderImageGatherExtended == rhs.shaderImageGatherExtended ) + && ( shaderStorageImageExtendedFormats == rhs.shaderStorageImageExtendedFormats ) + && ( shaderStorageImageMultisample == rhs.shaderStorageImageMultisample ) + && ( shaderStorageImageReadWithoutFormat == rhs.shaderStorageImageReadWithoutFormat ) + && ( shaderStorageImageWriteWithoutFormat == rhs.shaderStorageImageWriteWithoutFormat ) + && ( shaderUniformBufferArrayDynamicIndexing == rhs.shaderUniformBufferArrayDynamicIndexing ) + && ( shaderSampledImageArrayDynamicIndexing == rhs.shaderSampledImageArrayDynamicIndexing ) + && ( shaderStorageBufferArrayDynamicIndexing == rhs.shaderStorageBufferArrayDynamicIndexing ) + && ( shaderStorageImageArrayDynamicIndexing == rhs.shaderStorageImageArrayDynamicIndexing ) + && ( shaderClipDistance == rhs.shaderClipDistance ) + && ( shaderCullDistance == rhs.shaderCullDistance ) + && ( shaderFloat64 == rhs.shaderFloat64 ) + && ( shaderInt64 == rhs.shaderInt64 ) + && ( shaderInt16 == rhs.shaderInt16 ) + && ( shaderResourceResidency == rhs.shaderResourceResidency ) + && ( shaderResourceMinLod == rhs.shaderResourceMinLod ) + && ( sparseBinding == 
rhs.sparseBinding ) + && ( sparseResidencyBuffer == rhs.sparseResidencyBuffer ) + && ( sparseResidencyImage2D == rhs.sparseResidencyImage2D ) + && ( sparseResidencyImage3D == rhs.sparseResidencyImage3D ) + && ( sparseResidency2Samples == rhs.sparseResidency2Samples ) + && ( sparseResidency4Samples == rhs.sparseResidency4Samples ) + && ( sparseResidency8Samples == rhs.sparseResidency8Samples ) + && ( sparseResidency16Samples == rhs.sparseResidency16Samples ) + && ( sparseResidencyAliased == rhs.sparseResidencyAliased ) + && ( variableMultisampleRate == rhs.variableMultisampleRate ) + && ( inheritedQueries == rhs.inheritedQueries ); + } + + bool operator!=( PhysicalDeviceFeatures const& rhs ) const + { + return !operator==( rhs ); + } + + Bool32 robustBufferAccess; + Bool32 fullDrawIndexUint32; + Bool32 imageCubeArray; + Bool32 independentBlend; + Bool32 geometryShader; + Bool32 tessellationShader; + Bool32 sampleRateShading; + Bool32 dualSrcBlend; + Bool32 logicOp; + Bool32 multiDrawIndirect; + Bool32 drawIndirectFirstInstance; + Bool32 depthClamp; + Bool32 depthBiasClamp; + Bool32 fillModeNonSolid; + Bool32 depthBounds; + Bool32 wideLines; + Bool32 largePoints; + Bool32 alphaToOne; + Bool32 multiViewport; + Bool32 samplerAnisotropy; + Bool32 textureCompressionETC2; + Bool32 textureCompressionASTC_LDR; + Bool32 textureCompressionBC; + Bool32 occlusionQueryPrecise; + Bool32 pipelineStatisticsQuery; + Bool32 vertexPipelineStoresAndAtomics; + Bool32 fragmentStoresAndAtomics; + Bool32 shaderTessellationAndGeometryPointSize; + Bool32 shaderImageGatherExtended; + Bool32 shaderStorageImageExtendedFormats; + Bool32 shaderStorageImageMultisample; + Bool32 shaderStorageImageReadWithoutFormat; + Bool32 shaderStorageImageWriteWithoutFormat; + Bool32 shaderUniformBufferArrayDynamicIndexing; + Bool32 shaderSampledImageArrayDynamicIndexing; + Bool32 shaderStorageBufferArrayDynamicIndexing; + Bool32 shaderStorageImageArrayDynamicIndexing; + Bool32 shaderClipDistance; + Bool32 shaderCullDistance; + Bool32 shaderFloat64; + Bool32 shaderInt64; + Bool32 shaderInt16; + Bool32 shaderResourceResidency; + Bool32 shaderResourceMinLod; + Bool32 sparseBinding; + Bool32 sparseResidencyBuffer; + Bool32 sparseResidencyImage2D; + Bool32 sparseResidencyImage3D; + Bool32 sparseResidency2Samples; + Bool32 sparseResidency4Samples; + Bool32 sparseResidency8Samples; + Bool32 sparseResidency16Samples; + Bool32 sparseResidencyAliased; + Bool32 variableMultisampleRate; + Bool32 inheritedQueries; + }; + static_assert( sizeof( PhysicalDeviceFeatures ) == sizeof( VkPhysicalDeviceFeatures ), "struct and wrapper have different size!" 
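+
+  // Usage sketch (illustrative; which features a renderer actually needs is application-specific):
+  // PhysicalDeviceFeatures is normally filled with only the required features and then handed to
+  // device creation via DeviceCreateInfo::pEnabledFeatures:
+  //
+  //   vk::PhysicalDeviceFeatures enabled = vk::PhysicalDeviceFeatures()
+  //       .setSamplerAnisotropy( VK_TRUE )
+  //       .setShaderInt64( VK_TRUE );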
); + + struct PhysicalDeviceSparseProperties + { + operator VkPhysicalDeviceSparseProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceSparseProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceSparseProperties const& rhs ) const + { + return ( residencyStandard2DBlockShape == rhs.residencyStandard2DBlockShape ) + && ( residencyStandard2DMultisampleBlockShape == rhs.residencyStandard2DMultisampleBlockShape ) + && ( residencyStandard3DBlockShape == rhs.residencyStandard3DBlockShape ) + && ( residencyAlignedMipSize == rhs.residencyAlignedMipSize ) + && ( residencyNonResidentStrict == rhs.residencyNonResidentStrict ); + } + + bool operator!=( PhysicalDeviceSparseProperties const& rhs ) const + { + return !operator==( rhs ); + } + + Bool32 residencyStandard2DBlockShape; + Bool32 residencyStandard2DMultisampleBlockShape; + Bool32 residencyStandard3DBlockShape; + Bool32 residencyAlignedMipSize; + Bool32 residencyNonResidentStrict; + }; + static_assert( sizeof( PhysicalDeviceSparseProperties ) == sizeof( VkPhysicalDeviceSparseProperties ), "struct and wrapper have different size!" ); + + struct DrawIndirectCommand + { + DrawIndirectCommand( uint32_t vertexCount_ = 0, + uint32_t instanceCount_ = 0, + uint32_t firstVertex_ = 0, + uint32_t firstInstance_ = 0 ) + : vertexCount( vertexCount_ ) + , instanceCount( instanceCount_ ) + , firstVertex( firstVertex_ ) + , firstInstance( firstInstance_ ) + { + } + + DrawIndirectCommand( VkDrawIndirectCommand const & rhs ) + { + memcpy( this, &rhs, sizeof( DrawIndirectCommand ) ); + } + + DrawIndirectCommand& operator=( VkDrawIndirectCommand const & rhs ) + { + memcpy( this, &rhs, sizeof( DrawIndirectCommand ) ); + return *this; + } + DrawIndirectCommand& setVertexCount( uint32_t vertexCount_ ) + { + vertexCount = vertexCount_; + return *this; + } + + DrawIndirectCommand& setInstanceCount( uint32_t instanceCount_ ) + { + instanceCount = instanceCount_; + return *this; + } + + DrawIndirectCommand& setFirstVertex( uint32_t firstVertex_ ) + { + firstVertex = firstVertex_; + return *this; + } + + DrawIndirectCommand& setFirstInstance( uint32_t firstInstance_ ) + { + firstInstance = firstInstance_; + return *this; + } + + operator VkDrawIndirectCommand const&() const + { + return *reinterpret_cast(this); + } + + operator VkDrawIndirectCommand &() + { + return *reinterpret_cast(this); + } + + bool operator==( DrawIndirectCommand const& rhs ) const + { + return ( vertexCount == rhs.vertexCount ) + && ( instanceCount == rhs.instanceCount ) + && ( firstVertex == rhs.firstVertex ) + && ( firstInstance == rhs.firstInstance ); + } + + bool operator!=( DrawIndirectCommand const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t vertexCount; + uint32_t instanceCount; + uint32_t firstVertex; + uint32_t firstInstance; + }; + static_assert( sizeof( DrawIndirectCommand ) == sizeof( VkDrawIndirectCommand ), "struct and wrapper have different size!" 
); + + struct DrawIndexedIndirectCommand + { + DrawIndexedIndirectCommand( uint32_t indexCount_ = 0, + uint32_t instanceCount_ = 0, + uint32_t firstIndex_ = 0, + int32_t vertexOffset_ = 0, + uint32_t firstInstance_ = 0 ) + : indexCount( indexCount_ ) + , instanceCount( instanceCount_ ) + , firstIndex( firstIndex_ ) + , vertexOffset( vertexOffset_ ) + , firstInstance( firstInstance_ ) + { + } + + DrawIndexedIndirectCommand( VkDrawIndexedIndirectCommand const & rhs ) + { + memcpy( this, &rhs, sizeof( DrawIndexedIndirectCommand ) ); + } + + DrawIndexedIndirectCommand& operator=( VkDrawIndexedIndirectCommand const & rhs ) + { + memcpy( this, &rhs, sizeof( DrawIndexedIndirectCommand ) ); + return *this; + } + DrawIndexedIndirectCommand& setIndexCount( uint32_t indexCount_ ) + { + indexCount = indexCount_; + return *this; + } + + DrawIndexedIndirectCommand& setInstanceCount( uint32_t instanceCount_ ) + { + instanceCount = instanceCount_; + return *this; + } + + DrawIndexedIndirectCommand& setFirstIndex( uint32_t firstIndex_ ) + { + firstIndex = firstIndex_; + return *this; + } + + DrawIndexedIndirectCommand& setVertexOffset( int32_t vertexOffset_ ) + { + vertexOffset = vertexOffset_; + return *this; + } + + DrawIndexedIndirectCommand& setFirstInstance( uint32_t firstInstance_ ) + { + firstInstance = firstInstance_; + return *this; + } + + operator VkDrawIndexedIndirectCommand const&() const + { + return *reinterpret_cast(this); + } + + operator VkDrawIndexedIndirectCommand &() + { + return *reinterpret_cast(this); + } + + bool operator==( DrawIndexedIndirectCommand const& rhs ) const + { + return ( indexCount == rhs.indexCount ) + && ( instanceCount == rhs.instanceCount ) + && ( firstIndex == rhs.firstIndex ) + && ( vertexOffset == rhs.vertexOffset ) + && ( firstInstance == rhs.firstInstance ); + } + + bool operator!=( DrawIndexedIndirectCommand const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t indexCount; + uint32_t instanceCount; + uint32_t firstIndex; + int32_t vertexOffset; + uint32_t firstInstance; + }; + static_assert( sizeof( DrawIndexedIndirectCommand ) == sizeof( VkDrawIndexedIndirectCommand ), "struct and wrapper have different size!" ); + + struct DispatchIndirectCommand + { + DispatchIndirectCommand( uint32_t x_ = 0, + uint32_t y_ = 0, + uint32_t z_ = 0 ) + : x( x_ ) + , y( y_ ) + , z( z_ ) + { + } + + DispatchIndirectCommand( VkDispatchIndirectCommand const & rhs ) + { + memcpy( this, &rhs, sizeof( DispatchIndirectCommand ) ); + } + + DispatchIndirectCommand& operator=( VkDispatchIndirectCommand const & rhs ) + { + memcpy( this, &rhs, sizeof( DispatchIndirectCommand ) ); + return *this; + } + DispatchIndirectCommand& setX( uint32_t x_ ) + { + x = x_; + return *this; + } + + DispatchIndirectCommand& setY( uint32_t y_ ) + { + y = y_; + return *this; + } + + DispatchIndirectCommand& setZ( uint32_t z_ ) + { + z = z_; + return *this; + } + + operator VkDispatchIndirectCommand const&() const + { + return *reinterpret_cast(this); + } + + operator VkDispatchIndirectCommand &() + { + return *reinterpret_cast(this); + } + + bool operator==( DispatchIndirectCommand const& rhs ) const + { + return ( x == rhs.x ) + && ( y == rhs.y ) + && ( z == rhs.z ); + } + + bool operator!=( DispatchIndirectCommand const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t x; + uint32_t y; + uint32_t z; + }; + static_assert( sizeof( DispatchIndirectCommand ) == sizeof( VkDispatchIndirectCommand ), "struct and wrapper have different size!" 
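+
+  // Usage sketch (illustrative counts; `mappedIndirectBuffer` is a placeholder): the three
+  // *IndirectCommand structs above mirror the GPU-side layouts read by vkCmdDrawIndirect,
+  // vkCmdDrawIndexedIndirect and vkCmdDispatchIndirect, so they are usually written into a
+  // buffer rather than passed by pointer:
+  //
+  //   vk::DrawIndexedIndirectCommand cmd( 36 /*indexCount*/, 1 /*instanceCount*/,
+  //                                       0 /*firstIndex*/, 0 /*vertexOffset*/, 0 /*firstInstance*/ );
+  //   memcpy( mappedIndirectBuffer, &cmd, sizeof( cmd ) );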
); + + struct DisplayPlanePropertiesKHR + { + operator VkDisplayPlanePropertiesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayPlanePropertiesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayPlanePropertiesKHR const& rhs ) const + { + return ( currentDisplay == rhs.currentDisplay ) + && ( currentStackIndex == rhs.currentStackIndex ); + } + + bool operator!=( DisplayPlanePropertiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + DisplayKHR currentDisplay; + uint32_t currentStackIndex; + }; + static_assert( sizeof( DisplayPlanePropertiesKHR ) == sizeof( VkDisplayPlanePropertiesKHR ), "struct and wrapper have different size!" ); + + struct DisplayModeParametersKHR + { + DisplayModeParametersKHR( Extent2D visibleRegion_ = Extent2D(), + uint32_t refreshRate_ = 0 ) + : visibleRegion( visibleRegion_ ) + , refreshRate( refreshRate_ ) + { + } + + DisplayModeParametersKHR( VkDisplayModeParametersKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayModeParametersKHR ) ); + } + + DisplayModeParametersKHR& operator=( VkDisplayModeParametersKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayModeParametersKHR ) ); + return *this; + } + DisplayModeParametersKHR& setVisibleRegion( Extent2D visibleRegion_ ) + { + visibleRegion = visibleRegion_; + return *this; + } + + DisplayModeParametersKHR& setRefreshRate( uint32_t refreshRate_ ) + { + refreshRate = refreshRate_; + return *this; + } + + operator VkDisplayModeParametersKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayModeParametersKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayModeParametersKHR const& rhs ) const + { + return ( visibleRegion == rhs.visibleRegion ) + && ( refreshRate == rhs.refreshRate ); + } + + bool operator!=( DisplayModeParametersKHR const& rhs ) const + { + return !operator==( rhs ); + } + + Extent2D visibleRegion; + uint32_t refreshRate; + }; + static_assert( sizeof( DisplayModeParametersKHR ) == sizeof( VkDisplayModeParametersKHR ), "struct and wrapper have different size!" ); + + struct DisplayModePropertiesKHR + { + operator VkDisplayModePropertiesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayModePropertiesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayModePropertiesKHR const& rhs ) const + { + return ( displayMode == rhs.displayMode ) + && ( parameters == rhs.parameters ); + } + + bool operator!=( DisplayModePropertiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + DisplayModeKHR displayMode; + DisplayModeParametersKHR parameters; + }; + static_assert( sizeof( DisplayModePropertiesKHR ) == sizeof( VkDisplayModePropertiesKHR ), "struct and wrapper have different size!" 
); + + struct RectLayerKHR + { + RectLayerKHR( Offset2D offset_ = Offset2D(), + Extent2D extent_ = Extent2D(), + uint32_t layer_ = 0 ) + : offset( offset_ ) + , extent( extent_ ) + , layer( layer_ ) + { + } + + explicit RectLayerKHR( Rect2D const& rect2D, + uint32_t layer_ = 0 ) + : offset( rect2D.offset ) + , extent( rect2D.extent ) + , layer( layer_ ) + {} + + RectLayerKHR( VkRectLayerKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( RectLayerKHR ) ); + } + + RectLayerKHR& operator=( VkRectLayerKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( RectLayerKHR ) ); + return *this; + } + RectLayerKHR& setOffset( Offset2D offset_ ) + { + offset = offset_; + return *this; + } + + RectLayerKHR& setExtent( Extent2D extent_ ) + { + extent = extent_; + return *this; + } + + RectLayerKHR& setLayer( uint32_t layer_ ) + { + layer = layer_; + return *this; + } + + operator VkRectLayerKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkRectLayerKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( RectLayerKHR const& rhs ) const + { + return ( offset == rhs.offset ) + && ( extent == rhs.extent ) + && ( layer == rhs.layer ); + } + + bool operator!=( RectLayerKHR const& rhs ) const + { + return !operator==( rhs ); + } + + Offset2D offset; + Extent2D extent; + uint32_t layer; + }; + static_assert( sizeof( RectLayerKHR ) == sizeof( VkRectLayerKHR ), "struct and wrapper have different size!" ); + + struct PresentRegionKHR + { + PresentRegionKHR( uint32_t rectangleCount_ = 0, + const RectLayerKHR* pRectangles_ = nullptr ) + : rectangleCount( rectangleCount_ ) + , pRectangles( pRectangles_ ) + { + } + + PresentRegionKHR( VkPresentRegionKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentRegionKHR ) ); + } + + PresentRegionKHR& operator=( VkPresentRegionKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentRegionKHR ) ); + return *this; + } + PresentRegionKHR& setRectangleCount( uint32_t rectangleCount_ ) + { + rectangleCount = rectangleCount_; + return *this; + } + + PresentRegionKHR& setPRectangles( const RectLayerKHR* pRectangles_ ) + { + pRectangles = pRectangles_; + return *this; + } + + operator VkPresentRegionKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkPresentRegionKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( PresentRegionKHR const& rhs ) const + { + return ( rectangleCount == rhs.rectangleCount ) + && ( pRectangles == rhs.pRectangles ); + } + + bool operator!=( PresentRegionKHR const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t rectangleCount; + const RectLayerKHR* pRectangles; + }; + static_assert( sizeof( PresentRegionKHR ) == sizeof( VkPresentRegionKHR ), "struct and wrapper have different size!" 
); + + struct XYColorEXT + { + XYColorEXT( float x_ = 0, + float y_ = 0 ) + : x( x_ ) + , y( y_ ) + { + } + + XYColorEXT( VkXYColorEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( XYColorEXT ) ); + } + + XYColorEXT& operator=( VkXYColorEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( XYColorEXT ) ); + return *this; + } + XYColorEXT& setX( float x_ ) + { + x = x_; + return *this; + } + + XYColorEXT& setY( float y_ ) + { + y = y_; + return *this; + } + + operator VkXYColorEXT const&() const + { + return *reinterpret_cast<const VkXYColorEXT*>(this); + } + + operator VkXYColorEXT &() + { + return *reinterpret_cast<VkXYColorEXT*>(this); + } + + bool operator==( XYColorEXT const& rhs ) const + { + return ( x == rhs.x ) + && ( y == rhs.y ); + } + + bool operator!=( XYColorEXT const& rhs ) const + { + return !operator==( rhs ); + } + + float x; + float y; + }; + static_assert( sizeof( XYColorEXT ) == sizeof( VkXYColorEXT ), "struct and wrapper have different size!" ); + + struct RefreshCycleDurationGOOGLE + { + operator VkRefreshCycleDurationGOOGLE const&() const + { + return *reinterpret_cast<const VkRefreshCycleDurationGOOGLE*>(this); + } + + operator VkRefreshCycleDurationGOOGLE &() + { + return *reinterpret_cast<VkRefreshCycleDurationGOOGLE*>(this); + } + + bool operator==( RefreshCycleDurationGOOGLE const& rhs ) const + { + return ( refreshDuration == rhs.refreshDuration ); + } + + bool operator!=( RefreshCycleDurationGOOGLE const& rhs ) const + { + return !operator==( rhs ); + } + + uint64_t refreshDuration; + }; + static_assert( sizeof( RefreshCycleDurationGOOGLE ) == sizeof( VkRefreshCycleDurationGOOGLE ), "struct and wrapper have different size!" ); + + struct PastPresentationTimingGOOGLE + { + operator VkPastPresentationTimingGOOGLE const&() const + { + return *reinterpret_cast<const VkPastPresentationTimingGOOGLE*>(this); + } + + operator VkPastPresentationTimingGOOGLE &() + { + return *reinterpret_cast<VkPastPresentationTimingGOOGLE*>(this); + } + + bool operator==( PastPresentationTimingGOOGLE const& rhs ) const + { + return ( presentID == rhs.presentID ) + && ( desiredPresentTime == rhs.desiredPresentTime ) + && ( actualPresentTime == rhs.actualPresentTime ) + && ( earliestPresentTime == rhs.earliestPresentTime ) + && ( presentMargin == rhs.presentMargin ); + } + + bool operator!=( PastPresentationTimingGOOGLE const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t presentID; + uint64_t desiredPresentTime; + uint64_t actualPresentTime; + uint64_t earliestPresentTime; + uint64_t presentMargin; + }; + static_assert( sizeof( PastPresentationTimingGOOGLE ) == sizeof( VkPastPresentationTimingGOOGLE ), "struct and wrapper have different size!"
); + + struct PresentTimeGOOGLE + { + PresentTimeGOOGLE( uint32_t presentID_ = 0, + uint64_t desiredPresentTime_ = 0 ) + : presentID( presentID_ ) + , desiredPresentTime( desiredPresentTime_ ) + { + } + + PresentTimeGOOGLE( VkPresentTimeGOOGLE const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentTimeGOOGLE ) ); + } + + PresentTimeGOOGLE& operator=( VkPresentTimeGOOGLE const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentTimeGOOGLE ) ); + return *this; + } + PresentTimeGOOGLE& setPresentID( uint32_t presentID_ ) + { + presentID = presentID_; + return *this; + } + + PresentTimeGOOGLE& setDesiredPresentTime( uint64_t desiredPresentTime_ ) + { + desiredPresentTime = desiredPresentTime_; + return *this; + } + + operator VkPresentTimeGOOGLE const&() const + { + return *reinterpret_cast(this); + } + + operator VkPresentTimeGOOGLE &() + { + return *reinterpret_cast(this); + } + + bool operator==( PresentTimeGOOGLE const& rhs ) const + { + return ( presentID == rhs.presentID ) + && ( desiredPresentTime == rhs.desiredPresentTime ); + } + + bool operator!=( PresentTimeGOOGLE const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t presentID; + uint64_t desiredPresentTime; + }; + static_assert( sizeof( PresentTimeGOOGLE ) == sizeof( VkPresentTimeGOOGLE ), "struct and wrapper have different size!" ); + + struct ViewportWScalingNV + { + ViewportWScalingNV( float xcoeff_ = 0, + float ycoeff_ = 0 ) + : xcoeff( xcoeff_ ) + , ycoeff( ycoeff_ ) + { + } + + ViewportWScalingNV( VkViewportWScalingNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ViewportWScalingNV ) ); + } + + ViewportWScalingNV& operator=( VkViewportWScalingNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ViewportWScalingNV ) ); + return *this; + } + ViewportWScalingNV& setXcoeff( float xcoeff_ ) + { + xcoeff = xcoeff_; + return *this; + } + + ViewportWScalingNV& setYcoeff( float ycoeff_ ) + { + ycoeff = ycoeff_; + return *this; + } + + operator VkViewportWScalingNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkViewportWScalingNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( ViewportWScalingNV const& rhs ) const + { + return ( xcoeff == rhs.xcoeff ) + && ( ycoeff == rhs.ycoeff ); + } + + bool operator!=( ViewportWScalingNV const& rhs ) const + { + return !operator==( rhs ); + } + + float xcoeff; + float ycoeff; + }; + static_assert( sizeof( ViewportWScalingNV ) == sizeof( VkViewportWScalingNV ), "struct and wrapper have different size!" ); + + struct SampleLocationEXT + { + SampleLocationEXT( float x_ = 0, + float y_ = 0 ) + : x( x_ ) + , y( y_ ) + { + } + + SampleLocationEXT( VkSampleLocationEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SampleLocationEXT ) ); + } + + SampleLocationEXT& operator=( VkSampleLocationEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SampleLocationEXT ) ); + return *this; + } + SampleLocationEXT& setX( float x_ ) + { + x = x_; + return *this; + } + + SampleLocationEXT& setY( float y_ ) + { + y = y_; + return *this; + } + + operator VkSampleLocationEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkSampleLocationEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( SampleLocationEXT const& rhs ) const + { + return ( x == rhs.x ) + && ( y == rhs.y ); + } + + bool operator!=( SampleLocationEXT const& rhs ) const + { + return !operator==( rhs ); + } + + float x; + float y; + }; + static_assert( sizeof( SampleLocationEXT ) == sizeof( VkSampleLocationEXT ), "struct and wrapper have different size!" 
); + + struct ShaderResourceUsageAMD + { + operator VkShaderResourceUsageAMD const&() const + { + return *reinterpret_cast<const VkShaderResourceUsageAMD*>(this); + } + + operator VkShaderResourceUsageAMD &() + { + return *reinterpret_cast<VkShaderResourceUsageAMD*>(this); + } + + bool operator==( ShaderResourceUsageAMD const& rhs ) const + { + return ( numUsedVgprs == rhs.numUsedVgprs ) + && ( numUsedSgprs == rhs.numUsedSgprs ) + && ( ldsSizePerLocalWorkGroup == rhs.ldsSizePerLocalWorkGroup ) + && ( ldsUsageSizeInBytes == rhs.ldsUsageSizeInBytes ) + && ( scratchMemUsageInBytes == rhs.scratchMemUsageInBytes ); + } + + bool operator!=( ShaderResourceUsageAMD const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t numUsedVgprs; + uint32_t numUsedSgprs; + uint32_t ldsSizePerLocalWorkGroup; + size_t ldsUsageSizeInBytes; + size_t scratchMemUsageInBytes; + }; + static_assert( sizeof( ShaderResourceUsageAMD ) == sizeof( VkShaderResourceUsageAMD ), "struct and wrapper have different size!" ); + + struct VertexInputBindingDivisorDescriptionEXT + { + VertexInputBindingDivisorDescriptionEXT( uint32_t binding_ = 0, + uint32_t divisor_ = 0 ) + : binding( binding_ ) + , divisor( divisor_ ) + { + } + + VertexInputBindingDivisorDescriptionEXT( VkVertexInputBindingDivisorDescriptionEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( VertexInputBindingDivisorDescriptionEXT ) ); + } + + VertexInputBindingDivisorDescriptionEXT& operator=( VkVertexInputBindingDivisorDescriptionEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( VertexInputBindingDivisorDescriptionEXT ) ); + return *this; + } + VertexInputBindingDivisorDescriptionEXT& setBinding( uint32_t binding_ ) + { + binding = binding_; + return *this; + } + + VertexInputBindingDivisorDescriptionEXT& setDivisor( uint32_t divisor_ ) + { + divisor = divisor_; + return *this; + } + + operator VkVertexInputBindingDivisorDescriptionEXT const&() const + { + return *reinterpret_cast<const VkVertexInputBindingDivisorDescriptionEXT*>(this); + } + + operator VkVertexInputBindingDivisorDescriptionEXT &() + { + return *reinterpret_cast<VkVertexInputBindingDivisorDescriptionEXT*>(this); + } + + bool operator==( VertexInputBindingDivisorDescriptionEXT const& rhs ) const + { + return ( binding == rhs.binding ) + && ( divisor == rhs.divisor ); + } + + bool operator!=( VertexInputBindingDivisorDescriptionEXT const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t binding; + uint32_t divisor; + }; + static_assert( sizeof( VertexInputBindingDivisorDescriptionEXT ) == sizeof( VkVertexInputBindingDivisorDescriptionEXT ), "struct and wrapper have different size!"
); + + struct CoarseSampleLocationNV + { + CoarseSampleLocationNV( uint32_t pixelX_ = 0, + uint32_t pixelY_ = 0, + uint32_t sample_ = 0 ) + : pixelX( pixelX_ ) + , pixelY( pixelY_ ) + , sample( sample_ ) + { + } + + CoarseSampleLocationNV( VkCoarseSampleLocationNV const & rhs ) + { + memcpy( this, &rhs, sizeof( CoarseSampleLocationNV ) ); + } + + CoarseSampleLocationNV& operator=( VkCoarseSampleLocationNV const & rhs ) + { + memcpy( this, &rhs, sizeof( CoarseSampleLocationNV ) ); + return *this; + } + CoarseSampleLocationNV& setPixelX( uint32_t pixelX_ ) + { + pixelX = pixelX_; + return *this; + } + + CoarseSampleLocationNV& setPixelY( uint32_t pixelY_ ) + { + pixelY = pixelY_; + return *this; + } + + CoarseSampleLocationNV& setSample( uint32_t sample_ ) + { + sample = sample_; + return *this; + } + + operator VkCoarseSampleLocationNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkCoarseSampleLocationNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( CoarseSampleLocationNV const& rhs ) const + { + return ( pixelX == rhs.pixelX ) + && ( pixelY == rhs.pixelY ) + && ( sample == rhs.sample ); + } + + bool operator!=( CoarseSampleLocationNV const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t pixelX; + uint32_t pixelY; + uint32_t sample; + }; + static_assert( sizeof( CoarseSampleLocationNV ) == sizeof( VkCoarseSampleLocationNV ), "struct and wrapper have different size!" ); + + struct DrawMeshTasksIndirectCommandNV + { + DrawMeshTasksIndirectCommandNV( uint32_t taskCount_ = 0, + uint32_t firstTask_ = 0 ) + : taskCount( taskCount_ ) + , firstTask( firstTask_ ) + { + } + + DrawMeshTasksIndirectCommandNV( VkDrawMeshTasksIndirectCommandNV const & rhs ) + { + memcpy( this, &rhs, sizeof( DrawMeshTasksIndirectCommandNV ) ); + } + + DrawMeshTasksIndirectCommandNV& operator=( VkDrawMeshTasksIndirectCommandNV const & rhs ) + { + memcpy( this, &rhs, sizeof( DrawMeshTasksIndirectCommandNV ) ); + return *this; + } + DrawMeshTasksIndirectCommandNV& setTaskCount( uint32_t taskCount_ ) + { + taskCount = taskCount_; + return *this; + } + + DrawMeshTasksIndirectCommandNV& setFirstTask( uint32_t firstTask_ ) + { + firstTask = firstTask_; + return *this; + } + + operator VkDrawMeshTasksIndirectCommandNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkDrawMeshTasksIndirectCommandNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( DrawMeshTasksIndirectCommandNV const& rhs ) const + { + return ( taskCount == rhs.taskCount ) + && ( firstTask == rhs.firstTask ); + } + + bool operator!=( DrawMeshTasksIndirectCommandNV const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t taskCount; + uint32_t firstTask; + }; + static_assert( sizeof( DrawMeshTasksIndirectCommandNV ) == sizeof( VkDrawMeshTasksIndirectCommandNV ), "struct and wrapper have different size!" 
); + + enum class ImageLayout + { + eUndefined = VK_IMAGE_LAYOUT_UNDEFINED, + eGeneral = VK_IMAGE_LAYOUT_GENERAL, + eColorAttachmentOptimal = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + eDepthStencilAttachmentOptimal = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + eDepthStencilReadOnlyOptimal = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, + eShaderReadOnlyOptimal = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + eTransferSrcOptimal = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + eTransferDstOptimal = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + ePreinitialized = VK_IMAGE_LAYOUT_PREINITIALIZED, + eDepthReadOnlyStencilAttachmentOptimal = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, + eDepthReadOnlyStencilAttachmentOptimalKHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, + eDepthAttachmentStencilReadOnlyOptimal = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, + eDepthAttachmentStencilReadOnlyOptimalKHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, + ePresentSrcKHR = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + eSharedPresentKHR = VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, + eShadingRateOptimalNV = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV + }; + + struct DescriptorImageInfo + { + DescriptorImageInfo( Sampler sampler_ = Sampler(), + ImageView imageView_ = ImageView(), + ImageLayout imageLayout_ = ImageLayout::eUndefined ) + : sampler( sampler_ ) + , imageView( imageView_ ) + , imageLayout( imageLayout_ ) + { + } + + DescriptorImageInfo( VkDescriptorImageInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorImageInfo ) ); + } + + DescriptorImageInfo& operator=( VkDescriptorImageInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorImageInfo ) ); + return *this; + } + DescriptorImageInfo& setSampler( Sampler sampler_ ) + { + sampler = sampler_; + return *this; + } + + DescriptorImageInfo& setImageView( ImageView imageView_ ) + { + imageView = imageView_; + return *this; + } + + DescriptorImageInfo& setImageLayout( ImageLayout imageLayout_ ) + { + imageLayout = imageLayout_; + return *this; + } + + operator VkDescriptorImageInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorImageInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorImageInfo const& rhs ) const + { + return ( sampler == rhs.sampler ) + && ( imageView == rhs.imageView ) + && ( imageLayout == rhs.imageLayout ); + } + + bool operator!=( DescriptorImageInfo const& rhs ) const + { + return !operator==( rhs ); + } + + Sampler sampler; + ImageView imageView; + ImageLayout imageLayout; + }; + static_assert( sizeof( DescriptorImageInfo ) == sizeof( VkDescriptorImageInfo ), "struct and wrapper have different size!" 
); + + struct AttachmentReference + { + AttachmentReference( uint32_t attachment_ = 0, + ImageLayout layout_ = ImageLayout::eUndefined ) + : attachment( attachment_ ) + , layout( layout_ ) + { + } + + AttachmentReference( VkAttachmentReference const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentReference ) ); + } + + AttachmentReference& operator=( VkAttachmentReference const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentReference ) ); + return *this; + } + AttachmentReference& setAttachment( uint32_t attachment_ ) + { + attachment = attachment_; + return *this; + } + + AttachmentReference& setLayout( ImageLayout layout_ ) + { + layout = layout_; + return *this; + } + + operator VkAttachmentReference const&() const + { + return *reinterpret_cast(this); + } + + operator VkAttachmentReference &() + { + return *reinterpret_cast(this); + } + + bool operator==( AttachmentReference const& rhs ) const + { + return ( attachment == rhs.attachment ) + && ( layout == rhs.layout ); + } + + bool operator!=( AttachmentReference const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t attachment; + ImageLayout layout; + }; + static_assert( sizeof( AttachmentReference ) == sizeof( VkAttachmentReference ), "struct and wrapper have different size!" ); + + enum class AttachmentLoadOp + { + eLoad = VK_ATTACHMENT_LOAD_OP_LOAD, + eClear = VK_ATTACHMENT_LOAD_OP_CLEAR, + eDontCare = VK_ATTACHMENT_LOAD_OP_DONT_CARE + }; + + enum class AttachmentStoreOp + { + eStore = VK_ATTACHMENT_STORE_OP_STORE, + eDontCare = VK_ATTACHMENT_STORE_OP_DONT_CARE + }; + + enum class ImageType + { + e1D = VK_IMAGE_TYPE_1D, + e2D = VK_IMAGE_TYPE_2D, + e3D = VK_IMAGE_TYPE_3D + }; + + enum class ImageTiling + { + eOptimal = VK_IMAGE_TILING_OPTIMAL, + eLinear = VK_IMAGE_TILING_LINEAR + }; + + enum class ImageViewType + { + e1D = VK_IMAGE_VIEW_TYPE_1D, + e2D = VK_IMAGE_VIEW_TYPE_2D, + e3D = VK_IMAGE_VIEW_TYPE_3D, + eCube = VK_IMAGE_VIEW_TYPE_CUBE, + e1DArray = VK_IMAGE_VIEW_TYPE_1D_ARRAY, + e2DArray = VK_IMAGE_VIEW_TYPE_2D_ARRAY, + eCubeArray = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY + }; + + enum class CommandBufferLevel + { + ePrimary = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + eSecondary = VK_COMMAND_BUFFER_LEVEL_SECONDARY + }; + + enum class ComponentSwizzle + { + eIdentity = VK_COMPONENT_SWIZZLE_IDENTITY, + eZero = VK_COMPONENT_SWIZZLE_ZERO, + eOne = VK_COMPONENT_SWIZZLE_ONE, + eR = VK_COMPONENT_SWIZZLE_R, + eG = VK_COMPONENT_SWIZZLE_G, + eB = VK_COMPONENT_SWIZZLE_B, + eA = VK_COMPONENT_SWIZZLE_A + }; + + struct ComponentMapping + { + ComponentMapping( ComponentSwizzle r_ = ComponentSwizzle::eIdentity, + ComponentSwizzle g_ = ComponentSwizzle::eIdentity, + ComponentSwizzle b_ = ComponentSwizzle::eIdentity, + ComponentSwizzle a_ = ComponentSwizzle::eIdentity ) + : r( r_ ) + , g( g_ ) + , b( b_ ) + , a( a_ ) + { + } + + ComponentMapping( VkComponentMapping const & rhs ) + { + memcpy( this, &rhs, sizeof( ComponentMapping ) ); + } + + ComponentMapping& operator=( VkComponentMapping const & rhs ) + { + memcpy( this, &rhs, sizeof( ComponentMapping ) ); + return *this; + } + ComponentMapping& setR( ComponentSwizzle r_ ) + { + r = r_; + return *this; + } + + ComponentMapping& setG( ComponentSwizzle g_ ) + { + g = g_; + return *this; + } + + ComponentMapping& setB( ComponentSwizzle b_ ) + { + b = b_; + return *this; + } + + ComponentMapping& setA( ComponentSwizzle a_ ) + { + a = a_; + return *this; + } + + operator VkComponentMapping const&() const + { + return *reinterpret_cast(this); + } + + operator VkComponentMapping &() + { + return 
*reinterpret_cast(this); + } + + bool operator==( ComponentMapping const& rhs ) const + { + return ( r == rhs.r ) + && ( g == rhs.g ) + && ( b == rhs.b ) + && ( a == rhs.a ); + } + + bool operator!=( ComponentMapping const& rhs ) const + { + return !operator==( rhs ); + } + + ComponentSwizzle r; + ComponentSwizzle g; + ComponentSwizzle b; + ComponentSwizzle a; + }; + static_assert( sizeof( ComponentMapping ) == sizeof( VkComponentMapping ), "struct and wrapper have different size!" ); + + enum class DescriptorType + { + eSampler = VK_DESCRIPTOR_TYPE_SAMPLER, + eCombinedImageSampler = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + eSampledImage = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, + eStorageImage = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + eUniformTexelBuffer = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, + eStorageTexelBuffer = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, + eUniformBuffer = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + eStorageBuffer = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + eUniformBufferDynamic = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, + eStorageBufferDynamic = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, + eInputAttachment = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, + eInlineUniformBlockEXT = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, + eAccelerationStructureNVX = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NVX + }; + + struct DescriptorPoolSize + { + DescriptorPoolSize( DescriptorType type_ = DescriptorType::eSampler, + uint32_t descriptorCount_ = 0 ) + : type( type_ ) + , descriptorCount( descriptorCount_ ) + { + } + + DescriptorPoolSize( VkDescriptorPoolSize const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorPoolSize ) ); + } + + DescriptorPoolSize& operator=( VkDescriptorPoolSize const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorPoolSize ) ); + return *this; + } + DescriptorPoolSize& setType( DescriptorType type_ ) + { + type = type_; + return *this; + } + + DescriptorPoolSize& setDescriptorCount( uint32_t descriptorCount_ ) + { + descriptorCount = descriptorCount_; + return *this; + } + + operator VkDescriptorPoolSize const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorPoolSize &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorPoolSize const& rhs ) const + { + return ( type == rhs.type ) + && ( descriptorCount == rhs.descriptorCount ); + } + + bool operator!=( DescriptorPoolSize const& rhs ) const + { + return !operator==( rhs ); + } + + DescriptorType type; + uint32_t descriptorCount; + }; + static_assert( sizeof( DescriptorPoolSize ) == sizeof( VkDescriptorPoolSize ), "struct and wrapper have different size!" 
); + + struct DescriptorUpdateTemplateEntry + { + DescriptorUpdateTemplateEntry( uint32_t dstBinding_ = 0, + uint32_t dstArrayElement_ = 0, + uint32_t descriptorCount_ = 0, + DescriptorType descriptorType_ = DescriptorType::eSampler, + size_t offset_ = 0, + size_t stride_ = 0 ) + : dstBinding( dstBinding_ ) + , dstArrayElement( dstArrayElement_ ) + , descriptorCount( descriptorCount_ ) + , descriptorType( descriptorType_ ) + , offset( offset_ ) + , stride( stride_ ) + { + } + + DescriptorUpdateTemplateEntry( VkDescriptorUpdateTemplateEntry const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorUpdateTemplateEntry ) ); + } + + DescriptorUpdateTemplateEntry& operator=( VkDescriptorUpdateTemplateEntry const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorUpdateTemplateEntry ) ); + return *this; + } + DescriptorUpdateTemplateEntry& setDstBinding( uint32_t dstBinding_ ) + { + dstBinding = dstBinding_; + return *this; + } + + DescriptorUpdateTemplateEntry& setDstArrayElement( uint32_t dstArrayElement_ ) + { + dstArrayElement = dstArrayElement_; + return *this; + } + + DescriptorUpdateTemplateEntry& setDescriptorCount( uint32_t descriptorCount_ ) + { + descriptorCount = descriptorCount_; + return *this; + } + + DescriptorUpdateTemplateEntry& setDescriptorType( DescriptorType descriptorType_ ) + { + descriptorType = descriptorType_; + return *this; + } + + DescriptorUpdateTemplateEntry& setOffset( size_t offset_ ) + { + offset = offset_; + return *this; + } + + DescriptorUpdateTemplateEntry& setStride( size_t stride_ ) + { + stride = stride_; + return *this; + } + + operator VkDescriptorUpdateTemplateEntry const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorUpdateTemplateEntry &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorUpdateTemplateEntry const& rhs ) const + { + return ( dstBinding == rhs.dstBinding ) + && ( dstArrayElement == rhs.dstArrayElement ) + && ( descriptorCount == rhs.descriptorCount ) + && ( descriptorType == rhs.descriptorType ) + && ( offset == rhs.offset ) + && ( stride == rhs.stride ); + } + + bool operator!=( DescriptorUpdateTemplateEntry const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + DescriptorType descriptorType; + size_t offset; + size_t stride; + }; + static_assert( sizeof( DescriptorUpdateTemplateEntry ) == sizeof( VkDescriptorUpdateTemplateEntry ), "struct and wrapper have different size!" 
); + + using DescriptorUpdateTemplateEntryKHR = DescriptorUpdateTemplateEntry; + + enum class QueryType + { + eOcclusion = VK_QUERY_TYPE_OCCLUSION, + ePipelineStatistics = VK_QUERY_TYPE_PIPELINE_STATISTICS, + eTimestamp = VK_QUERY_TYPE_TIMESTAMP, + eCompactedSizeNVX = VK_QUERY_TYPE_COMPACTED_SIZE_NVX + }; + + enum class BorderColor + { + eFloatTransparentBlack = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, + eIntTransparentBlack = VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, + eFloatOpaqueBlack = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK, + eIntOpaqueBlack = VK_BORDER_COLOR_INT_OPAQUE_BLACK, + eFloatOpaqueWhite = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE, + eIntOpaqueWhite = VK_BORDER_COLOR_INT_OPAQUE_WHITE + }; + + enum class PipelineBindPoint + { + eGraphics = VK_PIPELINE_BIND_POINT_GRAPHICS, + eCompute = VK_PIPELINE_BIND_POINT_COMPUTE, + eRaytracingNVX = VK_PIPELINE_BIND_POINT_RAYTRACING_NVX + }; + + enum class PipelineCacheHeaderVersion + { + eOne = VK_PIPELINE_CACHE_HEADER_VERSION_ONE + }; + + enum class PrimitiveTopology + { + ePointList = VK_PRIMITIVE_TOPOLOGY_POINT_LIST, + eLineList = VK_PRIMITIVE_TOPOLOGY_LINE_LIST, + eLineStrip = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP, + eTriangleList = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, + eTriangleStrip = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, + eTriangleFan = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN, + eLineListWithAdjacency = VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY, + eLineStripWithAdjacency = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY, + eTriangleListWithAdjacency = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY, + eTriangleStripWithAdjacency = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY, + ePatchList = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST + }; + + enum class SharingMode + { + eExclusive = VK_SHARING_MODE_EXCLUSIVE, + eConcurrent = VK_SHARING_MODE_CONCURRENT + }; + + enum class IndexType + { + eUint16 = VK_INDEX_TYPE_UINT16, + eUint32 = VK_INDEX_TYPE_UINT32 + }; + + enum class Filter + { + eNearest = VK_FILTER_NEAREST, + eLinear = VK_FILTER_LINEAR, + eCubicIMG = VK_FILTER_CUBIC_IMG + }; + + enum class SamplerMipmapMode + { + eNearest = VK_SAMPLER_MIPMAP_MODE_NEAREST, + eLinear = VK_SAMPLER_MIPMAP_MODE_LINEAR + }; + + enum class SamplerAddressMode + { + eRepeat = VK_SAMPLER_ADDRESS_MODE_REPEAT, + eMirroredRepeat = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT, + eClampToEdge = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, + eClampToBorder = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, + eMirrorClampToEdge = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE + }; + + enum class CompareOp + { + eNever = VK_COMPARE_OP_NEVER, + eLess = VK_COMPARE_OP_LESS, + eEqual = VK_COMPARE_OP_EQUAL, + eLessOrEqual = VK_COMPARE_OP_LESS_OR_EQUAL, + eGreater = VK_COMPARE_OP_GREATER, + eNotEqual = VK_COMPARE_OP_NOT_EQUAL, + eGreaterOrEqual = VK_COMPARE_OP_GREATER_OR_EQUAL, + eAlways = VK_COMPARE_OP_ALWAYS + }; + + enum class PolygonMode + { + eFill = VK_POLYGON_MODE_FILL, + eLine = VK_POLYGON_MODE_LINE, + ePoint = VK_POLYGON_MODE_POINT, + eFillRectangleNV = VK_POLYGON_MODE_FILL_RECTANGLE_NV + }; + + enum class CullModeFlagBits + { + eNone = VK_CULL_MODE_NONE, + eFront = VK_CULL_MODE_FRONT_BIT, + eBack = VK_CULL_MODE_BACK_BIT, + eFrontAndBack = VK_CULL_MODE_FRONT_AND_BACK + }; + + using CullModeFlags = Flags<CullModeFlagBits, VkCullModeFlags>; + + VULKAN_HPP_INLINE CullModeFlags operator|( CullModeFlagBits bit0, CullModeFlagBits bit1 ) + { + return CullModeFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE CullModeFlags operator~( CullModeFlagBits bits ) + { + return ~( CullModeFlags( bits ) ); + } + + template <> struct FlagTraits<CullModeFlagBits> + { +
enum + { + allFlags = VkFlags(CullModeFlagBits::eNone) | VkFlags(CullModeFlagBits::eFront) | VkFlags(CullModeFlagBits::eBack) | VkFlags(CullModeFlagBits::eFrontAndBack) + }; + }; + + enum class FrontFace + { + eCounterClockwise = VK_FRONT_FACE_COUNTER_CLOCKWISE, + eClockwise = VK_FRONT_FACE_CLOCKWISE + }; + + enum class BlendFactor + { + eZero = VK_BLEND_FACTOR_ZERO, + eOne = VK_BLEND_FACTOR_ONE, + eSrcColor = VK_BLEND_FACTOR_SRC_COLOR, + eOneMinusSrcColor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR, + eDstColor = VK_BLEND_FACTOR_DST_COLOR, + eOneMinusDstColor = VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR, + eSrcAlpha = VK_BLEND_FACTOR_SRC_ALPHA, + eOneMinusSrcAlpha = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, + eDstAlpha = VK_BLEND_FACTOR_DST_ALPHA, + eOneMinusDstAlpha = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA, + eConstantColor = VK_BLEND_FACTOR_CONSTANT_COLOR, + eOneMinusConstantColor = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR, + eConstantAlpha = VK_BLEND_FACTOR_CONSTANT_ALPHA, + eOneMinusConstantAlpha = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA, + eSrcAlphaSaturate = VK_BLEND_FACTOR_SRC_ALPHA_SATURATE, + eSrc1Color = VK_BLEND_FACTOR_SRC1_COLOR, + eOneMinusSrc1Color = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR, + eSrc1Alpha = VK_BLEND_FACTOR_SRC1_ALPHA, + eOneMinusSrc1Alpha = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA + }; + + enum class BlendOp + { + eAdd = VK_BLEND_OP_ADD, + eSubtract = VK_BLEND_OP_SUBTRACT, + eReverseSubtract = VK_BLEND_OP_REVERSE_SUBTRACT, + eMin = VK_BLEND_OP_MIN, + eMax = VK_BLEND_OP_MAX, + eZeroEXT = VK_BLEND_OP_ZERO_EXT, + eSrcEXT = VK_BLEND_OP_SRC_EXT, + eDstEXT = VK_BLEND_OP_DST_EXT, + eSrcOverEXT = VK_BLEND_OP_SRC_OVER_EXT, + eDstOverEXT = VK_BLEND_OP_DST_OVER_EXT, + eSrcInEXT = VK_BLEND_OP_SRC_IN_EXT, + eDstInEXT = VK_BLEND_OP_DST_IN_EXT, + eSrcOutEXT = VK_BLEND_OP_SRC_OUT_EXT, + eDstOutEXT = VK_BLEND_OP_DST_OUT_EXT, + eSrcAtopEXT = VK_BLEND_OP_SRC_ATOP_EXT, + eDstAtopEXT = VK_BLEND_OP_DST_ATOP_EXT, + eXorEXT = VK_BLEND_OP_XOR_EXT, + eMultiplyEXT = VK_BLEND_OP_MULTIPLY_EXT, + eScreenEXT = VK_BLEND_OP_SCREEN_EXT, + eOverlayEXT = VK_BLEND_OP_OVERLAY_EXT, + eDarkenEXT = VK_BLEND_OP_DARKEN_EXT, + eLightenEXT = VK_BLEND_OP_LIGHTEN_EXT, + eColordodgeEXT = VK_BLEND_OP_COLORDODGE_EXT, + eColorburnEXT = VK_BLEND_OP_COLORBURN_EXT, + eHardlightEXT = VK_BLEND_OP_HARDLIGHT_EXT, + eSoftlightEXT = VK_BLEND_OP_SOFTLIGHT_EXT, + eDifferenceEXT = VK_BLEND_OP_DIFFERENCE_EXT, + eExclusionEXT = VK_BLEND_OP_EXCLUSION_EXT, + eInvertEXT = VK_BLEND_OP_INVERT_EXT, + eInvertRgbEXT = VK_BLEND_OP_INVERT_RGB_EXT, + eLineardodgeEXT = VK_BLEND_OP_LINEARDODGE_EXT, + eLinearburnEXT = VK_BLEND_OP_LINEARBURN_EXT, + eVividlightEXT = VK_BLEND_OP_VIVIDLIGHT_EXT, + eLinearlightEXT = VK_BLEND_OP_LINEARLIGHT_EXT, + ePinlightEXT = VK_BLEND_OP_PINLIGHT_EXT, + eHardmixEXT = VK_BLEND_OP_HARDMIX_EXT, + eHslHueEXT = VK_BLEND_OP_HSL_HUE_EXT, + eHslSaturationEXT = VK_BLEND_OP_HSL_SATURATION_EXT, + eHslColorEXT = VK_BLEND_OP_HSL_COLOR_EXT, + eHslLuminosityEXT = VK_BLEND_OP_HSL_LUMINOSITY_EXT, + ePlusEXT = VK_BLEND_OP_PLUS_EXT, + ePlusClampedEXT = VK_BLEND_OP_PLUS_CLAMPED_EXT, + ePlusClampedAlphaEXT = VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT, + ePlusDarkerEXT = VK_BLEND_OP_PLUS_DARKER_EXT, + eMinusEXT = VK_BLEND_OP_MINUS_EXT, + eMinusClampedEXT = VK_BLEND_OP_MINUS_CLAMPED_EXT, + eContrastEXT = VK_BLEND_OP_CONTRAST_EXT, + eInvertOvgEXT = VK_BLEND_OP_INVERT_OVG_EXT, + eRedEXT = VK_BLEND_OP_RED_EXT, + eGreenEXT = VK_BLEND_OP_GREEN_EXT, + eBlueEXT = VK_BLEND_OP_BLUE_EXT + }; + + enum class StencilOp + { + eKeep = VK_STENCIL_OP_KEEP, + eZero = 
VK_STENCIL_OP_ZERO, + eReplace = VK_STENCIL_OP_REPLACE, + eIncrementAndClamp = VK_STENCIL_OP_INCREMENT_AND_CLAMP, + eDecrementAndClamp = VK_STENCIL_OP_DECREMENT_AND_CLAMP, + eInvert = VK_STENCIL_OP_INVERT, + eIncrementAndWrap = VK_STENCIL_OP_INCREMENT_AND_WRAP, + eDecrementAndWrap = VK_STENCIL_OP_DECREMENT_AND_WRAP + }; + + struct StencilOpState + { + StencilOpState( StencilOp failOp_ = StencilOp::eKeep, + StencilOp passOp_ = StencilOp::eKeep, + StencilOp depthFailOp_ = StencilOp::eKeep, + CompareOp compareOp_ = CompareOp::eNever, + uint32_t compareMask_ = 0, + uint32_t writeMask_ = 0, + uint32_t reference_ = 0 ) + : failOp( failOp_ ) + , passOp( passOp_ ) + , depthFailOp( depthFailOp_ ) + , compareOp( compareOp_ ) + , compareMask( compareMask_ ) + , writeMask( writeMask_ ) + , reference( reference_ ) + { + } + + StencilOpState( VkStencilOpState const & rhs ) + { + memcpy( this, &rhs, sizeof( StencilOpState ) ); + } + + StencilOpState& operator=( VkStencilOpState const & rhs ) + { + memcpy( this, &rhs, sizeof( StencilOpState ) ); + return *this; + } + StencilOpState& setFailOp( StencilOp failOp_ ) + { + failOp = failOp_; + return *this; + } + + StencilOpState& setPassOp( StencilOp passOp_ ) + { + passOp = passOp_; + return *this; + } + + StencilOpState& setDepthFailOp( StencilOp depthFailOp_ ) + { + depthFailOp = depthFailOp_; + return *this; + } + + StencilOpState& setCompareOp( CompareOp compareOp_ ) + { + compareOp = compareOp_; + return *this; + } + + StencilOpState& setCompareMask( uint32_t compareMask_ ) + { + compareMask = compareMask_; + return *this; + } + + StencilOpState& setWriteMask( uint32_t writeMask_ ) + { + writeMask = writeMask_; + return *this; + } + + StencilOpState& setReference( uint32_t reference_ ) + { + reference = reference_; + return *this; + } + + operator VkStencilOpState const&() const + { + return *reinterpret_cast(this); + } + + operator VkStencilOpState &() + { + return *reinterpret_cast(this); + } + + bool operator==( StencilOpState const& rhs ) const + { + return ( failOp == rhs.failOp ) + && ( passOp == rhs.passOp ) + && ( depthFailOp == rhs.depthFailOp ) + && ( compareOp == rhs.compareOp ) + && ( compareMask == rhs.compareMask ) + && ( writeMask == rhs.writeMask ) + && ( reference == rhs.reference ); + } + + bool operator!=( StencilOpState const& rhs ) const + { + return !operator==( rhs ); + } + + StencilOp failOp; + StencilOp passOp; + StencilOp depthFailOp; + CompareOp compareOp; + uint32_t compareMask; + uint32_t writeMask; + uint32_t reference; + }; + static_assert( sizeof( StencilOpState ) == sizeof( VkStencilOpState ), "struct and wrapper have different size!" 
); + + enum class LogicOp + { + eClear = VK_LOGIC_OP_CLEAR, + eAnd = VK_LOGIC_OP_AND, + eAndReverse = VK_LOGIC_OP_AND_REVERSE, + eCopy = VK_LOGIC_OP_COPY, + eAndInverted = VK_LOGIC_OP_AND_INVERTED, + eNoOp = VK_LOGIC_OP_NO_OP, + eXor = VK_LOGIC_OP_XOR, + eOr = VK_LOGIC_OP_OR, + eNor = VK_LOGIC_OP_NOR, + eEquivalent = VK_LOGIC_OP_EQUIVALENT, + eInvert = VK_LOGIC_OP_INVERT, + eOrReverse = VK_LOGIC_OP_OR_REVERSE, + eCopyInverted = VK_LOGIC_OP_COPY_INVERTED, + eOrInverted = VK_LOGIC_OP_OR_INVERTED, + eNand = VK_LOGIC_OP_NAND, + eSet = VK_LOGIC_OP_SET + }; + + enum class InternalAllocationType + { + eExecutable = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + }; + + enum class SystemAllocationScope + { + eCommand = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND, + eObject = VK_SYSTEM_ALLOCATION_SCOPE_OBJECT, + eCache = VK_SYSTEM_ALLOCATION_SCOPE_CACHE, + eDevice = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE, + eInstance = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE + }; + + enum class PhysicalDeviceType + { + eOther = VK_PHYSICAL_DEVICE_TYPE_OTHER, + eIntegratedGpu = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU, + eDiscreteGpu = VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU, + eVirtualGpu = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU, + eCpu = VK_PHYSICAL_DEVICE_TYPE_CPU + }; + + enum class VertexInputRate + { + eVertex = VK_VERTEX_INPUT_RATE_VERTEX, + eInstance = VK_VERTEX_INPUT_RATE_INSTANCE + }; + + struct VertexInputBindingDescription + { + VertexInputBindingDescription( uint32_t binding_ = 0, + uint32_t stride_ = 0, + VertexInputRate inputRate_ = VertexInputRate::eVertex ) + : binding( binding_ ) + , stride( stride_ ) + , inputRate( inputRate_ ) + { + } + + VertexInputBindingDescription( VkVertexInputBindingDescription const & rhs ) + { + memcpy( this, &rhs, sizeof( VertexInputBindingDescription ) ); + } + + VertexInputBindingDescription& operator=( VkVertexInputBindingDescription const & rhs ) + { + memcpy( this, &rhs, sizeof( VertexInputBindingDescription ) ); + return *this; + } + VertexInputBindingDescription& setBinding( uint32_t binding_ ) + { + binding = binding_; + return *this; + } + + VertexInputBindingDescription& setStride( uint32_t stride_ ) + { + stride = stride_; + return *this; + } + + VertexInputBindingDescription& setInputRate( VertexInputRate inputRate_ ) + { + inputRate = inputRate_; + return *this; + } + + operator VkVertexInputBindingDescription const&() const + { + return *reinterpret_cast(this); + } + + operator VkVertexInputBindingDescription &() + { + return *reinterpret_cast(this); + } + + bool operator==( VertexInputBindingDescription const& rhs ) const + { + return ( binding == rhs.binding ) + && ( stride == rhs.stride ) + && ( inputRate == rhs.inputRate ); + } + + bool operator!=( VertexInputBindingDescription const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t binding; + uint32_t stride; + VertexInputRate inputRate; + }; + static_assert( sizeof( VertexInputBindingDescription ) == sizeof( VkVertexInputBindingDescription ), "struct and wrapper have different size!" 
); + + enum class Format + { + eUndefined = VK_FORMAT_UNDEFINED, + eR4G4UnormPack8 = VK_FORMAT_R4G4_UNORM_PACK8, + eR4G4B4A4UnormPack16 = VK_FORMAT_R4G4B4A4_UNORM_PACK16, + eB4G4R4A4UnormPack16 = VK_FORMAT_B4G4R4A4_UNORM_PACK16, + eR5G6B5UnormPack16 = VK_FORMAT_R5G6B5_UNORM_PACK16, + eB5G6R5UnormPack16 = VK_FORMAT_B5G6R5_UNORM_PACK16, + eR5G5B5A1UnormPack16 = VK_FORMAT_R5G5B5A1_UNORM_PACK16, + eB5G5R5A1UnormPack16 = VK_FORMAT_B5G5R5A1_UNORM_PACK16, + eA1R5G5B5UnormPack16 = VK_FORMAT_A1R5G5B5_UNORM_PACK16, + eR8Unorm = VK_FORMAT_R8_UNORM, + eR8Snorm = VK_FORMAT_R8_SNORM, + eR8Uscaled = VK_FORMAT_R8_USCALED, + eR8Sscaled = VK_FORMAT_R8_SSCALED, + eR8Uint = VK_FORMAT_R8_UINT, + eR8Sint = VK_FORMAT_R8_SINT, + eR8Srgb = VK_FORMAT_R8_SRGB, + eR8G8Unorm = VK_FORMAT_R8G8_UNORM, + eR8G8Snorm = VK_FORMAT_R8G8_SNORM, + eR8G8Uscaled = VK_FORMAT_R8G8_USCALED, + eR8G8Sscaled = VK_FORMAT_R8G8_SSCALED, + eR8G8Uint = VK_FORMAT_R8G8_UINT, + eR8G8Sint = VK_FORMAT_R8G8_SINT, + eR8G8Srgb = VK_FORMAT_R8G8_SRGB, + eR8G8B8Unorm = VK_FORMAT_R8G8B8_UNORM, + eR8G8B8Snorm = VK_FORMAT_R8G8B8_SNORM, + eR8G8B8Uscaled = VK_FORMAT_R8G8B8_USCALED, + eR8G8B8Sscaled = VK_FORMAT_R8G8B8_SSCALED, + eR8G8B8Uint = VK_FORMAT_R8G8B8_UINT, + eR8G8B8Sint = VK_FORMAT_R8G8B8_SINT, + eR8G8B8Srgb = VK_FORMAT_R8G8B8_SRGB, + eB8G8R8Unorm = VK_FORMAT_B8G8R8_UNORM, + eB8G8R8Snorm = VK_FORMAT_B8G8R8_SNORM, + eB8G8R8Uscaled = VK_FORMAT_B8G8R8_USCALED, + eB8G8R8Sscaled = VK_FORMAT_B8G8R8_SSCALED, + eB8G8R8Uint = VK_FORMAT_B8G8R8_UINT, + eB8G8R8Sint = VK_FORMAT_B8G8R8_SINT, + eB8G8R8Srgb = VK_FORMAT_B8G8R8_SRGB, + eR8G8B8A8Unorm = VK_FORMAT_R8G8B8A8_UNORM, + eR8G8B8A8Snorm = VK_FORMAT_R8G8B8A8_SNORM, + eR8G8B8A8Uscaled = VK_FORMAT_R8G8B8A8_USCALED, + eR8G8B8A8Sscaled = VK_FORMAT_R8G8B8A8_SSCALED, + eR8G8B8A8Uint = VK_FORMAT_R8G8B8A8_UINT, + eR8G8B8A8Sint = VK_FORMAT_R8G8B8A8_SINT, + eR8G8B8A8Srgb = VK_FORMAT_R8G8B8A8_SRGB, + eB8G8R8A8Unorm = VK_FORMAT_B8G8R8A8_UNORM, + eB8G8R8A8Snorm = VK_FORMAT_B8G8R8A8_SNORM, + eB8G8R8A8Uscaled = VK_FORMAT_B8G8R8A8_USCALED, + eB8G8R8A8Sscaled = VK_FORMAT_B8G8R8A8_SSCALED, + eB8G8R8A8Uint = VK_FORMAT_B8G8R8A8_UINT, + eB8G8R8A8Sint = VK_FORMAT_B8G8R8A8_SINT, + eB8G8R8A8Srgb = VK_FORMAT_B8G8R8A8_SRGB, + eA8B8G8R8UnormPack32 = VK_FORMAT_A8B8G8R8_UNORM_PACK32, + eA8B8G8R8SnormPack32 = VK_FORMAT_A8B8G8R8_SNORM_PACK32, + eA8B8G8R8UscaledPack32 = VK_FORMAT_A8B8G8R8_USCALED_PACK32, + eA8B8G8R8SscaledPack32 = VK_FORMAT_A8B8G8R8_SSCALED_PACK32, + eA8B8G8R8UintPack32 = VK_FORMAT_A8B8G8R8_UINT_PACK32, + eA8B8G8R8SintPack32 = VK_FORMAT_A8B8G8R8_SINT_PACK32, + eA8B8G8R8SrgbPack32 = VK_FORMAT_A8B8G8R8_SRGB_PACK32, + eA2R10G10B10UnormPack32 = VK_FORMAT_A2R10G10B10_UNORM_PACK32, + eA2R10G10B10SnormPack32 = VK_FORMAT_A2R10G10B10_SNORM_PACK32, + eA2R10G10B10UscaledPack32 = VK_FORMAT_A2R10G10B10_USCALED_PACK32, + eA2R10G10B10SscaledPack32 = VK_FORMAT_A2R10G10B10_SSCALED_PACK32, + eA2R10G10B10UintPack32 = VK_FORMAT_A2R10G10B10_UINT_PACK32, + eA2R10G10B10SintPack32 = VK_FORMAT_A2R10G10B10_SINT_PACK32, + eA2B10G10R10UnormPack32 = VK_FORMAT_A2B10G10R10_UNORM_PACK32, + eA2B10G10R10SnormPack32 = VK_FORMAT_A2B10G10R10_SNORM_PACK32, + eA2B10G10R10UscaledPack32 = VK_FORMAT_A2B10G10R10_USCALED_PACK32, + eA2B10G10R10SscaledPack32 = VK_FORMAT_A2B10G10R10_SSCALED_PACK32, + eA2B10G10R10UintPack32 = VK_FORMAT_A2B10G10R10_UINT_PACK32, + eA2B10G10R10SintPack32 = VK_FORMAT_A2B10G10R10_SINT_PACK32, + eR16Unorm = VK_FORMAT_R16_UNORM, + eR16Snorm = VK_FORMAT_R16_SNORM, + eR16Uscaled = VK_FORMAT_R16_USCALED, + eR16Sscaled = VK_FORMAT_R16_SSCALED, + 
eR16Uint = VK_FORMAT_R16_UINT, + eR16Sint = VK_FORMAT_R16_SINT, + eR16Sfloat = VK_FORMAT_R16_SFLOAT, + eR16G16Unorm = VK_FORMAT_R16G16_UNORM, + eR16G16Snorm = VK_FORMAT_R16G16_SNORM, + eR16G16Uscaled = VK_FORMAT_R16G16_USCALED, + eR16G16Sscaled = VK_FORMAT_R16G16_SSCALED, + eR16G16Uint = VK_FORMAT_R16G16_UINT, + eR16G16Sint = VK_FORMAT_R16G16_SINT, + eR16G16Sfloat = VK_FORMAT_R16G16_SFLOAT, + eR16G16B16Unorm = VK_FORMAT_R16G16B16_UNORM, + eR16G16B16Snorm = VK_FORMAT_R16G16B16_SNORM, + eR16G16B16Uscaled = VK_FORMAT_R16G16B16_USCALED, + eR16G16B16Sscaled = VK_FORMAT_R16G16B16_SSCALED, + eR16G16B16Uint = VK_FORMAT_R16G16B16_UINT, + eR16G16B16Sint = VK_FORMAT_R16G16B16_SINT, + eR16G16B16Sfloat = VK_FORMAT_R16G16B16_SFLOAT, + eR16G16B16A16Unorm = VK_FORMAT_R16G16B16A16_UNORM, + eR16G16B16A16Snorm = VK_FORMAT_R16G16B16A16_SNORM, + eR16G16B16A16Uscaled = VK_FORMAT_R16G16B16A16_USCALED, + eR16G16B16A16Sscaled = VK_FORMAT_R16G16B16A16_SSCALED, + eR16G16B16A16Uint = VK_FORMAT_R16G16B16A16_UINT, + eR16G16B16A16Sint = VK_FORMAT_R16G16B16A16_SINT, + eR16G16B16A16Sfloat = VK_FORMAT_R16G16B16A16_SFLOAT, + eR32Uint = VK_FORMAT_R32_UINT, + eR32Sint = VK_FORMAT_R32_SINT, + eR32Sfloat = VK_FORMAT_R32_SFLOAT, + eR32G32Uint = VK_FORMAT_R32G32_UINT, + eR32G32Sint = VK_FORMAT_R32G32_SINT, + eR32G32Sfloat = VK_FORMAT_R32G32_SFLOAT, + eR32G32B32Uint = VK_FORMAT_R32G32B32_UINT, + eR32G32B32Sint = VK_FORMAT_R32G32B32_SINT, + eR32G32B32Sfloat = VK_FORMAT_R32G32B32_SFLOAT, + eR32G32B32A32Uint = VK_FORMAT_R32G32B32A32_UINT, + eR32G32B32A32Sint = VK_FORMAT_R32G32B32A32_SINT, + eR32G32B32A32Sfloat = VK_FORMAT_R32G32B32A32_SFLOAT, + eR64Uint = VK_FORMAT_R64_UINT, + eR64Sint = VK_FORMAT_R64_SINT, + eR64Sfloat = VK_FORMAT_R64_SFLOAT, + eR64G64Uint = VK_FORMAT_R64G64_UINT, + eR64G64Sint = VK_FORMAT_R64G64_SINT, + eR64G64Sfloat = VK_FORMAT_R64G64_SFLOAT, + eR64G64B64Uint = VK_FORMAT_R64G64B64_UINT, + eR64G64B64Sint = VK_FORMAT_R64G64B64_SINT, + eR64G64B64Sfloat = VK_FORMAT_R64G64B64_SFLOAT, + eR64G64B64A64Uint = VK_FORMAT_R64G64B64A64_UINT, + eR64G64B64A64Sint = VK_FORMAT_R64G64B64A64_SINT, + eR64G64B64A64Sfloat = VK_FORMAT_R64G64B64A64_SFLOAT, + eB10G11R11UfloatPack32 = VK_FORMAT_B10G11R11_UFLOAT_PACK32, + eE5B9G9R9UfloatPack32 = VK_FORMAT_E5B9G9R9_UFLOAT_PACK32, + eD16Unorm = VK_FORMAT_D16_UNORM, + eX8D24UnormPack32 = VK_FORMAT_X8_D24_UNORM_PACK32, + eD32Sfloat = VK_FORMAT_D32_SFLOAT, + eS8Uint = VK_FORMAT_S8_UINT, + eD16UnormS8Uint = VK_FORMAT_D16_UNORM_S8_UINT, + eD24UnormS8Uint = VK_FORMAT_D24_UNORM_S8_UINT, + eD32SfloatS8Uint = VK_FORMAT_D32_SFLOAT_S8_UINT, + eBc1RgbUnormBlock = VK_FORMAT_BC1_RGB_UNORM_BLOCK, + eBc1RgbSrgbBlock = VK_FORMAT_BC1_RGB_SRGB_BLOCK, + eBc1RgbaUnormBlock = VK_FORMAT_BC1_RGBA_UNORM_BLOCK, + eBc1RgbaSrgbBlock = VK_FORMAT_BC1_RGBA_SRGB_BLOCK, + eBc2UnormBlock = VK_FORMAT_BC2_UNORM_BLOCK, + eBc2SrgbBlock = VK_FORMAT_BC2_SRGB_BLOCK, + eBc3UnormBlock = VK_FORMAT_BC3_UNORM_BLOCK, + eBc3SrgbBlock = VK_FORMAT_BC3_SRGB_BLOCK, + eBc4UnormBlock = VK_FORMAT_BC4_UNORM_BLOCK, + eBc4SnormBlock = VK_FORMAT_BC4_SNORM_BLOCK, + eBc5UnormBlock = VK_FORMAT_BC5_UNORM_BLOCK, + eBc5SnormBlock = VK_FORMAT_BC5_SNORM_BLOCK, + eBc6HUfloatBlock = VK_FORMAT_BC6H_UFLOAT_BLOCK, + eBc6HSfloatBlock = VK_FORMAT_BC6H_SFLOAT_BLOCK, + eBc7UnormBlock = VK_FORMAT_BC7_UNORM_BLOCK, + eBc7SrgbBlock = VK_FORMAT_BC7_SRGB_BLOCK, + eEtc2R8G8B8UnormBlock = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, + eEtc2R8G8B8SrgbBlock = VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, + eEtc2R8G8B8A1UnormBlock = VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, + eEtc2R8G8B8A1SrgbBlock = 
VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, + eEtc2R8G8B8A8UnormBlock = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, + eEtc2R8G8B8A8SrgbBlock = VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, + eEacR11UnormBlock = VK_FORMAT_EAC_R11_UNORM_BLOCK, + eEacR11SnormBlock = VK_FORMAT_EAC_R11_SNORM_BLOCK, + eEacR11G11UnormBlock = VK_FORMAT_EAC_R11G11_UNORM_BLOCK, + eEacR11G11SnormBlock = VK_FORMAT_EAC_R11G11_SNORM_BLOCK, + eAstc4x4UnormBlock = VK_FORMAT_ASTC_4x4_UNORM_BLOCK, + eAstc4x4SrgbBlock = VK_FORMAT_ASTC_4x4_SRGB_BLOCK, + eAstc5x4UnormBlock = VK_FORMAT_ASTC_5x4_UNORM_BLOCK, + eAstc5x4SrgbBlock = VK_FORMAT_ASTC_5x4_SRGB_BLOCK, + eAstc5x5UnormBlock = VK_FORMAT_ASTC_5x5_UNORM_BLOCK, + eAstc5x5SrgbBlock = VK_FORMAT_ASTC_5x5_SRGB_BLOCK, + eAstc6x5UnormBlock = VK_FORMAT_ASTC_6x5_UNORM_BLOCK, + eAstc6x5SrgbBlock = VK_FORMAT_ASTC_6x5_SRGB_BLOCK, + eAstc6x6UnormBlock = VK_FORMAT_ASTC_6x6_UNORM_BLOCK, + eAstc6x6SrgbBlock = VK_FORMAT_ASTC_6x6_SRGB_BLOCK, + eAstc8x5UnormBlock = VK_FORMAT_ASTC_8x5_UNORM_BLOCK, + eAstc8x5SrgbBlock = VK_FORMAT_ASTC_8x5_SRGB_BLOCK, + eAstc8x6UnormBlock = VK_FORMAT_ASTC_8x6_UNORM_BLOCK, + eAstc8x6SrgbBlock = VK_FORMAT_ASTC_8x6_SRGB_BLOCK, + eAstc8x8UnormBlock = VK_FORMAT_ASTC_8x8_UNORM_BLOCK, + eAstc8x8SrgbBlock = VK_FORMAT_ASTC_8x8_SRGB_BLOCK, + eAstc10x5UnormBlock = VK_FORMAT_ASTC_10x5_UNORM_BLOCK, + eAstc10x5SrgbBlock = VK_FORMAT_ASTC_10x5_SRGB_BLOCK, + eAstc10x6UnormBlock = VK_FORMAT_ASTC_10x6_UNORM_BLOCK, + eAstc10x6SrgbBlock = VK_FORMAT_ASTC_10x6_SRGB_BLOCK, + eAstc10x8UnormBlock = VK_FORMAT_ASTC_10x8_UNORM_BLOCK, + eAstc10x8SrgbBlock = VK_FORMAT_ASTC_10x8_SRGB_BLOCK, + eAstc10x10UnormBlock = VK_FORMAT_ASTC_10x10_UNORM_BLOCK, + eAstc10x10SrgbBlock = VK_FORMAT_ASTC_10x10_SRGB_BLOCK, + eAstc12x10UnormBlock = VK_FORMAT_ASTC_12x10_UNORM_BLOCK, + eAstc12x10SrgbBlock = VK_FORMAT_ASTC_12x10_SRGB_BLOCK, + eAstc12x12UnormBlock = VK_FORMAT_ASTC_12x12_UNORM_BLOCK, + eAstc12x12SrgbBlock = VK_FORMAT_ASTC_12x12_SRGB_BLOCK, + eG8B8G8R8422Unorm = VK_FORMAT_G8B8G8R8_422_UNORM, + eG8B8G8R8422UnormKHR = VK_FORMAT_G8B8G8R8_422_UNORM, + eB8G8R8G8422Unorm = VK_FORMAT_B8G8R8G8_422_UNORM, + eB8G8R8G8422UnormKHR = VK_FORMAT_B8G8R8G8_422_UNORM, + eG8B8R83Plane420Unorm = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, + eG8B8R83Plane420UnormKHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, + eG8B8R82Plane420Unorm = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, + eG8B8R82Plane420UnormKHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, + eG8B8R83Plane422Unorm = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, + eG8B8R83Plane422UnormKHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, + eG8B8R82Plane422Unorm = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, + eG8B8R82Plane422UnormKHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, + eG8B8R83Plane444Unorm = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, + eG8B8R83Plane444UnormKHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, + eR10X6UnormPack16 = VK_FORMAT_R10X6_UNORM_PACK16, + eR10X6UnormPack16KHR = VK_FORMAT_R10X6_UNORM_PACK16, + eR10X6G10X6Unorm2Pack16 = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, + eR10X6G10X6Unorm2Pack16KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, + eR10X6G10X6B10X6A10X6Unorm4Pack16 = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, + eR10X6G10X6B10X6A10X6Unorm4Pack16KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, + eG10X6B10X6G10X6R10X6422Unorm4Pack16 = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, + eG10X6B10X6G10X6R10X6422Unorm4Pack16KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, + eB10X6G10X6R10X6G10X6422Unorm4Pack16 = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, + eB10X6G10X6R10X6G10X6422Unorm4Pack16KHR = 
VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, + eG10X6B10X6R10X63Plane420Unorm3Pack16 = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, + eG10X6B10X6R10X63Plane420Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, + eG10X6B10X6R10X62Plane420Unorm3Pack16 = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, + eG10X6B10X6R10X62Plane420Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, + eG10X6B10X6R10X63Plane422Unorm3Pack16 = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, + eG10X6B10X6R10X63Plane422Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, + eG10X6B10X6R10X62Plane422Unorm3Pack16 = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, + eG10X6B10X6R10X62Plane422Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, + eG10X6B10X6R10X63Plane444Unorm3Pack16 = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, + eG10X6B10X6R10X63Plane444Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, + eR12X4UnormPack16 = VK_FORMAT_R12X4_UNORM_PACK16, + eR12X4UnormPack16KHR = VK_FORMAT_R12X4_UNORM_PACK16, + eR12X4G12X4Unorm2Pack16 = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, + eR12X4G12X4Unorm2Pack16KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, + eR12X4G12X4B12X4A12X4Unorm4Pack16 = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, + eR12X4G12X4B12X4A12X4Unorm4Pack16KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, + eG12X4B12X4G12X4R12X4422Unorm4Pack16 = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, + eG12X4B12X4G12X4R12X4422Unorm4Pack16KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, + eB12X4G12X4R12X4G12X4422Unorm4Pack16 = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, + eB12X4G12X4R12X4G12X4422Unorm4Pack16KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, + eG12X4B12X4R12X43Plane420Unorm3Pack16 = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, + eG12X4B12X4R12X43Plane420Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, + eG12X4B12X4R12X42Plane420Unorm3Pack16 = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, + eG12X4B12X4R12X42Plane420Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, + eG12X4B12X4R12X43Plane422Unorm3Pack16 = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, + eG12X4B12X4R12X43Plane422Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, + eG12X4B12X4R12X42Plane422Unorm3Pack16 = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, + eG12X4B12X4R12X42Plane422Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, + eG12X4B12X4R12X43Plane444Unorm3Pack16 = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, + eG12X4B12X4R12X43Plane444Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, + eG16B16G16R16422Unorm = VK_FORMAT_G16B16G16R16_422_UNORM, + eG16B16G16R16422UnormKHR = VK_FORMAT_G16B16G16R16_422_UNORM, + eB16G16R16G16422Unorm = VK_FORMAT_B16G16R16G16_422_UNORM, + eB16G16R16G16422UnormKHR = VK_FORMAT_B16G16R16G16_422_UNORM, + eG16B16R163Plane420Unorm = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, + eG16B16R163Plane420UnormKHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, + eG16B16R162Plane420Unorm = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, + eG16B16R162Plane420UnormKHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, + eG16B16R163Plane422Unorm = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, + eG16B16R163Plane422UnormKHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, + eG16B16R162Plane422Unorm = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, + eG16B16R162Plane422UnormKHR = 
VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, + eG16B16R163Plane444Unorm = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, + eG16B16R163Plane444UnormKHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, + ePvrtc12BppUnormBlockIMG = VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG, + ePvrtc14BppUnormBlockIMG = VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG, + ePvrtc22BppUnormBlockIMG = VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG, + ePvrtc24BppUnormBlockIMG = VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG, + ePvrtc12BppSrgbBlockIMG = VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG, + ePvrtc14BppSrgbBlockIMG = VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG, + ePvrtc22BppSrgbBlockIMG = VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG, + ePvrtc24BppSrgbBlockIMG = VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG + }; + + struct VertexInputAttributeDescription + { + VertexInputAttributeDescription( uint32_t location_ = 0, + uint32_t binding_ = 0, + Format format_ = Format::eUndefined, + uint32_t offset_ = 0 ) + : location( location_ ) + , binding( binding_ ) + , format( format_ ) + , offset( offset_ ) + { + } + + VertexInputAttributeDescription( VkVertexInputAttributeDescription const & rhs ) + { + memcpy( this, &rhs, sizeof( VertexInputAttributeDescription ) ); + } + + VertexInputAttributeDescription& operator=( VkVertexInputAttributeDescription const & rhs ) + { + memcpy( this, &rhs, sizeof( VertexInputAttributeDescription ) ); + return *this; + } + VertexInputAttributeDescription& setLocation( uint32_t location_ ) + { + location = location_; + return *this; + } + + VertexInputAttributeDescription& setBinding( uint32_t binding_ ) + { + binding = binding_; + return *this; + } + + VertexInputAttributeDescription& setFormat( Format format_ ) + { + format = format_; + return *this; + } + + VertexInputAttributeDescription& setOffset( uint32_t offset_ ) + { + offset = offset_; + return *this; + } + + operator VkVertexInputAttributeDescription const&() const + { + return *reinterpret_cast(this); + } + + operator VkVertexInputAttributeDescription &() + { + return *reinterpret_cast(this); + } + + bool operator==( VertexInputAttributeDescription const& rhs ) const + { + return ( location == rhs.location ) + && ( binding == rhs.binding ) + && ( format == rhs.format ) + && ( offset == rhs.offset ); + } + + bool operator!=( VertexInputAttributeDescription const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t location; + uint32_t binding; + Format format; + uint32_t offset; + }; + static_assert( sizeof( VertexInputAttributeDescription ) == sizeof( VkVertexInputAttributeDescription ), "struct and wrapper have different size!" 
); + + enum class StructureType + { + eApplicationInfo = VK_STRUCTURE_TYPE_APPLICATION_INFO, + eInstanceCreateInfo = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + eDeviceQueueCreateInfo = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, + eDeviceCreateInfo = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, + eSubmitInfo = VK_STRUCTURE_TYPE_SUBMIT_INFO, + eMemoryAllocateInfo = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + eMappedMemoryRange = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, + eBindSparseInfo = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, + eFenceCreateInfo = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + eSemaphoreCreateInfo = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, + eEventCreateInfo = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, + eQueryPoolCreateInfo = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, + eBufferCreateInfo = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + eBufferViewCreateInfo = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO, + eImageCreateInfo = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + eImageViewCreateInfo = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + eShaderModuleCreateInfo = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, + ePipelineCacheCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, + ePipelineShaderStageCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, + ePipelineVertexInputStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + ePipelineInputAssemblyStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, + ePipelineTessellationStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, + ePipelineViewportStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, + ePipelineRasterizationStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, + ePipelineMultisampleStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + ePipelineDepthStencilStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, + ePipelineColorBlendStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, + ePipelineDynamicStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, + eGraphicsPipelineCreateInfo = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, + eComputePipelineCreateInfo = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + ePipelineLayoutCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, + eSamplerCreateInfo = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, + eDescriptorSetLayoutCreateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + eDescriptorPoolCreateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + eDescriptorSetAllocateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + eWriteDescriptorSet = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + eCopyDescriptorSet = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET, + eFramebufferCreateInfo = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, + eRenderPassCreateInfo = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, + eCommandPoolCreateInfo = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + eCommandBufferAllocateInfo = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + eCommandBufferInheritanceInfo = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, + eCommandBufferBeginInfo = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + eRenderPassBeginInfo = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, + eBufferMemoryBarrier = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, + eImageMemoryBarrier = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + eMemoryBarrier = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + eLoaderInstanceCreateInfo = 
VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO, + eLoaderDeviceCreateInfo = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO, + ePhysicalDeviceSubgroupProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES, + eBindBufferMemoryInfo = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, + eBindBufferMemoryInfoKHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, + eBindImageMemoryInfo = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, + eBindImageMemoryInfoKHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, + ePhysicalDevice16BitStorageFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + ePhysicalDevice16BitStorageFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + eMemoryDedicatedRequirements = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, + eMemoryDedicatedRequirementsKHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, + eMemoryDedicatedAllocateInfo = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, + eMemoryDedicatedAllocateInfoKHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, + eMemoryAllocateFlagsInfo = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, + eMemoryAllocateFlagsInfoKHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, + eDeviceGroupRenderPassBeginInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + eDeviceGroupRenderPassBeginInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + eDeviceGroupCommandBufferBeginInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + eDeviceGroupCommandBufferBeginInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + eDeviceGroupSubmitInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, + eDeviceGroupSubmitInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, + eDeviceGroupBindSparseInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, + eDeviceGroupBindSparseInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, + eBindBufferMemoryDeviceGroupInfo = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + eBindBufferMemoryDeviceGroupInfoKHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + eBindImageMemoryDeviceGroupInfo = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + eBindImageMemoryDeviceGroupInfoKHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + ePhysicalDeviceGroupProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, + ePhysicalDeviceGroupPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, + eDeviceGroupDeviceCreateInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, + eDeviceGroupDeviceCreateInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, + eBufferMemoryRequirementsInfo2 = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, + eBufferMemoryRequirementsInfo2KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, + eImageMemoryRequirementsInfo2 = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, + eImageMemoryRequirementsInfo2KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, + eImageSparseMemoryRequirementsInfo2 = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, + eImageSparseMemoryRequirementsInfo2KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, + eMemoryRequirements2 = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, + eMemoryRequirements2KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, + eSparseImageMemoryRequirements2 = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, + eSparseImageMemoryRequirements2KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, + ePhysicalDeviceFeatures2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + 
ePhysicalDeviceFeatures2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + ePhysicalDeviceProperties2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + ePhysicalDeviceProperties2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + eFormatProperties2 = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + eFormatProperties2KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + eImageFormatProperties2 = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + eImageFormatProperties2KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + ePhysicalDeviceImageFormatInfo2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + ePhysicalDeviceImageFormatInfo2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + eQueueFamilyProperties2 = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + eQueueFamilyProperties2KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + ePhysicalDeviceMemoryProperties2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + ePhysicalDeviceMemoryProperties2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + eSparseImageFormatProperties2 = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, + eSparseImageFormatProperties2KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, + ePhysicalDeviceSparseImageFormatInfo2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + ePhysicalDeviceSparseImageFormatInfo2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + ePhysicalDevicePointClippingProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, + ePhysicalDevicePointClippingPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, + eRenderPassInputAttachmentAspectCreateInfo = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, + eRenderPassInputAttachmentAspectCreateInfoKHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, + eImageViewUsageCreateInfo = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, + eImageViewUsageCreateInfoKHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, + ePipelineTessellationDomainOriginStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, + ePipelineTessellationDomainOriginStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, + eRenderPassMultiviewCreateInfo = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, + eRenderPassMultiviewCreateInfoKHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, + ePhysicalDeviceMultiviewFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, + ePhysicalDeviceMultiviewFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, + ePhysicalDeviceMultiviewProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, + ePhysicalDeviceMultiviewPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, + ePhysicalDeviceVariablePointerFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES, + ePhysicalDeviceVariablePointerFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES, + eProtectedSubmitInfo = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO, + ePhysicalDeviceProtectedMemoryFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES, + ePhysicalDeviceProtectedMemoryProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES, + eDeviceQueueInfo2 = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, + eSamplerYcbcrConversionCreateInfo = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, + eSamplerYcbcrConversionCreateInfoKHR = 
VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, + eSamplerYcbcrConversionInfo = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, + eSamplerYcbcrConversionInfoKHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, + eBindImagePlaneMemoryInfo = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO, + eBindImagePlaneMemoryInfoKHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO, + eImagePlaneMemoryRequirementsInfo = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO, + eImagePlaneMemoryRequirementsInfoKHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO, + ePhysicalDeviceSamplerYcbcrConversionFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, + ePhysicalDeviceSamplerYcbcrConversionFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, + eSamplerYcbcrConversionImageFormatProperties = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, + eSamplerYcbcrConversionImageFormatPropertiesKHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, + eDescriptorUpdateTemplateCreateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + eDescriptorUpdateTemplateCreateInfoKHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + ePhysicalDeviceExternalImageFormatInfo = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + ePhysicalDeviceExternalImageFormatInfoKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + eExternalImageFormatProperties = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, + eExternalImageFormatPropertiesKHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, + ePhysicalDeviceExternalBufferInfo = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + ePhysicalDeviceExternalBufferInfoKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + eExternalBufferProperties = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, + eExternalBufferPropertiesKHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, + ePhysicalDeviceIdProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, + ePhysicalDeviceIdPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, + eExternalMemoryBufferCreateInfo = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + eExternalMemoryBufferCreateInfoKHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + eExternalMemoryImageCreateInfo = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + eExternalMemoryImageCreateInfoKHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + eExportMemoryAllocateInfo = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, + eExportMemoryAllocateInfoKHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, + ePhysicalDeviceExternalFenceInfo = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, + ePhysicalDeviceExternalFenceInfoKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, + eExternalFenceProperties = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES, + eExternalFencePropertiesKHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES, + eExportFenceCreateInfo = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, + eExportFenceCreateInfoKHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, + eExportSemaphoreCreateInfo = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + eExportSemaphoreCreateInfoKHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + ePhysicalDeviceExternalSemaphoreInfo = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + ePhysicalDeviceExternalSemaphoreInfoKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + eExternalSemaphoreProperties = 
VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + eExternalSemaphorePropertiesKHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + ePhysicalDeviceMaintenance3Properties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + ePhysicalDeviceMaintenance3PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + eDescriptorSetLayoutSupport = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, + eDescriptorSetLayoutSupportKHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, + ePhysicalDeviceShaderDrawParameterFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES, + eSwapchainCreateInfoKHR = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + ePresentInfoKHR = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + eDeviceGroupPresentCapabilitiesKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR, + eImageSwapchainCreateInfoKHR = VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR, + eBindImageMemorySwapchainInfoKHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR, + eAcquireNextImageInfoKHR = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR, + eDeviceGroupPresentInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR, + eDeviceGroupSwapchainCreateInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR, + eDisplayModeCreateInfoKHR = VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR, + eDisplaySurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR, + eDisplayPresentInfoKHR = VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR, + eXlibSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, + eXcbSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR, + eWaylandSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR, + eMirSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR, + eAndroidSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, + eWin32SurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR, + eDebugReportCallbackCreateInfoEXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + eDebugReportCreateInfoEXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + ePipelineRasterizationStateRasterizationOrderAMD = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD, + eDebugMarkerObjectNameInfoEXT = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT, + eDebugMarkerObjectTagInfoEXT = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT, + eDebugMarkerMarkerInfoEXT = VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT, + eDedicatedAllocationImageCreateInfoNV = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV, + eDedicatedAllocationBufferCreateInfoNV = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV, + eDedicatedAllocationMemoryAllocateInfoNV = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV, + eTextureLodGatherFormatPropertiesAMD = VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD, + ePhysicalDeviceCornerSampledImageFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV, + eExternalMemoryImageCreateInfoNV = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV, + eExportMemoryAllocateInfoNV = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV, + eImportMemoryWin32HandleInfoNV = VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV, + eExportMemoryWin32HandleInfoNV = VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV, + eWin32KeyedMutexAcquireReleaseInfoNV = VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV, + eValidationFlagsEXT = 
VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT, + eViSurfaceCreateInfoNN = VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN, + eImageViewAstcDecodeModeEXT = VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT, + ePhysicalDeviceAstcDecodeFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT, + eImportMemoryWin32HandleInfoKHR = VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, + eExportMemoryWin32HandleInfoKHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR, + eMemoryWin32HandlePropertiesKHR = VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR, + eMemoryGetWin32HandleInfoKHR = VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR, + eImportMemoryFdInfoKHR = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, + eMemoryFdPropertiesKHR = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR, + eMemoryGetFdInfoKHR = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, + eWin32KeyedMutexAcquireReleaseInfoKHR = VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR, + eImportSemaphoreWin32HandleInfoKHR = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, + eExportSemaphoreWin32HandleInfoKHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, + eD3D12FenceSubmitInfoKHR = VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR, + eSemaphoreGetWin32HandleInfoKHR = VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, + eImportSemaphoreFdInfoKHR = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, + eSemaphoreGetFdInfoKHR = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, + ePhysicalDevicePushDescriptorPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR, + eCommandBufferInheritanceConditionalRenderingInfoEXT = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT, + ePhysicalDeviceConditionalRenderingFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT, + eConditionalRenderingBeginInfoEXT = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT, + ePresentRegionsKHR = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR, + eObjectTableCreateInfoNVX = VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX, + eIndirectCommandsLayoutCreateInfoNVX = VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX, + eCmdProcessCommandsInfoNVX = VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX, + eCmdReserveSpaceForCommandsInfoNVX = VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX, + eDeviceGeneratedCommandsLimitsNVX = VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX, + eDeviceGeneratedCommandsFeaturesNVX = VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX, + ePipelineViewportWScalingStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV, + eSurfaceCapabilities2EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT, + eDisplayPowerInfoEXT = VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT, + eDeviceEventInfoEXT = VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT, + eDisplayEventInfoEXT = VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT, + eSwapchainCounterCreateInfoEXT = VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT, + ePresentTimesInfoGOOGLE = VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE, + ePhysicalDeviceMultiviewPerViewAttributesPropertiesNVX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX, + ePipelineViewportSwizzleStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV, + ePhysicalDeviceDiscardRectanglePropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT, + ePipelineDiscardRectangleStateCreateInfoEXT = 
VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT, + ePhysicalDeviceConservativeRasterizationPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT, + ePipelineRasterizationConservativeStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT, + eHdrMetadataEXT = VK_STRUCTURE_TYPE_HDR_METADATA_EXT, + eAttachmentDescription2KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR, + eAttachmentReference2KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR, + eSubpassDescription2KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR, + eSubpassDependency2KHR = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR, + eRenderPassCreateInfo2KHR = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR, + eSubpassBeginInfoKHR = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, + eSubpassEndInfoKHR = VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR, + eSharedPresentSurfaceCapabilitiesKHR = VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR, + eImportFenceWin32HandleInfoKHR = VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, + eExportFenceWin32HandleInfoKHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR, + eFenceGetWin32HandleInfoKHR = VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, + eImportFenceFdInfoKHR = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, + eFenceGetFdInfoKHR = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, + ePhysicalDeviceSurfaceInfo2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR, + eSurfaceCapabilities2KHR = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR, + eSurfaceFormat2KHR = VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR, + eDisplayProperties2KHR = VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR, + eDisplayPlaneProperties2KHR = VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR, + eDisplayModeProperties2KHR = VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR, + eDisplayPlaneInfo2KHR = VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR, + eDisplayPlaneCapabilities2KHR = VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR, + eIosSurfaceCreateInfoMVK = VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK, + eMacosSurfaceCreateInfoMVK = VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK, + eDebugUtilsObjectNameInfoEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT, + eDebugUtilsObjectTagInfoEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT, + eDebugUtilsLabelEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, + eDebugUtilsMessengerCallbackDataEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT, + eDebugUtilsMessengerCreateInfoEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + eAndroidHardwareBufferUsageANDROID = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID, + eAndroidHardwareBufferPropertiesANDROID = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID, + eAndroidHardwareBufferFormatPropertiesANDROID = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID, + eImportAndroidHardwareBufferInfoANDROID = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID, + eMemoryGetAndroidHardwareBufferInfoANDROID = VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID, + eExternalFormatANDROID = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID, + ePhysicalDeviceSamplerFilterMinmaxPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT, + eSamplerReductionModeCreateInfoEXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT, + ePhysicalDeviceInlineUniformBlockFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT, + 
ePhysicalDeviceInlineUniformBlockPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT, + eWriteDescriptorSetInlineUniformBlockEXT = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT, + eDescriptorPoolInlineUniformBlockCreateInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT, + eSampleLocationsInfoEXT = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT, + eRenderPassSampleLocationsBeginInfoEXT = VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT, + ePipelineSampleLocationsStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT, + ePhysicalDeviceSampleLocationsPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT, + eMultisamplePropertiesEXT = VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT, + eImageFormatListCreateInfoKHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR, + ePhysicalDeviceBlendOperationAdvancedFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT, + ePhysicalDeviceBlendOperationAdvancedPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT, + ePipelineColorBlendAdvancedStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT, + ePipelineCoverageToColorStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV, + ePipelineCoverageModulationStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV, + eValidationCacheCreateInfoEXT = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT, + eShaderModuleValidationCacheCreateInfoEXT = VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT, + eDescriptorSetLayoutBindingFlagsCreateInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT, + ePhysicalDeviceDescriptorIndexingFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT, + ePhysicalDeviceDescriptorIndexingPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT, + eDescriptorSetVariableDescriptorCountAllocateInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT, + eDescriptorSetVariableDescriptorCountLayoutSupportEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT, + ePipelineViewportShadingRateImageStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV, + ePhysicalDeviceShadingRateImageFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV, + ePhysicalDeviceShadingRateImagePropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV, + ePipelineViewportCoarseSampleOrderStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV, + eRaytracingPipelineCreateInfoNVX = VK_STRUCTURE_TYPE_RAYTRACING_PIPELINE_CREATE_INFO_NVX, + eAccelerationStructureCreateInfoNVX = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NVX, + eGeometryInstanceNVX = VK_STRUCTURE_TYPE_GEOMETRY_INSTANCE_NVX, + eGeometryNVX = VK_STRUCTURE_TYPE_GEOMETRY_NVX, + eGeometryTrianglesNVX = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NVX, + eGeometryAabbNVX = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NVX, + eBindAccelerationStructureMemoryInfoNVX = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NVX, + eDescriptorAccelerationStructureInfoNVX = VK_STRUCTURE_TYPE_DESCRIPTOR_ACCELERATION_STRUCTURE_INFO_NVX, + eAccelerationStructureMemoryRequirementsInfoNVX = 
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NVX, + ePhysicalDeviceRaytracingPropertiesNVX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAYTRACING_PROPERTIES_NVX, + eHitShaderModuleCreateInfoNVX = VK_STRUCTURE_TYPE_HIT_SHADER_MODULE_CREATE_INFO_NVX, + ePhysicalDeviceRepresentativeFragmentTestFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV, + ePipelineRepresentativeFragmentTestStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV, + eDeviceQueueGlobalPriorityCreateInfoEXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT, + ePhysicalDevice8BitStorageFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR, + eImportMemoryHostPointerInfoEXT = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT, + eMemoryHostPointerPropertiesEXT = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT, + ePhysicalDeviceExternalMemoryHostPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT, + ePhysicalDeviceShaderCorePropertiesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD, + ePhysicalDeviceVertexAttributeDivisorPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT, + ePipelineVertexInputDivisorStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT, + ePhysicalDeviceVertexAttributeDivisorFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT, + ePhysicalDeviceComputeShaderDerivativesFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV, + ePhysicalDeviceMeshShaderFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV, + ePhysicalDeviceMeshShaderPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV, + ePhysicalDeviceFragmentShaderBarycentricFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV, + ePhysicalDeviceShaderImageFootprintFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV, + ePipelineViewportExclusiveScissorStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV, + ePhysicalDeviceExclusiveScissorFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV, + eCheckpointDataNV = VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV, + eQueueFamilyCheckpointPropertiesNV = VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV, + ePhysicalDeviceVulkanMemoryModelFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR + }; + + struct ApplicationInfo + { + ApplicationInfo( const char* pApplicationName_ = nullptr, + uint32_t applicationVersion_ = 0, + const char* pEngineName_ = nullptr, + uint32_t engineVersion_ = 0, + uint32_t apiVersion_ = 0 ) + : pApplicationName( pApplicationName_ ) + , applicationVersion( applicationVersion_ ) + , pEngineName( pEngineName_ ) + , engineVersion( engineVersion_ ) + , apiVersion( apiVersion_ ) + { + } + + ApplicationInfo( VkApplicationInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ApplicationInfo ) ); + } + + ApplicationInfo& operator=( VkApplicationInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ApplicationInfo ) ); + return *this; + } + ApplicationInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ApplicationInfo& setPApplicationName( const char* pApplicationName_ ) + { + pApplicationName = pApplicationName_; + return *this; + } + + ApplicationInfo& 
setApplicationVersion( uint32_t applicationVersion_ ) + { + applicationVersion = applicationVersion_; + return *this; + } + + ApplicationInfo& setPEngineName( const char* pEngineName_ ) + { + pEngineName = pEngineName_; + return *this; + } + + ApplicationInfo& setEngineVersion( uint32_t engineVersion_ ) + { + engineVersion = engineVersion_; + return *this; + } + + ApplicationInfo& setApiVersion( uint32_t apiVersion_ ) + { + apiVersion = apiVersion_; + return *this; + } + + operator VkApplicationInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkApplicationInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ApplicationInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pApplicationName == rhs.pApplicationName ) + && ( applicationVersion == rhs.applicationVersion ) + && ( pEngineName == rhs.pEngineName ) + && ( engineVersion == rhs.engineVersion ) + && ( apiVersion == rhs.apiVersion ); + } + + bool operator!=( ApplicationInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eApplicationInfo; + + public: + const void* pNext = nullptr; + const char* pApplicationName; + uint32_t applicationVersion; + const char* pEngineName; + uint32_t engineVersion; + uint32_t apiVersion; + }; + static_assert( sizeof( ApplicationInfo ) == sizeof( VkApplicationInfo ), "struct and wrapper have different size!" ); + + struct InstanceCreateInfo + { + InstanceCreateInfo( InstanceCreateFlags flags_ = InstanceCreateFlags(), + const ApplicationInfo* pApplicationInfo_ = nullptr, + uint32_t enabledLayerCount_ = 0, + const char* const* ppEnabledLayerNames_ = nullptr, + uint32_t enabledExtensionCount_ = 0, + const char* const* ppEnabledExtensionNames_ = nullptr ) + : flags( flags_ ) + , pApplicationInfo( pApplicationInfo_ ) + , enabledLayerCount( enabledLayerCount_ ) + , ppEnabledLayerNames( ppEnabledLayerNames_ ) + , enabledExtensionCount( enabledExtensionCount_ ) + , ppEnabledExtensionNames( ppEnabledExtensionNames_ ) + { + } + + InstanceCreateInfo( VkInstanceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( InstanceCreateInfo ) ); + } + + InstanceCreateInfo& operator=( VkInstanceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( InstanceCreateInfo ) ); + return *this; + } + InstanceCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + InstanceCreateInfo& setFlags( InstanceCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + InstanceCreateInfo& setPApplicationInfo( const ApplicationInfo* pApplicationInfo_ ) + { + pApplicationInfo = pApplicationInfo_; + return *this; + } + + InstanceCreateInfo& setEnabledLayerCount( uint32_t enabledLayerCount_ ) + { + enabledLayerCount = enabledLayerCount_; + return *this; + } + + InstanceCreateInfo& setPpEnabledLayerNames( const char* const* ppEnabledLayerNames_ ) + { + ppEnabledLayerNames = ppEnabledLayerNames_; + return *this; + } + + InstanceCreateInfo& setEnabledExtensionCount( uint32_t enabledExtensionCount_ ) + { + enabledExtensionCount = enabledExtensionCount_; + return *this; + } + + InstanceCreateInfo& setPpEnabledExtensionNames( const char* const* ppEnabledExtensionNames_ ) + { + ppEnabledExtensionNames = ppEnabledExtensionNames_; + return *this; + } + + operator VkInstanceCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkInstanceCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( InstanceCreateInfo const& 
rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pApplicationInfo == rhs.pApplicationInfo ) + && ( enabledLayerCount == rhs.enabledLayerCount ) + && ( ppEnabledLayerNames == rhs.ppEnabledLayerNames ) + && ( enabledExtensionCount == rhs.enabledExtensionCount ) + && ( ppEnabledExtensionNames == rhs.ppEnabledExtensionNames ); + } + + bool operator!=( InstanceCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eInstanceCreateInfo; + + public: + const void* pNext = nullptr; + InstanceCreateFlags flags; + const ApplicationInfo* pApplicationInfo; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + }; + static_assert( sizeof( InstanceCreateInfo ) == sizeof( VkInstanceCreateInfo ), "struct and wrapper have different size!" ); + + struct MemoryAllocateInfo + { + MemoryAllocateInfo( DeviceSize allocationSize_ = 0, + uint32_t memoryTypeIndex_ = 0 ) + : allocationSize( allocationSize_ ) + , memoryTypeIndex( memoryTypeIndex_ ) + { + } + + MemoryAllocateInfo( VkMemoryAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryAllocateInfo ) ); + } + + MemoryAllocateInfo& operator=( VkMemoryAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryAllocateInfo ) ); + return *this; + } + MemoryAllocateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + MemoryAllocateInfo& setAllocationSize( DeviceSize allocationSize_ ) + { + allocationSize = allocationSize_; + return *this; + } + + MemoryAllocateInfo& setMemoryTypeIndex( uint32_t memoryTypeIndex_ ) + { + memoryTypeIndex = memoryTypeIndex_; + return *this; + } + + operator VkMemoryAllocateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryAllocateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryAllocateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( allocationSize == rhs.allocationSize ) + && ( memoryTypeIndex == rhs.memoryTypeIndex ); + } + + bool operator!=( MemoryAllocateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryAllocateInfo; + + public: + const void* pNext = nullptr; + DeviceSize allocationSize; + uint32_t memoryTypeIndex; + }; + static_assert( sizeof( MemoryAllocateInfo ) == sizeof( VkMemoryAllocateInfo ), "struct and wrapper have different size!" 
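(Editorial aside, not part of the patch: ApplicationInfo and InstanceCreateInfo are normally chained together when the instance is created. A minimal sketch using the createInstance helper this header provides; the extension list and the "q2vkpt" strings are placeholders:)

    #include <vulkan/vulkan.hpp>
    #include <vector>

    // Creates a Vulkan instance from a caller-supplied extension list.
    vk::Instance createExampleInstance(const std::vector<const char*>& requiredExtensions)
    {
        vk::ApplicationInfo appInfo = vk::ApplicationInfo()
            .setPApplicationName("q2vkpt")
            .setApplicationVersion(VK_MAKE_VERSION(1, 0, 0))
            .setPEngineName("vkpt")
            .setEngineVersion(VK_MAKE_VERSION(1, 0, 0))
            .setApiVersion(VK_API_VERSION_1_1);

        vk::InstanceCreateInfo createInfo = vk::InstanceCreateInfo()
            .setPApplicationInfo(&appInfo)
            .setEnabledExtensionCount(static_cast<uint32_t>(requiredExtensions.size()))
            .setPpEnabledExtensionNames(requiredExtensions.data());

        return vk::createInstance(createInfo);  // throws on failure when exceptions are enabled
    }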
); + + struct MappedMemoryRange + { + MappedMemoryRange( DeviceMemory memory_ = DeviceMemory(), + DeviceSize offset_ = 0, + DeviceSize size_ = 0 ) + : memory( memory_ ) + , offset( offset_ ) + , size( size_ ) + { + } + + MappedMemoryRange( VkMappedMemoryRange const & rhs ) + { + memcpy( this, &rhs, sizeof( MappedMemoryRange ) ); + } + + MappedMemoryRange& operator=( VkMappedMemoryRange const & rhs ) + { + memcpy( this, &rhs, sizeof( MappedMemoryRange ) ); + return *this; + } + MappedMemoryRange& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + MappedMemoryRange& setMemory( DeviceMemory memory_ ) + { + memory = memory_; + return *this; + } + + MappedMemoryRange& setOffset( DeviceSize offset_ ) + { + offset = offset_; + return *this; + } + + MappedMemoryRange& setSize( DeviceSize size_ ) + { + size = size_; + return *this; + } + + operator VkMappedMemoryRange const&() const + { + return *reinterpret_cast(this); + } + + operator VkMappedMemoryRange &() + { + return *reinterpret_cast(this); + } + + bool operator==( MappedMemoryRange const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memory == rhs.memory ) + && ( offset == rhs.offset ) + && ( size == rhs.size ); + } + + bool operator!=( MappedMemoryRange const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMappedMemoryRange; + + public: + const void* pNext = nullptr; + DeviceMemory memory; + DeviceSize offset; + DeviceSize size; + }; + static_assert( sizeof( MappedMemoryRange ) == sizeof( VkMappedMemoryRange ), "struct and wrapper have different size!" ); + + struct WriteDescriptorSet + { + WriteDescriptorSet( DescriptorSet dstSet_ = DescriptorSet(), + uint32_t dstBinding_ = 0, + uint32_t dstArrayElement_ = 0, + uint32_t descriptorCount_ = 0, + DescriptorType descriptorType_ = DescriptorType::eSampler, + const DescriptorImageInfo* pImageInfo_ = nullptr, + const DescriptorBufferInfo* pBufferInfo_ = nullptr, + const BufferView* pTexelBufferView_ = nullptr ) + : dstSet( dstSet_ ) + , dstBinding( dstBinding_ ) + , dstArrayElement( dstArrayElement_ ) + , descriptorCount( descriptorCount_ ) + , descriptorType( descriptorType_ ) + , pImageInfo( pImageInfo_ ) + , pBufferInfo( pBufferInfo_ ) + , pTexelBufferView( pTexelBufferView_ ) + { + } + + WriteDescriptorSet( VkWriteDescriptorSet const & rhs ) + { + memcpy( this, &rhs, sizeof( WriteDescriptorSet ) ); + } + + WriteDescriptorSet& operator=( VkWriteDescriptorSet const & rhs ) + { + memcpy( this, &rhs, sizeof( WriteDescriptorSet ) ); + return *this; + } + WriteDescriptorSet& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + WriteDescriptorSet& setDstSet( DescriptorSet dstSet_ ) + { + dstSet = dstSet_; + return *this; + } + + WriteDescriptorSet& setDstBinding( uint32_t dstBinding_ ) + { + dstBinding = dstBinding_; + return *this; + } + + WriteDescriptorSet& setDstArrayElement( uint32_t dstArrayElement_ ) + { + dstArrayElement = dstArrayElement_; + return *this; + } + + WriteDescriptorSet& setDescriptorCount( uint32_t descriptorCount_ ) + { + descriptorCount = descriptorCount_; + return *this; + } + + WriteDescriptorSet& setDescriptorType( DescriptorType descriptorType_ ) + { + descriptorType = descriptorType_; + return *this; + } + + WriteDescriptorSet& setPImageInfo( const DescriptorImageInfo* pImageInfo_ ) + { + pImageInfo = pImageInfo_; + return *this; + } + + WriteDescriptorSet& setPBufferInfo( const DescriptorBufferInfo* pBufferInfo_ ) + { + 
pBufferInfo = pBufferInfo_; + return *this; + } + + WriteDescriptorSet& setPTexelBufferView( const BufferView* pTexelBufferView_ ) + { + pTexelBufferView = pTexelBufferView_; + return *this; + } + + operator VkWriteDescriptorSet const&() const + { + return *reinterpret_cast(this); + } + + operator VkWriteDescriptorSet &() + { + return *reinterpret_cast(this); + } + + bool operator==( WriteDescriptorSet const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( dstSet == rhs.dstSet ) + && ( dstBinding == rhs.dstBinding ) + && ( dstArrayElement == rhs.dstArrayElement ) + && ( descriptorCount == rhs.descriptorCount ) + && ( descriptorType == rhs.descriptorType ) + && ( pImageInfo == rhs.pImageInfo ) + && ( pBufferInfo == rhs.pBufferInfo ) + && ( pTexelBufferView == rhs.pTexelBufferView ); + } + + bool operator!=( WriteDescriptorSet const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eWriteDescriptorSet; + + public: + const void* pNext = nullptr; + DescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + DescriptorType descriptorType; + const DescriptorImageInfo* pImageInfo; + const DescriptorBufferInfo* pBufferInfo; + const BufferView* pTexelBufferView; + }; + static_assert( sizeof( WriteDescriptorSet ) == sizeof( VkWriteDescriptorSet ), "struct and wrapper have different size!" ); + + struct CopyDescriptorSet + { + CopyDescriptorSet( DescriptorSet srcSet_ = DescriptorSet(), + uint32_t srcBinding_ = 0, + uint32_t srcArrayElement_ = 0, + DescriptorSet dstSet_ = DescriptorSet(), + uint32_t dstBinding_ = 0, + uint32_t dstArrayElement_ = 0, + uint32_t descriptorCount_ = 0 ) + : srcSet( srcSet_ ) + , srcBinding( srcBinding_ ) + , srcArrayElement( srcArrayElement_ ) + , dstSet( dstSet_ ) + , dstBinding( dstBinding_ ) + , dstArrayElement( dstArrayElement_ ) + , descriptorCount( descriptorCount_ ) + { + } + + CopyDescriptorSet( VkCopyDescriptorSet const & rhs ) + { + memcpy( this, &rhs, sizeof( CopyDescriptorSet ) ); + } + + CopyDescriptorSet& operator=( VkCopyDescriptorSet const & rhs ) + { + memcpy( this, &rhs, sizeof( CopyDescriptorSet ) ); + return *this; + } + CopyDescriptorSet& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + CopyDescriptorSet& setSrcSet( DescriptorSet srcSet_ ) + { + srcSet = srcSet_; + return *this; + } + + CopyDescriptorSet& setSrcBinding( uint32_t srcBinding_ ) + { + srcBinding = srcBinding_; + return *this; + } + + CopyDescriptorSet& setSrcArrayElement( uint32_t srcArrayElement_ ) + { + srcArrayElement = srcArrayElement_; + return *this; + } + + CopyDescriptorSet& setDstSet( DescriptorSet dstSet_ ) + { + dstSet = dstSet_; + return *this; + } + + CopyDescriptorSet& setDstBinding( uint32_t dstBinding_ ) + { + dstBinding = dstBinding_; + return *this; + } + + CopyDescriptorSet& setDstArrayElement( uint32_t dstArrayElement_ ) + { + dstArrayElement = dstArrayElement_; + return *this; + } + + CopyDescriptorSet& setDescriptorCount( uint32_t descriptorCount_ ) + { + descriptorCount = descriptorCount_; + return *this; + } + + operator VkCopyDescriptorSet const&() const + { + return *reinterpret_cast(this); + } + + operator VkCopyDescriptorSet &() + { + return *reinterpret_cast(this); + } + + bool operator==( CopyDescriptorSet const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcSet == rhs.srcSet ) + && ( srcBinding == rhs.srcBinding ) + && ( srcArrayElement == rhs.srcArrayElement ) + 
&& ( dstSet == rhs.dstSet ) + && ( dstBinding == rhs.dstBinding ) + && ( dstArrayElement == rhs.dstArrayElement ) + && ( descriptorCount == rhs.descriptorCount ); + } + + bool operator!=( CopyDescriptorSet const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eCopyDescriptorSet; + + public: + const void* pNext = nullptr; + DescriptorSet srcSet; + uint32_t srcBinding; + uint32_t srcArrayElement; + DescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + }; + static_assert( sizeof( CopyDescriptorSet ) == sizeof( VkCopyDescriptorSet ), "struct and wrapper have different size!" ); + + struct BufferViewCreateInfo + { + BufferViewCreateInfo( BufferViewCreateFlags flags_ = BufferViewCreateFlags(), + Buffer buffer_ = Buffer(), + Format format_ = Format::eUndefined, + DeviceSize offset_ = 0, + DeviceSize range_ = 0 ) + : flags( flags_ ) + , buffer( buffer_ ) + , format( format_ ) + , offset( offset_ ) + , range( range_ ) + { + } + + BufferViewCreateInfo( VkBufferViewCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferViewCreateInfo ) ); + } + + BufferViewCreateInfo& operator=( VkBufferViewCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferViewCreateInfo ) ); + return *this; + } + BufferViewCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BufferViewCreateInfo& setFlags( BufferViewCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + BufferViewCreateInfo& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + BufferViewCreateInfo& setFormat( Format format_ ) + { + format = format_; + return *this; + } + + BufferViewCreateInfo& setOffset( DeviceSize offset_ ) + { + offset = offset_; + return *this; + } + + BufferViewCreateInfo& setRange( DeviceSize range_ ) + { + range = range_; + return *this; + } + + operator VkBufferViewCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkBufferViewCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( BufferViewCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( buffer == rhs.buffer ) + && ( format == rhs.format ) + && ( offset == rhs.offset ) + && ( range == rhs.range ); + } + + bool operator!=( BufferViewCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eBufferViewCreateInfo; + + public: + const void* pNext = nullptr; + BufferViewCreateFlags flags; + Buffer buffer; + Format format; + DeviceSize offset; + DeviceSize range; + }; + static_assert( sizeof( BufferViewCreateInfo ) == sizeof( VkBufferViewCreateInfo ), "struct and wrapper have different size!" 
); + + struct ShaderModuleCreateInfo + { + ShaderModuleCreateInfo( ShaderModuleCreateFlags flags_ = ShaderModuleCreateFlags(), + size_t codeSize_ = 0, + const uint32_t* pCode_ = nullptr ) + : flags( flags_ ) + , codeSize( codeSize_ ) + , pCode( pCode_ ) + { + } + + ShaderModuleCreateInfo( VkShaderModuleCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ShaderModuleCreateInfo ) ); + } + + ShaderModuleCreateInfo& operator=( VkShaderModuleCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ShaderModuleCreateInfo ) ); + return *this; + } + ShaderModuleCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ShaderModuleCreateInfo& setFlags( ShaderModuleCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + ShaderModuleCreateInfo& setCodeSize( size_t codeSize_ ) + { + codeSize = codeSize_; + return *this; + } + + ShaderModuleCreateInfo& setPCode( const uint32_t* pCode_ ) + { + pCode = pCode_; + return *this; + } + + operator VkShaderModuleCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkShaderModuleCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ShaderModuleCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( codeSize == rhs.codeSize ) + && ( pCode == rhs.pCode ); + } + + bool operator!=( ShaderModuleCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eShaderModuleCreateInfo; + + public: + const void* pNext = nullptr; + ShaderModuleCreateFlags flags; + size_t codeSize; + const uint32_t* pCode; + }; + static_assert( sizeof( ShaderModuleCreateInfo ) == sizeof( VkShaderModuleCreateInfo ), "struct and wrapper have different size!" 
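(Editorial aside, not part of the patch: ShaderModuleCreateInfo carries a SPIR-V blob, with codeSize in bytes and pCode pointing at 32-bit words, which is how the generated *.spv files from the shader build rule would typically be consumed. A sketch under those assumptions; loading the file is left to the caller:)

    #include <vulkan/vulkan.hpp>
    #include <cstdint>
    #include <vector>

    // Wraps an already-loaded SPIR-V binary (e.g. one of the generated *.spv files)
    // in a shader module. 'spirvWords' holds the code as 32-bit words.
    vk::ShaderModule createShaderModuleFromSpirv(vk::Device device,
                                                 const std::vector<uint32_t>& spirvWords)
    {
        vk::ShaderModuleCreateInfo info = vk::ShaderModuleCreateInfo()
            .setCodeSize(spirvWords.size() * sizeof(uint32_t))   // size in bytes
            .setPCode(spirvWords.data());                        // pointer to 32-bit words
        return device.createShaderModule(info);
    }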
); + + struct DescriptorSetAllocateInfo + { + DescriptorSetAllocateInfo( DescriptorPool descriptorPool_ = DescriptorPool(), + uint32_t descriptorSetCount_ = 0, + const DescriptorSetLayout* pSetLayouts_ = nullptr ) + : descriptorPool( descriptorPool_ ) + , descriptorSetCount( descriptorSetCount_ ) + , pSetLayouts( pSetLayouts_ ) + { + } + + DescriptorSetAllocateInfo( VkDescriptorSetAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetAllocateInfo ) ); + } + + DescriptorSetAllocateInfo& operator=( VkDescriptorSetAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetAllocateInfo ) ); + return *this; + } + DescriptorSetAllocateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DescriptorSetAllocateInfo& setDescriptorPool( DescriptorPool descriptorPool_ ) + { + descriptorPool = descriptorPool_; + return *this; + } + + DescriptorSetAllocateInfo& setDescriptorSetCount( uint32_t descriptorSetCount_ ) + { + descriptorSetCount = descriptorSetCount_; + return *this; + } + + DescriptorSetAllocateInfo& setPSetLayouts( const DescriptorSetLayout* pSetLayouts_ ) + { + pSetLayouts = pSetLayouts_; + return *this; + } + + operator VkDescriptorSetAllocateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorSetAllocateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorSetAllocateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( descriptorPool == rhs.descriptorPool ) + && ( descriptorSetCount == rhs.descriptorSetCount ) + && ( pSetLayouts == rhs.pSetLayouts ); + } + + bool operator!=( DescriptorSetAllocateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorSetAllocateInfo; + + public: + const void* pNext = nullptr; + DescriptorPool descriptorPool; + uint32_t descriptorSetCount; + const DescriptorSetLayout* pSetLayouts; + }; + static_assert( sizeof( DescriptorSetAllocateInfo ) == sizeof( VkDescriptorSetAllocateInfo ), "struct and wrapper have different size!" 
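(Editorial aside, not part of the patch: DescriptorSetAllocateInfo pairs naturally with the WriteDescriptorSet wrapper defined earlier in this hunk. A sketch that allocates one set and points binding 0 at a uniform buffer; the pool, layout and buffer are assumed to exist already:)

    #include <vulkan/vulkan.hpp>

    // Allocates a single descriptor set and binds 'uniformBuffer' to binding 0.
    vk::DescriptorSet allocateAndWriteUniformSet(vk::Device device,
                                                 vk::DescriptorPool pool,
                                                 vk::DescriptorSetLayout layout,
                                                 vk::Buffer uniformBuffer,
                                                 vk::DeviceSize bufferSize)
    {
        vk::DescriptorSetAllocateInfo allocInfo(pool, 1, &layout);
        vk::DescriptorSet set = device.allocateDescriptorSets(allocInfo).front();

        vk::DescriptorBufferInfo bufferInfo(uniformBuffer, 0 /*offset*/, bufferSize);
        vk::WriteDescriptorSet write = vk::WriteDescriptorSet()
            .setDstSet(set)
            .setDstBinding(0)
            .setDescriptorCount(1)
            .setDescriptorType(vk::DescriptorType::eUniformBuffer)
            .setPBufferInfo(&bufferInfo);

        device.updateDescriptorSets(write, nullptr);  // one write, no copies
        return set;
    }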
); + + struct PipelineVertexInputStateCreateInfo + { + PipelineVertexInputStateCreateInfo( PipelineVertexInputStateCreateFlags flags_ = PipelineVertexInputStateCreateFlags(), + uint32_t vertexBindingDescriptionCount_ = 0, + const VertexInputBindingDescription* pVertexBindingDescriptions_ = nullptr, + uint32_t vertexAttributeDescriptionCount_ = 0, + const VertexInputAttributeDescription* pVertexAttributeDescriptions_ = nullptr ) + : flags( flags_ ) + , vertexBindingDescriptionCount( vertexBindingDescriptionCount_ ) + , pVertexBindingDescriptions( pVertexBindingDescriptions_ ) + , vertexAttributeDescriptionCount( vertexAttributeDescriptionCount_ ) + , pVertexAttributeDescriptions( pVertexAttributeDescriptions_ ) + { + } + + PipelineVertexInputStateCreateInfo( VkPipelineVertexInputStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineVertexInputStateCreateInfo ) ); + } + + PipelineVertexInputStateCreateInfo& operator=( VkPipelineVertexInputStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineVertexInputStateCreateInfo ) ); + return *this; + } + PipelineVertexInputStateCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineVertexInputStateCreateInfo& setFlags( PipelineVertexInputStateCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineVertexInputStateCreateInfo& setVertexBindingDescriptionCount( uint32_t vertexBindingDescriptionCount_ ) + { + vertexBindingDescriptionCount = vertexBindingDescriptionCount_; + return *this; + } + + PipelineVertexInputStateCreateInfo& setPVertexBindingDescriptions( const VertexInputBindingDescription* pVertexBindingDescriptions_ ) + { + pVertexBindingDescriptions = pVertexBindingDescriptions_; + return *this; + } + + PipelineVertexInputStateCreateInfo& setVertexAttributeDescriptionCount( uint32_t vertexAttributeDescriptionCount_ ) + { + vertexAttributeDescriptionCount = vertexAttributeDescriptionCount_; + return *this; + } + + PipelineVertexInputStateCreateInfo& setPVertexAttributeDescriptions( const VertexInputAttributeDescription* pVertexAttributeDescriptions_ ) + { + pVertexAttributeDescriptions = pVertexAttributeDescriptions_; + return *this; + } + + operator VkPipelineVertexInputStateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineVertexInputStateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineVertexInputStateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( vertexBindingDescriptionCount == rhs.vertexBindingDescriptionCount ) + && ( pVertexBindingDescriptions == rhs.pVertexBindingDescriptions ) + && ( vertexAttributeDescriptionCount == rhs.vertexAttributeDescriptionCount ) + && ( pVertexAttributeDescriptions == rhs.pVertexAttributeDescriptions ); + } + + bool operator!=( PipelineVertexInputStateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineVertexInputStateCreateInfo; + + public: + const void* pNext = nullptr; + PipelineVertexInputStateCreateFlags flags; + uint32_t vertexBindingDescriptionCount; + const VertexInputBindingDescription* pVertexBindingDescriptions; + uint32_t vertexAttributeDescriptionCount; + const VertexInputAttributeDescription* pVertexAttributeDescriptions; + }; + static_assert( sizeof( PipelineVertexInputStateCreateInfo ) == sizeof( VkPipelineVertexInputStateCreateInfo ), "struct and wrapper have 
different size!" ); + + struct PipelineInputAssemblyStateCreateInfo + { + PipelineInputAssemblyStateCreateInfo( PipelineInputAssemblyStateCreateFlags flags_ = PipelineInputAssemblyStateCreateFlags(), + PrimitiveTopology topology_ = PrimitiveTopology::ePointList, + Bool32 primitiveRestartEnable_ = 0 ) + : flags( flags_ ) + , topology( topology_ ) + , primitiveRestartEnable( primitiveRestartEnable_ ) + { + } + + PipelineInputAssemblyStateCreateInfo( VkPipelineInputAssemblyStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineInputAssemblyStateCreateInfo ) ); + } + + PipelineInputAssemblyStateCreateInfo& operator=( VkPipelineInputAssemblyStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineInputAssemblyStateCreateInfo ) ); + return *this; + } + PipelineInputAssemblyStateCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineInputAssemblyStateCreateInfo& setFlags( PipelineInputAssemblyStateCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineInputAssemblyStateCreateInfo& setTopology( PrimitiveTopology topology_ ) + { + topology = topology_; + return *this; + } + + PipelineInputAssemblyStateCreateInfo& setPrimitiveRestartEnable( Bool32 primitiveRestartEnable_ ) + { + primitiveRestartEnable = primitiveRestartEnable_; + return *this; + } + + operator VkPipelineInputAssemblyStateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineInputAssemblyStateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineInputAssemblyStateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( topology == rhs.topology ) + && ( primitiveRestartEnable == rhs.primitiveRestartEnable ); + } + + bool operator!=( PipelineInputAssemblyStateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineInputAssemblyStateCreateInfo; + + public: + const void* pNext = nullptr; + PipelineInputAssemblyStateCreateFlags flags; + PrimitiveTopology topology; + Bool32 primitiveRestartEnable; + }; + static_assert( sizeof( PipelineInputAssemblyStateCreateInfo ) == sizeof( VkPipelineInputAssemblyStateCreateInfo ), "struct and wrapper have different size!" 
);
+
+  struct PipelineTessellationStateCreateInfo
+  {
+    PipelineTessellationStateCreateInfo( PipelineTessellationStateCreateFlags flags_ = PipelineTessellationStateCreateFlags(),
+                                         uint32_t patchControlPoints_ = 0 )
+      : flags( flags_ )
+      , patchControlPoints( patchControlPoints_ )
+    {
+    }
+
+    PipelineTessellationStateCreateInfo( VkPipelineTessellationStateCreateInfo const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( PipelineTessellationStateCreateInfo ) );
+    }
+
+    PipelineTessellationStateCreateInfo& operator=( VkPipelineTessellationStateCreateInfo const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( PipelineTessellationStateCreateInfo ) );
+      return *this;
+    }
+    PipelineTessellationStateCreateInfo& setPNext( const void* pNext_ )
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PipelineTessellationStateCreateInfo& setFlags( PipelineTessellationStateCreateFlags flags_ )
+    {
+      flags = flags_;
+      return *this;
+    }
+
+    PipelineTessellationStateCreateInfo& setPatchControlPoints( uint32_t patchControlPoints_ )
+    {
+      patchControlPoints = patchControlPoints_;
+      return *this;
+    }
+
+    operator VkPipelineTessellationStateCreateInfo const&() const
+    {
+      return *reinterpret_cast<const VkPipelineTessellationStateCreateInfo*>(this);
+    }
+
+    operator VkPipelineTessellationStateCreateInfo &()
+    {
+      return *reinterpret_cast<VkPipelineTessellationStateCreateInfo*>(this);
+    }
+
+    bool operator==( PipelineTessellationStateCreateInfo const& rhs ) const
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( flags == rhs.flags )
+          && ( patchControlPoints == rhs.patchControlPoints );
+    }
+
+    bool operator!=( PipelineTessellationStateCreateInfo const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+  private:
+    StructureType sType = StructureType::ePipelineTessellationStateCreateInfo;
+
+  public:
+    const void* pNext = nullptr;
+    PipelineTessellationStateCreateFlags flags;
+    uint32_t patchControlPoints;
+  };
+  static_assert( sizeof( PipelineTessellationStateCreateInfo ) == sizeof( VkPipelineTessellationStateCreateInfo ), "struct and wrapper have different size!"
); + + struct PipelineViewportStateCreateInfo + { + PipelineViewportStateCreateInfo( PipelineViewportStateCreateFlags flags_ = PipelineViewportStateCreateFlags(), + uint32_t viewportCount_ = 0, + const Viewport* pViewports_ = nullptr, + uint32_t scissorCount_ = 0, + const Rect2D* pScissors_ = nullptr ) + : flags( flags_ ) + , viewportCount( viewportCount_ ) + , pViewports( pViewports_ ) + , scissorCount( scissorCount_ ) + , pScissors( pScissors_ ) + { + } + + PipelineViewportStateCreateInfo( VkPipelineViewportStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportStateCreateInfo ) ); + } + + PipelineViewportStateCreateInfo& operator=( VkPipelineViewportStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportStateCreateInfo ) ); + return *this; + } + PipelineViewportStateCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineViewportStateCreateInfo& setFlags( PipelineViewportStateCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineViewportStateCreateInfo& setViewportCount( uint32_t viewportCount_ ) + { + viewportCount = viewportCount_; + return *this; + } + + PipelineViewportStateCreateInfo& setPViewports( const Viewport* pViewports_ ) + { + pViewports = pViewports_; + return *this; + } + + PipelineViewportStateCreateInfo& setScissorCount( uint32_t scissorCount_ ) + { + scissorCount = scissorCount_; + return *this; + } + + PipelineViewportStateCreateInfo& setPScissors( const Rect2D* pScissors_ ) + { + pScissors = pScissors_; + return *this; + } + + operator VkPipelineViewportStateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineViewportStateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineViewportStateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( viewportCount == rhs.viewportCount ) + && ( pViewports == rhs.pViewports ) + && ( scissorCount == rhs.scissorCount ) + && ( pScissors == rhs.pScissors ); + } + + bool operator!=( PipelineViewportStateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineViewportStateCreateInfo; + + public: + const void* pNext = nullptr; + PipelineViewportStateCreateFlags flags; + uint32_t viewportCount; + const Viewport* pViewports; + uint32_t scissorCount; + const Rect2D* pScissors; + }; + static_assert( sizeof( PipelineViewportStateCreateInfo ) == sizeof( VkPipelineViewportStateCreateInfo ), "struct and wrapper have different size!" 
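(Editorial aside, not part of the patch: the vertex-input, input-assembly and viewport blocks above are the fixed-function state later referenced from a GraphicsPipelineCreateInfo. A sketch that fills them in place so the internal pointers stay valid; the binding/attribute descriptions and the framebuffer extent are assumed to come from elsewhere:)

    #include <vulkan/vulkan.hpp>
    #include <cstdint>

    struct FixedFunctionState
    {
        vk::PipelineVertexInputStateCreateInfo   vertexInput;
        vk::PipelineInputAssemblyStateCreateInfo inputAssembly;
        vk::Viewport                             viewport;
        vk::Rect2D                               scissor;
        vk::PipelineViewportStateCreateInfo      viewportState;
    };

    // Fills 'state' in place (rather than returning by value) so that the
    // pViewports/pScissors pointers keep referring to live members.
    void fillFixedFunctionState(FixedFunctionState& state,
                                const vk::VertexInputBindingDescription& binding,
                                const vk::VertexInputAttributeDescription* attributes,
                                uint32_t attributeCount,
                                vk::Extent2D extent)
    {
        // Caller keeps 'binding' and 'attributes' alive until the pipeline is created.
        state.vertexInput = vk::PipelineVertexInputStateCreateInfo()
            .setVertexBindingDescriptionCount(1)
            .setPVertexBindingDescriptions(&binding)
            .setVertexAttributeDescriptionCount(attributeCount)
            .setPVertexAttributeDescriptions(attributes);

        state.inputAssembly = vk::PipelineInputAssemblyStateCreateInfo()
            .setTopology(vk::PrimitiveTopology::eTriangleList);

        state.viewport = vk::Viewport(0.0f, 0.0f,
                                      static_cast<float>(extent.width),
                                      static_cast<float>(extent.height),
                                      0.0f, 1.0f);
        state.scissor = vk::Rect2D(vk::Offset2D(0, 0), extent);

        state.viewportState = vk::PipelineViewportStateCreateInfo()
            .setViewportCount(1)
            .setPViewports(&state.viewport)
            .setScissorCount(1)
            .setPScissors(&state.scissor);
    }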
); + + struct PipelineRasterizationStateCreateInfo + { + PipelineRasterizationStateCreateInfo( PipelineRasterizationStateCreateFlags flags_ = PipelineRasterizationStateCreateFlags(), + Bool32 depthClampEnable_ = 0, + Bool32 rasterizerDiscardEnable_ = 0, + PolygonMode polygonMode_ = PolygonMode::eFill, + CullModeFlags cullMode_ = CullModeFlags(), + FrontFace frontFace_ = FrontFace::eCounterClockwise, + Bool32 depthBiasEnable_ = 0, + float depthBiasConstantFactor_ = 0, + float depthBiasClamp_ = 0, + float depthBiasSlopeFactor_ = 0, + float lineWidth_ = 0 ) + : flags( flags_ ) + , depthClampEnable( depthClampEnable_ ) + , rasterizerDiscardEnable( rasterizerDiscardEnable_ ) + , polygonMode( polygonMode_ ) + , cullMode( cullMode_ ) + , frontFace( frontFace_ ) + , depthBiasEnable( depthBiasEnable_ ) + , depthBiasConstantFactor( depthBiasConstantFactor_ ) + , depthBiasClamp( depthBiasClamp_ ) + , depthBiasSlopeFactor( depthBiasSlopeFactor_ ) + , lineWidth( lineWidth_ ) + { + } + + PipelineRasterizationStateCreateInfo( VkPipelineRasterizationStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineRasterizationStateCreateInfo ) ); + } + + PipelineRasterizationStateCreateInfo& operator=( VkPipelineRasterizationStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineRasterizationStateCreateInfo ) ); + return *this; + } + PipelineRasterizationStateCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setFlags( PipelineRasterizationStateCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setDepthClampEnable( Bool32 depthClampEnable_ ) + { + depthClampEnable = depthClampEnable_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setRasterizerDiscardEnable( Bool32 rasterizerDiscardEnable_ ) + { + rasterizerDiscardEnable = rasterizerDiscardEnable_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setPolygonMode( PolygonMode polygonMode_ ) + { + polygonMode = polygonMode_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setCullMode( CullModeFlags cullMode_ ) + { + cullMode = cullMode_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setFrontFace( FrontFace frontFace_ ) + { + frontFace = frontFace_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setDepthBiasEnable( Bool32 depthBiasEnable_ ) + { + depthBiasEnable = depthBiasEnable_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setDepthBiasConstantFactor( float depthBiasConstantFactor_ ) + { + depthBiasConstantFactor = depthBiasConstantFactor_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setDepthBiasClamp( float depthBiasClamp_ ) + { + depthBiasClamp = depthBiasClamp_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setDepthBiasSlopeFactor( float depthBiasSlopeFactor_ ) + { + depthBiasSlopeFactor = depthBiasSlopeFactor_; + return *this; + } + + PipelineRasterizationStateCreateInfo& setLineWidth( float lineWidth_ ) + { + lineWidth = lineWidth_; + return *this; + } + + operator VkPipelineRasterizationStateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineRasterizationStateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineRasterizationStateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( depthClampEnable == rhs.depthClampEnable ) + && ( 
rasterizerDiscardEnable == rhs.rasterizerDiscardEnable ) + && ( polygonMode == rhs.polygonMode ) + && ( cullMode == rhs.cullMode ) + && ( frontFace == rhs.frontFace ) + && ( depthBiasEnable == rhs.depthBiasEnable ) + && ( depthBiasConstantFactor == rhs.depthBiasConstantFactor ) + && ( depthBiasClamp == rhs.depthBiasClamp ) + && ( depthBiasSlopeFactor == rhs.depthBiasSlopeFactor ) + && ( lineWidth == rhs.lineWidth ); + } + + bool operator!=( PipelineRasterizationStateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineRasterizationStateCreateInfo; + + public: + const void* pNext = nullptr; + PipelineRasterizationStateCreateFlags flags; + Bool32 depthClampEnable; + Bool32 rasterizerDiscardEnable; + PolygonMode polygonMode; + CullModeFlags cullMode; + FrontFace frontFace; + Bool32 depthBiasEnable; + float depthBiasConstantFactor; + float depthBiasClamp; + float depthBiasSlopeFactor; + float lineWidth; + }; + static_assert( sizeof( PipelineRasterizationStateCreateInfo ) == sizeof( VkPipelineRasterizationStateCreateInfo ), "struct and wrapper have different size!" ); + + struct PipelineDepthStencilStateCreateInfo + { + PipelineDepthStencilStateCreateInfo( PipelineDepthStencilStateCreateFlags flags_ = PipelineDepthStencilStateCreateFlags(), + Bool32 depthTestEnable_ = 0, + Bool32 depthWriteEnable_ = 0, + CompareOp depthCompareOp_ = CompareOp::eNever, + Bool32 depthBoundsTestEnable_ = 0, + Bool32 stencilTestEnable_ = 0, + StencilOpState front_ = StencilOpState(), + StencilOpState back_ = StencilOpState(), + float minDepthBounds_ = 0, + float maxDepthBounds_ = 0 ) + : flags( flags_ ) + , depthTestEnable( depthTestEnable_ ) + , depthWriteEnable( depthWriteEnable_ ) + , depthCompareOp( depthCompareOp_ ) + , depthBoundsTestEnable( depthBoundsTestEnable_ ) + , stencilTestEnable( stencilTestEnable_ ) + , front( front_ ) + , back( back_ ) + , minDepthBounds( minDepthBounds_ ) + , maxDepthBounds( maxDepthBounds_ ) + { + } + + PipelineDepthStencilStateCreateInfo( VkPipelineDepthStencilStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineDepthStencilStateCreateInfo ) ); + } + + PipelineDepthStencilStateCreateInfo& operator=( VkPipelineDepthStencilStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineDepthStencilStateCreateInfo ) ); + return *this; + } + PipelineDepthStencilStateCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineDepthStencilStateCreateInfo& setFlags( PipelineDepthStencilStateCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineDepthStencilStateCreateInfo& setDepthTestEnable( Bool32 depthTestEnable_ ) + { + depthTestEnable = depthTestEnable_; + return *this; + } + + PipelineDepthStencilStateCreateInfo& setDepthWriteEnable( Bool32 depthWriteEnable_ ) + { + depthWriteEnable = depthWriteEnable_; + return *this; + } + + PipelineDepthStencilStateCreateInfo& setDepthCompareOp( CompareOp depthCompareOp_ ) + { + depthCompareOp = depthCompareOp_; + return *this; + } + + PipelineDepthStencilStateCreateInfo& setDepthBoundsTestEnable( Bool32 depthBoundsTestEnable_ ) + { + depthBoundsTestEnable = depthBoundsTestEnable_; + return *this; + } + + PipelineDepthStencilStateCreateInfo& setStencilTestEnable( Bool32 stencilTestEnable_ ) + { + stencilTestEnable = stencilTestEnable_; + return *this; + } + + PipelineDepthStencilStateCreateInfo& setFront( StencilOpState front_ ) + { + front = front_; + return *this; + } + + 
PipelineDepthStencilStateCreateInfo& setBack( StencilOpState back_ ) + { + back = back_; + return *this; + } + + PipelineDepthStencilStateCreateInfo& setMinDepthBounds( float minDepthBounds_ ) + { + minDepthBounds = minDepthBounds_; + return *this; + } + + PipelineDepthStencilStateCreateInfo& setMaxDepthBounds( float maxDepthBounds_ ) + { + maxDepthBounds = maxDepthBounds_; + return *this; + } + + operator VkPipelineDepthStencilStateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineDepthStencilStateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineDepthStencilStateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( depthTestEnable == rhs.depthTestEnable ) + && ( depthWriteEnable == rhs.depthWriteEnable ) + && ( depthCompareOp == rhs.depthCompareOp ) + && ( depthBoundsTestEnable == rhs.depthBoundsTestEnable ) + && ( stencilTestEnable == rhs.stencilTestEnable ) + && ( front == rhs.front ) + && ( back == rhs.back ) + && ( minDepthBounds == rhs.minDepthBounds ) + && ( maxDepthBounds == rhs.maxDepthBounds ); + } + + bool operator!=( PipelineDepthStencilStateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineDepthStencilStateCreateInfo; + + public: + const void* pNext = nullptr; + PipelineDepthStencilStateCreateFlags flags; + Bool32 depthTestEnable; + Bool32 depthWriteEnable; + CompareOp depthCompareOp; + Bool32 depthBoundsTestEnable; + Bool32 stencilTestEnable; + StencilOpState front; + StencilOpState back; + float minDepthBounds; + float maxDepthBounds; + }; + static_assert( sizeof( PipelineDepthStencilStateCreateInfo ) == sizeof( VkPipelineDepthStencilStateCreateInfo ), "struct and wrapper have different size!" 
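// Minimal sketch, assuming namespace vk and that CompareOp::eLessOrEqual is declared in the enum
// section earlier in this header (VK_TRUE comes from vulkan.h): a typical depth-test
// configuration built with the chained setters above.
static vk::PipelineDepthStencilStateCreateInfo defaultDepthStateSketch()
{
    return vk::PipelineDepthStencilStateCreateInfo()
        .setDepthTestEnable( VK_TRUE )
        .setDepthWriteEnable( VK_TRUE )
        .setDepthCompareOp( vk::CompareOp::eLessOrEqual );
}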
); + + struct PipelineCacheCreateInfo + { + PipelineCacheCreateInfo( PipelineCacheCreateFlags flags_ = PipelineCacheCreateFlags(), + size_t initialDataSize_ = 0, + const void* pInitialData_ = nullptr ) + : flags( flags_ ) + , initialDataSize( initialDataSize_ ) + , pInitialData( pInitialData_ ) + { + } + + PipelineCacheCreateInfo( VkPipelineCacheCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineCacheCreateInfo ) ); + } + + PipelineCacheCreateInfo& operator=( VkPipelineCacheCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineCacheCreateInfo ) ); + return *this; + } + PipelineCacheCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineCacheCreateInfo& setFlags( PipelineCacheCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineCacheCreateInfo& setInitialDataSize( size_t initialDataSize_ ) + { + initialDataSize = initialDataSize_; + return *this; + } + + PipelineCacheCreateInfo& setPInitialData( const void* pInitialData_ ) + { + pInitialData = pInitialData_; + return *this; + } + + operator VkPipelineCacheCreateInfo const&() const + { + return *reinterpret_cast<const VkPipelineCacheCreateInfo*>(this); + } + + operator VkPipelineCacheCreateInfo &() + { + return *reinterpret_cast<VkPipelineCacheCreateInfo*>(this); + } + + bool operator==( PipelineCacheCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( initialDataSize == rhs.initialDataSize ) + && ( pInitialData == rhs.pInitialData ); + } + + bool operator!=( PipelineCacheCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineCacheCreateInfo; + + public: + const void* pNext = nullptr; + PipelineCacheCreateFlags flags; + size_t initialDataSize; + const void* pInitialData; + }; + static_assert( sizeof( PipelineCacheCreateInfo ) == sizeof( VkPipelineCacheCreateInfo ), "struct and wrapper have different size!"
); + + struct SamplerCreateInfo + { + SamplerCreateInfo( SamplerCreateFlags flags_ = SamplerCreateFlags(), + Filter magFilter_ = Filter::eNearest, + Filter minFilter_ = Filter::eNearest, + SamplerMipmapMode mipmapMode_ = SamplerMipmapMode::eNearest, + SamplerAddressMode addressModeU_ = SamplerAddressMode::eRepeat, + SamplerAddressMode addressModeV_ = SamplerAddressMode::eRepeat, + SamplerAddressMode addressModeW_ = SamplerAddressMode::eRepeat, + float mipLodBias_ = 0, + Bool32 anisotropyEnable_ = 0, + float maxAnisotropy_ = 0, + Bool32 compareEnable_ = 0, + CompareOp compareOp_ = CompareOp::eNever, + float minLod_ = 0, + float maxLod_ = 0, + BorderColor borderColor_ = BorderColor::eFloatTransparentBlack, + Bool32 unnormalizedCoordinates_ = 0 ) + : flags( flags_ ) + , magFilter( magFilter_ ) + , minFilter( minFilter_ ) + , mipmapMode( mipmapMode_ ) + , addressModeU( addressModeU_ ) + , addressModeV( addressModeV_ ) + , addressModeW( addressModeW_ ) + , mipLodBias( mipLodBias_ ) + , anisotropyEnable( anisotropyEnable_ ) + , maxAnisotropy( maxAnisotropy_ ) + , compareEnable( compareEnable_ ) + , compareOp( compareOp_ ) + , minLod( minLod_ ) + , maxLod( maxLod_ ) + , borderColor( borderColor_ ) + , unnormalizedCoordinates( unnormalizedCoordinates_ ) + { + } + + SamplerCreateInfo( VkSamplerCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SamplerCreateInfo ) ); + } + + SamplerCreateInfo& operator=( VkSamplerCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SamplerCreateInfo ) ); + return *this; + } + SamplerCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SamplerCreateInfo& setFlags( SamplerCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + SamplerCreateInfo& setMagFilter( Filter magFilter_ ) + { + magFilter = magFilter_; + return *this; + } + + SamplerCreateInfo& setMinFilter( Filter minFilter_ ) + { + minFilter = minFilter_; + return *this; + } + + SamplerCreateInfo& setMipmapMode( SamplerMipmapMode mipmapMode_ ) + { + mipmapMode = mipmapMode_; + return *this; + } + + SamplerCreateInfo& setAddressModeU( SamplerAddressMode addressModeU_ ) + { + addressModeU = addressModeU_; + return *this; + } + + SamplerCreateInfo& setAddressModeV( SamplerAddressMode addressModeV_ ) + { + addressModeV = addressModeV_; + return *this; + } + + SamplerCreateInfo& setAddressModeW( SamplerAddressMode addressModeW_ ) + { + addressModeW = addressModeW_; + return *this; + } + + SamplerCreateInfo& setMipLodBias( float mipLodBias_ ) + { + mipLodBias = mipLodBias_; + return *this; + } + + SamplerCreateInfo& setAnisotropyEnable( Bool32 anisotropyEnable_ ) + { + anisotropyEnable = anisotropyEnable_; + return *this; + } + + SamplerCreateInfo& setMaxAnisotropy( float maxAnisotropy_ ) + { + maxAnisotropy = maxAnisotropy_; + return *this; + } + + SamplerCreateInfo& setCompareEnable( Bool32 compareEnable_ ) + { + compareEnable = compareEnable_; + return *this; + } + + SamplerCreateInfo& setCompareOp( CompareOp compareOp_ ) + { + compareOp = compareOp_; + return *this; + } + + SamplerCreateInfo& setMinLod( float minLod_ ) + { + minLod = minLod_; + return *this; + } + + SamplerCreateInfo& setMaxLod( float maxLod_ ) + { + maxLod = maxLod_; + return *this; + } + + SamplerCreateInfo& setBorderColor( BorderColor borderColor_ ) + { + borderColor = borderColor_; + return *this; + } + + SamplerCreateInfo& setUnnormalizedCoordinates( Bool32 unnormalizedCoordinates_ ) + { + unnormalizedCoordinates = unnormalizedCoordinates_; + return *this; + } + + operator 
VkSamplerCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkSamplerCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( SamplerCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( magFilter == rhs.magFilter ) + && ( minFilter == rhs.minFilter ) + && ( mipmapMode == rhs.mipmapMode ) + && ( addressModeU == rhs.addressModeU ) + && ( addressModeV == rhs.addressModeV ) + && ( addressModeW == rhs.addressModeW ) + && ( mipLodBias == rhs.mipLodBias ) + && ( anisotropyEnable == rhs.anisotropyEnable ) + && ( maxAnisotropy == rhs.maxAnisotropy ) + && ( compareEnable == rhs.compareEnable ) + && ( compareOp == rhs.compareOp ) + && ( minLod == rhs.minLod ) + && ( maxLod == rhs.maxLod ) + && ( borderColor == rhs.borderColor ) + && ( unnormalizedCoordinates == rhs.unnormalizedCoordinates ); + } + + bool operator!=( SamplerCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSamplerCreateInfo; + + public: + const void* pNext = nullptr; + SamplerCreateFlags flags; + Filter magFilter; + Filter minFilter; + SamplerMipmapMode mipmapMode; + SamplerAddressMode addressModeU; + SamplerAddressMode addressModeV; + SamplerAddressMode addressModeW; + float mipLodBias; + Bool32 anisotropyEnable; + float maxAnisotropy; + Bool32 compareEnable; + CompareOp compareOp; + float minLod; + float maxLod; + BorderColor borderColor; + Bool32 unnormalizedCoordinates; + }; + static_assert( sizeof( SamplerCreateInfo ) == sizeof( VkSamplerCreateInfo ), "struct and wrapper have different size!" ); + + struct CommandBufferAllocateInfo + { + CommandBufferAllocateInfo( CommandPool commandPool_ = CommandPool(), + CommandBufferLevel level_ = CommandBufferLevel::ePrimary, + uint32_t commandBufferCount_ = 0 ) + : commandPool( commandPool_ ) + , level( level_ ) + , commandBufferCount( commandBufferCount_ ) + { + } + + CommandBufferAllocateInfo( VkCommandBufferAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandBufferAllocateInfo ) ); + } + + CommandBufferAllocateInfo& operator=( VkCommandBufferAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandBufferAllocateInfo ) ); + return *this; + } + CommandBufferAllocateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + CommandBufferAllocateInfo& setCommandPool( CommandPool commandPool_ ) + { + commandPool = commandPool_; + return *this; + } + + CommandBufferAllocateInfo& setLevel( CommandBufferLevel level_ ) + { + level = level_; + return *this; + } + + CommandBufferAllocateInfo& setCommandBufferCount( uint32_t commandBufferCount_ ) + { + commandBufferCount = commandBufferCount_; + return *this; + } + + operator VkCommandBufferAllocateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkCommandBufferAllocateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( CommandBufferAllocateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( commandPool == rhs.commandPool ) + && ( level == rhs.level ) + && ( commandBufferCount == rhs.commandBufferCount ); + } + + bool operator!=( CommandBufferAllocateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eCommandBufferAllocateInfo; + + public: + const void* pNext = nullptr; + CommandPool commandPool; + CommandBufferLevel level; + uint32_t commandBufferCount; + 
}; + static_assert( sizeof( CommandBufferAllocateInfo ) == sizeof( VkCommandBufferAllocateInfo ), "struct and wrapper have different size!" ); + + struct RenderPassBeginInfo + { + RenderPassBeginInfo( RenderPass renderPass_ = RenderPass(), + Framebuffer framebuffer_ = Framebuffer(), + Rect2D renderArea_ = Rect2D(), + uint32_t clearValueCount_ = 0, + const ClearValue* pClearValues_ = nullptr ) + : renderPass( renderPass_ ) + , framebuffer( framebuffer_ ) + , renderArea( renderArea_ ) + , clearValueCount( clearValueCount_ ) + , pClearValues( pClearValues_ ) + { + } + + RenderPassBeginInfo( VkRenderPassBeginInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassBeginInfo ) ); + } + + RenderPassBeginInfo& operator=( VkRenderPassBeginInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassBeginInfo ) ); + return *this; + } + RenderPassBeginInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + RenderPassBeginInfo& setRenderPass( RenderPass renderPass_ ) + { + renderPass = renderPass_; + return *this; + } + + RenderPassBeginInfo& setFramebuffer( Framebuffer framebuffer_ ) + { + framebuffer = framebuffer_; + return *this; + } + + RenderPassBeginInfo& setRenderArea( Rect2D renderArea_ ) + { + renderArea = renderArea_; + return *this; + } + + RenderPassBeginInfo& setClearValueCount( uint32_t clearValueCount_ ) + { + clearValueCount = clearValueCount_; + return *this; + } + + RenderPassBeginInfo& setPClearValues( const ClearValue* pClearValues_ ) + { + pClearValues = pClearValues_; + return *this; + } + + operator VkRenderPassBeginInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkRenderPassBeginInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( RenderPassBeginInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( renderPass == rhs.renderPass ) + && ( framebuffer == rhs.framebuffer ) + && ( renderArea == rhs.renderArea ) + && ( clearValueCount == rhs.clearValueCount ) + && ( pClearValues == rhs.pClearValues ); + } + + bool operator!=( RenderPassBeginInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eRenderPassBeginInfo; + + public: + const void* pNext = nullptr; + RenderPass renderPass; + Framebuffer framebuffer; + Rect2D renderArea; + uint32_t clearValueCount; + const ClearValue* pClearValues; + }; + static_assert( sizeof( RenderPassBeginInfo ) == sizeof( VkRenderPassBeginInfo ), "struct and wrapper have different size!" 
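// Minimal sketch, assuming namespace vk and that the vk::RenderPass, vk::Framebuffer, vk::Rect2D
// and vk::ClearValue wrappers are declared earlier in this header, and that 'cmd' is a command
// buffer that is already recording: RenderPassBeginInfo is composed with the setters above and
// passed to the C API through its conversion operator.
static void beginPassSketch( VkCommandBuffer cmd,
                             vk::RenderPass renderPass,
                             vk::Framebuffer framebuffer,
                             vk::Rect2D renderArea,
                             const vk::ClearValue& clear )
{
    vk::RenderPassBeginInfo info = vk::RenderPassBeginInfo()
        .setRenderPass( renderPass )
        .setFramebuffer( framebuffer )
        .setRenderArea( renderArea )
        .setClearValueCount( 1 )
        .setPClearValues( &clear );
    const VkRenderPassBeginInfo& raw = info;    // conversion operator, no copy
    vkCmdBeginRenderPass( cmd, &raw, VK_SUBPASS_CONTENTS_INLINE );
}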
); + + struct EventCreateInfo + { + EventCreateInfo( EventCreateFlags flags_ = EventCreateFlags() ) + : flags( flags_ ) + { + } + + EventCreateInfo( VkEventCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( EventCreateInfo ) ); + } + + EventCreateInfo& operator=( VkEventCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( EventCreateInfo ) ); + return *this; + } + EventCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + EventCreateInfo& setFlags( EventCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + operator VkEventCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkEventCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( EventCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ); + } + + bool operator!=( EventCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eEventCreateInfo; + + public: + const void* pNext = nullptr; + EventCreateFlags flags; + }; + static_assert( sizeof( EventCreateInfo ) == sizeof( VkEventCreateInfo ), "struct and wrapper have different size!" ); + + struct SemaphoreCreateInfo + { + SemaphoreCreateInfo( SemaphoreCreateFlags flags_ = SemaphoreCreateFlags() ) + : flags( flags_ ) + { + } + + SemaphoreCreateInfo( VkSemaphoreCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SemaphoreCreateInfo ) ); + } + + SemaphoreCreateInfo& operator=( VkSemaphoreCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SemaphoreCreateInfo ) ); + return *this; + } + SemaphoreCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SemaphoreCreateInfo& setFlags( SemaphoreCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + operator VkSemaphoreCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkSemaphoreCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( SemaphoreCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ); + } + + bool operator!=( SemaphoreCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSemaphoreCreateInfo; + + public: + const void* pNext = nullptr; + SemaphoreCreateFlags flags; + }; + static_assert( sizeof( SemaphoreCreateInfo ) == sizeof( VkSemaphoreCreateInfo ), "struct and wrapper have different size!" 
); + + struct FramebufferCreateInfo + { + FramebufferCreateInfo( FramebufferCreateFlags flags_ = FramebufferCreateFlags(), + RenderPass renderPass_ = RenderPass(), + uint32_t attachmentCount_ = 0, + const ImageView* pAttachments_ = nullptr, + uint32_t width_ = 0, + uint32_t height_ = 0, + uint32_t layers_ = 0 ) + : flags( flags_ ) + , renderPass( renderPass_ ) + , attachmentCount( attachmentCount_ ) + , pAttachments( pAttachments_ ) + , width( width_ ) + , height( height_ ) + , layers( layers_ ) + { + } + + FramebufferCreateInfo( VkFramebufferCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( FramebufferCreateInfo ) ); + } + + FramebufferCreateInfo& operator=( VkFramebufferCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( FramebufferCreateInfo ) ); + return *this; + } + FramebufferCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + FramebufferCreateInfo& setFlags( FramebufferCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + FramebufferCreateInfo& setRenderPass( RenderPass renderPass_ ) + { + renderPass = renderPass_; + return *this; + } + + FramebufferCreateInfo& setAttachmentCount( uint32_t attachmentCount_ ) + { + attachmentCount = attachmentCount_; + return *this; + } + + FramebufferCreateInfo& setPAttachments( const ImageView* pAttachments_ ) + { + pAttachments = pAttachments_; + return *this; + } + + FramebufferCreateInfo& setWidth( uint32_t width_ ) + { + width = width_; + return *this; + } + + FramebufferCreateInfo& setHeight( uint32_t height_ ) + { + height = height_; + return *this; + } + + FramebufferCreateInfo& setLayers( uint32_t layers_ ) + { + layers = layers_; + return *this; + } + + operator VkFramebufferCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkFramebufferCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( FramebufferCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( renderPass == rhs.renderPass ) + && ( attachmentCount == rhs.attachmentCount ) + && ( pAttachments == rhs.pAttachments ) + && ( width == rhs.width ) + && ( height == rhs.height ) + && ( layers == rhs.layers ); + } + + bool operator!=( FramebufferCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eFramebufferCreateInfo; + + public: + const void* pNext = nullptr; + FramebufferCreateFlags flags; + RenderPass renderPass; + uint32_t attachmentCount; + const ImageView* pAttachments; + uint32_t width; + uint32_t height; + uint32_t layers; + }; + static_assert( sizeof( FramebufferCreateInfo ) == sizeof( VkFramebufferCreateInfo ), "struct and wrapper have different size!" 
); + + struct DisplayModeCreateInfoKHR + { + DisplayModeCreateInfoKHR( DisplayModeCreateFlagsKHR flags_ = DisplayModeCreateFlagsKHR(), + DisplayModeParametersKHR parameters_ = DisplayModeParametersKHR() ) + : flags( flags_ ) + , parameters( parameters_ ) + { + } + + DisplayModeCreateInfoKHR( VkDisplayModeCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayModeCreateInfoKHR ) ); + } + + DisplayModeCreateInfoKHR& operator=( VkDisplayModeCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayModeCreateInfoKHR ) ); + return *this; + } + DisplayModeCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DisplayModeCreateInfoKHR& setFlags( DisplayModeCreateFlagsKHR flags_ ) + { + flags = flags_; + return *this; + } + + DisplayModeCreateInfoKHR& setParameters( DisplayModeParametersKHR parameters_ ) + { + parameters = parameters_; + return *this; + } + + operator VkDisplayModeCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayModeCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayModeCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( parameters == rhs.parameters ); + } + + bool operator!=( DisplayModeCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplayModeCreateInfoKHR; + + public: + const void* pNext = nullptr; + DisplayModeCreateFlagsKHR flags; + DisplayModeParametersKHR parameters; + }; + static_assert( sizeof( DisplayModeCreateInfoKHR ) == sizeof( VkDisplayModeCreateInfoKHR ), "struct and wrapper have different size!" ); + + struct DisplayPresentInfoKHR + { + DisplayPresentInfoKHR( Rect2D srcRect_ = Rect2D(), + Rect2D dstRect_ = Rect2D(), + Bool32 persistent_ = 0 ) + : srcRect( srcRect_ ) + , dstRect( dstRect_ ) + , persistent( persistent_ ) + { + } + + DisplayPresentInfoKHR( VkDisplayPresentInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayPresentInfoKHR ) ); + } + + DisplayPresentInfoKHR& operator=( VkDisplayPresentInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayPresentInfoKHR ) ); + return *this; + } + DisplayPresentInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DisplayPresentInfoKHR& setSrcRect( Rect2D srcRect_ ) + { + srcRect = srcRect_; + return *this; + } + + DisplayPresentInfoKHR& setDstRect( Rect2D dstRect_ ) + { + dstRect = dstRect_; + return *this; + } + + DisplayPresentInfoKHR& setPersistent( Bool32 persistent_ ) + { + persistent = persistent_; + return *this; + } + + operator VkDisplayPresentInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayPresentInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayPresentInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcRect == rhs.srcRect ) + && ( dstRect == rhs.dstRect ) + && ( persistent == rhs.persistent ); + } + + bool operator!=( DisplayPresentInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplayPresentInfoKHR; + + public: + const void* pNext = nullptr; + Rect2D srcRect; + Rect2D dstRect; + Bool32 persistent; + }; + static_assert( sizeof( DisplayPresentInfoKHR ) == sizeof( VkDisplayPresentInfoKHR ), "struct and wrapper have different size!" 
); + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + struct AndroidSurfaceCreateInfoKHR + { + AndroidSurfaceCreateInfoKHR( AndroidSurfaceCreateFlagsKHR flags_ = AndroidSurfaceCreateFlagsKHR(), + struct ANativeWindow* window_ = nullptr ) + : flags( flags_ ) + , window( window_ ) + { + } + + AndroidSurfaceCreateInfoKHR( VkAndroidSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( AndroidSurfaceCreateInfoKHR ) ); + } + + AndroidSurfaceCreateInfoKHR& operator=( VkAndroidSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( AndroidSurfaceCreateInfoKHR ) ); + return *this; + } + AndroidSurfaceCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + AndroidSurfaceCreateInfoKHR& setFlags( AndroidSurfaceCreateFlagsKHR flags_ ) + { + flags = flags_; + return *this; + } + + AndroidSurfaceCreateInfoKHR& setWindow( struct ANativeWindow* window_ ) + { + window = window_; + return *this; + } + + operator VkAndroidSurfaceCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkAndroidSurfaceCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( AndroidSurfaceCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( window == rhs.window ); + } + + bool operator!=( AndroidSurfaceCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eAndroidSurfaceCreateInfoKHR; + + public: + const void* pNext = nullptr; + AndroidSurfaceCreateFlagsKHR flags; + struct ANativeWindow* window; + }; + static_assert( sizeof( AndroidSurfaceCreateInfoKHR ) == sizeof( VkAndroidSurfaceCreateInfoKHR ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#ifdef VK_USE_PLATFORM_MIR_KHR + struct MirSurfaceCreateInfoKHR + { + MirSurfaceCreateInfoKHR( MirSurfaceCreateFlagsKHR flags_ = MirSurfaceCreateFlagsKHR(), + MirConnection* connection_ = nullptr, + MirSurface* mirSurface_ = nullptr ) + : flags( flags_ ) + , connection( connection_ ) + , mirSurface( mirSurface_ ) + { + } + + MirSurfaceCreateInfoKHR( VkMirSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( MirSurfaceCreateInfoKHR ) ); + } + + MirSurfaceCreateInfoKHR& operator=( VkMirSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( MirSurfaceCreateInfoKHR ) ); + return *this; + } + MirSurfaceCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + MirSurfaceCreateInfoKHR& setFlags( MirSurfaceCreateFlagsKHR flags_ ) + { + flags = flags_; + return *this; + } + + MirSurfaceCreateInfoKHR& setConnection( MirConnection* connection_ ) + { + connection = connection_; + return *this; + } + + MirSurfaceCreateInfoKHR& setMirSurface( MirSurface* mirSurface_ ) + { + mirSurface = mirSurface_; + return *this; + } + + operator VkMirSurfaceCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkMirSurfaceCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( MirSurfaceCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( connection == rhs.connection ) + && ( mirSurface == rhs.mirSurface ); + } + + bool operator!=( MirSurfaceCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMirSurfaceCreateInfoKHR; + + public: + const void* pNext = nullptr; + 
MirSurfaceCreateFlagsKHR flags; + MirConnection* connection; + MirSurface* mirSurface; + }; + static_assert( sizeof( MirSurfaceCreateInfoKHR ) == sizeof( VkMirSurfaceCreateInfoKHR ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + +#ifdef VK_USE_PLATFORM_VI_NN + struct ViSurfaceCreateInfoNN + { + ViSurfaceCreateInfoNN( ViSurfaceCreateFlagsNN flags_ = ViSurfaceCreateFlagsNN(), + void* window_ = nullptr ) + : flags( flags_ ) + , window( window_ ) + { + } + + ViSurfaceCreateInfoNN( VkViSurfaceCreateInfoNN const & rhs ) + { + memcpy( this, &rhs, sizeof( ViSurfaceCreateInfoNN ) ); + } + + ViSurfaceCreateInfoNN& operator=( VkViSurfaceCreateInfoNN const & rhs ) + { + memcpy( this, &rhs, sizeof( ViSurfaceCreateInfoNN ) ); + return *this; + } + ViSurfaceCreateInfoNN& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ViSurfaceCreateInfoNN& setFlags( ViSurfaceCreateFlagsNN flags_ ) + { + flags = flags_; + return *this; + } + + ViSurfaceCreateInfoNN& setWindow( void* window_ ) + { + window = window_; + return *this; + } + + operator VkViSurfaceCreateInfoNN const&() const + { + return *reinterpret_cast(this); + } + + operator VkViSurfaceCreateInfoNN &() + { + return *reinterpret_cast(this); + } + + bool operator==( ViSurfaceCreateInfoNN const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( window == rhs.window ); + } + + bool operator!=( ViSurfaceCreateInfoNN const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eViSurfaceCreateInfoNN; + + public: + const void* pNext = nullptr; + ViSurfaceCreateFlagsNN flags; + void* window; + }; + static_assert( sizeof( ViSurfaceCreateInfoNN ) == sizeof( VkViSurfaceCreateInfoNN ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + struct WaylandSurfaceCreateInfoKHR + { + WaylandSurfaceCreateInfoKHR( WaylandSurfaceCreateFlagsKHR flags_ = WaylandSurfaceCreateFlagsKHR(), + struct wl_display* display_ = nullptr, + struct wl_surface* surface_ = nullptr ) + : flags( flags_ ) + , display( display_ ) + , surface( surface_ ) + { + } + + WaylandSurfaceCreateInfoKHR( VkWaylandSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( WaylandSurfaceCreateInfoKHR ) ); + } + + WaylandSurfaceCreateInfoKHR& operator=( VkWaylandSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( WaylandSurfaceCreateInfoKHR ) ); + return *this; + } + WaylandSurfaceCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + WaylandSurfaceCreateInfoKHR& setFlags( WaylandSurfaceCreateFlagsKHR flags_ ) + { + flags = flags_; + return *this; + } + + WaylandSurfaceCreateInfoKHR& setDisplay( struct wl_display* display_ ) + { + display = display_; + return *this; + } + + WaylandSurfaceCreateInfoKHR& setSurface( struct wl_surface* surface_ ) + { + surface = surface_; + return *this; + } + + operator VkWaylandSurfaceCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkWaylandSurfaceCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( WaylandSurfaceCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( display == rhs.display ) + && ( surface == rhs.surface ); + } + + bool operator!=( WaylandSurfaceCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eWaylandSurfaceCreateInfoKHR; + + public: + const void* pNext = nullptr; + WaylandSurfaceCreateFlagsKHR flags; + struct wl_display* display; + struct wl_surface* surface; + }; + static_assert( sizeof( WaylandSurfaceCreateInfoKHR ) == sizeof( VkWaylandSurfaceCreateInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct Win32SurfaceCreateInfoKHR + { + Win32SurfaceCreateInfoKHR( Win32SurfaceCreateFlagsKHR flags_ = Win32SurfaceCreateFlagsKHR(), + HINSTANCE hinstance_ = 0, + HWND hwnd_ = 0 ) + : flags( flags_ ) + , hinstance( hinstance_ ) + , hwnd( hwnd_ ) + { + } + + Win32SurfaceCreateInfoKHR( VkWin32SurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( Win32SurfaceCreateInfoKHR ) ); + } + + Win32SurfaceCreateInfoKHR& operator=( VkWin32SurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( Win32SurfaceCreateInfoKHR ) ); + return *this; + } + Win32SurfaceCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + Win32SurfaceCreateInfoKHR& setFlags( Win32SurfaceCreateFlagsKHR flags_ ) + { + flags = flags_; + return *this; + } + + Win32SurfaceCreateInfoKHR& setHinstance( HINSTANCE hinstance_ ) + { + hinstance = hinstance_; + return *this; + } + + Win32SurfaceCreateInfoKHR& setHwnd( HWND hwnd_ ) + { + hwnd = hwnd_; + return *this; + } + + operator VkWin32SurfaceCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkWin32SurfaceCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( Win32SurfaceCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( hinstance == rhs.hinstance ) + && ( hwnd == rhs.hwnd ); + } + + bool operator!=( Win32SurfaceCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eWin32SurfaceCreateInfoKHR; + + public: + const void* pNext = nullptr; + Win32SurfaceCreateFlagsKHR flags; + HINSTANCE hinstance; + HWND hwnd; + }; + static_assert( sizeof( Win32SurfaceCreateInfoKHR ) == sizeof( VkWin32SurfaceCreateInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + struct XlibSurfaceCreateInfoKHR + { + XlibSurfaceCreateInfoKHR( XlibSurfaceCreateFlagsKHR flags_ = XlibSurfaceCreateFlagsKHR(), + Display* dpy_ = nullptr, + Window window_ = 0 ) + : flags( flags_ ) + , dpy( dpy_ ) + , window( window_ ) + { + } + + XlibSurfaceCreateInfoKHR( VkXlibSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( XlibSurfaceCreateInfoKHR ) ); + } + + XlibSurfaceCreateInfoKHR& operator=( VkXlibSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( XlibSurfaceCreateInfoKHR ) ); + return *this; + } + XlibSurfaceCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + XlibSurfaceCreateInfoKHR& setFlags( XlibSurfaceCreateFlagsKHR flags_ ) + { + flags = flags_; + return *this; + } + + XlibSurfaceCreateInfoKHR& setDpy( Display* dpy_ ) + { + dpy = dpy_; + return *this; + } + + XlibSurfaceCreateInfoKHR& setWindow( Window window_ ) + { + window = window_; + return *this; + } + + operator VkXlibSurfaceCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkXlibSurfaceCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( XlibSurfaceCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( dpy == rhs.dpy ) + && ( window == rhs.window ); + } + + bool operator!=( XlibSurfaceCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eXlibSurfaceCreateInfoKHR; + + public: + const void* pNext = nullptr; + XlibSurfaceCreateFlagsKHR flags; + Display* dpy; + Window window; + }; + static_assert( sizeof( XlibSurfaceCreateInfoKHR ) == sizeof( VkXlibSurfaceCreateInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + struct XcbSurfaceCreateInfoKHR + { + XcbSurfaceCreateInfoKHR( XcbSurfaceCreateFlagsKHR flags_ = XcbSurfaceCreateFlagsKHR(), + xcb_connection_t* connection_ = nullptr, + xcb_window_t window_ = 0 ) + : flags( flags_ ) + , connection( connection_ ) + , window( window_ ) + { + } + + XcbSurfaceCreateInfoKHR( VkXcbSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( XcbSurfaceCreateInfoKHR ) ); + } + + XcbSurfaceCreateInfoKHR& operator=( VkXcbSurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( XcbSurfaceCreateInfoKHR ) ); + return *this; + } + XcbSurfaceCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + XcbSurfaceCreateInfoKHR& setFlags( XcbSurfaceCreateFlagsKHR flags_ ) + { + flags = flags_; + return *this; + } + + XcbSurfaceCreateInfoKHR& setConnection( xcb_connection_t* connection_ ) + { + connection = connection_; + return *this; + } + + XcbSurfaceCreateInfoKHR& setWindow( xcb_window_t window_ ) + { + window = window_; + return *this; + } + + operator VkXcbSurfaceCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkXcbSurfaceCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( XcbSurfaceCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( connection == rhs.connection ) + && ( window == rhs.window ); + } + + bool operator!=( XcbSurfaceCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eXcbSurfaceCreateInfoKHR; + + public: + const void* pNext = nullptr; + XcbSurfaceCreateFlagsKHR flags; + xcb_connection_t* connection; + xcb_window_t window; + }; + static_assert( sizeof( XcbSurfaceCreateInfoKHR ) == sizeof( VkXcbSurfaceCreateInfoKHR ), "struct and wrapper have different size!" 
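// Minimal sketch, platform-guarded and assuming the xcb connection and window come from the
// window-system integration code: the per-platform SurfaceCreateInfo wrappers are all used the
// same way. Error handling is omitted.
#ifdef VK_USE_PLATFORM_XCB_KHR
static VkSurfaceKHR createXcbSurfaceSketch( VkInstance instance,
                                            xcb_connection_t* connection,
                                            xcb_window_t window )
{
    vk::XcbSurfaceCreateInfoKHR info = vk::XcbSurfaceCreateInfoKHR()
        .setConnection( connection )
        .setWindow( window );
    const VkXcbSurfaceCreateInfoKHR& raw = info;
    VkSurfaceKHR surface = VK_NULL_HANDLE;
    vkCreateXcbSurfaceKHR( instance, &raw, nullptr, &surface );
    return surface;
}
#endif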
); +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + + struct DebugMarkerMarkerInfoEXT + { + DebugMarkerMarkerInfoEXT( const char* pMarkerName_ = nullptr, + std::array<float,4> const& color_ = { { 0, 0, 0, 0 } } ) + : pMarkerName( pMarkerName_ ) + { + memcpy( &color, color_.data(), 4 * sizeof( float ) ); + } + + DebugMarkerMarkerInfoEXT( VkDebugMarkerMarkerInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugMarkerMarkerInfoEXT ) ); + } + + DebugMarkerMarkerInfoEXT& operator=( VkDebugMarkerMarkerInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugMarkerMarkerInfoEXT ) ); + return *this; + } + DebugMarkerMarkerInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DebugMarkerMarkerInfoEXT& setPMarkerName( const char* pMarkerName_ ) + { + pMarkerName = pMarkerName_; + return *this; + } + + DebugMarkerMarkerInfoEXT& setColor( std::array<float,4> color_ ) + { + memcpy( &color, color_.data(), 4 * sizeof( float ) ); + return *this; + } + + operator VkDebugMarkerMarkerInfoEXT const&() const + { + return *reinterpret_cast<const VkDebugMarkerMarkerInfoEXT*>(this); + } + + operator VkDebugMarkerMarkerInfoEXT &() + { + return *reinterpret_cast<VkDebugMarkerMarkerInfoEXT*>(this); + } + + bool operator==( DebugMarkerMarkerInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pMarkerName == rhs.pMarkerName ) + && ( memcmp( color, rhs.color, 4 * sizeof( float ) ) == 0 ); + } + + bool operator!=( DebugMarkerMarkerInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDebugMarkerMarkerInfoEXT; + + public: + const void* pNext = nullptr; + const char* pMarkerName; + float color[4]; + }; + static_assert( sizeof( DebugMarkerMarkerInfoEXT ) == sizeof( VkDebugMarkerMarkerInfoEXT ), "struct and wrapper have different size!" ); + + struct DedicatedAllocationImageCreateInfoNV + { + DedicatedAllocationImageCreateInfoNV( Bool32 dedicatedAllocation_ = 0 ) + : dedicatedAllocation( dedicatedAllocation_ ) + { + } + + DedicatedAllocationImageCreateInfoNV( VkDedicatedAllocationImageCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( DedicatedAllocationImageCreateInfoNV ) ); + } + + DedicatedAllocationImageCreateInfoNV& operator=( VkDedicatedAllocationImageCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( DedicatedAllocationImageCreateInfoNV ) ); + return *this; + } + DedicatedAllocationImageCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DedicatedAllocationImageCreateInfoNV& setDedicatedAllocation( Bool32 dedicatedAllocation_ ) + { + dedicatedAllocation = dedicatedAllocation_; + return *this; + } + + operator VkDedicatedAllocationImageCreateInfoNV const&() const + { + return *reinterpret_cast<const VkDedicatedAllocationImageCreateInfoNV*>(this); + } + + operator VkDedicatedAllocationImageCreateInfoNV &() + { + return *reinterpret_cast<VkDedicatedAllocationImageCreateInfoNV*>(this); + } + + bool operator==( DedicatedAllocationImageCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( dedicatedAllocation == rhs.dedicatedAllocation ); + } + + bool operator!=( DedicatedAllocationImageCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDedicatedAllocationImageCreateInfoNV; + + public: + const void* pNext = nullptr; + Bool32 dedicatedAllocation; + }; + static_assert( sizeof( DedicatedAllocationImageCreateInfoNV ) == sizeof( VkDedicatedAllocationImageCreateInfoNV ), "struct and wrapper have different size!"
); + + struct DedicatedAllocationBufferCreateInfoNV + { + DedicatedAllocationBufferCreateInfoNV( Bool32 dedicatedAllocation_ = 0 ) + : dedicatedAllocation( dedicatedAllocation_ ) + { + } + + DedicatedAllocationBufferCreateInfoNV( VkDedicatedAllocationBufferCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( DedicatedAllocationBufferCreateInfoNV ) ); + } + + DedicatedAllocationBufferCreateInfoNV& operator=( VkDedicatedAllocationBufferCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( DedicatedAllocationBufferCreateInfoNV ) ); + return *this; + } + DedicatedAllocationBufferCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DedicatedAllocationBufferCreateInfoNV& setDedicatedAllocation( Bool32 dedicatedAllocation_ ) + { + dedicatedAllocation = dedicatedAllocation_; + return *this; + } + + operator VkDedicatedAllocationBufferCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkDedicatedAllocationBufferCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( DedicatedAllocationBufferCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( dedicatedAllocation == rhs.dedicatedAllocation ); + } + + bool operator!=( DedicatedAllocationBufferCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDedicatedAllocationBufferCreateInfoNV; + + public: + const void* pNext = nullptr; + Bool32 dedicatedAllocation; + }; + static_assert( sizeof( DedicatedAllocationBufferCreateInfoNV ) == sizeof( VkDedicatedAllocationBufferCreateInfoNV ), "struct and wrapper have different size!" ); + + struct DedicatedAllocationMemoryAllocateInfoNV + { + DedicatedAllocationMemoryAllocateInfoNV( Image image_ = Image(), + Buffer buffer_ = Buffer() ) + : image( image_ ) + , buffer( buffer_ ) + { + } + + DedicatedAllocationMemoryAllocateInfoNV( VkDedicatedAllocationMemoryAllocateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( DedicatedAllocationMemoryAllocateInfoNV ) ); + } + + DedicatedAllocationMemoryAllocateInfoNV& operator=( VkDedicatedAllocationMemoryAllocateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( DedicatedAllocationMemoryAllocateInfoNV ) ); + return *this; + } + DedicatedAllocationMemoryAllocateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DedicatedAllocationMemoryAllocateInfoNV& setImage( Image image_ ) + { + image = image_; + return *this; + } + + DedicatedAllocationMemoryAllocateInfoNV& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + operator VkDedicatedAllocationMemoryAllocateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkDedicatedAllocationMemoryAllocateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( DedicatedAllocationMemoryAllocateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( image == rhs.image ) + && ( buffer == rhs.buffer ); + } + + bool operator!=( DedicatedAllocationMemoryAllocateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDedicatedAllocationMemoryAllocateInfoNV; + + public: + const void* pNext = nullptr; + Image image; + Buffer buffer; + }; + static_assert( sizeof( DedicatedAllocationMemoryAllocateInfoNV ) == sizeof( VkDedicatedAllocationMemoryAllocateInfoNV ), "struct and wrapper have different size!" 
); + +#ifdef VK_USE_PLATFORM_WIN32_NV + struct ExportMemoryWin32HandleInfoNV + { + ExportMemoryWin32HandleInfoNV( const SECURITY_ATTRIBUTES* pAttributes_ = nullptr, + DWORD dwAccess_ = 0 ) + : pAttributes( pAttributes_ ) + , dwAccess( dwAccess_ ) + { + } + + ExportMemoryWin32HandleInfoNV( VkExportMemoryWin32HandleInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportMemoryWin32HandleInfoNV ) ); + } + + ExportMemoryWin32HandleInfoNV& operator=( VkExportMemoryWin32HandleInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportMemoryWin32HandleInfoNV ) ); + return *this; + } + ExportMemoryWin32HandleInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExportMemoryWin32HandleInfoNV& setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ ) + { + pAttributes = pAttributes_; + return *this; + } + + ExportMemoryWin32HandleInfoNV& setDwAccess( DWORD dwAccess_ ) + { + dwAccess = dwAccess_; + return *this; + } + + operator VkExportMemoryWin32HandleInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkExportMemoryWin32HandleInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExportMemoryWin32HandleInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pAttributes == rhs.pAttributes ) + && ( dwAccess == rhs.dwAccess ); + } + + bool operator!=( ExportMemoryWin32HandleInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExportMemoryWin32HandleInfoNV; + + public: + const void* pNext = nullptr; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + }; + static_assert( sizeof( ExportMemoryWin32HandleInfoNV ) == sizeof( VkExportMemoryWin32HandleInfoNV ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_NV*/ + +#ifdef VK_USE_PLATFORM_WIN32_NV + struct Win32KeyedMutexAcquireReleaseInfoNV + { + Win32KeyedMutexAcquireReleaseInfoNV( uint32_t acquireCount_ = 0, + const DeviceMemory* pAcquireSyncs_ = nullptr, + const uint64_t* pAcquireKeys_ = nullptr, + const uint32_t* pAcquireTimeoutMilliseconds_ = nullptr, + uint32_t releaseCount_ = 0, + const DeviceMemory* pReleaseSyncs_ = nullptr, + const uint64_t* pReleaseKeys_ = nullptr ) + : acquireCount( acquireCount_ ) + , pAcquireSyncs( pAcquireSyncs_ ) + , pAcquireKeys( pAcquireKeys_ ) + , pAcquireTimeoutMilliseconds( pAcquireTimeoutMilliseconds_ ) + , releaseCount( releaseCount_ ) + , pReleaseSyncs( pReleaseSyncs_ ) + , pReleaseKeys( pReleaseKeys_ ) + { + } + + Win32KeyedMutexAcquireReleaseInfoNV( VkWin32KeyedMutexAcquireReleaseInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( Win32KeyedMutexAcquireReleaseInfoNV ) ); + } + + Win32KeyedMutexAcquireReleaseInfoNV& operator=( VkWin32KeyedMutexAcquireReleaseInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( Win32KeyedMutexAcquireReleaseInfoNV ) ); + return *this; + } + Win32KeyedMutexAcquireReleaseInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV& setAcquireCount( uint32_t acquireCount_ ) + { + acquireCount = acquireCount_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV& setPAcquireSyncs( const DeviceMemory* pAcquireSyncs_ ) + { + pAcquireSyncs = pAcquireSyncs_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV& setPAcquireKeys( const uint64_t* pAcquireKeys_ ) + { + pAcquireKeys = pAcquireKeys_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV& setPAcquireTimeoutMilliseconds( const uint32_t* pAcquireTimeoutMilliseconds_ ) + { + pAcquireTimeoutMilliseconds = pAcquireTimeoutMilliseconds_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV& setReleaseCount( uint32_t releaseCount_ ) + { + releaseCount = releaseCount_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV& setPReleaseSyncs( const DeviceMemory* pReleaseSyncs_ ) + { + pReleaseSyncs = pReleaseSyncs_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV& setPReleaseKeys( const uint64_t* pReleaseKeys_ ) + { + pReleaseKeys = pReleaseKeys_; + return *this; + } + + operator VkWin32KeyedMutexAcquireReleaseInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkWin32KeyedMutexAcquireReleaseInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( Win32KeyedMutexAcquireReleaseInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( acquireCount == rhs.acquireCount ) + && ( pAcquireSyncs == rhs.pAcquireSyncs ) + && ( pAcquireKeys == rhs.pAcquireKeys ) + && ( pAcquireTimeoutMilliseconds == rhs.pAcquireTimeoutMilliseconds ) + && ( releaseCount == rhs.releaseCount ) + && ( pReleaseSyncs == rhs.pReleaseSyncs ) + && ( pReleaseKeys == rhs.pReleaseKeys ); + } + + bool operator!=( Win32KeyedMutexAcquireReleaseInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eWin32KeyedMutexAcquireReleaseInfoNV; + + public: + const void* pNext = nullptr; + uint32_t acquireCount; + const DeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeoutMilliseconds; + uint32_t releaseCount; + const DeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; + }; + static_assert( sizeof( Win32KeyedMutexAcquireReleaseInfoNV ) == sizeof( 
VkWin32KeyedMutexAcquireReleaseInfoNV ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_WIN32_NV*/ + + struct DeviceGeneratedCommandsFeaturesNVX + { + DeviceGeneratedCommandsFeaturesNVX( Bool32 computeBindingPointSupport_ = 0 ) + : computeBindingPointSupport( computeBindingPointSupport_ ) + { + } + + DeviceGeneratedCommandsFeaturesNVX( VkDeviceGeneratedCommandsFeaturesNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGeneratedCommandsFeaturesNVX ) ); + } + + DeviceGeneratedCommandsFeaturesNVX& operator=( VkDeviceGeneratedCommandsFeaturesNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGeneratedCommandsFeaturesNVX ) ); + return *this; + } + DeviceGeneratedCommandsFeaturesNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceGeneratedCommandsFeaturesNVX& setComputeBindingPointSupport( Bool32 computeBindingPointSupport_ ) + { + computeBindingPointSupport = computeBindingPointSupport_; + return *this; + } + + operator VkDeviceGeneratedCommandsFeaturesNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceGeneratedCommandsFeaturesNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceGeneratedCommandsFeaturesNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( computeBindingPointSupport == rhs.computeBindingPointSupport ); + } + + bool operator!=( DeviceGeneratedCommandsFeaturesNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGeneratedCommandsFeaturesNVX; + + public: + const void* pNext = nullptr; + Bool32 computeBindingPointSupport; + }; + static_assert( sizeof( DeviceGeneratedCommandsFeaturesNVX ) == sizeof( VkDeviceGeneratedCommandsFeaturesNVX ), "struct and wrapper have different size!" 
); + + struct DeviceGeneratedCommandsLimitsNVX + { + DeviceGeneratedCommandsLimitsNVX( uint32_t maxIndirectCommandsLayoutTokenCount_ = 0, + uint32_t maxObjectEntryCounts_ = 0, + uint32_t minSequenceCountBufferOffsetAlignment_ = 0, + uint32_t minSequenceIndexBufferOffsetAlignment_ = 0, + uint32_t minCommandsTokenBufferOffsetAlignment_ = 0 ) + : maxIndirectCommandsLayoutTokenCount( maxIndirectCommandsLayoutTokenCount_ ) + , maxObjectEntryCounts( maxObjectEntryCounts_ ) + , minSequenceCountBufferOffsetAlignment( minSequenceCountBufferOffsetAlignment_ ) + , minSequenceIndexBufferOffsetAlignment( minSequenceIndexBufferOffsetAlignment_ ) + , minCommandsTokenBufferOffsetAlignment( minCommandsTokenBufferOffsetAlignment_ ) + { + } + + DeviceGeneratedCommandsLimitsNVX( VkDeviceGeneratedCommandsLimitsNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGeneratedCommandsLimitsNVX ) ); + } + + DeviceGeneratedCommandsLimitsNVX& operator=( VkDeviceGeneratedCommandsLimitsNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGeneratedCommandsLimitsNVX ) ); + return *this; + } + DeviceGeneratedCommandsLimitsNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceGeneratedCommandsLimitsNVX& setMaxIndirectCommandsLayoutTokenCount( uint32_t maxIndirectCommandsLayoutTokenCount_ ) + { + maxIndirectCommandsLayoutTokenCount = maxIndirectCommandsLayoutTokenCount_; + return *this; + } + + DeviceGeneratedCommandsLimitsNVX& setMaxObjectEntryCounts( uint32_t maxObjectEntryCounts_ ) + { + maxObjectEntryCounts = maxObjectEntryCounts_; + return *this; + } + + DeviceGeneratedCommandsLimitsNVX& setMinSequenceCountBufferOffsetAlignment( uint32_t minSequenceCountBufferOffsetAlignment_ ) + { + minSequenceCountBufferOffsetAlignment = minSequenceCountBufferOffsetAlignment_; + return *this; + } + + DeviceGeneratedCommandsLimitsNVX& setMinSequenceIndexBufferOffsetAlignment( uint32_t minSequenceIndexBufferOffsetAlignment_ ) + { + minSequenceIndexBufferOffsetAlignment = minSequenceIndexBufferOffsetAlignment_; + return *this; + } + + DeviceGeneratedCommandsLimitsNVX& setMinCommandsTokenBufferOffsetAlignment( uint32_t minCommandsTokenBufferOffsetAlignment_ ) + { + minCommandsTokenBufferOffsetAlignment = minCommandsTokenBufferOffsetAlignment_; + return *this; + } + + operator VkDeviceGeneratedCommandsLimitsNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceGeneratedCommandsLimitsNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceGeneratedCommandsLimitsNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxIndirectCommandsLayoutTokenCount == rhs.maxIndirectCommandsLayoutTokenCount ) + && ( maxObjectEntryCounts == rhs.maxObjectEntryCounts ) + && ( minSequenceCountBufferOffsetAlignment == rhs.minSequenceCountBufferOffsetAlignment ) + && ( minSequenceIndexBufferOffsetAlignment == rhs.minSequenceIndexBufferOffsetAlignment ) + && ( minCommandsTokenBufferOffsetAlignment == rhs.minCommandsTokenBufferOffsetAlignment ); + } + + bool operator!=( DeviceGeneratedCommandsLimitsNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGeneratedCommandsLimitsNVX; + + public: + const void* pNext = nullptr; + uint32_t maxIndirectCommandsLayoutTokenCount; + uint32_t maxObjectEntryCounts; + uint32_t minSequenceCountBufferOffsetAlignment; + uint32_t minSequenceIndexBufferOffsetAlignment; + uint32_t minCommandsTokenBufferOffsetAlignment; + }; + static_assert( 
+
+  struct CmdReserveSpaceForCommandsInfoNVX
+  {
+    CmdReserveSpaceForCommandsInfoNVX( ObjectTableNVX objectTable_ = ObjectTableNVX(),
+                                       IndirectCommandsLayoutNVX indirectCommandsLayout_ = IndirectCommandsLayoutNVX(),
+                                       uint32_t maxSequencesCount_ = 0 )
+      : objectTable( objectTable_ )
+      , indirectCommandsLayout( indirectCommandsLayout_ )
+      , maxSequencesCount( maxSequencesCount_ )
+    {
+    }
+
+    CmdReserveSpaceForCommandsInfoNVX( VkCmdReserveSpaceForCommandsInfoNVX const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( CmdReserveSpaceForCommandsInfoNVX ) );
+    }
+
+    CmdReserveSpaceForCommandsInfoNVX& operator=( VkCmdReserveSpaceForCommandsInfoNVX const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( CmdReserveSpaceForCommandsInfoNVX ) );
+      return *this;
+    }
+    CmdReserveSpaceForCommandsInfoNVX& setPNext( const void* pNext_ )
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    CmdReserveSpaceForCommandsInfoNVX& setObjectTable( ObjectTableNVX objectTable_ )
+    {
+      objectTable = objectTable_;
+      return *this;
+    }
+
+    CmdReserveSpaceForCommandsInfoNVX& setIndirectCommandsLayout( IndirectCommandsLayoutNVX indirectCommandsLayout_ )
+    {
+      indirectCommandsLayout = indirectCommandsLayout_;
+      return *this;
+    }
+
+    CmdReserveSpaceForCommandsInfoNVX& setMaxSequencesCount( uint32_t maxSequencesCount_ )
+    {
+      maxSequencesCount = maxSequencesCount_;
+      return *this;
+    }
+
+    operator VkCmdReserveSpaceForCommandsInfoNVX const&() const
+    {
+      return *reinterpret_cast<const VkCmdReserveSpaceForCommandsInfoNVX*>(this);
+    }
+
+    operator VkCmdReserveSpaceForCommandsInfoNVX &()
+    {
+      return *reinterpret_cast<VkCmdReserveSpaceForCommandsInfoNVX*>(this);
+    }
+
+    bool operator==( CmdReserveSpaceForCommandsInfoNVX const& rhs ) const
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( objectTable == rhs.objectTable )
+          && ( indirectCommandsLayout == rhs.indirectCommandsLayout )
+          && ( maxSequencesCount == rhs.maxSequencesCount );
+    }
+
+    bool operator!=( CmdReserveSpaceForCommandsInfoNVX const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+  private:
+    StructureType sType = StructureType::eCmdReserveSpaceForCommandsInfoNVX;
+
+  public:
+    const void* pNext = nullptr;
+    ObjectTableNVX objectTable;
+    IndirectCommandsLayoutNVX indirectCommandsLayout;
+    uint32_t maxSequencesCount;
+  };
+  static_assert( sizeof( CmdReserveSpaceForCommandsInfoNVX ) == sizeof( VkCmdReserveSpaceForCommandsInfoNVX ), "struct and wrapper have different size!"
); + + struct PhysicalDeviceFeatures2 + { + PhysicalDeviceFeatures2( PhysicalDeviceFeatures features_ = PhysicalDeviceFeatures() ) + : features( features_ ) + { + } + + PhysicalDeviceFeatures2( VkPhysicalDeviceFeatures2 const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceFeatures2 ) ); + } + + PhysicalDeviceFeatures2& operator=( VkPhysicalDeviceFeatures2 const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceFeatures2 ) ); + return *this; + } + PhysicalDeviceFeatures2& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFeatures2& setFeatures( PhysicalDeviceFeatures features_ ) + { + features = features_; + return *this; + } + + operator VkPhysicalDeviceFeatures2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceFeatures2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceFeatures2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( features == rhs.features ); + } + + bool operator!=( PhysicalDeviceFeatures2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceFeatures2; + + public: + void* pNext = nullptr; + PhysicalDeviceFeatures features; + }; + static_assert( sizeof( PhysicalDeviceFeatures2 ) == sizeof( VkPhysicalDeviceFeatures2 ), "struct and wrapper have different size!" ); + + using PhysicalDeviceFeatures2KHR = PhysicalDeviceFeatures2; + + struct PhysicalDevicePushDescriptorPropertiesKHR + { + PhysicalDevicePushDescriptorPropertiesKHR( uint32_t maxPushDescriptors_ = 0 ) + : maxPushDescriptors( maxPushDescriptors_ ) + { + } + + PhysicalDevicePushDescriptorPropertiesKHR( VkPhysicalDevicePushDescriptorPropertiesKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDevicePushDescriptorPropertiesKHR ) ); + } + + PhysicalDevicePushDescriptorPropertiesKHR& operator=( VkPhysicalDevicePushDescriptorPropertiesKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDevicePushDescriptorPropertiesKHR ) ); + return *this; + } + PhysicalDevicePushDescriptorPropertiesKHR& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDevicePushDescriptorPropertiesKHR& setMaxPushDescriptors( uint32_t maxPushDescriptors_ ) + { + maxPushDescriptors = maxPushDescriptors_; + return *this; + } + + operator VkPhysicalDevicePushDescriptorPropertiesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDevicePushDescriptorPropertiesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDevicePushDescriptorPropertiesKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxPushDescriptors == rhs.maxPushDescriptors ); + } + + bool operator!=( PhysicalDevicePushDescriptorPropertiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDevicePushDescriptorPropertiesKHR; + + public: + void* pNext = nullptr; + uint32_t maxPushDescriptors; + }; + static_assert( sizeof( PhysicalDevicePushDescriptorPropertiesKHR ) == sizeof( VkPhysicalDevicePushDescriptorPropertiesKHR ), "struct and wrapper have different size!" 
); + + struct PresentRegionsKHR + { + PresentRegionsKHR( uint32_t swapchainCount_ = 0, + const PresentRegionKHR* pRegions_ = nullptr ) + : swapchainCount( swapchainCount_ ) + , pRegions( pRegions_ ) + { + } + + PresentRegionsKHR( VkPresentRegionsKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentRegionsKHR ) ); + } + + PresentRegionsKHR& operator=( VkPresentRegionsKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentRegionsKHR ) ); + return *this; + } + PresentRegionsKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PresentRegionsKHR& setSwapchainCount( uint32_t swapchainCount_ ) + { + swapchainCount = swapchainCount_; + return *this; + } + + PresentRegionsKHR& setPRegions( const PresentRegionKHR* pRegions_ ) + { + pRegions = pRegions_; + return *this; + } + + operator VkPresentRegionsKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkPresentRegionsKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( PresentRegionsKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchainCount == rhs.swapchainCount ) + && ( pRegions == rhs.pRegions ); + } + + bool operator!=( PresentRegionsKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePresentRegionsKHR; + + public: + const void* pNext = nullptr; + uint32_t swapchainCount; + const PresentRegionKHR* pRegions; + }; + static_assert( sizeof( PresentRegionsKHR ) == sizeof( VkPresentRegionsKHR ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceVariablePointerFeatures + { + PhysicalDeviceVariablePointerFeatures( Bool32 variablePointersStorageBuffer_ = 0, + Bool32 variablePointers_ = 0 ) + : variablePointersStorageBuffer( variablePointersStorageBuffer_ ) + , variablePointers( variablePointers_ ) + { + } + + PhysicalDeviceVariablePointerFeatures( VkPhysicalDeviceVariablePointerFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceVariablePointerFeatures ) ); + } + + PhysicalDeviceVariablePointerFeatures& operator=( VkPhysicalDeviceVariablePointerFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceVariablePointerFeatures ) ); + return *this; + } + PhysicalDeviceVariablePointerFeatures& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceVariablePointerFeatures& setVariablePointersStorageBuffer( Bool32 variablePointersStorageBuffer_ ) + { + variablePointersStorageBuffer = variablePointersStorageBuffer_; + return *this; + } + + PhysicalDeviceVariablePointerFeatures& setVariablePointers( Bool32 variablePointers_ ) + { + variablePointers = variablePointers_; + return *this; + } + + operator VkPhysicalDeviceVariablePointerFeatures const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceVariablePointerFeatures &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceVariablePointerFeatures const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( variablePointersStorageBuffer == rhs.variablePointersStorageBuffer ) + && ( variablePointers == rhs.variablePointers ); + } + + bool operator!=( PhysicalDeviceVariablePointerFeatures const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceVariablePointerFeatures; + + public: + void* pNext = nullptr; + Bool32 variablePointersStorageBuffer; + Bool32 variablePointers; + }; + static_assert( sizeof( 
PhysicalDeviceVariablePointerFeatures ) == sizeof( VkPhysicalDeviceVariablePointerFeatures ), "struct and wrapper have different size!" ); + + using PhysicalDeviceVariablePointerFeaturesKHR = PhysicalDeviceVariablePointerFeatures; + + struct PhysicalDeviceIDProperties + { + operator VkPhysicalDeviceIDProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceIDProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceIDProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memcmp( deviceUUID, rhs.deviceUUID, VK_UUID_SIZE * sizeof( uint8_t ) ) == 0 ) + && ( memcmp( driverUUID, rhs.driverUUID, VK_UUID_SIZE * sizeof( uint8_t ) ) == 0 ) + && ( memcmp( deviceLUID, rhs.deviceLUID, VK_LUID_SIZE * sizeof( uint8_t ) ) == 0 ) + && ( deviceNodeMask == rhs.deviceNodeMask ) + && ( deviceLUIDValid == rhs.deviceLUIDValid ); + } + + bool operator!=( PhysicalDeviceIDProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceIdProperties; + + public: + void* pNext = nullptr; + uint8_t deviceUUID[VK_UUID_SIZE]; + uint8_t driverUUID[VK_UUID_SIZE]; + uint8_t deviceLUID[VK_LUID_SIZE]; + uint32_t deviceNodeMask; + Bool32 deviceLUIDValid; + }; + static_assert( sizeof( PhysicalDeviceIDProperties ) == sizeof( VkPhysicalDeviceIDProperties ), "struct and wrapper have different size!" ); + + using PhysicalDeviceIDPropertiesKHR = PhysicalDeviceIDProperties; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ExportMemoryWin32HandleInfoKHR + { + ExportMemoryWin32HandleInfoKHR( const SECURITY_ATTRIBUTES* pAttributes_ = nullptr, + DWORD dwAccess_ = 0, + LPCWSTR name_ = 0 ) + : pAttributes( pAttributes_ ) + , dwAccess( dwAccess_ ) + , name( name_ ) + { + } + + ExportMemoryWin32HandleInfoKHR( VkExportMemoryWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportMemoryWin32HandleInfoKHR ) ); + } + + ExportMemoryWin32HandleInfoKHR& operator=( VkExportMemoryWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportMemoryWin32HandleInfoKHR ) ); + return *this; + } + ExportMemoryWin32HandleInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExportMemoryWin32HandleInfoKHR& setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ ) + { + pAttributes = pAttributes_; + return *this; + } + + ExportMemoryWin32HandleInfoKHR& setDwAccess( DWORD dwAccess_ ) + { + dwAccess = dwAccess_; + return *this; + } + + ExportMemoryWin32HandleInfoKHR& setName( LPCWSTR name_ ) + { + name = name_; + return *this; + } + + operator VkExportMemoryWin32HandleInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkExportMemoryWin32HandleInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExportMemoryWin32HandleInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pAttributes == rhs.pAttributes ) + && ( dwAccess == rhs.dwAccess ) + && ( name == rhs.name ); + } + + bool operator!=( ExportMemoryWin32HandleInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExportMemoryWin32HandleInfoKHR; + + public: + const void* pNext = nullptr; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; + }; + static_assert( sizeof( ExportMemoryWin32HandleInfoKHR ) == sizeof( VkExportMemoryWin32HandleInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct MemoryWin32HandlePropertiesKHR + { + operator VkMemoryWin32HandlePropertiesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryWin32HandlePropertiesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryWin32HandlePropertiesKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memoryTypeBits == rhs.memoryTypeBits ); + } + + bool operator!=( MemoryWin32HandlePropertiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryWin32HandlePropertiesKHR; + + public: + void* pNext = nullptr; + uint32_t memoryTypeBits; + }; + static_assert( sizeof( MemoryWin32HandlePropertiesKHR ) == sizeof( VkMemoryWin32HandlePropertiesKHR ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct MemoryFdPropertiesKHR + { + operator VkMemoryFdPropertiesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryFdPropertiesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryFdPropertiesKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memoryTypeBits == rhs.memoryTypeBits ); + } + + bool operator!=( MemoryFdPropertiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryFdPropertiesKHR; + + public: + void* pNext = nullptr; + uint32_t memoryTypeBits; + }; + static_assert( sizeof( MemoryFdPropertiesKHR ) == sizeof( VkMemoryFdPropertiesKHR ), "struct and wrapper have different size!" ); + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct Win32KeyedMutexAcquireReleaseInfoKHR + { + Win32KeyedMutexAcquireReleaseInfoKHR( uint32_t acquireCount_ = 0, + const DeviceMemory* pAcquireSyncs_ = nullptr, + const uint64_t* pAcquireKeys_ = nullptr, + const uint32_t* pAcquireTimeouts_ = nullptr, + uint32_t releaseCount_ = 0, + const DeviceMemory* pReleaseSyncs_ = nullptr, + const uint64_t* pReleaseKeys_ = nullptr ) + : acquireCount( acquireCount_ ) + , pAcquireSyncs( pAcquireSyncs_ ) + , pAcquireKeys( pAcquireKeys_ ) + , pAcquireTimeouts( pAcquireTimeouts_ ) + , releaseCount( releaseCount_ ) + , pReleaseSyncs( pReleaseSyncs_ ) + , pReleaseKeys( pReleaseKeys_ ) + { + } + + Win32KeyedMutexAcquireReleaseInfoKHR( VkWin32KeyedMutexAcquireReleaseInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( Win32KeyedMutexAcquireReleaseInfoKHR ) ); + } + + Win32KeyedMutexAcquireReleaseInfoKHR& operator=( VkWin32KeyedMutexAcquireReleaseInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( Win32KeyedMutexAcquireReleaseInfoKHR ) ); + return *this; + } + Win32KeyedMutexAcquireReleaseInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR& setAcquireCount( uint32_t acquireCount_ ) + { + acquireCount = acquireCount_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR& setPAcquireSyncs( const DeviceMemory* pAcquireSyncs_ ) + { + pAcquireSyncs = pAcquireSyncs_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR& setPAcquireKeys( const uint64_t* pAcquireKeys_ ) + { + pAcquireKeys = pAcquireKeys_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR& setPAcquireTimeouts( const uint32_t* pAcquireTimeouts_ ) + { + pAcquireTimeouts = pAcquireTimeouts_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR& setReleaseCount( uint32_t 
releaseCount_ ) + { + releaseCount = releaseCount_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR& setPReleaseSyncs( const DeviceMemory* pReleaseSyncs_ ) + { + pReleaseSyncs = pReleaseSyncs_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR& setPReleaseKeys( const uint64_t* pReleaseKeys_ ) + { + pReleaseKeys = pReleaseKeys_; + return *this; + } + + operator VkWin32KeyedMutexAcquireReleaseInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkWin32KeyedMutexAcquireReleaseInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( Win32KeyedMutexAcquireReleaseInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( acquireCount == rhs.acquireCount ) + && ( pAcquireSyncs == rhs.pAcquireSyncs ) + && ( pAcquireKeys == rhs.pAcquireKeys ) + && ( pAcquireTimeouts == rhs.pAcquireTimeouts ) + && ( releaseCount == rhs.releaseCount ) + && ( pReleaseSyncs == rhs.pReleaseSyncs ) + && ( pReleaseKeys == rhs.pReleaseKeys ); + } + + bool operator!=( Win32KeyedMutexAcquireReleaseInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eWin32KeyedMutexAcquireReleaseInfoKHR; + + public: + const void* pNext = nullptr; + uint32_t acquireCount; + const DeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeouts; + uint32_t releaseCount; + const DeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; + }; + static_assert( sizeof( Win32KeyedMutexAcquireReleaseInfoKHR ) == sizeof( VkWin32KeyedMutexAcquireReleaseInfoKHR ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ExportSemaphoreWin32HandleInfoKHR + { + ExportSemaphoreWin32HandleInfoKHR( const SECURITY_ATTRIBUTES* pAttributes_ = nullptr, + DWORD dwAccess_ = 0, + LPCWSTR name_ = 0 ) + : pAttributes( pAttributes_ ) + , dwAccess( dwAccess_ ) + , name( name_ ) + { + } + + ExportSemaphoreWin32HandleInfoKHR( VkExportSemaphoreWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportSemaphoreWin32HandleInfoKHR ) ); + } + + ExportSemaphoreWin32HandleInfoKHR& operator=( VkExportSemaphoreWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportSemaphoreWin32HandleInfoKHR ) ); + return *this; + } + ExportSemaphoreWin32HandleInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExportSemaphoreWin32HandleInfoKHR& setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ ) + { + pAttributes = pAttributes_; + return *this; + } + + ExportSemaphoreWin32HandleInfoKHR& setDwAccess( DWORD dwAccess_ ) + { + dwAccess = dwAccess_; + return *this; + } + + ExportSemaphoreWin32HandleInfoKHR& setName( LPCWSTR name_ ) + { + name = name_; + return *this; + } + + operator VkExportSemaphoreWin32HandleInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkExportSemaphoreWin32HandleInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExportSemaphoreWin32HandleInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pAttributes == rhs.pAttributes ) + && ( dwAccess == rhs.dwAccess ) + && ( name == rhs.name ); + } + + bool operator!=( ExportSemaphoreWin32HandleInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExportSemaphoreWin32HandleInfoKHR; + + public: + const void* pNext = nullptr; + const 
SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; + }; + static_assert( sizeof( ExportSemaphoreWin32HandleInfoKHR ) == sizeof( VkExportSemaphoreWin32HandleInfoKHR ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct D3D12FenceSubmitInfoKHR + { + D3D12FenceSubmitInfoKHR( uint32_t waitSemaphoreValuesCount_ = 0, + const uint64_t* pWaitSemaphoreValues_ = nullptr, + uint32_t signalSemaphoreValuesCount_ = 0, + const uint64_t* pSignalSemaphoreValues_ = nullptr ) + : waitSemaphoreValuesCount( waitSemaphoreValuesCount_ ) + , pWaitSemaphoreValues( pWaitSemaphoreValues_ ) + , signalSemaphoreValuesCount( signalSemaphoreValuesCount_ ) + , pSignalSemaphoreValues( pSignalSemaphoreValues_ ) + { + } + + D3D12FenceSubmitInfoKHR( VkD3D12FenceSubmitInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( D3D12FenceSubmitInfoKHR ) ); + } + + D3D12FenceSubmitInfoKHR& operator=( VkD3D12FenceSubmitInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( D3D12FenceSubmitInfoKHR ) ); + return *this; + } + D3D12FenceSubmitInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + D3D12FenceSubmitInfoKHR& setWaitSemaphoreValuesCount( uint32_t waitSemaphoreValuesCount_ ) + { + waitSemaphoreValuesCount = waitSemaphoreValuesCount_; + return *this; + } + + D3D12FenceSubmitInfoKHR& setPWaitSemaphoreValues( const uint64_t* pWaitSemaphoreValues_ ) + { + pWaitSemaphoreValues = pWaitSemaphoreValues_; + return *this; + } + + D3D12FenceSubmitInfoKHR& setSignalSemaphoreValuesCount( uint32_t signalSemaphoreValuesCount_ ) + { + signalSemaphoreValuesCount = signalSemaphoreValuesCount_; + return *this; + } + + D3D12FenceSubmitInfoKHR& setPSignalSemaphoreValues( const uint64_t* pSignalSemaphoreValues_ ) + { + pSignalSemaphoreValues = pSignalSemaphoreValues_; + return *this; + } + + operator VkD3D12FenceSubmitInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkD3D12FenceSubmitInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( D3D12FenceSubmitInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreValuesCount == rhs.waitSemaphoreValuesCount ) + && ( pWaitSemaphoreValues == rhs.pWaitSemaphoreValues ) + && ( signalSemaphoreValuesCount == rhs.signalSemaphoreValuesCount ) + && ( pSignalSemaphoreValues == rhs.pSignalSemaphoreValues ); + } + + bool operator!=( D3D12FenceSubmitInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eD3D12FenceSubmitInfoKHR; + + public: + const void* pNext = nullptr; + uint32_t waitSemaphoreValuesCount; + const uint64_t* pWaitSemaphoreValues; + uint32_t signalSemaphoreValuesCount; + const uint64_t* pSignalSemaphoreValues; + }; + static_assert( sizeof( D3D12FenceSubmitInfoKHR ) == sizeof( VkD3D12FenceSubmitInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ExportFenceWin32HandleInfoKHR + { + ExportFenceWin32HandleInfoKHR( const SECURITY_ATTRIBUTES* pAttributes_ = nullptr, + DWORD dwAccess_ = 0, + LPCWSTR name_ = 0 ) + : pAttributes( pAttributes_ ) + , dwAccess( dwAccess_ ) + , name( name_ ) + { + } + + ExportFenceWin32HandleInfoKHR( VkExportFenceWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportFenceWin32HandleInfoKHR ) ); + } + + ExportFenceWin32HandleInfoKHR& operator=( VkExportFenceWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportFenceWin32HandleInfoKHR ) ); + return *this; + } + ExportFenceWin32HandleInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExportFenceWin32HandleInfoKHR& setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ ) + { + pAttributes = pAttributes_; + return *this; + } + + ExportFenceWin32HandleInfoKHR& setDwAccess( DWORD dwAccess_ ) + { + dwAccess = dwAccess_; + return *this; + } + + ExportFenceWin32HandleInfoKHR& setName( LPCWSTR name_ ) + { + name = name_; + return *this; + } + + operator VkExportFenceWin32HandleInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkExportFenceWin32HandleInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExportFenceWin32HandleInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pAttributes == rhs.pAttributes ) + && ( dwAccess == rhs.dwAccess ) + && ( name == rhs.name ); + } + + bool operator!=( ExportFenceWin32HandleInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExportFenceWin32HandleInfoKHR; + + public: + const void* pNext = nullptr; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; + }; + static_assert( sizeof( ExportFenceWin32HandleInfoKHR ) == sizeof( VkExportFenceWin32HandleInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct PhysicalDeviceMultiviewFeatures + { + PhysicalDeviceMultiviewFeatures( Bool32 multiview_ = 0, + Bool32 multiviewGeometryShader_ = 0, + Bool32 multiviewTessellationShader_ = 0 ) + : multiview( multiview_ ) + , multiviewGeometryShader( multiviewGeometryShader_ ) + , multiviewTessellationShader( multiviewTessellationShader_ ) + { + } + + PhysicalDeviceMultiviewFeatures( VkPhysicalDeviceMultiviewFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceMultiviewFeatures ) ); + } + + PhysicalDeviceMultiviewFeatures& operator=( VkPhysicalDeviceMultiviewFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceMultiviewFeatures ) ); + return *this; + } + PhysicalDeviceMultiviewFeatures& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceMultiviewFeatures& setMultiview( Bool32 multiview_ ) + { + multiview = multiview_; + return *this; + } + + PhysicalDeviceMultiviewFeatures& setMultiviewGeometryShader( Bool32 multiviewGeometryShader_ ) + { + multiviewGeometryShader = multiviewGeometryShader_; + return *this; + } + + PhysicalDeviceMultiviewFeatures& setMultiviewTessellationShader( Bool32 multiviewTessellationShader_ ) + { + multiviewTessellationShader = multiviewTessellationShader_; + return *this; + } + + operator VkPhysicalDeviceMultiviewFeatures const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceMultiviewFeatures &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceMultiviewFeatures const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( multiview == rhs.multiview ) + && ( multiviewGeometryShader == rhs.multiviewGeometryShader ) + && ( multiviewTessellationShader == rhs.multiviewTessellationShader ); + } + + bool operator!=( PhysicalDeviceMultiviewFeatures const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceMultiviewFeatures; + + public: + void* pNext = nullptr; + Bool32 multiview; + Bool32 multiviewGeometryShader; + Bool32 multiviewTessellationShader; + }; + static_assert( sizeof( PhysicalDeviceMultiviewFeatures ) == sizeof( VkPhysicalDeviceMultiviewFeatures ), "struct and wrapper have different size!" ); + + using PhysicalDeviceMultiviewFeaturesKHR = PhysicalDeviceMultiviewFeatures; + + struct PhysicalDeviceMultiviewProperties + { + operator VkPhysicalDeviceMultiviewProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceMultiviewProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceMultiviewProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxMultiviewViewCount == rhs.maxMultiviewViewCount ) + && ( maxMultiviewInstanceIndex == rhs.maxMultiviewInstanceIndex ); + } + + bool operator!=( PhysicalDeviceMultiviewProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceMultiviewProperties; + + public: + void* pNext = nullptr; + uint32_t maxMultiviewViewCount; + uint32_t maxMultiviewInstanceIndex; + }; + static_assert( sizeof( PhysicalDeviceMultiviewProperties ) == sizeof( VkPhysicalDeviceMultiviewProperties ), "struct and wrapper have different size!" 
); + + using PhysicalDeviceMultiviewPropertiesKHR = PhysicalDeviceMultiviewProperties; + + struct RenderPassMultiviewCreateInfo + { + RenderPassMultiviewCreateInfo( uint32_t subpassCount_ = 0, + const uint32_t* pViewMasks_ = nullptr, + uint32_t dependencyCount_ = 0, + const int32_t* pViewOffsets_ = nullptr, + uint32_t correlationMaskCount_ = 0, + const uint32_t* pCorrelationMasks_ = nullptr ) + : subpassCount( subpassCount_ ) + , pViewMasks( pViewMasks_ ) + , dependencyCount( dependencyCount_ ) + , pViewOffsets( pViewOffsets_ ) + , correlationMaskCount( correlationMaskCount_ ) + , pCorrelationMasks( pCorrelationMasks_ ) + { + } + + RenderPassMultiviewCreateInfo( VkRenderPassMultiviewCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassMultiviewCreateInfo ) ); + } + + RenderPassMultiviewCreateInfo& operator=( VkRenderPassMultiviewCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassMultiviewCreateInfo ) ); + return *this; + } + RenderPassMultiviewCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + RenderPassMultiviewCreateInfo& setSubpassCount( uint32_t subpassCount_ ) + { + subpassCount = subpassCount_; + return *this; + } + + RenderPassMultiviewCreateInfo& setPViewMasks( const uint32_t* pViewMasks_ ) + { + pViewMasks = pViewMasks_; + return *this; + } + + RenderPassMultiviewCreateInfo& setDependencyCount( uint32_t dependencyCount_ ) + { + dependencyCount = dependencyCount_; + return *this; + } + + RenderPassMultiviewCreateInfo& setPViewOffsets( const int32_t* pViewOffsets_ ) + { + pViewOffsets = pViewOffsets_; + return *this; + } + + RenderPassMultiviewCreateInfo& setCorrelationMaskCount( uint32_t correlationMaskCount_ ) + { + correlationMaskCount = correlationMaskCount_; + return *this; + } + + RenderPassMultiviewCreateInfo& setPCorrelationMasks( const uint32_t* pCorrelationMasks_ ) + { + pCorrelationMasks = pCorrelationMasks_; + return *this; + } + + operator VkRenderPassMultiviewCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkRenderPassMultiviewCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( RenderPassMultiviewCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( subpassCount == rhs.subpassCount ) + && ( pViewMasks == rhs.pViewMasks ) + && ( dependencyCount == rhs.dependencyCount ) + && ( pViewOffsets == rhs.pViewOffsets ) + && ( correlationMaskCount == rhs.correlationMaskCount ) + && ( pCorrelationMasks == rhs.pCorrelationMasks ); + } + + bool operator!=( RenderPassMultiviewCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eRenderPassMultiviewCreateInfo; + + public: + const void* pNext = nullptr; + uint32_t subpassCount; + const uint32_t* pViewMasks; + uint32_t dependencyCount; + const int32_t* pViewOffsets; + uint32_t correlationMaskCount; + const uint32_t* pCorrelationMasks; + }; + static_assert( sizeof( RenderPassMultiviewCreateInfo ) == sizeof( VkRenderPassMultiviewCreateInfo ), "struct and wrapper have different size!" 
); + + using RenderPassMultiviewCreateInfoKHR = RenderPassMultiviewCreateInfo; + + struct BindBufferMemoryInfo + { + BindBufferMemoryInfo( Buffer buffer_ = Buffer(), + DeviceMemory memory_ = DeviceMemory(), + DeviceSize memoryOffset_ = 0 ) + : buffer( buffer_ ) + , memory( memory_ ) + , memoryOffset( memoryOffset_ ) + { + } + + BindBufferMemoryInfo( VkBindBufferMemoryInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BindBufferMemoryInfo ) ); + } + + BindBufferMemoryInfo& operator=( VkBindBufferMemoryInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BindBufferMemoryInfo ) ); + return *this; + } + BindBufferMemoryInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BindBufferMemoryInfo& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + BindBufferMemoryInfo& setMemory( DeviceMemory memory_ ) + { + memory = memory_; + return *this; + } + + BindBufferMemoryInfo& setMemoryOffset( DeviceSize memoryOffset_ ) + { + memoryOffset = memoryOffset_; + return *this; + } + + operator VkBindBufferMemoryInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkBindBufferMemoryInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( BindBufferMemoryInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( buffer == rhs.buffer ) + && ( memory == rhs.memory ) + && ( memoryOffset == rhs.memoryOffset ); + } + + bool operator!=( BindBufferMemoryInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eBindBufferMemoryInfo; + + public: + const void* pNext = nullptr; + Buffer buffer; + DeviceMemory memory; + DeviceSize memoryOffset; + }; + static_assert( sizeof( BindBufferMemoryInfo ) == sizeof( VkBindBufferMemoryInfo ), "struct and wrapper have different size!" 
);
+
+  using BindBufferMemoryInfoKHR = BindBufferMemoryInfo;
+
+  struct BindBufferMemoryDeviceGroupInfo
+  {
+    BindBufferMemoryDeviceGroupInfo( uint32_t deviceIndexCount_ = 0,
+                                     const uint32_t* pDeviceIndices_ = nullptr )
+      : deviceIndexCount( deviceIndexCount_ )
+      , pDeviceIndices( pDeviceIndices_ )
+    {
+    }
+
+    BindBufferMemoryDeviceGroupInfo( VkBindBufferMemoryDeviceGroupInfo const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( BindBufferMemoryDeviceGroupInfo ) );
+    }
+
+    BindBufferMemoryDeviceGroupInfo& operator=( VkBindBufferMemoryDeviceGroupInfo const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( BindBufferMemoryDeviceGroupInfo ) );
+      return *this;
+    }
+    BindBufferMemoryDeviceGroupInfo& setPNext( const void* pNext_ )
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    BindBufferMemoryDeviceGroupInfo& setDeviceIndexCount( uint32_t deviceIndexCount_ )
+    {
+      deviceIndexCount = deviceIndexCount_;
+      return *this;
+    }
+
+    BindBufferMemoryDeviceGroupInfo& setPDeviceIndices( const uint32_t* pDeviceIndices_ )
+    {
+      pDeviceIndices = pDeviceIndices_;
+      return *this;
+    }
+
+    operator VkBindBufferMemoryDeviceGroupInfo const&() const
+    {
+      return *reinterpret_cast<const VkBindBufferMemoryDeviceGroupInfo*>(this);
+    }
+
+    operator VkBindBufferMemoryDeviceGroupInfo &()
+    {
+      return *reinterpret_cast<VkBindBufferMemoryDeviceGroupInfo*>(this);
+    }
+
+    bool operator==( BindBufferMemoryDeviceGroupInfo const& rhs ) const
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( deviceIndexCount == rhs.deviceIndexCount )
+          && ( pDeviceIndices == rhs.pDeviceIndices );
+    }
+
+    bool operator!=( BindBufferMemoryDeviceGroupInfo const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+  private:
+    StructureType sType = StructureType::eBindBufferMemoryDeviceGroupInfo;
+
+  public:
+    const void* pNext = nullptr;
+    uint32_t deviceIndexCount;
+    const uint32_t* pDeviceIndices;
+  };
+  static_assert( sizeof( BindBufferMemoryDeviceGroupInfo ) == sizeof( VkBindBufferMemoryDeviceGroupInfo ), "struct and wrapper have different size!" );
+
+  using BindBufferMemoryDeviceGroupInfoKHR = BindBufferMemoryDeviceGroupInfo;
+
+  struct BindImageMemoryInfo
+  {
+    BindImageMemoryInfo( Image image_ = Image(),
+                         DeviceMemory memory_ = DeviceMemory(),
+                         DeviceSize memoryOffset_ = 0 )
+      : image( image_ )
+      , memory( memory_ )
+      , memoryOffset( memoryOffset_ )
+    {
+    }
+
+    BindImageMemoryInfo( VkBindImageMemoryInfo const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( BindImageMemoryInfo ) );
+    }
+
+    BindImageMemoryInfo& operator=( VkBindImageMemoryInfo const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( BindImageMemoryInfo ) );
+      return *this;
+    }
+    BindImageMemoryInfo& setPNext( const void* pNext_ )
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    BindImageMemoryInfo& setImage( Image image_ )
+    {
+      image = image_;
+      return *this;
+    }
+
+    BindImageMemoryInfo& setMemory( DeviceMemory memory_ )
+    {
+      memory = memory_;
+      return *this;
+    }
+
+    BindImageMemoryInfo& setMemoryOffset( DeviceSize memoryOffset_ )
+    {
+      memoryOffset = memoryOffset_;
+      return *this;
+    }
+
+    operator VkBindImageMemoryInfo const&() const
+    {
+      return *reinterpret_cast<const VkBindImageMemoryInfo*>(this);
+    }
+
+    operator VkBindImageMemoryInfo &()
+    {
+      return *reinterpret_cast<VkBindImageMemoryInfo*>(this);
+    }
+
+    bool operator==( BindImageMemoryInfo const& rhs ) const
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( image == rhs.image )
+          && ( memory == rhs.memory )
+          && ( memoryOffset == rhs.memoryOffset );
+    }
+
+    bool operator!=( BindImageMemoryInfo const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+  private:
+    StructureType sType = StructureType::eBindImageMemoryInfo;
+
+  public:
+    const void* pNext = nullptr;
+    Image image;
+    DeviceMemory memory;
+    DeviceSize memoryOffset;
+  };
+  static_assert( sizeof( BindImageMemoryInfo ) == sizeof( VkBindImageMemoryInfo ), "struct and wrapper have different size!"
); + + using BindImageMemoryInfoKHR = BindImageMemoryInfo; + + struct BindImageMemoryDeviceGroupInfo + { + BindImageMemoryDeviceGroupInfo( uint32_t deviceIndexCount_ = 0, + const uint32_t* pDeviceIndices_ = nullptr, + uint32_t splitInstanceBindRegionCount_ = 0, + const Rect2D* pSplitInstanceBindRegions_ = nullptr ) + : deviceIndexCount( deviceIndexCount_ ) + , pDeviceIndices( pDeviceIndices_ ) + , splitInstanceBindRegionCount( splitInstanceBindRegionCount_ ) + , pSplitInstanceBindRegions( pSplitInstanceBindRegions_ ) + { + } + + BindImageMemoryDeviceGroupInfo( VkBindImageMemoryDeviceGroupInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BindImageMemoryDeviceGroupInfo ) ); + } + + BindImageMemoryDeviceGroupInfo& operator=( VkBindImageMemoryDeviceGroupInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BindImageMemoryDeviceGroupInfo ) ); + return *this; + } + BindImageMemoryDeviceGroupInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BindImageMemoryDeviceGroupInfo& setDeviceIndexCount( uint32_t deviceIndexCount_ ) + { + deviceIndexCount = deviceIndexCount_; + return *this; + } + + BindImageMemoryDeviceGroupInfo& setPDeviceIndices( const uint32_t* pDeviceIndices_ ) + { + pDeviceIndices = pDeviceIndices_; + return *this; + } + + BindImageMemoryDeviceGroupInfo& setSplitInstanceBindRegionCount( uint32_t splitInstanceBindRegionCount_ ) + { + splitInstanceBindRegionCount = splitInstanceBindRegionCount_; + return *this; + } + + BindImageMemoryDeviceGroupInfo& setPSplitInstanceBindRegions( const Rect2D* pSplitInstanceBindRegions_ ) + { + pSplitInstanceBindRegions = pSplitInstanceBindRegions_; + return *this; + } + + operator VkBindImageMemoryDeviceGroupInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkBindImageMemoryDeviceGroupInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( BindImageMemoryDeviceGroupInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceIndexCount == rhs.deviceIndexCount ) + && ( pDeviceIndices == rhs.pDeviceIndices ) + && ( splitInstanceBindRegionCount == rhs.splitInstanceBindRegionCount ) + && ( pSplitInstanceBindRegions == rhs.pSplitInstanceBindRegions ); + } + + bool operator!=( BindImageMemoryDeviceGroupInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eBindImageMemoryDeviceGroupInfo; + + public: + const void* pNext = nullptr; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; + uint32_t splitInstanceBindRegionCount; + const Rect2D* pSplitInstanceBindRegions; + }; + static_assert( sizeof( BindImageMemoryDeviceGroupInfo ) == sizeof( VkBindImageMemoryDeviceGroupInfo ), "struct and wrapper have different size!" 
); + + using BindImageMemoryDeviceGroupInfoKHR = BindImageMemoryDeviceGroupInfo; + + struct DeviceGroupRenderPassBeginInfo + { + DeviceGroupRenderPassBeginInfo( uint32_t deviceMask_ = 0, + uint32_t deviceRenderAreaCount_ = 0, + const Rect2D* pDeviceRenderAreas_ = nullptr ) + : deviceMask( deviceMask_ ) + , deviceRenderAreaCount( deviceRenderAreaCount_ ) + , pDeviceRenderAreas( pDeviceRenderAreas_ ) + { + } + + DeviceGroupRenderPassBeginInfo( VkDeviceGroupRenderPassBeginInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupRenderPassBeginInfo ) ); + } + + DeviceGroupRenderPassBeginInfo& operator=( VkDeviceGroupRenderPassBeginInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupRenderPassBeginInfo ) ); + return *this; + } + DeviceGroupRenderPassBeginInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceGroupRenderPassBeginInfo& setDeviceMask( uint32_t deviceMask_ ) + { + deviceMask = deviceMask_; + return *this; + } + + DeviceGroupRenderPassBeginInfo& setDeviceRenderAreaCount( uint32_t deviceRenderAreaCount_ ) + { + deviceRenderAreaCount = deviceRenderAreaCount_; + return *this; + } + + DeviceGroupRenderPassBeginInfo& setPDeviceRenderAreas( const Rect2D* pDeviceRenderAreas_ ) + { + pDeviceRenderAreas = pDeviceRenderAreas_; + return *this; + } + + operator VkDeviceGroupRenderPassBeginInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceGroupRenderPassBeginInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceGroupRenderPassBeginInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceMask == rhs.deviceMask ) + && ( deviceRenderAreaCount == rhs.deviceRenderAreaCount ) + && ( pDeviceRenderAreas == rhs.pDeviceRenderAreas ); + } + + bool operator!=( DeviceGroupRenderPassBeginInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGroupRenderPassBeginInfo; + + public: + const void* pNext = nullptr; + uint32_t deviceMask; + uint32_t deviceRenderAreaCount; + const Rect2D* pDeviceRenderAreas; + }; + static_assert( sizeof( DeviceGroupRenderPassBeginInfo ) == sizeof( VkDeviceGroupRenderPassBeginInfo ), "struct and wrapper have different size!" 
); + + using DeviceGroupRenderPassBeginInfoKHR = DeviceGroupRenderPassBeginInfo; + + struct DeviceGroupCommandBufferBeginInfo + { + DeviceGroupCommandBufferBeginInfo( uint32_t deviceMask_ = 0 ) + : deviceMask( deviceMask_ ) + { + } + + DeviceGroupCommandBufferBeginInfo( VkDeviceGroupCommandBufferBeginInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupCommandBufferBeginInfo ) ); + } + + DeviceGroupCommandBufferBeginInfo& operator=( VkDeviceGroupCommandBufferBeginInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupCommandBufferBeginInfo ) ); + return *this; + } + DeviceGroupCommandBufferBeginInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceGroupCommandBufferBeginInfo& setDeviceMask( uint32_t deviceMask_ ) + { + deviceMask = deviceMask_; + return *this; + } + + operator VkDeviceGroupCommandBufferBeginInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceGroupCommandBufferBeginInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceGroupCommandBufferBeginInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceMask == rhs.deviceMask ); + } + + bool operator!=( DeviceGroupCommandBufferBeginInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGroupCommandBufferBeginInfo; + + public: + const void* pNext = nullptr; + uint32_t deviceMask; + }; + static_assert( sizeof( DeviceGroupCommandBufferBeginInfo ) == sizeof( VkDeviceGroupCommandBufferBeginInfo ), "struct and wrapper have different size!" ); + + using DeviceGroupCommandBufferBeginInfoKHR = DeviceGroupCommandBufferBeginInfo; + + struct DeviceGroupSubmitInfo + { + DeviceGroupSubmitInfo( uint32_t waitSemaphoreCount_ = 0, + const uint32_t* pWaitSemaphoreDeviceIndices_ = nullptr, + uint32_t commandBufferCount_ = 0, + const uint32_t* pCommandBufferDeviceMasks_ = nullptr, + uint32_t signalSemaphoreCount_ = 0, + const uint32_t* pSignalSemaphoreDeviceIndices_ = nullptr ) + : waitSemaphoreCount( waitSemaphoreCount_ ) + , pWaitSemaphoreDeviceIndices( pWaitSemaphoreDeviceIndices_ ) + , commandBufferCount( commandBufferCount_ ) + , pCommandBufferDeviceMasks( pCommandBufferDeviceMasks_ ) + , signalSemaphoreCount( signalSemaphoreCount_ ) + , pSignalSemaphoreDeviceIndices( pSignalSemaphoreDeviceIndices_ ) + { + } + + DeviceGroupSubmitInfo( VkDeviceGroupSubmitInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupSubmitInfo ) ); + } + + DeviceGroupSubmitInfo& operator=( VkDeviceGroupSubmitInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupSubmitInfo ) ); + return *this; + } + DeviceGroupSubmitInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceGroupSubmitInfo& setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ ) + { + waitSemaphoreCount = waitSemaphoreCount_; + return *this; + } + + DeviceGroupSubmitInfo& setPWaitSemaphoreDeviceIndices( const uint32_t* pWaitSemaphoreDeviceIndices_ ) + { + pWaitSemaphoreDeviceIndices = pWaitSemaphoreDeviceIndices_; + return *this; + } + + DeviceGroupSubmitInfo& setCommandBufferCount( uint32_t commandBufferCount_ ) + { + commandBufferCount = commandBufferCount_; + return *this; + } + + DeviceGroupSubmitInfo& setPCommandBufferDeviceMasks( const uint32_t* pCommandBufferDeviceMasks_ ) + { + pCommandBufferDeviceMasks = pCommandBufferDeviceMasks_; + return *this; + } + + DeviceGroupSubmitInfo& setSignalSemaphoreCount( uint32_t signalSemaphoreCount_ ) + 
{ + signalSemaphoreCount = signalSemaphoreCount_; + return *this; + } + + DeviceGroupSubmitInfo& setPSignalSemaphoreDeviceIndices( const uint32_t* pSignalSemaphoreDeviceIndices_ ) + { + pSignalSemaphoreDeviceIndices = pSignalSemaphoreDeviceIndices_; + return *this; + } + + operator VkDeviceGroupSubmitInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceGroupSubmitInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceGroupSubmitInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreCount == rhs.waitSemaphoreCount ) + && ( pWaitSemaphoreDeviceIndices == rhs.pWaitSemaphoreDeviceIndices ) + && ( commandBufferCount == rhs.commandBufferCount ) + && ( pCommandBufferDeviceMasks == rhs.pCommandBufferDeviceMasks ) + && ( signalSemaphoreCount == rhs.signalSemaphoreCount ) + && ( pSignalSemaphoreDeviceIndices == rhs.pSignalSemaphoreDeviceIndices ); + } + + bool operator!=( DeviceGroupSubmitInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGroupSubmitInfo; + + public: + const void* pNext = nullptr; + uint32_t waitSemaphoreCount; + const uint32_t* pWaitSemaphoreDeviceIndices; + uint32_t commandBufferCount; + const uint32_t* pCommandBufferDeviceMasks; + uint32_t signalSemaphoreCount; + const uint32_t* pSignalSemaphoreDeviceIndices; + }; + static_assert( sizeof( DeviceGroupSubmitInfo ) == sizeof( VkDeviceGroupSubmitInfo ), "struct and wrapper have different size!" ); + + using DeviceGroupSubmitInfoKHR = DeviceGroupSubmitInfo; + + struct DeviceGroupBindSparseInfo + { + DeviceGroupBindSparseInfo( uint32_t resourceDeviceIndex_ = 0, + uint32_t memoryDeviceIndex_ = 0 ) + : resourceDeviceIndex( resourceDeviceIndex_ ) + , memoryDeviceIndex( memoryDeviceIndex_ ) + { + } + + DeviceGroupBindSparseInfo( VkDeviceGroupBindSparseInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupBindSparseInfo ) ); + } + + DeviceGroupBindSparseInfo& operator=( VkDeviceGroupBindSparseInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupBindSparseInfo ) ); + return *this; + } + DeviceGroupBindSparseInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceGroupBindSparseInfo& setResourceDeviceIndex( uint32_t resourceDeviceIndex_ ) + { + resourceDeviceIndex = resourceDeviceIndex_; + return *this; + } + + DeviceGroupBindSparseInfo& setMemoryDeviceIndex( uint32_t memoryDeviceIndex_ ) + { + memoryDeviceIndex = memoryDeviceIndex_; + return *this; + } + + operator VkDeviceGroupBindSparseInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceGroupBindSparseInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceGroupBindSparseInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( resourceDeviceIndex == rhs.resourceDeviceIndex ) + && ( memoryDeviceIndex == rhs.memoryDeviceIndex ); + } + + bool operator!=( DeviceGroupBindSparseInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGroupBindSparseInfo; + + public: + const void* pNext = nullptr; + uint32_t resourceDeviceIndex; + uint32_t memoryDeviceIndex; + }; + static_assert( sizeof( DeviceGroupBindSparseInfo ) == sizeof( VkDeviceGroupBindSparseInfo ), "struct and wrapper have different size!" 
); + + using DeviceGroupBindSparseInfoKHR = DeviceGroupBindSparseInfo; + + struct ImageSwapchainCreateInfoKHR + { + ImageSwapchainCreateInfoKHR( SwapchainKHR swapchain_ = SwapchainKHR() ) + : swapchain( swapchain_ ) + { + } + + ImageSwapchainCreateInfoKHR( VkImageSwapchainCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageSwapchainCreateInfoKHR ) ); + } + + ImageSwapchainCreateInfoKHR& operator=( VkImageSwapchainCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageSwapchainCreateInfoKHR ) ); + return *this; + } + ImageSwapchainCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImageSwapchainCreateInfoKHR& setSwapchain( SwapchainKHR swapchain_ ) + { + swapchain = swapchain_; + return *this; + } + + operator VkImageSwapchainCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageSwapchainCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageSwapchainCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchain == rhs.swapchain ); + } + + bool operator!=( ImageSwapchainCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageSwapchainCreateInfoKHR; + + public: + const void* pNext = nullptr; + SwapchainKHR swapchain; + }; + static_assert( sizeof( ImageSwapchainCreateInfoKHR ) == sizeof( VkImageSwapchainCreateInfoKHR ), "struct and wrapper have different size!" ); + + struct BindImageMemorySwapchainInfoKHR + { + BindImageMemorySwapchainInfoKHR( SwapchainKHR swapchain_ = SwapchainKHR(), + uint32_t imageIndex_ = 0 ) + : swapchain( swapchain_ ) + , imageIndex( imageIndex_ ) + { + } + + BindImageMemorySwapchainInfoKHR( VkBindImageMemorySwapchainInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( BindImageMemorySwapchainInfoKHR ) ); + } + + BindImageMemorySwapchainInfoKHR& operator=( VkBindImageMemorySwapchainInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( BindImageMemorySwapchainInfoKHR ) ); + return *this; + } + BindImageMemorySwapchainInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BindImageMemorySwapchainInfoKHR& setSwapchain( SwapchainKHR swapchain_ ) + { + swapchain = swapchain_; + return *this; + } + + BindImageMemorySwapchainInfoKHR& setImageIndex( uint32_t imageIndex_ ) + { + imageIndex = imageIndex_; + return *this; + } + + operator VkBindImageMemorySwapchainInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkBindImageMemorySwapchainInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( BindImageMemorySwapchainInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchain == rhs.swapchain ) + && ( imageIndex == rhs.imageIndex ); + } + + bool operator!=( BindImageMemorySwapchainInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eBindImageMemorySwapchainInfoKHR; + + public: + const void* pNext = nullptr; + SwapchainKHR swapchain; + uint32_t imageIndex; + }; + static_assert( sizeof( BindImageMemorySwapchainInfoKHR ) == sizeof( VkBindImageMemorySwapchainInfoKHR ), "struct and wrapper have different size!" 
); + + struct AcquireNextImageInfoKHR + { + AcquireNextImageInfoKHR( SwapchainKHR swapchain_ = SwapchainKHR(), + uint64_t timeout_ = 0, + Semaphore semaphore_ = Semaphore(), + Fence fence_ = Fence(), + uint32_t deviceMask_ = 0 ) + : swapchain( swapchain_ ) + , timeout( timeout_ ) + , semaphore( semaphore_ ) + , fence( fence_ ) + , deviceMask( deviceMask_ ) + { + } + + AcquireNextImageInfoKHR( VkAcquireNextImageInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( AcquireNextImageInfoKHR ) ); + } + + AcquireNextImageInfoKHR& operator=( VkAcquireNextImageInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( AcquireNextImageInfoKHR ) ); + return *this; + } + AcquireNextImageInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + AcquireNextImageInfoKHR& setSwapchain( SwapchainKHR swapchain_ ) + { + swapchain = swapchain_; + return *this; + } + + AcquireNextImageInfoKHR& setTimeout( uint64_t timeout_ ) + { + timeout = timeout_; + return *this; + } + + AcquireNextImageInfoKHR& setSemaphore( Semaphore semaphore_ ) + { + semaphore = semaphore_; + return *this; + } + + AcquireNextImageInfoKHR& setFence( Fence fence_ ) + { + fence = fence_; + return *this; + } + + AcquireNextImageInfoKHR& setDeviceMask( uint32_t deviceMask_ ) + { + deviceMask = deviceMask_; + return *this; + } + + operator VkAcquireNextImageInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkAcquireNextImageInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( AcquireNextImageInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchain == rhs.swapchain ) + && ( timeout == rhs.timeout ) + && ( semaphore == rhs.semaphore ) + && ( fence == rhs.fence ) + && ( deviceMask == rhs.deviceMask ); + } + + bool operator!=( AcquireNextImageInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eAcquireNextImageInfoKHR; + + public: + const void* pNext = nullptr; + SwapchainKHR swapchain; + uint64_t timeout; + Semaphore semaphore; + Fence fence; + uint32_t deviceMask; + }; + static_assert( sizeof( AcquireNextImageInfoKHR ) == sizeof( VkAcquireNextImageInfoKHR ), "struct and wrapper have different size!" 
); + + struct HdrMetadataEXT + { + HdrMetadataEXT( XYColorEXT displayPrimaryRed_ = XYColorEXT(), + XYColorEXT displayPrimaryGreen_ = XYColorEXT(), + XYColorEXT displayPrimaryBlue_ = XYColorEXT(), + XYColorEXT whitePoint_ = XYColorEXT(), + float maxLuminance_ = 0, + float minLuminance_ = 0, + float maxContentLightLevel_ = 0, + float maxFrameAverageLightLevel_ = 0 ) + : displayPrimaryRed( displayPrimaryRed_ ) + , displayPrimaryGreen( displayPrimaryGreen_ ) + , displayPrimaryBlue( displayPrimaryBlue_ ) + , whitePoint( whitePoint_ ) + , maxLuminance( maxLuminance_ ) + , minLuminance( minLuminance_ ) + , maxContentLightLevel( maxContentLightLevel_ ) + , maxFrameAverageLightLevel( maxFrameAverageLightLevel_ ) + { + } + + HdrMetadataEXT( VkHdrMetadataEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( HdrMetadataEXT ) ); + } + + HdrMetadataEXT& operator=( VkHdrMetadataEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( HdrMetadataEXT ) ); + return *this; + } + HdrMetadataEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + HdrMetadataEXT& setDisplayPrimaryRed( XYColorEXT displayPrimaryRed_ ) + { + displayPrimaryRed = displayPrimaryRed_; + return *this; + } + + HdrMetadataEXT& setDisplayPrimaryGreen( XYColorEXT displayPrimaryGreen_ ) + { + displayPrimaryGreen = displayPrimaryGreen_; + return *this; + } + + HdrMetadataEXT& setDisplayPrimaryBlue( XYColorEXT displayPrimaryBlue_ ) + { + displayPrimaryBlue = displayPrimaryBlue_; + return *this; + } + + HdrMetadataEXT& setWhitePoint( XYColorEXT whitePoint_ ) + { + whitePoint = whitePoint_; + return *this; + } + + HdrMetadataEXT& setMaxLuminance( float maxLuminance_ ) + { + maxLuminance = maxLuminance_; + return *this; + } + + HdrMetadataEXT& setMinLuminance( float minLuminance_ ) + { + minLuminance = minLuminance_; + return *this; + } + + HdrMetadataEXT& setMaxContentLightLevel( float maxContentLightLevel_ ) + { + maxContentLightLevel = maxContentLightLevel_; + return *this; + } + + HdrMetadataEXT& setMaxFrameAverageLightLevel( float maxFrameAverageLightLevel_ ) + { + maxFrameAverageLightLevel = maxFrameAverageLightLevel_; + return *this; + } + + operator VkHdrMetadataEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkHdrMetadataEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( HdrMetadataEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( displayPrimaryRed == rhs.displayPrimaryRed ) + && ( displayPrimaryGreen == rhs.displayPrimaryGreen ) + && ( displayPrimaryBlue == rhs.displayPrimaryBlue ) + && ( whitePoint == rhs.whitePoint ) + && ( maxLuminance == rhs.maxLuminance ) + && ( minLuminance == rhs.minLuminance ) + && ( maxContentLightLevel == rhs.maxContentLightLevel ) + && ( maxFrameAverageLightLevel == rhs.maxFrameAverageLightLevel ); + } + + bool operator!=( HdrMetadataEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eHdrMetadataEXT; + + public: + const void* pNext = nullptr; + XYColorEXT displayPrimaryRed; + XYColorEXT displayPrimaryGreen; + XYColorEXT displayPrimaryBlue; + XYColorEXT whitePoint; + float maxLuminance; + float minLuminance; + float maxContentLightLevel; + float maxFrameAverageLightLevel; + }; + static_assert( sizeof( HdrMetadataEXT ) == sizeof( VkHdrMetadataEXT ), "struct and wrapper have different size!" 
); + + struct PresentTimesInfoGOOGLE + { + PresentTimesInfoGOOGLE( uint32_t swapchainCount_ = 0, + const PresentTimeGOOGLE* pTimes_ = nullptr ) + : swapchainCount( swapchainCount_ ) + , pTimes( pTimes_ ) + { + } + + PresentTimesInfoGOOGLE( VkPresentTimesInfoGOOGLE const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentTimesInfoGOOGLE ) ); + } + + PresentTimesInfoGOOGLE& operator=( VkPresentTimesInfoGOOGLE const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentTimesInfoGOOGLE ) ); + return *this; + } + PresentTimesInfoGOOGLE& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PresentTimesInfoGOOGLE& setSwapchainCount( uint32_t swapchainCount_ ) + { + swapchainCount = swapchainCount_; + return *this; + } + + PresentTimesInfoGOOGLE& setPTimes( const PresentTimeGOOGLE* pTimes_ ) + { + pTimes = pTimes_; + return *this; + } + + operator VkPresentTimesInfoGOOGLE const&() const + { + return *reinterpret_cast(this); + } + + operator VkPresentTimesInfoGOOGLE &() + { + return *reinterpret_cast(this); + } + + bool operator==( PresentTimesInfoGOOGLE const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchainCount == rhs.swapchainCount ) + && ( pTimes == rhs.pTimes ); + } + + bool operator!=( PresentTimesInfoGOOGLE const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePresentTimesInfoGOOGLE; + + public: + const void* pNext = nullptr; + uint32_t swapchainCount; + const PresentTimeGOOGLE* pTimes; + }; + static_assert( sizeof( PresentTimesInfoGOOGLE ) == sizeof( VkPresentTimesInfoGOOGLE ), "struct and wrapper have different size!" ); + +#ifdef VK_USE_PLATFORM_IOS_MVK + struct IOSSurfaceCreateInfoMVK + { + IOSSurfaceCreateInfoMVK( IOSSurfaceCreateFlagsMVK flags_ = IOSSurfaceCreateFlagsMVK(), + const void* pView_ = nullptr ) + : flags( flags_ ) + , pView( pView_ ) + { + } + + IOSSurfaceCreateInfoMVK( VkIOSSurfaceCreateInfoMVK const & rhs ) + { + memcpy( this, &rhs, sizeof( IOSSurfaceCreateInfoMVK ) ); + } + + IOSSurfaceCreateInfoMVK& operator=( VkIOSSurfaceCreateInfoMVK const & rhs ) + { + memcpy( this, &rhs, sizeof( IOSSurfaceCreateInfoMVK ) ); + return *this; + } + IOSSurfaceCreateInfoMVK& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + IOSSurfaceCreateInfoMVK& setFlags( IOSSurfaceCreateFlagsMVK flags_ ) + { + flags = flags_; + return *this; + } + + IOSSurfaceCreateInfoMVK& setPView( const void* pView_ ) + { + pView = pView_; + return *this; + } + + operator VkIOSSurfaceCreateInfoMVK const&() const + { + return *reinterpret_cast(this); + } + + operator VkIOSSurfaceCreateInfoMVK &() + { + return *reinterpret_cast(this); + } + + bool operator==( IOSSurfaceCreateInfoMVK const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pView == rhs.pView ); + } + + bool operator!=( IOSSurfaceCreateInfoMVK const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eIosSurfaceCreateInfoMVK; + + public: + const void* pNext = nullptr; + IOSSurfaceCreateFlagsMVK flags; + const void* pView; + }; + static_assert( sizeof( IOSSurfaceCreateInfoMVK ) == sizeof( VkIOSSurfaceCreateInfoMVK ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + +#ifdef VK_USE_PLATFORM_MACOS_MVK + struct MacOSSurfaceCreateInfoMVK + { + MacOSSurfaceCreateInfoMVK( MacOSSurfaceCreateFlagsMVK flags_ = MacOSSurfaceCreateFlagsMVK(), + const void* pView_ = nullptr ) + : flags( flags_ ) + , pView( pView_ ) + { + } + + MacOSSurfaceCreateInfoMVK( VkMacOSSurfaceCreateInfoMVK const & rhs ) + { + memcpy( this, &rhs, sizeof( MacOSSurfaceCreateInfoMVK ) ); + } + + MacOSSurfaceCreateInfoMVK& operator=( VkMacOSSurfaceCreateInfoMVK const & rhs ) + { + memcpy( this, &rhs, sizeof( MacOSSurfaceCreateInfoMVK ) ); + return *this; + } + MacOSSurfaceCreateInfoMVK& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + MacOSSurfaceCreateInfoMVK& setFlags( MacOSSurfaceCreateFlagsMVK flags_ ) + { + flags = flags_; + return *this; + } + + MacOSSurfaceCreateInfoMVK& setPView( const void* pView_ ) + { + pView = pView_; + return *this; + } + + operator VkMacOSSurfaceCreateInfoMVK const&() const + { + return *reinterpret_cast(this); + } + + operator VkMacOSSurfaceCreateInfoMVK &() + { + return *reinterpret_cast(this); + } + + bool operator==( MacOSSurfaceCreateInfoMVK const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pView == rhs.pView ); + } + + bool operator!=( MacOSSurfaceCreateInfoMVK const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMacosSurfaceCreateInfoMVK; + + public: + const void* pNext = nullptr; + MacOSSurfaceCreateFlagsMVK flags; + const void* pView; + }; + static_assert( sizeof( MacOSSurfaceCreateInfoMVK ) == sizeof( VkMacOSSurfaceCreateInfoMVK ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + struct PipelineViewportWScalingStateCreateInfoNV + { + PipelineViewportWScalingStateCreateInfoNV( Bool32 viewportWScalingEnable_ = 0, + uint32_t viewportCount_ = 0, + const ViewportWScalingNV* pViewportWScalings_ = nullptr ) + : viewportWScalingEnable( viewportWScalingEnable_ ) + , viewportCount( viewportCount_ ) + , pViewportWScalings( pViewportWScalings_ ) + { + } + + PipelineViewportWScalingStateCreateInfoNV( VkPipelineViewportWScalingStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportWScalingStateCreateInfoNV ) ); + } + + PipelineViewportWScalingStateCreateInfoNV& operator=( VkPipelineViewportWScalingStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportWScalingStateCreateInfoNV ) ); + return *this; + } + PipelineViewportWScalingStateCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineViewportWScalingStateCreateInfoNV& setViewportWScalingEnable( Bool32 viewportWScalingEnable_ ) + { + viewportWScalingEnable = viewportWScalingEnable_; + return *this; + } + + PipelineViewportWScalingStateCreateInfoNV& setViewportCount( uint32_t viewportCount_ ) + { + viewportCount = viewportCount_; + return *this; + } + + PipelineViewportWScalingStateCreateInfoNV& setPViewportWScalings( const ViewportWScalingNV* pViewportWScalings_ ) + { + pViewportWScalings = pViewportWScalings_; + return *this; + } + + operator VkPipelineViewportWScalingStateCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineViewportWScalingStateCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineViewportWScalingStateCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( 
viewportWScalingEnable == rhs.viewportWScalingEnable ) + && ( viewportCount == rhs.viewportCount ) + && ( pViewportWScalings == rhs.pViewportWScalings ); + } + + bool operator!=( PipelineViewportWScalingStateCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineViewportWScalingStateCreateInfoNV; + + public: + const void* pNext = nullptr; + Bool32 viewportWScalingEnable; + uint32_t viewportCount; + const ViewportWScalingNV* pViewportWScalings; + }; + static_assert( sizeof( PipelineViewportWScalingStateCreateInfoNV ) == sizeof( VkPipelineViewportWScalingStateCreateInfoNV ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceDiscardRectanglePropertiesEXT + { + PhysicalDeviceDiscardRectanglePropertiesEXT( uint32_t maxDiscardRectangles_ = 0 ) + : maxDiscardRectangles( maxDiscardRectangles_ ) + { + } + + PhysicalDeviceDiscardRectanglePropertiesEXT( VkPhysicalDeviceDiscardRectanglePropertiesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceDiscardRectanglePropertiesEXT ) ); + } + + PhysicalDeviceDiscardRectanglePropertiesEXT& operator=( VkPhysicalDeviceDiscardRectanglePropertiesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceDiscardRectanglePropertiesEXT ) ); + return *this; + } + PhysicalDeviceDiscardRectanglePropertiesEXT& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceDiscardRectanglePropertiesEXT& setMaxDiscardRectangles( uint32_t maxDiscardRectangles_ ) + { + maxDiscardRectangles = maxDiscardRectangles_; + return *this; + } + + operator VkPhysicalDeviceDiscardRectanglePropertiesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceDiscardRectanglePropertiesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceDiscardRectanglePropertiesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxDiscardRectangles == rhs.maxDiscardRectangles ); + } + + bool operator!=( PhysicalDeviceDiscardRectanglePropertiesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceDiscardRectanglePropertiesEXT; + + public: + void* pNext = nullptr; + uint32_t maxDiscardRectangles; + }; + static_assert( sizeof( PhysicalDeviceDiscardRectanglePropertiesEXT ) == sizeof( VkPhysicalDeviceDiscardRectanglePropertiesEXT ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX + { + operator VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( perViewPositionAllComponents == rhs.perViewPositionAllComponents ); + } + + bool operator!=( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + + public: + void* pNext = nullptr; + Bool32 perViewPositionAllComponents; + }; + static_assert( sizeof( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX ) == sizeof( VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceSurfaceInfo2KHR + { + PhysicalDeviceSurfaceInfo2KHR( SurfaceKHR surface_ = SurfaceKHR() ) + : surface( surface_ ) + { + } + + PhysicalDeviceSurfaceInfo2KHR( VkPhysicalDeviceSurfaceInfo2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceSurfaceInfo2KHR ) ); + } + + PhysicalDeviceSurfaceInfo2KHR& operator=( VkPhysicalDeviceSurfaceInfo2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceSurfaceInfo2KHR ) ); + return *this; + } + PhysicalDeviceSurfaceInfo2KHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceSurfaceInfo2KHR& setSurface( SurfaceKHR surface_ ) + { + surface = surface_; + return *this; + } + + operator VkPhysicalDeviceSurfaceInfo2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceSurfaceInfo2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceSurfaceInfo2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( surface == rhs.surface ); + } + + bool operator!=( PhysicalDeviceSurfaceInfo2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceSurfaceInfo2KHR; + + public: + const void* pNext = nullptr; + SurfaceKHR surface; + }; + static_assert( sizeof( PhysicalDeviceSurfaceInfo2KHR ) == sizeof( VkPhysicalDeviceSurfaceInfo2KHR ), "struct and wrapper have different size!" ); + + struct DisplayPlaneProperties2KHR + { + operator VkDisplayPlaneProperties2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayPlaneProperties2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayPlaneProperties2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( displayPlaneProperties == rhs.displayPlaneProperties ); + } + + bool operator!=( DisplayPlaneProperties2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplayPlaneProperties2KHR; + + public: + void* pNext = nullptr; + DisplayPlanePropertiesKHR displayPlaneProperties; + }; + static_assert( sizeof( DisplayPlaneProperties2KHR ) == sizeof( VkDisplayPlaneProperties2KHR ), "struct and wrapper have different size!" 
); + + struct DisplayModeProperties2KHR + { + operator VkDisplayModeProperties2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayModeProperties2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayModeProperties2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( displayModeProperties == rhs.displayModeProperties ); + } + + bool operator!=( DisplayModeProperties2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplayModeProperties2KHR; + + public: + void* pNext = nullptr; + DisplayModePropertiesKHR displayModeProperties; + }; + static_assert( sizeof( DisplayModeProperties2KHR ) == sizeof( VkDisplayModeProperties2KHR ), "struct and wrapper have different size!" ); + + struct DisplayPlaneInfo2KHR + { + DisplayPlaneInfo2KHR( DisplayModeKHR mode_ = DisplayModeKHR(), + uint32_t planeIndex_ = 0 ) + : mode( mode_ ) + , planeIndex( planeIndex_ ) + { + } + + DisplayPlaneInfo2KHR( VkDisplayPlaneInfo2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayPlaneInfo2KHR ) ); + } + + DisplayPlaneInfo2KHR& operator=( VkDisplayPlaneInfo2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayPlaneInfo2KHR ) ); + return *this; + } + DisplayPlaneInfo2KHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DisplayPlaneInfo2KHR& setMode( DisplayModeKHR mode_ ) + { + mode = mode_; + return *this; + } + + DisplayPlaneInfo2KHR& setPlaneIndex( uint32_t planeIndex_ ) + { + planeIndex = planeIndex_; + return *this; + } + + operator VkDisplayPlaneInfo2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayPlaneInfo2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayPlaneInfo2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( mode == rhs.mode ) + && ( planeIndex == rhs.planeIndex ); + } + + bool operator!=( DisplayPlaneInfo2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplayPlaneInfo2KHR; + + public: + const void* pNext = nullptr; + DisplayModeKHR mode; + uint32_t planeIndex; + }; + static_assert( sizeof( DisplayPlaneInfo2KHR ) == sizeof( VkDisplayPlaneInfo2KHR ), "struct and wrapper have different size!" 
); + + struct PhysicalDevice16BitStorageFeatures + { + PhysicalDevice16BitStorageFeatures( Bool32 storageBuffer16BitAccess_ = 0, + Bool32 uniformAndStorageBuffer16BitAccess_ = 0, + Bool32 storagePushConstant16_ = 0, + Bool32 storageInputOutput16_ = 0 ) + : storageBuffer16BitAccess( storageBuffer16BitAccess_ ) + , uniformAndStorageBuffer16BitAccess( uniformAndStorageBuffer16BitAccess_ ) + , storagePushConstant16( storagePushConstant16_ ) + , storageInputOutput16( storageInputOutput16_ ) + { + } + + PhysicalDevice16BitStorageFeatures( VkPhysicalDevice16BitStorageFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDevice16BitStorageFeatures ) ); + } + + PhysicalDevice16BitStorageFeatures& operator=( VkPhysicalDevice16BitStorageFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDevice16BitStorageFeatures ) ); + return *this; + } + PhysicalDevice16BitStorageFeatures& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDevice16BitStorageFeatures& setStorageBuffer16BitAccess( Bool32 storageBuffer16BitAccess_ ) + { + storageBuffer16BitAccess = storageBuffer16BitAccess_; + return *this; + } + + PhysicalDevice16BitStorageFeatures& setUniformAndStorageBuffer16BitAccess( Bool32 uniformAndStorageBuffer16BitAccess_ ) + { + uniformAndStorageBuffer16BitAccess = uniformAndStorageBuffer16BitAccess_; + return *this; + } + + PhysicalDevice16BitStorageFeatures& setStoragePushConstant16( Bool32 storagePushConstant16_ ) + { + storagePushConstant16 = storagePushConstant16_; + return *this; + } + + PhysicalDevice16BitStorageFeatures& setStorageInputOutput16( Bool32 storageInputOutput16_ ) + { + storageInputOutput16 = storageInputOutput16_; + return *this; + } + + operator VkPhysicalDevice16BitStorageFeatures const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDevice16BitStorageFeatures &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDevice16BitStorageFeatures const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( storageBuffer16BitAccess == rhs.storageBuffer16BitAccess ) + && ( uniformAndStorageBuffer16BitAccess == rhs.uniformAndStorageBuffer16BitAccess ) + && ( storagePushConstant16 == rhs.storagePushConstant16 ) + && ( storageInputOutput16 == rhs.storageInputOutput16 ); + } + + bool operator!=( PhysicalDevice16BitStorageFeatures const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDevice16BitStorageFeatures; + + public: + void* pNext = nullptr; + Bool32 storageBuffer16BitAccess; + Bool32 uniformAndStorageBuffer16BitAccess; + Bool32 storagePushConstant16; + Bool32 storageInputOutput16; + }; + static_assert( sizeof( PhysicalDevice16BitStorageFeatures ) == sizeof( VkPhysicalDevice16BitStorageFeatures ), "struct and wrapper have different size!" 
); + + using PhysicalDevice16BitStorageFeaturesKHR = PhysicalDevice16BitStorageFeatures; + + struct BufferMemoryRequirementsInfo2 + { + BufferMemoryRequirementsInfo2( Buffer buffer_ = Buffer() ) + : buffer( buffer_ ) + { + } + + BufferMemoryRequirementsInfo2( VkBufferMemoryRequirementsInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferMemoryRequirementsInfo2 ) ); + } + + BufferMemoryRequirementsInfo2& operator=( VkBufferMemoryRequirementsInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferMemoryRequirementsInfo2 ) ); + return *this; + } + BufferMemoryRequirementsInfo2& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BufferMemoryRequirementsInfo2& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + operator VkBufferMemoryRequirementsInfo2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkBufferMemoryRequirementsInfo2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( BufferMemoryRequirementsInfo2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( buffer == rhs.buffer ); + } + + bool operator!=( BufferMemoryRequirementsInfo2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eBufferMemoryRequirementsInfo2; + + public: + const void* pNext = nullptr; + Buffer buffer; + }; + static_assert( sizeof( BufferMemoryRequirementsInfo2 ) == sizeof( VkBufferMemoryRequirementsInfo2 ), "struct and wrapper have different size!" ); + + using BufferMemoryRequirementsInfo2KHR = BufferMemoryRequirementsInfo2; + + struct ImageMemoryRequirementsInfo2 + { + ImageMemoryRequirementsInfo2( Image image_ = Image() ) + : image( image_ ) + { + } + + ImageMemoryRequirementsInfo2( VkImageMemoryRequirementsInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageMemoryRequirementsInfo2 ) ); + } + + ImageMemoryRequirementsInfo2& operator=( VkImageMemoryRequirementsInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageMemoryRequirementsInfo2 ) ); + return *this; + } + ImageMemoryRequirementsInfo2& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImageMemoryRequirementsInfo2& setImage( Image image_ ) + { + image = image_; + return *this; + } + + operator VkImageMemoryRequirementsInfo2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageMemoryRequirementsInfo2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageMemoryRequirementsInfo2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( image == rhs.image ); + } + + bool operator!=( ImageMemoryRequirementsInfo2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageMemoryRequirementsInfo2; + + public: + const void* pNext = nullptr; + Image image; + }; + static_assert( sizeof( ImageMemoryRequirementsInfo2 ) == sizeof( VkImageMemoryRequirementsInfo2 ), "struct and wrapper have different size!" 
); + + using ImageMemoryRequirementsInfo2KHR = ImageMemoryRequirementsInfo2; + + struct ImageSparseMemoryRequirementsInfo2 + { + ImageSparseMemoryRequirementsInfo2( Image image_ = Image() ) + : image( image_ ) + { + } + + ImageSparseMemoryRequirementsInfo2( VkImageSparseMemoryRequirementsInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageSparseMemoryRequirementsInfo2 ) ); + } + + ImageSparseMemoryRequirementsInfo2& operator=( VkImageSparseMemoryRequirementsInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageSparseMemoryRequirementsInfo2 ) ); + return *this; + } + ImageSparseMemoryRequirementsInfo2& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImageSparseMemoryRequirementsInfo2& setImage( Image image_ ) + { + image = image_; + return *this; + } + + operator VkImageSparseMemoryRequirementsInfo2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageSparseMemoryRequirementsInfo2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageSparseMemoryRequirementsInfo2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( image == rhs.image ); + } + + bool operator!=( ImageSparseMemoryRequirementsInfo2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageSparseMemoryRequirementsInfo2; + + public: + const void* pNext = nullptr; + Image image; + }; + static_assert( sizeof( ImageSparseMemoryRequirementsInfo2 ) == sizeof( VkImageSparseMemoryRequirementsInfo2 ), "struct and wrapper have different size!" ); + + using ImageSparseMemoryRequirementsInfo2KHR = ImageSparseMemoryRequirementsInfo2; + + struct MemoryRequirements2 + { + operator VkMemoryRequirements2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryRequirements2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryRequirements2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memoryRequirements == rhs.memoryRequirements ); + } + + bool operator!=( MemoryRequirements2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryRequirements2; + + public: + void* pNext = nullptr; + MemoryRequirements memoryRequirements; + }; + static_assert( sizeof( MemoryRequirements2 ) == sizeof( VkMemoryRequirements2 ), "struct and wrapper have different size!" ); + + using MemoryRequirements2KHR = MemoryRequirements2; + + struct MemoryDedicatedRequirements + { + operator VkMemoryDedicatedRequirements const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryDedicatedRequirements &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryDedicatedRequirements const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( prefersDedicatedAllocation == rhs.prefersDedicatedAllocation ) + && ( requiresDedicatedAllocation == rhs.requiresDedicatedAllocation ); + } + + bool operator!=( MemoryDedicatedRequirements const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryDedicatedRequirements; + + public: + void* pNext = nullptr; + Bool32 prefersDedicatedAllocation; + Bool32 requiresDedicatedAllocation; + }; + static_assert( sizeof( MemoryDedicatedRequirements ) == sizeof( VkMemoryDedicatedRequirements ), "struct and wrapper have different size!" 
); + + using MemoryDedicatedRequirementsKHR = MemoryDedicatedRequirements; + + struct MemoryDedicatedAllocateInfo + { + MemoryDedicatedAllocateInfo( Image image_ = Image(), + Buffer buffer_ = Buffer() ) + : image( image_ ) + , buffer( buffer_ ) + { + } + + MemoryDedicatedAllocateInfo( VkMemoryDedicatedAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryDedicatedAllocateInfo ) ); + } + + MemoryDedicatedAllocateInfo& operator=( VkMemoryDedicatedAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryDedicatedAllocateInfo ) ); + return *this; + } + MemoryDedicatedAllocateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + MemoryDedicatedAllocateInfo& setImage( Image image_ ) + { + image = image_; + return *this; + } + + MemoryDedicatedAllocateInfo& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + operator VkMemoryDedicatedAllocateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryDedicatedAllocateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryDedicatedAllocateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( image == rhs.image ) + && ( buffer == rhs.buffer ); + } + + bool operator!=( MemoryDedicatedAllocateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryDedicatedAllocateInfo; + + public: + const void* pNext = nullptr; + Image image; + Buffer buffer; + }; + static_assert( sizeof( MemoryDedicatedAllocateInfo ) == sizeof( VkMemoryDedicatedAllocateInfo ), "struct and wrapper have different size!" ); + + using MemoryDedicatedAllocateInfoKHR = MemoryDedicatedAllocateInfo; + + struct SamplerYcbcrConversionInfo + { + SamplerYcbcrConversionInfo( SamplerYcbcrConversion conversion_ = SamplerYcbcrConversion() ) + : conversion( conversion_ ) + { + } + + SamplerYcbcrConversionInfo( VkSamplerYcbcrConversionInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SamplerYcbcrConversionInfo ) ); + } + + SamplerYcbcrConversionInfo& operator=( VkSamplerYcbcrConversionInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SamplerYcbcrConversionInfo ) ); + return *this; + } + SamplerYcbcrConversionInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SamplerYcbcrConversionInfo& setConversion( SamplerYcbcrConversion conversion_ ) + { + conversion = conversion_; + return *this; + } + + operator VkSamplerYcbcrConversionInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkSamplerYcbcrConversionInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( SamplerYcbcrConversionInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( conversion == rhs.conversion ); + } + + bool operator!=( SamplerYcbcrConversionInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSamplerYcbcrConversionInfo; + + public: + const void* pNext = nullptr; + SamplerYcbcrConversion conversion; + }; + static_assert( sizeof( SamplerYcbcrConversionInfo ) == sizeof( VkSamplerYcbcrConversionInfo ), "struct and wrapper have different size!" 
); + + using SamplerYcbcrConversionInfoKHR = SamplerYcbcrConversionInfo; + + struct PhysicalDeviceSamplerYcbcrConversionFeatures + { + PhysicalDeviceSamplerYcbcrConversionFeatures( Bool32 samplerYcbcrConversion_ = 0 ) + : samplerYcbcrConversion( samplerYcbcrConversion_ ) + { + } + + PhysicalDeviceSamplerYcbcrConversionFeatures( VkPhysicalDeviceSamplerYcbcrConversionFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceSamplerYcbcrConversionFeatures ) ); + } + + PhysicalDeviceSamplerYcbcrConversionFeatures& operator=( VkPhysicalDeviceSamplerYcbcrConversionFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceSamplerYcbcrConversionFeatures ) ); + return *this; + } + PhysicalDeviceSamplerYcbcrConversionFeatures& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceSamplerYcbcrConversionFeatures& setSamplerYcbcrConversion( Bool32 samplerYcbcrConversion_ ) + { + samplerYcbcrConversion = samplerYcbcrConversion_; + return *this; + } + + operator VkPhysicalDeviceSamplerYcbcrConversionFeatures const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceSamplerYcbcrConversionFeatures &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceSamplerYcbcrConversionFeatures const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( samplerYcbcrConversion == rhs.samplerYcbcrConversion ); + } + + bool operator!=( PhysicalDeviceSamplerYcbcrConversionFeatures const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceSamplerYcbcrConversionFeatures; + + public: + void* pNext = nullptr; + Bool32 samplerYcbcrConversion; + }; + static_assert( sizeof( PhysicalDeviceSamplerYcbcrConversionFeatures ) == sizeof( VkPhysicalDeviceSamplerYcbcrConversionFeatures ), "struct and wrapper have different size!" ); + + using PhysicalDeviceSamplerYcbcrConversionFeaturesKHR = PhysicalDeviceSamplerYcbcrConversionFeatures; + + struct SamplerYcbcrConversionImageFormatProperties + { + operator VkSamplerYcbcrConversionImageFormatProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkSamplerYcbcrConversionImageFormatProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( SamplerYcbcrConversionImageFormatProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( combinedImageSamplerDescriptorCount == rhs.combinedImageSamplerDescriptorCount ); + } + + bool operator!=( SamplerYcbcrConversionImageFormatProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSamplerYcbcrConversionImageFormatProperties; + + public: + void* pNext = nullptr; + uint32_t combinedImageSamplerDescriptorCount; + }; + static_assert( sizeof( SamplerYcbcrConversionImageFormatProperties ) == sizeof( VkSamplerYcbcrConversionImageFormatProperties ), "struct and wrapper have different size!" 
); + + using SamplerYcbcrConversionImageFormatPropertiesKHR = SamplerYcbcrConversionImageFormatProperties; + + struct TextureLODGatherFormatPropertiesAMD + { + operator VkTextureLODGatherFormatPropertiesAMD const&() const + { + return *reinterpret_cast(this); + } + + operator VkTextureLODGatherFormatPropertiesAMD &() + { + return *reinterpret_cast(this); + } + + bool operator==( TextureLODGatherFormatPropertiesAMD const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( supportsTextureGatherLODBiasAMD == rhs.supportsTextureGatherLODBiasAMD ); + } + + bool operator!=( TextureLODGatherFormatPropertiesAMD const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eTextureLodGatherFormatPropertiesAMD; + + public: + void* pNext = nullptr; + Bool32 supportsTextureGatherLODBiasAMD; + }; + static_assert( sizeof( TextureLODGatherFormatPropertiesAMD ) == sizeof( VkTextureLODGatherFormatPropertiesAMD ), "struct and wrapper have different size!" ); + + struct ProtectedSubmitInfo + { + ProtectedSubmitInfo( Bool32 protectedSubmit_ = 0 ) + : protectedSubmit( protectedSubmit_ ) + { + } + + ProtectedSubmitInfo( VkProtectedSubmitInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ProtectedSubmitInfo ) ); + } + + ProtectedSubmitInfo& operator=( VkProtectedSubmitInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ProtectedSubmitInfo ) ); + return *this; + } + ProtectedSubmitInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ProtectedSubmitInfo& setProtectedSubmit( Bool32 protectedSubmit_ ) + { + protectedSubmit = protectedSubmit_; + return *this; + } + + operator VkProtectedSubmitInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkProtectedSubmitInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ProtectedSubmitInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( protectedSubmit == rhs.protectedSubmit ); + } + + bool operator!=( ProtectedSubmitInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eProtectedSubmitInfo; + + public: + const void* pNext = nullptr; + Bool32 protectedSubmit; + }; + static_assert( sizeof( ProtectedSubmitInfo ) == sizeof( VkProtectedSubmitInfo ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceProtectedMemoryFeatures + { + PhysicalDeviceProtectedMemoryFeatures( Bool32 protectedMemory_ = 0 ) + : protectedMemory( protectedMemory_ ) + { + } + + PhysicalDeviceProtectedMemoryFeatures( VkPhysicalDeviceProtectedMemoryFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceProtectedMemoryFeatures ) ); + } + + PhysicalDeviceProtectedMemoryFeatures& operator=( VkPhysicalDeviceProtectedMemoryFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceProtectedMemoryFeatures ) ); + return *this; + } + PhysicalDeviceProtectedMemoryFeatures& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceProtectedMemoryFeatures& setProtectedMemory( Bool32 protectedMemory_ ) + { + protectedMemory = protectedMemory_; + return *this; + } + + operator VkPhysicalDeviceProtectedMemoryFeatures const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceProtectedMemoryFeatures &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceProtectedMemoryFeatures const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( protectedMemory == rhs.protectedMemory ); + } + + bool operator!=( PhysicalDeviceProtectedMemoryFeatures const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceProtectedMemoryFeatures; + + public: + void* pNext = nullptr; + Bool32 protectedMemory; + }; + static_assert( sizeof( PhysicalDeviceProtectedMemoryFeatures ) == sizeof( VkPhysicalDeviceProtectedMemoryFeatures ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceProtectedMemoryProperties + { + PhysicalDeviceProtectedMemoryProperties( Bool32 protectedNoFault_ = 0 ) + : protectedNoFault( protectedNoFault_ ) + { + } + + PhysicalDeviceProtectedMemoryProperties( VkPhysicalDeviceProtectedMemoryProperties const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceProtectedMemoryProperties ) ); + } + + PhysicalDeviceProtectedMemoryProperties& operator=( VkPhysicalDeviceProtectedMemoryProperties const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceProtectedMemoryProperties ) ); + return *this; + } + PhysicalDeviceProtectedMemoryProperties& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceProtectedMemoryProperties& setProtectedNoFault( Bool32 protectedNoFault_ ) + { + protectedNoFault = protectedNoFault_; + return *this; + } + + operator VkPhysicalDeviceProtectedMemoryProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceProtectedMemoryProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceProtectedMemoryProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( protectedNoFault == rhs.protectedNoFault ); + } + + bool operator!=( PhysicalDeviceProtectedMemoryProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceProtectedMemoryProperties; + + public: + void* pNext = nullptr; + Bool32 protectedNoFault; + }; + static_assert( sizeof( PhysicalDeviceProtectedMemoryProperties ) == sizeof( VkPhysicalDeviceProtectedMemoryProperties ), "struct and wrapper have different size!" 
); + + struct PipelineCoverageToColorStateCreateInfoNV + { + PipelineCoverageToColorStateCreateInfoNV( PipelineCoverageToColorStateCreateFlagsNV flags_ = PipelineCoverageToColorStateCreateFlagsNV(), + Bool32 coverageToColorEnable_ = 0, + uint32_t coverageToColorLocation_ = 0 ) + : flags( flags_ ) + , coverageToColorEnable( coverageToColorEnable_ ) + , coverageToColorLocation( coverageToColorLocation_ ) + { + } + + PipelineCoverageToColorStateCreateInfoNV( VkPipelineCoverageToColorStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineCoverageToColorStateCreateInfoNV ) ); + } + + PipelineCoverageToColorStateCreateInfoNV& operator=( VkPipelineCoverageToColorStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineCoverageToColorStateCreateInfoNV ) ); + return *this; + } + PipelineCoverageToColorStateCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineCoverageToColorStateCreateInfoNV& setFlags( PipelineCoverageToColorStateCreateFlagsNV flags_ ) + { + flags = flags_; + return *this; + } + + PipelineCoverageToColorStateCreateInfoNV& setCoverageToColorEnable( Bool32 coverageToColorEnable_ ) + { + coverageToColorEnable = coverageToColorEnable_; + return *this; + } + + PipelineCoverageToColorStateCreateInfoNV& setCoverageToColorLocation( uint32_t coverageToColorLocation_ ) + { + coverageToColorLocation = coverageToColorLocation_; + return *this; + } + + operator VkPipelineCoverageToColorStateCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineCoverageToColorStateCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineCoverageToColorStateCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( coverageToColorEnable == rhs.coverageToColorEnable ) + && ( coverageToColorLocation == rhs.coverageToColorLocation ); + } + + bool operator!=( PipelineCoverageToColorStateCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineCoverageToColorStateCreateInfoNV; + + public: + const void* pNext = nullptr; + PipelineCoverageToColorStateCreateFlagsNV flags; + Bool32 coverageToColorEnable; + uint32_t coverageToColorLocation; + }; + static_assert( sizeof( PipelineCoverageToColorStateCreateInfoNV ) == sizeof( VkPipelineCoverageToColorStateCreateInfoNV ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceSamplerFilterMinmaxPropertiesEXT + { + operator VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceSamplerFilterMinmaxPropertiesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( filterMinmaxSingleComponentFormats == rhs.filterMinmaxSingleComponentFormats ) + && ( filterMinmaxImageComponentMapping == rhs.filterMinmaxImageComponentMapping ); + } + + bool operator!=( PhysicalDeviceSamplerFilterMinmaxPropertiesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceSamplerFilterMinmaxPropertiesEXT; + + public: + void* pNext = nullptr; + Bool32 filterMinmaxSingleComponentFormats; + Bool32 filterMinmaxImageComponentMapping; + }; + static_assert( sizeof( PhysicalDeviceSamplerFilterMinmaxPropertiesEXT ) == sizeof( VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT ), "struct and wrapper have different size!" ); + + struct MultisamplePropertiesEXT + { + operator VkMultisamplePropertiesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkMultisamplePropertiesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( MultisamplePropertiesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxSampleLocationGridSize == rhs.maxSampleLocationGridSize ); + } + + bool operator!=( MultisamplePropertiesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMultisamplePropertiesEXT; + + public: + void* pNext = nullptr; + Extent2D maxSampleLocationGridSize; + }; + static_assert( sizeof( MultisamplePropertiesEXT ) == sizeof( VkMultisamplePropertiesEXT ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceBlendOperationAdvancedFeaturesEXT + { + PhysicalDeviceBlendOperationAdvancedFeaturesEXT( Bool32 advancedBlendCoherentOperations_ = 0 ) + : advancedBlendCoherentOperations( advancedBlendCoherentOperations_ ) + { + } + + PhysicalDeviceBlendOperationAdvancedFeaturesEXT( VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceBlendOperationAdvancedFeaturesEXT ) ); + } + + PhysicalDeviceBlendOperationAdvancedFeaturesEXT& operator=( VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceBlendOperationAdvancedFeaturesEXT ) ); + return *this; + } + PhysicalDeviceBlendOperationAdvancedFeaturesEXT& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceBlendOperationAdvancedFeaturesEXT& setAdvancedBlendCoherentOperations( Bool32 advancedBlendCoherentOperations_ ) + { + advancedBlendCoherentOperations = advancedBlendCoherentOperations_; + return *this; + } + + operator VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const&() const + { + return *reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(this); + } + + operator VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT &() + { + return *reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(this); + } + + bool operator==( PhysicalDeviceBlendOperationAdvancedFeaturesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( advancedBlendCoherentOperations == rhs.advancedBlendCoherentOperations ); + } + + bool operator!=( PhysicalDeviceBlendOperationAdvancedFeaturesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceBlendOperationAdvancedFeaturesEXT; + + public: + void* pNext = nullptr; + Bool32 advancedBlendCoherentOperations; + }; + static_assert( sizeof( PhysicalDeviceBlendOperationAdvancedFeaturesEXT ) == sizeof( VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT ), "struct and wrapper have different size!" );
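A minimal usage sketch (hypothetical handle name phys_dev; Vulkan 1.1 and the vkGetPhysicalDeviceFeatures2 entry point assumed), showing how the pNext chain and the conversion operators above fit together when querying this feature bit:

    vk::PhysicalDeviceBlendOperationAdvancedFeaturesEXT blendFeatures;  // filled in by the query
    vk::PhysicalDeviceFeatures2 features2;
    features2.setPNext( &blendFeatures );                               // chain the extension struct
    // the wrapper converts in place to the corresponding C struct
    vkGetPhysicalDeviceFeatures2( phys_dev,
        &static_cast<VkPhysicalDeviceFeatures2&>( features2 ) );
    bool coherentBlend = ( blendFeatures.advancedBlendCoherentOperations == VK_TRUE );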
+ + struct PhysicalDeviceBlendOperationAdvancedPropertiesEXT + { + operator VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT const&() const + { + return *reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(this); + } + + operator VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT &() + { + return *reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(this); + } + + bool operator==( PhysicalDeviceBlendOperationAdvancedPropertiesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( advancedBlendMaxColorAttachments == rhs.advancedBlendMaxColorAttachments ) + && ( advancedBlendIndependentBlend == rhs.advancedBlendIndependentBlend ) + && ( advancedBlendNonPremultipliedSrcColor == rhs.advancedBlendNonPremultipliedSrcColor ) + && ( advancedBlendNonPremultipliedDstColor == rhs.advancedBlendNonPremultipliedDstColor ) + && ( advancedBlendCorrelatedOverlap == rhs.advancedBlendCorrelatedOverlap ) + && ( advancedBlendAllOperations == rhs.advancedBlendAllOperations ); + } + + bool operator!=( PhysicalDeviceBlendOperationAdvancedPropertiesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceBlendOperationAdvancedPropertiesEXT; + + public: + void* pNext = nullptr; + uint32_t advancedBlendMaxColorAttachments; + Bool32 advancedBlendIndependentBlend; + Bool32 advancedBlendNonPremultipliedSrcColor; + Bool32 advancedBlendNonPremultipliedDstColor; + Bool32 advancedBlendCorrelatedOverlap; + Bool32 advancedBlendAllOperations; + }; + static_assert( sizeof( PhysicalDeviceBlendOperationAdvancedPropertiesEXT ) == sizeof( VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceInlineUniformBlockFeaturesEXT + { + operator VkPhysicalDeviceInlineUniformBlockFeaturesEXT const&() const + { + return *reinterpret_cast<const VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(this); + } + + operator VkPhysicalDeviceInlineUniformBlockFeaturesEXT &() + { + return *reinterpret_cast<VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(this); + } + + bool operator==( PhysicalDeviceInlineUniformBlockFeaturesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( inlineUniformBlock == rhs.inlineUniformBlock ) + && ( descriptorBindingInlineUniformBlockUpdateAfterBind == rhs.descriptorBindingInlineUniformBlockUpdateAfterBind ); + } + + bool operator!=( PhysicalDeviceInlineUniformBlockFeaturesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceInlineUniformBlockFeaturesEXT; + + public: + void* pNext = nullptr; + Bool32 inlineUniformBlock; + Bool32 descriptorBindingInlineUniformBlockUpdateAfterBind; + }; + static_assert( sizeof( PhysicalDeviceInlineUniformBlockFeaturesEXT ) == sizeof( VkPhysicalDeviceInlineUniformBlockFeaturesEXT ), "struct and wrapper have different size!"
); + + struct PhysicalDeviceInlineUniformBlockPropertiesEXT + { + operator VkPhysicalDeviceInlineUniformBlockPropertiesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceInlineUniformBlockPropertiesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceInlineUniformBlockPropertiesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxInlineUniformBlockSize == rhs.maxInlineUniformBlockSize ) + && ( maxPerStageDescriptorInlineUniformBlocks == rhs.maxPerStageDescriptorInlineUniformBlocks ) + && ( maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks == rhs.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks ) + && ( maxDescriptorSetInlineUniformBlocks == rhs.maxDescriptorSetInlineUniformBlocks ) + && ( maxDescriptorSetUpdateAfterBindInlineUniformBlocks == rhs.maxDescriptorSetUpdateAfterBindInlineUniformBlocks ); + } + + bool operator!=( PhysicalDeviceInlineUniformBlockPropertiesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceInlineUniformBlockPropertiesEXT; + + public: + void* pNext = nullptr; + uint32_t maxInlineUniformBlockSize; + uint32_t maxPerStageDescriptorInlineUniformBlocks; + uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks; + uint32_t maxDescriptorSetInlineUniformBlocks; + uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks; + }; + static_assert( sizeof( PhysicalDeviceInlineUniformBlockPropertiesEXT ) == sizeof( VkPhysicalDeviceInlineUniformBlockPropertiesEXT ), "struct and wrapper have different size!" ); + + struct WriteDescriptorSetInlineUniformBlockEXT + { + WriteDescriptorSetInlineUniformBlockEXT( uint32_t dataSize_ = 0, + const void* pData_ = nullptr ) + : dataSize( dataSize_ ) + , pData( pData_ ) + { + } + + WriteDescriptorSetInlineUniformBlockEXT( VkWriteDescriptorSetInlineUniformBlockEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( WriteDescriptorSetInlineUniformBlockEXT ) ); + } + + WriteDescriptorSetInlineUniformBlockEXT& operator=( VkWriteDescriptorSetInlineUniformBlockEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( WriteDescriptorSetInlineUniformBlockEXT ) ); + return *this; + } + WriteDescriptorSetInlineUniformBlockEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + WriteDescriptorSetInlineUniformBlockEXT& setDataSize( uint32_t dataSize_ ) + { + dataSize = dataSize_; + return *this; + } + + WriteDescriptorSetInlineUniformBlockEXT& setPData( const void* pData_ ) + { + pData = pData_; + return *this; + } + + operator VkWriteDescriptorSetInlineUniformBlockEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkWriteDescriptorSetInlineUniformBlockEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( WriteDescriptorSetInlineUniformBlockEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( dataSize == rhs.dataSize ) + && ( pData == rhs.pData ); + } + + bool operator!=( WriteDescriptorSetInlineUniformBlockEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eWriteDescriptorSetInlineUniformBlockEXT; + + public: + const void* pNext = nullptr; + uint32_t dataSize; + const void* pData; + }; + static_assert( sizeof( WriteDescriptorSetInlineUniformBlockEXT ) == sizeof( VkWriteDescriptorSetInlineUniformBlockEXT ), "struct and wrapper have different size!" 
); + + struct DescriptorPoolInlineUniformBlockCreateInfoEXT + { + DescriptorPoolInlineUniformBlockCreateInfoEXT( uint32_t maxInlineUniformBlockBindings_ = 0 ) + : maxInlineUniformBlockBindings( maxInlineUniformBlockBindings_ ) + { + } + + DescriptorPoolInlineUniformBlockCreateInfoEXT( VkDescriptorPoolInlineUniformBlockCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorPoolInlineUniformBlockCreateInfoEXT ) ); + } + + DescriptorPoolInlineUniformBlockCreateInfoEXT& operator=( VkDescriptorPoolInlineUniformBlockCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorPoolInlineUniformBlockCreateInfoEXT ) ); + return *this; + } + DescriptorPoolInlineUniformBlockCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DescriptorPoolInlineUniformBlockCreateInfoEXT& setMaxInlineUniformBlockBindings( uint32_t maxInlineUniformBlockBindings_ ) + { + maxInlineUniformBlockBindings = maxInlineUniformBlockBindings_; + return *this; + } + + operator VkDescriptorPoolInlineUniformBlockCreateInfoEXT const&() const + { + return *reinterpret_cast<const VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(this); + } + + operator VkDescriptorPoolInlineUniformBlockCreateInfoEXT &() + { + return *reinterpret_cast<VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(this); + } + + bool operator==( DescriptorPoolInlineUniformBlockCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxInlineUniformBlockBindings == rhs.maxInlineUniformBlockBindings ); + } + + bool operator!=( DescriptorPoolInlineUniformBlockCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorPoolInlineUniformBlockCreateInfoEXT; + + public: + const void* pNext = nullptr; + uint32_t maxInlineUniformBlockBindings; + }; + static_assert( sizeof( DescriptorPoolInlineUniformBlockCreateInfoEXT ) == sizeof( VkDescriptorPoolInlineUniformBlockCreateInfoEXT ), "struct and wrapper have different size!" );
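A minimal usage sketch (hypothetical sizes and variable names, assuming VK_EXT_inline_uniform_block is enabled on the device), showing the intended fluent-setter chaining and how this create-info hangs off DescriptorPoolCreateInfo::pNext:

    // for an inline-uniform-block pool size, descriptorCount is a byte capacity
    vk::DescriptorPoolSize poolSize = vk::DescriptorPoolSize()
        .setType( vk::DescriptorType::eInlineUniformBlockEXT )
        .setDescriptorCount( 256 );
    vk::DescriptorPoolInlineUniformBlockCreateInfoEXT inlineInfo = vk::DescriptorPoolInlineUniformBlockCreateInfoEXT()
        .setMaxInlineUniformBlockBindings( 1 );
    vk::DescriptorPoolCreateInfo poolInfo = vk::DescriptorPoolCreateInfo()
        .setPNext( &inlineInfo )        // extension struct chained via pNext
        .setMaxSets( 1 )
        .setPoolSizeCount( 1 )
        .setPPoolSizes( &poolSize );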
+ + struct ImageFormatListCreateInfoKHR + { + ImageFormatListCreateInfoKHR( uint32_t viewFormatCount_ = 0, + const Format* pViewFormats_ = nullptr ) + : viewFormatCount( viewFormatCount_ ) + , pViewFormats( pViewFormats_ ) + { + } + + ImageFormatListCreateInfoKHR( VkImageFormatListCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageFormatListCreateInfoKHR ) ); + } + + ImageFormatListCreateInfoKHR& operator=( VkImageFormatListCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageFormatListCreateInfoKHR ) ); + return *this; + } + ImageFormatListCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImageFormatListCreateInfoKHR& setViewFormatCount( uint32_t viewFormatCount_ ) + { + viewFormatCount = viewFormatCount_; + return *this; + } + + ImageFormatListCreateInfoKHR& setPViewFormats( const Format* pViewFormats_ ) + { + pViewFormats = pViewFormats_; + return *this; + } + + operator VkImageFormatListCreateInfoKHR const&() const + { + return *reinterpret_cast<const VkImageFormatListCreateInfoKHR*>(this); + } + + operator VkImageFormatListCreateInfoKHR &() + { + return *reinterpret_cast<VkImageFormatListCreateInfoKHR*>(this); + } + + bool operator==( ImageFormatListCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( viewFormatCount == rhs.viewFormatCount ) + && ( pViewFormats == rhs.pViewFormats ); + } + + bool operator!=( ImageFormatListCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageFormatListCreateInfoKHR; + + public: + const void* pNext = nullptr; + uint32_t viewFormatCount; + const Format* pViewFormats; + }; + static_assert( sizeof( ImageFormatListCreateInfoKHR ) == sizeof( VkImageFormatListCreateInfoKHR ), "struct and wrapper have different size!"
); + + struct ValidationCacheCreateInfoEXT + { + ValidationCacheCreateInfoEXT( ValidationCacheCreateFlagsEXT flags_ = ValidationCacheCreateFlagsEXT(), + size_t initialDataSize_ = 0, + const void* pInitialData_ = nullptr ) + : flags( flags_ ) + , initialDataSize( initialDataSize_ ) + , pInitialData( pInitialData_ ) + { + } + + ValidationCacheCreateInfoEXT( VkValidationCacheCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ValidationCacheCreateInfoEXT ) ); + } + + ValidationCacheCreateInfoEXT& operator=( VkValidationCacheCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ValidationCacheCreateInfoEXT ) ); + return *this; + } + ValidationCacheCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ValidationCacheCreateInfoEXT& setFlags( ValidationCacheCreateFlagsEXT flags_ ) + { + flags = flags_; + return *this; + } + + ValidationCacheCreateInfoEXT& setInitialDataSize( size_t initialDataSize_ ) + { + initialDataSize = initialDataSize_; + return *this; + } + + ValidationCacheCreateInfoEXT& setPInitialData( const void* pInitialData_ ) + { + pInitialData = pInitialData_; + return *this; + } + + operator VkValidationCacheCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkValidationCacheCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( ValidationCacheCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( initialDataSize == rhs.initialDataSize ) + && ( pInitialData == rhs.pInitialData ); + } + + bool operator!=( ValidationCacheCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eValidationCacheCreateInfoEXT; + + public: + const void* pNext = nullptr; + ValidationCacheCreateFlagsEXT flags; + size_t initialDataSize; + const void* pInitialData; + }; + static_assert( sizeof( ValidationCacheCreateInfoEXT ) == sizeof( VkValidationCacheCreateInfoEXT ), "struct and wrapper have different size!" 
); + + struct ShaderModuleValidationCacheCreateInfoEXT + { + ShaderModuleValidationCacheCreateInfoEXT( ValidationCacheEXT validationCache_ = ValidationCacheEXT() ) + : validationCache( validationCache_ ) + { + } + + ShaderModuleValidationCacheCreateInfoEXT( VkShaderModuleValidationCacheCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ShaderModuleValidationCacheCreateInfoEXT ) ); + } + + ShaderModuleValidationCacheCreateInfoEXT& operator=( VkShaderModuleValidationCacheCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ShaderModuleValidationCacheCreateInfoEXT ) ); + return *this; + } + ShaderModuleValidationCacheCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ShaderModuleValidationCacheCreateInfoEXT& setValidationCache( ValidationCacheEXT validationCache_ ) + { + validationCache = validationCache_; + return *this; + } + + operator VkShaderModuleValidationCacheCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkShaderModuleValidationCacheCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( ShaderModuleValidationCacheCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( validationCache == rhs.validationCache ); + } + + bool operator!=( ShaderModuleValidationCacheCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eShaderModuleValidationCacheCreateInfoEXT; + + public: + const void* pNext = nullptr; + ValidationCacheEXT validationCache; + }; + static_assert( sizeof( ShaderModuleValidationCacheCreateInfoEXT ) == sizeof( VkShaderModuleValidationCacheCreateInfoEXT ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceMaintenance3Properties + { + operator VkPhysicalDeviceMaintenance3Properties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceMaintenance3Properties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceMaintenance3Properties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxPerSetDescriptors == rhs.maxPerSetDescriptors ) + && ( maxMemoryAllocationSize == rhs.maxMemoryAllocationSize ); + } + + bool operator!=( PhysicalDeviceMaintenance3Properties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceMaintenance3Properties; + + public: + void* pNext = nullptr; + uint32_t maxPerSetDescriptors; + DeviceSize maxMemoryAllocationSize; + }; + static_assert( sizeof( PhysicalDeviceMaintenance3Properties ) == sizeof( VkPhysicalDeviceMaintenance3Properties ), "struct and wrapper have different size!" 
); + + using PhysicalDeviceMaintenance3PropertiesKHR = PhysicalDeviceMaintenance3Properties; + + struct DescriptorSetLayoutSupport + { + operator VkDescriptorSetLayoutSupport const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorSetLayoutSupport &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorSetLayoutSupport const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( supported == rhs.supported ); + } + + bool operator!=( DescriptorSetLayoutSupport const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorSetLayoutSupport; + + public: + void* pNext = nullptr; + Bool32 supported; + }; + static_assert( sizeof( DescriptorSetLayoutSupport ) == sizeof( VkDescriptorSetLayoutSupport ), "struct and wrapper have different size!" ); + + using DescriptorSetLayoutSupportKHR = DescriptorSetLayoutSupport; + + struct PhysicalDeviceShaderDrawParameterFeatures + { + PhysicalDeviceShaderDrawParameterFeatures( Bool32 shaderDrawParameters_ = 0 ) + : shaderDrawParameters( shaderDrawParameters_ ) + { + } + + PhysicalDeviceShaderDrawParameterFeatures( VkPhysicalDeviceShaderDrawParameterFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceShaderDrawParameterFeatures ) ); + } + + PhysicalDeviceShaderDrawParameterFeatures& operator=( VkPhysicalDeviceShaderDrawParameterFeatures const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceShaderDrawParameterFeatures ) ); + return *this; + } + PhysicalDeviceShaderDrawParameterFeatures& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderDrawParameterFeatures& setShaderDrawParameters( Bool32 shaderDrawParameters_ ) + { + shaderDrawParameters = shaderDrawParameters_; + return *this; + } + + operator VkPhysicalDeviceShaderDrawParameterFeatures const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceShaderDrawParameterFeatures &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceShaderDrawParameterFeatures const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderDrawParameters == rhs.shaderDrawParameters ); + } + + bool operator!=( PhysicalDeviceShaderDrawParameterFeatures const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceShaderDrawParameterFeatures; + + public: + void* pNext = nullptr; + Bool32 shaderDrawParameters; + }; + static_assert( sizeof( PhysicalDeviceShaderDrawParameterFeatures ) == sizeof( VkPhysicalDeviceShaderDrawParameterFeatures ), "struct and wrapper have different size!" 
+  );
+
+  struct DebugUtilsLabelEXT
+  {
+    DebugUtilsLabelEXT( const char* pLabelName_ = nullptr,
+                        std::array<float,4> const& color_ = { { 0, 0, 0, 0 } } )
+      : pLabelName( pLabelName_ )
+    {
+      memcpy( &color, color_.data(), 4 * sizeof( float ) );
+    }
+
+    DebugUtilsLabelEXT( VkDebugUtilsLabelEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( DebugUtilsLabelEXT ) );
+    }
+
+    DebugUtilsLabelEXT& operator=( VkDebugUtilsLabelEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( DebugUtilsLabelEXT ) );
+      return *this;
+    }
+    DebugUtilsLabelEXT& setPNext( const void* pNext_ )
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    DebugUtilsLabelEXT& setPLabelName( const char* pLabelName_ )
+    {
+      pLabelName = pLabelName_;
+      return *this;
+    }
+
+    DebugUtilsLabelEXT& setColor( std::array<float,4> color_ )
+    {
+      memcpy( &color, color_.data(), 4 * sizeof( float ) );
+      return *this;
+    }
+
+    operator VkDebugUtilsLabelEXT const&() const
+    {
+      return *reinterpret_cast<const VkDebugUtilsLabelEXT*>(this);
+    }
+
+    operator VkDebugUtilsLabelEXT &()
+    {
+      return *reinterpret_cast<VkDebugUtilsLabelEXT*>(this);
+    }
+
+    bool operator==( DebugUtilsLabelEXT const& rhs ) const
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( pLabelName == rhs.pLabelName )
+          && ( memcmp( color, rhs.color, 4 * sizeof( float ) ) == 0 );
+    }
+
+    bool operator!=( DebugUtilsLabelEXT const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+  private:
+    StructureType sType = StructureType::eDebugUtilsLabelEXT;
+
+  public:
+    const void* pNext = nullptr;
+    const char* pLabelName;
+    float color[4];
+  };
+  static_assert( sizeof( DebugUtilsLabelEXT ) == sizeof( VkDebugUtilsLabelEXT ), "struct and wrapper have different size!" );
+
+  struct MemoryHostPointerPropertiesEXT
+  {
+    MemoryHostPointerPropertiesEXT( uint32_t memoryTypeBits_ = 0 )
+      : memoryTypeBits( memoryTypeBits_ )
+    {
+    }
+
+    MemoryHostPointerPropertiesEXT( VkMemoryHostPointerPropertiesEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( MemoryHostPointerPropertiesEXT ) );
+    }
+
+    MemoryHostPointerPropertiesEXT& operator=( VkMemoryHostPointerPropertiesEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( MemoryHostPointerPropertiesEXT ) );
+      return *this;
+    }
+    MemoryHostPointerPropertiesEXT& setPNext( void* pNext_ )
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    MemoryHostPointerPropertiesEXT& setMemoryTypeBits( uint32_t memoryTypeBits_ )
+    {
+      memoryTypeBits = memoryTypeBits_;
+      return *this;
+    }
+
+    operator VkMemoryHostPointerPropertiesEXT const&() const
+    {
+      return *reinterpret_cast<const VkMemoryHostPointerPropertiesEXT*>(this);
+    }
+
+    operator VkMemoryHostPointerPropertiesEXT &()
+    {
+      return *reinterpret_cast<VkMemoryHostPointerPropertiesEXT*>(this);
+    }
+
+    bool operator==( MemoryHostPointerPropertiesEXT const& rhs ) const
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( memoryTypeBits == rhs.memoryTypeBits );
+    }
+
+    bool operator!=( MemoryHostPointerPropertiesEXT const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+  private:
+    StructureType sType = StructureType::eMemoryHostPointerPropertiesEXT;
+
+  public:
+    void* pNext = nullptr;
+    uint32_t memoryTypeBits;
+  };
+  static_assert( sizeof( MemoryHostPointerPropertiesEXT ) == sizeof( VkMemoryHostPointerPropertiesEXT ), "struct and wrapper have different size!"
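+  // Illustrative usage sketch (not part of the generated API): wrapping a
+  // command-buffer region with the DebugUtilsLabelEXT struct defined above so
+  // that debuggers and profilers can display a named, colored marker. Assumes
+  // a vk::CommandBuffer named `cmd` and an instance created with
+  // VK_EXT_debug_utils enabled.
+  //
+  //   vk::DebugUtilsLabelEXT label = vk::DebugUtilsLabelEXT()
+  //     .setPLabelName( "shadow pass" )
+  //     .setColor( { { 1.0f, 0.5f, 0.0f, 1.0f } } );
+  //   cmd.beginDebugUtilsLabelEXT( label );
+  //   // ... record the pass ...
+  //   cmd.endDebugUtilsLabelEXT();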
+  );
+
+  struct PhysicalDeviceExternalMemoryHostPropertiesEXT
+  {
+    PhysicalDeviceExternalMemoryHostPropertiesEXT( DeviceSize minImportedHostPointerAlignment_ = 0 )
+      : minImportedHostPointerAlignment( minImportedHostPointerAlignment_ )
+    {
+    }
+
+    PhysicalDeviceExternalMemoryHostPropertiesEXT( VkPhysicalDeviceExternalMemoryHostPropertiesEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( PhysicalDeviceExternalMemoryHostPropertiesEXT ) );
+    }
+
+    PhysicalDeviceExternalMemoryHostPropertiesEXT& operator=( VkPhysicalDeviceExternalMemoryHostPropertiesEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( PhysicalDeviceExternalMemoryHostPropertiesEXT ) );
+      return *this;
+    }
+    PhysicalDeviceExternalMemoryHostPropertiesEXT& setPNext( void* pNext_ )
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PhysicalDeviceExternalMemoryHostPropertiesEXT& setMinImportedHostPointerAlignment( DeviceSize minImportedHostPointerAlignment_ )
+    {
+      minImportedHostPointerAlignment = minImportedHostPointerAlignment_;
+      return *this;
+    }
+
+    operator VkPhysicalDeviceExternalMemoryHostPropertiesEXT const&() const
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(this);
+    }
+
+    operator VkPhysicalDeviceExternalMemoryHostPropertiesEXT &()
+    {
+      return *reinterpret_cast<VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(this);
+    }
+
+    bool operator==( PhysicalDeviceExternalMemoryHostPropertiesEXT const& rhs ) const
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( minImportedHostPointerAlignment == rhs.minImportedHostPointerAlignment );
+    }
+
+    bool operator!=( PhysicalDeviceExternalMemoryHostPropertiesEXT const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+  private:
+    StructureType sType = StructureType::ePhysicalDeviceExternalMemoryHostPropertiesEXT;
+
+  public:
+    void* pNext = nullptr;
+    DeviceSize minImportedHostPointerAlignment;
+  };
+  static_assert( sizeof( PhysicalDeviceExternalMemoryHostPropertiesEXT ) == sizeof( VkPhysicalDeviceExternalMemoryHostPropertiesEXT ), "struct and wrapper have different size!"
); + + struct PhysicalDeviceConservativeRasterizationPropertiesEXT + { + PhysicalDeviceConservativeRasterizationPropertiesEXT( float primitiveOverestimationSize_ = 0, + float maxExtraPrimitiveOverestimationSize_ = 0, + float extraPrimitiveOverestimationSizeGranularity_ = 0, + Bool32 primitiveUnderestimation_ = 0, + Bool32 conservativePointAndLineRasterization_ = 0, + Bool32 degenerateTrianglesRasterized_ = 0, + Bool32 degenerateLinesRasterized_ = 0, + Bool32 fullyCoveredFragmentShaderInputVariable_ = 0, + Bool32 conservativeRasterizationPostDepthCoverage_ = 0 ) + : primitiveOverestimationSize( primitiveOverestimationSize_ ) + , maxExtraPrimitiveOverestimationSize( maxExtraPrimitiveOverestimationSize_ ) + , extraPrimitiveOverestimationSizeGranularity( extraPrimitiveOverestimationSizeGranularity_ ) + , primitiveUnderestimation( primitiveUnderestimation_ ) + , conservativePointAndLineRasterization( conservativePointAndLineRasterization_ ) + , degenerateTrianglesRasterized( degenerateTrianglesRasterized_ ) + , degenerateLinesRasterized( degenerateLinesRasterized_ ) + , fullyCoveredFragmentShaderInputVariable( fullyCoveredFragmentShaderInputVariable_ ) + , conservativeRasterizationPostDepthCoverage( conservativeRasterizationPostDepthCoverage_ ) + { + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT( VkPhysicalDeviceConservativeRasterizationPropertiesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceConservativeRasterizationPropertiesEXT ) ); + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& operator=( VkPhysicalDeviceConservativeRasterizationPropertiesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceConservativeRasterizationPropertiesEXT ) ); + return *this; + } + PhysicalDeviceConservativeRasterizationPropertiesEXT& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& setPrimitiveOverestimationSize( float primitiveOverestimationSize_ ) + { + primitiveOverestimationSize = primitiveOverestimationSize_; + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& setMaxExtraPrimitiveOverestimationSize( float maxExtraPrimitiveOverestimationSize_ ) + { + maxExtraPrimitiveOverestimationSize = maxExtraPrimitiveOverestimationSize_; + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& setExtraPrimitiveOverestimationSizeGranularity( float extraPrimitiveOverestimationSizeGranularity_ ) + { + extraPrimitiveOverestimationSizeGranularity = extraPrimitiveOverestimationSizeGranularity_; + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& setPrimitiveUnderestimation( Bool32 primitiveUnderestimation_ ) + { + primitiveUnderestimation = primitiveUnderestimation_; + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& setConservativePointAndLineRasterization( Bool32 conservativePointAndLineRasterization_ ) + { + conservativePointAndLineRasterization = conservativePointAndLineRasterization_; + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& setDegenerateTrianglesRasterized( Bool32 degenerateTrianglesRasterized_ ) + { + degenerateTrianglesRasterized = degenerateTrianglesRasterized_; + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& setDegenerateLinesRasterized( Bool32 degenerateLinesRasterized_ ) + { + degenerateLinesRasterized = degenerateLinesRasterized_; + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& 
setFullyCoveredFragmentShaderInputVariable( Bool32 fullyCoveredFragmentShaderInputVariable_ ) + { + fullyCoveredFragmentShaderInputVariable = fullyCoveredFragmentShaderInputVariable_; + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT& setConservativeRasterizationPostDepthCoverage( Bool32 conservativeRasterizationPostDepthCoverage_ ) + { + conservativeRasterizationPostDepthCoverage = conservativeRasterizationPostDepthCoverage_; + return *this; + } + + operator VkPhysicalDeviceConservativeRasterizationPropertiesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceConservativeRasterizationPropertiesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceConservativeRasterizationPropertiesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( primitiveOverestimationSize == rhs.primitiveOverestimationSize ) + && ( maxExtraPrimitiveOverestimationSize == rhs.maxExtraPrimitiveOverestimationSize ) + && ( extraPrimitiveOverestimationSizeGranularity == rhs.extraPrimitiveOverestimationSizeGranularity ) + && ( primitiveUnderestimation == rhs.primitiveUnderestimation ) + && ( conservativePointAndLineRasterization == rhs.conservativePointAndLineRasterization ) + && ( degenerateTrianglesRasterized == rhs.degenerateTrianglesRasterized ) + && ( degenerateLinesRasterized == rhs.degenerateLinesRasterized ) + && ( fullyCoveredFragmentShaderInputVariable == rhs.fullyCoveredFragmentShaderInputVariable ) + && ( conservativeRasterizationPostDepthCoverage == rhs.conservativeRasterizationPostDepthCoverage ); + } + + bool operator!=( PhysicalDeviceConservativeRasterizationPropertiesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceConservativeRasterizationPropertiesEXT; + + public: + void* pNext = nullptr; + float primitiveOverestimationSize; + float maxExtraPrimitiveOverestimationSize; + float extraPrimitiveOverestimationSizeGranularity; + Bool32 primitiveUnderestimation; + Bool32 conservativePointAndLineRasterization; + Bool32 degenerateTrianglesRasterized; + Bool32 degenerateLinesRasterized; + Bool32 fullyCoveredFragmentShaderInputVariable; + Bool32 conservativeRasterizationPostDepthCoverage; + }; + static_assert( sizeof( PhysicalDeviceConservativeRasterizationPropertiesEXT ) == sizeof( VkPhysicalDeviceConservativeRasterizationPropertiesEXT ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceShaderCorePropertiesAMD + { + operator VkPhysicalDeviceShaderCorePropertiesAMD const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceShaderCorePropertiesAMD &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceShaderCorePropertiesAMD const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderEngineCount == rhs.shaderEngineCount ) + && ( shaderArraysPerEngineCount == rhs.shaderArraysPerEngineCount ) + && ( computeUnitsPerShaderArray == rhs.computeUnitsPerShaderArray ) + && ( simdPerComputeUnit == rhs.simdPerComputeUnit ) + && ( wavefrontsPerSimd == rhs.wavefrontsPerSimd ) + && ( wavefrontSize == rhs.wavefrontSize ) + && ( sgprsPerSimd == rhs.sgprsPerSimd ) + && ( minSgprAllocation == rhs.minSgprAllocation ) + && ( maxSgprAllocation == rhs.maxSgprAllocation ) + && ( sgprAllocationGranularity == rhs.sgprAllocationGranularity ) + && ( vgprsPerSimd == rhs.vgprsPerSimd ) + && ( minVgprAllocation == rhs.minVgprAllocation ) + && ( maxVgprAllocation == rhs.maxVgprAllocation ) + && ( vgprAllocationGranularity == rhs.vgprAllocationGranularity ); + } + + bool operator!=( PhysicalDeviceShaderCorePropertiesAMD const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceShaderCorePropertiesAMD; + + public: + void* pNext = nullptr; + uint32_t shaderEngineCount; + uint32_t shaderArraysPerEngineCount; + uint32_t computeUnitsPerShaderArray; + uint32_t simdPerComputeUnit; + uint32_t wavefrontsPerSimd; + uint32_t wavefrontSize; + uint32_t sgprsPerSimd; + uint32_t minSgprAllocation; + uint32_t maxSgprAllocation; + uint32_t sgprAllocationGranularity; + uint32_t vgprsPerSimd; + uint32_t minVgprAllocation; + uint32_t maxVgprAllocation; + uint32_t vgprAllocationGranularity; + }; + static_assert( sizeof( PhysicalDeviceShaderCorePropertiesAMD ) == sizeof( VkPhysicalDeviceShaderCorePropertiesAMD ), "struct and wrapper have different size!" 
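+  // Illustrative usage sketch (not part of the generated API): reading the AMD
+  // shader-core properties above through a getProperties2 pNext chain. Assumes
+  // a vk::PhysicalDevice named `physicalDevice` that exposes
+  // VK_AMD_shader_core_properties.
+  //
+  //   vk::PhysicalDeviceShaderCorePropertiesAMD coreProps;
+  //   vk::PhysicalDeviceProperties2 props2;
+  //   props2.pNext = &coreProps;                    // chain the extension struct
+  //   physicalDevice.getProperties2( &props2 );
+  //   // coreProps.wavefrontSize, coreProps.shaderEngineCount, ... are now filled in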
); + + struct PhysicalDeviceDescriptorIndexingFeaturesEXT + { + PhysicalDeviceDescriptorIndexingFeaturesEXT( Bool32 shaderInputAttachmentArrayDynamicIndexing_ = 0, + Bool32 shaderUniformTexelBufferArrayDynamicIndexing_ = 0, + Bool32 shaderStorageTexelBufferArrayDynamicIndexing_ = 0, + Bool32 shaderUniformBufferArrayNonUniformIndexing_ = 0, + Bool32 shaderSampledImageArrayNonUniformIndexing_ = 0, + Bool32 shaderStorageBufferArrayNonUniformIndexing_ = 0, + Bool32 shaderStorageImageArrayNonUniformIndexing_ = 0, + Bool32 shaderInputAttachmentArrayNonUniformIndexing_ = 0, + Bool32 shaderUniformTexelBufferArrayNonUniformIndexing_ = 0, + Bool32 shaderStorageTexelBufferArrayNonUniformIndexing_ = 0, + Bool32 descriptorBindingUniformBufferUpdateAfterBind_ = 0, + Bool32 descriptorBindingSampledImageUpdateAfterBind_ = 0, + Bool32 descriptorBindingStorageImageUpdateAfterBind_ = 0, + Bool32 descriptorBindingStorageBufferUpdateAfterBind_ = 0, + Bool32 descriptorBindingUniformTexelBufferUpdateAfterBind_ = 0, + Bool32 descriptorBindingStorageTexelBufferUpdateAfterBind_ = 0, + Bool32 descriptorBindingUpdateUnusedWhilePending_ = 0, + Bool32 descriptorBindingPartiallyBound_ = 0, + Bool32 descriptorBindingVariableDescriptorCount_ = 0, + Bool32 runtimeDescriptorArray_ = 0 ) + : shaderInputAttachmentArrayDynamicIndexing( shaderInputAttachmentArrayDynamicIndexing_ ) + , shaderUniformTexelBufferArrayDynamicIndexing( shaderUniformTexelBufferArrayDynamicIndexing_ ) + , shaderStorageTexelBufferArrayDynamicIndexing( shaderStorageTexelBufferArrayDynamicIndexing_ ) + , shaderUniformBufferArrayNonUniformIndexing( shaderUniformBufferArrayNonUniformIndexing_ ) + , shaderSampledImageArrayNonUniformIndexing( shaderSampledImageArrayNonUniformIndexing_ ) + , shaderStorageBufferArrayNonUniformIndexing( shaderStorageBufferArrayNonUniformIndexing_ ) + , shaderStorageImageArrayNonUniformIndexing( shaderStorageImageArrayNonUniformIndexing_ ) + , shaderInputAttachmentArrayNonUniformIndexing( shaderInputAttachmentArrayNonUniformIndexing_ ) + , shaderUniformTexelBufferArrayNonUniformIndexing( shaderUniformTexelBufferArrayNonUniformIndexing_ ) + , shaderStorageTexelBufferArrayNonUniformIndexing( shaderStorageTexelBufferArrayNonUniformIndexing_ ) + , descriptorBindingUniformBufferUpdateAfterBind( descriptorBindingUniformBufferUpdateAfterBind_ ) + , descriptorBindingSampledImageUpdateAfterBind( descriptorBindingSampledImageUpdateAfterBind_ ) + , descriptorBindingStorageImageUpdateAfterBind( descriptorBindingStorageImageUpdateAfterBind_ ) + , descriptorBindingStorageBufferUpdateAfterBind( descriptorBindingStorageBufferUpdateAfterBind_ ) + , descriptorBindingUniformTexelBufferUpdateAfterBind( descriptorBindingUniformTexelBufferUpdateAfterBind_ ) + , descriptorBindingStorageTexelBufferUpdateAfterBind( descriptorBindingStorageTexelBufferUpdateAfterBind_ ) + , descriptorBindingUpdateUnusedWhilePending( descriptorBindingUpdateUnusedWhilePending_ ) + , descriptorBindingPartiallyBound( descriptorBindingPartiallyBound_ ) + , descriptorBindingVariableDescriptorCount( descriptorBindingVariableDescriptorCount_ ) + , runtimeDescriptorArray( runtimeDescriptorArray_ ) + { + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT( VkPhysicalDeviceDescriptorIndexingFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceDescriptorIndexingFeaturesEXT ) ); + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& operator=( VkPhysicalDeviceDescriptorIndexingFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( 
PhysicalDeviceDescriptorIndexingFeaturesEXT ) ); + return *this; + } + PhysicalDeviceDescriptorIndexingFeaturesEXT& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderInputAttachmentArrayDynamicIndexing( Bool32 shaderInputAttachmentArrayDynamicIndexing_ ) + { + shaderInputAttachmentArrayDynamicIndexing = shaderInputAttachmentArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderUniformTexelBufferArrayDynamicIndexing( Bool32 shaderUniformTexelBufferArrayDynamicIndexing_ ) + { + shaderUniformTexelBufferArrayDynamicIndexing = shaderUniformTexelBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderStorageTexelBufferArrayDynamicIndexing( Bool32 shaderStorageTexelBufferArrayDynamicIndexing_ ) + { + shaderStorageTexelBufferArrayDynamicIndexing = shaderStorageTexelBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderUniformBufferArrayNonUniformIndexing( Bool32 shaderUniformBufferArrayNonUniformIndexing_ ) + { + shaderUniformBufferArrayNonUniformIndexing = shaderUniformBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderSampledImageArrayNonUniformIndexing( Bool32 shaderSampledImageArrayNonUniformIndexing_ ) + { + shaderSampledImageArrayNonUniformIndexing = shaderSampledImageArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderStorageBufferArrayNonUniformIndexing( Bool32 shaderStorageBufferArrayNonUniformIndexing_ ) + { + shaderStorageBufferArrayNonUniformIndexing = shaderStorageBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderStorageImageArrayNonUniformIndexing( Bool32 shaderStorageImageArrayNonUniformIndexing_ ) + { + shaderStorageImageArrayNonUniformIndexing = shaderStorageImageArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderInputAttachmentArrayNonUniformIndexing( Bool32 shaderInputAttachmentArrayNonUniformIndexing_ ) + { + shaderInputAttachmentArrayNonUniformIndexing = shaderInputAttachmentArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderUniformTexelBufferArrayNonUniformIndexing( Bool32 shaderUniformTexelBufferArrayNonUniformIndexing_ ) + { + shaderUniformTexelBufferArrayNonUniformIndexing = shaderUniformTexelBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setShaderStorageTexelBufferArrayNonUniformIndexing( Bool32 shaderStorageTexelBufferArrayNonUniformIndexing_ ) + { + shaderStorageTexelBufferArrayNonUniformIndexing = shaderStorageTexelBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setDescriptorBindingUniformBufferUpdateAfterBind( Bool32 descriptorBindingUniformBufferUpdateAfterBind_ ) + { + descriptorBindingUniformBufferUpdateAfterBind = descriptorBindingUniformBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setDescriptorBindingSampledImageUpdateAfterBind( Bool32 descriptorBindingSampledImageUpdateAfterBind_ ) + { + descriptorBindingSampledImageUpdateAfterBind = descriptorBindingSampledImageUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setDescriptorBindingStorageImageUpdateAfterBind( Bool32 
descriptorBindingStorageImageUpdateAfterBind_ ) + { + descriptorBindingStorageImageUpdateAfterBind = descriptorBindingStorageImageUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setDescriptorBindingStorageBufferUpdateAfterBind( Bool32 descriptorBindingStorageBufferUpdateAfterBind_ ) + { + descriptorBindingStorageBufferUpdateAfterBind = descriptorBindingStorageBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setDescriptorBindingUniformTexelBufferUpdateAfterBind( Bool32 descriptorBindingUniformTexelBufferUpdateAfterBind_ ) + { + descriptorBindingUniformTexelBufferUpdateAfterBind = descriptorBindingUniformTexelBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setDescriptorBindingStorageTexelBufferUpdateAfterBind( Bool32 descriptorBindingStorageTexelBufferUpdateAfterBind_ ) + { + descriptorBindingStorageTexelBufferUpdateAfterBind = descriptorBindingStorageTexelBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setDescriptorBindingUpdateUnusedWhilePending( Bool32 descriptorBindingUpdateUnusedWhilePending_ ) + { + descriptorBindingUpdateUnusedWhilePending = descriptorBindingUpdateUnusedWhilePending_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setDescriptorBindingPartiallyBound( Bool32 descriptorBindingPartiallyBound_ ) + { + descriptorBindingPartiallyBound = descriptorBindingPartiallyBound_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setDescriptorBindingVariableDescriptorCount( Bool32 descriptorBindingVariableDescriptorCount_ ) + { + descriptorBindingVariableDescriptorCount = descriptorBindingVariableDescriptorCount_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeaturesEXT& setRuntimeDescriptorArray( Bool32 runtimeDescriptorArray_ ) + { + runtimeDescriptorArray = runtimeDescriptorArray_; + return *this; + } + + operator VkPhysicalDeviceDescriptorIndexingFeaturesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceDescriptorIndexingFeaturesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceDescriptorIndexingFeaturesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderInputAttachmentArrayDynamicIndexing == rhs.shaderInputAttachmentArrayDynamicIndexing ) + && ( shaderUniformTexelBufferArrayDynamicIndexing == rhs.shaderUniformTexelBufferArrayDynamicIndexing ) + && ( shaderStorageTexelBufferArrayDynamicIndexing == rhs.shaderStorageTexelBufferArrayDynamicIndexing ) + && ( shaderUniformBufferArrayNonUniformIndexing == rhs.shaderUniformBufferArrayNonUniformIndexing ) + && ( shaderSampledImageArrayNonUniformIndexing == rhs.shaderSampledImageArrayNonUniformIndexing ) + && ( shaderStorageBufferArrayNonUniformIndexing == rhs.shaderStorageBufferArrayNonUniformIndexing ) + && ( shaderStorageImageArrayNonUniformIndexing == rhs.shaderStorageImageArrayNonUniformIndexing ) + && ( shaderInputAttachmentArrayNonUniformIndexing == rhs.shaderInputAttachmentArrayNonUniformIndexing ) + && ( shaderUniformTexelBufferArrayNonUniformIndexing == rhs.shaderUniformTexelBufferArrayNonUniformIndexing ) + && ( shaderStorageTexelBufferArrayNonUniformIndexing == rhs.shaderStorageTexelBufferArrayNonUniformIndexing ) + && ( descriptorBindingUniformBufferUpdateAfterBind == rhs.descriptorBindingUniformBufferUpdateAfterBind ) + && ( descriptorBindingSampledImageUpdateAfterBind == 
rhs.descriptorBindingSampledImageUpdateAfterBind ) + && ( descriptorBindingStorageImageUpdateAfterBind == rhs.descriptorBindingStorageImageUpdateAfterBind ) + && ( descriptorBindingStorageBufferUpdateAfterBind == rhs.descriptorBindingStorageBufferUpdateAfterBind ) + && ( descriptorBindingUniformTexelBufferUpdateAfterBind == rhs.descriptorBindingUniformTexelBufferUpdateAfterBind ) + && ( descriptorBindingStorageTexelBufferUpdateAfterBind == rhs.descriptorBindingStorageTexelBufferUpdateAfterBind ) + && ( descriptorBindingUpdateUnusedWhilePending == rhs.descriptorBindingUpdateUnusedWhilePending ) + && ( descriptorBindingPartiallyBound == rhs.descriptorBindingPartiallyBound ) + && ( descriptorBindingVariableDescriptorCount == rhs.descriptorBindingVariableDescriptorCount ) + && ( runtimeDescriptorArray == rhs.runtimeDescriptorArray ); + } + + bool operator!=( PhysicalDeviceDescriptorIndexingFeaturesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceDescriptorIndexingFeaturesEXT; + + public: + void* pNext = nullptr; + Bool32 shaderInputAttachmentArrayDynamicIndexing; + Bool32 shaderUniformTexelBufferArrayDynamicIndexing; + Bool32 shaderStorageTexelBufferArrayDynamicIndexing; + Bool32 shaderUniformBufferArrayNonUniformIndexing; + Bool32 shaderSampledImageArrayNonUniformIndexing; + Bool32 shaderStorageBufferArrayNonUniformIndexing; + Bool32 shaderStorageImageArrayNonUniformIndexing; + Bool32 shaderInputAttachmentArrayNonUniformIndexing; + Bool32 shaderUniformTexelBufferArrayNonUniformIndexing; + Bool32 shaderStorageTexelBufferArrayNonUniformIndexing; + Bool32 descriptorBindingUniformBufferUpdateAfterBind; + Bool32 descriptorBindingSampledImageUpdateAfterBind; + Bool32 descriptorBindingStorageImageUpdateAfterBind; + Bool32 descriptorBindingStorageBufferUpdateAfterBind; + Bool32 descriptorBindingUniformTexelBufferUpdateAfterBind; + Bool32 descriptorBindingStorageTexelBufferUpdateAfterBind; + Bool32 descriptorBindingUpdateUnusedWhilePending; + Bool32 descriptorBindingPartiallyBound; + Bool32 descriptorBindingVariableDescriptorCount; + Bool32 runtimeDescriptorArray; + }; + static_assert( sizeof( PhysicalDeviceDescriptorIndexingFeaturesEXT ) == sizeof( VkPhysicalDeviceDescriptorIndexingFeaturesEXT ), "struct and wrapper have different size!" 
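+  // Illustrative usage sketch (not part of the generated API): querying the
+  // descriptor-indexing features above and chaining them into device creation,
+  // which is how bindless-style texture arrays (runtimeDescriptorArray,
+  // descriptorBindingPartiallyBound, update-after-bind) are enabled. Assumes a
+  // vk::PhysicalDevice named `physicalDevice` and an otherwise prepared
+  // vk::DeviceCreateInfo named `deviceInfo`.
+  //
+  //   vk::PhysicalDeviceDescriptorIndexingFeaturesEXT indexingFeatures;
+  //   vk::PhysicalDeviceFeatures2 features2;
+  //   features2.pNext = &indexingFeatures;
+  //   physicalDevice.getFeatures2( &features2 );     // driver reports what it supports
+  //
+  //   deviceInfo.setPNext( &features2 );             // request the reported features
+  //   vk::Device device = physicalDevice.createDevice( deviceInfo );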
); + + struct PhysicalDeviceDescriptorIndexingPropertiesEXT + { + operator VkPhysicalDeviceDescriptorIndexingPropertiesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceDescriptorIndexingPropertiesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceDescriptorIndexingPropertiesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxUpdateAfterBindDescriptorsInAllPools == rhs.maxUpdateAfterBindDescriptorsInAllPools ) + && ( shaderUniformBufferArrayNonUniformIndexingNative == rhs.shaderUniformBufferArrayNonUniformIndexingNative ) + && ( shaderSampledImageArrayNonUniformIndexingNative == rhs.shaderSampledImageArrayNonUniformIndexingNative ) + && ( shaderStorageBufferArrayNonUniformIndexingNative == rhs.shaderStorageBufferArrayNonUniformIndexingNative ) + && ( shaderStorageImageArrayNonUniformIndexingNative == rhs.shaderStorageImageArrayNonUniformIndexingNative ) + && ( shaderInputAttachmentArrayNonUniformIndexingNative == rhs.shaderInputAttachmentArrayNonUniformIndexingNative ) + && ( robustBufferAccessUpdateAfterBind == rhs.robustBufferAccessUpdateAfterBind ) + && ( quadDivergentImplicitLod == rhs.quadDivergentImplicitLod ) + && ( maxPerStageDescriptorUpdateAfterBindSamplers == rhs.maxPerStageDescriptorUpdateAfterBindSamplers ) + && ( maxPerStageDescriptorUpdateAfterBindUniformBuffers == rhs.maxPerStageDescriptorUpdateAfterBindUniformBuffers ) + && ( maxPerStageDescriptorUpdateAfterBindStorageBuffers == rhs.maxPerStageDescriptorUpdateAfterBindStorageBuffers ) + && ( maxPerStageDescriptorUpdateAfterBindSampledImages == rhs.maxPerStageDescriptorUpdateAfterBindSampledImages ) + && ( maxPerStageDescriptorUpdateAfterBindStorageImages == rhs.maxPerStageDescriptorUpdateAfterBindStorageImages ) + && ( maxPerStageDescriptorUpdateAfterBindInputAttachments == rhs.maxPerStageDescriptorUpdateAfterBindInputAttachments ) + && ( maxPerStageUpdateAfterBindResources == rhs.maxPerStageUpdateAfterBindResources ) + && ( maxDescriptorSetUpdateAfterBindSamplers == rhs.maxDescriptorSetUpdateAfterBindSamplers ) + && ( maxDescriptorSetUpdateAfterBindUniformBuffers == rhs.maxDescriptorSetUpdateAfterBindUniformBuffers ) + && ( maxDescriptorSetUpdateAfterBindUniformBuffersDynamic == rhs.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic ) + && ( maxDescriptorSetUpdateAfterBindStorageBuffers == rhs.maxDescriptorSetUpdateAfterBindStorageBuffers ) + && ( maxDescriptorSetUpdateAfterBindStorageBuffersDynamic == rhs.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic ) + && ( maxDescriptorSetUpdateAfterBindSampledImages == rhs.maxDescriptorSetUpdateAfterBindSampledImages ) + && ( maxDescriptorSetUpdateAfterBindStorageImages == rhs.maxDescriptorSetUpdateAfterBindStorageImages ) + && ( maxDescriptorSetUpdateAfterBindInputAttachments == rhs.maxDescriptorSetUpdateAfterBindInputAttachments ); + } + + bool operator!=( PhysicalDeviceDescriptorIndexingPropertiesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceDescriptorIndexingPropertiesEXT; + + public: + void* pNext = nullptr; + uint32_t maxUpdateAfterBindDescriptorsInAllPools; + Bool32 shaderUniformBufferArrayNonUniformIndexingNative; + Bool32 shaderSampledImageArrayNonUniformIndexingNative; + Bool32 shaderStorageBufferArrayNonUniformIndexingNative; + Bool32 shaderStorageImageArrayNonUniformIndexingNative; + Bool32 shaderInputAttachmentArrayNonUniformIndexingNative; + Bool32 
robustBufferAccessUpdateAfterBind; + Bool32 quadDivergentImplicitLod; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; + uint32_t maxPerStageUpdateAfterBindResources; + uint32_t maxDescriptorSetUpdateAfterBindSamplers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; + }; + static_assert( sizeof( PhysicalDeviceDescriptorIndexingPropertiesEXT ) == sizeof( VkPhysicalDeviceDescriptorIndexingPropertiesEXT ), "struct and wrapper have different size!" ); + + struct DescriptorSetVariableDescriptorCountAllocateInfoEXT + { + DescriptorSetVariableDescriptorCountAllocateInfoEXT( uint32_t descriptorSetCount_ = 0, + const uint32_t* pDescriptorCounts_ = nullptr ) + : descriptorSetCount( descriptorSetCount_ ) + , pDescriptorCounts( pDescriptorCounts_ ) + { + } + + DescriptorSetVariableDescriptorCountAllocateInfoEXT( VkDescriptorSetVariableDescriptorCountAllocateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetVariableDescriptorCountAllocateInfoEXT ) ); + } + + DescriptorSetVariableDescriptorCountAllocateInfoEXT& operator=( VkDescriptorSetVariableDescriptorCountAllocateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetVariableDescriptorCountAllocateInfoEXT ) ); + return *this; + } + DescriptorSetVariableDescriptorCountAllocateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DescriptorSetVariableDescriptorCountAllocateInfoEXT& setDescriptorSetCount( uint32_t descriptorSetCount_ ) + { + descriptorSetCount = descriptorSetCount_; + return *this; + } + + DescriptorSetVariableDescriptorCountAllocateInfoEXT& setPDescriptorCounts( const uint32_t* pDescriptorCounts_ ) + { + pDescriptorCounts = pDescriptorCounts_; + return *this; + } + + operator VkDescriptorSetVariableDescriptorCountAllocateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorSetVariableDescriptorCountAllocateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorSetVariableDescriptorCountAllocateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( descriptorSetCount == rhs.descriptorSetCount ) + && ( pDescriptorCounts == rhs.pDescriptorCounts ); + } + + bool operator!=( DescriptorSetVariableDescriptorCountAllocateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorSetVariableDescriptorCountAllocateInfoEXT; + + public: + const void* pNext = nullptr; + uint32_t descriptorSetCount; + const uint32_t* pDescriptorCounts; + }; + static_assert( sizeof( DescriptorSetVariableDescriptorCountAllocateInfoEXT ) == sizeof( VkDescriptorSetVariableDescriptorCountAllocateInfoEXT ), "struct and wrapper have different size!" 
); + + struct DescriptorSetVariableDescriptorCountLayoutSupportEXT + { + operator VkDescriptorSetVariableDescriptorCountLayoutSupportEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorSetVariableDescriptorCountLayoutSupportEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorSetVariableDescriptorCountLayoutSupportEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxVariableDescriptorCount == rhs.maxVariableDescriptorCount ); + } + + bool operator!=( DescriptorSetVariableDescriptorCountLayoutSupportEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorSetVariableDescriptorCountLayoutSupportEXT; + + public: + void* pNext = nullptr; + uint32_t maxVariableDescriptorCount; + }; + static_assert( sizeof( DescriptorSetVariableDescriptorCountLayoutSupportEXT ) == sizeof( VkDescriptorSetVariableDescriptorCountLayoutSupportEXT ), "struct and wrapper have different size!" ); + + struct SubpassEndInfoKHR + { + SubpassEndInfoKHR( ) + { + } + + SubpassEndInfoKHR( VkSubpassEndInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassEndInfoKHR ) ); + } + + SubpassEndInfoKHR& operator=( VkSubpassEndInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassEndInfoKHR ) ); + return *this; + } + SubpassEndInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + operator VkSubpassEndInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSubpassEndInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SubpassEndInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ); + } + + bool operator!=( SubpassEndInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSubpassEndInfoKHR; + + public: + const void* pNext = nullptr; + }; + static_assert( sizeof( SubpassEndInfoKHR ) == sizeof( VkSubpassEndInfoKHR ), "struct and wrapper have different size!" 
); + + struct PipelineVertexInputDivisorStateCreateInfoEXT + { + PipelineVertexInputDivisorStateCreateInfoEXT( uint32_t vertexBindingDivisorCount_ = 0, + const VertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors_ = nullptr ) + : vertexBindingDivisorCount( vertexBindingDivisorCount_ ) + , pVertexBindingDivisors( pVertexBindingDivisors_ ) + { + } + + PipelineVertexInputDivisorStateCreateInfoEXT( VkPipelineVertexInputDivisorStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineVertexInputDivisorStateCreateInfoEXT ) ); + } + + PipelineVertexInputDivisorStateCreateInfoEXT& operator=( VkPipelineVertexInputDivisorStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineVertexInputDivisorStateCreateInfoEXT ) ); + return *this; + } + PipelineVertexInputDivisorStateCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineVertexInputDivisorStateCreateInfoEXT& setVertexBindingDivisorCount( uint32_t vertexBindingDivisorCount_ ) + { + vertexBindingDivisorCount = vertexBindingDivisorCount_; + return *this; + } + + PipelineVertexInputDivisorStateCreateInfoEXT& setPVertexBindingDivisors( const VertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors_ ) + { + pVertexBindingDivisors = pVertexBindingDivisors_; + return *this; + } + + operator VkPipelineVertexInputDivisorStateCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineVertexInputDivisorStateCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineVertexInputDivisorStateCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( vertexBindingDivisorCount == rhs.vertexBindingDivisorCount ) + && ( pVertexBindingDivisors == rhs.pVertexBindingDivisors ); + } + + bool operator!=( PipelineVertexInputDivisorStateCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineVertexInputDivisorStateCreateInfoEXT; + + public: + const void* pNext = nullptr; + uint32_t vertexBindingDivisorCount; + const VertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors; + }; + static_assert( sizeof( PipelineVertexInputDivisorStateCreateInfoEXT ) == sizeof( VkPipelineVertexInputDivisorStateCreateInfoEXT ), "struct and wrapper have different size!" 
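+  // Illustrative usage sketch (not part of the generated API): giving an
+  // instanced vertex binding an attribute divisor by chaining the struct above
+  // into vk::PipelineVertexInputStateCreateInfo. Assumes
+  // VK_EXT_vertex_attribute_divisor is enabled on the device.
+  //
+  //   vk::VertexInputBindingDivisorDescriptionEXT divisor( /*binding*/ 1, /*divisor*/ 4 );
+  //   vk::PipelineVertexInputDivisorStateCreateInfoEXT divisorState = vk::PipelineVertexInputDivisorStateCreateInfoEXT()
+  //     .setVertexBindingDivisorCount( 1 )
+  //     .setPVertexBindingDivisors( &divisor );
+  //
+  //   vk::PipelineVertexInputStateCreateInfo vertexInput;   // bindings/attributes set elsewhere
+  //   vertexInput.setPNext( &divisorState );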
); + + struct PhysicalDeviceVertexAttributeDivisorPropertiesEXT + { + PhysicalDeviceVertexAttributeDivisorPropertiesEXT( uint32_t maxVertexAttribDivisor_ = 0 ) + : maxVertexAttribDivisor( maxVertexAttribDivisor_ ) + { + } + + PhysicalDeviceVertexAttributeDivisorPropertiesEXT( VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceVertexAttributeDivisorPropertiesEXT ) ); + } + + PhysicalDeviceVertexAttributeDivisorPropertiesEXT& operator=( VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceVertexAttributeDivisorPropertiesEXT ) ); + return *this; + } + PhysicalDeviceVertexAttributeDivisorPropertiesEXT& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceVertexAttributeDivisorPropertiesEXT& setMaxVertexAttribDivisor( uint32_t maxVertexAttribDivisor_ ) + { + maxVertexAttribDivisor = maxVertexAttribDivisor_; + return *this; + } + + operator VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceVertexAttributeDivisorPropertiesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxVertexAttribDivisor == rhs.maxVertexAttribDivisor ); + } + + bool operator!=( PhysicalDeviceVertexAttributeDivisorPropertiesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesEXT; + + public: + void* pNext = nullptr; + uint32_t maxVertexAttribDivisor; + }; + static_assert( sizeof( PhysicalDeviceVertexAttributeDivisorPropertiesEXT ) == sizeof( VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT ), "struct and wrapper have different size!" 
); + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + struct ImportAndroidHardwareBufferInfoANDROID + { + ImportAndroidHardwareBufferInfoANDROID( struct AHardwareBuffer* buffer_ = nullptr ) + : buffer( buffer_ ) + { + } + + ImportAndroidHardwareBufferInfoANDROID( VkImportAndroidHardwareBufferInfoANDROID const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportAndroidHardwareBufferInfoANDROID ) ); + } + + ImportAndroidHardwareBufferInfoANDROID& operator=( VkImportAndroidHardwareBufferInfoANDROID const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportAndroidHardwareBufferInfoANDROID ) ); + return *this; + } + ImportAndroidHardwareBufferInfoANDROID& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImportAndroidHardwareBufferInfoANDROID& setBuffer( struct AHardwareBuffer* buffer_ ) + { + buffer = buffer_; + return *this; + } + + operator VkImportAndroidHardwareBufferInfoANDROID const&() const + { + return *reinterpret_cast(this); + } + + operator VkImportAndroidHardwareBufferInfoANDROID &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImportAndroidHardwareBufferInfoANDROID const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( buffer == rhs.buffer ); + } + + bool operator!=( ImportAndroidHardwareBufferInfoANDROID const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImportAndroidHardwareBufferInfoANDROID; + + public: + const void* pNext = nullptr; + struct AHardwareBuffer* buffer; + }; + static_assert( sizeof( ImportAndroidHardwareBufferInfoANDROID ) == sizeof( VkImportAndroidHardwareBufferInfoANDROID ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + struct AndroidHardwareBufferUsageANDROID + { + operator VkAndroidHardwareBufferUsageANDROID const&() const + { + return *reinterpret_cast(this); + } + + operator VkAndroidHardwareBufferUsageANDROID &() + { + return *reinterpret_cast(this); + } + + bool operator==( AndroidHardwareBufferUsageANDROID const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( androidHardwareBufferUsage == rhs.androidHardwareBufferUsage ); + } + + bool operator!=( AndroidHardwareBufferUsageANDROID const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eAndroidHardwareBufferUsageANDROID; + + public: + void* pNext = nullptr; + uint64_t androidHardwareBufferUsage; + }; + static_assert( sizeof( AndroidHardwareBufferUsageANDROID ) == sizeof( VkAndroidHardwareBufferUsageANDROID ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + struct AndroidHardwareBufferPropertiesANDROID + { + operator VkAndroidHardwareBufferPropertiesANDROID const&() const + { + return *reinterpret_cast(this); + } + + operator VkAndroidHardwareBufferPropertiesANDROID &() + { + return *reinterpret_cast(this); + } + + bool operator==( AndroidHardwareBufferPropertiesANDROID const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( allocationSize == rhs.allocationSize ) + && ( memoryTypeBits == rhs.memoryTypeBits ); + } + + bool operator!=( AndroidHardwareBufferPropertiesANDROID const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eAndroidHardwareBufferPropertiesANDROID; + + public: + void* pNext = nullptr; + DeviceSize allocationSize; + uint32_t memoryTypeBits; + }; + static_assert( sizeof( AndroidHardwareBufferPropertiesANDROID ) == sizeof( VkAndroidHardwareBufferPropertiesANDROID ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + struct MemoryGetAndroidHardwareBufferInfoANDROID + { + MemoryGetAndroidHardwareBufferInfoANDROID( DeviceMemory memory_ = DeviceMemory() ) + : memory( memory_ ) + { + } + + MemoryGetAndroidHardwareBufferInfoANDROID( VkMemoryGetAndroidHardwareBufferInfoANDROID const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryGetAndroidHardwareBufferInfoANDROID ) ); + } + + MemoryGetAndroidHardwareBufferInfoANDROID& operator=( VkMemoryGetAndroidHardwareBufferInfoANDROID const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryGetAndroidHardwareBufferInfoANDROID ) ); + return *this; + } + MemoryGetAndroidHardwareBufferInfoANDROID& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + MemoryGetAndroidHardwareBufferInfoANDROID& setMemory( DeviceMemory memory_ ) + { + memory = memory_; + return *this; + } + + operator VkMemoryGetAndroidHardwareBufferInfoANDROID const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryGetAndroidHardwareBufferInfoANDROID &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryGetAndroidHardwareBufferInfoANDROID const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memory == rhs.memory ); + } + + bool operator!=( MemoryGetAndroidHardwareBufferInfoANDROID const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryGetAndroidHardwareBufferInfoANDROID; + + public: + const void* pNext = nullptr; + DeviceMemory memory; + }; + static_assert( sizeof( MemoryGetAndroidHardwareBufferInfoANDROID ) == sizeof( VkMemoryGetAndroidHardwareBufferInfoANDROID ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + + struct CommandBufferInheritanceConditionalRenderingInfoEXT + { + CommandBufferInheritanceConditionalRenderingInfoEXT( Bool32 conditionalRenderingEnable_ = 0 ) + : conditionalRenderingEnable( conditionalRenderingEnable_ ) + { + } + + CommandBufferInheritanceConditionalRenderingInfoEXT( VkCommandBufferInheritanceConditionalRenderingInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandBufferInheritanceConditionalRenderingInfoEXT ) ); + } + + CommandBufferInheritanceConditionalRenderingInfoEXT& operator=( VkCommandBufferInheritanceConditionalRenderingInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandBufferInheritanceConditionalRenderingInfoEXT ) ); + return *this; + } + CommandBufferInheritanceConditionalRenderingInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + CommandBufferInheritanceConditionalRenderingInfoEXT& setConditionalRenderingEnable( Bool32 conditionalRenderingEnable_ ) + { + conditionalRenderingEnable = conditionalRenderingEnable_; + return *this; + } + + operator VkCommandBufferInheritanceConditionalRenderingInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkCommandBufferInheritanceConditionalRenderingInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( CommandBufferInheritanceConditionalRenderingInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( conditionalRenderingEnable == rhs.conditionalRenderingEnable ); + } + + bool operator!=( CommandBufferInheritanceConditionalRenderingInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eCommandBufferInheritanceConditionalRenderingInfoEXT; + + public: + const void* pNext = nullptr; + Bool32 conditionalRenderingEnable; + }; + static_assert( sizeof( CommandBufferInheritanceConditionalRenderingInfoEXT ) == sizeof( VkCommandBufferInheritanceConditionalRenderingInfoEXT ), "struct and wrapper have different size!" ); + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + struct ExternalFormatANDROID + { + ExternalFormatANDROID( uint64_t externalFormat_ = 0 ) + : externalFormat( externalFormat_ ) + { + } + + ExternalFormatANDROID( VkExternalFormatANDROID const & rhs ) + { + memcpy( this, &rhs, sizeof( ExternalFormatANDROID ) ); + } + + ExternalFormatANDROID& operator=( VkExternalFormatANDROID const & rhs ) + { + memcpy( this, &rhs, sizeof( ExternalFormatANDROID ) ); + return *this; + } + ExternalFormatANDROID& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExternalFormatANDROID& setExternalFormat( uint64_t externalFormat_ ) + { + externalFormat = externalFormat_; + return *this; + } + + operator VkExternalFormatANDROID const&() const + { + return *reinterpret_cast(this); + } + + operator VkExternalFormatANDROID &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExternalFormatANDROID const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( externalFormat == rhs.externalFormat ); + } + + bool operator!=( ExternalFormatANDROID const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExternalFormatANDROID; + + public: + void* pNext = nullptr; + uint64_t externalFormat; + }; + static_assert( sizeof( ExternalFormatANDROID ) == sizeof( VkExternalFormatANDROID ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + + struct PhysicalDevice8BitStorageFeaturesKHR + { + PhysicalDevice8BitStorageFeaturesKHR( Bool32 storageBuffer8BitAccess_ = 0, + Bool32 uniformAndStorageBuffer8BitAccess_ = 0, + Bool32 storagePushConstant8_ = 0 ) + : storageBuffer8BitAccess( storageBuffer8BitAccess_ ) + , uniformAndStorageBuffer8BitAccess( uniformAndStorageBuffer8BitAccess_ ) + , storagePushConstant8( storagePushConstant8_ ) + { + } + + PhysicalDevice8BitStorageFeaturesKHR( VkPhysicalDevice8BitStorageFeaturesKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDevice8BitStorageFeaturesKHR ) ); + } + + PhysicalDevice8BitStorageFeaturesKHR& operator=( VkPhysicalDevice8BitStorageFeaturesKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDevice8BitStorageFeaturesKHR ) ); + return *this; + } + PhysicalDevice8BitStorageFeaturesKHR& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDevice8BitStorageFeaturesKHR& setStorageBuffer8BitAccess( Bool32 storageBuffer8BitAccess_ ) + { + storageBuffer8BitAccess = storageBuffer8BitAccess_; + return *this; + } + + PhysicalDevice8BitStorageFeaturesKHR& setUniformAndStorageBuffer8BitAccess( Bool32 uniformAndStorageBuffer8BitAccess_ ) + { + uniformAndStorageBuffer8BitAccess = uniformAndStorageBuffer8BitAccess_; + return *this; + } + + PhysicalDevice8BitStorageFeaturesKHR& setStoragePushConstant8( Bool32 storagePushConstant8_ ) + { + storagePushConstant8 = storagePushConstant8_; + return *this; + } + + operator VkPhysicalDevice8BitStorageFeaturesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDevice8BitStorageFeaturesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDevice8BitStorageFeaturesKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( storageBuffer8BitAccess == rhs.storageBuffer8BitAccess ) + && ( uniformAndStorageBuffer8BitAccess == rhs.uniformAndStorageBuffer8BitAccess ) + && ( storagePushConstant8 == rhs.storagePushConstant8 ); + } + + bool operator!=( PhysicalDevice8BitStorageFeaturesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDevice8BitStorageFeaturesKHR; + + public: + void* pNext = nullptr; + Bool32 storageBuffer8BitAccess; + Bool32 uniformAndStorageBuffer8BitAccess; + Bool32 storagePushConstant8; + }; + static_assert( sizeof( PhysicalDevice8BitStorageFeaturesKHR ) == sizeof( VkPhysicalDevice8BitStorageFeaturesKHR ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceConditionalRenderingFeaturesEXT + { + PhysicalDeviceConditionalRenderingFeaturesEXT( Bool32 conditionalRendering_ = 0, + Bool32 inheritedConditionalRendering_ = 0 ) + : conditionalRendering( conditionalRendering_ ) + , inheritedConditionalRendering( inheritedConditionalRendering_ ) + { + } + + PhysicalDeviceConditionalRenderingFeaturesEXT( VkPhysicalDeviceConditionalRenderingFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceConditionalRenderingFeaturesEXT ) ); + } + + PhysicalDeviceConditionalRenderingFeaturesEXT& operator=( VkPhysicalDeviceConditionalRenderingFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceConditionalRenderingFeaturesEXT ) ); + return *this; + } + PhysicalDeviceConditionalRenderingFeaturesEXT& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceConditionalRenderingFeaturesEXT& setConditionalRendering( Bool32 conditionalRendering_ ) + { + conditionalRendering = conditionalRendering_; + return *this; + } + + PhysicalDeviceConditionalRenderingFeaturesEXT& setInheritedConditionalRendering( Bool32 inheritedConditionalRendering_ ) + { + inheritedConditionalRendering = inheritedConditionalRendering_; + return *this; + } + + operator VkPhysicalDeviceConditionalRenderingFeaturesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceConditionalRenderingFeaturesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceConditionalRenderingFeaturesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( conditionalRendering == rhs.conditionalRendering ) + && ( inheritedConditionalRendering == rhs.inheritedConditionalRendering ); + } + + bool operator!=( PhysicalDeviceConditionalRenderingFeaturesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceConditionalRenderingFeaturesEXT; + + public: + void* pNext = nullptr; + Bool32 conditionalRendering; + Bool32 inheritedConditionalRendering; + }; + static_assert( sizeof( PhysicalDeviceConditionalRenderingFeaturesEXT ) == sizeof( VkPhysicalDeviceConditionalRenderingFeaturesEXT ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceVulkanMemoryModelFeaturesKHR + { + operator VkPhysicalDeviceVulkanMemoryModelFeaturesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceVulkanMemoryModelFeaturesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceVulkanMemoryModelFeaturesKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( vulkanMemoryModel == rhs.vulkanMemoryModel ) + && ( vulkanMemoryModelDeviceScope == rhs.vulkanMemoryModelDeviceScope ); + } + + bool operator!=( PhysicalDeviceVulkanMemoryModelFeaturesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceVulkanMemoryModelFeaturesKHR; + + public: + void* pNext = nullptr; + Bool32 vulkanMemoryModel; + Bool32 vulkanMemoryModelDeviceScope; + }; + static_assert( sizeof( PhysicalDeviceVulkanMemoryModelFeaturesKHR ) == sizeof( VkPhysicalDeviceVulkanMemoryModelFeaturesKHR ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceVertexAttributeDivisorFeaturesEXT + { + PhysicalDeviceVertexAttributeDivisorFeaturesEXT( Bool32 vertexAttributeInstanceRateDivisor_ = 0, + Bool32 vertexAttributeInstanceRateZeroDivisor_ = 0 ) + : vertexAttributeInstanceRateDivisor( vertexAttributeInstanceRateDivisor_ ) + , vertexAttributeInstanceRateZeroDivisor( vertexAttributeInstanceRateZeroDivisor_ ) + { + } + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT( VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceVertexAttributeDivisorFeaturesEXT ) ); + } + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT& operator=( VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceVertexAttributeDivisorFeaturesEXT ) ); + return *this; + } + PhysicalDeviceVertexAttributeDivisorFeaturesEXT& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT& setVertexAttributeInstanceRateDivisor( Bool32 vertexAttributeInstanceRateDivisor_ ) + { + vertexAttributeInstanceRateDivisor = vertexAttributeInstanceRateDivisor_; + return *this; + } + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT& setVertexAttributeInstanceRateZeroDivisor( Bool32 vertexAttributeInstanceRateZeroDivisor_ ) + { + vertexAttributeInstanceRateZeroDivisor = vertexAttributeInstanceRateZeroDivisor_; + return *this; + } + + operator VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceVertexAttributeDivisorFeaturesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( vertexAttributeInstanceRateDivisor == rhs.vertexAttributeInstanceRateDivisor ) + && ( vertexAttributeInstanceRateZeroDivisor == rhs.vertexAttributeInstanceRateZeroDivisor ); + } + + bool operator!=( PhysicalDeviceVertexAttributeDivisorFeaturesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesEXT; + + public: + void* pNext = nullptr; + Bool32 vertexAttributeInstanceRateDivisor; + Bool32 vertexAttributeInstanceRateZeroDivisor; + }; + static_assert( sizeof( PhysicalDeviceVertexAttributeDivisorFeaturesEXT ) == sizeof( VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT ), "struct and wrapper have different size!" 
); + + struct ImageViewASTCDecodeModeEXT + { + ImageViewASTCDecodeModeEXT( Format decodeMode_ = Format::eUndefined ) + : decodeMode( decodeMode_ ) + { + } + + ImageViewASTCDecodeModeEXT( VkImageViewASTCDecodeModeEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageViewASTCDecodeModeEXT ) ); + } + + ImageViewASTCDecodeModeEXT& operator=( VkImageViewASTCDecodeModeEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageViewASTCDecodeModeEXT ) ); + return *this; + } + ImageViewASTCDecodeModeEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImageViewASTCDecodeModeEXT& setDecodeMode( Format decodeMode_ ) + { + decodeMode = decodeMode_; + return *this; + } + + operator VkImageViewASTCDecodeModeEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageViewASTCDecodeModeEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageViewASTCDecodeModeEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( decodeMode == rhs.decodeMode ); + } + + bool operator!=( ImageViewASTCDecodeModeEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageViewAstcDecodeModeEXT; + + public: + const void* pNext = nullptr; + Format decodeMode; + }; + static_assert( sizeof( ImageViewASTCDecodeModeEXT ) == sizeof( VkImageViewASTCDecodeModeEXT ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceASTCDecodeFeaturesEXT + { + PhysicalDeviceASTCDecodeFeaturesEXT( Bool32 decodeModeSharedExponent_ = 0 ) + : decodeModeSharedExponent( decodeModeSharedExponent_ ) + { + } + + PhysicalDeviceASTCDecodeFeaturesEXT( VkPhysicalDeviceASTCDecodeFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceASTCDecodeFeaturesEXT ) ); + } + + PhysicalDeviceASTCDecodeFeaturesEXT& operator=( VkPhysicalDeviceASTCDecodeFeaturesEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceASTCDecodeFeaturesEXT ) ); + return *this; + } + PhysicalDeviceASTCDecodeFeaturesEXT& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceASTCDecodeFeaturesEXT& setDecodeModeSharedExponent( Bool32 decodeModeSharedExponent_ ) + { + decodeModeSharedExponent = decodeModeSharedExponent_; + return *this; + } + + operator VkPhysicalDeviceASTCDecodeFeaturesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceASTCDecodeFeaturesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceASTCDecodeFeaturesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( decodeModeSharedExponent == rhs.decodeModeSharedExponent ); + } + + bool operator!=( PhysicalDeviceASTCDecodeFeaturesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceAstcDecodeFeaturesEXT; + + public: + void* pNext = nullptr; + Bool32 decodeModeSharedExponent; + }; + static_assert( sizeof( PhysicalDeviceASTCDecodeFeaturesEXT ) == sizeof( VkPhysicalDeviceASTCDecodeFeaturesEXT ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceRepresentativeFragmentTestFeaturesNV + { + PhysicalDeviceRepresentativeFragmentTestFeaturesNV( Bool32 representativeFragmentTest_ = 0 ) + : representativeFragmentTest( representativeFragmentTest_ ) + { + } + + PhysicalDeviceRepresentativeFragmentTestFeaturesNV( VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceRepresentativeFragmentTestFeaturesNV ) ); + } + + PhysicalDeviceRepresentativeFragmentTestFeaturesNV& operator=( VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceRepresentativeFragmentTestFeaturesNV ) ); + return *this; + } + PhysicalDeviceRepresentativeFragmentTestFeaturesNV& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceRepresentativeFragmentTestFeaturesNV& setRepresentativeFragmentTest( Bool32 representativeFragmentTest_ ) + { + representativeFragmentTest = representativeFragmentTest_; + return *this; + } + + operator VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceRepresentativeFragmentTestFeaturesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( representativeFragmentTest == rhs.representativeFragmentTest ); + } + + bool operator!=( PhysicalDeviceRepresentativeFragmentTestFeaturesNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceRepresentativeFragmentTestFeaturesNV; + + public: + void* pNext = nullptr; + Bool32 representativeFragmentTest; + }; + static_assert( sizeof( PhysicalDeviceRepresentativeFragmentTestFeaturesNV ) == sizeof( VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV ), "struct and wrapper have different size!" 
); + + struct PipelineRepresentativeFragmentTestStateCreateInfoNV + { + PipelineRepresentativeFragmentTestStateCreateInfoNV( Bool32 representativeFragmentTestEnable_ = 0 ) + : representativeFragmentTestEnable( representativeFragmentTestEnable_ ) + { + } + + PipelineRepresentativeFragmentTestStateCreateInfoNV( VkPipelineRepresentativeFragmentTestStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineRepresentativeFragmentTestStateCreateInfoNV ) ); + } + + PipelineRepresentativeFragmentTestStateCreateInfoNV& operator=( VkPipelineRepresentativeFragmentTestStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineRepresentativeFragmentTestStateCreateInfoNV ) ); + return *this; + } + PipelineRepresentativeFragmentTestStateCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineRepresentativeFragmentTestStateCreateInfoNV& setRepresentativeFragmentTestEnable( Bool32 representativeFragmentTestEnable_ ) + { + representativeFragmentTestEnable = representativeFragmentTestEnable_; + return *this; + } + + operator VkPipelineRepresentativeFragmentTestStateCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineRepresentativeFragmentTestStateCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineRepresentativeFragmentTestStateCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( representativeFragmentTestEnable == rhs.representativeFragmentTestEnable ); + } + + bool operator!=( PipelineRepresentativeFragmentTestStateCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineRepresentativeFragmentTestStateCreateInfoNV; + + public: + const void* pNext = nullptr; + Bool32 representativeFragmentTestEnable; + }; + static_assert( sizeof( PipelineRepresentativeFragmentTestStateCreateInfoNV ) == sizeof( VkPipelineRepresentativeFragmentTestStateCreateInfoNV ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceExclusiveScissorFeaturesNV + { + PhysicalDeviceExclusiveScissorFeaturesNV( Bool32 exclusiveScissor_ = 0 ) + : exclusiveScissor( exclusiveScissor_ ) + { + } + + PhysicalDeviceExclusiveScissorFeaturesNV( VkPhysicalDeviceExclusiveScissorFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExclusiveScissorFeaturesNV ) ); + } + + PhysicalDeviceExclusiveScissorFeaturesNV& operator=( VkPhysicalDeviceExclusiveScissorFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExclusiveScissorFeaturesNV ) ); + return *this; + } + PhysicalDeviceExclusiveScissorFeaturesNV& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExclusiveScissorFeaturesNV& setExclusiveScissor( Bool32 exclusiveScissor_ ) + { + exclusiveScissor = exclusiveScissor_; + return *this; + } + + operator VkPhysicalDeviceExclusiveScissorFeaturesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceExclusiveScissorFeaturesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceExclusiveScissorFeaturesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( exclusiveScissor == rhs.exclusiveScissor ); + } + + bool operator!=( PhysicalDeviceExclusiveScissorFeaturesNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceExclusiveScissorFeaturesNV; + + public: + void* pNext = nullptr; + Bool32 exclusiveScissor; + }; + static_assert( sizeof( PhysicalDeviceExclusiveScissorFeaturesNV ) == sizeof( VkPhysicalDeviceExclusiveScissorFeaturesNV ), "struct and wrapper have different size!" ); + + struct PipelineViewportExclusiveScissorStateCreateInfoNV + { + PipelineViewportExclusiveScissorStateCreateInfoNV( uint32_t exclusiveScissorCount_ = 0, + const Rect2D* pExclusiveScissors_ = nullptr ) + : exclusiveScissorCount( exclusiveScissorCount_ ) + , pExclusiveScissors( pExclusiveScissors_ ) + { + } + + PipelineViewportExclusiveScissorStateCreateInfoNV( VkPipelineViewportExclusiveScissorStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportExclusiveScissorStateCreateInfoNV ) ); + } + + PipelineViewportExclusiveScissorStateCreateInfoNV& operator=( VkPipelineViewportExclusiveScissorStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportExclusiveScissorStateCreateInfoNV ) ); + return *this; + } + PipelineViewportExclusiveScissorStateCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineViewportExclusiveScissorStateCreateInfoNV& setExclusiveScissorCount( uint32_t exclusiveScissorCount_ ) + { + exclusiveScissorCount = exclusiveScissorCount_; + return *this; + } + + PipelineViewportExclusiveScissorStateCreateInfoNV& setPExclusiveScissors( const Rect2D* pExclusiveScissors_ ) + { + pExclusiveScissors = pExclusiveScissors_; + return *this; + } + + operator VkPipelineViewportExclusiveScissorStateCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineViewportExclusiveScissorStateCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineViewportExclusiveScissorStateCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( exclusiveScissorCount == rhs.exclusiveScissorCount ) + && ( pExclusiveScissors == rhs.pExclusiveScissors ); + } + + bool operator!=( 
PipelineViewportExclusiveScissorStateCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineViewportExclusiveScissorStateCreateInfoNV; + + public: + const void* pNext = nullptr; + uint32_t exclusiveScissorCount; + const Rect2D* pExclusiveScissors; + }; + static_assert( sizeof( PipelineViewportExclusiveScissorStateCreateInfoNV ) == sizeof( VkPipelineViewportExclusiveScissorStateCreateInfoNV ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceCornerSampledImageFeaturesNV + { + PhysicalDeviceCornerSampledImageFeaturesNV( Bool32 cornerSampledImage_ = 0 ) + : cornerSampledImage( cornerSampledImage_ ) + { + } + + PhysicalDeviceCornerSampledImageFeaturesNV( VkPhysicalDeviceCornerSampledImageFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceCornerSampledImageFeaturesNV ) ); + } + + PhysicalDeviceCornerSampledImageFeaturesNV& operator=( VkPhysicalDeviceCornerSampledImageFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceCornerSampledImageFeaturesNV ) ); + return *this; + } + PhysicalDeviceCornerSampledImageFeaturesNV& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceCornerSampledImageFeaturesNV& setCornerSampledImage( Bool32 cornerSampledImage_ ) + { + cornerSampledImage = cornerSampledImage_; + return *this; + } + + operator VkPhysicalDeviceCornerSampledImageFeaturesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceCornerSampledImageFeaturesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceCornerSampledImageFeaturesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( cornerSampledImage == rhs.cornerSampledImage ); + } + + bool operator!=( PhysicalDeviceCornerSampledImageFeaturesNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceCornerSampledImageFeaturesNV; + + public: + void* pNext = nullptr; + Bool32 cornerSampledImage; + }; + static_assert( sizeof( PhysicalDeviceCornerSampledImageFeaturesNV ) == sizeof( VkPhysicalDeviceCornerSampledImageFeaturesNV ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceComputeShaderDerivativesFeaturesNV + { + PhysicalDeviceComputeShaderDerivativesFeaturesNV( Bool32 computeDerivativeGroupQuads_ = 0, + Bool32 computeDerivativeGroupLinear_ = 0 ) + : computeDerivativeGroupQuads( computeDerivativeGroupQuads_ ) + , computeDerivativeGroupLinear( computeDerivativeGroupLinear_ ) + { + } + + PhysicalDeviceComputeShaderDerivativesFeaturesNV( VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceComputeShaderDerivativesFeaturesNV ) ); + } + + PhysicalDeviceComputeShaderDerivativesFeaturesNV& operator=( VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceComputeShaderDerivativesFeaturesNV ) ); + return *this; + } + PhysicalDeviceComputeShaderDerivativesFeaturesNV& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceComputeShaderDerivativesFeaturesNV& setComputeDerivativeGroupQuads( Bool32 computeDerivativeGroupQuads_ ) + { + computeDerivativeGroupQuads = computeDerivativeGroupQuads_; + return *this; + } + + PhysicalDeviceComputeShaderDerivativesFeaturesNV& setComputeDerivativeGroupLinear( Bool32 computeDerivativeGroupLinear_ ) + { + computeDerivativeGroupLinear = computeDerivativeGroupLinear_; + return *this; + } + + operator VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceComputeShaderDerivativesFeaturesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceComputeShaderDerivativesFeaturesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( computeDerivativeGroupQuads == rhs.computeDerivativeGroupQuads ) + && ( computeDerivativeGroupLinear == rhs.computeDerivativeGroupLinear ); + } + + bool operator!=( PhysicalDeviceComputeShaderDerivativesFeaturesNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesNV; + + public: + void* pNext = nullptr; + Bool32 computeDerivativeGroupQuads; + Bool32 computeDerivativeGroupLinear; + }; + static_assert( sizeof( PhysicalDeviceComputeShaderDerivativesFeaturesNV ) == sizeof( VkPhysicalDeviceComputeShaderDerivativesFeaturesNV ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceFragmentShaderBarycentricFeaturesNV + { + PhysicalDeviceFragmentShaderBarycentricFeaturesNV( Bool32 fragmentShaderBarycentric_ = 0 ) + : fragmentShaderBarycentric( fragmentShaderBarycentric_ ) + { + } + + PhysicalDeviceFragmentShaderBarycentricFeaturesNV( VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceFragmentShaderBarycentricFeaturesNV ) ); + } + + PhysicalDeviceFragmentShaderBarycentricFeaturesNV& operator=( VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceFragmentShaderBarycentricFeaturesNV ) ); + return *this; + } + PhysicalDeviceFragmentShaderBarycentricFeaturesNV& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFragmentShaderBarycentricFeaturesNV& setFragmentShaderBarycentric( Bool32 fragmentShaderBarycentric_ ) + { + fragmentShaderBarycentric = fragmentShaderBarycentric_; + return *this; + } + + operator VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceFragmentShaderBarycentricFeaturesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fragmentShaderBarycentric == rhs.fragmentShaderBarycentric ); + } + + bool operator!=( PhysicalDeviceFragmentShaderBarycentricFeaturesNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceFragmentShaderBarycentricFeaturesNV; + + public: + void* pNext = nullptr; + Bool32 fragmentShaderBarycentric; + }; + static_assert( sizeof( PhysicalDeviceFragmentShaderBarycentricFeaturesNV ) == sizeof( VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceShaderImageFootprintFeaturesNV + { + PhysicalDeviceShaderImageFootprintFeaturesNV( Bool32 imageFootprint_ = 0 ) + : imageFootprint( imageFootprint_ ) + { + } + + PhysicalDeviceShaderImageFootprintFeaturesNV( VkPhysicalDeviceShaderImageFootprintFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceShaderImageFootprintFeaturesNV ) ); + } + + PhysicalDeviceShaderImageFootprintFeaturesNV& operator=( VkPhysicalDeviceShaderImageFootprintFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceShaderImageFootprintFeaturesNV ) ); + return *this; + } + PhysicalDeviceShaderImageFootprintFeaturesNV& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderImageFootprintFeaturesNV& setImageFootprint( Bool32 imageFootprint_ ) + { + imageFootprint = imageFootprint_; + return *this; + } + + operator VkPhysicalDeviceShaderImageFootprintFeaturesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceShaderImageFootprintFeaturesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceShaderImageFootprintFeaturesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( imageFootprint == rhs.imageFootprint ); + } + + bool operator!=( PhysicalDeviceShaderImageFootprintFeaturesNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceShaderImageFootprintFeaturesNV; + + public: + void* pNext = nullptr; + Bool32 imageFootprint; + }; + static_assert( sizeof( PhysicalDeviceShaderImageFootprintFeaturesNV ) == sizeof( VkPhysicalDeviceShaderImageFootprintFeaturesNV ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceShadingRateImageFeaturesNV + { + PhysicalDeviceShadingRateImageFeaturesNV( Bool32 shadingRateImage_ = 0, + Bool32 shadingRateCoarseSampleOrder_ = 0 ) + : shadingRateImage( shadingRateImage_ ) + , shadingRateCoarseSampleOrder( shadingRateCoarseSampleOrder_ ) + { + } + + PhysicalDeviceShadingRateImageFeaturesNV( VkPhysicalDeviceShadingRateImageFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceShadingRateImageFeaturesNV ) ); + } + + PhysicalDeviceShadingRateImageFeaturesNV& operator=( VkPhysicalDeviceShadingRateImageFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceShadingRateImageFeaturesNV ) ); + return *this; + } + PhysicalDeviceShadingRateImageFeaturesNV& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShadingRateImageFeaturesNV& setShadingRateImage( Bool32 shadingRateImage_ ) + { + shadingRateImage = shadingRateImage_; + return *this; + } + + PhysicalDeviceShadingRateImageFeaturesNV& setShadingRateCoarseSampleOrder( Bool32 shadingRateCoarseSampleOrder_ ) + { + shadingRateCoarseSampleOrder = shadingRateCoarseSampleOrder_; + return *this; + } + + operator VkPhysicalDeviceShadingRateImageFeaturesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceShadingRateImageFeaturesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceShadingRateImageFeaturesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shadingRateImage == rhs.shadingRateImage ) + && ( shadingRateCoarseSampleOrder == rhs.shadingRateCoarseSampleOrder ); + } + + bool operator!=( PhysicalDeviceShadingRateImageFeaturesNV const& rhs ) const + { + return !operator==( rhs ); + } + + 
private: + StructureType sType = StructureType::ePhysicalDeviceShadingRateImageFeaturesNV; + + public: + void* pNext = nullptr; + Bool32 shadingRateImage; + Bool32 shadingRateCoarseSampleOrder; + }; + static_assert( sizeof( PhysicalDeviceShadingRateImageFeaturesNV ) == sizeof( VkPhysicalDeviceShadingRateImageFeaturesNV ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceShadingRateImagePropertiesNV + { + operator VkPhysicalDeviceShadingRateImagePropertiesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceShadingRateImagePropertiesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceShadingRateImagePropertiesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shadingRateTexelSize == rhs.shadingRateTexelSize ) + && ( shadingRatePaletteSize == rhs.shadingRatePaletteSize ) + && ( shadingRateMaxCoarseSamples == rhs.shadingRateMaxCoarseSamples ); + } + + bool operator!=( PhysicalDeviceShadingRateImagePropertiesNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceShadingRateImagePropertiesNV; + + public: + void* pNext = nullptr; + Extent2D shadingRateTexelSize; + uint32_t shadingRatePaletteSize; + uint32_t shadingRateMaxCoarseSamples; + }; + static_assert( sizeof( PhysicalDeviceShadingRateImagePropertiesNV ) == sizeof( VkPhysicalDeviceShadingRateImagePropertiesNV ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceMeshShaderFeaturesNV + { + PhysicalDeviceMeshShaderFeaturesNV( Bool32 taskShader_ = 0, + Bool32 meshShader_ = 0 ) + : taskShader( taskShader_ ) + , meshShader( meshShader_ ) + { + } + + PhysicalDeviceMeshShaderFeaturesNV( VkPhysicalDeviceMeshShaderFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceMeshShaderFeaturesNV ) ); + } + + PhysicalDeviceMeshShaderFeaturesNV& operator=( VkPhysicalDeviceMeshShaderFeaturesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceMeshShaderFeaturesNV ) ); + return *this; + } + PhysicalDeviceMeshShaderFeaturesNV& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceMeshShaderFeaturesNV& setTaskShader( Bool32 taskShader_ ) + { + taskShader = taskShader_; + return *this; + } + + PhysicalDeviceMeshShaderFeaturesNV& setMeshShader( Bool32 meshShader_ ) + { + meshShader = meshShader_; + return *this; + } + + operator VkPhysicalDeviceMeshShaderFeaturesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceMeshShaderFeaturesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceMeshShaderFeaturesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( taskShader == rhs.taskShader ) + && ( meshShader == rhs.meshShader ); + } + + bool operator!=( PhysicalDeviceMeshShaderFeaturesNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceMeshShaderFeaturesNV; + + public: + void* pNext = nullptr; + Bool32 taskShader; + Bool32 meshShader; + }; + static_assert( sizeof( PhysicalDeviceMeshShaderFeaturesNV ) == sizeof( VkPhysicalDeviceMeshShaderFeaturesNV ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceMeshShaderPropertiesNV + { + PhysicalDeviceMeshShaderPropertiesNV( uint32_t maxDrawMeshTasksCount_ = 0, + uint32_t maxTaskWorkGroupInvocations_ = 0, + std::array<uint32_t,3> const& maxTaskWorkGroupSize_ = { { 0, 0, 0 } }, + uint32_t maxTaskTotalMemorySize_ = 0, + uint32_t maxTaskOutputCount_ = 0, + uint32_t maxMeshWorkGroupInvocations_ = 0, + std::array<uint32_t,3> const& maxMeshWorkGroupSize_ = { { 0, 0, 0 } }, + uint32_t maxMeshTotalMemorySize_ = 0, + uint32_t maxMeshOutputVertices_ = 0, + uint32_t maxMeshOutputPrimitives_ = 0, + uint32_t maxMeshMultiviewViewCount_ = 0, + uint32_t meshOutputPerVertexGranularity_ = 0, + uint32_t meshOutputPerPrimitiveGranularity_ = 0 ) + : maxDrawMeshTasksCount( maxDrawMeshTasksCount_ ) + , maxTaskWorkGroupInvocations( maxTaskWorkGroupInvocations_ ) + , maxTaskTotalMemorySize( maxTaskTotalMemorySize_ ) + , maxTaskOutputCount( maxTaskOutputCount_ ) + , maxMeshWorkGroupInvocations( maxMeshWorkGroupInvocations_ ) + , maxMeshTotalMemorySize( maxMeshTotalMemorySize_ ) + , maxMeshOutputVertices( maxMeshOutputVertices_ ) + , maxMeshOutputPrimitives( maxMeshOutputPrimitives_ ) + , maxMeshMultiviewViewCount( maxMeshMultiviewViewCount_ ) + , meshOutputPerVertexGranularity( meshOutputPerVertexGranularity_ ) + , meshOutputPerPrimitiveGranularity( meshOutputPerPrimitiveGranularity_ ) + { + memcpy( &maxTaskWorkGroupSize, maxTaskWorkGroupSize_.data(), 3 * sizeof( uint32_t ) ); + memcpy( &maxMeshWorkGroupSize, maxMeshWorkGroupSize_.data(), 3 * sizeof( uint32_t ) ); + } + + PhysicalDeviceMeshShaderPropertiesNV( VkPhysicalDeviceMeshShaderPropertiesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceMeshShaderPropertiesNV ) ); + } + + PhysicalDeviceMeshShaderPropertiesNV& operator=( VkPhysicalDeviceMeshShaderPropertiesNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceMeshShaderPropertiesNV ) ); + return *this; + } + PhysicalDeviceMeshShaderPropertiesNV& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxDrawMeshTasksCount( uint32_t maxDrawMeshTasksCount_ ) + { + maxDrawMeshTasksCount = maxDrawMeshTasksCount_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxTaskWorkGroupInvocations( uint32_t maxTaskWorkGroupInvocations_ ) + { + maxTaskWorkGroupInvocations = maxTaskWorkGroupInvocations_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxTaskWorkGroupSize( std::array<uint32_t,3> maxTaskWorkGroupSize_ ) + { + memcpy( &maxTaskWorkGroupSize, maxTaskWorkGroupSize_.data(), 3 * sizeof( uint32_t ) ); + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxTaskTotalMemorySize( uint32_t maxTaskTotalMemorySize_ ) + { + maxTaskTotalMemorySize = maxTaskTotalMemorySize_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxTaskOutputCount( uint32_t maxTaskOutputCount_ ) + { + maxTaskOutputCount = maxTaskOutputCount_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxMeshWorkGroupInvocations( uint32_t maxMeshWorkGroupInvocations_ ) + { + maxMeshWorkGroupInvocations = maxMeshWorkGroupInvocations_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxMeshWorkGroupSize( std::array<uint32_t,3> maxMeshWorkGroupSize_ ) + { + memcpy( &maxMeshWorkGroupSize, maxMeshWorkGroupSize_.data(), 3 * sizeof( uint32_t ) ); + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxMeshTotalMemorySize( uint32_t maxMeshTotalMemorySize_ ) + { + maxMeshTotalMemorySize = maxMeshTotalMemorySize_; + return *this; + } + +
PhysicalDeviceMeshShaderPropertiesNV& setMaxMeshOutputVertices( uint32_t maxMeshOutputVertices_ ) + { + maxMeshOutputVertices = maxMeshOutputVertices_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxMeshOutputPrimitives( uint32_t maxMeshOutputPrimitives_ ) + { + maxMeshOutputPrimitives = maxMeshOutputPrimitives_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMaxMeshMultiviewViewCount( uint32_t maxMeshMultiviewViewCount_ ) + { + maxMeshMultiviewViewCount = maxMeshMultiviewViewCount_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMeshOutputPerVertexGranularity( uint32_t meshOutputPerVertexGranularity_ ) + { + meshOutputPerVertexGranularity = meshOutputPerVertexGranularity_; + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV& setMeshOutputPerPrimitiveGranularity( uint32_t meshOutputPerPrimitiveGranularity_ ) + { + meshOutputPerPrimitiveGranularity = meshOutputPerPrimitiveGranularity_; + return *this; + } + + operator VkPhysicalDeviceMeshShaderPropertiesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceMeshShaderPropertiesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceMeshShaderPropertiesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxDrawMeshTasksCount == rhs.maxDrawMeshTasksCount ) + && ( maxTaskWorkGroupInvocations == rhs.maxTaskWorkGroupInvocations ) + && ( memcmp( maxTaskWorkGroupSize, rhs.maxTaskWorkGroupSize, 3 * sizeof( uint32_t ) ) == 0 ) + && ( maxTaskTotalMemorySize == rhs.maxTaskTotalMemorySize ) + && ( maxTaskOutputCount == rhs.maxTaskOutputCount ) + && ( maxMeshWorkGroupInvocations == rhs.maxMeshWorkGroupInvocations ) + && ( memcmp( maxMeshWorkGroupSize, rhs.maxMeshWorkGroupSize, 3 * sizeof( uint32_t ) ) == 0 ) + && ( maxMeshTotalMemorySize == rhs.maxMeshTotalMemorySize ) + && ( maxMeshOutputVertices == rhs.maxMeshOutputVertices ) + && ( maxMeshOutputPrimitives == rhs.maxMeshOutputPrimitives ) + && ( maxMeshMultiviewViewCount == rhs.maxMeshMultiviewViewCount ) + && ( meshOutputPerVertexGranularity == rhs.meshOutputPerVertexGranularity ) + && ( meshOutputPerPrimitiveGranularity == rhs.meshOutputPerPrimitiveGranularity ); + } + + bool operator!=( PhysicalDeviceMeshShaderPropertiesNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceMeshShaderPropertiesNV; + + public: + void* pNext = nullptr; + uint32_t maxDrawMeshTasksCount; + uint32_t maxTaskWorkGroupInvocations; + uint32_t maxTaskWorkGroupSize[3]; + uint32_t maxTaskTotalMemorySize; + uint32_t maxTaskOutputCount; + uint32_t maxMeshWorkGroupInvocations; + uint32_t maxMeshWorkGroupSize[3]; + uint32_t maxMeshTotalMemorySize; + uint32_t maxMeshOutputVertices; + uint32_t maxMeshOutputPrimitives; + uint32_t maxMeshMultiviewViewCount; + uint32_t meshOutputPerVertexGranularity; + uint32_t meshOutputPerPrimitiveGranularity; + }; + static_assert( sizeof( PhysicalDeviceMeshShaderPropertiesNV ) == sizeof( VkPhysicalDeviceMeshShaderPropertiesNV ), "struct and wrapper have different size!" 
); + + struct GeometryTrianglesNVX + { + GeometryTrianglesNVX( Buffer vertexData_ = Buffer(), + DeviceSize vertexOffset_ = 0, + uint32_t vertexCount_ = 0, + DeviceSize vertexStride_ = 0, + Format vertexFormat_ = Format::eUndefined, + Buffer indexData_ = Buffer(), + DeviceSize indexOffset_ = 0, + uint32_t indexCount_ = 0, + IndexType indexType_ = IndexType::eUint16, + Buffer transformData_ = Buffer(), + DeviceSize transformOffset_ = 0 ) + : vertexData( vertexData_ ) + , vertexOffset( vertexOffset_ ) + , vertexCount( vertexCount_ ) + , vertexStride( vertexStride_ ) + , vertexFormat( vertexFormat_ ) + , indexData( indexData_ ) + , indexOffset( indexOffset_ ) + , indexCount( indexCount_ ) + , indexType( indexType_ ) + , transformData( transformData_ ) + , transformOffset( transformOffset_ ) + { + } + + GeometryTrianglesNVX( VkGeometryTrianglesNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( GeometryTrianglesNVX ) ); + } + + GeometryTrianglesNVX& operator=( VkGeometryTrianglesNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( GeometryTrianglesNVX ) ); + return *this; + } + GeometryTrianglesNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + GeometryTrianglesNVX& setVertexData( Buffer vertexData_ ) + { + vertexData = vertexData_; + return *this; + } + + GeometryTrianglesNVX& setVertexOffset( DeviceSize vertexOffset_ ) + { + vertexOffset = vertexOffset_; + return *this; + } + + GeometryTrianglesNVX& setVertexCount( uint32_t vertexCount_ ) + { + vertexCount = vertexCount_; + return *this; + } + + GeometryTrianglesNVX& setVertexStride( DeviceSize vertexStride_ ) + { + vertexStride = vertexStride_; + return *this; + } + + GeometryTrianglesNVX& setVertexFormat( Format vertexFormat_ ) + { + vertexFormat = vertexFormat_; + return *this; + } + + GeometryTrianglesNVX& setIndexData( Buffer indexData_ ) + { + indexData = indexData_; + return *this; + } + + GeometryTrianglesNVX& setIndexOffset( DeviceSize indexOffset_ ) + { + indexOffset = indexOffset_; + return *this; + } + + GeometryTrianglesNVX& setIndexCount( uint32_t indexCount_ ) + { + indexCount = indexCount_; + return *this; + } + + GeometryTrianglesNVX& setIndexType( IndexType indexType_ ) + { + indexType = indexType_; + return *this; + } + + GeometryTrianglesNVX& setTransformData( Buffer transformData_ ) + { + transformData = transformData_; + return *this; + } + + GeometryTrianglesNVX& setTransformOffset( DeviceSize transformOffset_ ) + { + transformOffset = transformOffset_; + return *this; + } + + operator VkGeometryTrianglesNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkGeometryTrianglesNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( GeometryTrianglesNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( vertexData == rhs.vertexData ) + && ( vertexOffset == rhs.vertexOffset ) + && ( vertexCount == rhs.vertexCount ) + && ( vertexStride == rhs.vertexStride ) + && ( vertexFormat == rhs.vertexFormat ) + && ( indexData == rhs.indexData ) + && ( indexOffset == rhs.indexOffset ) + && ( indexCount == rhs.indexCount ) + && ( indexType == rhs.indexType ) + && ( transformData == rhs.transformData ) + && ( transformOffset == rhs.transformOffset ); + } + + bool operator!=( GeometryTrianglesNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eGeometryTrianglesNVX; + + public: + const void* pNext = nullptr; + Buffer vertexData; + DeviceSize vertexOffset; + uint32_t 
vertexCount; + DeviceSize vertexStride; + Format vertexFormat; + Buffer indexData; + DeviceSize indexOffset; + uint32_t indexCount; + IndexType indexType; + Buffer transformData; + DeviceSize transformOffset; + }; + static_assert( sizeof( GeometryTrianglesNVX ) == sizeof( VkGeometryTrianglesNVX ), "struct and wrapper have different size!" ); + + struct GeometryAABBNVX + { + GeometryAABBNVX( Buffer aabbData_ = Buffer(), + uint32_t numAABBs_ = 0, + uint32_t stride_ = 0, + DeviceSize offset_ = 0 ) + : aabbData( aabbData_ ) + , numAABBs( numAABBs_ ) + , stride( stride_ ) + , offset( offset_ ) + { + } + + GeometryAABBNVX( VkGeometryAABBNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( GeometryAABBNVX ) ); + } + + GeometryAABBNVX& operator=( VkGeometryAABBNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( GeometryAABBNVX ) ); + return *this; + } + GeometryAABBNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + GeometryAABBNVX& setAabbData( Buffer aabbData_ ) + { + aabbData = aabbData_; + return *this; + } + + GeometryAABBNVX& setNumAABBs( uint32_t numAABBs_ ) + { + numAABBs = numAABBs_; + return *this; + } + + GeometryAABBNVX& setStride( uint32_t stride_ ) + { + stride = stride_; + return *this; + } + + GeometryAABBNVX& setOffset( DeviceSize offset_ ) + { + offset = offset_; + return *this; + } + + operator VkGeometryAABBNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkGeometryAABBNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( GeometryAABBNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( aabbData == rhs.aabbData ) + && ( numAABBs == rhs.numAABBs ) + && ( stride == rhs.stride ) + && ( offset == rhs.offset ); + } + + bool operator!=( GeometryAABBNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eGeometryAabbNVX; + + public: + const void* pNext = nullptr; + Buffer aabbData; + uint32_t numAABBs; + uint32_t stride; + DeviceSize offset; + }; + static_assert( sizeof( GeometryAABBNVX ) == sizeof( VkGeometryAABBNVX ), "struct and wrapper have different size!" ); + + struct GeometryDataNVX + { + GeometryDataNVX( GeometryTrianglesNVX triangles_ = GeometryTrianglesNVX(), + GeometryAABBNVX aabbs_ = GeometryAABBNVX() ) + : triangles( triangles_ ) + , aabbs( aabbs_ ) + { + } + + GeometryDataNVX( VkGeometryDataNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( GeometryDataNVX ) ); + } + + GeometryDataNVX& operator=( VkGeometryDataNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( GeometryDataNVX ) ); + return *this; + } + GeometryDataNVX& setTriangles( GeometryTrianglesNVX triangles_ ) + { + triangles = triangles_; + return *this; + } + + GeometryDataNVX& setAabbs( GeometryAABBNVX aabbs_ ) + { + aabbs = aabbs_; + return *this; + } + + operator VkGeometryDataNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkGeometryDataNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( GeometryDataNVX const& rhs ) const + { + return ( triangles == rhs.triangles ) + && ( aabbs == rhs.aabbs ); + } + + bool operator!=( GeometryDataNVX const& rhs ) const + { + return !operator==( rhs ); + } + + GeometryTrianglesNVX triangles; + GeometryAABBNVX aabbs; + }; + static_assert( sizeof( GeometryDataNVX ) == sizeof( VkGeometryDataNVX ), "struct and wrapper have different size!" 
); + + struct BindAccelerationStructureMemoryInfoNVX + { + BindAccelerationStructureMemoryInfoNVX( AccelerationStructureNVX accelerationStructure_ = AccelerationStructureNVX(), + DeviceMemory memory_ = DeviceMemory(), + DeviceSize memoryOffset_ = 0, + uint32_t deviceIndexCount_ = 0, + const uint32_t* pDeviceIndices_ = nullptr ) + : accelerationStructure( accelerationStructure_ ) + , memory( memory_ ) + , memoryOffset( memoryOffset_ ) + , deviceIndexCount( deviceIndexCount_ ) + , pDeviceIndices( pDeviceIndices_ ) + { + } + + BindAccelerationStructureMemoryInfoNVX( VkBindAccelerationStructureMemoryInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( BindAccelerationStructureMemoryInfoNVX ) ); + } + + BindAccelerationStructureMemoryInfoNVX& operator=( VkBindAccelerationStructureMemoryInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( BindAccelerationStructureMemoryInfoNVX ) ); + return *this; + } + BindAccelerationStructureMemoryInfoNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BindAccelerationStructureMemoryInfoNVX& setAccelerationStructure( AccelerationStructureNVX accelerationStructure_ ) + { + accelerationStructure = accelerationStructure_; + return *this; + } + + BindAccelerationStructureMemoryInfoNVX& setMemory( DeviceMemory memory_ ) + { + memory = memory_; + return *this; + } + + BindAccelerationStructureMemoryInfoNVX& setMemoryOffset( DeviceSize memoryOffset_ ) + { + memoryOffset = memoryOffset_; + return *this; + } + + BindAccelerationStructureMemoryInfoNVX& setDeviceIndexCount( uint32_t deviceIndexCount_ ) + { + deviceIndexCount = deviceIndexCount_; + return *this; + } + + BindAccelerationStructureMemoryInfoNVX& setPDeviceIndices( const uint32_t* pDeviceIndices_ ) + { + pDeviceIndices = pDeviceIndices_; + return *this; + } + + operator VkBindAccelerationStructureMemoryInfoNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkBindAccelerationStructureMemoryInfoNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( BindAccelerationStructureMemoryInfoNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructure == rhs.accelerationStructure ) + && ( memory == rhs.memory ) + && ( memoryOffset == rhs.memoryOffset ) + && ( deviceIndexCount == rhs.deviceIndexCount ) + && ( pDeviceIndices == rhs.pDeviceIndices ); + } + + bool operator!=( BindAccelerationStructureMemoryInfoNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eBindAccelerationStructureMemoryInfoNVX; + + public: + const void* pNext = nullptr; + AccelerationStructureNVX accelerationStructure; + DeviceMemory memory; + DeviceSize memoryOffset; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; + }; + static_assert( sizeof( BindAccelerationStructureMemoryInfoNVX ) == sizeof( VkBindAccelerationStructureMemoryInfoNVX ), "struct and wrapper have different size!" 
); + + struct DescriptorAccelerationStructureInfoNVX + { + DescriptorAccelerationStructureInfoNVX( uint32_t accelerationStructureCount_ = 0, + const AccelerationStructureNVX* pAccelerationStructures_ = nullptr ) + : accelerationStructureCount( accelerationStructureCount_ ) + , pAccelerationStructures( pAccelerationStructures_ ) + { + } + + DescriptorAccelerationStructureInfoNVX( VkDescriptorAccelerationStructureInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorAccelerationStructureInfoNVX ) ); + } + + DescriptorAccelerationStructureInfoNVX& operator=( VkDescriptorAccelerationStructureInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorAccelerationStructureInfoNVX ) ); + return *this; + } + DescriptorAccelerationStructureInfoNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DescriptorAccelerationStructureInfoNVX& setAccelerationStructureCount( uint32_t accelerationStructureCount_ ) + { + accelerationStructureCount = accelerationStructureCount_; + return *this; + } + + DescriptorAccelerationStructureInfoNVX& setPAccelerationStructures( const AccelerationStructureNVX* pAccelerationStructures_ ) + { + pAccelerationStructures = pAccelerationStructures_; + return *this; + } + + operator VkDescriptorAccelerationStructureInfoNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorAccelerationStructureInfoNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorAccelerationStructureInfoNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructureCount == rhs.accelerationStructureCount ) + && ( pAccelerationStructures == rhs.pAccelerationStructures ); + } + + bool operator!=( DescriptorAccelerationStructureInfoNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorAccelerationStructureInfoNVX; + + public: + const void* pNext = nullptr; + uint32_t accelerationStructureCount; + const AccelerationStructureNVX* pAccelerationStructures; + }; + static_assert( sizeof( DescriptorAccelerationStructureInfoNVX ) == sizeof( VkDescriptorAccelerationStructureInfoNVX ), "struct and wrapper have different size!" 
); + + struct AccelerationStructureMemoryRequirementsInfoNVX + { + AccelerationStructureMemoryRequirementsInfoNVX( AccelerationStructureNVX accelerationStructure_ = AccelerationStructureNVX() ) + : accelerationStructure( accelerationStructure_ ) + { + } + + AccelerationStructureMemoryRequirementsInfoNVX( VkAccelerationStructureMemoryRequirementsInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( AccelerationStructureMemoryRequirementsInfoNVX ) ); + } + + AccelerationStructureMemoryRequirementsInfoNVX& operator=( VkAccelerationStructureMemoryRequirementsInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( AccelerationStructureMemoryRequirementsInfoNVX ) ); + return *this; + } + AccelerationStructureMemoryRequirementsInfoNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + AccelerationStructureMemoryRequirementsInfoNVX& setAccelerationStructure( AccelerationStructureNVX accelerationStructure_ ) + { + accelerationStructure = accelerationStructure_; + return *this; + } + + operator VkAccelerationStructureMemoryRequirementsInfoNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkAccelerationStructureMemoryRequirementsInfoNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( AccelerationStructureMemoryRequirementsInfoNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructure == rhs.accelerationStructure ); + } + + bool operator!=( AccelerationStructureMemoryRequirementsInfoNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eAccelerationStructureMemoryRequirementsInfoNVX; + + public: + const void* pNext = nullptr; + AccelerationStructureNVX accelerationStructure; + }; + static_assert( sizeof( AccelerationStructureMemoryRequirementsInfoNVX ) == sizeof( VkAccelerationStructureMemoryRequirementsInfoNVX ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceRaytracingPropertiesNVX + { + PhysicalDeviceRaytracingPropertiesNVX( uint32_t shaderHeaderSize_ = 0, + uint32_t maxRecursionDepth_ = 0, + uint32_t maxGeometryCount_ = 0 ) + : shaderHeaderSize( shaderHeaderSize_ ) + , maxRecursionDepth( maxRecursionDepth_ ) + , maxGeometryCount( maxGeometryCount_ ) + { + } + + PhysicalDeviceRaytracingPropertiesNVX( VkPhysicalDeviceRaytracingPropertiesNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceRaytracingPropertiesNVX ) ); + } + + PhysicalDeviceRaytracingPropertiesNVX& operator=( VkPhysicalDeviceRaytracingPropertiesNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceRaytracingPropertiesNVX ) ); + return *this; + } + PhysicalDeviceRaytracingPropertiesNVX& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceRaytracingPropertiesNVX& setShaderHeaderSize( uint32_t shaderHeaderSize_ ) + { + shaderHeaderSize = shaderHeaderSize_; + return *this; + } + + PhysicalDeviceRaytracingPropertiesNVX& setMaxRecursionDepth( uint32_t maxRecursionDepth_ ) + { + maxRecursionDepth = maxRecursionDepth_; + return *this; + } + + PhysicalDeviceRaytracingPropertiesNVX& setMaxGeometryCount( uint32_t maxGeometryCount_ ) + { + maxGeometryCount = maxGeometryCount_; + return *this; + } + + operator VkPhysicalDeviceRaytracingPropertiesNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceRaytracingPropertiesNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceRaytracingPropertiesNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderHeaderSize == rhs.shaderHeaderSize ) + && ( maxRecursionDepth == rhs.maxRecursionDepth ) + && ( maxGeometryCount == rhs.maxGeometryCount ); + } + + bool operator!=( PhysicalDeviceRaytracingPropertiesNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceRaytracingPropertiesNVX; + + public: + void* pNext = nullptr; + uint32_t shaderHeaderSize; + uint32_t maxRecursionDepth; + uint32_t maxGeometryCount; + }; + static_assert( sizeof( PhysicalDeviceRaytracingPropertiesNVX ) == sizeof( VkPhysicalDeviceRaytracingPropertiesNVX ), "struct and wrapper have different size!" 
); + + enum class SubpassContents + { + eInline = VK_SUBPASS_CONTENTS_INLINE, + eSecondaryCommandBuffers = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS + }; + + struct SubpassBeginInfoKHR + { + SubpassBeginInfoKHR( SubpassContents contents_ = SubpassContents::eInline ) + : contents( contents_ ) + { + } + + SubpassBeginInfoKHR( VkSubpassBeginInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassBeginInfoKHR ) ); + } + + SubpassBeginInfoKHR& operator=( VkSubpassBeginInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassBeginInfoKHR ) ); + return *this; + } + SubpassBeginInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SubpassBeginInfoKHR& setContents( SubpassContents contents_ ) + { + contents = contents_; + return *this; + } + + operator VkSubpassBeginInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSubpassBeginInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SubpassBeginInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( contents == rhs.contents ); + } + + bool operator!=( SubpassBeginInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSubpassBeginInfoKHR; + + public: + const void* pNext = nullptr; + SubpassContents contents; + }; + static_assert( sizeof( SubpassBeginInfoKHR ) == sizeof( VkSubpassBeginInfoKHR ), "struct and wrapper have different size!" ); + + struct PresentInfoKHR + { + PresentInfoKHR( uint32_t waitSemaphoreCount_ = 0, + const Semaphore* pWaitSemaphores_ = nullptr, + uint32_t swapchainCount_ = 0, + const SwapchainKHR* pSwapchains_ = nullptr, + const uint32_t* pImageIndices_ = nullptr, + Result* pResults_ = nullptr ) + : waitSemaphoreCount( waitSemaphoreCount_ ) + , pWaitSemaphores( pWaitSemaphores_ ) + , swapchainCount( swapchainCount_ ) + , pSwapchains( pSwapchains_ ) + , pImageIndices( pImageIndices_ ) + , pResults( pResults_ ) + { + } + + PresentInfoKHR( VkPresentInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentInfoKHR ) ); + } + + PresentInfoKHR& operator=( VkPresentInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( PresentInfoKHR ) ); + return *this; + } + PresentInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PresentInfoKHR& setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ ) + { + waitSemaphoreCount = waitSemaphoreCount_; + return *this; + } + + PresentInfoKHR& setPWaitSemaphores( const Semaphore* pWaitSemaphores_ ) + { + pWaitSemaphores = pWaitSemaphores_; + return *this; + } + + PresentInfoKHR& setSwapchainCount( uint32_t swapchainCount_ ) + { + swapchainCount = swapchainCount_; + return *this; + } + + PresentInfoKHR& setPSwapchains( const SwapchainKHR* pSwapchains_ ) + { + pSwapchains = pSwapchains_; + return *this; + } + + PresentInfoKHR& setPImageIndices( const uint32_t* pImageIndices_ ) + { + pImageIndices = pImageIndices_; + return *this; + } + + PresentInfoKHR& setPResults( Result* pResults_ ) + { + pResults = pResults_; + return *this; + } + + operator VkPresentInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkPresentInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( PresentInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreCount == rhs.waitSemaphoreCount ) + && ( pWaitSemaphores == rhs.pWaitSemaphores ) + && ( swapchainCount == rhs.swapchainCount ) + && ( 
pSwapchains == rhs.pSwapchains ) + && ( pImageIndices == rhs.pImageIndices ) + && ( pResults == rhs.pResults ); + } + + bool operator!=( PresentInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePresentInfoKHR; + + public: + const void* pNext = nullptr; + uint32_t waitSemaphoreCount; + const Semaphore* pWaitSemaphores; + uint32_t swapchainCount; + const SwapchainKHR* pSwapchains; + const uint32_t* pImageIndices; + Result* pResults; + }; + static_assert( sizeof( PresentInfoKHR ) == sizeof( VkPresentInfoKHR ), "struct and wrapper have different size!" ); + + enum class DynamicState + { + eViewport = VK_DYNAMIC_STATE_VIEWPORT, + eScissor = VK_DYNAMIC_STATE_SCISSOR, + eLineWidth = VK_DYNAMIC_STATE_LINE_WIDTH, + eDepthBias = VK_DYNAMIC_STATE_DEPTH_BIAS, + eBlendConstants = VK_DYNAMIC_STATE_BLEND_CONSTANTS, + eDepthBounds = VK_DYNAMIC_STATE_DEPTH_BOUNDS, + eStencilCompareMask = VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, + eStencilWriteMask = VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, + eStencilReference = VK_DYNAMIC_STATE_STENCIL_REFERENCE, + eViewportWScalingNV = VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, + eDiscardRectangleEXT = VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, + eSampleLocationsEXT = VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, + eViewportShadingRatePaletteNV = VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV, + eViewportCoarseSampleOrderNV = VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV, + eExclusiveScissorNV = VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV + }; + + struct PipelineDynamicStateCreateInfo + { + PipelineDynamicStateCreateInfo( PipelineDynamicStateCreateFlags flags_ = PipelineDynamicStateCreateFlags(), + uint32_t dynamicStateCount_ = 0, + const DynamicState* pDynamicStates_ = nullptr ) + : flags( flags_ ) + , dynamicStateCount( dynamicStateCount_ ) + , pDynamicStates( pDynamicStates_ ) + { + } + + PipelineDynamicStateCreateInfo( VkPipelineDynamicStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineDynamicStateCreateInfo ) ); + } + + PipelineDynamicStateCreateInfo& operator=( VkPipelineDynamicStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineDynamicStateCreateInfo ) ); + return *this; + } + PipelineDynamicStateCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineDynamicStateCreateInfo& setFlags( PipelineDynamicStateCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineDynamicStateCreateInfo& setDynamicStateCount( uint32_t dynamicStateCount_ ) + { + dynamicStateCount = dynamicStateCount_; + return *this; + } + + PipelineDynamicStateCreateInfo& setPDynamicStates( const DynamicState* pDynamicStates_ ) + { + pDynamicStates = pDynamicStates_; + return *this; + } + + operator VkPipelineDynamicStateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineDynamicStateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineDynamicStateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( dynamicStateCount == rhs.dynamicStateCount ) + && ( pDynamicStates == rhs.pDynamicStates ); + } + + bool operator!=( PipelineDynamicStateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineDynamicStateCreateInfo; + + public: + const void* pNext = nullptr; + PipelineDynamicStateCreateFlags flags; + uint32_t dynamicStateCount; + const 
DynamicState* pDynamicStates; + }; + static_assert( sizeof( PipelineDynamicStateCreateInfo ) == sizeof( VkPipelineDynamicStateCreateInfo ), "struct and wrapper have different size!" ); + + enum class DescriptorUpdateTemplateType + { + eDescriptorSet = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + eDescriptorSetKHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + ePushDescriptorsKHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR + }; + + struct DescriptorUpdateTemplateCreateInfo + { + DescriptorUpdateTemplateCreateInfo( DescriptorUpdateTemplateCreateFlags flags_ = DescriptorUpdateTemplateCreateFlags(), + uint32_t descriptorUpdateEntryCount_ = 0, + const DescriptorUpdateTemplateEntry* pDescriptorUpdateEntries_ = nullptr, + DescriptorUpdateTemplateType templateType_ = DescriptorUpdateTemplateType::eDescriptorSet, + DescriptorSetLayout descriptorSetLayout_ = DescriptorSetLayout(), + PipelineBindPoint pipelineBindPoint_ = PipelineBindPoint::eGraphics, + PipelineLayout pipelineLayout_ = PipelineLayout(), + uint32_t set_ = 0 ) + : flags( flags_ ) + , descriptorUpdateEntryCount( descriptorUpdateEntryCount_ ) + , pDescriptorUpdateEntries( pDescriptorUpdateEntries_ ) + , templateType( templateType_ ) + , descriptorSetLayout( descriptorSetLayout_ ) + , pipelineBindPoint( pipelineBindPoint_ ) + , pipelineLayout( pipelineLayout_ ) + , set( set_ ) + { + } + + DescriptorUpdateTemplateCreateInfo( VkDescriptorUpdateTemplateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorUpdateTemplateCreateInfo ) ); + } + + DescriptorUpdateTemplateCreateInfo& operator=( VkDescriptorUpdateTemplateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorUpdateTemplateCreateInfo ) ); + return *this; + } + DescriptorUpdateTemplateCreateInfo& setPNext( void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo& setFlags( DescriptorUpdateTemplateCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo& setDescriptorUpdateEntryCount( uint32_t descriptorUpdateEntryCount_ ) + { + descriptorUpdateEntryCount = descriptorUpdateEntryCount_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo& setPDescriptorUpdateEntries( const DescriptorUpdateTemplateEntry* pDescriptorUpdateEntries_ ) + { + pDescriptorUpdateEntries = pDescriptorUpdateEntries_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo& setTemplateType( DescriptorUpdateTemplateType templateType_ ) + { + templateType = templateType_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo& setDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout_ ) + { + descriptorSetLayout = descriptorSetLayout_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo& setPipelineBindPoint( PipelineBindPoint pipelineBindPoint_ ) + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo& setPipelineLayout( PipelineLayout pipelineLayout_ ) + { + pipelineLayout = pipelineLayout_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo& setSet( uint32_t set_ ) + { + set = set_; + return *this; + } + + operator VkDescriptorUpdateTemplateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorUpdateTemplateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorUpdateTemplateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( 
descriptorUpdateEntryCount == rhs.descriptorUpdateEntryCount ) + && ( pDescriptorUpdateEntries == rhs.pDescriptorUpdateEntries ) + && ( templateType == rhs.templateType ) + && ( descriptorSetLayout == rhs.descriptorSetLayout ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( pipelineLayout == rhs.pipelineLayout ) + && ( set == rhs.set ); + } + + bool operator!=( DescriptorUpdateTemplateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorUpdateTemplateCreateInfo; + + public: + void* pNext = nullptr; + DescriptorUpdateTemplateCreateFlags flags; + uint32_t descriptorUpdateEntryCount; + const DescriptorUpdateTemplateEntry* pDescriptorUpdateEntries; + DescriptorUpdateTemplateType templateType; + DescriptorSetLayout descriptorSetLayout; + PipelineBindPoint pipelineBindPoint; + PipelineLayout pipelineLayout; + uint32_t set; + }; + static_assert( sizeof( DescriptorUpdateTemplateCreateInfo ) == sizeof( VkDescriptorUpdateTemplateCreateInfo ), "struct and wrapper have different size!" ); + + using DescriptorUpdateTemplateCreateInfoKHR = DescriptorUpdateTemplateCreateInfo; + + enum class ObjectType + { + eUnknown = VK_OBJECT_TYPE_UNKNOWN, + eInstance = VK_OBJECT_TYPE_INSTANCE, + ePhysicalDevice = VK_OBJECT_TYPE_PHYSICAL_DEVICE, + eDevice = VK_OBJECT_TYPE_DEVICE, + eQueue = VK_OBJECT_TYPE_QUEUE, + eSemaphore = VK_OBJECT_TYPE_SEMAPHORE, + eCommandBuffer = VK_OBJECT_TYPE_COMMAND_BUFFER, + eFence = VK_OBJECT_TYPE_FENCE, + eDeviceMemory = VK_OBJECT_TYPE_DEVICE_MEMORY, + eBuffer = VK_OBJECT_TYPE_BUFFER, + eImage = VK_OBJECT_TYPE_IMAGE, + eEvent = VK_OBJECT_TYPE_EVENT, + eQueryPool = VK_OBJECT_TYPE_QUERY_POOL, + eBufferView = VK_OBJECT_TYPE_BUFFER_VIEW, + eImageView = VK_OBJECT_TYPE_IMAGE_VIEW, + eShaderModule = VK_OBJECT_TYPE_SHADER_MODULE, + ePipelineCache = VK_OBJECT_TYPE_PIPELINE_CACHE, + ePipelineLayout = VK_OBJECT_TYPE_PIPELINE_LAYOUT, + eRenderPass = VK_OBJECT_TYPE_RENDER_PASS, + ePipeline = VK_OBJECT_TYPE_PIPELINE, + eDescriptorSetLayout = VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, + eSampler = VK_OBJECT_TYPE_SAMPLER, + eDescriptorPool = VK_OBJECT_TYPE_DESCRIPTOR_POOL, + eDescriptorSet = VK_OBJECT_TYPE_DESCRIPTOR_SET, + eFramebuffer = VK_OBJECT_TYPE_FRAMEBUFFER, + eCommandPool = VK_OBJECT_TYPE_COMMAND_POOL, + eSamplerYcbcrConversion = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, + eSamplerYcbcrConversionKHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, + eDescriptorUpdateTemplate = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, + eDescriptorUpdateTemplateKHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, + eSurfaceKHR = VK_OBJECT_TYPE_SURFACE_KHR, + eSwapchainKHR = VK_OBJECT_TYPE_SWAPCHAIN_KHR, + eDisplayKHR = VK_OBJECT_TYPE_DISPLAY_KHR, + eDisplayModeKHR = VK_OBJECT_TYPE_DISPLAY_MODE_KHR, + eDebugReportCallbackEXT = VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT, + eObjectTableNVX = VK_OBJECT_TYPE_OBJECT_TABLE_NVX, + eIndirectCommandsLayoutNVX = VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX, + eDebugUtilsMessengerEXT = VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT, + eValidationCacheEXT = VK_OBJECT_TYPE_VALIDATION_CACHE_EXT, + eAccelerationStructureNVX = VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX + }; + + struct DebugUtilsObjectNameInfoEXT + { + DebugUtilsObjectNameInfoEXT( ObjectType objectType_ = ObjectType::eUnknown, + uint64_t objectHandle_ = 0, + const char* pObjectName_ = nullptr ) + : objectType( objectType_ ) + , objectHandle( objectHandle_ ) + , pObjectName( pObjectName_ ) + { + } + + DebugUtilsObjectNameInfoEXT( 
VkDebugUtilsObjectNameInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugUtilsObjectNameInfoEXT ) ); + } + + DebugUtilsObjectNameInfoEXT& operator=( VkDebugUtilsObjectNameInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugUtilsObjectNameInfoEXT ) ); + return *this; + } + DebugUtilsObjectNameInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DebugUtilsObjectNameInfoEXT& setObjectType( ObjectType objectType_ ) + { + objectType = objectType_; + return *this; + } + + DebugUtilsObjectNameInfoEXT& setObjectHandle( uint64_t objectHandle_ ) + { + objectHandle = objectHandle_; + return *this; + } + + DebugUtilsObjectNameInfoEXT& setPObjectName( const char* pObjectName_ ) + { + pObjectName = pObjectName_; + return *this; + } + + operator VkDebugUtilsObjectNameInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDebugUtilsObjectNameInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DebugUtilsObjectNameInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectType == rhs.objectType ) + && ( objectHandle == rhs.objectHandle ) + && ( pObjectName == rhs.pObjectName ); + } + + bool operator!=( DebugUtilsObjectNameInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDebugUtilsObjectNameInfoEXT; + + public: + const void* pNext = nullptr; + ObjectType objectType; + uint64_t objectHandle; + const char* pObjectName; + }; + static_assert( sizeof( DebugUtilsObjectNameInfoEXT ) == sizeof( VkDebugUtilsObjectNameInfoEXT ), "struct and wrapper have different size!" ); + + struct DebugUtilsObjectTagInfoEXT + { + DebugUtilsObjectTagInfoEXT( ObjectType objectType_ = ObjectType::eUnknown, + uint64_t objectHandle_ = 0, + uint64_t tagName_ = 0, + size_t tagSize_ = 0, + const void* pTag_ = nullptr ) + : objectType( objectType_ ) + , objectHandle( objectHandle_ ) + , tagName( tagName_ ) + , tagSize( tagSize_ ) + , pTag( pTag_ ) + { + } + + DebugUtilsObjectTagInfoEXT( VkDebugUtilsObjectTagInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugUtilsObjectTagInfoEXT ) ); + } + + DebugUtilsObjectTagInfoEXT& operator=( VkDebugUtilsObjectTagInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugUtilsObjectTagInfoEXT ) ); + return *this; + } + DebugUtilsObjectTagInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DebugUtilsObjectTagInfoEXT& setObjectType( ObjectType objectType_ ) + { + objectType = objectType_; + return *this; + } + + DebugUtilsObjectTagInfoEXT& setObjectHandle( uint64_t objectHandle_ ) + { + objectHandle = objectHandle_; + return *this; + } + + DebugUtilsObjectTagInfoEXT& setTagName( uint64_t tagName_ ) + { + tagName = tagName_; + return *this; + } + + DebugUtilsObjectTagInfoEXT& setTagSize( size_t tagSize_ ) + { + tagSize = tagSize_; + return *this; + } + + DebugUtilsObjectTagInfoEXT& setPTag( const void* pTag_ ) + { + pTag = pTag_; + return *this; + } + + operator VkDebugUtilsObjectTagInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDebugUtilsObjectTagInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DebugUtilsObjectTagInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectType == rhs.objectType ) + && ( objectHandle == rhs.objectHandle ) + && ( tagName == rhs.tagName ) + && ( tagSize == rhs.tagSize ) + && ( pTag == rhs.pTag ); + } + + bool 
operator!=( DebugUtilsObjectTagInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDebugUtilsObjectTagInfoEXT; + + public: + const void* pNext = nullptr; + ObjectType objectType; + uint64_t objectHandle; + uint64_t tagName; + size_t tagSize; + const void* pTag; + }; + static_assert( sizeof( DebugUtilsObjectTagInfoEXT ) == sizeof( VkDebugUtilsObjectTagInfoEXT ), "struct and wrapper have different size!" ); + + struct DebugUtilsMessengerCallbackDataEXT + { + DebugUtilsMessengerCallbackDataEXT( DebugUtilsMessengerCallbackDataFlagsEXT flags_ = DebugUtilsMessengerCallbackDataFlagsEXT(), + const char* pMessageIdName_ = nullptr, + int32_t messageIdNumber_ = 0, + const char* pMessage_ = nullptr, + uint32_t queueLabelCount_ = 0, + DebugUtilsLabelEXT* pQueueLabels_ = nullptr, + uint32_t cmdBufLabelCount_ = 0, + DebugUtilsLabelEXT* pCmdBufLabels_ = nullptr, + uint32_t objectCount_ = 0, + DebugUtilsObjectNameInfoEXT* pObjects_ = nullptr ) + : flags( flags_ ) + , pMessageIdName( pMessageIdName_ ) + , messageIdNumber( messageIdNumber_ ) + , pMessage( pMessage_ ) + , queueLabelCount( queueLabelCount_ ) + , pQueueLabels( pQueueLabels_ ) + , cmdBufLabelCount( cmdBufLabelCount_ ) + , pCmdBufLabels( pCmdBufLabels_ ) + , objectCount( objectCount_ ) + , pObjects( pObjects_ ) + { + } + + DebugUtilsMessengerCallbackDataEXT( VkDebugUtilsMessengerCallbackDataEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugUtilsMessengerCallbackDataEXT ) ); + } + + DebugUtilsMessengerCallbackDataEXT& operator=( VkDebugUtilsMessengerCallbackDataEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugUtilsMessengerCallbackDataEXT ) ); + return *this; + } + DebugUtilsMessengerCallbackDataEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setFlags( DebugUtilsMessengerCallbackDataFlagsEXT flags_ ) + { + flags = flags_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setPMessageIdName( const char* pMessageIdName_ ) + { + pMessageIdName = pMessageIdName_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setMessageIdNumber( int32_t messageIdNumber_ ) + { + messageIdNumber = messageIdNumber_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setPMessage( const char* pMessage_ ) + { + pMessage = pMessage_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setQueueLabelCount( uint32_t queueLabelCount_ ) + { + queueLabelCount = queueLabelCount_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setPQueueLabels( DebugUtilsLabelEXT* pQueueLabels_ ) + { + pQueueLabels = pQueueLabels_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setCmdBufLabelCount( uint32_t cmdBufLabelCount_ ) + { + cmdBufLabelCount = cmdBufLabelCount_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setPCmdBufLabels( DebugUtilsLabelEXT* pCmdBufLabels_ ) + { + pCmdBufLabels = pCmdBufLabels_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setObjectCount( uint32_t objectCount_ ) + { + objectCount = objectCount_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT& setPObjects( DebugUtilsObjectNameInfoEXT* pObjects_ ) + { + pObjects = pObjects_; + return *this; + } + + operator VkDebugUtilsMessengerCallbackDataEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDebugUtilsMessengerCallbackDataEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DebugUtilsMessengerCallbackDataEXT const& rhs ) const + { + 
return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pMessageIdName == rhs.pMessageIdName ) + && ( messageIdNumber == rhs.messageIdNumber ) + && ( pMessage == rhs.pMessage ) + && ( queueLabelCount == rhs.queueLabelCount ) + && ( pQueueLabels == rhs.pQueueLabels ) + && ( cmdBufLabelCount == rhs.cmdBufLabelCount ) + && ( pCmdBufLabels == rhs.pCmdBufLabels ) + && ( objectCount == rhs.objectCount ) + && ( pObjects == rhs.pObjects ); + } + + bool operator!=( DebugUtilsMessengerCallbackDataEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDebugUtilsMessengerCallbackDataEXT; + + public: + const void* pNext = nullptr; + DebugUtilsMessengerCallbackDataFlagsEXT flags; + const char* pMessageIdName; + int32_t messageIdNumber; + const char* pMessage; + uint32_t queueLabelCount; + DebugUtilsLabelEXT* pQueueLabels; + uint32_t cmdBufLabelCount; + DebugUtilsLabelEXT* pCmdBufLabels; + uint32_t objectCount; + DebugUtilsObjectNameInfoEXT* pObjects; + }; + static_assert( sizeof( DebugUtilsMessengerCallbackDataEXT ) == sizeof( VkDebugUtilsMessengerCallbackDataEXT ), "struct and wrapper have different size!" ); + + enum class QueueFlagBits + { + eGraphics = VK_QUEUE_GRAPHICS_BIT, + eCompute = VK_QUEUE_COMPUTE_BIT, + eTransfer = VK_QUEUE_TRANSFER_BIT, + eSparseBinding = VK_QUEUE_SPARSE_BINDING_BIT, + eProtected = VK_QUEUE_PROTECTED_BIT + }; + + using QueueFlags = Flags; + + VULKAN_HPP_INLINE QueueFlags operator|( QueueFlagBits bit0, QueueFlagBits bit1 ) + { + return QueueFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE QueueFlags operator~( QueueFlagBits bits ) + { + return ~( QueueFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(QueueFlagBits::eGraphics) | VkFlags(QueueFlagBits::eCompute) | VkFlags(QueueFlagBits::eTransfer) | VkFlags(QueueFlagBits::eSparseBinding) | VkFlags(QueueFlagBits::eProtected) + }; + }; + + struct QueueFamilyProperties + { + operator VkQueueFamilyProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkQueueFamilyProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( QueueFamilyProperties const& rhs ) const + { + return ( queueFlags == rhs.queueFlags ) + && ( queueCount == rhs.queueCount ) + && ( timestampValidBits == rhs.timestampValidBits ) + && ( minImageTransferGranularity == rhs.minImageTransferGranularity ); + } + + bool operator!=( QueueFamilyProperties const& rhs ) const + { + return !operator==( rhs ); + } + + QueueFlags queueFlags; + uint32_t queueCount; + uint32_t timestampValidBits; + Extent3D minImageTransferGranularity; + }; + static_assert( sizeof( QueueFamilyProperties ) == sizeof( VkQueueFamilyProperties ), "struct and wrapper have different size!" 
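+  /*
+    Illustrative sketch, not part of the generated header: QueueFlagBits values compose into
+    a QueueFlags bitmask through the operator| defined above.  The Flags<> class template
+    that supplies operator& and operator== is defined earlier in this header, and
+    queueFamilies is assumed to come from PhysicalDevice::getQueueFamilyProperties.
+
+      QueueFlags wanted = QueueFlagBits::eGraphics | QueueFlagBits::eCompute;
+      uint32_t graphicsFamily = ~0u;
+      for( uint32_t i = 0; i < queueFamilies.size(); i++ )
+      {
+        if( ( queueFamilies[i].queueFlags & wanted ) == wanted )
+        {
+          graphicsFamily = i;                  // first family supporting both bits
+          break;
+        }
+      }
+  */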
); + + struct QueueFamilyProperties2 + { + operator VkQueueFamilyProperties2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkQueueFamilyProperties2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( QueueFamilyProperties2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( queueFamilyProperties == rhs.queueFamilyProperties ); + } + + bool operator!=( QueueFamilyProperties2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eQueueFamilyProperties2; + + public: + void* pNext = nullptr; + QueueFamilyProperties queueFamilyProperties; + }; + static_assert( sizeof( QueueFamilyProperties2 ) == sizeof( VkQueueFamilyProperties2 ), "struct and wrapper have different size!" ); + + using QueueFamilyProperties2KHR = QueueFamilyProperties2; + + enum class DeviceQueueCreateFlagBits + { + eProtected = VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT + }; + + using DeviceQueueCreateFlags = Flags; + + VULKAN_HPP_INLINE DeviceQueueCreateFlags operator|( DeviceQueueCreateFlagBits bit0, DeviceQueueCreateFlagBits bit1 ) + { + return DeviceQueueCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE DeviceQueueCreateFlags operator~( DeviceQueueCreateFlagBits bits ) + { + return ~( DeviceQueueCreateFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(DeviceQueueCreateFlagBits::eProtected) + }; + }; + + struct DeviceQueueCreateInfo + { + DeviceQueueCreateInfo( DeviceQueueCreateFlags flags_ = DeviceQueueCreateFlags(), + uint32_t queueFamilyIndex_ = 0, + uint32_t queueCount_ = 0, + const float* pQueuePriorities_ = nullptr ) + : flags( flags_ ) + , queueFamilyIndex( queueFamilyIndex_ ) + , queueCount( queueCount_ ) + , pQueuePriorities( pQueuePriorities_ ) + { + } + + DeviceQueueCreateInfo( VkDeviceQueueCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceQueueCreateInfo ) ); + } + + DeviceQueueCreateInfo& operator=( VkDeviceQueueCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceQueueCreateInfo ) ); + return *this; + } + DeviceQueueCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceQueueCreateInfo& setFlags( DeviceQueueCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + DeviceQueueCreateInfo& setQueueFamilyIndex( uint32_t queueFamilyIndex_ ) + { + queueFamilyIndex = queueFamilyIndex_; + return *this; + } + + DeviceQueueCreateInfo& setQueueCount( uint32_t queueCount_ ) + { + queueCount = queueCount_; + return *this; + } + + DeviceQueueCreateInfo& setPQueuePriorities( const float* pQueuePriorities_ ) + { + pQueuePriorities = pQueuePriorities_; + return *this; + } + + operator VkDeviceQueueCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceQueueCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceQueueCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( queueFamilyIndex == rhs.queueFamilyIndex ) + && ( queueCount == rhs.queueCount ) + && ( pQueuePriorities == rhs.pQueuePriorities ); + } + + bool operator!=( DeviceQueueCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceQueueCreateInfo; + + public: + const void* pNext = nullptr; + DeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueCount; + const float* pQueuePriorities; + }; + static_assert( 
sizeof( DeviceQueueCreateInfo ) == sizeof( VkDeviceQueueCreateInfo ), "struct and wrapper have different size!" ); + + struct DeviceCreateInfo + { + DeviceCreateInfo( DeviceCreateFlags flags_ = DeviceCreateFlags(), + uint32_t queueCreateInfoCount_ = 0, + const DeviceQueueCreateInfo* pQueueCreateInfos_ = nullptr, + uint32_t enabledLayerCount_ = 0, + const char* const* ppEnabledLayerNames_ = nullptr, + uint32_t enabledExtensionCount_ = 0, + const char* const* ppEnabledExtensionNames_ = nullptr, + const PhysicalDeviceFeatures* pEnabledFeatures_ = nullptr ) + : flags( flags_ ) + , queueCreateInfoCount( queueCreateInfoCount_ ) + , pQueueCreateInfos( pQueueCreateInfos_ ) + , enabledLayerCount( enabledLayerCount_ ) + , ppEnabledLayerNames( ppEnabledLayerNames_ ) + , enabledExtensionCount( enabledExtensionCount_ ) + , ppEnabledExtensionNames( ppEnabledExtensionNames_ ) + , pEnabledFeatures( pEnabledFeatures_ ) + { + } + + DeviceCreateInfo( VkDeviceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceCreateInfo ) ); + } + + DeviceCreateInfo& operator=( VkDeviceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceCreateInfo ) ); + return *this; + } + DeviceCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceCreateInfo& setFlags( DeviceCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + DeviceCreateInfo& setQueueCreateInfoCount( uint32_t queueCreateInfoCount_ ) + { + queueCreateInfoCount = queueCreateInfoCount_; + return *this; + } + + DeviceCreateInfo& setPQueueCreateInfos( const DeviceQueueCreateInfo* pQueueCreateInfos_ ) + { + pQueueCreateInfos = pQueueCreateInfos_; + return *this; + } + + DeviceCreateInfo& setEnabledLayerCount( uint32_t enabledLayerCount_ ) + { + enabledLayerCount = enabledLayerCount_; + return *this; + } + + DeviceCreateInfo& setPpEnabledLayerNames( const char* const* ppEnabledLayerNames_ ) + { + ppEnabledLayerNames = ppEnabledLayerNames_; + return *this; + } + + DeviceCreateInfo& setEnabledExtensionCount( uint32_t enabledExtensionCount_ ) + { + enabledExtensionCount = enabledExtensionCount_; + return *this; + } + + DeviceCreateInfo& setPpEnabledExtensionNames( const char* const* ppEnabledExtensionNames_ ) + { + ppEnabledExtensionNames = ppEnabledExtensionNames_; + return *this; + } + + DeviceCreateInfo& setPEnabledFeatures( const PhysicalDeviceFeatures* pEnabledFeatures_ ) + { + pEnabledFeatures = pEnabledFeatures_; + return *this; + } + + operator VkDeviceCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( queueCreateInfoCount == rhs.queueCreateInfoCount ) + && ( pQueueCreateInfos == rhs.pQueueCreateInfos ) + && ( enabledLayerCount == rhs.enabledLayerCount ) + && ( ppEnabledLayerNames == rhs.ppEnabledLayerNames ) + && ( enabledExtensionCount == rhs.enabledExtensionCount ) + && ( ppEnabledExtensionNames == rhs.ppEnabledExtensionNames ) + && ( pEnabledFeatures == rhs.pEnabledFeatures ); + } + + bool operator!=( DeviceCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceCreateInfo; + + public: + const void* pNext = nullptr; + DeviceCreateFlags flags; + uint32_t queueCreateInfoCount; + const DeviceQueueCreateInfo* pQueueCreateInfos; + uint32_t enabledLayerCount; + const char* const* 
ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + const PhysicalDeviceFeatures* pEnabledFeatures; + }; + static_assert( sizeof( DeviceCreateInfo ) == sizeof( VkDeviceCreateInfo ), "struct and wrapper have different size!" ); + + struct DeviceQueueInfo2 + { + DeviceQueueInfo2( DeviceQueueCreateFlags flags_ = DeviceQueueCreateFlags(), + uint32_t queueFamilyIndex_ = 0, + uint32_t queueIndex_ = 0 ) + : flags( flags_ ) + , queueFamilyIndex( queueFamilyIndex_ ) + , queueIndex( queueIndex_ ) + { + } + + DeviceQueueInfo2( VkDeviceQueueInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceQueueInfo2 ) ); + } + + DeviceQueueInfo2& operator=( VkDeviceQueueInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceQueueInfo2 ) ); + return *this; + } + DeviceQueueInfo2& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceQueueInfo2& setFlags( DeviceQueueCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + DeviceQueueInfo2& setQueueFamilyIndex( uint32_t queueFamilyIndex_ ) + { + queueFamilyIndex = queueFamilyIndex_; + return *this; + } + + DeviceQueueInfo2& setQueueIndex( uint32_t queueIndex_ ) + { + queueIndex = queueIndex_; + return *this; + } + + operator VkDeviceQueueInfo2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceQueueInfo2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceQueueInfo2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( queueFamilyIndex == rhs.queueFamilyIndex ) + && ( queueIndex == rhs.queueIndex ); + } + + bool operator!=( DeviceQueueInfo2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceQueueInfo2; + + public: + const void* pNext = nullptr; + DeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueIndex; + }; + static_assert( sizeof( DeviceQueueInfo2 ) == sizeof( VkDeviceQueueInfo2 ), "struct and wrapper have different size!" 
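+  /*
+    Illustrative sketch, not part of the generated header: DeviceQueueCreateInfo and
+    DeviceCreateInfo are typically chained when creating the logical device.  physicalDevice
+    and the queue family index are assumptions, and PhysicalDevice::createDevice is declared
+    elsewhere in this header.
+
+      uint32_t queueFamilyIndex = 0;           // index found via getQueueFamilyProperties
+      float priority = 1.0f;
+      auto queueInfo = DeviceQueueCreateInfo()
+          .setQueueFamilyIndex( queueFamilyIndex )
+          .setQueueCount( 1 )
+          .setPQueuePriorities( &priority );
+
+      const char* extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };
+      auto deviceInfo = DeviceCreateInfo()
+          .setQueueCreateInfoCount( 1 )
+          .setPQueueCreateInfos( &queueInfo )
+          .setEnabledExtensionCount( 1 )
+          .setPpEnabledExtensionNames( extensions );
+
+      Device device = physicalDevice.createDevice( deviceInfo );
+  */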
); + + enum class MemoryPropertyFlagBits + { + eDeviceLocal = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, + eHostVisible = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, + eHostCoherent = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + eHostCached = VK_MEMORY_PROPERTY_HOST_CACHED_BIT, + eLazilyAllocated = VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, + eProtected = VK_MEMORY_PROPERTY_PROTECTED_BIT + }; + + using MemoryPropertyFlags = Flags; + + VULKAN_HPP_INLINE MemoryPropertyFlags operator|( MemoryPropertyFlagBits bit0, MemoryPropertyFlagBits bit1 ) + { + return MemoryPropertyFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE MemoryPropertyFlags operator~( MemoryPropertyFlagBits bits ) + { + return ~( MemoryPropertyFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(MemoryPropertyFlagBits::eDeviceLocal) | VkFlags(MemoryPropertyFlagBits::eHostVisible) | VkFlags(MemoryPropertyFlagBits::eHostCoherent) | VkFlags(MemoryPropertyFlagBits::eHostCached) | VkFlags(MemoryPropertyFlagBits::eLazilyAllocated) | VkFlags(MemoryPropertyFlagBits::eProtected) + }; + }; + + struct MemoryType + { + operator VkMemoryType const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryType &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryType const& rhs ) const + { + return ( propertyFlags == rhs.propertyFlags ) + && ( heapIndex == rhs.heapIndex ); + } + + bool operator!=( MemoryType const& rhs ) const + { + return !operator==( rhs ); + } + + MemoryPropertyFlags propertyFlags; + uint32_t heapIndex; + }; + static_assert( sizeof( MemoryType ) == sizeof( VkMemoryType ), "struct and wrapper have different size!" ); + + enum class MemoryHeapFlagBits + { + eDeviceLocal = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT, + eMultiInstance = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT, + eMultiInstanceKHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT + }; + + using MemoryHeapFlags = Flags; + + VULKAN_HPP_INLINE MemoryHeapFlags operator|( MemoryHeapFlagBits bit0, MemoryHeapFlagBits bit1 ) + { + return MemoryHeapFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE MemoryHeapFlags operator~( MemoryHeapFlagBits bits ) + { + return ~( MemoryHeapFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(MemoryHeapFlagBits::eDeviceLocal) | VkFlags(MemoryHeapFlagBits::eMultiInstance) + }; + }; + + struct MemoryHeap + { + operator VkMemoryHeap const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryHeap &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryHeap const& rhs ) const + { + return ( size == rhs.size ) + && ( flags == rhs.flags ); + } + + bool operator!=( MemoryHeap const& rhs ) const + { + return !operator==( rhs ); + } + + DeviceSize size; + MemoryHeapFlags flags; + }; + static_assert( sizeof( MemoryHeap ) == sizeof( VkMemoryHeap ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceMemoryProperties + { + operator VkPhysicalDeviceMemoryProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceMemoryProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceMemoryProperties const& rhs ) const + { + return ( memoryTypeCount == rhs.memoryTypeCount ) + && ( memcmp( memoryTypes, rhs.memoryTypes, VK_MAX_MEMORY_TYPES * sizeof( MemoryType ) ) == 0 ) + && ( memoryHeapCount == rhs.memoryHeapCount ) + && ( memcmp( memoryHeaps, rhs.memoryHeaps, VK_MAX_MEMORY_HEAPS * sizeof( MemoryHeap ) ) == 0 ); + } + + bool operator!=( PhysicalDeviceMemoryProperties const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t memoryTypeCount; + MemoryType memoryTypes[VK_MAX_MEMORY_TYPES]; + uint32_t memoryHeapCount; + MemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]; + }; + static_assert( sizeof( PhysicalDeviceMemoryProperties ) == sizeof( VkPhysicalDeviceMemoryProperties ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceMemoryProperties2 + { + operator VkPhysicalDeviceMemoryProperties2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceMemoryProperties2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceMemoryProperties2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memoryProperties == rhs.memoryProperties ); + } + + bool operator!=( PhysicalDeviceMemoryProperties2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceMemoryProperties2; + + public: + void* pNext = nullptr; + PhysicalDeviceMemoryProperties memoryProperties; + }; + static_assert( sizeof( PhysicalDeviceMemoryProperties2 ) == sizeof( VkPhysicalDeviceMemoryProperties2 ), "struct and wrapper have different size!" 
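+  /*
+    Illustrative sketch, not part of the generated header: PhysicalDeviceMemoryProperties is
+    usually scanned to pick a memory type index that satisfies both a resource's
+    memoryTypeBits and the desired MemoryPropertyFlags.  memProps and typeBits are
+    assumptions standing in for values obtained from the device and from
+    get*MemoryRequirements.
+
+      uint32_t findMemoryType( const PhysicalDeviceMemoryProperties& memProps,
+                               uint32_t typeBits, MemoryPropertyFlags wanted )
+      {
+        for( uint32_t i = 0; i < memProps.memoryTypeCount; i++ )
+        {
+          bool allowed  = ( typeBits & ( 1u << i ) ) != 0;
+          bool hasFlags = ( memProps.memoryTypes[i].propertyFlags & wanted ) == wanted;
+          if( allowed && hasFlags )
+            return i;
+        }
+        return ~0u;                            // no suitable memory type found
+      }
+  */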
); + + using PhysicalDeviceMemoryProperties2KHR = PhysicalDeviceMemoryProperties2; + + enum class AccessFlagBits + { + eIndirectCommandRead = VK_ACCESS_INDIRECT_COMMAND_READ_BIT, + eIndexRead = VK_ACCESS_INDEX_READ_BIT, + eVertexAttributeRead = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, + eUniformRead = VK_ACCESS_UNIFORM_READ_BIT, + eInputAttachmentRead = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, + eShaderRead = VK_ACCESS_SHADER_READ_BIT, + eShaderWrite = VK_ACCESS_SHADER_WRITE_BIT, + eColorAttachmentRead = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, + eColorAttachmentWrite = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + eDepthStencilAttachmentRead = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, + eDepthStencilAttachmentWrite = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, + eTransferRead = VK_ACCESS_TRANSFER_READ_BIT, + eTransferWrite = VK_ACCESS_TRANSFER_WRITE_BIT, + eHostRead = VK_ACCESS_HOST_READ_BIT, + eHostWrite = VK_ACCESS_HOST_WRITE_BIT, + eMemoryRead = VK_ACCESS_MEMORY_READ_BIT, + eMemoryWrite = VK_ACCESS_MEMORY_WRITE_BIT, + eConditionalRenderingReadEXT = VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT, + eCommandProcessReadNVX = VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX, + eCommandProcessWriteNVX = VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX, + eColorAttachmentReadNoncoherentEXT = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT, + eShadingRateImageReadNV = VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV, + eAccelerationStructureReadNVX = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NVX, + eAccelerationStructureWriteNVX = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NVX + }; + + using AccessFlags = Flags; + + VULKAN_HPP_INLINE AccessFlags operator|( AccessFlagBits bit0, AccessFlagBits bit1 ) + { + return AccessFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE AccessFlags operator~( AccessFlagBits bits ) + { + return ~( AccessFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(AccessFlagBits::eIndirectCommandRead) | VkFlags(AccessFlagBits::eIndexRead) | VkFlags(AccessFlagBits::eVertexAttributeRead) | VkFlags(AccessFlagBits::eUniformRead) | VkFlags(AccessFlagBits::eInputAttachmentRead) | VkFlags(AccessFlagBits::eShaderRead) | VkFlags(AccessFlagBits::eShaderWrite) | VkFlags(AccessFlagBits::eColorAttachmentRead) | VkFlags(AccessFlagBits::eColorAttachmentWrite) | VkFlags(AccessFlagBits::eDepthStencilAttachmentRead) | VkFlags(AccessFlagBits::eDepthStencilAttachmentWrite) | VkFlags(AccessFlagBits::eTransferRead) | VkFlags(AccessFlagBits::eTransferWrite) | VkFlags(AccessFlagBits::eHostRead) | VkFlags(AccessFlagBits::eHostWrite) | VkFlags(AccessFlagBits::eMemoryRead) | VkFlags(AccessFlagBits::eMemoryWrite) | VkFlags(AccessFlagBits::eConditionalRenderingReadEXT) | VkFlags(AccessFlagBits::eCommandProcessReadNVX) | VkFlags(AccessFlagBits::eCommandProcessWriteNVX) | VkFlags(AccessFlagBits::eColorAttachmentReadNoncoherentEXT) | VkFlags(AccessFlagBits::eShadingRateImageReadNV) | VkFlags(AccessFlagBits::eAccelerationStructureReadNVX) | VkFlags(AccessFlagBits::eAccelerationStructureWriteNVX) + }; + }; + + struct MemoryBarrier + { + MemoryBarrier( AccessFlags srcAccessMask_ = AccessFlags(), + AccessFlags dstAccessMask_ = AccessFlags() ) + : srcAccessMask( srcAccessMask_ ) + , dstAccessMask( dstAccessMask_ ) + { + } + + MemoryBarrier( VkMemoryBarrier const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryBarrier ) ); + } + + MemoryBarrier& operator=( VkMemoryBarrier const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryBarrier ) ); + return *this; + } + MemoryBarrier& setPNext( const void* pNext_ ) + { + 
pNext = pNext_; + return *this; + } + + MemoryBarrier& setSrcAccessMask( AccessFlags srcAccessMask_ ) + { + srcAccessMask = srcAccessMask_; + return *this; + } + + MemoryBarrier& setDstAccessMask( AccessFlags dstAccessMask_ ) + { + dstAccessMask = dstAccessMask_; + return *this; + } + + operator VkMemoryBarrier const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryBarrier &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryBarrier const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcAccessMask == rhs.srcAccessMask ) + && ( dstAccessMask == rhs.dstAccessMask ); + } + + bool operator!=( MemoryBarrier const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryBarrier; + + public: + const void* pNext = nullptr; + AccessFlags srcAccessMask; + AccessFlags dstAccessMask; + }; + static_assert( sizeof( MemoryBarrier ) == sizeof( VkMemoryBarrier ), "struct and wrapper have different size!" ); + + struct BufferMemoryBarrier + { + BufferMemoryBarrier( AccessFlags srcAccessMask_ = AccessFlags(), + AccessFlags dstAccessMask_ = AccessFlags(), + uint32_t srcQueueFamilyIndex_ = 0, + uint32_t dstQueueFamilyIndex_ = 0, + Buffer buffer_ = Buffer(), + DeviceSize offset_ = 0, + DeviceSize size_ = 0 ) + : srcAccessMask( srcAccessMask_ ) + , dstAccessMask( dstAccessMask_ ) + , srcQueueFamilyIndex( srcQueueFamilyIndex_ ) + , dstQueueFamilyIndex( dstQueueFamilyIndex_ ) + , buffer( buffer_ ) + , offset( offset_ ) + , size( size_ ) + { + } + + BufferMemoryBarrier( VkBufferMemoryBarrier const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferMemoryBarrier ) ); + } + + BufferMemoryBarrier& operator=( VkBufferMemoryBarrier const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferMemoryBarrier ) ); + return *this; + } + BufferMemoryBarrier& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BufferMemoryBarrier& setSrcAccessMask( AccessFlags srcAccessMask_ ) + { + srcAccessMask = srcAccessMask_; + return *this; + } + + BufferMemoryBarrier& setDstAccessMask( AccessFlags dstAccessMask_ ) + { + dstAccessMask = dstAccessMask_; + return *this; + } + + BufferMemoryBarrier& setSrcQueueFamilyIndex( uint32_t srcQueueFamilyIndex_ ) + { + srcQueueFamilyIndex = srcQueueFamilyIndex_; + return *this; + } + + BufferMemoryBarrier& setDstQueueFamilyIndex( uint32_t dstQueueFamilyIndex_ ) + { + dstQueueFamilyIndex = dstQueueFamilyIndex_; + return *this; + } + + BufferMemoryBarrier& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + BufferMemoryBarrier& setOffset( DeviceSize offset_ ) + { + offset = offset_; + return *this; + } + + BufferMemoryBarrier& setSize( DeviceSize size_ ) + { + size = size_; + return *this; + } + + operator VkBufferMemoryBarrier const&() const + { + return *reinterpret_cast(this); + } + + operator VkBufferMemoryBarrier &() + { + return *reinterpret_cast(this); + } + + bool operator==( BufferMemoryBarrier const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcAccessMask == rhs.srcAccessMask ) + && ( dstAccessMask == rhs.dstAccessMask ) + && ( srcQueueFamilyIndex == rhs.srcQueueFamilyIndex ) + && ( dstQueueFamilyIndex == rhs.dstQueueFamilyIndex ) + && ( buffer == rhs.buffer ) + && ( offset == rhs.offset ) + && ( size == rhs.size ); + } + + bool operator!=( BufferMemoryBarrier const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = 
StructureType::eBufferMemoryBarrier; + + public: + const void* pNext = nullptr; + AccessFlags srcAccessMask; + AccessFlags dstAccessMask; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + Buffer buffer; + DeviceSize offset; + DeviceSize size; + }; + static_assert( sizeof( BufferMemoryBarrier ) == sizeof( VkBufferMemoryBarrier ), "struct and wrapper have different size!" ); + + enum class BufferUsageFlagBits + { + eTransferSrc = VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + eTransferDst = VK_BUFFER_USAGE_TRANSFER_DST_BIT, + eUniformTexelBuffer = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, + eStorageTexelBuffer = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, + eUniformBuffer = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, + eStorageBuffer = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, + eIndexBuffer = VK_BUFFER_USAGE_INDEX_BUFFER_BIT, + eVertexBuffer = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, + eIndirectBuffer = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, + eConditionalRenderingEXT = VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT, + eRaytracingNVX = VK_BUFFER_USAGE_RAYTRACING_BIT_NVX + }; + + using BufferUsageFlags = Flags; + + VULKAN_HPP_INLINE BufferUsageFlags operator|( BufferUsageFlagBits bit0, BufferUsageFlagBits bit1 ) + { + return BufferUsageFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE BufferUsageFlags operator~( BufferUsageFlagBits bits ) + { + return ~( BufferUsageFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(BufferUsageFlagBits::eTransferSrc) | VkFlags(BufferUsageFlagBits::eTransferDst) | VkFlags(BufferUsageFlagBits::eUniformTexelBuffer) | VkFlags(BufferUsageFlagBits::eStorageTexelBuffer) | VkFlags(BufferUsageFlagBits::eUniformBuffer) | VkFlags(BufferUsageFlagBits::eStorageBuffer) | VkFlags(BufferUsageFlagBits::eIndexBuffer) | VkFlags(BufferUsageFlagBits::eVertexBuffer) | VkFlags(BufferUsageFlagBits::eIndirectBuffer) | VkFlags(BufferUsageFlagBits::eConditionalRenderingEXT) | VkFlags(BufferUsageFlagBits::eRaytracingNVX) + }; + }; + + enum class BufferCreateFlagBits + { + eSparseBinding = VK_BUFFER_CREATE_SPARSE_BINDING_BIT, + eSparseResidency = VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT, + eSparseAliased = VK_BUFFER_CREATE_SPARSE_ALIASED_BIT, + eProtected = VK_BUFFER_CREATE_PROTECTED_BIT + }; + + using BufferCreateFlags = Flags; + + VULKAN_HPP_INLINE BufferCreateFlags operator|( BufferCreateFlagBits bit0, BufferCreateFlagBits bit1 ) + { + return BufferCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE BufferCreateFlags operator~( BufferCreateFlagBits bits ) + { + return ~( BufferCreateFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(BufferCreateFlagBits::eSparseBinding) | VkFlags(BufferCreateFlagBits::eSparseResidency) | VkFlags(BufferCreateFlagBits::eSparseAliased) | VkFlags(BufferCreateFlagBits::eProtected) + }; + }; + + struct BufferCreateInfo + { + BufferCreateInfo( BufferCreateFlags flags_ = BufferCreateFlags(), + DeviceSize size_ = 0, + BufferUsageFlags usage_ = BufferUsageFlags(), + SharingMode sharingMode_ = SharingMode::eExclusive, + uint32_t queueFamilyIndexCount_ = 0, + const uint32_t* pQueueFamilyIndices_ = nullptr ) + : flags( flags_ ) + , size( size_ ) + , usage( usage_ ) + , sharingMode( sharingMode_ ) + , queueFamilyIndexCount( queueFamilyIndexCount_ ) + , pQueueFamilyIndices( pQueueFamilyIndices_ ) + { + } + + BufferCreateInfo( VkBufferCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferCreateInfo ) ); + } + + BufferCreateInfo& operator=( VkBufferCreateInfo const & rhs ) + { + memcpy( this, &rhs, 
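+  /*
+    Illustrative sketch, not part of the generated header: a BufferMemoryBarrier built with
+    the setters above, e.g. to make a host write visible to a subsequent transfer read.
+    stagingBuffer is an assumed Buffer handle; the barrier would then be passed to
+    CommandBuffer::pipelineBarrier, which is declared elsewhere in this header.
+
+      auto barrier = BufferMemoryBarrier()
+          .setSrcAccessMask( AccessFlagBits::eHostWrite )
+          .setDstAccessMask( AccessFlagBits::eTransferRead )
+          .setSrcQueueFamilyIndex( VK_QUEUE_FAMILY_IGNORED )
+          .setDstQueueFamilyIndex( VK_QUEUE_FAMILY_IGNORED )
+          .setBuffer( stagingBuffer )
+          .setOffset( 0 )
+          .setSize( VK_WHOLE_SIZE );
+  */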
sizeof( BufferCreateInfo ) ); + return *this; + } + BufferCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BufferCreateInfo& setFlags( BufferCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + BufferCreateInfo& setSize( DeviceSize size_ ) + { + size = size_; + return *this; + } + + BufferCreateInfo& setUsage( BufferUsageFlags usage_ ) + { + usage = usage_; + return *this; + } + + BufferCreateInfo& setSharingMode( SharingMode sharingMode_ ) + { + sharingMode = sharingMode_; + return *this; + } + + BufferCreateInfo& setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ ) + { + queueFamilyIndexCount = queueFamilyIndexCount_; + return *this; + } + + BufferCreateInfo& setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ ) + { + pQueueFamilyIndices = pQueueFamilyIndices_; + return *this; + } + + operator VkBufferCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkBufferCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( BufferCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( size == rhs.size ) + && ( usage == rhs.usage ) + && ( sharingMode == rhs.sharingMode ) + && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount ) + && ( pQueueFamilyIndices == rhs.pQueueFamilyIndices ); + } + + bool operator!=( BufferCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eBufferCreateInfo; + + public: + const void* pNext = nullptr; + BufferCreateFlags flags; + DeviceSize size; + BufferUsageFlags usage; + SharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + }; + static_assert( sizeof( BufferCreateInfo ) == sizeof( VkBufferCreateInfo ), "struct and wrapper have different size!" 
); + + enum class ShaderStageFlagBits + { + eVertex = VK_SHADER_STAGE_VERTEX_BIT, + eTessellationControl = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, + eTessellationEvaluation = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, + eGeometry = VK_SHADER_STAGE_GEOMETRY_BIT, + eFragment = VK_SHADER_STAGE_FRAGMENT_BIT, + eCompute = VK_SHADER_STAGE_COMPUTE_BIT, + eAllGraphics = VK_SHADER_STAGE_ALL_GRAPHICS, + eAll = VK_SHADER_STAGE_ALL, + eRaygenNVX = VK_SHADER_STAGE_RAYGEN_BIT_NVX, + eAnyHitNVX = VK_SHADER_STAGE_ANY_HIT_BIT_NVX, + eClosestHitNVX = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NVX, + eMissNVX = VK_SHADER_STAGE_MISS_BIT_NVX, + eIntersectionNVX = VK_SHADER_STAGE_INTERSECTION_BIT_NVX, + eCallableNVX = VK_SHADER_STAGE_CALLABLE_BIT_NVX, + eTaskNV = VK_SHADER_STAGE_TASK_BIT_NV, + eMeshNV = VK_SHADER_STAGE_MESH_BIT_NV + }; + + using ShaderStageFlags = Flags; + + VULKAN_HPP_INLINE ShaderStageFlags operator|( ShaderStageFlagBits bit0, ShaderStageFlagBits bit1 ) + { + return ShaderStageFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ShaderStageFlags operator~( ShaderStageFlagBits bits ) + { + return ~( ShaderStageFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ShaderStageFlagBits::eVertex) | VkFlags(ShaderStageFlagBits::eTessellationControl) | VkFlags(ShaderStageFlagBits::eTessellationEvaluation) | VkFlags(ShaderStageFlagBits::eGeometry) | VkFlags(ShaderStageFlagBits::eFragment) | VkFlags(ShaderStageFlagBits::eCompute) | VkFlags(ShaderStageFlagBits::eAllGraphics) | VkFlags(ShaderStageFlagBits::eAll) | VkFlags(ShaderStageFlagBits::eRaygenNVX) | VkFlags(ShaderStageFlagBits::eAnyHitNVX) | VkFlags(ShaderStageFlagBits::eClosestHitNVX) | VkFlags(ShaderStageFlagBits::eMissNVX) | VkFlags(ShaderStageFlagBits::eIntersectionNVX) | VkFlags(ShaderStageFlagBits::eCallableNVX) | VkFlags(ShaderStageFlagBits::eTaskNV) | VkFlags(ShaderStageFlagBits::eMeshNV) + }; + }; + + struct DescriptorSetLayoutBinding + { + DescriptorSetLayoutBinding( uint32_t binding_ = 0, + DescriptorType descriptorType_ = DescriptorType::eSampler, + uint32_t descriptorCount_ = 0, + ShaderStageFlags stageFlags_ = ShaderStageFlags(), + const Sampler* pImmutableSamplers_ = nullptr ) + : binding( binding_ ) + , descriptorType( descriptorType_ ) + , descriptorCount( descriptorCount_ ) + , stageFlags( stageFlags_ ) + , pImmutableSamplers( pImmutableSamplers_ ) + { + } + + DescriptorSetLayoutBinding( VkDescriptorSetLayoutBinding const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetLayoutBinding ) ); + } + + DescriptorSetLayoutBinding& operator=( VkDescriptorSetLayoutBinding const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetLayoutBinding ) ); + return *this; + } + DescriptorSetLayoutBinding& setBinding( uint32_t binding_ ) + { + binding = binding_; + return *this; + } + + DescriptorSetLayoutBinding& setDescriptorType( DescriptorType descriptorType_ ) + { + descriptorType = descriptorType_; + return *this; + } + + DescriptorSetLayoutBinding& setDescriptorCount( uint32_t descriptorCount_ ) + { + descriptorCount = descriptorCount_; + return *this; + } + + DescriptorSetLayoutBinding& setStageFlags( ShaderStageFlags stageFlags_ ) + { + stageFlags = stageFlags_; + return *this; + } + + DescriptorSetLayoutBinding& setPImmutableSamplers( const Sampler* pImmutableSamplers_ ) + { + pImmutableSamplers = pImmutableSamplers_; + return *this; + } + + operator VkDescriptorSetLayoutBinding const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorSetLayoutBinding &() + { + return 
*reinterpret_cast(this); + } + + bool operator==( DescriptorSetLayoutBinding const& rhs ) const + { + return ( binding == rhs.binding ) + && ( descriptorType == rhs.descriptorType ) + && ( descriptorCount == rhs.descriptorCount ) + && ( stageFlags == rhs.stageFlags ) + && ( pImmutableSamplers == rhs.pImmutableSamplers ); + } + + bool operator!=( DescriptorSetLayoutBinding const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t binding; + DescriptorType descriptorType; + uint32_t descriptorCount; + ShaderStageFlags stageFlags; + const Sampler* pImmutableSamplers; + }; + static_assert( sizeof( DescriptorSetLayoutBinding ) == sizeof( VkDescriptorSetLayoutBinding ), "struct and wrapper have different size!" ); + + struct PipelineShaderStageCreateInfo + { + PipelineShaderStageCreateInfo( PipelineShaderStageCreateFlags flags_ = PipelineShaderStageCreateFlags(), + ShaderStageFlagBits stage_ = ShaderStageFlagBits::eVertex, + ShaderModule module_ = ShaderModule(), + const char* pName_ = nullptr, + const SpecializationInfo* pSpecializationInfo_ = nullptr ) + : flags( flags_ ) + , stage( stage_ ) + , module( module_ ) + , pName( pName_ ) + , pSpecializationInfo( pSpecializationInfo_ ) + { + } + + PipelineShaderStageCreateInfo( VkPipelineShaderStageCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineShaderStageCreateInfo ) ); + } + + PipelineShaderStageCreateInfo& operator=( VkPipelineShaderStageCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineShaderStageCreateInfo ) ); + return *this; + } + PipelineShaderStageCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineShaderStageCreateInfo& setFlags( PipelineShaderStageCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineShaderStageCreateInfo& setStage( ShaderStageFlagBits stage_ ) + { + stage = stage_; + return *this; + } + + PipelineShaderStageCreateInfo& setModule( ShaderModule module_ ) + { + module = module_; + return *this; + } + + PipelineShaderStageCreateInfo& setPName( const char* pName_ ) + { + pName = pName_; + return *this; + } + + PipelineShaderStageCreateInfo& setPSpecializationInfo( const SpecializationInfo* pSpecializationInfo_ ) + { + pSpecializationInfo = pSpecializationInfo_; + return *this; + } + + operator VkPipelineShaderStageCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineShaderStageCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineShaderStageCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( stage == rhs.stage ) + && ( module == rhs.module ) + && ( pName == rhs.pName ) + && ( pSpecializationInfo == rhs.pSpecializationInfo ); + } + + bool operator!=( PipelineShaderStageCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineShaderStageCreateInfo; + + public: + const void* pNext = nullptr; + PipelineShaderStageCreateFlags flags; + ShaderStageFlagBits stage; + ShaderModule module; + const char* pName; + const SpecializationInfo* pSpecializationInfo; + }; + static_assert( sizeof( PipelineShaderStageCreateInfo ) == sizeof( VkPipelineShaderStageCreateInfo ), "struct and wrapper have different size!" 
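+  /*
+    Illustrative sketch, not part of the generated header: a descriptor binding and a shader
+    stage description as a caller might build them.  vertModule is an assumed ShaderModule
+    created from SPIR-V elsewhere, and DescriptorType is declared earlier in this header.
+
+      auto uboBinding = DescriptorSetLayoutBinding()
+          .setBinding( 0 )
+          .setDescriptorType( DescriptorType::eUniformBuffer )
+          .setDescriptorCount( 1 )
+          .setStageFlags( ShaderStageFlagBits::eVertex | ShaderStageFlagBits::eFragment );
+
+      auto vertStage = PipelineShaderStageCreateInfo()
+          .setStage( ShaderStageFlagBits::eVertex )
+          .setModule( vertModule )
+          .setPName( "main" );
+  */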
); + + struct PushConstantRange + { + PushConstantRange( ShaderStageFlags stageFlags_ = ShaderStageFlags(), + uint32_t offset_ = 0, + uint32_t size_ = 0 ) + : stageFlags( stageFlags_ ) + , offset( offset_ ) + , size( size_ ) + { + } + + PushConstantRange( VkPushConstantRange const & rhs ) + { + memcpy( this, &rhs, sizeof( PushConstantRange ) ); + } + + PushConstantRange& operator=( VkPushConstantRange const & rhs ) + { + memcpy( this, &rhs, sizeof( PushConstantRange ) ); + return *this; + } + PushConstantRange& setStageFlags( ShaderStageFlags stageFlags_ ) + { + stageFlags = stageFlags_; + return *this; + } + + PushConstantRange& setOffset( uint32_t offset_ ) + { + offset = offset_; + return *this; + } + + PushConstantRange& setSize( uint32_t size_ ) + { + size = size_; + return *this; + } + + operator VkPushConstantRange const&() const + { + return *reinterpret_cast(this); + } + + operator VkPushConstantRange &() + { + return *reinterpret_cast(this); + } + + bool operator==( PushConstantRange const& rhs ) const + { + return ( stageFlags == rhs.stageFlags ) + && ( offset == rhs.offset ) + && ( size == rhs.size ); + } + + bool operator!=( PushConstantRange const& rhs ) const + { + return !operator==( rhs ); + } + + ShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; + }; + static_assert( sizeof( PushConstantRange ) == sizeof( VkPushConstantRange ), "struct and wrapper have different size!" ); + + struct PipelineLayoutCreateInfo + { + PipelineLayoutCreateInfo( PipelineLayoutCreateFlags flags_ = PipelineLayoutCreateFlags(), + uint32_t setLayoutCount_ = 0, + const DescriptorSetLayout* pSetLayouts_ = nullptr, + uint32_t pushConstantRangeCount_ = 0, + const PushConstantRange* pPushConstantRanges_ = nullptr ) + : flags( flags_ ) + , setLayoutCount( setLayoutCount_ ) + , pSetLayouts( pSetLayouts_ ) + , pushConstantRangeCount( pushConstantRangeCount_ ) + , pPushConstantRanges( pPushConstantRanges_ ) + { + } + + PipelineLayoutCreateInfo( VkPipelineLayoutCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineLayoutCreateInfo ) ); + } + + PipelineLayoutCreateInfo& operator=( VkPipelineLayoutCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineLayoutCreateInfo ) ); + return *this; + } + PipelineLayoutCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineLayoutCreateInfo& setFlags( PipelineLayoutCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineLayoutCreateInfo& setSetLayoutCount( uint32_t setLayoutCount_ ) + { + setLayoutCount = setLayoutCount_; + return *this; + } + + PipelineLayoutCreateInfo& setPSetLayouts( const DescriptorSetLayout* pSetLayouts_ ) + { + pSetLayouts = pSetLayouts_; + return *this; + } + + PipelineLayoutCreateInfo& setPushConstantRangeCount( uint32_t pushConstantRangeCount_ ) + { + pushConstantRangeCount = pushConstantRangeCount_; + return *this; + } + + PipelineLayoutCreateInfo& setPPushConstantRanges( const PushConstantRange* pPushConstantRanges_ ) + { + pPushConstantRanges = pPushConstantRanges_; + return *this; + } + + operator VkPipelineLayoutCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineLayoutCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineLayoutCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( setLayoutCount == rhs.setLayoutCount ) + && ( pSetLayouts == rhs.pSetLayouts ) + && ( pushConstantRangeCount == 
rhs.pushConstantRangeCount ) + && ( pPushConstantRanges == rhs.pPushConstantRanges ); + } + + bool operator!=( PipelineLayoutCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineLayoutCreateInfo; + + public: + const void* pNext = nullptr; + PipelineLayoutCreateFlags flags; + uint32_t setLayoutCount; + const DescriptorSetLayout* pSetLayouts; + uint32_t pushConstantRangeCount; + const PushConstantRange* pPushConstantRanges; + }; + static_assert( sizeof( PipelineLayoutCreateInfo ) == sizeof( VkPipelineLayoutCreateInfo ), "struct and wrapper have different size!" ); + + struct ShaderStatisticsInfoAMD + { + operator VkShaderStatisticsInfoAMD const&() const + { + return *reinterpret_cast(this); + } + + operator VkShaderStatisticsInfoAMD &() + { + return *reinterpret_cast(this); + } + + bool operator==( ShaderStatisticsInfoAMD const& rhs ) const + { + return ( shaderStageMask == rhs.shaderStageMask ) + && ( resourceUsage == rhs.resourceUsage ) + && ( numPhysicalVgprs == rhs.numPhysicalVgprs ) + && ( numPhysicalSgprs == rhs.numPhysicalSgprs ) + && ( numAvailableVgprs == rhs.numAvailableVgprs ) + && ( numAvailableSgprs == rhs.numAvailableSgprs ) + && ( memcmp( computeWorkGroupSize, rhs.computeWorkGroupSize, 3 * sizeof( uint32_t ) ) == 0 ); + } + + bool operator!=( ShaderStatisticsInfoAMD const& rhs ) const + { + return !operator==( rhs ); + } + + ShaderStageFlags shaderStageMask; + ShaderResourceUsageAMD resourceUsage; + uint32_t numPhysicalVgprs; + uint32_t numPhysicalSgprs; + uint32_t numAvailableVgprs; + uint32_t numAvailableSgprs; + uint32_t computeWorkGroupSize[3]; + }; + static_assert( sizeof( ShaderStatisticsInfoAMD ) == sizeof( VkShaderStatisticsInfoAMD ), "struct and wrapper have different size!" 
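+  /*
+    Illustrative sketch, not part of the generated header: a PushConstantRange and a
+    PipelineLayoutCreateInfo wired together.  setLayout is an assumed DescriptorSetLayout
+    created earlier, and Device::createPipelineLayout is declared elsewhere in this header.
+
+      auto pushRange = PushConstantRange()
+          .setStageFlags( ShaderStageFlagBits::eFragment )
+          .setOffset( 0 )
+          .setSize( 16 );
+
+      auto layoutInfo = PipelineLayoutCreateInfo()
+          .setSetLayoutCount( 1 )
+          .setPSetLayouts( &setLayout )
+          .setPushConstantRangeCount( 1 )
+          .setPPushConstantRanges( &pushRange );
+
+      PipelineLayout pipelineLayout = device.createPipelineLayout( layoutInfo );
+  */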
); + + enum class ImageUsageFlagBits + { + eTransferSrc = VK_IMAGE_USAGE_TRANSFER_SRC_BIT, + eTransferDst = VK_IMAGE_USAGE_TRANSFER_DST_BIT, + eSampled = VK_IMAGE_USAGE_SAMPLED_BIT, + eStorage = VK_IMAGE_USAGE_STORAGE_BIT, + eColorAttachment = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, + eDepthStencilAttachment = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, + eTransientAttachment = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT, + eInputAttachment = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, + eShadingRateImageNV = VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV + }; + + using ImageUsageFlags = Flags; + + VULKAN_HPP_INLINE ImageUsageFlags operator|( ImageUsageFlagBits bit0, ImageUsageFlagBits bit1 ) + { + return ImageUsageFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ImageUsageFlags operator~( ImageUsageFlagBits bits ) + { + return ~( ImageUsageFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ImageUsageFlagBits::eTransferSrc) | VkFlags(ImageUsageFlagBits::eTransferDst) | VkFlags(ImageUsageFlagBits::eSampled) | VkFlags(ImageUsageFlagBits::eStorage) | VkFlags(ImageUsageFlagBits::eColorAttachment) | VkFlags(ImageUsageFlagBits::eDepthStencilAttachment) | VkFlags(ImageUsageFlagBits::eTransientAttachment) | VkFlags(ImageUsageFlagBits::eInputAttachment) | VkFlags(ImageUsageFlagBits::eShadingRateImageNV) + }; + }; + + struct SharedPresentSurfaceCapabilitiesKHR + { + operator VkSharedPresentSurfaceCapabilitiesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSharedPresentSurfaceCapabilitiesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SharedPresentSurfaceCapabilitiesKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sharedPresentSupportedUsageFlags == rhs.sharedPresentSupportedUsageFlags ); + } + + bool operator!=( SharedPresentSurfaceCapabilitiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSharedPresentSurfaceCapabilitiesKHR; + + public: + void* pNext = nullptr; + ImageUsageFlags sharedPresentSupportedUsageFlags; + }; + static_assert( sizeof( SharedPresentSurfaceCapabilitiesKHR ) == sizeof( VkSharedPresentSurfaceCapabilitiesKHR ), "struct and wrapper have different size!" 
); + + struct ImageViewUsageCreateInfo + { + ImageViewUsageCreateInfo( ImageUsageFlags usage_ = ImageUsageFlags() ) + : usage( usage_ ) + { + } + + ImageViewUsageCreateInfo( VkImageViewUsageCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageViewUsageCreateInfo ) ); + } + + ImageViewUsageCreateInfo& operator=( VkImageViewUsageCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageViewUsageCreateInfo ) ); + return *this; + } + ImageViewUsageCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImageViewUsageCreateInfo& setUsage( ImageUsageFlags usage_ ) + { + usage = usage_; + return *this; + } + + operator VkImageViewUsageCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageViewUsageCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageViewUsageCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( usage == rhs.usage ); + } + + bool operator!=( ImageViewUsageCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageViewUsageCreateInfo; + + public: + const void* pNext = nullptr; + ImageUsageFlags usage; + }; + static_assert( sizeof( ImageViewUsageCreateInfo ) == sizeof( VkImageViewUsageCreateInfo ), "struct and wrapper have different size!" ); + + using ImageViewUsageCreateInfoKHR = ImageViewUsageCreateInfo; + + enum class ImageCreateFlagBits + { + eSparseBinding = VK_IMAGE_CREATE_SPARSE_BINDING_BIT, + eSparseResidency = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, + eSparseAliased = VK_IMAGE_CREATE_SPARSE_ALIASED_BIT, + eMutableFormat = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, + eCubeCompatible = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, + eAlias = VK_IMAGE_CREATE_ALIAS_BIT, + eAliasKHR = VK_IMAGE_CREATE_ALIAS_BIT, + eSplitInstanceBindRegions = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, + eSplitInstanceBindRegionsKHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, + e2DArrayCompatible = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, + e2DArrayCompatibleKHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, + eBlockTexelViewCompatible = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, + eBlockTexelViewCompatibleKHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, + eExtendedUsage = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, + eExtendedUsageKHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, + eProtected = VK_IMAGE_CREATE_PROTECTED_BIT, + eDisjoint = VK_IMAGE_CREATE_DISJOINT_BIT, + eDisjointKHR = VK_IMAGE_CREATE_DISJOINT_BIT, + eCornerSampledNV = VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV, + eSampleLocationsCompatibleDepthEXT = VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT + }; + + using ImageCreateFlags = Flags; + + VULKAN_HPP_INLINE ImageCreateFlags operator|( ImageCreateFlagBits bit0, ImageCreateFlagBits bit1 ) + { + return ImageCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ImageCreateFlags operator~( ImageCreateFlagBits bits ) + { + return ~( ImageCreateFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ImageCreateFlagBits::eSparseBinding) | VkFlags(ImageCreateFlagBits::eSparseResidency) | VkFlags(ImageCreateFlagBits::eSparseAliased) | VkFlags(ImageCreateFlagBits::eMutableFormat) | VkFlags(ImageCreateFlagBits::eCubeCompatible) | VkFlags(ImageCreateFlagBits::eAlias) | VkFlags(ImageCreateFlagBits::eSplitInstanceBindRegions) | VkFlags(ImageCreateFlagBits::e2DArrayCompatible) | VkFlags(ImageCreateFlagBits::eBlockTexelViewCompatible) | 
VkFlags(ImageCreateFlagBits::eExtendedUsage) | VkFlags(ImageCreateFlagBits::eProtected) | VkFlags(ImageCreateFlagBits::eDisjoint) | VkFlags(ImageCreateFlagBits::eCornerSampledNV) | VkFlags(ImageCreateFlagBits::eSampleLocationsCompatibleDepthEXT) + }; + }; + + struct PhysicalDeviceImageFormatInfo2 + { + PhysicalDeviceImageFormatInfo2( Format format_ = Format::eUndefined, + ImageType type_ = ImageType::e1D, + ImageTiling tiling_ = ImageTiling::eOptimal, + ImageUsageFlags usage_ = ImageUsageFlags(), + ImageCreateFlags flags_ = ImageCreateFlags() ) + : format( format_ ) + , type( type_ ) + , tiling( tiling_ ) + , usage( usage_ ) + , flags( flags_ ) + { + } + + PhysicalDeviceImageFormatInfo2( VkPhysicalDeviceImageFormatInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceImageFormatInfo2 ) ); + } + + PhysicalDeviceImageFormatInfo2& operator=( VkPhysicalDeviceImageFormatInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceImageFormatInfo2 ) ); + return *this; + } + PhysicalDeviceImageFormatInfo2& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceImageFormatInfo2& setFormat( Format format_ ) + { + format = format_; + return *this; + } + + PhysicalDeviceImageFormatInfo2& setType( ImageType type_ ) + { + type = type_; + return *this; + } + + PhysicalDeviceImageFormatInfo2& setTiling( ImageTiling tiling_ ) + { + tiling = tiling_; + return *this; + } + + PhysicalDeviceImageFormatInfo2& setUsage( ImageUsageFlags usage_ ) + { + usage = usage_; + return *this; + } + + PhysicalDeviceImageFormatInfo2& setFlags( ImageCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + operator VkPhysicalDeviceImageFormatInfo2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceImageFormatInfo2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceImageFormatInfo2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( format == rhs.format ) + && ( type == rhs.type ) + && ( tiling == rhs.tiling ) + && ( usage == rhs.usage ) + && ( flags == rhs.flags ); + } + + bool operator!=( PhysicalDeviceImageFormatInfo2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceImageFormatInfo2; + + public: + const void* pNext = nullptr; + Format format; + ImageType type; + ImageTiling tiling; + ImageUsageFlags usage; + ImageCreateFlags flags; + }; + static_assert( sizeof( PhysicalDeviceImageFormatInfo2 ) == sizeof( VkPhysicalDeviceImageFormatInfo2 ), "struct and wrapper have different size!" 
); + + using PhysicalDeviceImageFormatInfo2KHR = PhysicalDeviceImageFormatInfo2; + + enum class PipelineCreateFlagBits + { + eDisableOptimization = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, + eAllowDerivatives = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT, + eDerivative = VK_PIPELINE_CREATE_DERIVATIVE_BIT, + eViewIndexFromDeviceIndex = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + eViewIndexFromDeviceIndexKHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + eDispatchBase = VK_PIPELINE_CREATE_DISPATCH_BASE, + eDispatchBaseKHR = VK_PIPELINE_CREATE_DISPATCH_BASE, + eDeferCompileNVX = VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NVX + }; + + using PipelineCreateFlags = Flags; + + VULKAN_HPP_INLINE PipelineCreateFlags operator|( PipelineCreateFlagBits bit0, PipelineCreateFlagBits bit1 ) + { + return PipelineCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE PipelineCreateFlags operator~( PipelineCreateFlagBits bits ) + { + return ~( PipelineCreateFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(PipelineCreateFlagBits::eDisableOptimization) | VkFlags(PipelineCreateFlagBits::eAllowDerivatives) | VkFlags(PipelineCreateFlagBits::eDerivative) | VkFlags(PipelineCreateFlagBits::eViewIndexFromDeviceIndex) | VkFlags(PipelineCreateFlagBits::eDispatchBase) | VkFlags(PipelineCreateFlagBits::eDeferCompileNVX) + }; + }; + + struct ComputePipelineCreateInfo + { + ComputePipelineCreateInfo( PipelineCreateFlags flags_ = PipelineCreateFlags(), + PipelineShaderStageCreateInfo stage_ = PipelineShaderStageCreateInfo(), + PipelineLayout layout_ = PipelineLayout(), + Pipeline basePipelineHandle_ = Pipeline(), + int32_t basePipelineIndex_ = 0 ) + : flags( flags_ ) + , stage( stage_ ) + , layout( layout_ ) + , basePipelineHandle( basePipelineHandle_ ) + , basePipelineIndex( basePipelineIndex_ ) + { + } + + ComputePipelineCreateInfo( VkComputePipelineCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ComputePipelineCreateInfo ) ); + } + + ComputePipelineCreateInfo& operator=( VkComputePipelineCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ComputePipelineCreateInfo ) ); + return *this; + } + ComputePipelineCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ComputePipelineCreateInfo& setFlags( PipelineCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + ComputePipelineCreateInfo& setStage( PipelineShaderStageCreateInfo stage_ ) + { + stage = stage_; + return *this; + } + + ComputePipelineCreateInfo& setLayout( PipelineLayout layout_ ) + { + layout = layout_; + return *this; + } + + ComputePipelineCreateInfo& setBasePipelineHandle( Pipeline basePipelineHandle_ ) + { + basePipelineHandle = basePipelineHandle_; + return *this; + } + + ComputePipelineCreateInfo& setBasePipelineIndex( int32_t basePipelineIndex_ ) + { + basePipelineIndex = basePipelineIndex_; + return *this; + } + + operator VkComputePipelineCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkComputePipelineCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ComputePipelineCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( stage == rhs.stage ) + && ( layout == rhs.layout ) + && ( basePipelineHandle == rhs.basePipelineHandle ) + && ( basePipelineIndex == rhs.basePipelineIndex ); + } + + bool operator!=( ComputePipelineCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + 
StructureType sType = StructureType::eComputePipelineCreateInfo; + + public: + const void* pNext = nullptr; + PipelineCreateFlags flags; + PipelineShaderStageCreateInfo stage; + PipelineLayout layout; + Pipeline basePipelineHandle; + int32_t basePipelineIndex; + }; + static_assert( sizeof( ComputePipelineCreateInfo ) == sizeof( VkComputePipelineCreateInfo ), "struct and wrapper have different size!" ); + + struct RaytracingPipelineCreateInfoNVX + { + RaytracingPipelineCreateInfoNVX( PipelineCreateFlags flags_ = PipelineCreateFlags(), + uint32_t stageCount_ = 0, + const PipelineShaderStageCreateInfo* pStages_ = nullptr, + const uint32_t* pGroupNumbers_ = nullptr, + uint32_t maxRecursionDepth_ = 0, + PipelineLayout layout_ = PipelineLayout(), + Pipeline basePipelineHandle_ = Pipeline(), + int32_t basePipelineIndex_ = 0 ) + : flags( flags_ ) + , stageCount( stageCount_ ) + , pStages( pStages_ ) + , pGroupNumbers( pGroupNumbers_ ) + , maxRecursionDepth( maxRecursionDepth_ ) + , layout( layout_ ) + , basePipelineHandle( basePipelineHandle_ ) + , basePipelineIndex( basePipelineIndex_ ) + { + } + + RaytracingPipelineCreateInfoNVX( VkRaytracingPipelineCreateInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( RaytracingPipelineCreateInfoNVX ) ); + } + + RaytracingPipelineCreateInfoNVX& operator=( VkRaytracingPipelineCreateInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( RaytracingPipelineCreateInfoNVX ) ); + return *this; + } + RaytracingPipelineCreateInfoNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + RaytracingPipelineCreateInfoNVX& setFlags( PipelineCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + RaytracingPipelineCreateInfoNVX& setStageCount( uint32_t stageCount_ ) + { + stageCount = stageCount_; + return *this; + } + + RaytracingPipelineCreateInfoNVX& setPStages( const PipelineShaderStageCreateInfo* pStages_ ) + { + pStages = pStages_; + return *this; + } + + RaytracingPipelineCreateInfoNVX& setPGroupNumbers( const uint32_t* pGroupNumbers_ ) + { + pGroupNumbers = pGroupNumbers_; + return *this; + } + + RaytracingPipelineCreateInfoNVX& setMaxRecursionDepth( uint32_t maxRecursionDepth_ ) + { + maxRecursionDepth = maxRecursionDepth_; + return *this; + } + + RaytracingPipelineCreateInfoNVX& setLayout( PipelineLayout layout_ ) + { + layout = layout_; + return *this; + } + + RaytracingPipelineCreateInfoNVX& setBasePipelineHandle( Pipeline basePipelineHandle_ ) + { + basePipelineHandle = basePipelineHandle_; + return *this; + } + + RaytracingPipelineCreateInfoNVX& setBasePipelineIndex( int32_t basePipelineIndex_ ) + { + basePipelineIndex = basePipelineIndex_; + return *this; + } + + operator VkRaytracingPipelineCreateInfoNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkRaytracingPipelineCreateInfoNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( RaytracingPipelineCreateInfoNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( stageCount == rhs.stageCount ) + && ( pStages == rhs.pStages ) + && ( pGroupNumbers == rhs.pGroupNumbers ) + && ( maxRecursionDepth == rhs.maxRecursionDepth ) + && ( layout == rhs.layout ) + && ( basePipelineHandle == rhs.basePipelineHandle ) + && ( basePipelineIndex == rhs.basePipelineIndex ); + } + + bool operator!=( RaytracingPipelineCreateInfoNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eRaytracingPipelineCreateInfoNVX; + + public: 
+ const void* pNext = nullptr; + PipelineCreateFlags flags; + uint32_t stageCount; + const PipelineShaderStageCreateInfo* pStages; + const uint32_t* pGroupNumbers; + uint32_t maxRecursionDepth; + PipelineLayout layout; + Pipeline basePipelineHandle; + int32_t basePipelineIndex; + }; + static_assert( sizeof( RaytracingPipelineCreateInfoNVX ) == sizeof( VkRaytracingPipelineCreateInfoNVX ), "struct and wrapper have different size!" ); + + enum class ColorComponentFlagBits + { + eR = VK_COLOR_COMPONENT_R_BIT, + eG = VK_COLOR_COMPONENT_G_BIT, + eB = VK_COLOR_COMPONENT_B_BIT, + eA = VK_COLOR_COMPONENT_A_BIT + }; + + using ColorComponentFlags = Flags; + + VULKAN_HPP_INLINE ColorComponentFlags operator|( ColorComponentFlagBits bit0, ColorComponentFlagBits bit1 ) + { + return ColorComponentFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ColorComponentFlags operator~( ColorComponentFlagBits bits ) + { + return ~( ColorComponentFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ColorComponentFlagBits::eR) | VkFlags(ColorComponentFlagBits::eG) | VkFlags(ColorComponentFlagBits::eB) | VkFlags(ColorComponentFlagBits::eA) + }; + }; + + struct PipelineColorBlendAttachmentState + { + PipelineColorBlendAttachmentState( Bool32 blendEnable_ = 0, + BlendFactor srcColorBlendFactor_ = BlendFactor::eZero, + BlendFactor dstColorBlendFactor_ = BlendFactor::eZero, + BlendOp colorBlendOp_ = BlendOp::eAdd, + BlendFactor srcAlphaBlendFactor_ = BlendFactor::eZero, + BlendFactor dstAlphaBlendFactor_ = BlendFactor::eZero, + BlendOp alphaBlendOp_ = BlendOp::eAdd, + ColorComponentFlags colorWriteMask_ = ColorComponentFlags() ) + : blendEnable( blendEnable_ ) + , srcColorBlendFactor( srcColorBlendFactor_ ) + , dstColorBlendFactor( dstColorBlendFactor_ ) + , colorBlendOp( colorBlendOp_ ) + , srcAlphaBlendFactor( srcAlphaBlendFactor_ ) + , dstAlphaBlendFactor( dstAlphaBlendFactor_ ) + , alphaBlendOp( alphaBlendOp_ ) + , colorWriteMask( colorWriteMask_ ) + { + } + + PipelineColorBlendAttachmentState( VkPipelineColorBlendAttachmentState const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineColorBlendAttachmentState ) ); + } + + PipelineColorBlendAttachmentState& operator=( VkPipelineColorBlendAttachmentState const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineColorBlendAttachmentState ) ); + return *this; + } + PipelineColorBlendAttachmentState& setBlendEnable( Bool32 blendEnable_ ) + { + blendEnable = blendEnable_; + return *this; + } + + PipelineColorBlendAttachmentState& setSrcColorBlendFactor( BlendFactor srcColorBlendFactor_ ) + { + srcColorBlendFactor = srcColorBlendFactor_; + return *this; + } + + PipelineColorBlendAttachmentState& setDstColorBlendFactor( BlendFactor dstColorBlendFactor_ ) + { + dstColorBlendFactor = dstColorBlendFactor_; + return *this; + } + + PipelineColorBlendAttachmentState& setColorBlendOp( BlendOp colorBlendOp_ ) + { + colorBlendOp = colorBlendOp_; + return *this; + } + + PipelineColorBlendAttachmentState& setSrcAlphaBlendFactor( BlendFactor srcAlphaBlendFactor_ ) + { + srcAlphaBlendFactor = srcAlphaBlendFactor_; + return *this; + } + + PipelineColorBlendAttachmentState& setDstAlphaBlendFactor( BlendFactor dstAlphaBlendFactor_ ) + { + dstAlphaBlendFactor = dstAlphaBlendFactor_; + return *this; + } + + PipelineColorBlendAttachmentState& setAlphaBlendOp( BlendOp alphaBlendOp_ ) + { + alphaBlendOp = alphaBlendOp_; + return *this; + } + + PipelineColorBlendAttachmentState& setColorWriteMask( ColorComponentFlags colorWriteMask_ ) + { + colorWriteMask = 
colorWriteMask_; + return *this; + } + + operator VkPipelineColorBlendAttachmentState const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineColorBlendAttachmentState &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineColorBlendAttachmentState const& rhs ) const + { + return ( blendEnable == rhs.blendEnable ) + && ( srcColorBlendFactor == rhs.srcColorBlendFactor ) + && ( dstColorBlendFactor == rhs.dstColorBlendFactor ) + && ( colorBlendOp == rhs.colorBlendOp ) + && ( srcAlphaBlendFactor == rhs.srcAlphaBlendFactor ) + && ( dstAlphaBlendFactor == rhs.dstAlphaBlendFactor ) + && ( alphaBlendOp == rhs.alphaBlendOp ) + && ( colorWriteMask == rhs.colorWriteMask ); + } + + bool operator!=( PipelineColorBlendAttachmentState const& rhs ) const + { + return !operator==( rhs ); + } + + Bool32 blendEnable; + BlendFactor srcColorBlendFactor; + BlendFactor dstColorBlendFactor; + BlendOp colorBlendOp; + BlendFactor srcAlphaBlendFactor; + BlendFactor dstAlphaBlendFactor; + BlendOp alphaBlendOp; + ColorComponentFlags colorWriteMask; + }; + static_assert( sizeof( PipelineColorBlendAttachmentState ) == sizeof( VkPipelineColorBlendAttachmentState ), "struct and wrapper have different size!" ); + + struct PipelineColorBlendStateCreateInfo + { + PipelineColorBlendStateCreateInfo( PipelineColorBlendStateCreateFlags flags_ = PipelineColorBlendStateCreateFlags(), + Bool32 logicOpEnable_ = 0, + LogicOp logicOp_ = LogicOp::eClear, + uint32_t attachmentCount_ = 0, + const PipelineColorBlendAttachmentState* pAttachments_ = nullptr, + std::array const& blendConstants_ = { { 0, 0, 0, 0 } } ) + : flags( flags_ ) + , logicOpEnable( logicOpEnable_ ) + , logicOp( logicOp_ ) + , attachmentCount( attachmentCount_ ) + , pAttachments( pAttachments_ ) + { + memcpy( &blendConstants, blendConstants_.data(), 4 * sizeof( float ) ); + } + + PipelineColorBlendStateCreateInfo( VkPipelineColorBlendStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineColorBlendStateCreateInfo ) ); + } + + PipelineColorBlendStateCreateInfo& operator=( VkPipelineColorBlendStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineColorBlendStateCreateInfo ) ); + return *this; + } + PipelineColorBlendStateCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineColorBlendStateCreateInfo& setFlags( PipelineColorBlendStateCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineColorBlendStateCreateInfo& setLogicOpEnable( Bool32 logicOpEnable_ ) + { + logicOpEnable = logicOpEnable_; + return *this; + } + + PipelineColorBlendStateCreateInfo& setLogicOp( LogicOp logicOp_ ) + { + logicOp = logicOp_; + return *this; + } + + PipelineColorBlendStateCreateInfo& setAttachmentCount( uint32_t attachmentCount_ ) + { + attachmentCount = attachmentCount_; + return *this; + } + + PipelineColorBlendStateCreateInfo& setPAttachments( const PipelineColorBlendAttachmentState* pAttachments_ ) + { + pAttachments = pAttachments_; + return *this; + } + + PipelineColorBlendStateCreateInfo& setBlendConstants( std::array blendConstants_ ) + { + memcpy( &blendConstants, blendConstants_.data(), 4 * sizeof( float ) ); + return *this; + } + + operator VkPipelineColorBlendStateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineColorBlendStateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineColorBlendStateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( 
pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( logicOpEnable == rhs.logicOpEnable ) + && ( logicOp == rhs.logicOp ) + && ( attachmentCount == rhs.attachmentCount ) + && ( pAttachments == rhs.pAttachments ) + && ( memcmp( blendConstants, rhs.blendConstants, 4 * sizeof( float ) ) == 0 ); + } + + bool operator!=( PipelineColorBlendStateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineColorBlendStateCreateInfo; + + public: + const void* pNext = nullptr; + PipelineColorBlendStateCreateFlags flags; + Bool32 logicOpEnable; + LogicOp logicOp; + uint32_t attachmentCount; + const PipelineColorBlendAttachmentState* pAttachments; + float blendConstants[4]; + }; + static_assert( sizeof( PipelineColorBlendStateCreateInfo ) == sizeof( VkPipelineColorBlendStateCreateInfo ), "struct and wrapper have different size!" ); + + enum class FenceCreateFlagBits + { + eSignaled = VK_FENCE_CREATE_SIGNALED_BIT + }; + + using FenceCreateFlags = Flags; + + VULKAN_HPP_INLINE FenceCreateFlags operator|( FenceCreateFlagBits bit0, FenceCreateFlagBits bit1 ) + { + return FenceCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE FenceCreateFlags operator~( FenceCreateFlagBits bits ) + { + return ~( FenceCreateFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(FenceCreateFlagBits::eSignaled) + }; + }; + + struct FenceCreateInfo + { + FenceCreateInfo( FenceCreateFlags flags_ = FenceCreateFlags() ) + : flags( flags_ ) + { + } + + FenceCreateInfo( VkFenceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( FenceCreateInfo ) ); + } + + FenceCreateInfo& operator=( VkFenceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( FenceCreateInfo ) ); + return *this; + } + FenceCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + FenceCreateInfo& setFlags( FenceCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + operator VkFenceCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkFenceCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( FenceCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ); + } + + bool operator!=( FenceCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eFenceCreateInfo; + + public: + const void* pNext = nullptr; + FenceCreateFlags flags; + }; + static_assert( sizeof( FenceCreateInfo ) == sizeof( VkFenceCreateInfo ), "struct and wrapper have different size!" 
); + + enum class FormatFeatureFlagBits + { + eSampledImage = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT, + eStorageImage = VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT, + eStorageImageAtomic = VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT, + eUniformTexelBuffer = VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT, + eStorageTexelBuffer = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT, + eStorageTexelBufferAtomic = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT, + eVertexBuffer = VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT, + eColorAttachment = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, + eColorAttachmentBlend = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT, + eDepthStencilAttachment = VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT, + eBlitSrc = VK_FORMAT_FEATURE_BLIT_SRC_BIT, + eBlitDst = VK_FORMAT_FEATURE_BLIT_DST_BIT, + eSampledImageFilterLinear = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT, + eTransferSrc = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, + eTransferSrcKHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, + eTransferDst = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, + eTransferDstKHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, + eMidpointChromaSamples = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, + eMidpointChromaSamplesKHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, + eSampledImageYcbcrConversionLinearFilter = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, + eSampledImageYcbcrConversionLinearFilterKHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, + eSampledImageYcbcrConversionSeparateReconstructionFilter = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, + eSampledImageYcbcrConversionSeparateReconstructionFilterKHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, + eSampledImageYcbcrConversionChromaReconstructionExplicit = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, + eSampledImageYcbcrConversionChromaReconstructionExplicitKHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, + eSampledImageYcbcrConversionChromaReconstructionExplicitForceable = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, + eSampledImageYcbcrConversionChromaReconstructionExplicitForceableKHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, + eDisjoint = VK_FORMAT_FEATURE_DISJOINT_BIT, + eDisjointKHR = VK_FORMAT_FEATURE_DISJOINT_BIT, + eCositedChromaSamples = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, + eCositedChromaSamplesKHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, + eSampledImageFilterCubicIMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, + eSampledImageFilterMinmaxEXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT + }; + + using FormatFeatureFlags = Flags; + + VULKAN_HPP_INLINE FormatFeatureFlags operator|( FormatFeatureFlagBits bit0, FormatFeatureFlagBits bit1 ) + { + return FormatFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE FormatFeatureFlags operator~( FormatFeatureFlagBits bits ) + { + return ~( FormatFeatureFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(FormatFeatureFlagBits::eSampledImage) | VkFlags(FormatFeatureFlagBits::eStorageImage) | VkFlags(FormatFeatureFlagBits::eStorageImageAtomic) | VkFlags(FormatFeatureFlagBits::eUniformTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBufferAtomic) | 
VkFlags(FormatFeatureFlagBits::eVertexBuffer) | VkFlags(FormatFeatureFlagBits::eColorAttachment) | VkFlags(FormatFeatureFlagBits::eColorAttachmentBlend) | VkFlags(FormatFeatureFlagBits::eDepthStencilAttachment) | VkFlags(FormatFeatureFlagBits::eBlitSrc) | VkFlags(FormatFeatureFlagBits::eBlitDst) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterLinear) | VkFlags(FormatFeatureFlagBits::eTransferSrc) | VkFlags(FormatFeatureFlagBits::eTransferDst) | VkFlags(FormatFeatureFlagBits::eMidpointChromaSamples) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionLinearFilter) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionSeparateReconstructionFilter) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicit) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable) | VkFlags(FormatFeatureFlagBits::eDisjoint) | VkFlags(FormatFeatureFlagBits::eCositedChromaSamples) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterCubicIMG) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterMinmaxEXT) + }; + }; + + struct FormatProperties + { + operator VkFormatProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkFormatProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( FormatProperties const& rhs ) const + { + return ( linearTilingFeatures == rhs.linearTilingFeatures ) + && ( optimalTilingFeatures == rhs.optimalTilingFeatures ) + && ( bufferFeatures == rhs.bufferFeatures ); + } + + bool operator!=( FormatProperties const& rhs ) const + { + return !operator==( rhs ); + } + + FormatFeatureFlags linearTilingFeatures; + FormatFeatureFlags optimalTilingFeatures; + FormatFeatureFlags bufferFeatures; + }; + static_assert( sizeof( FormatProperties ) == sizeof( VkFormatProperties ), "struct and wrapper have different size!" ); + + struct FormatProperties2 + { + operator VkFormatProperties2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkFormatProperties2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( FormatProperties2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( formatProperties == rhs.formatProperties ); + } + + bool operator!=( FormatProperties2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eFormatProperties2; + + public: + void* pNext = nullptr; + FormatProperties formatProperties; + }; + static_assert( sizeof( FormatProperties2 ) == sizeof( VkFormatProperties2 ), "struct and wrapper have different size!" 
); + + using FormatProperties2KHR = FormatProperties2; + + enum class QueryControlFlagBits + { + ePrecise = VK_QUERY_CONTROL_PRECISE_BIT + }; + + using QueryControlFlags = Flags; + + VULKAN_HPP_INLINE QueryControlFlags operator|( QueryControlFlagBits bit0, QueryControlFlagBits bit1 ) + { + return QueryControlFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE QueryControlFlags operator~( QueryControlFlagBits bits ) + { + return ~( QueryControlFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(QueryControlFlagBits::ePrecise) + }; + }; + + enum class QueryResultFlagBits + { + e64 = VK_QUERY_RESULT_64_BIT, + eWait = VK_QUERY_RESULT_WAIT_BIT, + eWithAvailability = VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, + ePartial = VK_QUERY_RESULT_PARTIAL_BIT + }; + + using QueryResultFlags = Flags; + + VULKAN_HPP_INLINE QueryResultFlags operator|( QueryResultFlagBits bit0, QueryResultFlagBits bit1 ) + { + return QueryResultFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE QueryResultFlags operator~( QueryResultFlagBits bits ) + { + return ~( QueryResultFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(QueryResultFlagBits::e64) | VkFlags(QueryResultFlagBits::eWait) | VkFlags(QueryResultFlagBits::eWithAvailability) | VkFlags(QueryResultFlagBits::ePartial) + }; + }; + + enum class CommandBufferUsageFlagBits + { + eOneTimeSubmit = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + eRenderPassContinue = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, + eSimultaneousUse = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT + }; + + using CommandBufferUsageFlags = Flags; + + VULKAN_HPP_INLINE CommandBufferUsageFlags operator|( CommandBufferUsageFlagBits bit0, CommandBufferUsageFlagBits bit1 ) + { + return CommandBufferUsageFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE CommandBufferUsageFlags operator~( CommandBufferUsageFlagBits bits ) + { + return ~( CommandBufferUsageFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(CommandBufferUsageFlagBits::eOneTimeSubmit) | VkFlags(CommandBufferUsageFlagBits::eRenderPassContinue) | VkFlags(CommandBufferUsageFlagBits::eSimultaneousUse) + }; + }; + + enum class QueryPipelineStatisticFlagBits + { + eInputAssemblyVertices = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT, + eInputAssemblyPrimitives = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT, + eVertexShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT, + eGeometryShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT, + eGeometryShaderPrimitives = VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT, + eClippingInvocations = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT, + eClippingPrimitives = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT, + eFragmentShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT, + eTessellationControlShaderPatches = VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT, + eTessellationEvaluationShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT, + eComputeShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT + }; + + using QueryPipelineStatisticFlags = Flags; + + VULKAN_HPP_INLINE QueryPipelineStatisticFlags operator|( QueryPipelineStatisticFlagBits bit0, QueryPipelineStatisticFlagBits bit1 ) + { + return QueryPipelineStatisticFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE 
QueryPipelineStatisticFlags operator~( QueryPipelineStatisticFlagBits bits ) + { + return ~( QueryPipelineStatisticFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(QueryPipelineStatisticFlagBits::eInputAssemblyVertices) | VkFlags(QueryPipelineStatisticFlagBits::eInputAssemblyPrimitives) | VkFlags(QueryPipelineStatisticFlagBits::eVertexShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eGeometryShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eGeometryShaderPrimitives) | VkFlags(QueryPipelineStatisticFlagBits::eClippingInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eClippingPrimitives) | VkFlags(QueryPipelineStatisticFlagBits::eFragmentShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches) | VkFlags(QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eComputeShaderInvocations) + }; + }; + + struct CommandBufferInheritanceInfo + { + CommandBufferInheritanceInfo( RenderPass renderPass_ = RenderPass(), + uint32_t subpass_ = 0, + Framebuffer framebuffer_ = Framebuffer(), + Bool32 occlusionQueryEnable_ = 0, + QueryControlFlags queryFlags_ = QueryControlFlags(), + QueryPipelineStatisticFlags pipelineStatistics_ = QueryPipelineStatisticFlags() ) + : renderPass( renderPass_ ) + , subpass( subpass_ ) + , framebuffer( framebuffer_ ) + , occlusionQueryEnable( occlusionQueryEnable_ ) + , queryFlags( queryFlags_ ) + , pipelineStatistics( pipelineStatistics_ ) + { + } + + CommandBufferInheritanceInfo( VkCommandBufferInheritanceInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandBufferInheritanceInfo ) ); + } + + CommandBufferInheritanceInfo& operator=( VkCommandBufferInheritanceInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandBufferInheritanceInfo ) ); + return *this; + } + CommandBufferInheritanceInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + CommandBufferInheritanceInfo& setRenderPass( RenderPass renderPass_ ) + { + renderPass = renderPass_; + return *this; + } + + CommandBufferInheritanceInfo& setSubpass( uint32_t subpass_ ) + { + subpass = subpass_; + return *this; + } + + CommandBufferInheritanceInfo& setFramebuffer( Framebuffer framebuffer_ ) + { + framebuffer = framebuffer_; + return *this; + } + + CommandBufferInheritanceInfo& setOcclusionQueryEnable( Bool32 occlusionQueryEnable_ ) + { + occlusionQueryEnable = occlusionQueryEnable_; + return *this; + } + + CommandBufferInheritanceInfo& setQueryFlags( QueryControlFlags queryFlags_ ) + { + queryFlags = queryFlags_; + return *this; + } + + CommandBufferInheritanceInfo& setPipelineStatistics( QueryPipelineStatisticFlags pipelineStatistics_ ) + { + pipelineStatistics = pipelineStatistics_; + return *this; + } + + operator VkCommandBufferInheritanceInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkCommandBufferInheritanceInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( CommandBufferInheritanceInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( renderPass == rhs.renderPass ) + && ( subpass == rhs.subpass ) + && ( framebuffer == rhs.framebuffer ) + && ( occlusionQueryEnable == rhs.occlusionQueryEnable ) + && ( queryFlags == rhs.queryFlags ) + && ( pipelineStatistics == rhs.pipelineStatistics ); + } + + bool operator!=( CommandBufferInheritanceInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType 
sType = StructureType::eCommandBufferInheritanceInfo; + + public: + const void* pNext = nullptr; + RenderPass renderPass; + uint32_t subpass; + Framebuffer framebuffer; + Bool32 occlusionQueryEnable; + QueryControlFlags queryFlags; + QueryPipelineStatisticFlags pipelineStatistics; + }; + static_assert( sizeof( CommandBufferInheritanceInfo ) == sizeof( VkCommandBufferInheritanceInfo ), "struct and wrapper have different size!" ); + + struct CommandBufferBeginInfo + { + CommandBufferBeginInfo( CommandBufferUsageFlags flags_ = CommandBufferUsageFlags(), + const CommandBufferInheritanceInfo* pInheritanceInfo_ = nullptr ) + : flags( flags_ ) + , pInheritanceInfo( pInheritanceInfo_ ) + { + } + + CommandBufferBeginInfo( VkCommandBufferBeginInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandBufferBeginInfo ) ); + } + + CommandBufferBeginInfo& operator=( VkCommandBufferBeginInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandBufferBeginInfo ) ); + return *this; + } + CommandBufferBeginInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + CommandBufferBeginInfo& setFlags( CommandBufferUsageFlags flags_ ) + { + flags = flags_; + return *this; + } + + CommandBufferBeginInfo& setPInheritanceInfo( const CommandBufferInheritanceInfo* pInheritanceInfo_ ) + { + pInheritanceInfo = pInheritanceInfo_; + return *this; + } + + operator VkCommandBufferBeginInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkCommandBufferBeginInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( CommandBufferBeginInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pInheritanceInfo == rhs.pInheritanceInfo ); + } + + bool operator!=( CommandBufferBeginInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eCommandBufferBeginInfo; + + public: + const void* pNext = nullptr; + CommandBufferUsageFlags flags; + const CommandBufferInheritanceInfo* pInheritanceInfo; + }; + static_assert( sizeof( CommandBufferBeginInfo ) == sizeof( VkCommandBufferBeginInfo ), "struct and wrapper have different size!" 
); + + struct QueryPoolCreateInfo + { + QueryPoolCreateInfo( QueryPoolCreateFlags flags_ = QueryPoolCreateFlags(), + QueryType queryType_ = QueryType::eOcclusion, + uint32_t queryCount_ = 0, + QueryPipelineStatisticFlags pipelineStatistics_ = QueryPipelineStatisticFlags() ) + : flags( flags_ ) + , queryType( queryType_ ) + , queryCount( queryCount_ ) + , pipelineStatistics( pipelineStatistics_ ) + { + } + + QueryPoolCreateInfo( VkQueryPoolCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( QueryPoolCreateInfo ) ); + } + + QueryPoolCreateInfo& operator=( VkQueryPoolCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( QueryPoolCreateInfo ) ); + return *this; + } + QueryPoolCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + QueryPoolCreateInfo& setFlags( QueryPoolCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + QueryPoolCreateInfo& setQueryType( QueryType queryType_ ) + { + queryType = queryType_; + return *this; + } + + QueryPoolCreateInfo& setQueryCount( uint32_t queryCount_ ) + { + queryCount = queryCount_; + return *this; + } + + QueryPoolCreateInfo& setPipelineStatistics( QueryPipelineStatisticFlags pipelineStatistics_ ) + { + pipelineStatistics = pipelineStatistics_; + return *this; + } + + operator VkQueryPoolCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkQueryPoolCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( QueryPoolCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( queryType == rhs.queryType ) + && ( queryCount == rhs.queryCount ) + && ( pipelineStatistics == rhs.pipelineStatistics ); + } + + bool operator!=( QueryPoolCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eQueryPoolCreateInfo; + + public: + const void* pNext = nullptr; + QueryPoolCreateFlags flags; + QueryType queryType; + uint32_t queryCount; + QueryPipelineStatisticFlags pipelineStatistics; + }; + static_assert( sizeof( QueryPoolCreateInfo ) == sizeof( VkQueryPoolCreateInfo ), "struct and wrapper have different size!" 
); + + enum class ImageAspectFlagBits + { + eColor = VK_IMAGE_ASPECT_COLOR_BIT, + eDepth = VK_IMAGE_ASPECT_DEPTH_BIT, + eStencil = VK_IMAGE_ASPECT_STENCIL_BIT, + eMetadata = VK_IMAGE_ASPECT_METADATA_BIT, + ePlane0 = VK_IMAGE_ASPECT_PLANE_0_BIT, + ePlane0KHR = VK_IMAGE_ASPECT_PLANE_0_BIT, + ePlane1 = VK_IMAGE_ASPECT_PLANE_1_BIT, + ePlane1KHR = VK_IMAGE_ASPECT_PLANE_1_BIT, + ePlane2 = VK_IMAGE_ASPECT_PLANE_2_BIT, + ePlane2KHR = VK_IMAGE_ASPECT_PLANE_2_BIT + }; + + using ImageAspectFlags = Flags; + + VULKAN_HPP_INLINE ImageAspectFlags operator|( ImageAspectFlagBits bit0, ImageAspectFlagBits bit1 ) + { + return ImageAspectFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ImageAspectFlags operator~( ImageAspectFlagBits bits ) + { + return ~( ImageAspectFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ImageAspectFlagBits::eColor) | VkFlags(ImageAspectFlagBits::eDepth) | VkFlags(ImageAspectFlagBits::eStencil) | VkFlags(ImageAspectFlagBits::eMetadata) | VkFlags(ImageAspectFlagBits::ePlane0) | VkFlags(ImageAspectFlagBits::ePlane1) | VkFlags(ImageAspectFlagBits::ePlane2) + }; + }; + + struct ImageSubresource + { + ImageSubresource( ImageAspectFlags aspectMask_ = ImageAspectFlags(), + uint32_t mipLevel_ = 0, + uint32_t arrayLayer_ = 0 ) + : aspectMask( aspectMask_ ) + , mipLevel( mipLevel_ ) + , arrayLayer( arrayLayer_ ) + { + } + + ImageSubresource( VkImageSubresource const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageSubresource ) ); + } + + ImageSubresource& operator=( VkImageSubresource const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageSubresource ) ); + return *this; + } + ImageSubresource& setAspectMask( ImageAspectFlags aspectMask_ ) + { + aspectMask = aspectMask_; + return *this; + } + + ImageSubresource& setMipLevel( uint32_t mipLevel_ ) + { + mipLevel = mipLevel_; + return *this; + } + + ImageSubresource& setArrayLayer( uint32_t arrayLayer_ ) + { + arrayLayer = arrayLayer_; + return *this; + } + + operator VkImageSubresource const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageSubresource &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageSubresource const& rhs ) const + { + return ( aspectMask == rhs.aspectMask ) + && ( mipLevel == rhs.mipLevel ) + && ( arrayLayer == rhs.arrayLayer ); + } + + bool operator!=( ImageSubresource const& rhs ) const + { + return !operator==( rhs ); + } + + ImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t arrayLayer; + }; + static_assert( sizeof( ImageSubresource ) == sizeof( VkImageSubresource ), "struct and wrapper have different size!" 
+  );
+
+  struct ImageSubresourceLayers
+  {
+    ImageSubresourceLayers( ImageAspectFlags aspectMask_ = ImageAspectFlags(),
+                            uint32_t mipLevel_ = 0,
+                            uint32_t baseArrayLayer_ = 0,
+                            uint32_t layerCount_ = 0 )
+      : aspectMask( aspectMask_ )
+      , mipLevel( mipLevel_ )
+      , baseArrayLayer( baseArrayLayer_ )
+      , layerCount( layerCount_ )
+    {
+    }
+
+    ImageSubresourceLayers( VkImageSubresourceLayers const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( ImageSubresourceLayers ) );
+    }
+
+    ImageSubresourceLayers& operator=( VkImageSubresourceLayers const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( ImageSubresourceLayers ) );
+      return *this;
+    }
+    ImageSubresourceLayers& setAspectMask( ImageAspectFlags aspectMask_ )
+    {
+      aspectMask = aspectMask_;
+      return *this;
+    }
+
+    ImageSubresourceLayers& setMipLevel( uint32_t mipLevel_ )
+    {
+      mipLevel = mipLevel_;
+      return *this;
+    }
+
+    ImageSubresourceLayers& setBaseArrayLayer( uint32_t baseArrayLayer_ )
+    {
+      baseArrayLayer = baseArrayLayer_;
+      return *this;
+    }
+
+    ImageSubresourceLayers& setLayerCount( uint32_t layerCount_ )
+    {
+      layerCount = layerCount_;
+      return *this;
+    }
+
+    operator VkImageSubresourceLayers const&() const
+    {
+      return *reinterpret_cast<const VkImageSubresourceLayers*>(this);
+    }
+
+    operator VkImageSubresourceLayers &()
+    {
+      return *reinterpret_cast<VkImageSubresourceLayers*>(this);
+    }
+
+    bool operator==( ImageSubresourceLayers const& rhs ) const
+    {
+      return ( aspectMask == rhs.aspectMask )
+          && ( mipLevel == rhs.mipLevel )
+          && ( baseArrayLayer == rhs.baseArrayLayer )
+          && ( layerCount == rhs.layerCount );
+    }
+
+    bool operator!=( ImageSubresourceLayers const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+    ImageAspectFlags aspectMask;
+    uint32_t mipLevel;
+    uint32_t baseArrayLayer;
+    uint32_t layerCount;
+  };
+  static_assert( sizeof( ImageSubresourceLayers ) == sizeof( VkImageSubresourceLayers ), "struct and wrapper have different size!"
); + + struct ImageSubresourceRange + { + ImageSubresourceRange( ImageAspectFlags aspectMask_ = ImageAspectFlags(), + uint32_t baseMipLevel_ = 0, + uint32_t levelCount_ = 0, + uint32_t baseArrayLayer_ = 0, + uint32_t layerCount_ = 0 ) + : aspectMask( aspectMask_ ) + , baseMipLevel( baseMipLevel_ ) + , levelCount( levelCount_ ) + , baseArrayLayer( baseArrayLayer_ ) + , layerCount( layerCount_ ) + { + } + + ImageSubresourceRange( VkImageSubresourceRange const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageSubresourceRange ) ); + } + + ImageSubresourceRange& operator=( VkImageSubresourceRange const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageSubresourceRange ) ); + return *this; + } + ImageSubresourceRange& setAspectMask( ImageAspectFlags aspectMask_ ) + { + aspectMask = aspectMask_; + return *this; + } + + ImageSubresourceRange& setBaseMipLevel( uint32_t baseMipLevel_ ) + { + baseMipLevel = baseMipLevel_; + return *this; + } + + ImageSubresourceRange& setLevelCount( uint32_t levelCount_ ) + { + levelCount = levelCount_; + return *this; + } + + ImageSubresourceRange& setBaseArrayLayer( uint32_t baseArrayLayer_ ) + { + baseArrayLayer = baseArrayLayer_; + return *this; + } + + ImageSubresourceRange& setLayerCount( uint32_t layerCount_ ) + { + layerCount = layerCount_; + return *this; + } + + operator VkImageSubresourceRange const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageSubresourceRange &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageSubresourceRange const& rhs ) const + { + return ( aspectMask == rhs.aspectMask ) + && ( baseMipLevel == rhs.baseMipLevel ) + && ( levelCount == rhs.levelCount ) + && ( baseArrayLayer == rhs.baseArrayLayer ) + && ( layerCount == rhs.layerCount ); + } + + bool operator!=( ImageSubresourceRange const& rhs ) const + { + return !operator==( rhs ); + } + + ImageAspectFlags aspectMask; + uint32_t baseMipLevel; + uint32_t levelCount; + uint32_t baseArrayLayer; + uint32_t layerCount; + }; + static_assert( sizeof( ImageSubresourceRange ) == sizeof( VkImageSubresourceRange ), "struct and wrapper have different size!" 
); + + struct ImageMemoryBarrier + { + ImageMemoryBarrier( AccessFlags srcAccessMask_ = AccessFlags(), + AccessFlags dstAccessMask_ = AccessFlags(), + ImageLayout oldLayout_ = ImageLayout::eUndefined, + ImageLayout newLayout_ = ImageLayout::eUndefined, + uint32_t srcQueueFamilyIndex_ = 0, + uint32_t dstQueueFamilyIndex_ = 0, + Image image_ = Image(), + ImageSubresourceRange subresourceRange_ = ImageSubresourceRange() ) + : srcAccessMask( srcAccessMask_ ) + , dstAccessMask( dstAccessMask_ ) + , oldLayout( oldLayout_ ) + , newLayout( newLayout_ ) + , srcQueueFamilyIndex( srcQueueFamilyIndex_ ) + , dstQueueFamilyIndex( dstQueueFamilyIndex_ ) + , image( image_ ) + , subresourceRange( subresourceRange_ ) + { + } + + ImageMemoryBarrier( VkImageMemoryBarrier const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageMemoryBarrier ) ); + } + + ImageMemoryBarrier& operator=( VkImageMemoryBarrier const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageMemoryBarrier ) ); + return *this; + } + ImageMemoryBarrier& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImageMemoryBarrier& setSrcAccessMask( AccessFlags srcAccessMask_ ) + { + srcAccessMask = srcAccessMask_; + return *this; + } + + ImageMemoryBarrier& setDstAccessMask( AccessFlags dstAccessMask_ ) + { + dstAccessMask = dstAccessMask_; + return *this; + } + + ImageMemoryBarrier& setOldLayout( ImageLayout oldLayout_ ) + { + oldLayout = oldLayout_; + return *this; + } + + ImageMemoryBarrier& setNewLayout( ImageLayout newLayout_ ) + { + newLayout = newLayout_; + return *this; + } + + ImageMemoryBarrier& setSrcQueueFamilyIndex( uint32_t srcQueueFamilyIndex_ ) + { + srcQueueFamilyIndex = srcQueueFamilyIndex_; + return *this; + } + + ImageMemoryBarrier& setDstQueueFamilyIndex( uint32_t dstQueueFamilyIndex_ ) + { + dstQueueFamilyIndex = dstQueueFamilyIndex_; + return *this; + } + + ImageMemoryBarrier& setImage( Image image_ ) + { + image = image_; + return *this; + } + + ImageMemoryBarrier& setSubresourceRange( ImageSubresourceRange subresourceRange_ ) + { + subresourceRange = subresourceRange_; + return *this; + } + + operator VkImageMemoryBarrier const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageMemoryBarrier &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageMemoryBarrier const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcAccessMask == rhs.srcAccessMask ) + && ( dstAccessMask == rhs.dstAccessMask ) + && ( oldLayout == rhs.oldLayout ) + && ( newLayout == rhs.newLayout ) + && ( srcQueueFamilyIndex == rhs.srcQueueFamilyIndex ) + && ( dstQueueFamilyIndex == rhs.dstQueueFamilyIndex ) + && ( image == rhs.image ) + && ( subresourceRange == rhs.subresourceRange ); + } + + bool operator!=( ImageMemoryBarrier const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageMemoryBarrier; + + public: + const void* pNext = nullptr; + AccessFlags srcAccessMask; + AccessFlags dstAccessMask; + ImageLayout oldLayout; + ImageLayout newLayout; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + Image image; + ImageSubresourceRange subresourceRange; + }; + static_assert( sizeof( ImageMemoryBarrier ) == sizeof( VkImageMemoryBarrier ), "struct and wrapper have different size!" 
); + + struct ImageViewCreateInfo + { + ImageViewCreateInfo( ImageViewCreateFlags flags_ = ImageViewCreateFlags(), + Image image_ = Image(), + ImageViewType viewType_ = ImageViewType::e1D, + Format format_ = Format::eUndefined, + ComponentMapping components_ = ComponentMapping(), + ImageSubresourceRange subresourceRange_ = ImageSubresourceRange() ) + : flags( flags_ ) + , image( image_ ) + , viewType( viewType_ ) + , format( format_ ) + , components( components_ ) + , subresourceRange( subresourceRange_ ) + { + } + + ImageViewCreateInfo( VkImageViewCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageViewCreateInfo ) ); + } + + ImageViewCreateInfo& operator=( VkImageViewCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageViewCreateInfo ) ); + return *this; + } + ImageViewCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImageViewCreateInfo& setFlags( ImageViewCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + ImageViewCreateInfo& setImage( Image image_ ) + { + image = image_; + return *this; + } + + ImageViewCreateInfo& setViewType( ImageViewType viewType_ ) + { + viewType = viewType_; + return *this; + } + + ImageViewCreateInfo& setFormat( Format format_ ) + { + format = format_; + return *this; + } + + ImageViewCreateInfo& setComponents( ComponentMapping components_ ) + { + components = components_; + return *this; + } + + ImageViewCreateInfo& setSubresourceRange( ImageSubresourceRange subresourceRange_ ) + { + subresourceRange = subresourceRange_; + return *this; + } + + operator VkImageViewCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageViewCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageViewCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( image == rhs.image ) + && ( viewType == rhs.viewType ) + && ( format == rhs.format ) + && ( components == rhs.components ) + && ( subresourceRange == rhs.subresourceRange ); + } + + bool operator!=( ImageViewCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageViewCreateInfo; + + public: + const void* pNext = nullptr; + ImageViewCreateFlags flags; + Image image; + ImageViewType viewType; + Format format; + ComponentMapping components; + ImageSubresourceRange subresourceRange; + }; + static_assert( sizeof( ImageViewCreateInfo ) == sizeof( VkImageViewCreateInfo ), "struct and wrapper have different size!" 
); + + struct ImageCopy + { + ImageCopy( ImageSubresourceLayers srcSubresource_ = ImageSubresourceLayers(), + Offset3D srcOffset_ = Offset3D(), + ImageSubresourceLayers dstSubresource_ = ImageSubresourceLayers(), + Offset3D dstOffset_ = Offset3D(), + Extent3D extent_ = Extent3D() ) + : srcSubresource( srcSubresource_ ) + , srcOffset( srcOffset_ ) + , dstSubresource( dstSubresource_ ) + , dstOffset( dstOffset_ ) + , extent( extent_ ) + { + } + + ImageCopy( VkImageCopy const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageCopy ) ); + } + + ImageCopy& operator=( VkImageCopy const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageCopy ) ); + return *this; + } + ImageCopy& setSrcSubresource( ImageSubresourceLayers srcSubresource_ ) + { + srcSubresource = srcSubresource_; + return *this; + } + + ImageCopy& setSrcOffset( Offset3D srcOffset_ ) + { + srcOffset = srcOffset_; + return *this; + } + + ImageCopy& setDstSubresource( ImageSubresourceLayers dstSubresource_ ) + { + dstSubresource = dstSubresource_; + return *this; + } + + ImageCopy& setDstOffset( Offset3D dstOffset_ ) + { + dstOffset = dstOffset_; + return *this; + } + + ImageCopy& setExtent( Extent3D extent_ ) + { + extent = extent_; + return *this; + } + + operator VkImageCopy const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageCopy &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageCopy const& rhs ) const + { + return ( srcSubresource == rhs.srcSubresource ) + && ( srcOffset == rhs.srcOffset ) + && ( dstSubresource == rhs.dstSubresource ) + && ( dstOffset == rhs.dstOffset ) + && ( extent == rhs.extent ); + } + + bool operator!=( ImageCopy const& rhs ) const + { + return !operator==( rhs ); + } + + ImageSubresourceLayers srcSubresource; + Offset3D srcOffset; + ImageSubresourceLayers dstSubresource; + Offset3D dstOffset; + Extent3D extent; + }; + static_assert( sizeof( ImageCopy ) == sizeof( VkImageCopy ), "struct and wrapper have different size!" 
+  );
+
+  struct ImageBlit
+  {
+    ImageBlit( ImageSubresourceLayers srcSubresource_ = ImageSubresourceLayers(),
+               std::array<Offset3D, 2> const& srcOffsets_ = { { Offset3D(), Offset3D() } },
+               ImageSubresourceLayers dstSubresource_ = ImageSubresourceLayers(),
+               std::array<Offset3D, 2> const& dstOffsets_ = { { Offset3D(), Offset3D() } } )
+      : srcSubresource( srcSubresource_ )
+      , dstSubresource( dstSubresource_ )
+    {
+      memcpy( &srcOffsets, srcOffsets_.data(), 2 * sizeof( Offset3D ) );
+      memcpy( &dstOffsets, dstOffsets_.data(), 2 * sizeof( Offset3D ) );
+    }
+
+    ImageBlit( VkImageBlit const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( ImageBlit ) );
+    }
+
+    ImageBlit& operator=( VkImageBlit const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( ImageBlit ) );
+      return *this;
+    }
+    ImageBlit& setSrcSubresource( ImageSubresourceLayers srcSubresource_ )
+    {
+      srcSubresource = srcSubresource_;
+      return *this;
+    }
+
+    ImageBlit& setSrcOffsets( std::array<Offset3D, 2> srcOffsets_ )
+    {
+      memcpy( &srcOffsets, srcOffsets_.data(), 2 * sizeof( Offset3D ) );
+      return *this;
+    }
+
+    ImageBlit& setDstSubresource( ImageSubresourceLayers dstSubresource_ )
+    {
+      dstSubresource = dstSubresource_;
+      return *this;
+    }
+
+    ImageBlit& setDstOffsets( std::array<Offset3D, 2> dstOffsets_ )
+    {
+      memcpy( &dstOffsets, dstOffsets_.data(), 2 * sizeof( Offset3D ) );
+      return *this;
+    }
+
+    operator VkImageBlit const&() const
+    {
+      return *reinterpret_cast<const VkImageBlit*>(this);
+    }
+
+    operator VkImageBlit &()
+    {
+      return *reinterpret_cast<VkImageBlit*>(this);
+    }
+
+    bool operator==( ImageBlit const& rhs ) const
+    {
+      return ( srcSubresource == rhs.srcSubresource )
+          && ( memcmp( srcOffsets, rhs.srcOffsets, 2 * sizeof( Offset3D ) ) == 0 )
+          && ( dstSubresource == rhs.dstSubresource )
+          && ( memcmp( dstOffsets, rhs.dstOffsets, 2 * sizeof( Offset3D ) ) == 0 );
+    }
+
+    bool operator!=( ImageBlit const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+    ImageSubresourceLayers srcSubresource;
+    Offset3D srcOffsets[2];
+    ImageSubresourceLayers dstSubresource;
+    Offset3D dstOffsets[2];
+  };
+  static_assert( sizeof( ImageBlit ) == sizeof( VkImageBlit ), "struct and wrapper have different size!"
); + + struct BufferImageCopy + { + BufferImageCopy( DeviceSize bufferOffset_ = 0, + uint32_t bufferRowLength_ = 0, + uint32_t bufferImageHeight_ = 0, + ImageSubresourceLayers imageSubresource_ = ImageSubresourceLayers(), + Offset3D imageOffset_ = Offset3D(), + Extent3D imageExtent_ = Extent3D() ) + : bufferOffset( bufferOffset_ ) + , bufferRowLength( bufferRowLength_ ) + , bufferImageHeight( bufferImageHeight_ ) + , imageSubresource( imageSubresource_ ) + , imageOffset( imageOffset_ ) + , imageExtent( imageExtent_ ) + { + } + + BufferImageCopy( VkBufferImageCopy const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferImageCopy ) ); + } + + BufferImageCopy& operator=( VkBufferImageCopy const & rhs ) + { + memcpy( this, &rhs, sizeof( BufferImageCopy ) ); + return *this; + } + BufferImageCopy& setBufferOffset( DeviceSize bufferOffset_ ) + { + bufferOffset = bufferOffset_; + return *this; + } + + BufferImageCopy& setBufferRowLength( uint32_t bufferRowLength_ ) + { + bufferRowLength = bufferRowLength_; + return *this; + } + + BufferImageCopy& setBufferImageHeight( uint32_t bufferImageHeight_ ) + { + bufferImageHeight = bufferImageHeight_; + return *this; + } + + BufferImageCopy& setImageSubresource( ImageSubresourceLayers imageSubresource_ ) + { + imageSubresource = imageSubresource_; + return *this; + } + + BufferImageCopy& setImageOffset( Offset3D imageOffset_ ) + { + imageOffset = imageOffset_; + return *this; + } + + BufferImageCopy& setImageExtent( Extent3D imageExtent_ ) + { + imageExtent = imageExtent_; + return *this; + } + + operator VkBufferImageCopy const&() const + { + return *reinterpret_cast(this); + } + + operator VkBufferImageCopy &() + { + return *reinterpret_cast(this); + } + + bool operator==( BufferImageCopy const& rhs ) const + { + return ( bufferOffset == rhs.bufferOffset ) + && ( bufferRowLength == rhs.bufferRowLength ) + && ( bufferImageHeight == rhs.bufferImageHeight ) + && ( imageSubresource == rhs.imageSubresource ) + && ( imageOffset == rhs.imageOffset ) + && ( imageExtent == rhs.imageExtent ); + } + + bool operator!=( BufferImageCopy const& rhs ) const + { + return !operator==( rhs ); + } + + DeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + ImageSubresourceLayers imageSubresource; + Offset3D imageOffset; + Extent3D imageExtent; + }; + static_assert( sizeof( BufferImageCopy ) == sizeof( VkBufferImageCopy ), "struct and wrapper have different size!" 
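+  // Usage sketch (illustrative): a bufferRowLength/bufferImageHeight of 0 means the buffer data
+  // is tightly packed to the image extent. Assuming a staging buffer `staging` and an image
+  // `tex` already in TransferDstOptimal:
+  //
+  //   vk::BufferImageCopy region = vk::BufferImageCopy()
+  //     .setBufferOffset( 0 )
+  //     .setImageSubresource( { vk::ImageAspectFlagBits::eColor, 0, 0, 1 } )
+  //     .setImageExtent( { width, height, 1 } );
+  //   cmd.copyBufferToImage( staging, tex, vk::ImageLayout::eTransferDstOptimal, region );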
); + + struct ImageResolve + { + ImageResolve( ImageSubresourceLayers srcSubresource_ = ImageSubresourceLayers(), + Offset3D srcOffset_ = Offset3D(), + ImageSubresourceLayers dstSubresource_ = ImageSubresourceLayers(), + Offset3D dstOffset_ = Offset3D(), + Extent3D extent_ = Extent3D() ) + : srcSubresource( srcSubresource_ ) + , srcOffset( srcOffset_ ) + , dstSubresource( dstSubresource_ ) + , dstOffset( dstOffset_ ) + , extent( extent_ ) + { + } + + ImageResolve( VkImageResolve const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageResolve ) ); + } + + ImageResolve& operator=( VkImageResolve const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageResolve ) ); + return *this; + } + ImageResolve& setSrcSubresource( ImageSubresourceLayers srcSubresource_ ) + { + srcSubresource = srcSubresource_; + return *this; + } + + ImageResolve& setSrcOffset( Offset3D srcOffset_ ) + { + srcOffset = srcOffset_; + return *this; + } + + ImageResolve& setDstSubresource( ImageSubresourceLayers dstSubresource_ ) + { + dstSubresource = dstSubresource_; + return *this; + } + + ImageResolve& setDstOffset( Offset3D dstOffset_ ) + { + dstOffset = dstOffset_; + return *this; + } + + ImageResolve& setExtent( Extent3D extent_ ) + { + extent = extent_; + return *this; + } + + operator VkImageResolve const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageResolve &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageResolve const& rhs ) const + { + return ( srcSubresource == rhs.srcSubresource ) + && ( srcOffset == rhs.srcOffset ) + && ( dstSubresource == rhs.dstSubresource ) + && ( dstOffset == rhs.dstOffset ) + && ( extent == rhs.extent ); + } + + bool operator!=( ImageResolve const& rhs ) const + { + return !operator==( rhs ); + } + + ImageSubresourceLayers srcSubresource; + Offset3D srcOffset; + ImageSubresourceLayers dstSubresource; + Offset3D dstOffset; + Extent3D extent; + }; + static_assert( sizeof( ImageResolve ) == sizeof( VkImageResolve ), "struct and wrapper have different size!" ); + + struct ClearAttachment + { + ClearAttachment( ImageAspectFlags aspectMask_ = ImageAspectFlags(), + uint32_t colorAttachment_ = 0, + ClearValue clearValue_ = ClearValue() ) + : aspectMask( aspectMask_ ) + , colorAttachment( colorAttachment_ ) + , clearValue( clearValue_ ) + { + } + + ClearAttachment( VkClearAttachment const & rhs ) + { + memcpy( this, &rhs, sizeof( ClearAttachment ) ); + } + + ClearAttachment& operator=( VkClearAttachment const & rhs ) + { + memcpy( this, &rhs, sizeof( ClearAttachment ) ); + return *this; + } + ClearAttachment& setAspectMask( ImageAspectFlags aspectMask_ ) + { + aspectMask = aspectMask_; + return *this; + } + + ClearAttachment& setColorAttachment( uint32_t colorAttachment_ ) + { + colorAttachment = colorAttachment_; + return *this; + } + + ClearAttachment& setClearValue( ClearValue clearValue_ ) + { + clearValue = clearValue_; + return *this; + } + + operator VkClearAttachment const&() const + { + return *reinterpret_cast(this); + } + + operator VkClearAttachment &() + { + return *reinterpret_cast(this); + } + + ImageAspectFlags aspectMask; + uint32_t colorAttachment; + ClearValue clearValue; + }; + static_assert( sizeof( ClearAttachment ) == sizeof( VkClearAttachment ), "struct and wrapper have different size!" 
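+  // Usage sketch (illustrative): ClearAttachment is consumed by clearAttachments inside a render
+  // pass; the cleared region comes from a separate ClearRect. `cmd`, `w` and `h` are assumed:
+  //
+  //   vk::ClearAttachment att = vk::ClearAttachment()
+  //     .setAspectMask( vk::ImageAspectFlagBits::eColor )
+  //     .setColorAttachment( 0 )
+  //     .setClearValue( vk::ClearColorValue( std::array<float, 4>{ 0.f, 0.f, 0.f, 1.f } ) );
+  //   cmd.clearAttachments( att, vk::ClearRect( vk::Rect2D( { 0, 0 }, { w, h } ), 0, 1 ) );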
);
+
+  struct InputAttachmentAspectReference
+  {
+    InputAttachmentAspectReference( uint32_t subpass_ = 0,
+                                    uint32_t inputAttachmentIndex_ = 0,
+                                    ImageAspectFlags aspectMask_ = ImageAspectFlags() )
+      : subpass( subpass_ )
+      , inputAttachmentIndex( inputAttachmentIndex_ )
+      , aspectMask( aspectMask_ )
+    {
+    }
+
+    InputAttachmentAspectReference( VkInputAttachmentAspectReference const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( InputAttachmentAspectReference ) );
+    }
+
+    InputAttachmentAspectReference& operator=( VkInputAttachmentAspectReference const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( InputAttachmentAspectReference ) );
+      return *this;
+    }
+    InputAttachmentAspectReference& setSubpass( uint32_t subpass_ )
+    {
+      subpass = subpass_;
+      return *this;
+    }
+
+    InputAttachmentAspectReference& setInputAttachmentIndex( uint32_t inputAttachmentIndex_ )
+    {
+      inputAttachmentIndex = inputAttachmentIndex_;
+      return *this;
+    }
+
+    InputAttachmentAspectReference& setAspectMask( ImageAspectFlags aspectMask_ )
+    {
+      aspectMask = aspectMask_;
+      return *this;
+    }
+
+    operator VkInputAttachmentAspectReference const&() const
+    {
+      return *reinterpret_cast<const VkInputAttachmentAspectReference*>(this);
+    }
+
+    operator VkInputAttachmentAspectReference &()
+    {
+      return *reinterpret_cast<VkInputAttachmentAspectReference*>(this);
+    }
+
+    bool operator==( InputAttachmentAspectReference const& rhs ) const
+    {
+      return ( subpass == rhs.subpass )
+          && ( inputAttachmentIndex == rhs.inputAttachmentIndex )
+          && ( aspectMask == rhs.aspectMask );
+    }
+
+    bool operator!=( InputAttachmentAspectReference const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+    uint32_t subpass;
+    uint32_t inputAttachmentIndex;
+    ImageAspectFlags aspectMask;
+  };
+  static_assert( sizeof( InputAttachmentAspectReference ) == sizeof( VkInputAttachmentAspectReference ), "struct and wrapper have different size!"
); + + using InputAttachmentAspectReferenceKHR = InputAttachmentAspectReference; + + struct RenderPassInputAttachmentAspectCreateInfo + { + RenderPassInputAttachmentAspectCreateInfo( uint32_t aspectReferenceCount_ = 0, + const InputAttachmentAspectReference* pAspectReferences_ = nullptr ) + : aspectReferenceCount( aspectReferenceCount_ ) + , pAspectReferences( pAspectReferences_ ) + { + } + + RenderPassInputAttachmentAspectCreateInfo( VkRenderPassInputAttachmentAspectCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassInputAttachmentAspectCreateInfo ) ); + } + + RenderPassInputAttachmentAspectCreateInfo& operator=( VkRenderPassInputAttachmentAspectCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassInputAttachmentAspectCreateInfo ) ); + return *this; + } + RenderPassInputAttachmentAspectCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + RenderPassInputAttachmentAspectCreateInfo& setAspectReferenceCount( uint32_t aspectReferenceCount_ ) + { + aspectReferenceCount = aspectReferenceCount_; + return *this; + } + + RenderPassInputAttachmentAspectCreateInfo& setPAspectReferences( const InputAttachmentAspectReference* pAspectReferences_ ) + { + pAspectReferences = pAspectReferences_; + return *this; + } + + operator VkRenderPassInputAttachmentAspectCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkRenderPassInputAttachmentAspectCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( RenderPassInputAttachmentAspectCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( aspectReferenceCount == rhs.aspectReferenceCount ) + && ( pAspectReferences == rhs.pAspectReferences ); + } + + bool operator!=( RenderPassInputAttachmentAspectCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eRenderPassInputAttachmentAspectCreateInfo; + + public: + const void* pNext = nullptr; + uint32_t aspectReferenceCount; + const InputAttachmentAspectReference* pAspectReferences; + }; + static_assert( sizeof( RenderPassInputAttachmentAspectCreateInfo ) == sizeof( VkRenderPassInputAttachmentAspectCreateInfo ), "struct and wrapper have different size!" 
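+  // Usage sketch (illustrative): this structure extends RenderPassCreateInfo through its pNext
+  // chain, declaring which aspects of each input attachment are read. Assuming a vector `refs`
+  // of InputAttachmentAspectReference and an existing `renderPassCreateInfo`:
+  //
+  //   vk::RenderPassInputAttachmentAspectCreateInfo aspectInfo =
+  //     vk::RenderPassInputAttachmentAspectCreateInfo()
+  //       .setAspectReferenceCount( static_cast<uint32_t>( refs.size() ) )
+  //       .setPAspectReferences( refs.data() );
+  //   renderPassCreateInfo.setPNext( &aspectInfo );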
); + + using RenderPassInputAttachmentAspectCreateInfoKHR = RenderPassInputAttachmentAspectCreateInfo; + + struct BindImagePlaneMemoryInfo + { + BindImagePlaneMemoryInfo( ImageAspectFlagBits planeAspect_ = ImageAspectFlagBits::eColor ) + : planeAspect( planeAspect_ ) + { + } + + BindImagePlaneMemoryInfo( VkBindImagePlaneMemoryInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BindImagePlaneMemoryInfo ) ); + } + + BindImagePlaneMemoryInfo& operator=( VkBindImagePlaneMemoryInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BindImagePlaneMemoryInfo ) ); + return *this; + } + BindImagePlaneMemoryInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BindImagePlaneMemoryInfo& setPlaneAspect( ImageAspectFlagBits planeAspect_ ) + { + planeAspect = planeAspect_; + return *this; + } + + operator VkBindImagePlaneMemoryInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkBindImagePlaneMemoryInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( BindImagePlaneMemoryInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( planeAspect == rhs.planeAspect ); + } + + bool operator!=( BindImagePlaneMemoryInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eBindImagePlaneMemoryInfo; + + public: + const void* pNext = nullptr; + ImageAspectFlagBits planeAspect; + }; + static_assert( sizeof( BindImagePlaneMemoryInfo ) == sizeof( VkBindImagePlaneMemoryInfo ), "struct and wrapper have different size!" ); + + using BindImagePlaneMemoryInfoKHR = BindImagePlaneMemoryInfo; + + struct ImagePlaneMemoryRequirementsInfo + { + ImagePlaneMemoryRequirementsInfo( ImageAspectFlagBits planeAspect_ = ImageAspectFlagBits::eColor ) + : planeAspect( planeAspect_ ) + { + } + + ImagePlaneMemoryRequirementsInfo( VkImagePlaneMemoryRequirementsInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ImagePlaneMemoryRequirementsInfo ) ); + } + + ImagePlaneMemoryRequirementsInfo& operator=( VkImagePlaneMemoryRequirementsInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ImagePlaneMemoryRequirementsInfo ) ); + return *this; + } + ImagePlaneMemoryRequirementsInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImagePlaneMemoryRequirementsInfo& setPlaneAspect( ImageAspectFlagBits planeAspect_ ) + { + planeAspect = planeAspect_; + return *this; + } + + operator VkImagePlaneMemoryRequirementsInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkImagePlaneMemoryRequirementsInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImagePlaneMemoryRequirementsInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( planeAspect == rhs.planeAspect ); + } + + bool operator!=( ImagePlaneMemoryRequirementsInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImagePlaneMemoryRequirementsInfo; + + public: + const void* pNext = nullptr; + ImageAspectFlagBits planeAspect; + }; + static_assert( sizeof( ImagePlaneMemoryRequirementsInfo ) == sizeof( VkImagePlaneMemoryRequirementsInfo ), "struct and wrapper have different size!" 
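+  // Usage sketch (illustrative): for disjoint multi-planar (e.g. YCbCr) images, each plane is
+  // queried and bound separately by chaining these structs via pNext. Assuming `device` and
+  // `image`:
+  //
+  //   vk::ImagePlaneMemoryRequirementsInfo planeInfo( vk::ImageAspectFlagBits::ePlane0 );
+  //   vk::ImageMemoryRequirementsInfo2 reqInfo = vk::ImageMemoryRequirementsInfo2( image )
+  //     .setPNext( &planeInfo );
+  //   vk::MemoryRequirements2 reqs = device.getImageMemoryRequirements2( reqInfo );
+  //
+  // A matching BindImagePlaneMemoryInfo is chained into the BindImageMemoryInfo used for
+  // bindImageMemory2 in the same way.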
); + + using ImagePlaneMemoryRequirementsInfoKHR = ImagePlaneMemoryRequirementsInfo; + + struct AttachmentReference2KHR + { + AttachmentReference2KHR( uint32_t attachment_ = 0, + ImageLayout layout_ = ImageLayout::eUndefined, + ImageAspectFlags aspectMask_ = ImageAspectFlags() ) + : attachment( attachment_ ) + , layout( layout_ ) + , aspectMask( aspectMask_ ) + { + } + + AttachmentReference2KHR( VkAttachmentReference2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentReference2KHR ) ); + } + + AttachmentReference2KHR& operator=( VkAttachmentReference2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentReference2KHR ) ); + return *this; + } + AttachmentReference2KHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + AttachmentReference2KHR& setAttachment( uint32_t attachment_ ) + { + attachment = attachment_; + return *this; + } + + AttachmentReference2KHR& setLayout( ImageLayout layout_ ) + { + layout = layout_; + return *this; + } + + AttachmentReference2KHR& setAspectMask( ImageAspectFlags aspectMask_ ) + { + aspectMask = aspectMask_; + return *this; + } + + operator VkAttachmentReference2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkAttachmentReference2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( AttachmentReference2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( attachment == rhs.attachment ) + && ( layout == rhs.layout ) + && ( aspectMask == rhs.aspectMask ); + } + + bool operator!=( AttachmentReference2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eAttachmentReference2KHR; + + public: + const void* pNext = nullptr; + uint32_t attachment; + ImageLayout layout; + ImageAspectFlags aspectMask; + }; + static_assert( sizeof( AttachmentReference2KHR ) == sizeof( VkAttachmentReference2KHR ), "struct and wrapper have different size!" 
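+  // Usage sketch (illustrative): the *2KHR attachment/subpass structures belong to
+  // VK_KHR_create_renderpass2 and are consumed by createRenderPass2KHR; the aspectMask member is
+  // what the extension adds over the core AttachmentReference. For a depth attachment at index 1:
+  //
+  //   vk::AttachmentReference2KHR depthRef = vk::AttachmentReference2KHR()
+  //     .setAttachment( 1 )
+  //     .setLayout( vk::ImageLayout::eDepthStencilAttachmentOptimal )
+  //     .setAspectMask( vk::ImageAspectFlagBits::eDepth );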
); + + enum class SparseImageFormatFlagBits + { + eSingleMiptail = VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT, + eAlignedMipSize = VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT, + eNonstandardBlockSize = VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT + }; + + using SparseImageFormatFlags = Flags; + + VULKAN_HPP_INLINE SparseImageFormatFlags operator|( SparseImageFormatFlagBits bit0, SparseImageFormatFlagBits bit1 ) + { + return SparseImageFormatFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE SparseImageFormatFlags operator~( SparseImageFormatFlagBits bits ) + { + return ~( SparseImageFormatFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(SparseImageFormatFlagBits::eSingleMiptail) | VkFlags(SparseImageFormatFlagBits::eAlignedMipSize) | VkFlags(SparseImageFormatFlagBits::eNonstandardBlockSize) + }; + }; + + struct SparseImageFormatProperties + { + operator VkSparseImageFormatProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkSparseImageFormatProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( SparseImageFormatProperties const& rhs ) const + { + return ( aspectMask == rhs.aspectMask ) + && ( imageGranularity == rhs.imageGranularity ) + && ( flags == rhs.flags ); + } + + bool operator!=( SparseImageFormatProperties const& rhs ) const + { + return !operator==( rhs ); + } + + ImageAspectFlags aspectMask; + Extent3D imageGranularity; + SparseImageFormatFlags flags; + }; + static_assert( sizeof( SparseImageFormatProperties ) == sizeof( VkSparseImageFormatProperties ), "struct and wrapper have different size!" ); + + struct SparseImageMemoryRequirements + { + operator VkSparseImageMemoryRequirements const&() const + { + return *reinterpret_cast(this); + } + + operator VkSparseImageMemoryRequirements &() + { + return *reinterpret_cast(this); + } + + bool operator==( SparseImageMemoryRequirements const& rhs ) const + { + return ( formatProperties == rhs.formatProperties ) + && ( imageMipTailFirstLod == rhs.imageMipTailFirstLod ) + && ( imageMipTailSize == rhs.imageMipTailSize ) + && ( imageMipTailOffset == rhs.imageMipTailOffset ) + && ( imageMipTailStride == rhs.imageMipTailStride ); + } + + bool operator!=( SparseImageMemoryRequirements const& rhs ) const + { + return !operator==( rhs ); + } + + SparseImageFormatProperties formatProperties; + uint32_t imageMipTailFirstLod; + DeviceSize imageMipTailSize; + DeviceSize imageMipTailOffset; + DeviceSize imageMipTailStride; + }; + static_assert( sizeof( SparseImageMemoryRequirements ) == sizeof( VkSparseImageMemoryRequirements ), "struct and wrapper have different size!" ); + + struct SparseImageFormatProperties2 + { + operator VkSparseImageFormatProperties2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkSparseImageFormatProperties2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( SparseImageFormatProperties2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( properties == rhs.properties ); + } + + bool operator!=( SparseImageFormatProperties2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSparseImageFormatProperties2; + + public: + void* pNext = nullptr; + SparseImageFormatProperties properties; + }; + static_assert( sizeof( SparseImageFormatProperties2 ) == sizeof( VkSparseImageFormatProperties2 ), "struct and wrapper have different size!" 
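+  // Usage sketch (illustrative): each FlagBits enum gets a type-safe Flags alias plus the
+  // operator| / operator~ overloads defined above, so bits compose without falling back to raw
+  // VkFlags:
+  //
+  //   vk::SparseImageFormatFlags f = vk::SparseImageFormatFlagBits::eSingleMiptail
+  //                                | vk::SparseImageFormatFlagBits::eAlignedMipSize;
+  //   if ( f & vk::SparseImageFormatFlagBits::eSingleMiptail ) { /* single mip tail path */ }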
); + + using SparseImageFormatProperties2KHR = SparseImageFormatProperties2; + + struct SparseImageMemoryRequirements2 + { + operator VkSparseImageMemoryRequirements2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkSparseImageMemoryRequirements2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( SparseImageMemoryRequirements2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memoryRequirements == rhs.memoryRequirements ); + } + + bool operator!=( SparseImageMemoryRequirements2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSparseImageMemoryRequirements2; + + public: + void* pNext = nullptr; + SparseImageMemoryRequirements memoryRequirements; + }; + static_assert( sizeof( SparseImageMemoryRequirements2 ) == sizeof( VkSparseImageMemoryRequirements2 ), "struct and wrapper have different size!" ); + + using SparseImageMemoryRequirements2KHR = SparseImageMemoryRequirements2; + + enum class SparseMemoryBindFlagBits + { + eMetadata = VK_SPARSE_MEMORY_BIND_METADATA_BIT + }; + + using SparseMemoryBindFlags = Flags; + + VULKAN_HPP_INLINE SparseMemoryBindFlags operator|( SparseMemoryBindFlagBits bit0, SparseMemoryBindFlagBits bit1 ) + { + return SparseMemoryBindFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE SparseMemoryBindFlags operator~( SparseMemoryBindFlagBits bits ) + { + return ~( SparseMemoryBindFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(SparseMemoryBindFlagBits::eMetadata) + }; + }; + + struct SparseMemoryBind + { + SparseMemoryBind( DeviceSize resourceOffset_ = 0, + DeviceSize size_ = 0, + DeviceMemory memory_ = DeviceMemory(), + DeviceSize memoryOffset_ = 0, + SparseMemoryBindFlags flags_ = SparseMemoryBindFlags() ) + : resourceOffset( resourceOffset_ ) + , size( size_ ) + , memory( memory_ ) + , memoryOffset( memoryOffset_ ) + , flags( flags_ ) + { + } + + SparseMemoryBind( VkSparseMemoryBind const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseMemoryBind ) ); + } + + SparseMemoryBind& operator=( VkSparseMemoryBind const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseMemoryBind ) ); + return *this; + } + SparseMemoryBind& setResourceOffset( DeviceSize resourceOffset_ ) + { + resourceOffset = resourceOffset_; + return *this; + } + + SparseMemoryBind& setSize( DeviceSize size_ ) + { + size = size_; + return *this; + } + + SparseMemoryBind& setMemory( DeviceMemory memory_ ) + { + memory = memory_; + return *this; + } + + SparseMemoryBind& setMemoryOffset( DeviceSize memoryOffset_ ) + { + memoryOffset = memoryOffset_; + return *this; + } + + SparseMemoryBind& setFlags( SparseMemoryBindFlags flags_ ) + { + flags = flags_; + return *this; + } + + operator VkSparseMemoryBind const&() const + { + return *reinterpret_cast(this); + } + + operator VkSparseMemoryBind &() + { + return *reinterpret_cast(this); + } + + bool operator==( SparseMemoryBind const& rhs ) const + { + return ( resourceOffset == rhs.resourceOffset ) + && ( size == rhs.size ) + && ( memory == rhs.memory ) + && ( memoryOffset == rhs.memoryOffset ) + && ( flags == rhs.flags ); + } + + bool operator!=( SparseMemoryBind const& rhs ) const + { + return !operator==( rhs ); + } + + DeviceSize resourceOffset; + DeviceSize size; + DeviceMemory memory; + DeviceSize memoryOffset; + SparseMemoryBindFlags flags; + }; + static_assert( sizeof( SparseMemoryBind ) == sizeof( VkSparseMemoryBind ), "struct and wrapper have different size!" 
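+  // Usage sketch (illustrative): a SparseMemoryBind maps a byte range of a sparse resource onto
+  // a range of a DeviceMemory allocation. Assuming `memory` and a 64 KiB page at the start of a
+  // sparse buffer:
+  //
+  //   vk::SparseMemoryBind bind = vk::SparseMemoryBind()
+  //     .setResourceOffset( 0 )
+  //     .setSize( 65536 )
+  //     .setMemory( memory )
+  //     .setMemoryOffset( 0 );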
); + + struct SparseImageMemoryBind + { + SparseImageMemoryBind( ImageSubresource subresource_ = ImageSubresource(), + Offset3D offset_ = Offset3D(), + Extent3D extent_ = Extent3D(), + DeviceMemory memory_ = DeviceMemory(), + DeviceSize memoryOffset_ = 0, + SparseMemoryBindFlags flags_ = SparseMemoryBindFlags() ) + : subresource( subresource_ ) + , offset( offset_ ) + , extent( extent_ ) + , memory( memory_ ) + , memoryOffset( memoryOffset_ ) + , flags( flags_ ) + { + } + + SparseImageMemoryBind( VkSparseImageMemoryBind const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseImageMemoryBind ) ); + } + + SparseImageMemoryBind& operator=( VkSparseImageMemoryBind const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseImageMemoryBind ) ); + return *this; + } + SparseImageMemoryBind& setSubresource( ImageSubresource subresource_ ) + { + subresource = subresource_; + return *this; + } + + SparseImageMemoryBind& setOffset( Offset3D offset_ ) + { + offset = offset_; + return *this; + } + + SparseImageMemoryBind& setExtent( Extent3D extent_ ) + { + extent = extent_; + return *this; + } + + SparseImageMemoryBind& setMemory( DeviceMemory memory_ ) + { + memory = memory_; + return *this; + } + + SparseImageMemoryBind& setMemoryOffset( DeviceSize memoryOffset_ ) + { + memoryOffset = memoryOffset_; + return *this; + } + + SparseImageMemoryBind& setFlags( SparseMemoryBindFlags flags_ ) + { + flags = flags_; + return *this; + } + + operator VkSparseImageMemoryBind const&() const + { + return *reinterpret_cast(this); + } + + operator VkSparseImageMemoryBind &() + { + return *reinterpret_cast(this); + } + + bool operator==( SparseImageMemoryBind const& rhs ) const + { + return ( subresource == rhs.subresource ) + && ( offset == rhs.offset ) + && ( extent == rhs.extent ) + && ( memory == rhs.memory ) + && ( memoryOffset == rhs.memoryOffset ) + && ( flags == rhs.flags ); + } + + bool operator!=( SparseImageMemoryBind const& rhs ) const + { + return !operator==( rhs ); + } + + ImageSubresource subresource; + Offset3D offset; + Extent3D extent; + DeviceMemory memory; + DeviceSize memoryOffset; + SparseMemoryBindFlags flags; + }; + static_assert( sizeof( SparseImageMemoryBind ) == sizeof( VkSparseImageMemoryBind ), "struct and wrapper have different size!" 
); + + struct SparseBufferMemoryBindInfo + { + SparseBufferMemoryBindInfo( Buffer buffer_ = Buffer(), + uint32_t bindCount_ = 0, + const SparseMemoryBind* pBinds_ = nullptr ) + : buffer( buffer_ ) + , bindCount( bindCount_ ) + , pBinds( pBinds_ ) + { + } + + SparseBufferMemoryBindInfo( VkSparseBufferMemoryBindInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseBufferMemoryBindInfo ) ); + } + + SparseBufferMemoryBindInfo& operator=( VkSparseBufferMemoryBindInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseBufferMemoryBindInfo ) ); + return *this; + } + SparseBufferMemoryBindInfo& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + SparseBufferMemoryBindInfo& setBindCount( uint32_t bindCount_ ) + { + bindCount = bindCount_; + return *this; + } + + SparseBufferMemoryBindInfo& setPBinds( const SparseMemoryBind* pBinds_ ) + { + pBinds = pBinds_; + return *this; + } + + operator VkSparseBufferMemoryBindInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkSparseBufferMemoryBindInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( SparseBufferMemoryBindInfo const& rhs ) const + { + return ( buffer == rhs.buffer ) + && ( bindCount == rhs.bindCount ) + && ( pBinds == rhs.pBinds ); + } + + bool operator!=( SparseBufferMemoryBindInfo const& rhs ) const + { + return !operator==( rhs ); + } + + Buffer buffer; + uint32_t bindCount; + const SparseMemoryBind* pBinds; + }; + static_assert( sizeof( SparseBufferMemoryBindInfo ) == sizeof( VkSparseBufferMemoryBindInfo ), "struct and wrapper have different size!" ); + + struct SparseImageOpaqueMemoryBindInfo + { + SparseImageOpaqueMemoryBindInfo( Image image_ = Image(), + uint32_t bindCount_ = 0, + const SparseMemoryBind* pBinds_ = nullptr ) + : image( image_ ) + , bindCount( bindCount_ ) + , pBinds( pBinds_ ) + { + } + + SparseImageOpaqueMemoryBindInfo( VkSparseImageOpaqueMemoryBindInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseImageOpaqueMemoryBindInfo ) ); + } + + SparseImageOpaqueMemoryBindInfo& operator=( VkSparseImageOpaqueMemoryBindInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseImageOpaqueMemoryBindInfo ) ); + return *this; + } + SparseImageOpaqueMemoryBindInfo& setImage( Image image_ ) + { + image = image_; + return *this; + } + + SparseImageOpaqueMemoryBindInfo& setBindCount( uint32_t bindCount_ ) + { + bindCount = bindCount_; + return *this; + } + + SparseImageOpaqueMemoryBindInfo& setPBinds( const SparseMemoryBind* pBinds_ ) + { + pBinds = pBinds_; + return *this; + } + + operator VkSparseImageOpaqueMemoryBindInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkSparseImageOpaqueMemoryBindInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( SparseImageOpaqueMemoryBindInfo const& rhs ) const + { + return ( image == rhs.image ) + && ( bindCount == rhs.bindCount ) + && ( pBinds == rhs.pBinds ); + } + + bool operator!=( SparseImageOpaqueMemoryBindInfo const& rhs ) const + { + return !operator==( rhs ); + } + + Image image; + uint32_t bindCount; + const SparseMemoryBind* pBinds; + }; + static_assert( sizeof( SparseImageOpaqueMemoryBindInfo ) == sizeof( VkSparseImageOpaqueMemoryBindInfo ), "struct and wrapper have different size!" 
); + + struct SparseImageMemoryBindInfo + { + SparseImageMemoryBindInfo( Image image_ = Image(), + uint32_t bindCount_ = 0, + const SparseImageMemoryBind* pBinds_ = nullptr ) + : image( image_ ) + , bindCount( bindCount_ ) + , pBinds( pBinds_ ) + { + } + + SparseImageMemoryBindInfo( VkSparseImageMemoryBindInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseImageMemoryBindInfo ) ); + } + + SparseImageMemoryBindInfo& operator=( VkSparseImageMemoryBindInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SparseImageMemoryBindInfo ) ); + return *this; + } + SparseImageMemoryBindInfo& setImage( Image image_ ) + { + image = image_; + return *this; + } + + SparseImageMemoryBindInfo& setBindCount( uint32_t bindCount_ ) + { + bindCount = bindCount_; + return *this; + } + + SparseImageMemoryBindInfo& setPBinds( const SparseImageMemoryBind* pBinds_ ) + { + pBinds = pBinds_; + return *this; + } + + operator VkSparseImageMemoryBindInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkSparseImageMemoryBindInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( SparseImageMemoryBindInfo const& rhs ) const + { + return ( image == rhs.image ) + && ( bindCount == rhs.bindCount ) + && ( pBinds == rhs.pBinds ); + } + + bool operator!=( SparseImageMemoryBindInfo const& rhs ) const + { + return !operator==( rhs ); + } + + Image image; + uint32_t bindCount; + const SparseImageMemoryBind* pBinds; + }; + static_assert( sizeof( SparseImageMemoryBindInfo ) == sizeof( VkSparseImageMemoryBindInfo ), "struct and wrapper have different size!" ); + + struct BindSparseInfo + { + BindSparseInfo( uint32_t waitSemaphoreCount_ = 0, + const Semaphore* pWaitSemaphores_ = nullptr, + uint32_t bufferBindCount_ = 0, + const SparseBufferMemoryBindInfo* pBufferBinds_ = nullptr, + uint32_t imageOpaqueBindCount_ = 0, + const SparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds_ = nullptr, + uint32_t imageBindCount_ = 0, + const SparseImageMemoryBindInfo* pImageBinds_ = nullptr, + uint32_t signalSemaphoreCount_ = 0, + const Semaphore* pSignalSemaphores_ = nullptr ) + : waitSemaphoreCount( waitSemaphoreCount_ ) + , pWaitSemaphores( pWaitSemaphores_ ) + , bufferBindCount( bufferBindCount_ ) + , pBufferBinds( pBufferBinds_ ) + , imageOpaqueBindCount( imageOpaqueBindCount_ ) + , pImageOpaqueBinds( pImageOpaqueBinds_ ) + , imageBindCount( imageBindCount_ ) + , pImageBinds( pImageBinds_ ) + , signalSemaphoreCount( signalSemaphoreCount_ ) + , pSignalSemaphores( pSignalSemaphores_ ) + { + } + + BindSparseInfo( VkBindSparseInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BindSparseInfo ) ); + } + + BindSparseInfo& operator=( VkBindSparseInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( BindSparseInfo ) ); + return *this; + } + BindSparseInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + BindSparseInfo& setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ ) + { + waitSemaphoreCount = waitSemaphoreCount_; + return *this; + } + + BindSparseInfo& setPWaitSemaphores( const Semaphore* pWaitSemaphores_ ) + { + pWaitSemaphores = pWaitSemaphores_; + return *this; + } + + BindSparseInfo& setBufferBindCount( uint32_t bufferBindCount_ ) + { + bufferBindCount = bufferBindCount_; + return *this; + } + + BindSparseInfo& setPBufferBinds( const SparseBufferMemoryBindInfo* pBufferBinds_ ) + { + pBufferBinds = pBufferBinds_; + return *this; + } + + BindSparseInfo& setImageOpaqueBindCount( uint32_t imageOpaqueBindCount_ ) + { + imageOpaqueBindCount = imageOpaqueBindCount_; + return 
*this; + } + + BindSparseInfo& setPImageOpaqueBinds( const SparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds_ ) + { + pImageOpaqueBinds = pImageOpaqueBinds_; + return *this; + } + + BindSparseInfo& setImageBindCount( uint32_t imageBindCount_ ) + { + imageBindCount = imageBindCount_; + return *this; + } + + BindSparseInfo& setPImageBinds( const SparseImageMemoryBindInfo* pImageBinds_ ) + { + pImageBinds = pImageBinds_; + return *this; + } + + BindSparseInfo& setSignalSemaphoreCount( uint32_t signalSemaphoreCount_ ) + { + signalSemaphoreCount = signalSemaphoreCount_; + return *this; + } + + BindSparseInfo& setPSignalSemaphores( const Semaphore* pSignalSemaphores_ ) + { + pSignalSemaphores = pSignalSemaphores_; + return *this; + } + + operator VkBindSparseInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkBindSparseInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( BindSparseInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreCount == rhs.waitSemaphoreCount ) + && ( pWaitSemaphores == rhs.pWaitSemaphores ) + && ( bufferBindCount == rhs.bufferBindCount ) + && ( pBufferBinds == rhs.pBufferBinds ) + && ( imageOpaqueBindCount == rhs.imageOpaqueBindCount ) + && ( pImageOpaqueBinds == rhs.pImageOpaqueBinds ) + && ( imageBindCount == rhs.imageBindCount ) + && ( pImageBinds == rhs.pImageBinds ) + && ( signalSemaphoreCount == rhs.signalSemaphoreCount ) + && ( pSignalSemaphores == rhs.pSignalSemaphores ); + } + + bool operator!=( BindSparseInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eBindSparseInfo; + + public: + const void* pNext = nullptr; + uint32_t waitSemaphoreCount; + const Semaphore* pWaitSemaphores; + uint32_t bufferBindCount; + const SparseBufferMemoryBindInfo* pBufferBinds; + uint32_t imageOpaqueBindCount; + const SparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + uint32_t imageBindCount; + const SparseImageMemoryBindInfo* pImageBinds; + uint32_t signalSemaphoreCount; + const Semaphore* pSignalSemaphores; + }; + static_assert( sizeof( BindSparseInfo ) == sizeof( VkBindSparseInfo ), "struct and wrapper have different size!" 
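+  // Usage sketch (illustrative): sparse binds are submitted on a queue with sparse-binding
+  // capability rather than recorded in a command buffer. Assuming `queue`, `buffer`, a
+  // SparseMemoryBind `bind` and a `fence`:
+  //
+  //   vk::SparseBufferMemoryBindInfo bufferBinds( buffer, 1, &bind );
+  //   vk::BindSparseInfo info = vk::BindSparseInfo()
+  //     .setBufferBindCount( 1 )
+  //     .setPBufferBinds( &bufferBinds );
+  //   queue.bindSparse( info, fence );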
); + + enum class PipelineStageFlagBits + { + eTopOfPipe = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + eDrawIndirect = VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, + eVertexInput = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, + eVertexShader = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, + eTessellationControlShader = VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, + eTessellationEvaluationShader = VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, + eGeometryShader = VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, + eFragmentShader = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, + eEarlyFragmentTests = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, + eLateFragmentTests = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, + eColorAttachmentOutput = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + eComputeShader = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + eTransfer = VK_PIPELINE_STAGE_TRANSFER_BIT, + eBottomOfPipe = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, + eHost = VK_PIPELINE_STAGE_HOST_BIT, + eAllGraphics = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, + eAllCommands = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + eConditionalRenderingEXT = VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, + eCommandProcessNVX = VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, + eShadingRateImageNV = VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, + eRaytracingNVX = VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX, + eTaskShaderNV = VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, + eMeshShaderNV = VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV + }; + + using PipelineStageFlags = Flags; + + VULKAN_HPP_INLINE PipelineStageFlags operator|( PipelineStageFlagBits bit0, PipelineStageFlagBits bit1 ) + { + return PipelineStageFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE PipelineStageFlags operator~( PipelineStageFlagBits bits ) + { + return ~( PipelineStageFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(PipelineStageFlagBits::eTopOfPipe) | VkFlags(PipelineStageFlagBits::eDrawIndirect) | VkFlags(PipelineStageFlagBits::eVertexInput) | VkFlags(PipelineStageFlagBits::eVertexShader) | VkFlags(PipelineStageFlagBits::eTessellationControlShader) | VkFlags(PipelineStageFlagBits::eTessellationEvaluationShader) | VkFlags(PipelineStageFlagBits::eGeometryShader) | VkFlags(PipelineStageFlagBits::eFragmentShader) | VkFlags(PipelineStageFlagBits::eEarlyFragmentTests) | VkFlags(PipelineStageFlagBits::eLateFragmentTests) | VkFlags(PipelineStageFlagBits::eColorAttachmentOutput) | VkFlags(PipelineStageFlagBits::eComputeShader) | VkFlags(PipelineStageFlagBits::eTransfer) | VkFlags(PipelineStageFlagBits::eBottomOfPipe) | VkFlags(PipelineStageFlagBits::eHost) | VkFlags(PipelineStageFlagBits::eAllGraphics) | VkFlags(PipelineStageFlagBits::eAllCommands) | VkFlags(PipelineStageFlagBits::eConditionalRenderingEXT) | VkFlags(PipelineStageFlagBits::eCommandProcessNVX) | VkFlags(PipelineStageFlagBits::eShadingRateImageNV) | VkFlags(PipelineStageFlagBits::eRaytracingNVX) | VkFlags(PipelineStageFlagBits::eTaskShaderNV) | VkFlags(PipelineStageFlagBits::eMeshShaderNV) + }; + }; + + struct QueueFamilyCheckpointPropertiesNV + { + operator VkQueueFamilyCheckpointPropertiesNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkQueueFamilyCheckpointPropertiesNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( QueueFamilyCheckpointPropertiesNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( checkpointExecutionStageMask == rhs.checkpointExecutionStageMask ); + } + + bool operator!=( QueueFamilyCheckpointPropertiesNV const& rhs ) const + { + 
return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eQueueFamilyCheckpointPropertiesNV; + + public: + void* pNext = nullptr; + PipelineStageFlags checkpointExecutionStageMask; + }; + static_assert( sizeof( QueueFamilyCheckpointPropertiesNV ) == sizeof( VkQueueFamilyCheckpointPropertiesNV ), "struct and wrapper have different size!" ); + + struct CheckpointDataNV + { + operator VkCheckpointDataNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkCheckpointDataNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( CheckpointDataNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( stage == rhs.stage ) + && ( pCheckpointMarker == rhs.pCheckpointMarker ); + } + + bool operator!=( CheckpointDataNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eCheckpointDataNV; + + public: + void* pNext = nullptr; + PipelineStageFlagBits stage; + void* pCheckpointMarker; + }; + static_assert( sizeof( CheckpointDataNV ) == sizeof( VkCheckpointDataNV ), "struct and wrapper have different size!" ); + + enum class CommandPoolCreateFlagBits + { + eTransient = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, + eResetCommandBuffer = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, + eProtected = VK_COMMAND_POOL_CREATE_PROTECTED_BIT + }; + + using CommandPoolCreateFlags = Flags; + + VULKAN_HPP_INLINE CommandPoolCreateFlags operator|( CommandPoolCreateFlagBits bit0, CommandPoolCreateFlagBits bit1 ) + { + return CommandPoolCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE CommandPoolCreateFlags operator~( CommandPoolCreateFlagBits bits ) + { + return ~( CommandPoolCreateFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(CommandPoolCreateFlagBits::eTransient) | VkFlags(CommandPoolCreateFlagBits::eResetCommandBuffer) | VkFlags(CommandPoolCreateFlagBits::eProtected) + }; + }; + + struct CommandPoolCreateInfo + { + CommandPoolCreateInfo( CommandPoolCreateFlags flags_ = CommandPoolCreateFlags(), + uint32_t queueFamilyIndex_ = 0 ) + : flags( flags_ ) + , queueFamilyIndex( queueFamilyIndex_ ) + { + } + + CommandPoolCreateInfo( VkCommandPoolCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandPoolCreateInfo ) ); + } + + CommandPoolCreateInfo& operator=( VkCommandPoolCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( CommandPoolCreateInfo ) ); + return *this; + } + CommandPoolCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + CommandPoolCreateInfo& setFlags( CommandPoolCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + CommandPoolCreateInfo& setQueueFamilyIndex( uint32_t queueFamilyIndex_ ) + { + queueFamilyIndex = queueFamilyIndex_; + return *this; + } + + operator VkCommandPoolCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkCommandPoolCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( CommandPoolCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( queueFamilyIndex == rhs.queueFamilyIndex ); + } + + bool operator!=( CommandPoolCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eCommandPoolCreateInfo; + + public: + const void* pNext = nullptr; + CommandPoolCreateFlags flags; + uint32_t queueFamilyIndex; + }; + static_assert( sizeof( CommandPoolCreateInfo ) 
== sizeof( VkCommandPoolCreateInfo ), "struct and wrapper have different size!" ); + + enum class CommandPoolResetFlagBits + { + eReleaseResources = VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT + }; + + using CommandPoolResetFlags = Flags; + + VULKAN_HPP_INLINE CommandPoolResetFlags operator|( CommandPoolResetFlagBits bit0, CommandPoolResetFlagBits bit1 ) + { + return CommandPoolResetFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE CommandPoolResetFlags operator~( CommandPoolResetFlagBits bits ) + { + return ~( CommandPoolResetFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(CommandPoolResetFlagBits::eReleaseResources) + }; + }; + + enum class CommandBufferResetFlagBits + { + eReleaseResources = VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT + }; + + using CommandBufferResetFlags = Flags; + + VULKAN_HPP_INLINE CommandBufferResetFlags operator|( CommandBufferResetFlagBits bit0, CommandBufferResetFlagBits bit1 ) + { + return CommandBufferResetFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE CommandBufferResetFlags operator~( CommandBufferResetFlagBits bits ) + { + return ~( CommandBufferResetFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(CommandBufferResetFlagBits::eReleaseResources) + }; + }; + + enum class SampleCountFlagBits + { + e1 = VK_SAMPLE_COUNT_1_BIT, + e2 = VK_SAMPLE_COUNT_2_BIT, + e4 = VK_SAMPLE_COUNT_4_BIT, + e8 = VK_SAMPLE_COUNT_8_BIT, + e16 = VK_SAMPLE_COUNT_16_BIT, + e32 = VK_SAMPLE_COUNT_32_BIT, + e64 = VK_SAMPLE_COUNT_64_BIT + }; + + using SampleCountFlags = Flags; + + VULKAN_HPP_INLINE SampleCountFlags operator|( SampleCountFlagBits bit0, SampleCountFlagBits bit1 ) + { + return SampleCountFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE SampleCountFlags operator~( SampleCountFlagBits bits ) + { + return ~( SampleCountFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(SampleCountFlagBits::e1) | VkFlags(SampleCountFlagBits::e2) | VkFlags(SampleCountFlagBits::e4) | VkFlags(SampleCountFlagBits::e8) | VkFlags(SampleCountFlagBits::e16) | VkFlags(SampleCountFlagBits::e32) | VkFlags(SampleCountFlagBits::e64) + }; + }; + + struct ImageFormatProperties + { + operator VkImageFormatProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageFormatProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageFormatProperties const& rhs ) const + { + return ( maxExtent == rhs.maxExtent ) + && ( maxMipLevels == rhs.maxMipLevels ) + && ( maxArrayLayers == rhs.maxArrayLayers ) + && ( sampleCounts == rhs.sampleCounts ) + && ( maxResourceSize == rhs.maxResourceSize ); + } + + bool operator!=( ImageFormatProperties const& rhs ) const + { + return !operator==( rhs ); + } + + Extent3D maxExtent; + uint32_t maxMipLevels; + uint32_t maxArrayLayers; + SampleCountFlags sampleCounts; + DeviceSize maxResourceSize; + }; + static_assert( sizeof( ImageFormatProperties ) == sizeof( VkImageFormatProperties ), "struct and wrapper have different size!" 
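+  // Usage sketch (illustrative): ImageFormatProperties is returned by
+  // PhysicalDevice::getImageFormatProperties and can be used to validate a sample count before
+  // creating an image. Assuming `physicalDevice`:
+  //
+  //   vk::ImageFormatProperties props = physicalDevice.getImageFormatProperties(
+  //     vk::Format::eR8G8B8A8Unorm, vk::ImageType::e2D, vk::ImageTiling::eOptimal,
+  //     vk::ImageUsageFlagBits::eColorAttachment, vk::ImageCreateFlags() );
+  //   bool msaa4x = bool( props.sampleCounts & vk::SampleCountFlagBits::e4 );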
); + + struct ImageCreateInfo + { + ImageCreateInfo( ImageCreateFlags flags_ = ImageCreateFlags(), + ImageType imageType_ = ImageType::e1D, + Format format_ = Format::eUndefined, + Extent3D extent_ = Extent3D(), + uint32_t mipLevels_ = 0, + uint32_t arrayLayers_ = 0, + SampleCountFlagBits samples_ = SampleCountFlagBits::e1, + ImageTiling tiling_ = ImageTiling::eOptimal, + ImageUsageFlags usage_ = ImageUsageFlags(), + SharingMode sharingMode_ = SharingMode::eExclusive, + uint32_t queueFamilyIndexCount_ = 0, + const uint32_t* pQueueFamilyIndices_ = nullptr, + ImageLayout initialLayout_ = ImageLayout::eUndefined ) + : flags( flags_ ) + , imageType( imageType_ ) + , format( format_ ) + , extent( extent_ ) + , mipLevels( mipLevels_ ) + , arrayLayers( arrayLayers_ ) + , samples( samples_ ) + , tiling( tiling_ ) + , usage( usage_ ) + , sharingMode( sharingMode_ ) + , queueFamilyIndexCount( queueFamilyIndexCount_ ) + , pQueueFamilyIndices( pQueueFamilyIndices_ ) + , initialLayout( initialLayout_ ) + { + } + + ImageCreateInfo( VkImageCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageCreateInfo ) ); + } + + ImageCreateInfo& operator=( VkImageCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ImageCreateInfo ) ); + return *this; + } + ImageCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImageCreateInfo& setFlags( ImageCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + ImageCreateInfo& setImageType( ImageType imageType_ ) + { + imageType = imageType_; + return *this; + } + + ImageCreateInfo& setFormat( Format format_ ) + { + format = format_; + return *this; + } + + ImageCreateInfo& setExtent( Extent3D extent_ ) + { + extent = extent_; + return *this; + } + + ImageCreateInfo& setMipLevels( uint32_t mipLevels_ ) + { + mipLevels = mipLevels_; + return *this; + } + + ImageCreateInfo& setArrayLayers( uint32_t arrayLayers_ ) + { + arrayLayers = arrayLayers_; + return *this; + } + + ImageCreateInfo& setSamples( SampleCountFlagBits samples_ ) + { + samples = samples_; + return *this; + } + + ImageCreateInfo& setTiling( ImageTiling tiling_ ) + { + tiling = tiling_; + return *this; + } + + ImageCreateInfo& setUsage( ImageUsageFlags usage_ ) + { + usage = usage_; + return *this; + } + + ImageCreateInfo& setSharingMode( SharingMode sharingMode_ ) + { + sharingMode = sharingMode_; + return *this; + } + + ImageCreateInfo& setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ ) + { + queueFamilyIndexCount = queueFamilyIndexCount_; + return *this; + } + + ImageCreateInfo& setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ ) + { + pQueueFamilyIndices = pQueueFamilyIndices_; + return *this; + } + + ImageCreateInfo& setInitialLayout( ImageLayout initialLayout_ ) + { + initialLayout = initialLayout_; + return *this; + } + + operator VkImageCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( imageType == rhs.imageType ) + && ( format == rhs.format ) + && ( extent == rhs.extent ) + && ( mipLevels == rhs.mipLevels ) + && ( arrayLayers == rhs.arrayLayers ) + && ( samples == rhs.samples ) + && ( tiling == rhs.tiling ) + && ( usage == rhs.usage ) + && ( sharingMode == rhs.sharingMode ) + && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount ) + && ( pQueueFamilyIndices == 
rhs.pQueueFamilyIndices ) + && ( initialLayout == rhs.initialLayout ); + } + + bool operator!=( ImageCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageCreateInfo; + + public: + const void* pNext = nullptr; + ImageCreateFlags flags; + ImageType imageType; + Format format; + Extent3D extent; + uint32_t mipLevels; + uint32_t arrayLayers; + SampleCountFlagBits samples; + ImageTiling tiling; + ImageUsageFlags usage; + SharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + ImageLayout initialLayout; + }; + static_assert( sizeof( ImageCreateInfo ) == sizeof( VkImageCreateInfo ), "struct and wrapper have different size!" ); + + struct PipelineMultisampleStateCreateInfo + { + PipelineMultisampleStateCreateInfo( PipelineMultisampleStateCreateFlags flags_ = PipelineMultisampleStateCreateFlags(), + SampleCountFlagBits rasterizationSamples_ = SampleCountFlagBits::e1, + Bool32 sampleShadingEnable_ = 0, + float minSampleShading_ = 0, + const SampleMask* pSampleMask_ = nullptr, + Bool32 alphaToCoverageEnable_ = 0, + Bool32 alphaToOneEnable_ = 0 ) + : flags( flags_ ) + , rasterizationSamples( rasterizationSamples_ ) + , sampleShadingEnable( sampleShadingEnable_ ) + , minSampleShading( minSampleShading_ ) + , pSampleMask( pSampleMask_ ) + , alphaToCoverageEnable( alphaToCoverageEnable_ ) + , alphaToOneEnable( alphaToOneEnable_ ) + { + } + + PipelineMultisampleStateCreateInfo( VkPipelineMultisampleStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineMultisampleStateCreateInfo ) ); + } + + PipelineMultisampleStateCreateInfo& operator=( VkPipelineMultisampleStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineMultisampleStateCreateInfo ) ); + return *this; + } + PipelineMultisampleStateCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineMultisampleStateCreateInfo& setFlags( PipelineMultisampleStateCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PipelineMultisampleStateCreateInfo& setRasterizationSamples( SampleCountFlagBits rasterizationSamples_ ) + { + rasterizationSamples = rasterizationSamples_; + return *this; + } + + PipelineMultisampleStateCreateInfo& setSampleShadingEnable( Bool32 sampleShadingEnable_ ) + { + sampleShadingEnable = sampleShadingEnable_; + return *this; + } + + PipelineMultisampleStateCreateInfo& setMinSampleShading( float minSampleShading_ ) + { + minSampleShading = minSampleShading_; + return *this; + } + + PipelineMultisampleStateCreateInfo& setPSampleMask( const SampleMask* pSampleMask_ ) + { + pSampleMask = pSampleMask_; + return *this; + } + + PipelineMultisampleStateCreateInfo& setAlphaToCoverageEnable( Bool32 alphaToCoverageEnable_ ) + { + alphaToCoverageEnable = alphaToCoverageEnable_; + return *this; + } + + PipelineMultisampleStateCreateInfo& setAlphaToOneEnable( Bool32 alphaToOneEnable_ ) + { + alphaToOneEnable = alphaToOneEnable_; + return *this; + } + + operator VkPipelineMultisampleStateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineMultisampleStateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineMultisampleStateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( rasterizationSamples == rhs.rasterizationSamples ) + && ( sampleShadingEnable == rhs.sampleShadingEnable ) + && ( minSampleShading == 
rhs.minSampleShading ) + && ( pSampleMask == rhs.pSampleMask ) + && ( alphaToCoverageEnable == rhs.alphaToCoverageEnable ) + && ( alphaToOneEnable == rhs.alphaToOneEnable ); + } + + bool operator!=( PipelineMultisampleStateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineMultisampleStateCreateInfo; + + public: + const void* pNext = nullptr; + PipelineMultisampleStateCreateFlags flags; + SampleCountFlagBits rasterizationSamples; + Bool32 sampleShadingEnable; + float minSampleShading; + const SampleMask* pSampleMask; + Bool32 alphaToCoverageEnable; + Bool32 alphaToOneEnable; + }; + static_assert( sizeof( PipelineMultisampleStateCreateInfo ) == sizeof( VkPipelineMultisampleStateCreateInfo ), "struct and wrapper have different size!" ); + + struct GraphicsPipelineCreateInfo + { + GraphicsPipelineCreateInfo( PipelineCreateFlags flags_ = PipelineCreateFlags(), + uint32_t stageCount_ = 0, + const PipelineShaderStageCreateInfo* pStages_ = nullptr, + const PipelineVertexInputStateCreateInfo* pVertexInputState_ = nullptr, + const PipelineInputAssemblyStateCreateInfo* pInputAssemblyState_ = nullptr, + const PipelineTessellationStateCreateInfo* pTessellationState_ = nullptr, + const PipelineViewportStateCreateInfo* pViewportState_ = nullptr, + const PipelineRasterizationStateCreateInfo* pRasterizationState_ = nullptr, + const PipelineMultisampleStateCreateInfo* pMultisampleState_ = nullptr, + const PipelineDepthStencilStateCreateInfo* pDepthStencilState_ = nullptr, + const PipelineColorBlendStateCreateInfo* pColorBlendState_ = nullptr, + const PipelineDynamicStateCreateInfo* pDynamicState_ = nullptr, + PipelineLayout layout_ = PipelineLayout(), + RenderPass renderPass_ = RenderPass(), + uint32_t subpass_ = 0, + Pipeline basePipelineHandle_ = Pipeline(), + int32_t basePipelineIndex_ = 0 ) + : flags( flags_ ) + , stageCount( stageCount_ ) + , pStages( pStages_ ) + , pVertexInputState( pVertexInputState_ ) + , pInputAssemblyState( pInputAssemblyState_ ) + , pTessellationState( pTessellationState_ ) + , pViewportState( pViewportState_ ) + , pRasterizationState( pRasterizationState_ ) + , pMultisampleState( pMultisampleState_ ) + , pDepthStencilState( pDepthStencilState_ ) + , pColorBlendState( pColorBlendState_ ) + , pDynamicState( pDynamicState_ ) + , layout( layout_ ) + , renderPass( renderPass_ ) + , subpass( subpass_ ) + , basePipelineHandle( basePipelineHandle_ ) + , basePipelineIndex( basePipelineIndex_ ) + { + } + + GraphicsPipelineCreateInfo( VkGraphicsPipelineCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( GraphicsPipelineCreateInfo ) ); + } + + GraphicsPipelineCreateInfo& operator=( VkGraphicsPipelineCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( GraphicsPipelineCreateInfo ) ); + return *this; + } + GraphicsPipelineCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + GraphicsPipelineCreateInfo& setFlags( PipelineCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + GraphicsPipelineCreateInfo& setStageCount( uint32_t stageCount_ ) + { + stageCount = stageCount_; + return *this; + } + + GraphicsPipelineCreateInfo& setPStages( const PipelineShaderStageCreateInfo* pStages_ ) + { + pStages = pStages_; + return *this; + } + + GraphicsPipelineCreateInfo& setPVertexInputState( const PipelineVertexInputStateCreateInfo* pVertexInputState_ ) + { + pVertexInputState = pVertexInputState_; + return *this; + } + + GraphicsPipelineCreateInfo& 
setPInputAssemblyState( const PipelineInputAssemblyStateCreateInfo* pInputAssemblyState_ ) + { + pInputAssemblyState = pInputAssemblyState_; + return *this; + } + + GraphicsPipelineCreateInfo& setPTessellationState( const PipelineTessellationStateCreateInfo* pTessellationState_ ) + { + pTessellationState = pTessellationState_; + return *this; + } + + GraphicsPipelineCreateInfo& setPViewportState( const PipelineViewportStateCreateInfo* pViewportState_ ) + { + pViewportState = pViewportState_; + return *this; + } + + GraphicsPipelineCreateInfo& setPRasterizationState( const PipelineRasterizationStateCreateInfo* pRasterizationState_ ) + { + pRasterizationState = pRasterizationState_; + return *this; + } + + GraphicsPipelineCreateInfo& setPMultisampleState( const PipelineMultisampleStateCreateInfo* pMultisampleState_ ) + { + pMultisampleState = pMultisampleState_; + return *this; + } + + GraphicsPipelineCreateInfo& setPDepthStencilState( const PipelineDepthStencilStateCreateInfo* pDepthStencilState_ ) + { + pDepthStencilState = pDepthStencilState_; + return *this; + } + + GraphicsPipelineCreateInfo& setPColorBlendState( const PipelineColorBlendStateCreateInfo* pColorBlendState_ ) + { + pColorBlendState = pColorBlendState_; + return *this; + } + + GraphicsPipelineCreateInfo& setPDynamicState( const PipelineDynamicStateCreateInfo* pDynamicState_ ) + { + pDynamicState = pDynamicState_; + return *this; + } + + GraphicsPipelineCreateInfo& setLayout( PipelineLayout layout_ ) + { + layout = layout_; + return *this; + } + + GraphicsPipelineCreateInfo& setRenderPass( RenderPass renderPass_ ) + { + renderPass = renderPass_; + return *this; + } + + GraphicsPipelineCreateInfo& setSubpass( uint32_t subpass_ ) + { + subpass = subpass_; + return *this; + } + + GraphicsPipelineCreateInfo& setBasePipelineHandle( Pipeline basePipelineHandle_ ) + { + basePipelineHandle = basePipelineHandle_; + return *this; + } + + GraphicsPipelineCreateInfo& setBasePipelineIndex( int32_t basePipelineIndex_ ) + { + basePipelineIndex = basePipelineIndex_; + return *this; + } + + operator VkGraphicsPipelineCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkGraphicsPipelineCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( GraphicsPipelineCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( stageCount == rhs.stageCount ) + && ( pStages == rhs.pStages ) + && ( pVertexInputState == rhs.pVertexInputState ) + && ( pInputAssemblyState == rhs.pInputAssemblyState ) + && ( pTessellationState == rhs.pTessellationState ) + && ( pViewportState == rhs.pViewportState ) + && ( pRasterizationState == rhs.pRasterizationState ) + && ( pMultisampleState == rhs.pMultisampleState ) + && ( pDepthStencilState == rhs.pDepthStencilState ) + && ( pColorBlendState == rhs.pColorBlendState ) + && ( pDynamicState == rhs.pDynamicState ) + && ( layout == rhs.layout ) + && ( renderPass == rhs.renderPass ) + && ( subpass == rhs.subpass ) + && ( basePipelineHandle == rhs.basePipelineHandle ) + && ( basePipelineIndex == rhs.basePipelineIndex ); + } + + bool operator!=( GraphicsPipelineCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eGraphicsPipelineCreateInfo; + + public: + const void* pNext = nullptr; + PipelineCreateFlags flags; + uint32_t stageCount; + const PipelineShaderStageCreateInfo* pStages; + const PipelineVertexInputStateCreateInfo* pVertexInputState; + 
const PipelineInputAssemblyStateCreateInfo* pInputAssemblyState; + const PipelineTessellationStateCreateInfo* pTessellationState; + const PipelineViewportStateCreateInfo* pViewportState; + const PipelineRasterizationStateCreateInfo* pRasterizationState; + const PipelineMultisampleStateCreateInfo* pMultisampleState; + const PipelineDepthStencilStateCreateInfo* pDepthStencilState; + const PipelineColorBlendStateCreateInfo* pColorBlendState; + const PipelineDynamicStateCreateInfo* pDynamicState; + PipelineLayout layout; + RenderPass renderPass; + uint32_t subpass; + Pipeline basePipelineHandle; + int32_t basePipelineIndex; + }; + static_assert( sizeof( GraphicsPipelineCreateInfo ) == sizeof( VkGraphicsPipelineCreateInfo ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceLimits + { + operator VkPhysicalDeviceLimits const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceLimits &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceLimits const& rhs ) const + { + return ( maxImageDimension1D == rhs.maxImageDimension1D ) + && ( maxImageDimension2D == rhs.maxImageDimension2D ) + && ( maxImageDimension3D == rhs.maxImageDimension3D ) + && ( maxImageDimensionCube == rhs.maxImageDimensionCube ) + && ( maxImageArrayLayers == rhs.maxImageArrayLayers ) + && ( maxTexelBufferElements == rhs.maxTexelBufferElements ) + && ( maxUniformBufferRange == rhs.maxUniformBufferRange ) + && ( maxStorageBufferRange == rhs.maxStorageBufferRange ) + && ( maxPushConstantsSize == rhs.maxPushConstantsSize ) + && ( maxMemoryAllocationCount == rhs.maxMemoryAllocationCount ) + && ( maxSamplerAllocationCount == rhs.maxSamplerAllocationCount ) + && ( bufferImageGranularity == rhs.bufferImageGranularity ) + && ( sparseAddressSpaceSize == rhs.sparseAddressSpaceSize ) + && ( maxBoundDescriptorSets == rhs.maxBoundDescriptorSets ) + && ( maxPerStageDescriptorSamplers == rhs.maxPerStageDescriptorSamplers ) + && ( maxPerStageDescriptorUniformBuffers == rhs.maxPerStageDescriptorUniformBuffers ) + && ( maxPerStageDescriptorStorageBuffers == rhs.maxPerStageDescriptorStorageBuffers ) + && ( maxPerStageDescriptorSampledImages == rhs.maxPerStageDescriptorSampledImages ) + && ( maxPerStageDescriptorStorageImages == rhs.maxPerStageDescriptorStorageImages ) + && ( maxPerStageDescriptorInputAttachments == rhs.maxPerStageDescriptorInputAttachments ) + && ( maxPerStageResources == rhs.maxPerStageResources ) + && ( maxDescriptorSetSamplers == rhs.maxDescriptorSetSamplers ) + && ( maxDescriptorSetUniformBuffers == rhs.maxDescriptorSetUniformBuffers ) + && ( maxDescriptorSetUniformBuffersDynamic == rhs.maxDescriptorSetUniformBuffersDynamic ) + && ( maxDescriptorSetStorageBuffers == rhs.maxDescriptorSetStorageBuffers ) + && ( maxDescriptorSetStorageBuffersDynamic == rhs.maxDescriptorSetStorageBuffersDynamic ) + && ( maxDescriptorSetSampledImages == rhs.maxDescriptorSetSampledImages ) + && ( maxDescriptorSetStorageImages == rhs.maxDescriptorSetStorageImages ) + && ( maxDescriptorSetInputAttachments == rhs.maxDescriptorSetInputAttachments ) + && ( maxVertexInputAttributes == rhs.maxVertexInputAttributes ) + && ( maxVertexInputBindings == rhs.maxVertexInputBindings ) + && ( maxVertexInputAttributeOffset == rhs.maxVertexInputAttributeOffset ) + && ( maxVertexInputBindingStride == rhs.maxVertexInputBindingStride ) + && ( maxVertexOutputComponents == rhs.maxVertexOutputComponents ) + && ( maxTessellationGenerationLevel == rhs.maxTessellationGenerationLevel ) + && ( 
maxTessellationPatchSize == rhs.maxTessellationPatchSize ) + && ( maxTessellationControlPerVertexInputComponents == rhs.maxTessellationControlPerVertexInputComponents ) + && ( maxTessellationControlPerVertexOutputComponents == rhs.maxTessellationControlPerVertexOutputComponents ) + && ( maxTessellationControlPerPatchOutputComponents == rhs.maxTessellationControlPerPatchOutputComponents ) + && ( maxTessellationControlTotalOutputComponents == rhs.maxTessellationControlTotalOutputComponents ) + && ( maxTessellationEvaluationInputComponents == rhs.maxTessellationEvaluationInputComponents ) + && ( maxTessellationEvaluationOutputComponents == rhs.maxTessellationEvaluationOutputComponents ) + && ( maxGeometryShaderInvocations == rhs.maxGeometryShaderInvocations ) + && ( maxGeometryInputComponents == rhs.maxGeometryInputComponents ) + && ( maxGeometryOutputComponents == rhs.maxGeometryOutputComponents ) + && ( maxGeometryOutputVertices == rhs.maxGeometryOutputVertices ) + && ( maxGeometryTotalOutputComponents == rhs.maxGeometryTotalOutputComponents ) + && ( maxFragmentInputComponents == rhs.maxFragmentInputComponents ) + && ( maxFragmentOutputAttachments == rhs.maxFragmentOutputAttachments ) + && ( maxFragmentDualSrcAttachments == rhs.maxFragmentDualSrcAttachments ) + && ( maxFragmentCombinedOutputResources == rhs.maxFragmentCombinedOutputResources ) + && ( maxComputeSharedMemorySize == rhs.maxComputeSharedMemorySize ) + && ( memcmp( maxComputeWorkGroupCount, rhs.maxComputeWorkGroupCount, 3 * sizeof( uint32_t ) ) == 0 ) + && ( maxComputeWorkGroupInvocations == rhs.maxComputeWorkGroupInvocations ) + && ( memcmp( maxComputeWorkGroupSize, rhs.maxComputeWorkGroupSize, 3 * sizeof( uint32_t ) ) == 0 ) + && ( subPixelPrecisionBits == rhs.subPixelPrecisionBits ) + && ( subTexelPrecisionBits == rhs.subTexelPrecisionBits ) + && ( mipmapPrecisionBits == rhs.mipmapPrecisionBits ) + && ( maxDrawIndexedIndexValue == rhs.maxDrawIndexedIndexValue ) + && ( maxDrawIndirectCount == rhs.maxDrawIndirectCount ) + && ( maxSamplerLodBias == rhs.maxSamplerLodBias ) + && ( maxSamplerAnisotropy == rhs.maxSamplerAnisotropy ) + && ( maxViewports == rhs.maxViewports ) + && ( memcmp( maxViewportDimensions, rhs.maxViewportDimensions, 2 * sizeof( uint32_t ) ) == 0 ) + && ( memcmp( viewportBoundsRange, rhs.viewportBoundsRange, 2 * sizeof( float ) ) == 0 ) + && ( viewportSubPixelBits == rhs.viewportSubPixelBits ) + && ( minMemoryMapAlignment == rhs.minMemoryMapAlignment ) + && ( minTexelBufferOffsetAlignment == rhs.minTexelBufferOffsetAlignment ) + && ( minUniformBufferOffsetAlignment == rhs.minUniformBufferOffsetAlignment ) + && ( minStorageBufferOffsetAlignment == rhs.minStorageBufferOffsetAlignment ) + && ( minTexelOffset == rhs.minTexelOffset ) + && ( maxTexelOffset == rhs.maxTexelOffset ) + && ( minTexelGatherOffset == rhs.minTexelGatherOffset ) + && ( maxTexelGatherOffset == rhs.maxTexelGatherOffset ) + && ( minInterpolationOffset == rhs.minInterpolationOffset ) + && ( maxInterpolationOffset == rhs.maxInterpolationOffset ) + && ( subPixelInterpolationOffsetBits == rhs.subPixelInterpolationOffsetBits ) + && ( maxFramebufferWidth == rhs.maxFramebufferWidth ) + && ( maxFramebufferHeight == rhs.maxFramebufferHeight ) + && ( maxFramebufferLayers == rhs.maxFramebufferLayers ) + && ( framebufferColorSampleCounts == rhs.framebufferColorSampleCounts ) + && ( framebufferDepthSampleCounts == rhs.framebufferDepthSampleCounts ) + && ( framebufferStencilSampleCounts == rhs.framebufferStencilSampleCounts ) + && ( 
framebufferNoAttachmentsSampleCounts == rhs.framebufferNoAttachmentsSampleCounts ) + && ( maxColorAttachments == rhs.maxColorAttachments ) + && ( sampledImageColorSampleCounts == rhs.sampledImageColorSampleCounts ) + && ( sampledImageIntegerSampleCounts == rhs.sampledImageIntegerSampleCounts ) + && ( sampledImageDepthSampleCounts == rhs.sampledImageDepthSampleCounts ) + && ( sampledImageStencilSampleCounts == rhs.sampledImageStencilSampleCounts ) + && ( storageImageSampleCounts == rhs.storageImageSampleCounts ) + && ( maxSampleMaskWords == rhs.maxSampleMaskWords ) + && ( timestampComputeAndGraphics == rhs.timestampComputeAndGraphics ) + && ( timestampPeriod == rhs.timestampPeriod ) + && ( maxClipDistances == rhs.maxClipDistances ) + && ( maxCullDistances == rhs.maxCullDistances ) + && ( maxCombinedClipAndCullDistances == rhs.maxCombinedClipAndCullDistances ) + && ( discreteQueuePriorities == rhs.discreteQueuePriorities ) + && ( memcmp( pointSizeRange, rhs.pointSizeRange, 2 * sizeof( float ) ) == 0 ) + && ( memcmp( lineWidthRange, rhs.lineWidthRange, 2 * sizeof( float ) ) == 0 ) + && ( pointSizeGranularity == rhs.pointSizeGranularity ) + && ( lineWidthGranularity == rhs.lineWidthGranularity ) + && ( strictLines == rhs.strictLines ) + && ( standardSampleLocations == rhs.standardSampleLocations ) + && ( optimalBufferCopyOffsetAlignment == rhs.optimalBufferCopyOffsetAlignment ) + && ( optimalBufferCopyRowPitchAlignment == rhs.optimalBufferCopyRowPitchAlignment ) + && ( nonCoherentAtomSize == rhs.nonCoherentAtomSize ); + } + + bool operator!=( PhysicalDeviceLimits const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t maxImageDimension1D; + uint32_t maxImageDimension2D; + uint32_t maxImageDimension3D; + uint32_t maxImageDimensionCube; + uint32_t maxImageArrayLayers; + uint32_t maxTexelBufferElements; + uint32_t maxUniformBufferRange; + uint32_t maxStorageBufferRange; + uint32_t maxPushConstantsSize; + uint32_t maxMemoryAllocationCount; + uint32_t maxSamplerAllocationCount; + DeviceSize bufferImageGranularity; + DeviceSize sparseAddressSpaceSize; + uint32_t maxBoundDescriptorSets; + uint32_t maxPerStageDescriptorSamplers; + uint32_t maxPerStageDescriptorUniformBuffers; + uint32_t maxPerStageDescriptorStorageBuffers; + uint32_t maxPerStageDescriptorSampledImages; + uint32_t maxPerStageDescriptorStorageImages; + uint32_t maxPerStageDescriptorInputAttachments; + uint32_t maxPerStageResources; + uint32_t maxDescriptorSetSamplers; + uint32_t maxDescriptorSetUniformBuffers; + uint32_t maxDescriptorSetUniformBuffersDynamic; + uint32_t maxDescriptorSetStorageBuffers; + uint32_t maxDescriptorSetStorageBuffersDynamic; + uint32_t maxDescriptorSetSampledImages; + uint32_t maxDescriptorSetStorageImages; + uint32_t maxDescriptorSetInputAttachments; + uint32_t maxVertexInputAttributes; + uint32_t maxVertexInputBindings; + uint32_t maxVertexInputAttributeOffset; + uint32_t maxVertexInputBindingStride; + uint32_t maxVertexOutputComponents; + uint32_t maxTessellationGenerationLevel; + uint32_t maxTessellationPatchSize; + uint32_t maxTessellationControlPerVertexInputComponents; + uint32_t maxTessellationControlPerVertexOutputComponents; + uint32_t maxTessellationControlPerPatchOutputComponents; + uint32_t maxTessellationControlTotalOutputComponents; + uint32_t maxTessellationEvaluationInputComponents; + uint32_t maxTessellationEvaluationOutputComponents; + uint32_t maxGeometryShaderInvocations; + uint32_t maxGeometryInputComponents; + uint32_t maxGeometryOutputComponents; + uint32_t 
maxGeometryOutputVertices; + uint32_t maxGeometryTotalOutputComponents; + uint32_t maxFragmentInputComponents; + uint32_t maxFragmentOutputAttachments; + uint32_t maxFragmentDualSrcAttachments; + uint32_t maxFragmentCombinedOutputResources; + uint32_t maxComputeSharedMemorySize; + uint32_t maxComputeWorkGroupCount[3]; + uint32_t maxComputeWorkGroupInvocations; + uint32_t maxComputeWorkGroupSize[3]; + uint32_t subPixelPrecisionBits; + uint32_t subTexelPrecisionBits; + uint32_t mipmapPrecisionBits; + uint32_t maxDrawIndexedIndexValue; + uint32_t maxDrawIndirectCount; + float maxSamplerLodBias; + float maxSamplerAnisotropy; + uint32_t maxViewports; + uint32_t maxViewportDimensions[2]; + float viewportBoundsRange[2]; + uint32_t viewportSubPixelBits; + size_t minMemoryMapAlignment; + DeviceSize minTexelBufferOffsetAlignment; + DeviceSize minUniformBufferOffsetAlignment; + DeviceSize minStorageBufferOffsetAlignment; + int32_t minTexelOffset; + uint32_t maxTexelOffset; + int32_t minTexelGatherOffset; + uint32_t maxTexelGatherOffset; + float minInterpolationOffset; + float maxInterpolationOffset; + uint32_t subPixelInterpolationOffsetBits; + uint32_t maxFramebufferWidth; + uint32_t maxFramebufferHeight; + uint32_t maxFramebufferLayers; + SampleCountFlags framebufferColorSampleCounts; + SampleCountFlags framebufferDepthSampleCounts; + SampleCountFlags framebufferStencilSampleCounts; + SampleCountFlags framebufferNoAttachmentsSampleCounts; + uint32_t maxColorAttachments; + SampleCountFlags sampledImageColorSampleCounts; + SampleCountFlags sampledImageIntegerSampleCounts; + SampleCountFlags sampledImageDepthSampleCounts; + SampleCountFlags sampledImageStencilSampleCounts; + SampleCountFlags storageImageSampleCounts; + uint32_t maxSampleMaskWords; + Bool32 timestampComputeAndGraphics; + float timestampPeriod; + uint32_t maxClipDistances; + uint32_t maxCullDistances; + uint32_t maxCombinedClipAndCullDistances; + uint32_t discreteQueuePriorities; + float pointSizeRange[2]; + float lineWidthRange[2]; + float pointSizeGranularity; + float lineWidthGranularity; + Bool32 strictLines; + Bool32 standardSampleLocations; + DeviceSize optimalBufferCopyOffsetAlignment; + DeviceSize optimalBufferCopyRowPitchAlignment; + DeviceSize nonCoherentAtomSize; + }; + static_assert( sizeof( PhysicalDeviceLimits ) == sizeof( VkPhysicalDeviceLimits ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceProperties + { + operator VkPhysicalDeviceProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceProperties const& rhs ) const + { + return ( apiVersion == rhs.apiVersion ) + && ( driverVersion == rhs.driverVersion ) + && ( vendorID == rhs.vendorID ) + && ( deviceID == rhs.deviceID ) + && ( deviceType == rhs.deviceType ) + && ( memcmp( deviceName, rhs.deviceName, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE * sizeof( char ) ) == 0 ) + && ( memcmp( pipelineCacheUUID, rhs.pipelineCacheUUID, VK_UUID_SIZE * sizeof( uint8_t ) ) == 0 ) + && ( limits == rhs.limits ) + && ( sparseProperties == rhs.sparseProperties ); + } + + bool operator!=( PhysicalDeviceProperties const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t apiVersion; + uint32_t driverVersion; + uint32_t vendorID; + uint32_t deviceID; + PhysicalDeviceType deviceType; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; + uint8_t pipelineCacheUUID[VK_UUID_SIZE]; + PhysicalDeviceLimits limits; + PhysicalDeviceSparseProperties sparseProperties; + }; + static_assert( sizeof( PhysicalDeviceProperties ) == sizeof( VkPhysicalDeviceProperties ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceProperties2 + { + operator VkPhysicalDeviceProperties2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceProperties2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceProperties2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( properties == rhs.properties ); + } + + bool operator!=( PhysicalDeviceProperties2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceProperties2; + + public: + void* pNext = nullptr; + PhysicalDeviceProperties properties; + }; + static_assert( sizeof( PhysicalDeviceProperties2 ) == sizeof( VkPhysicalDeviceProperties2 ), "struct and wrapper have different size!" ); + + using PhysicalDeviceProperties2KHR = PhysicalDeviceProperties2; + + struct ImageFormatProperties2 + { + operator VkImageFormatProperties2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkImageFormatProperties2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImageFormatProperties2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( imageFormatProperties == rhs.imageFormatProperties ); + } + + bool operator!=( ImageFormatProperties2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImageFormatProperties2; + + public: + void* pNext = nullptr; + ImageFormatProperties imageFormatProperties; + }; + static_assert( sizeof( ImageFormatProperties2 ) == sizeof( VkImageFormatProperties2 ), "struct and wrapper have different size!" 
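// --- Illustrative usage sketch (editor's addition, not part of the vendored header). ---
// Shows how a limit is read back through the PhysicalDeviceProperties / PhysicalDeviceLimits
// wrappers defined above; the vk:: namespace and a valid vk::PhysicalDevice handle are assumed.
#include <vulkan/vulkan.hpp>
#include <cstdio>

static void printPushConstantLimit( vk::PhysicalDevice gpu )
{
    // getProperties() fills the wrapper struct; limits is a plain member, no query needed.
    vk::PhysicalDeviceProperties props = gpu.getProperties();
    std::printf( "%s: maxPushConstantsSize = %u bytes\n",
                 props.deviceName, props.limits.maxPushConstantsSize );
}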
); + + using ImageFormatProperties2KHR = ImageFormatProperties2; + + struct PhysicalDeviceSparseImageFormatInfo2 + { + PhysicalDeviceSparseImageFormatInfo2( Format format_ = Format::eUndefined, + ImageType type_ = ImageType::e1D, + SampleCountFlagBits samples_ = SampleCountFlagBits::e1, + ImageUsageFlags usage_ = ImageUsageFlags(), + ImageTiling tiling_ = ImageTiling::eOptimal ) + : format( format_ ) + , type( type_ ) + , samples( samples_ ) + , usage( usage_ ) + , tiling( tiling_ ) + { + } + + PhysicalDeviceSparseImageFormatInfo2( VkPhysicalDeviceSparseImageFormatInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceSparseImageFormatInfo2 ) ); + } + + PhysicalDeviceSparseImageFormatInfo2& operator=( VkPhysicalDeviceSparseImageFormatInfo2 const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceSparseImageFormatInfo2 ) ); + return *this; + } + PhysicalDeviceSparseImageFormatInfo2& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2& setFormat( Format format_ ) + { + format = format_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2& setType( ImageType type_ ) + { + type = type_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2& setSamples( SampleCountFlagBits samples_ ) + { + samples = samples_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2& setUsage( ImageUsageFlags usage_ ) + { + usage = usage_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2& setTiling( ImageTiling tiling_ ) + { + tiling = tiling_; + return *this; + } + + operator VkPhysicalDeviceSparseImageFormatInfo2 const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceSparseImageFormatInfo2 &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceSparseImageFormatInfo2 const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( format == rhs.format ) + && ( type == rhs.type ) + && ( samples == rhs.samples ) + && ( usage == rhs.usage ) + && ( tiling == rhs.tiling ); + } + + bool operator!=( PhysicalDeviceSparseImageFormatInfo2 const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceSparseImageFormatInfo2; + + public: + const void* pNext = nullptr; + Format format; + ImageType type; + SampleCountFlagBits samples; + ImageUsageFlags usage; + ImageTiling tiling; + }; + static_assert( sizeof( PhysicalDeviceSparseImageFormatInfo2 ) == sizeof( VkPhysicalDeviceSparseImageFormatInfo2 ), "struct and wrapper have different size!" 
); + + using PhysicalDeviceSparseImageFormatInfo2KHR = PhysicalDeviceSparseImageFormatInfo2; + + struct SampleLocationsInfoEXT + { + SampleLocationsInfoEXT( SampleCountFlagBits sampleLocationsPerPixel_ = SampleCountFlagBits::e1, + Extent2D sampleLocationGridSize_ = Extent2D(), + uint32_t sampleLocationsCount_ = 0, + const SampleLocationEXT* pSampleLocations_ = nullptr ) + : sampleLocationsPerPixel( sampleLocationsPerPixel_ ) + , sampleLocationGridSize( sampleLocationGridSize_ ) + , sampleLocationsCount( sampleLocationsCount_ ) + , pSampleLocations( pSampleLocations_ ) + { + } + + SampleLocationsInfoEXT( VkSampleLocationsInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SampleLocationsInfoEXT ) ); + } + + SampleLocationsInfoEXT& operator=( VkSampleLocationsInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SampleLocationsInfoEXT ) ); + return *this; + } + SampleLocationsInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SampleLocationsInfoEXT& setSampleLocationsPerPixel( SampleCountFlagBits sampleLocationsPerPixel_ ) + { + sampleLocationsPerPixel = sampleLocationsPerPixel_; + return *this; + } + + SampleLocationsInfoEXT& setSampleLocationGridSize( Extent2D sampleLocationGridSize_ ) + { + sampleLocationGridSize = sampleLocationGridSize_; + return *this; + } + + SampleLocationsInfoEXT& setSampleLocationsCount( uint32_t sampleLocationsCount_ ) + { + sampleLocationsCount = sampleLocationsCount_; + return *this; + } + + SampleLocationsInfoEXT& setPSampleLocations( const SampleLocationEXT* pSampleLocations_ ) + { + pSampleLocations = pSampleLocations_; + return *this; + } + + operator VkSampleLocationsInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkSampleLocationsInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( SampleLocationsInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleLocationsPerPixel == rhs.sampleLocationsPerPixel ) + && ( sampleLocationGridSize == rhs.sampleLocationGridSize ) + && ( sampleLocationsCount == rhs.sampleLocationsCount ) + && ( pSampleLocations == rhs.pSampleLocations ); + } + + bool operator!=( SampleLocationsInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSampleLocationsInfoEXT; + + public: + const void* pNext = nullptr; + SampleCountFlagBits sampleLocationsPerPixel; + Extent2D sampleLocationGridSize; + uint32_t sampleLocationsCount; + const SampleLocationEXT* pSampleLocations; + }; + static_assert( sizeof( SampleLocationsInfoEXT ) == sizeof( VkSampleLocationsInfoEXT ), "struct and wrapper have different size!" 
); + + struct AttachmentSampleLocationsEXT + { + AttachmentSampleLocationsEXT( uint32_t attachmentIndex_ = 0, + SampleLocationsInfoEXT sampleLocationsInfo_ = SampleLocationsInfoEXT() ) + : attachmentIndex( attachmentIndex_ ) + , sampleLocationsInfo( sampleLocationsInfo_ ) + { + } + + AttachmentSampleLocationsEXT( VkAttachmentSampleLocationsEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentSampleLocationsEXT ) ); + } + + AttachmentSampleLocationsEXT& operator=( VkAttachmentSampleLocationsEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentSampleLocationsEXT ) ); + return *this; + } + AttachmentSampleLocationsEXT& setAttachmentIndex( uint32_t attachmentIndex_ ) + { + attachmentIndex = attachmentIndex_; + return *this; + } + + AttachmentSampleLocationsEXT& setSampleLocationsInfo( SampleLocationsInfoEXT sampleLocationsInfo_ ) + { + sampleLocationsInfo = sampleLocationsInfo_; + return *this; + } + + operator VkAttachmentSampleLocationsEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkAttachmentSampleLocationsEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( AttachmentSampleLocationsEXT const& rhs ) const + { + return ( attachmentIndex == rhs.attachmentIndex ) + && ( sampleLocationsInfo == rhs.sampleLocationsInfo ); + } + + bool operator!=( AttachmentSampleLocationsEXT const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t attachmentIndex; + SampleLocationsInfoEXT sampleLocationsInfo; + }; + static_assert( sizeof( AttachmentSampleLocationsEXT ) == sizeof( VkAttachmentSampleLocationsEXT ), "struct and wrapper have different size!" ); + + struct SubpassSampleLocationsEXT + { + SubpassSampleLocationsEXT( uint32_t subpassIndex_ = 0, + SampleLocationsInfoEXT sampleLocationsInfo_ = SampleLocationsInfoEXT() ) + : subpassIndex( subpassIndex_ ) + , sampleLocationsInfo( sampleLocationsInfo_ ) + { + } + + SubpassSampleLocationsEXT( VkSubpassSampleLocationsEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassSampleLocationsEXT ) ); + } + + SubpassSampleLocationsEXT& operator=( VkSubpassSampleLocationsEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassSampleLocationsEXT ) ); + return *this; + } + SubpassSampleLocationsEXT& setSubpassIndex( uint32_t subpassIndex_ ) + { + subpassIndex = subpassIndex_; + return *this; + } + + SubpassSampleLocationsEXT& setSampleLocationsInfo( SampleLocationsInfoEXT sampleLocationsInfo_ ) + { + sampleLocationsInfo = sampleLocationsInfo_; + return *this; + } + + operator VkSubpassSampleLocationsEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkSubpassSampleLocationsEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( SubpassSampleLocationsEXT const& rhs ) const + { + return ( subpassIndex == rhs.subpassIndex ) + && ( sampleLocationsInfo == rhs.sampleLocationsInfo ); + } + + bool operator!=( SubpassSampleLocationsEXT const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t subpassIndex; + SampleLocationsInfoEXT sampleLocationsInfo; + }; + static_assert( sizeof( SubpassSampleLocationsEXT ) == sizeof( VkSubpassSampleLocationsEXT ), "struct and wrapper have different size!" 
); + + struct RenderPassSampleLocationsBeginInfoEXT + { + RenderPassSampleLocationsBeginInfoEXT( uint32_t attachmentInitialSampleLocationsCount_ = 0, + const AttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations_ = nullptr, + uint32_t postSubpassSampleLocationsCount_ = 0, + const SubpassSampleLocationsEXT* pPostSubpassSampleLocations_ = nullptr ) + : attachmentInitialSampleLocationsCount( attachmentInitialSampleLocationsCount_ ) + , pAttachmentInitialSampleLocations( pAttachmentInitialSampleLocations_ ) + , postSubpassSampleLocationsCount( postSubpassSampleLocationsCount_ ) + , pPostSubpassSampleLocations( pPostSubpassSampleLocations_ ) + { + } + + RenderPassSampleLocationsBeginInfoEXT( VkRenderPassSampleLocationsBeginInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassSampleLocationsBeginInfoEXT ) ); + } + + RenderPassSampleLocationsBeginInfoEXT& operator=( VkRenderPassSampleLocationsBeginInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassSampleLocationsBeginInfoEXT ) ); + return *this; + } + RenderPassSampleLocationsBeginInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + RenderPassSampleLocationsBeginInfoEXT& setAttachmentInitialSampleLocationsCount( uint32_t attachmentInitialSampleLocationsCount_ ) + { + attachmentInitialSampleLocationsCount = attachmentInitialSampleLocationsCount_; + return *this; + } + + RenderPassSampleLocationsBeginInfoEXT& setPAttachmentInitialSampleLocations( const AttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations_ ) + { + pAttachmentInitialSampleLocations = pAttachmentInitialSampleLocations_; + return *this; + } + + RenderPassSampleLocationsBeginInfoEXT& setPostSubpassSampleLocationsCount( uint32_t postSubpassSampleLocationsCount_ ) + { + postSubpassSampleLocationsCount = postSubpassSampleLocationsCount_; + return *this; + } + + RenderPassSampleLocationsBeginInfoEXT& setPPostSubpassSampleLocations( const SubpassSampleLocationsEXT* pPostSubpassSampleLocations_ ) + { + pPostSubpassSampleLocations = pPostSubpassSampleLocations_; + return *this; + } + + operator VkRenderPassSampleLocationsBeginInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkRenderPassSampleLocationsBeginInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( RenderPassSampleLocationsBeginInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( attachmentInitialSampleLocationsCount == rhs.attachmentInitialSampleLocationsCount ) + && ( pAttachmentInitialSampleLocations == rhs.pAttachmentInitialSampleLocations ) + && ( postSubpassSampleLocationsCount == rhs.postSubpassSampleLocationsCount ) + && ( pPostSubpassSampleLocations == rhs.pPostSubpassSampleLocations ); + } + + bool operator!=( RenderPassSampleLocationsBeginInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eRenderPassSampleLocationsBeginInfoEXT; + + public: + const void* pNext = nullptr; + uint32_t attachmentInitialSampleLocationsCount; + const AttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations; + uint32_t postSubpassSampleLocationsCount; + const SubpassSampleLocationsEXT* pPostSubpassSampleLocations; + }; + static_assert( sizeof( RenderPassSampleLocationsBeginInfoEXT ) == sizeof( VkRenderPassSampleLocationsBeginInfoEXT ), "struct and wrapper have different size!" 
); + + struct PipelineSampleLocationsStateCreateInfoEXT + { + PipelineSampleLocationsStateCreateInfoEXT( Bool32 sampleLocationsEnable_ = 0, + SampleLocationsInfoEXT sampleLocationsInfo_ = SampleLocationsInfoEXT() ) + : sampleLocationsEnable( sampleLocationsEnable_ ) + , sampleLocationsInfo( sampleLocationsInfo_ ) + { + } + + PipelineSampleLocationsStateCreateInfoEXT( VkPipelineSampleLocationsStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineSampleLocationsStateCreateInfoEXT ) ); + } + + PipelineSampleLocationsStateCreateInfoEXT& operator=( VkPipelineSampleLocationsStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineSampleLocationsStateCreateInfoEXT ) ); + return *this; + } + PipelineSampleLocationsStateCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineSampleLocationsStateCreateInfoEXT& setSampleLocationsEnable( Bool32 sampleLocationsEnable_ ) + { + sampleLocationsEnable = sampleLocationsEnable_; + return *this; + } + + PipelineSampleLocationsStateCreateInfoEXT& setSampleLocationsInfo( SampleLocationsInfoEXT sampleLocationsInfo_ ) + { + sampleLocationsInfo = sampleLocationsInfo_; + return *this; + } + + operator VkPipelineSampleLocationsStateCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineSampleLocationsStateCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineSampleLocationsStateCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleLocationsEnable == rhs.sampleLocationsEnable ) + && ( sampleLocationsInfo == rhs.sampleLocationsInfo ); + } + + bool operator!=( PipelineSampleLocationsStateCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineSampleLocationsStateCreateInfoEXT; + + public: + const void* pNext = nullptr; + Bool32 sampleLocationsEnable; + SampleLocationsInfoEXT sampleLocationsInfo; + }; + static_assert( sizeof( PipelineSampleLocationsStateCreateInfoEXT ) == sizeof( VkPipelineSampleLocationsStateCreateInfoEXT ), "struct and wrapper have different size!" 
); + + struct PhysicalDeviceSampleLocationsPropertiesEXT + { + operator VkPhysicalDeviceSampleLocationsPropertiesEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceSampleLocationsPropertiesEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceSampleLocationsPropertiesEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleLocationSampleCounts == rhs.sampleLocationSampleCounts ) + && ( maxSampleLocationGridSize == rhs.maxSampleLocationGridSize ) + && ( memcmp( sampleLocationCoordinateRange, rhs.sampleLocationCoordinateRange, 2 * sizeof( float ) ) == 0 ) + && ( sampleLocationSubPixelBits == rhs.sampleLocationSubPixelBits ) + && ( variableSampleLocations == rhs.variableSampleLocations ); + } + + bool operator!=( PhysicalDeviceSampleLocationsPropertiesEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceSampleLocationsPropertiesEXT; + + public: + void* pNext = nullptr; + SampleCountFlags sampleLocationSampleCounts; + Extent2D maxSampleLocationGridSize; + float sampleLocationCoordinateRange[2]; + uint32_t sampleLocationSubPixelBits; + Bool32 variableSampleLocations; + }; + static_assert( sizeof( PhysicalDeviceSampleLocationsPropertiesEXT ) == sizeof( VkPhysicalDeviceSampleLocationsPropertiesEXT ), "struct and wrapper have different size!" ); + + enum class AttachmentDescriptionFlagBits + { + eMayAlias = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT + }; + + using AttachmentDescriptionFlags = Flags; + + VULKAN_HPP_INLINE AttachmentDescriptionFlags operator|( AttachmentDescriptionFlagBits bit0, AttachmentDescriptionFlagBits bit1 ) + { + return AttachmentDescriptionFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE AttachmentDescriptionFlags operator~( AttachmentDescriptionFlagBits bits ) + { + return ~( AttachmentDescriptionFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(AttachmentDescriptionFlagBits::eMayAlias) + }; + }; + + struct AttachmentDescription + { + AttachmentDescription( AttachmentDescriptionFlags flags_ = AttachmentDescriptionFlags(), + Format format_ = Format::eUndefined, + SampleCountFlagBits samples_ = SampleCountFlagBits::e1, + AttachmentLoadOp loadOp_ = AttachmentLoadOp::eLoad, + AttachmentStoreOp storeOp_ = AttachmentStoreOp::eStore, + AttachmentLoadOp stencilLoadOp_ = AttachmentLoadOp::eLoad, + AttachmentStoreOp stencilStoreOp_ = AttachmentStoreOp::eStore, + ImageLayout initialLayout_ = ImageLayout::eUndefined, + ImageLayout finalLayout_ = ImageLayout::eUndefined ) + : flags( flags_ ) + , format( format_ ) + , samples( samples_ ) + , loadOp( loadOp_ ) + , storeOp( storeOp_ ) + , stencilLoadOp( stencilLoadOp_ ) + , stencilStoreOp( stencilStoreOp_ ) + , initialLayout( initialLayout_ ) + , finalLayout( finalLayout_ ) + { + } + + AttachmentDescription( VkAttachmentDescription const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentDescription ) ); + } + + AttachmentDescription& operator=( VkAttachmentDescription const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentDescription ) ); + return *this; + } + AttachmentDescription& setFlags( AttachmentDescriptionFlags flags_ ) + { + flags = flags_; + return *this; + } + + AttachmentDescription& setFormat( Format format_ ) + { + format = format_; + return *this; + } + + AttachmentDescription& setSamples( SampleCountFlagBits samples_ ) + { + samples = samples_; + return *this; + } + + AttachmentDescription& 
setLoadOp( AttachmentLoadOp loadOp_ ) + { + loadOp = loadOp_; + return *this; + } + + AttachmentDescription& setStoreOp( AttachmentStoreOp storeOp_ ) + { + storeOp = storeOp_; + return *this; + } + + AttachmentDescription& setStencilLoadOp( AttachmentLoadOp stencilLoadOp_ ) + { + stencilLoadOp = stencilLoadOp_; + return *this; + } + + AttachmentDescription& setStencilStoreOp( AttachmentStoreOp stencilStoreOp_ ) + { + stencilStoreOp = stencilStoreOp_; + return *this; + } + + AttachmentDescription& setInitialLayout( ImageLayout initialLayout_ ) + { + initialLayout = initialLayout_; + return *this; + } + + AttachmentDescription& setFinalLayout( ImageLayout finalLayout_ ) + { + finalLayout = finalLayout_; + return *this; + } + + operator VkAttachmentDescription const&() const + { + return *reinterpret_cast(this); + } + + operator VkAttachmentDescription &() + { + return *reinterpret_cast(this); + } + + bool operator==( AttachmentDescription const& rhs ) const + { + return ( flags == rhs.flags ) + && ( format == rhs.format ) + && ( samples == rhs.samples ) + && ( loadOp == rhs.loadOp ) + && ( storeOp == rhs.storeOp ) + && ( stencilLoadOp == rhs.stencilLoadOp ) + && ( stencilStoreOp == rhs.stencilStoreOp ) + && ( initialLayout == rhs.initialLayout ) + && ( finalLayout == rhs.finalLayout ); + } + + bool operator!=( AttachmentDescription const& rhs ) const + { + return !operator==( rhs ); + } + + AttachmentDescriptionFlags flags; + Format format; + SampleCountFlagBits samples; + AttachmentLoadOp loadOp; + AttachmentStoreOp storeOp; + AttachmentLoadOp stencilLoadOp; + AttachmentStoreOp stencilStoreOp; + ImageLayout initialLayout; + ImageLayout finalLayout; + }; + static_assert( sizeof( AttachmentDescription ) == sizeof( VkAttachmentDescription ), "struct and wrapper have different size!" 
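// --- Illustrative usage sketch (editor's addition, not part of the vendored header). ---
// The chained set*() members above allow fluent initialization of AttachmentDescription;
// the format and layouts below are placeholder choices for a typical presentable color
// attachment (vk:: namespace assumed).
#include <vulkan/vulkan.hpp>

static vk::AttachmentDescription makeColorAttachment()
{
    return vk::AttachmentDescription()
        .setFormat( vk::Format::eB8G8R8A8Unorm )
        .setSamples( vk::SampleCountFlagBits::e1 )
        .setLoadOp( vk::AttachmentLoadOp::eClear )
        .setStoreOp( vk::AttachmentStoreOp::eStore )
        .setStencilLoadOp( vk::AttachmentLoadOp::eDontCare )
        .setStencilStoreOp( vk::AttachmentStoreOp::eDontCare )
        .setInitialLayout( vk::ImageLayout::eUndefined )
        .setFinalLayout( vk::ImageLayout::ePresentSrcKHR );
}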
); + + struct AttachmentDescription2KHR + { + AttachmentDescription2KHR( AttachmentDescriptionFlags flags_ = AttachmentDescriptionFlags(), + Format format_ = Format::eUndefined, + SampleCountFlagBits samples_ = SampleCountFlagBits::e1, + AttachmentLoadOp loadOp_ = AttachmentLoadOp::eLoad, + AttachmentStoreOp storeOp_ = AttachmentStoreOp::eStore, + AttachmentLoadOp stencilLoadOp_ = AttachmentLoadOp::eLoad, + AttachmentStoreOp stencilStoreOp_ = AttachmentStoreOp::eStore, + ImageLayout initialLayout_ = ImageLayout::eUndefined, + ImageLayout finalLayout_ = ImageLayout::eUndefined ) + : flags( flags_ ) + , format( format_ ) + , samples( samples_ ) + , loadOp( loadOp_ ) + , storeOp( storeOp_ ) + , stencilLoadOp( stencilLoadOp_ ) + , stencilStoreOp( stencilStoreOp_ ) + , initialLayout( initialLayout_ ) + , finalLayout( finalLayout_ ) + { + } + + AttachmentDescription2KHR( VkAttachmentDescription2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentDescription2KHR ) ); + } + + AttachmentDescription2KHR& operator=( VkAttachmentDescription2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( AttachmentDescription2KHR ) ); + return *this; + } + AttachmentDescription2KHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + AttachmentDescription2KHR& setFlags( AttachmentDescriptionFlags flags_ ) + { + flags = flags_; + return *this; + } + + AttachmentDescription2KHR& setFormat( Format format_ ) + { + format = format_; + return *this; + } + + AttachmentDescription2KHR& setSamples( SampleCountFlagBits samples_ ) + { + samples = samples_; + return *this; + } + + AttachmentDescription2KHR& setLoadOp( AttachmentLoadOp loadOp_ ) + { + loadOp = loadOp_; + return *this; + } + + AttachmentDescription2KHR& setStoreOp( AttachmentStoreOp storeOp_ ) + { + storeOp = storeOp_; + return *this; + } + + AttachmentDescription2KHR& setStencilLoadOp( AttachmentLoadOp stencilLoadOp_ ) + { + stencilLoadOp = stencilLoadOp_; + return *this; + } + + AttachmentDescription2KHR& setStencilStoreOp( AttachmentStoreOp stencilStoreOp_ ) + { + stencilStoreOp = stencilStoreOp_; + return *this; + } + + AttachmentDescription2KHR& setInitialLayout( ImageLayout initialLayout_ ) + { + initialLayout = initialLayout_; + return *this; + } + + AttachmentDescription2KHR& setFinalLayout( ImageLayout finalLayout_ ) + { + finalLayout = finalLayout_; + return *this; + } + + operator VkAttachmentDescription2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkAttachmentDescription2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( AttachmentDescription2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( format == rhs.format ) + && ( samples == rhs.samples ) + && ( loadOp == rhs.loadOp ) + && ( storeOp == rhs.storeOp ) + && ( stencilLoadOp == rhs.stencilLoadOp ) + && ( stencilStoreOp == rhs.stencilStoreOp ) + && ( initialLayout == rhs.initialLayout ) + && ( finalLayout == rhs.finalLayout ); + } + + bool operator!=( AttachmentDescription2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eAttachmentDescription2KHR; + + public: + const void* pNext = nullptr; + AttachmentDescriptionFlags flags; + Format format; + SampleCountFlagBits samples; + AttachmentLoadOp loadOp; + AttachmentStoreOp storeOp; + AttachmentLoadOp stencilLoadOp; + AttachmentStoreOp stencilStoreOp; + ImageLayout initialLayout; + ImageLayout finalLayout; + }; + static_assert( sizeof( 
AttachmentDescription2KHR ) == sizeof( VkAttachmentDescription2KHR ), "struct and wrapper have different size!" ); + + enum class StencilFaceFlagBits + { + eFront = VK_STENCIL_FACE_FRONT_BIT, + eBack = VK_STENCIL_FACE_BACK_BIT, + eVkStencilFrontAndBack = VK_STENCIL_FRONT_AND_BACK + }; + + using StencilFaceFlags = Flags; + + VULKAN_HPP_INLINE StencilFaceFlags operator|( StencilFaceFlagBits bit0, StencilFaceFlagBits bit1 ) + { + return StencilFaceFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE StencilFaceFlags operator~( StencilFaceFlagBits bits ) + { + return ~( StencilFaceFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(StencilFaceFlagBits::eFront) | VkFlags(StencilFaceFlagBits::eBack) | VkFlags(StencilFaceFlagBits::eVkStencilFrontAndBack) + }; + }; + + enum class DescriptorPoolCreateFlagBits + { + eFreeDescriptorSet = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, + eUpdateAfterBindEXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT + }; + + using DescriptorPoolCreateFlags = Flags; + + VULKAN_HPP_INLINE DescriptorPoolCreateFlags operator|( DescriptorPoolCreateFlagBits bit0, DescriptorPoolCreateFlagBits bit1 ) + { + return DescriptorPoolCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE DescriptorPoolCreateFlags operator~( DescriptorPoolCreateFlagBits bits ) + { + return ~( DescriptorPoolCreateFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(DescriptorPoolCreateFlagBits::eFreeDescriptorSet) | VkFlags(DescriptorPoolCreateFlagBits::eUpdateAfterBindEXT) + }; + }; + + struct DescriptorPoolCreateInfo + { + DescriptorPoolCreateInfo( DescriptorPoolCreateFlags flags_ = DescriptorPoolCreateFlags(), + uint32_t maxSets_ = 0, + uint32_t poolSizeCount_ = 0, + const DescriptorPoolSize* pPoolSizes_ = nullptr ) + : flags( flags_ ) + , maxSets( maxSets_ ) + , poolSizeCount( poolSizeCount_ ) + , pPoolSizes( pPoolSizes_ ) + { + } + + DescriptorPoolCreateInfo( VkDescriptorPoolCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorPoolCreateInfo ) ); + } + + DescriptorPoolCreateInfo& operator=( VkDescriptorPoolCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorPoolCreateInfo ) ); + return *this; + } + DescriptorPoolCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DescriptorPoolCreateInfo& setFlags( DescriptorPoolCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + DescriptorPoolCreateInfo& setMaxSets( uint32_t maxSets_ ) + { + maxSets = maxSets_; + return *this; + } + + DescriptorPoolCreateInfo& setPoolSizeCount( uint32_t poolSizeCount_ ) + { + poolSizeCount = poolSizeCount_; + return *this; + } + + DescriptorPoolCreateInfo& setPPoolSizes( const DescriptorPoolSize* pPoolSizes_ ) + { + pPoolSizes = pPoolSizes_; + return *this; + } + + operator VkDescriptorPoolCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorPoolCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorPoolCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( maxSets == rhs.maxSets ) + && ( poolSizeCount == rhs.poolSizeCount ) + && ( pPoolSizes == rhs.pPoolSizes ); + } + + bool operator!=( DescriptorPoolCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorPoolCreateInfo; + + public: + const void* pNext = nullptr; + DescriptorPoolCreateFlags 
flags; + uint32_t maxSets; + uint32_t poolSizeCount; + const DescriptorPoolSize* pPoolSizes; + }; + static_assert( sizeof( DescriptorPoolCreateInfo ) == sizeof( VkDescriptorPoolCreateInfo ), "struct and wrapper have different size!" ); + + enum class DependencyFlagBits + { + eByRegion = VK_DEPENDENCY_BY_REGION_BIT, + eDeviceGroup = VK_DEPENDENCY_DEVICE_GROUP_BIT, + eDeviceGroupKHR = VK_DEPENDENCY_DEVICE_GROUP_BIT, + eViewLocal = VK_DEPENDENCY_VIEW_LOCAL_BIT, + eViewLocalKHR = VK_DEPENDENCY_VIEW_LOCAL_BIT + }; + + using DependencyFlags = Flags; + + VULKAN_HPP_INLINE DependencyFlags operator|( DependencyFlagBits bit0, DependencyFlagBits bit1 ) + { + return DependencyFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE DependencyFlags operator~( DependencyFlagBits bits ) + { + return ~( DependencyFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(DependencyFlagBits::eByRegion) | VkFlags(DependencyFlagBits::eDeviceGroup) | VkFlags(DependencyFlagBits::eViewLocal) + }; + }; + + struct SubpassDependency + { + SubpassDependency( uint32_t srcSubpass_ = 0, + uint32_t dstSubpass_ = 0, + PipelineStageFlags srcStageMask_ = PipelineStageFlags(), + PipelineStageFlags dstStageMask_ = PipelineStageFlags(), + AccessFlags srcAccessMask_ = AccessFlags(), + AccessFlags dstAccessMask_ = AccessFlags(), + DependencyFlags dependencyFlags_ = DependencyFlags() ) + : srcSubpass( srcSubpass_ ) + , dstSubpass( dstSubpass_ ) + , srcStageMask( srcStageMask_ ) + , dstStageMask( dstStageMask_ ) + , srcAccessMask( srcAccessMask_ ) + , dstAccessMask( dstAccessMask_ ) + , dependencyFlags( dependencyFlags_ ) + { + } + + SubpassDependency( VkSubpassDependency const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassDependency ) ); + } + + SubpassDependency& operator=( VkSubpassDependency const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassDependency ) ); + return *this; + } + SubpassDependency& setSrcSubpass( uint32_t srcSubpass_ ) + { + srcSubpass = srcSubpass_; + return *this; + } + + SubpassDependency& setDstSubpass( uint32_t dstSubpass_ ) + { + dstSubpass = dstSubpass_; + return *this; + } + + SubpassDependency& setSrcStageMask( PipelineStageFlags srcStageMask_ ) + { + srcStageMask = srcStageMask_; + return *this; + } + + SubpassDependency& setDstStageMask( PipelineStageFlags dstStageMask_ ) + { + dstStageMask = dstStageMask_; + return *this; + } + + SubpassDependency& setSrcAccessMask( AccessFlags srcAccessMask_ ) + { + srcAccessMask = srcAccessMask_; + return *this; + } + + SubpassDependency& setDstAccessMask( AccessFlags dstAccessMask_ ) + { + dstAccessMask = dstAccessMask_; + return *this; + } + + SubpassDependency& setDependencyFlags( DependencyFlags dependencyFlags_ ) + { + dependencyFlags = dependencyFlags_; + return *this; + } + + operator VkSubpassDependency const&() const + { + return *reinterpret_cast(this); + } + + operator VkSubpassDependency &() + { + return *reinterpret_cast(this); + } + + bool operator==( SubpassDependency const& rhs ) const + { + return ( srcSubpass == rhs.srcSubpass ) + && ( dstSubpass == rhs.dstSubpass ) + && ( srcStageMask == rhs.srcStageMask ) + && ( dstStageMask == rhs.dstStageMask ) + && ( srcAccessMask == rhs.srcAccessMask ) + && ( dstAccessMask == rhs.dstAccessMask ) + && ( dependencyFlags == rhs.dependencyFlags ); + } + + bool operator!=( SubpassDependency const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t srcSubpass; + uint32_t dstSubpass; + PipelineStageFlags srcStageMask; + PipelineStageFlags dstStageMask; + AccessFlags 
srcAccessMask; + AccessFlags dstAccessMask; + DependencyFlags dependencyFlags; + }; + static_assert( sizeof( SubpassDependency ) == sizeof( VkSubpassDependency ), "struct and wrapper have different size!" ); + + struct SubpassDependency2KHR + { + SubpassDependency2KHR( uint32_t srcSubpass_ = 0, + uint32_t dstSubpass_ = 0, + PipelineStageFlags srcStageMask_ = PipelineStageFlags(), + PipelineStageFlags dstStageMask_ = PipelineStageFlags(), + AccessFlags srcAccessMask_ = AccessFlags(), + AccessFlags dstAccessMask_ = AccessFlags(), + DependencyFlags dependencyFlags_ = DependencyFlags(), + int32_t viewOffset_ = 0 ) + : srcSubpass( srcSubpass_ ) + , dstSubpass( dstSubpass_ ) + , srcStageMask( srcStageMask_ ) + , dstStageMask( dstStageMask_ ) + , srcAccessMask( srcAccessMask_ ) + , dstAccessMask( dstAccessMask_ ) + , dependencyFlags( dependencyFlags_ ) + , viewOffset( viewOffset_ ) + { + } + + SubpassDependency2KHR( VkSubpassDependency2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassDependency2KHR ) ); + } + + SubpassDependency2KHR& operator=( VkSubpassDependency2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassDependency2KHR ) ); + return *this; + } + SubpassDependency2KHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SubpassDependency2KHR& setSrcSubpass( uint32_t srcSubpass_ ) + { + srcSubpass = srcSubpass_; + return *this; + } + + SubpassDependency2KHR& setDstSubpass( uint32_t dstSubpass_ ) + { + dstSubpass = dstSubpass_; + return *this; + } + + SubpassDependency2KHR& setSrcStageMask( PipelineStageFlags srcStageMask_ ) + { + srcStageMask = srcStageMask_; + return *this; + } + + SubpassDependency2KHR& setDstStageMask( PipelineStageFlags dstStageMask_ ) + { + dstStageMask = dstStageMask_; + return *this; + } + + SubpassDependency2KHR& setSrcAccessMask( AccessFlags srcAccessMask_ ) + { + srcAccessMask = srcAccessMask_; + return *this; + } + + SubpassDependency2KHR& setDstAccessMask( AccessFlags dstAccessMask_ ) + { + dstAccessMask = dstAccessMask_; + return *this; + } + + SubpassDependency2KHR& setDependencyFlags( DependencyFlags dependencyFlags_ ) + { + dependencyFlags = dependencyFlags_; + return *this; + } + + SubpassDependency2KHR& setViewOffset( int32_t viewOffset_ ) + { + viewOffset = viewOffset_; + return *this; + } + + operator VkSubpassDependency2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSubpassDependency2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SubpassDependency2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcSubpass == rhs.srcSubpass ) + && ( dstSubpass == rhs.dstSubpass ) + && ( srcStageMask == rhs.srcStageMask ) + && ( dstStageMask == rhs.dstStageMask ) + && ( srcAccessMask == rhs.srcAccessMask ) + && ( dstAccessMask == rhs.dstAccessMask ) + && ( dependencyFlags == rhs.dependencyFlags ) + && ( viewOffset == rhs.viewOffset ); + } + + bool operator!=( SubpassDependency2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSubpassDependency2KHR; + + public: + const void* pNext = nullptr; + uint32_t srcSubpass; + uint32_t dstSubpass; + PipelineStageFlags srcStageMask; + PipelineStageFlags dstStageMask; + AccessFlags srcAccessMask; + AccessFlags dstAccessMask; + DependencyFlags dependencyFlags; + int32_t viewOffset; + }; + static_assert( sizeof( SubpassDependency2KHR ) == sizeof( VkSubpassDependency2KHR ), "struct and wrapper have different size!" 
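// --- Illustrative usage sketch (editor's addition, not part of the vendored header). ---
// A typical "external -> subpass 0" dependency expressed with the SubpassDependency setters
// above; the stage and access masks are placeholder choices for a color pass
// (vk:: namespace assumed).
#include <vulkan/vulkan.hpp>

static vk::SubpassDependency makeExternalColorDependency()
{
    return vk::SubpassDependency()
        .setSrcSubpass( VK_SUBPASS_EXTERNAL )
        .setDstSubpass( 0 )
        .setSrcStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput )
        .setDstStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput )
        .setSrcAccessMask( vk::AccessFlags() )                       // no prior writes to wait on
        .setDstAccessMask( vk::AccessFlagBits::eColorAttachmentWrite );
}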
); + + enum class PresentModeKHR + { + eImmediate = VK_PRESENT_MODE_IMMEDIATE_KHR, + eMailbox = VK_PRESENT_MODE_MAILBOX_KHR, + eFifo = VK_PRESENT_MODE_FIFO_KHR, + eFifoRelaxed = VK_PRESENT_MODE_FIFO_RELAXED_KHR, + eSharedDemandRefresh = VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR, + eSharedContinuousRefresh = VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR + }; + + enum class ColorSpaceKHR + { + eSrgbNonlinear = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + eVkColorspaceSrgbNonlinear = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + eDisplayP3NonlinearEXT = VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT, + eExtendedSrgbLinearEXT = VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT, + eDciP3LinearEXT = VK_COLOR_SPACE_DCI_P3_LINEAR_EXT, + eDciP3NonlinearEXT = VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT, + eBt709LinearEXT = VK_COLOR_SPACE_BT709_LINEAR_EXT, + eBt709NonlinearEXT = VK_COLOR_SPACE_BT709_NONLINEAR_EXT, + eBt2020LinearEXT = VK_COLOR_SPACE_BT2020_LINEAR_EXT, + eHdr10St2084EXT = VK_COLOR_SPACE_HDR10_ST2084_EXT, + eDolbyvisionEXT = VK_COLOR_SPACE_DOLBYVISION_EXT, + eHdr10HlgEXT = VK_COLOR_SPACE_HDR10_HLG_EXT, + eAdobergbLinearEXT = VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT, + eAdobergbNonlinearEXT = VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT, + ePassThroughEXT = VK_COLOR_SPACE_PASS_THROUGH_EXT, + eExtendedSrgbNonlinearEXT = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT + }; + + struct SurfaceFormatKHR + { + operator VkSurfaceFormatKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSurfaceFormatKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SurfaceFormatKHR const& rhs ) const + { + return ( format == rhs.format ) + && ( colorSpace == rhs.colorSpace ); + } + + bool operator!=( SurfaceFormatKHR const& rhs ) const + { + return !operator==( rhs ); + } + + Format format; + ColorSpaceKHR colorSpace; + }; + static_assert( sizeof( SurfaceFormatKHR ) == sizeof( VkSurfaceFormatKHR ), "struct and wrapper have different size!" ); + + struct SurfaceFormat2KHR + { + operator VkSurfaceFormat2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSurfaceFormat2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SurfaceFormat2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( surfaceFormat == rhs.surfaceFormat ); + } + + bool operator!=( SurfaceFormat2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSurfaceFormat2KHR; + + public: + void* pNext = nullptr; + SurfaceFormatKHR surfaceFormat; + }; + static_assert( sizeof( SurfaceFormat2KHR ) == sizeof( VkSurfaceFormat2KHR ), "struct and wrapper have different size!" 
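// --- Illustrative usage sketch (editor's addition, not part of the vendored header). ---
// Selecting one of the PresentModeKHR values above for a swapchain: prefer mailbox if the
// surface offers it, otherwise fall back to FIFO, which is always available
// (vk:: namespace and valid physical-device / surface handles assumed).
#include <vulkan/vulkan.hpp>

static vk::PresentModeKHR choosePresentMode( vk::PhysicalDevice gpu, vk::SurfaceKHR surface )
{
    for ( vk::PresentModeKHR mode : gpu.getSurfacePresentModesKHR( surface ) )
    {
        if ( mode == vk::PresentModeKHR::eMailbox )
            return mode;
    }
    return vk::PresentModeKHR::eFifo;
}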
); + + enum class DisplayPlaneAlphaFlagBitsKHR + { + eOpaque = VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR, + eGlobal = VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR, + ePerPixel = VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR, + ePerPixelPremultiplied = VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR + }; + + using DisplayPlaneAlphaFlagsKHR = Flags; + + VULKAN_HPP_INLINE DisplayPlaneAlphaFlagsKHR operator|( DisplayPlaneAlphaFlagBitsKHR bit0, DisplayPlaneAlphaFlagBitsKHR bit1 ) + { + return DisplayPlaneAlphaFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE DisplayPlaneAlphaFlagsKHR operator~( DisplayPlaneAlphaFlagBitsKHR bits ) + { + return ~( DisplayPlaneAlphaFlagsKHR( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(DisplayPlaneAlphaFlagBitsKHR::eOpaque) | VkFlags(DisplayPlaneAlphaFlagBitsKHR::eGlobal) | VkFlags(DisplayPlaneAlphaFlagBitsKHR::ePerPixel) | VkFlags(DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied) + }; + }; + + struct DisplayPlaneCapabilitiesKHR + { + operator VkDisplayPlaneCapabilitiesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayPlaneCapabilitiesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayPlaneCapabilitiesKHR const& rhs ) const + { + return ( supportedAlpha == rhs.supportedAlpha ) + && ( minSrcPosition == rhs.minSrcPosition ) + && ( maxSrcPosition == rhs.maxSrcPosition ) + && ( minSrcExtent == rhs.minSrcExtent ) + && ( maxSrcExtent == rhs.maxSrcExtent ) + && ( minDstPosition == rhs.minDstPosition ) + && ( maxDstPosition == rhs.maxDstPosition ) + && ( minDstExtent == rhs.minDstExtent ) + && ( maxDstExtent == rhs.maxDstExtent ); + } + + bool operator!=( DisplayPlaneCapabilitiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + DisplayPlaneAlphaFlagsKHR supportedAlpha; + Offset2D minSrcPosition; + Offset2D maxSrcPosition; + Extent2D minSrcExtent; + Extent2D maxSrcExtent; + Offset2D minDstPosition; + Offset2D maxDstPosition; + Extent2D minDstExtent; + Extent2D maxDstExtent; + }; + static_assert( sizeof( DisplayPlaneCapabilitiesKHR ) == sizeof( VkDisplayPlaneCapabilitiesKHR ), "struct and wrapper have different size!" ); + + struct DisplayPlaneCapabilities2KHR + { + operator VkDisplayPlaneCapabilities2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayPlaneCapabilities2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayPlaneCapabilities2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( capabilities == rhs.capabilities ); + } + + bool operator!=( DisplayPlaneCapabilities2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplayPlaneCapabilities2KHR; + + public: + void* pNext = nullptr; + DisplayPlaneCapabilitiesKHR capabilities; + }; + static_assert( sizeof( DisplayPlaneCapabilities2KHR ) == sizeof( VkDisplayPlaneCapabilities2KHR ), "struct and wrapper have different size!" 
); + + enum class CompositeAlphaFlagBitsKHR + { + eOpaque = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR, + ePreMultiplied = VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR, + ePostMultiplied = VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR, + eInherit = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR + }; + + using CompositeAlphaFlagsKHR = Flags; + + VULKAN_HPP_INLINE CompositeAlphaFlagsKHR operator|( CompositeAlphaFlagBitsKHR bit0, CompositeAlphaFlagBitsKHR bit1 ) + { + return CompositeAlphaFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE CompositeAlphaFlagsKHR operator~( CompositeAlphaFlagBitsKHR bits ) + { + return ~( CompositeAlphaFlagsKHR( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(CompositeAlphaFlagBitsKHR::eOpaque) | VkFlags(CompositeAlphaFlagBitsKHR::ePreMultiplied) | VkFlags(CompositeAlphaFlagBitsKHR::ePostMultiplied) | VkFlags(CompositeAlphaFlagBitsKHR::eInherit) + }; + }; + + enum class SurfaceTransformFlagBitsKHR + { + eIdentity = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR, + eRotate90 = VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR, + eRotate180 = VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR, + eRotate270 = VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR, + eHorizontalMirror = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR, + eHorizontalMirrorRotate90 = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR, + eHorizontalMirrorRotate180 = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR, + eHorizontalMirrorRotate270 = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR, + eInherit = VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR + }; + + using SurfaceTransformFlagsKHR = Flags; + + VULKAN_HPP_INLINE SurfaceTransformFlagsKHR operator|( SurfaceTransformFlagBitsKHR bit0, SurfaceTransformFlagBitsKHR bit1 ) + { + return SurfaceTransformFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE SurfaceTransformFlagsKHR operator~( SurfaceTransformFlagBitsKHR bits ) + { + return ~( SurfaceTransformFlagsKHR( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(SurfaceTransformFlagBitsKHR::eIdentity) | VkFlags(SurfaceTransformFlagBitsKHR::eRotate90) | VkFlags(SurfaceTransformFlagBitsKHR::eRotate180) | VkFlags(SurfaceTransformFlagBitsKHR::eRotate270) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirror) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate90) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270) | VkFlags(SurfaceTransformFlagBitsKHR::eInherit) + }; + }; + + struct DisplayPropertiesKHR + { + operator VkDisplayPropertiesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayPropertiesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayPropertiesKHR const& rhs ) const + { + return ( display == rhs.display ) + && ( displayName == rhs.displayName ) + && ( physicalDimensions == rhs.physicalDimensions ) + && ( physicalResolution == rhs.physicalResolution ) + && ( supportedTransforms == rhs.supportedTransforms ) + && ( planeReorderPossible == rhs.planeReorderPossible ) + && ( persistentContent == rhs.persistentContent ); + } + + bool operator!=( DisplayPropertiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + DisplayKHR display; + const char* displayName; + Extent2D physicalDimensions; + Extent2D physicalResolution; + SurfaceTransformFlagsKHR supportedTransforms; + Bool32 planeReorderPossible; + Bool32 persistentContent; + }; + static_assert( sizeof( DisplayPropertiesKHR ) == sizeof( 
VkDisplayPropertiesKHR ), "struct and wrapper have different size!" ); + + struct DisplaySurfaceCreateInfoKHR + { + DisplaySurfaceCreateInfoKHR( DisplaySurfaceCreateFlagsKHR flags_ = DisplaySurfaceCreateFlagsKHR(), + DisplayModeKHR displayMode_ = DisplayModeKHR(), + uint32_t planeIndex_ = 0, + uint32_t planeStackIndex_ = 0, + SurfaceTransformFlagBitsKHR transform_ = SurfaceTransformFlagBitsKHR::eIdentity, + float globalAlpha_ = 0, + DisplayPlaneAlphaFlagBitsKHR alphaMode_ = DisplayPlaneAlphaFlagBitsKHR::eOpaque, + Extent2D imageExtent_ = Extent2D() ) + : flags( flags_ ) + , displayMode( displayMode_ ) + , planeIndex( planeIndex_ ) + , planeStackIndex( planeStackIndex_ ) + , transform( transform_ ) + , globalAlpha( globalAlpha_ ) + , alphaMode( alphaMode_ ) + , imageExtent( imageExtent_ ) + { + } + + DisplaySurfaceCreateInfoKHR( VkDisplaySurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplaySurfaceCreateInfoKHR ) ); + } + + DisplaySurfaceCreateInfoKHR& operator=( VkDisplaySurfaceCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplaySurfaceCreateInfoKHR ) ); + return *this; + } + DisplaySurfaceCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DisplaySurfaceCreateInfoKHR& setFlags( DisplaySurfaceCreateFlagsKHR flags_ ) + { + flags = flags_; + return *this; + } + + DisplaySurfaceCreateInfoKHR& setDisplayMode( DisplayModeKHR displayMode_ ) + { + displayMode = displayMode_; + return *this; + } + + DisplaySurfaceCreateInfoKHR& setPlaneIndex( uint32_t planeIndex_ ) + { + planeIndex = planeIndex_; + return *this; + } + + DisplaySurfaceCreateInfoKHR& setPlaneStackIndex( uint32_t planeStackIndex_ ) + { + planeStackIndex = planeStackIndex_; + return *this; + } + + DisplaySurfaceCreateInfoKHR& setTransform( SurfaceTransformFlagBitsKHR transform_ ) + { + transform = transform_; + return *this; + } + + DisplaySurfaceCreateInfoKHR& setGlobalAlpha( float globalAlpha_ ) + { + globalAlpha = globalAlpha_; + return *this; + } + + DisplaySurfaceCreateInfoKHR& setAlphaMode( DisplayPlaneAlphaFlagBitsKHR alphaMode_ ) + { + alphaMode = alphaMode_; + return *this; + } + + DisplaySurfaceCreateInfoKHR& setImageExtent( Extent2D imageExtent_ ) + { + imageExtent = imageExtent_; + return *this; + } + + operator VkDisplaySurfaceCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplaySurfaceCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplaySurfaceCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( displayMode == rhs.displayMode ) + && ( planeIndex == rhs.planeIndex ) + && ( planeStackIndex == rhs.planeStackIndex ) + && ( transform == rhs.transform ) + && ( globalAlpha == rhs.globalAlpha ) + && ( alphaMode == rhs.alphaMode ) + && ( imageExtent == rhs.imageExtent ); + } + + bool operator!=( DisplaySurfaceCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplaySurfaceCreateInfoKHR; + + public: + const void* pNext = nullptr; + DisplaySurfaceCreateFlagsKHR flags; + DisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + SurfaceTransformFlagBitsKHR transform; + float globalAlpha; + DisplayPlaneAlphaFlagBitsKHR alphaMode; + Extent2D imageExtent; + }; + static_assert( sizeof( DisplaySurfaceCreateInfoKHR ) == sizeof( VkDisplaySurfaceCreateInfoKHR ), "struct and wrapper have different size!" 
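A minimal sketch of the fluent setters defined above (illustrative; the display mode handle, plane index and 1920x1080 extent are assumed inputs, not values taken from this patch):

    #include <vulkan/vulkan.hpp>

    // Every set*() member returns *this, so the create info can be filled in one expression.
    vk::DisplaySurfaceCreateInfoKHR makeDisplaySurfaceInfo( vk::DisplayModeKHR mode, uint32_t planeIndex )
    {
        return vk::DisplaySurfaceCreateInfoKHR()
            .setDisplayMode( mode )
            .setPlaneIndex( planeIndex )
            .setTransform( vk::SurfaceTransformFlagBitsKHR::eIdentity )
            .setAlphaMode( vk::DisplayPlaneAlphaFlagBitsKHR::eOpaque )
            .setImageExtent( vk::Extent2D( 1920, 1080 ) );
    }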
); + + struct SurfaceCapabilitiesKHR + { + operator VkSurfaceCapabilitiesKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSurfaceCapabilitiesKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SurfaceCapabilitiesKHR const& rhs ) const + { + return ( minImageCount == rhs.minImageCount ) + && ( maxImageCount == rhs.maxImageCount ) + && ( currentExtent == rhs.currentExtent ) + && ( minImageExtent == rhs.minImageExtent ) + && ( maxImageExtent == rhs.maxImageExtent ) + && ( maxImageArrayLayers == rhs.maxImageArrayLayers ) + && ( supportedTransforms == rhs.supportedTransforms ) + && ( currentTransform == rhs.currentTransform ) + && ( supportedCompositeAlpha == rhs.supportedCompositeAlpha ) + && ( supportedUsageFlags == rhs.supportedUsageFlags ); + } + + bool operator!=( SurfaceCapabilitiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t minImageCount; + uint32_t maxImageCount; + Extent2D currentExtent; + Extent2D minImageExtent; + Extent2D maxImageExtent; + uint32_t maxImageArrayLayers; + SurfaceTransformFlagsKHR supportedTransforms; + SurfaceTransformFlagBitsKHR currentTransform; + CompositeAlphaFlagsKHR supportedCompositeAlpha; + ImageUsageFlags supportedUsageFlags; + }; + static_assert( sizeof( SurfaceCapabilitiesKHR ) == sizeof( VkSurfaceCapabilitiesKHR ), "struct and wrapper have different size!" ); + + struct SurfaceCapabilities2KHR + { + operator VkSurfaceCapabilities2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSurfaceCapabilities2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SurfaceCapabilities2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( surfaceCapabilities == rhs.surfaceCapabilities ); + } + + bool operator!=( SurfaceCapabilities2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSurfaceCapabilities2KHR; + + public: + void* pNext = nullptr; + SurfaceCapabilitiesKHR surfaceCapabilities; + }; + static_assert( sizeof( SurfaceCapabilities2KHR ) == sizeof( VkSurfaceCapabilities2KHR ), "struct and wrapper have different size!" ); + + struct DisplayProperties2KHR + { + operator VkDisplayProperties2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayProperties2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayProperties2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( displayProperties == rhs.displayProperties ); + } + + bool operator!=( DisplayProperties2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplayProperties2KHR; + + public: + void* pNext = nullptr; + DisplayPropertiesKHR displayProperties; + }; + static_assert( sizeof( DisplayProperties2KHR ) == sizeof( VkDisplayProperties2KHR ), "struct and wrapper have different size!" 
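The extent fields above are what swapchain setup typically clamps against; a small sketch (the function name and requested size are hypothetical, and 0xFFFFFFFF is the sentinel the specification uses for "the surface does not dictate the extent"):

    #include <algorithm>
    #include <vulkan/vulkan.hpp>

    // 'caps' is assumed to have been queried for the target surface beforehand.
    vk::Extent2D chooseSwapExtent( const vk::SurfaceCapabilitiesKHR& caps, uint32_t width, uint32_t height )
    {
        if ( caps.currentExtent.width != 0xFFFFFFFFu )
            return caps.currentExtent;                    // the surface already fixes the size
        vk::Extent2D extent;
        extent.width  = std::max( caps.minImageExtent.width,  std::min( caps.maxImageExtent.width,  width ) );
        extent.height = std::max( caps.minImageExtent.height, std::min( caps.maxImageExtent.height, height ) );
        return extent;
    }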
); + + enum class DebugReportFlagBitsEXT + { + eInformation = VK_DEBUG_REPORT_INFORMATION_BIT_EXT, + eWarning = VK_DEBUG_REPORT_WARNING_BIT_EXT, + ePerformanceWarning = VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, + eError = VK_DEBUG_REPORT_ERROR_BIT_EXT, + eDebug = VK_DEBUG_REPORT_DEBUG_BIT_EXT + }; + + using DebugReportFlagsEXT = Flags; + + VULKAN_HPP_INLINE DebugReportFlagsEXT operator|( DebugReportFlagBitsEXT bit0, DebugReportFlagBitsEXT bit1 ) + { + return DebugReportFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE DebugReportFlagsEXT operator~( DebugReportFlagBitsEXT bits ) + { + return ~( DebugReportFlagsEXT( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(DebugReportFlagBitsEXT::eInformation) | VkFlags(DebugReportFlagBitsEXT::eWarning) | VkFlags(DebugReportFlagBitsEXT::ePerformanceWarning) | VkFlags(DebugReportFlagBitsEXT::eError) | VkFlags(DebugReportFlagBitsEXT::eDebug) + }; + }; + + struct DebugReportCallbackCreateInfoEXT + { + DebugReportCallbackCreateInfoEXT( DebugReportFlagsEXT flags_ = DebugReportFlagsEXT(), + PFN_vkDebugReportCallbackEXT pfnCallback_ = nullptr, + void* pUserData_ = nullptr ) + : flags( flags_ ) + , pfnCallback( pfnCallback_ ) + , pUserData( pUserData_ ) + { + } + + DebugReportCallbackCreateInfoEXT( VkDebugReportCallbackCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugReportCallbackCreateInfoEXT ) ); + } + + DebugReportCallbackCreateInfoEXT& operator=( VkDebugReportCallbackCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugReportCallbackCreateInfoEXT ) ); + return *this; + } + DebugReportCallbackCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DebugReportCallbackCreateInfoEXT& setFlags( DebugReportFlagsEXT flags_ ) + { + flags = flags_; + return *this; + } + + DebugReportCallbackCreateInfoEXT& setPfnCallback( PFN_vkDebugReportCallbackEXT pfnCallback_ ) + { + pfnCallback = pfnCallback_; + return *this; + } + + DebugReportCallbackCreateInfoEXT& setPUserData( void* pUserData_ ) + { + pUserData = pUserData_; + return *this; + } + + operator VkDebugReportCallbackCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDebugReportCallbackCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DebugReportCallbackCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pfnCallback == rhs.pfnCallback ) + && ( pUserData == rhs.pUserData ); + } + + bool operator!=( DebugReportCallbackCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDebugReportCallbackCreateInfoEXT; + + public: + const void* pNext = nullptr; + DebugReportFlagsEXT flags; + PFN_vkDebugReportCallbackEXT pfnCallback; + void* pUserData; + }; + static_assert( sizeof( DebugReportCallbackCreateInfoEXT ) == sizeof( VkDebugReportCallbackCreateInfoEXT ), "struct and wrapper have different size!" 
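Illustrative wiring of the create info above to a callback; the callback body is a placeholder, but its signature is fixed by PFN_vkDebugReportCallbackEXT:

    #include <cstdio>
    #include <vulkan/vulkan.hpp>

    static VKAPI_ATTR VkBool32 VKAPI_CALL reportCallback(
        VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object,
        size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage, void* pUserData )
    {
        std::fprintf( stderr, "[%s] %s\n", pLayerPrefix, pMessage );
        return VK_FALSE;   // returning VK_FALSE lets the triggering call continue
    }

    vk::DebugReportCallbackCreateInfoEXT makeReportCallbackInfo()
    {
        return vk::DebugReportCallbackCreateInfoEXT(
            vk::DebugReportFlagBitsEXT::eError | vk::DebugReportFlagBitsEXT::eWarning,
            reportCallback );
    }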
); + + enum class DebugReportObjectTypeEXT + { + eUnknown = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, + eInstance = VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, + ePhysicalDevice = VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, + eDevice = VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, + eQueue = VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, + eSemaphore = VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, + eCommandBuffer = VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, + eFence = VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, + eDeviceMemory = VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, + eBuffer = VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, + eImage = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, + eEvent = VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, + eQueryPool = VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, + eBufferView = VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, + eImageView = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, + eShaderModule = VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, + ePipelineCache = VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, + ePipelineLayout = VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, + eRenderPass = VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, + ePipeline = VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, + eDescriptorSetLayout = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, + eSampler = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, + eDescriptorPool = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, + eDescriptorSet = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, + eFramebuffer = VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, + eCommandPool = VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, + eSurfaceKhr = VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT, + eSwapchainKhr = VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, + eDebugReportCallbackExt = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + eDebugReport = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + eDisplayKhr = VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT, + eDisplayModeKhr = VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT, + eObjectTableNvx = VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT, + eIndirectCommandsLayoutNvx = VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT, + eValidationCacheExt = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + eValidationCache = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + eSamplerYcbcrConversion = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + eSamplerYcbcrConversionKHR = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + eDescriptorUpdateTemplate = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + eDescriptorUpdateTemplateKHR = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + eAccelerationStructureNVX = VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX_EXT + }; + + struct DebugMarkerObjectNameInfoEXT + { + DebugMarkerObjectNameInfoEXT( DebugReportObjectTypeEXT objectType_ = DebugReportObjectTypeEXT::eUnknown, + uint64_t object_ = 0, + const char* pObjectName_ = nullptr ) + : objectType( objectType_ ) + , object( object_ ) + , pObjectName( pObjectName_ ) + { + } + + DebugMarkerObjectNameInfoEXT( VkDebugMarkerObjectNameInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugMarkerObjectNameInfoEXT ) ); + } + + DebugMarkerObjectNameInfoEXT& operator=( VkDebugMarkerObjectNameInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugMarkerObjectNameInfoEXT ) ); + return *this; + } + DebugMarkerObjectNameInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + 
DebugMarkerObjectNameInfoEXT& setObjectType( DebugReportObjectTypeEXT objectType_ ) + { + objectType = objectType_; + return *this; + } + + DebugMarkerObjectNameInfoEXT& setObject( uint64_t object_ ) + { + object = object_; + return *this; + } + + DebugMarkerObjectNameInfoEXT& setPObjectName( const char* pObjectName_ ) + { + pObjectName = pObjectName_; + return *this; + } + + operator VkDebugMarkerObjectNameInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDebugMarkerObjectNameInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DebugMarkerObjectNameInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectType == rhs.objectType ) + && ( object == rhs.object ) + && ( pObjectName == rhs.pObjectName ); + } + + bool operator!=( DebugMarkerObjectNameInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDebugMarkerObjectNameInfoEXT; + + public: + const void* pNext = nullptr; + DebugReportObjectTypeEXT objectType; + uint64_t object; + const char* pObjectName; + }; + static_assert( sizeof( DebugMarkerObjectNameInfoEXT ) == sizeof( VkDebugMarkerObjectNameInfoEXT ), "struct and wrapper have different size!" ); + + struct DebugMarkerObjectTagInfoEXT + { + DebugMarkerObjectTagInfoEXT( DebugReportObjectTypeEXT objectType_ = DebugReportObjectTypeEXT::eUnknown, + uint64_t object_ = 0, + uint64_t tagName_ = 0, + size_t tagSize_ = 0, + const void* pTag_ = nullptr ) + : objectType( objectType_ ) + , object( object_ ) + , tagName( tagName_ ) + , tagSize( tagSize_ ) + , pTag( pTag_ ) + { + } + + DebugMarkerObjectTagInfoEXT( VkDebugMarkerObjectTagInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugMarkerObjectTagInfoEXT ) ); + } + + DebugMarkerObjectTagInfoEXT& operator=( VkDebugMarkerObjectTagInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DebugMarkerObjectTagInfoEXT ) ); + return *this; + } + DebugMarkerObjectTagInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DebugMarkerObjectTagInfoEXT& setObjectType( DebugReportObjectTypeEXT objectType_ ) + { + objectType = objectType_; + return *this; + } + + DebugMarkerObjectTagInfoEXT& setObject( uint64_t object_ ) + { + object = object_; + return *this; + } + + DebugMarkerObjectTagInfoEXT& setTagName( uint64_t tagName_ ) + { + tagName = tagName_; + return *this; + } + + DebugMarkerObjectTagInfoEXT& setTagSize( size_t tagSize_ ) + { + tagSize = tagSize_; + return *this; + } + + DebugMarkerObjectTagInfoEXT& setPTag( const void* pTag_ ) + { + pTag = pTag_; + return *this; + } + + operator VkDebugMarkerObjectTagInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDebugMarkerObjectTagInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DebugMarkerObjectTagInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectType == rhs.objectType ) + && ( object == rhs.object ) + && ( tagName == rhs.tagName ) + && ( tagSize == rhs.tagSize ) + && ( pTag == rhs.pTag ); + } + + bool operator!=( DebugMarkerObjectTagInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDebugMarkerObjectTagInfoEXT; + + public: + const void* pNext = nullptr; + DebugReportObjectTypeEXT objectType; + uint64_t object; + uint64_t tagName; + size_t tagSize; + const void* pTag; + }; + static_assert( sizeof( DebugMarkerObjectTagInfoEXT ) == sizeof( 
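A small sketch of labelling an object through DebugMarkerObjectNameInfoEXT (the handle value and the label text are placeholders; the raw uint64_t handle is whatever the application already tracks):

    #include <vulkan/vulkan.hpp>

    vk::DebugMarkerObjectNameInfoEXT nameBuffer( uint64_t rawBufferHandle )
    {
        return vk::DebugMarkerObjectNameInfoEXT()
            .setObjectType( vk::DebugReportObjectTypeEXT::eBuffer )
            .setObject( rawBufferHandle )             // raw Vulkan handle, passed as uint64_t
            .setPObjectName( "example vertex buffer" );
    }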
VkDebugMarkerObjectTagInfoEXT ), "struct and wrapper have different size!" ); + + enum class RasterizationOrderAMD + { + eStrict = VK_RASTERIZATION_ORDER_STRICT_AMD, + eRelaxed = VK_RASTERIZATION_ORDER_RELAXED_AMD + }; + + struct PipelineRasterizationStateRasterizationOrderAMD + { + PipelineRasterizationStateRasterizationOrderAMD( RasterizationOrderAMD rasterizationOrder_ = RasterizationOrderAMD::eStrict ) + : rasterizationOrder( rasterizationOrder_ ) + { + } + + PipelineRasterizationStateRasterizationOrderAMD( VkPipelineRasterizationStateRasterizationOrderAMD const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineRasterizationStateRasterizationOrderAMD ) ); + } + + PipelineRasterizationStateRasterizationOrderAMD& operator=( VkPipelineRasterizationStateRasterizationOrderAMD const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineRasterizationStateRasterizationOrderAMD ) ); + return *this; + } + PipelineRasterizationStateRasterizationOrderAMD& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineRasterizationStateRasterizationOrderAMD& setRasterizationOrder( RasterizationOrderAMD rasterizationOrder_ ) + { + rasterizationOrder = rasterizationOrder_; + return *this; + } + + operator VkPipelineRasterizationStateRasterizationOrderAMD const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineRasterizationStateRasterizationOrderAMD &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineRasterizationStateRasterizationOrderAMD const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( rasterizationOrder == rhs.rasterizationOrder ); + } + + bool operator!=( PipelineRasterizationStateRasterizationOrderAMD const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineRasterizationStateRasterizationOrderAMD; + + public: + const void* pNext = nullptr; + RasterizationOrderAMD rasterizationOrder; + }; + static_assert( sizeof( PipelineRasterizationStateRasterizationOrderAMD ) == sizeof( VkPipelineRasterizationStateRasterizationOrderAMD ), "struct and wrapper have different size!" 
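Sketch of attaching the AMD rasterization-order struct to an ordinary rasterization state via pNext (the surrounding pipeline setup is assumed to exist elsewhere; both structs must outlive pipeline creation):

    #include <vulkan/vulkan.hpp>

    void requestRelaxedRasterOrder( vk::PipelineRasterizationStateCreateInfo& rasterState,
                                    vk::PipelineRasterizationStateRasterizationOrderAMD& orderInfo )
    {
        orderInfo.setRasterizationOrder( vk::RasterizationOrderAMD::eRelaxed );
        rasterState.setPNext( &orderInfo );   // chained as an extension of the base state
    }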
); + + enum class ExternalMemoryHandleTypeFlagBitsNV + { + eOpaqueWin32 = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV, + eOpaqueWin32Kmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV, + eD3D11Image = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV, + eD3D11ImageKmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV + }; + + using ExternalMemoryHandleTypeFlagsNV = Flags; + + VULKAN_HPP_INLINE ExternalMemoryHandleTypeFlagsNV operator|( ExternalMemoryHandleTypeFlagBitsNV bit0, ExternalMemoryHandleTypeFlagBitsNV bit1 ) + { + return ExternalMemoryHandleTypeFlagsNV( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ExternalMemoryHandleTypeFlagsNV operator~( ExternalMemoryHandleTypeFlagBitsNV bits ) + { + return ~( ExternalMemoryHandleTypeFlagsNV( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32) | VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt) | VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image) | VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt) + }; + }; + + struct ExternalMemoryImageCreateInfoNV + { + ExternalMemoryImageCreateInfoNV( ExternalMemoryHandleTypeFlagsNV handleTypes_ = ExternalMemoryHandleTypeFlagsNV() ) + : handleTypes( handleTypes_ ) + { + } + + ExternalMemoryImageCreateInfoNV( VkExternalMemoryImageCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ExternalMemoryImageCreateInfoNV ) ); + } + + ExternalMemoryImageCreateInfoNV& operator=( VkExternalMemoryImageCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ExternalMemoryImageCreateInfoNV ) ); + return *this; + } + ExternalMemoryImageCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExternalMemoryImageCreateInfoNV& setHandleTypes( ExternalMemoryHandleTypeFlagsNV handleTypes_ ) + { + handleTypes = handleTypes_; + return *this; + } + + operator VkExternalMemoryImageCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkExternalMemoryImageCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExternalMemoryImageCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExternalMemoryImageCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExternalMemoryImageCreateInfoNV; + + public: + const void* pNext = nullptr; + ExternalMemoryHandleTypeFlagsNV handleTypes; + }; + static_assert( sizeof( ExternalMemoryImageCreateInfoNV ) == sizeof( VkExternalMemoryImageCreateInfoNV ), "struct and wrapper have different size!" 
); + + struct ExportMemoryAllocateInfoNV + { + ExportMemoryAllocateInfoNV( ExternalMemoryHandleTypeFlagsNV handleTypes_ = ExternalMemoryHandleTypeFlagsNV() ) + : handleTypes( handleTypes_ ) + { + } + + ExportMemoryAllocateInfoNV( VkExportMemoryAllocateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportMemoryAllocateInfoNV ) ); + } + + ExportMemoryAllocateInfoNV& operator=( VkExportMemoryAllocateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportMemoryAllocateInfoNV ) ); + return *this; + } + ExportMemoryAllocateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExportMemoryAllocateInfoNV& setHandleTypes( ExternalMemoryHandleTypeFlagsNV handleTypes_ ) + { + handleTypes = handleTypes_; + return *this; + } + + operator VkExportMemoryAllocateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkExportMemoryAllocateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExportMemoryAllocateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExportMemoryAllocateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExportMemoryAllocateInfoNV; + + public: + const void* pNext = nullptr; + ExternalMemoryHandleTypeFlagsNV handleTypes; + }; + static_assert( sizeof( ExportMemoryAllocateInfoNV ) == sizeof( VkExportMemoryAllocateInfoNV ), "struct and wrapper have different size!" ); + +#ifdef VK_USE_PLATFORM_WIN32_NV + struct ImportMemoryWin32HandleInfoNV + { + ImportMemoryWin32HandleInfoNV( ExternalMemoryHandleTypeFlagsNV handleType_ = ExternalMemoryHandleTypeFlagsNV(), + HANDLE handle_ = 0 ) + : handleType( handleType_ ) + , handle( handle_ ) + { + } + + ImportMemoryWin32HandleInfoNV( VkImportMemoryWin32HandleInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportMemoryWin32HandleInfoNV ) ); + } + + ImportMemoryWin32HandleInfoNV& operator=( VkImportMemoryWin32HandleInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportMemoryWin32HandleInfoNV ) ); + return *this; + } + ImportMemoryWin32HandleInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImportMemoryWin32HandleInfoNV& setHandleType( ExternalMemoryHandleTypeFlagsNV handleType_ ) + { + handleType = handleType_; + return *this; + } + + ImportMemoryWin32HandleInfoNV& setHandle( HANDLE handle_ ) + { + handle = handle_; + return *this; + } + + operator VkImportMemoryWin32HandleInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkImportMemoryWin32HandleInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImportMemoryWin32HandleInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ) + && ( handle == rhs.handle ); + } + + bool operator!=( ImportMemoryWin32HandleInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImportMemoryWin32HandleInfoNV; + + public: + const void* pNext = nullptr; + ExternalMemoryHandleTypeFlagsNV handleType; + HANDLE handle; + }; + static_assert( sizeof( ImportMemoryWin32HandleInfoNV ) == sizeof( VkImportMemoryWin32HandleInfoNV ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_NV*/ + + enum class ExternalMemoryFeatureFlagBitsNV + { + eDedicatedOnly = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV, + eExportable = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV, + eImportable = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV + }; + + using ExternalMemoryFeatureFlagsNV = Flags<ExternalMemoryFeatureFlagBitsNV, VkExternalMemoryFeatureFlagsNV>; + + VULKAN_HPP_INLINE ExternalMemoryFeatureFlagsNV operator|( ExternalMemoryFeatureFlagBitsNV bit0, ExternalMemoryFeatureFlagBitsNV bit1 ) + { + return ExternalMemoryFeatureFlagsNV( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ExternalMemoryFeatureFlagsNV operator~( ExternalMemoryFeatureFlagBitsNV bits ) + { + return ~( ExternalMemoryFeatureFlagsNV( bits ) ); + } + + template <> struct FlagTraits<ExternalMemoryFeatureFlagBitsNV> + { + enum + { + allFlags = VkFlags(ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly) | VkFlags(ExternalMemoryFeatureFlagBitsNV::eExportable) | VkFlags(ExternalMemoryFeatureFlagBitsNV::eImportable) + }; + }; + + struct ExternalImageFormatPropertiesNV + { + operator VkExternalImageFormatPropertiesNV const&() const + { + return *reinterpret_cast<const VkExternalImageFormatPropertiesNV*>(this); + } + + operator VkExternalImageFormatPropertiesNV &() + { + return *reinterpret_cast<VkExternalImageFormatPropertiesNV*>(this); + } + + bool operator==( ExternalImageFormatPropertiesNV const& rhs ) const + { + return ( imageFormatProperties == rhs.imageFormatProperties ) + && ( externalMemoryFeatures == rhs.externalMemoryFeatures ) + && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes ) + && ( compatibleHandleTypes == rhs.compatibleHandleTypes ); + } + + bool operator!=( ExternalImageFormatPropertiesNV const& rhs ) const + { + return !operator==( rhs ); + } + + ImageFormatProperties imageFormatProperties; + ExternalMemoryFeatureFlagsNV externalMemoryFeatures; + ExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes; + ExternalMemoryHandleTypeFlagsNV compatibleHandleTypes; + }; + static_assert( sizeof( ExternalImageFormatPropertiesNV ) == sizeof( VkExternalImageFormatPropertiesNV ), "struct and wrapper have different size!"
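Sketch of how the NV external-memory structs above are usually chained: the same handle-type mask is declared on the image create info and on the matching memory allocation (all instances and their lifetimes are the caller's responsibility; the function name is made up):

    #include <vulkan/vulkan.hpp>

    void buildExternalMemoryChain( vk::ExternalMemoryHandleTypeFlagsNV handleTypes,
                                   vk::ImageCreateInfo& imageInfo,
                                   vk::MemoryAllocateInfo& allocInfo,
                                   vk::ExternalMemoryImageCreateInfoNV& extImage,
                                   vk::ExportMemoryAllocateInfoNV& exportAlloc )
    {
        extImage.setHandleTypes( handleTypes );      // declared on the image...
        exportAlloc.setHandleTypes( handleTypes );   // ...and on the allocation that backs it
        imageInfo.setPNext( &extImage );
        allocInfo.setPNext( &exportAlloc );
    }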
); + + enum class ValidationCheckEXT + { + eAll = VK_VALIDATION_CHECK_ALL_EXT, + eShaders = VK_VALIDATION_CHECK_SHADERS_EXT + }; + + struct ValidationFlagsEXT + { + ValidationFlagsEXT( uint32_t disabledValidationCheckCount_ = 0, + const ValidationCheckEXT* pDisabledValidationChecks_ = nullptr ) + : disabledValidationCheckCount( disabledValidationCheckCount_ ) + , pDisabledValidationChecks( pDisabledValidationChecks_ ) + { + } + + ValidationFlagsEXT( VkValidationFlagsEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ValidationFlagsEXT ) ); + } + + ValidationFlagsEXT& operator=( VkValidationFlagsEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ValidationFlagsEXT ) ); + return *this; + } + ValidationFlagsEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ValidationFlagsEXT& setDisabledValidationCheckCount( uint32_t disabledValidationCheckCount_ ) + { + disabledValidationCheckCount = disabledValidationCheckCount_; + return *this; + } + + ValidationFlagsEXT& setPDisabledValidationChecks( const ValidationCheckEXT* pDisabledValidationChecks_ ) + { + pDisabledValidationChecks = pDisabledValidationChecks_; + return *this; + } + + operator VkValidationFlagsEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkValidationFlagsEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( ValidationFlagsEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( disabledValidationCheckCount == rhs.disabledValidationCheckCount ) + && ( pDisabledValidationChecks == rhs.pDisabledValidationChecks ); + } + + bool operator!=( ValidationFlagsEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eValidationFlagsEXT; + + public: + const void* pNext = nullptr; + uint32_t disabledValidationCheckCount; + const ValidationCheckEXT* pDisabledValidationChecks; + }; + static_assert( sizeof( ValidationFlagsEXT ) == sizeof( VkValidationFlagsEXT ), "struct and wrapper have different size!" 
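Illustrative construction of ValidationFlagsEXT that disables shader checks only (the array contents are an assumption; the struct would then be chained through the pNext of instance creation):

    #include <vulkan/vulkan.hpp>

    static const vk::ValidationCheckEXT kDisabledChecks[] = { vk::ValidationCheckEXT::eShaders };

    vk::ValidationFlagsEXT makeValidationFlags()
    {
        // Constructor order matches the definition above: count first, then the pointer.
        return vk::ValidationFlagsEXT( 1, kDisabledChecks );
    }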
); + + enum class SubgroupFeatureFlagBits + { + eBasic = VK_SUBGROUP_FEATURE_BASIC_BIT, + eVote = VK_SUBGROUP_FEATURE_VOTE_BIT, + eArithmetic = VK_SUBGROUP_FEATURE_ARITHMETIC_BIT, + eBallot = VK_SUBGROUP_FEATURE_BALLOT_BIT, + eShuffle = VK_SUBGROUP_FEATURE_SHUFFLE_BIT, + eShuffleRelative = VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT, + eClustered = VK_SUBGROUP_FEATURE_CLUSTERED_BIT, + eQuad = VK_SUBGROUP_FEATURE_QUAD_BIT, + ePartitionedNV = VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV + }; + + using SubgroupFeatureFlags = Flags; + + VULKAN_HPP_INLINE SubgroupFeatureFlags operator|( SubgroupFeatureFlagBits bit0, SubgroupFeatureFlagBits bit1 ) + { + return SubgroupFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE SubgroupFeatureFlags operator~( SubgroupFeatureFlagBits bits ) + { + return ~( SubgroupFeatureFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(SubgroupFeatureFlagBits::eBasic) | VkFlags(SubgroupFeatureFlagBits::eVote) | VkFlags(SubgroupFeatureFlagBits::eArithmetic) | VkFlags(SubgroupFeatureFlagBits::eBallot) | VkFlags(SubgroupFeatureFlagBits::eShuffle) | VkFlags(SubgroupFeatureFlagBits::eShuffleRelative) | VkFlags(SubgroupFeatureFlagBits::eClustered) | VkFlags(SubgroupFeatureFlagBits::eQuad) | VkFlags(SubgroupFeatureFlagBits::ePartitionedNV) + }; + }; + + struct PhysicalDeviceSubgroupProperties + { + operator VkPhysicalDeviceSubgroupProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceSubgroupProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceSubgroupProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( subgroupSize == rhs.subgroupSize ) + && ( supportedStages == rhs.supportedStages ) + && ( supportedOperations == rhs.supportedOperations ) + && ( quadOperationsInAllStages == rhs.quadOperationsInAllStages ); + } + + bool operator!=( PhysicalDeviceSubgroupProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceSubgroupProperties; + + public: + void* pNext = nullptr; + uint32_t subgroupSize; + ShaderStageFlags supportedStages; + SubgroupFeatureFlags supportedOperations; + Bool32 quadOperationsInAllStages; + }; + static_assert( sizeof( PhysicalDeviceSubgroupProperties ) == sizeof( VkPhysicalDeviceSubgroupProperties ), "struct and wrapper have different size!" 
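A sketch of testing PhysicalDeviceSubgroupProperties once it has been filled in (how it is queried is outside this excerpt; the predicate name is made up):

    #include <vulkan/vulkan.hpp>

    bool supportsComputeBallot( const vk::PhysicalDeviceSubgroupProperties& props )
    {
        bool stageOk = static_cast<bool>( props.supportedStages     & vk::ShaderStageFlagBits::eCompute );
        bool opOk    = static_cast<bool>( props.supportedOperations & vk::SubgroupFeatureFlagBits::eBallot );
        return stageOk && opOk;
    }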
); + + enum class IndirectCommandsLayoutUsageFlagBitsNVX + { + eUnorderedSequences = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX, + eSparseSequences = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX, + eEmptyExecutions = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX, + eIndexedSequences = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX + }; + + using IndirectCommandsLayoutUsageFlagsNVX = Flags; + + VULKAN_HPP_INLINE IndirectCommandsLayoutUsageFlagsNVX operator|( IndirectCommandsLayoutUsageFlagBitsNVX bit0, IndirectCommandsLayoutUsageFlagBitsNVX bit1 ) + { + return IndirectCommandsLayoutUsageFlagsNVX( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE IndirectCommandsLayoutUsageFlagsNVX operator~( IndirectCommandsLayoutUsageFlagBitsNVX bits ) + { + return ~( IndirectCommandsLayoutUsageFlagsNVX( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(IndirectCommandsLayoutUsageFlagBitsNVX::eUnorderedSequences) | VkFlags(IndirectCommandsLayoutUsageFlagBitsNVX::eSparseSequences) | VkFlags(IndirectCommandsLayoutUsageFlagBitsNVX::eEmptyExecutions) | VkFlags(IndirectCommandsLayoutUsageFlagBitsNVX::eIndexedSequences) + }; + }; + + enum class ObjectEntryUsageFlagBitsNVX + { + eGraphics = VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX, + eCompute = VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX + }; + + using ObjectEntryUsageFlagsNVX = Flags; + + VULKAN_HPP_INLINE ObjectEntryUsageFlagsNVX operator|( ObjectEntryUsageFlagBitsNVX bit0, ObjectEntryUsageFlagBitsNVX bit1 ) + { + return ObjectEntryUsageFlagsNVX( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ObjectEntryUsageFlagsNVX operator~( ObjectEntryUsageFlagBitsNVX bits ) + { + return ~( ObjectEntryUsageFlagsNVX( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ObjectEntryUsageFlagBitsNVX::eGraphics) | VkFlags(ObjectEntryUsageFlagBitsNVX::eCompute) + }; + }; + + enum class IndirectCommandsTokenTypeNVX + { + ePipeline = VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX, + eDescriptorSet = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DESCRIPTOR_SET_NVX, + eIndexBuffer = VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NVX, + eVertexBuffer = VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NVX, + ePushConstant = VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NVX, + eDrawIndexed = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NVX, + eDraw = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NVX, + eDispatch = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX + }; + + struct IndirectCommandsTokenNVX + { + IndirectCommandsTokenNVX( IndirectCommandsTokenTypeNVX tokenType_ = IndirectCommandsTokenTypeNVX::ePipeline, + Buffer buffer_ = Buffer(), + DeviceSize offset_ = 0 ) + : tokenType( tokenType_ ) + , buffer( buffer_ ) + , offset( offset_ ) + { + } + + IndirectCommandsTokenNVX( VkIndirectCommandsTokenNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( IndirectCommandsTokenNVX ) ); + } + + IndirectCommandsTokenNVX& operator=( VkIndirectCommandsTokenNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( IndirectCommandsTokenNVX ) ); + return *this; + } + IndirectCommandsTokenNVX& setTokenType( IndirectCommandsTokenTypeNVX tokenType_ ) + { + tokenType = tokenType_; + return *this; + } + + IndirectCommandsTokenNVX& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + IndirectCommandsTokenNVX& setOffset( DeviceSize offset_ ) + { + offset = offset_; + return *this; + } + + operator VkIndirectCommandsTokenNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkIndirectCommandsTokenNVX &() 
+ { + return *reinterpret_cast(this); + } + + bool operator==( IndirectCommandsTokenNVX const& rhs ) const + { + return ( tokenType == rhs.tokenType ) + && ( buffer == rhs.buffer ) + && ( offset == rhs.offset ); + } + + bool operator!=( IndirectCommandsTokenNVX const& rhs ) const + { + return !operator==( rhs ); + } + + IndirectCommandsTokenTypeNVX tokenType; + Buffer buffer; + DeviceSize offset; + }; + static_assert( sizeof( IndirectCommandsTokenNVX ) == sizeof( VkIndirectCommandsTokenNVX ), "struct and wrapper have different size!" ); + + struct IndirectCommandsLayoutTokenNVX + { + IndirectCommandsLayoutTokenNVX( IndirectCommandsTokenTypeNVX tokenType_ = IndirectCommandsTokenTypeNVX::ePipeline, + uint32_t bindingUnit_ = 0, + uint32_t dynamicCount_ = 0, + uint32_t divisor_ = 0 ) + : tokenType( tokenType_ ) + , bindingUnit( bindingUnit_ ) + , dynamicCount( dynamicCount_ ) + , divisor( divisor_ ) + { + } + + IndirectCommandsLayoutTokenNVX( VkIndirectCommandsLayoutTokenNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( IndirectCommandsLayoutTokenNVX ) ); + } + + IndirectCommandsLayoutTokenNVX& operator=( VkIndirectCommandsLayoutTokenNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( IndirectCommandsLayoutTokenNVX ) ); + return *this; + } + IndirectCommandsLayoutTokenNVX& setTokenType( IndirectCommandsTokenTypeNVX tokenType_ ) + { + tokenType = tokenType_; + return *this; + } + + IndirectCommandsLayoutTokenNVX& setBindingUnit( uint32_t bindingUnit_ ) + { + bindingUnit = bindingUnit_; + return *this; + } + + IndirectCommandsLayoutTokenNVX& setDynamicCount( uint32_t dynamicCount_ ) + { + dynamicCount = dynamicCount_; + return *this; + } + + IndirectCommandsLayoutTokenNVX& setDivisor( uint32_t divisor_ ) + { + divisor = divisor_; + return *this; + } + + operator VkIndirectCommandsLayoutTokenNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkIndirectCommandsLayoutTokenNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( IndirectCommandsLayoutTokenNVX const& rhs ) const + { + return ( tokenType == rhs.tokenType ) + && ( bindingUnit == rhs.bindingUnit ) + && ( dynamicCount == rhs.dynamicCount ) + && ( divisor == rhs.divisor ); + } + + bool operator!=( IndirectCommandsLayoutTokenNVX const& rhs ) const + { + return !operator==( rhs ); + } + + IndirectCommandsTokenTypeNVX tokenType; + uint32_t bindingUnit; + uint32_t dynamicCount; + uint32_t divisor; + }; + static_assert( sizeof( IndirectCommandsLayoutTokenNVX ) == sizeof( VkIndirectCommandsLayoutTokenNVX ), "struct and wrapper have different size!" 
); + + struct IndirectCommandsLayoutCreateInfoNVX + { + IndirectCommandsLayoutCreateInfoNVX( PipelineBindPoint pipelineBindPoint_ = PipelineBindPoint::eGraphics, + IndirectCommandsLayoutUsageFlagsNVX flags_ = IndirectCommandsLayoutUsageFlagsNVX(), + uint32_t tokenCount_ = 0, + const IndirectCommandsLayoutTokenNVX* pTokens_ = nullptr ) + : pipelineBindPoint( pipelineBindPoint_ ) + , flags( flags_ ) + , tokenCount( tokenCount_ ) + , pTokens( pTokens_ ) + { + } + + IndirectCommandsLayoutCreateInfoNVX( VkIndirectCommandsLayoutCreateInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( IndirectCommandsLayoutCreateInfoNVX ) ); + } + + IndirectCommandsLayoutCreateInfoNVX& operator=( VkIndirectCommandsLayoutCreateInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( IndirectCommandsLayoutCreateInfoNVX ) ); + return *this; + } + IndirectCommandsLayoutCreateInfoNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + IndirectCommandsLayoutCreateInfoNVX& setPipelineBindPoint( PipelineBindPoint pipelineBindPoint_ ) + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + IndirectCommandsLayoutCreateInfoNVX& setFlags( IndirectCommandsLayoutUsageFlagsNVX flags_ ) + { + flags = flags_; + return *this; + } + + IndirectCommandsLayoutCreateInfoNVX& setTokenCount( uint32_t tokenCount_ ) + { + tokenCount = tokenCount_; + return *this; + } + + IndirectCommandsLayoutCreateInfoNVX& setPTokens( const IndirectCommandsLayoutTokenNVX* pTokens_ ) + { + pTokens = pTokens_; + return *this; + } + + operator VkIndirectCommandsLayoutCreateInfoNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkIndirectCommandsLayoutCreateInfoNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( IndirectCommandsLayoutCreateInfoNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( flags == rhs.flags ) + && ( tokenCount == rhs.tokenCount ) + && ( pTokens == rhs.pTokens ); + } + + bool operator!=( IndirectCommandsLayoutCreateInfoNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eIndirectCommandsLayoutCreateInfoNVX; + + public: + const void* pNext = nullptr; + PipelineBindPoint pipelineBindPoint; + IndirectCommandsLayoutUsageFlagsNVX flags; + uint32_t tokenCount; + const IndirectCommandsLayoutTokenNVX* pTokens; + }; + static_assert( sizeof( IndirectCommandsLayoutCreateInfoNVX ) == sizeof( VkIndirectCommandsLayoutCreateInfoNVX ), "struct and wrapper have different size!" 
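Hypothetical fill of IndirectCommandsLayoutCreateInfoNVX from a token array; the token parameters are placeholders, not values taken from this patch:

    #include <vulkan/vulkan.hpp>

    vk::IndirectCommandsLayoutCreateInfoNVX makeIndirectLayoutInfo()
    {
        // A push-constant token followed by an indexed-draw token; kept static so the
        // pointer stored in the create info stays valid for the caller.
        static const vk::IndirectCommandsLayoutTokenNVX tokens[] = {
            vk::IndirectCommandsLayoutTokenNVX( vk::IndirectCommandsTokenTypeNVX::ePushConstant, 0, 1, 1 ),
            vk::IndirectCommandsLayoutTokenNVX( vk::IndirectCommandsTokenTypeNVX::eDrawIndexed ),
        };
        return vk::IndirectCommandsLayoutCreateInfoNVX(
            vk::PipelineBindPoint::eGraphics,
            vk::IndirectCommandsLayoutUsageFlagBitsNVX::eUnorderedSequences,
            2, tokens );
    }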
); + + enum class ObjectEntryTypeNVX + { + eDescriptorSet = VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX, + ePipeline = VK_OBJECT_ENTRY_TYPE_PIPELINE_NVX, + eIndexBuffer = VK_OBJECT_ENTRY_TYPE_INDEX_BUFFER_NVX, + eVertexBuffer = VK_OBJECT_ENTRY_TYPE_VERTEX_BUFFER_NVX, + ePushConstant = VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX + }; + + struct ObjectTableCreateInfoNVX + { + ObjectTableCreateInfoNVX( uint32_t objectCount_ = 0, + const ObjectEntryTypeNVX* pObjectEntryTypes_ = nullptr, + const uint32_t* pObjectEntryCounts_ = nullptr, + const ObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags_ = nullptr, + uint32_t maxUniformBuffersPerDescriptor_ = 0, + uint32_t maxStorageBuffersPerDescriptor_ = 0, + uint32_t maxStorageImagesPerDescriptor_ = 0, + uint32_t maxSampledImagesPerDescriptor_ = 0, + uint32_t maxPipelineLayouts_ = 0 ) + : objectCount( objectCount_ ) + , pObjectEntryTypes( pObjectEntryTypes_ ) + , pObjectEntryCounts( pObjectEntryCounts_ ) + , pObjectEntryUsageFlags( pObjectEntryUsageFlags_ ) + , maxUniformBuffersPerDescriptor( maxUniformBuffersPerDescriptor_ ) + , maxStorageBuffersPerDescriptor( maxStorageBuffersPerDescriptor_ ) + , maxStorageImagesPerDescriptor( maxStorageImagesPerDescriptor_ ) + , maxSampledImagesPerDescriptor( maxSampledImagesPerDescriptor_ ) + , maxPipelineLayouts( maxPipelineLayouts_ ) + { + } + + ObjectTableCreateInfoNVX( VkObjectTableCreateInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableCreateInfoNVX ) ); + } + + ObjectTableCreateInfoNVX& operator=( VkObjectTableCreateInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableCreateInfoNVX ) ); + return *this; + } + ObjectTableCreateInfoNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ObjectTableCreateInfoNVX& setObjectCount( uint32_t objectCount_ ) + { + objectCount = objectCount_; + return *this; + } + + ObjectTableCreateInfoNVX& setPObjectEntryTypes( const ObjectEntryTypeNVX* pObjectEntryTypes_ ) + { + pObjectEntryTypes = pObjectEntryTypes_; + return *this; + } + + ObjectTableCreateInfoNVX& setPObjectEntryCounts( const uint32_t* pObjectEntryCounts_ ) + { + pObjectEntryCounts = pObjectEntryCounts_; + return *this; + } + + ObjectTableCreateInfoNVX& setPObjectEntryUsageFlags( const ObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags_ ) + { + pObjectEntryUsageFlags = pObjectEntryUsageFlags_; + return *this; + } + + ObjectTableCreateInfoNVX& setMaxUniformBuffersPerDescriptor( uint32_t maxUniformBuffersPerDescriptor_ ) + { + maxUniformBuffersPerDescriptor = maxUniformBuffersPerDescriptor_; + return *this; + } + + ObjectTableCreateInfoNVX& setMaxStorageBuffersPerDescriptor( uint32_t maxStorageBuffersPerDescriptor_ ) + { + maxStorageBuffersPerDescriptor = maxStorageBuffersPerDescriptor_; + return *this; + } + + ObjectTableCreateInfoNVX& setMaxStorageImagesPerDescriptor( uint32_t maxStorageImagesPerDescriptor_ ) + { + maxStorageImagesPerDescriptor = maxStorageImagesPerDescriptor_; + return *this; + } + + ObjectTableCreateInfoNVX& setMaxSampledImagesPerDescriptor( uint32_t maxSampledImagesPerDescriptor_ ) + { + maxSampledImagesPerDescriptor = maxSampledImagesPerDescriptor_; + return *this; + } + + ObjectTableCreateInfoNVX& setMaxPipelineLayouts( uint32_t maxPipelineLayouts_ ) + { + maxPipelineLayouts = maxPipelineLayouts_; + return *this; + } + + operator VkObjectTableCreateInfoNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkObjectTableCreateInfoNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( ObjectTableCreateInfoNVX const& 
rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectCount == rhs.objectCount ) + && ( pObjectEntryTypes == rhs.pObjectEntryTypes ) + && ( pObjectEntryCounts == rhs.pObjectEntryCounts ) + && ( pObjectEntryUsageFlags == rhs.pObjectEntryUsageFlags ) + && ( maxUniformBuffersPerDescriptor == rhs.maxUniformBuffersPerDescriptor ) + && ( maxStorageBuffersPerDescriptor == rhs.maxStorageBuffersPerDescriptor ) + && ( maxStorageImagesPerDescriptor == rhs.maxStorageImagesPerDescriptor ) + && ( maxSampledImagesPerDescriptor == rhs.maxSampledImagesPerDescriptor ) + && ( maxPipelineLayouts == rhs.maxPipelineLayouts ); + } + + bool operator!=( ObjectTableCreateInfoNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eObjectTableCreateInfoNVX; + + public: + const void* pNext = nullptr; + uint32_t objectCount; + const ObjectEntryTypeNVX* pObjectEntryTypes; + const uint32_t* pObjectEntryCounts; + const ObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags; + uint32_t maxUniformBuffersPerDescriptor; + uint32_t maxStorageBuffersPerDescriptor; + uint32_t maxStorageImagesPerDescriptor; + uint32_t maxSampledImagesPerDescriptor; + uint32_t maxPipelineLayouts; + }; + static_assert( sizeof( ObjectTableCreateInfoNVX ) == sizeof( VkObjectTableCreateInfoNVX ), "struct and wrapper have different size!" ); + + struct ObjectTableEntryNVX + { + ObjectTableEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eDescriptorSet, + ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX() ) + : type( type_ ) + , flags( flags_ ) + { + } + + ObjectTableEntryNVX( VkObjectTableEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableEntryNVX ) ); + } + + ObjectTableEntryNVX& operator=( VkObjectTableEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableEntryNVX ) ); + return *this; + } + ObjectTableEntryNVX& setType( ObjectEntryTypeNVX type_ ) + { + type = type_; + return *this; + } + + ObjectTableEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ ) + { + flags = flags_; + return *this; + } + + operator VkObjectTableEntryNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkObjectTableEntryNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( ObjectTableEntryNVX const& rhs ) const + { + return ( type == rhs.type ) + && ( flags == rhs.flags ); + } + + bool operator!=( ObjectTableEntryNVX const& rhs ) const + { + return !operator==( rhs ); + } + + ObjectEntryTypeNVX type; + ObjectEntryUsageFlagsNVX flags; + }; + static_assert( sizeof( ObjectTableEntryNVX ) == sizeof( VkObjectTableEntryNVX ), "struct and wrapper have different size!" 
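A placeholder-sized ObjectTableCreateInfoNVX, mainly to show how the parallel type/count/usage arrays line up (all limits here are arbitrary illustration values):

    #include <cstdint>
    #include <vulkan/vulkan.hpp>

    vk::ObjectTableCreateInfoNVX makeObjectTableInfo()
    {
        static const vk::ObjectEntryTypeNVX       types[]  = { vk::ObjectEntryTypeNVX::ePipeline,
                                                               vk::ObjectEntryTypeNVX::eDescriptorSet };
        static const uint32_t                     counts[] = { 1, 1 };
        static const vk::ObjectEntryUsageFlagsNVX usages[] = { vk::ObjectEntryUsageFlagBitsNVX::eGraphics,
                                                               vk::ObjectEntryUsageFlagBitsNVX::eGraphics };
        return vk::ObjectTableCreateInfoNVX()
            .setObjectCount( 2 )                  // one entry description per element of the arrays
            .setPObjectEntryTypes( types )
            .setPObjectEntryCounts( counts )
            .setPObjectEntryUsageFlags( usages )
            .setMaxPipelineLayouts( 1 );
    }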
); + + struct ObjectTablePipelineEntryNVX + { + ObjectTablePipelineEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eDescriptorSet, + ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), + Pipeline pipeline_ = Pipeline() ) + : type( type_ ) + , flags( flags_ ) + , pipeline( pipeline_ ) + { + } + + explicit ObjectTablePipelineEntryNVX( ObjectTableEntryNVX const& objectTableEntryNVX, + Pipeline pipeline_ = Pipeline() ) + : type( objectTableEntryNVX.type ) + , flags( objectTableEntryNVX.flags ) + , pipeline( pipeline_ ) + {} + + ObjectTablePipelineEntryNVX( VkObjectTablePipelineEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTablePipelineEntryNVX ) ); + } + + ObjectTablePipelineEntryNVX& operator=( VkObjectTablePipelineEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTablePipelineEntryNVX ) ); + return *this; + } + ObjectTablePipelineEntryNVX& setType( ObjectEntryTypeNVX type_ ) + { + type = type_; + return *this; + } + + ObjectTablePipelineEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ ) + { + flags = flags_; + return *this; + } + + ObjectTablePipelineEntryNVX& setPipeline( Pipeline pipeline_ ) + { + pipeline = pipeline_; + return *this; + } + + operator VkObjectTablePipelineEntryNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkObjectTablePipelineEntryNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( ObjectTablePipelineEntryNVX const& rhs ) const + { + return ( type == rhs.type ) + && ( flags == rhs.flags ) + && ( pipeline == rhs.pipeline ); + } + + bool operator!=( ObjectTablePipelineEntryNVX const& rhs ) const + { + return !operator==( rhs ); + } + + ObjectEntryTypeNVX type; + ObjectEntryUsageFlagsNVX flags; + Pipeline pipeline; + }; + static_assert( sizeof( ObjectTablePipelineEntryNVX ) == sizeof( VkObjectTablePipelineEntryNVX ), "struct and wrapper have different size!" 
); + + struct ObjectTableDescriptorSetEntryNVX + { + ObjectTableDescriptorSetEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eDescriptorSet, + ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), + PipelineLayout pipelineLayout_ = PipelineLayout(), + DescriptorSet descriptorSet_ = DescriptorSet() ) + : type( type_ ) + , flags( flags_ ) + , pipelineLayout( pipelineLayout_ ) + , descriptorSet( descriptorSet_ ) + { + } + + explicit ObjectTableDescriptorSetEntryNVX( ObjectTableEntryNVX const& objectTableEntryNVX, + PipelineLayout pipelineLayout_ = PipelineLayout(), + DescriptorSet descriptorSet_ = DescriptorSet() ) + : type( objectTableEntryNVX.type ) + , flags( objectTableEntryNVX.flags ) + , pipelineLayout( pipelineLayout_ ) + , descriptorSet( descriptorSet_ ) + {} + + ObjectTableDescriptorSetEntryNVX( VkObjectTableDescriptorSetEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableDescriptorSetEntryNVX ) ); + } + + ObjectTableDescriptorSetEntryNVX& operator=( VkObjectTableDescriptorSetEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableDescriptorSetEntryNVX ) ); + return *this; + } + ObjectTableDescriptorSetEntryNVX& setType( ObjectEntryTypeNVX type_ ) + { + type = type_; + return *this; + } + + ObjectTableDescriptorSetEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ ) + { + flags = flags_; + return *this; + } + + ObjectTableDescriptorSetEntryNVX& setPipelineLayout( PipelineLayout pipelineLayout_ ) + { + pipelineLayout = pipelineLayout_; + return *this; + } + + ObjectTableDescriptorSetEntryNVX& setDescriptorSet( DescriptorSet descriptorSet_ ) + { + descriptorSet = descriptorSet_; + return *this; + } + + operator VkObjectTableDescriptorSetEntryNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkObjectTableDescriptorSetEntryNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( ObjectTableDescriptorSetEntryNVX const& rhs ) const + { + return ( type == rhs.type ) + && ( flags == rhs.flags ) + && ( pipelineLayout == rhs.pipelineLayout ) + && ( descriptorSet == rhs.descriptorSet ); + } + + bool operator!=( ObjectTableDescriptorSetEntryNVX const& rhs ) const + { + return !operator==( rhs ); + } + + ObjectEntryTypeNVX type; + ObjectEntryUsageFlagsNVX flags; + PipelineLayout pipelineLayout; + DescriptorSet descriptorSet; + }; + static_assert( sizeof( ObjectTableDescriptorSetEntryNVX ) == sizeof( VkObjectTableDescriptorSetEntryNVX ), "struct and wrapper have different size!" 
); + + struct ObjectTableVertexBufferEntryNVX + { + ObjectTableVertexBufferEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eDescriptorSet, + ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), + Buffer buffer_ = Buffer() ) + : type( type_ ) + , flags( flags_ ) + , buffer( buffer_ ) + { + } + + explicit ObjectTableVertexBufferEntryNVX( ObjectTableEntryNVX const& objectTableEntryNVX, + Buffer buffer_ = Buffer() ) + : type( objectTableEntryNVX.type ) + , flags( objectTableEntryNVX.flags ) + , buffer( buffer_ ) + {} + + ObjectTableVertexBufferEntryNVX( VkObjectTableVertexBufferEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableVertexBufferEntryNVX ) ); + } + + ObjectTableVertexBufferEntryNVX& operator=( VkObjectTableVertexBufferEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableVertexBufferEntryNVX ) ); + return *this; + } + ObjectTableVertexBufferEntryNVX& setType( ObjectEntryTypeNVX type_ ) + { + type = type_; + return *this; + } + + ObjectTableVertexBufferEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ ) + { + flags = flags_; + return *this; + } + + ObjectTableVertexBufferEntryNVX& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + operator VkObjectTableVertexBufferEntryNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkObjectTableVertexBufferEntryNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( ObjectTableVertexBufferEntryNVX const& rhs ) const + { + return ( type == rhs.type ) + && ( flags == rhs.flags ) + && ( buffer == rhs.buffer ); + } + + bool operator!=( ObjectTableVertexBufferEntryNVX const& rhs ) const + { + return !operator==( rhs ); + } + + ObjectEntryTypeNVX type; + ObjectEntryUsageFlagsNVX flags; + Buffer buffer; + }; + static_assert( sizeof( ObjectTableVertexBufferEntryNVX ) == sizeof( VkObjectTableVertexBufferEntryNVX ), "struct and wrapper have different size!" 
); + + struct ObjectTableIndexBufferEntryNVX + { + ObjectTableIndexBufferEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eDescriptorSet, + ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), + Buffer buffer_ = Buffer(), + IndexType indexType_ = IndexType::eUint16 ) + : type( type_ ) + , flags( flags_ ) + , buffer( buffer_ ) + , indexType( indexType_ ) + { + } + + explicit ObjectTableIndexBufferEntryNVX( ObjectTableEntryNVX const& objectTableEntryNVX, + Buffer buffer_ = Buffer(), + IndexType indexType_ = IndexType::eUint16 ) + : type( objectTableEntryNVX.type ) + , flags( objectTableEntryNVX.flags ) + , buffer( buffer_ ) + , indexType( indexType_ ) + {} + + ObjectTableIndexBufferEntryNVX( VkObjectTableIndexBufferEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableIndexBufferEntryNVX ) ); + } + + ObjectTableIndexBufferEntryNVX& operator=( VkObjectTableIndexBufferEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTableIndexBufferEntryNVX ) ); + return *this; + } + ObjectTableIndexBufferEntryNVX& setType( ObjectEntryTypeNVX type_ ) + { + type = type_; + return *this; + } + + ObjectTableIndexBufferEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ ) + { + flags = flags_; + return *this; + } + + ObjectTableIndexBufferEntryNVX& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + ObjectTableIndexBufferEntryNVX& setIndexType( IndexType indexType_ ) + { + indexType = indexType_; + return *this; + } + + operator VkObjectTableIndexBufferEntryNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkObjectTableIndexBufferEntryNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( ObjectTableIndexBufferEntryNVX const& rhs ) const + { + return ( type == rhs.type ) + && ( flags == rhs.flags ) + && ( buffer == rhs.buffer ) + && ( indexType == rhs.indexType ); + } + + bool operator!=( ObjectTableIndexBufferEntryNVX const& rhs ) const + { + return !operator==( rhs ); + } + + ObjectEntryTypeNVX type; + ObjectEntryUsageFlagsNVX flags; + Buffer buffer; + IndexType indexType; + }; + static_assert( sizeof( ObjectTableIndexBufferEntryNVX ) == sizeof( VkObjectTableIndexBufferEntryNVX ), "struct and wrapper have different size!" 
); + + struct ObjectTablePushConstantEntryNVX + { + ObjectTablePushConstantEntryNVX( ObjectEntryTypeNVX type_ = ObjectEntryTypeNVX::eDescriptorSet, + ObjectEntryUsageFlagsNVX flags_ = ObjectEntryUsageFlagsNVX(), + PipelineLayout pipelineLayout_ = PipelineLayout(), + ShaderStageFlags stageFlags_ = ShaderStageFlags() ) + : type( type_ ) + , flags( flags_ ) + , pipelineLayout( pipelineLayout_ ) + , stageFlags( stageFlags_ ) + { + } + + explicit ObjectTablePushConstantEntryNVX( ObjectTableEntryNVX const& objectTableEntryNVX, + PipelineLayout pipelineLayout_ = PipelineLayout(), + ShaderStageFlags stageFlags_ = ShaderStageFlags() ) + : type( objectTableEntryNVX.type ) + , flags( objectTableEntryNVX.flags ) + , pipelineLayout( pipelineLayout_ ) + , stageFlags( stageFlags_ ) + {} + + ObjectTablePushConstantEntryNVX( VkObjectTablePushConstantEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTablePushConstantEntryNVX ) ); + } + + ObjectTablePushConstantEntryNVX& operator=( VkObjectTablePushConstantEntryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( ObjectTablePushConstantEntryNVX ) ); + return *this; + } + ObjectTablePushConstantEntryNVX& setType( ObjectEntryTypeNVX type_ ) + { + type = type_; + return *this; + } + + ObjectTablePushConstantEntryNVX& setFlags( ObjectEntryUsageFlagsNVX flags_ ) + { + flags = flags_; + return *this; + } + + ObjectTablePushConstantEntryNVX& setPipelineLayout( PipelineLayout pipelineLayout_ ) + { + pipelineLayout = pipelineLayout_; + return *this; + } + + ObjectTablePushConstantEntryNVX& setStageFlags( ShaderStageFlags stageFlags_ ) + { + stageFlags = stageFlags_; + return *this; + } + + operator VkObjectTablePushConstantEntryNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkObjectTablePushConstantEntryNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( ObjectTablePushConstantEntryNVX const& rhs ) const + { + return ( type == rhs.type ) + && ( flags == rhs.flags ) + && ( pipelineLayout == rhs.pipelineLayout ) + && ( stageFlags == rhs.stageFlags ); + } + + bool operator!=( ObjectTablePushConstantEntryNVX const& rhs ) const + { + return !operator==( rhs ); + } + + ObjectEntryTypeNVX type; + ObjectEntryUsageFlagsNVX flags; + PipelineLayout pipelineLayout; + ShaderStageFlags stageFlags; + }; + static_assert( sizeof( ObjectTablePushConstantEntryNVX ) == sizeof( VkObjectTablePushConstantEntryNVX ), "struct and wrapper have different size!" 
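Sketch of the entry-family pattern above: each ObjectTable*EntryNVX repeats the common type/flags pair, and the type tag should match the concrete entry kind (the handles are assumed to exist already):

    #include <vulkan/vulkan.hpp>

    vk::ObjectTablePipelineEntryNVX makePipelineEntry( vk::Pipeline pipeline )
    {
        return vk::ObjectTablePipelineEntryNVX( vk::ObjectEntryTypeNVX::ePipeline,
                                                vk::ObjectEntryUsageFlagBitsNVX::eGraphics,
                                                pipeline );
    }

    vk::ObjectTablePushConstantEntryNVX makePushConstantEntry( vk::PipelineLayout layout )
    {
        return vk::ObjectTablePushConstantEntryNVX( vk::ObjectEntryTypeNVX::ePushConstant,
                                                    vk::ObjectEntryUsageFlagBitsNVX::eGraphics,
                                                    layout,
                                                    vk::ShaderStageFlagBits::eVertex );
    }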
); + + enum class DescriptorSetLayoutCreateFlagBits + { + ePushDescriptorKHR = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR, + eUpdateAfterBindPoolEXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT + }; + + using DescriptorSetLayoutCreateFlags = Flags; + + VULKAN_HPP_INLINE DescriptorSetLayoutCreateFlags operator|( DescriptorSetLayoutCreateFlagBits bit0, DescriptorSetLayoutCreateFlagBits bit1 ) + { + return DescriptorSetLayoutCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE DescriptorSetLayoutCreateFlags operator~( DescriptorSetLayoutCreateFlagBits bits ) + { + return ~( DescriptorSetLayoutCreateFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR) | VkFlags(DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPoolEXT) + }; + }; + + struct DescriptorSetLayoutCreateInfo + { + DescriptorSetLayoutCreateInfo( DescriptorSetLayoutCreateFlags flags_ = DescriptorSetLayoutCreateFlags(), + uint32_t bindingCount_ = 0, + const DescriptorSetLayoutBinding* pBindings_ = nullptr ) + : flags( flags_ ) + , bindingCount( bindingCount_ ) + , pBindings( pBindings_ ) + { + } + + DescriptorSetLayoutCreateInfo( VkDescriptorSetLayoutCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetLayoutCreateInfo ) ); + } + + DescriptorSetLayoutCreateInfo& operator=( VkDescriptorSetLayoutCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetLayoutCreateInfo ) ); + return *this; + } + DescriptorSetLayoutCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DescriptorSetLayoutCreateInfo& setFlags( DescriptorSetLayoutCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + DescriptorSetLayoutCreateInfo& setBindingCount( uint32_t bindingCount_ ) + { + bindingCount = bindingCount_; + return *this; + } + + DescriptorSetLayoutCreateInfo& setPBindings( const DescriptorSetLayoutBinding* pBindings_ ) + { + pBindings = pBindings_; + return *this; + } + + operator VkDescriptorSetLayoutCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorSetLayoutCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DescriptorSetLayoutCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( bindingCount == rhs.bindingCount ) + && ( pBindings == rhs.pBindings ); + } + + bool operator!=( DescriptorSetLayoutCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorSetLayoutCreateInfo; + + public: + const void* pNext = nullptr; + DescriptorSetLayoutCreateFlags flags; + uint32_t bindingCount; + const DescriptorSetLayoutBinding* pBindings; + }; + static_assert( sizeof( DescriptorSetLayoutCreateInfo ) == sizeof( VkDescriptorSetLayoutCreateInfo ), "struct and wrapper have different size!" 
); + + enum class ExternalMemoryHandleTypeFlagBits + { + eOpaqueFd = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, + eOpaqueFdKHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, + eOpaqueWin32 = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, + eOpaqueWin32KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, + eOpaqueWin32Kmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + eOpaqueWin32KmtKHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + eD3D11Texture = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT, + eD3D11TextureKHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT, + eD3D11TextureKmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT, + eD3D11TextureKmtKHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT, + eD3D12Heap = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT, + eD3D12HeapKHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT, + eD3D12Resource = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT, + eD3D12ResourceKHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT, + eDmaBufEXT = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT, + eAndroidHardwareBufferANDROID = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, + eHostAllocationEXT = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, + eHostMappedForeignMemoryEXT = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT + }; + + using ExternalMemoryHandleTypeFlags = Flags; + + VULKAN_HPP_INLINE ExternalMemoryHandleTypeFlags operator|( ExternalMemoryHandleTypeFlagBits bit0, ExternalMemoryHandleTypeFlagBits bit1 ) + { + return ExternalMemoryHandleTypeFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ExternalMemoryHandleTypeFlags operator~( ExternalMemoryHandleTypeFlagBits bits ) + { + return ~( ExternalMemoryHandleTypeFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ExternalMemoryHandleTypeFlagBits::eOpaqueFd) | VkFlags(ExternalMemoryHandleTypeFlagBits::eOpaqueWin32) | VkFlags(ExternalMemoryHandleTypeFlagBits::eOpaqueWin32Kmt) | VkFlags(ExternalMemoryHandleTypeFlagBits::eD3D11Texture) | VkFlags(ExternalMemoryHandleTypeFlagBits::eD3D11TextureKmt) | VkFlags(ExternalMemoryHandleTypeFlagBits::eD3D12Heap) | VkFlags(ExternalMemoryHandleTypeFlagBits::eD3D12Resource) | VkFlags(ExternalMemoryHandleTypeFlagBits::eDmaBufEXT) | VkFlags(ExternalMemoryHandleTypeFlagBits::eAndroidHardwareBufferANDROID) | VkFlags(ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT) | VkFlags(ExternalMemoryHandleTypeFlagBits::eHostMappedForeignMemoryEXT) + }; + }; + + using ExternalMemoryHandleTypeFlagsKHR = ExternalMemoryHandleTypeFlags; + + struct PhysicalDeviceExternalImageFormatInfo + { + PhysicalDeviceExternalImageFormatInfo( ExternalMemoryHandleTypeFlagBits handleType_ = ExternalMemoryHandleTypeFlagBits::eOpaqueFd ) + : handleType( handleType_ ) + { + } + + PhysicalDeviceExternalImageFormatInfo( VkPhysicalDeviceExternalImageFormatInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExternalImageFormatInfo ) ); + } + + PhysicalDeviceExternalImageFormatInfo& operator=( VkPhysicalDeviceExternalImageFormatInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExternalImageFormatInfo ) ); + return *this; + } + PhysicalDeviceExternalImageFormatInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExternalImageFormatInfo& setHandleType( ExternalMemoryHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkPhysicalDeviceExternalImageFormatInfo const&() 
const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceExternalImageFormatInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceExternalImageFormatInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( PhysicalDeviceExternalImageFormatInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceExternalImageFormatInfo; + + public: + const void* pNext = nullptr; + ExternalMemoryHandleTypeFlagBits handleType; + }; + static_assert( sizeof( PhysicalDeviceExternalImageFormatInfo ) == sizeof( VkPhysicalDeviceExternalImageFormatInfo ), "struct and wrapper have different size!" ); + + using PhysicalDeviceExternalImageFormatInfoKHR = PhysicalDeviceExternalImageFormatInfo; + + struct PhysicalDeviceExternalBufferInfo + { + PhysicalDeviceExternalBufferInfo( BufferCreateFlags flags_ = BufferCreateFlags(), + BufferUsageFlags usage_ = BufferUsageFlags(), + ExternalMemoryHandleTypeFlagBits handleType_ = ExternalMemoryHandleTypeFlagBits::eOpaqueFd ) + : flags( flags_ ) + , usage( usage_ ) + , handleType( handleType_ ) + { + } + + PhysicalDeviceExternalBufferInfo( VkPhysicalDeviceExternalBufferInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExternalBufferInfo ) ); + } + + PhysicalDeviceExternalBufferInfo& operator=( VkPhysicalDeviceExternalBufferInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExternalBufferInfo ) ); + return *this; + } + PhysicalDeviceExternalBufferInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExternalBufferInfo& setFlags( BufferCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + PhysicalDeviceExternalBufferInfo& setUsage( BufferUsageFlags usage_ ) + { + usage = usage_; + return *this; + } + + PhysicalDeviceExternalBufferInfo& setHandleType( ExternalMemoryHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkPhysicalDeviceExternalBufferInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceExternalBufferInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceExternalBufferInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( usage == rhs.usage ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( PhysicalDeviceExternalBufferInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceExternalBufferInfo; + + public: + const void* pNext = nullptr; + BufferCreateFlags flags; + BufferUsageFlags usage; + ExternalMemoryHandleTypeFlagBits handleType; + }; + static_assert( sizeof( PhysicalDeviceExternalBufferInfo ) == sizeof( VkPhysicalDeviceExternalBufferInfo ), "struct and wrapper have different size!" 
); + + using PhysicalDeviceExternalBufferInfoKHR = PhysicalDeviceExternalBufferInfo; + + struct ExternalMemoryImageCreateInfo + { + ExternalMemoryImageCreateInfo( ExternalMemoryHandleTypeFlags handleTypes_ = ExternalMemoryHandleTypeFlags() ) + : handleTypes( handleTypes_ ) + { + } + + ExternalMemoryImageCreateInfo( VkExternalMemoryImageCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExternalMemoryImageCreateInfo ) ); + } + + ExternalMemoryImageCreateInfo& operator=( VkExternalMemoryImageCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExternalMemoryImageCreateInfo ) ); + return *this; + } + ExternalMemoryImageCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExternalMemoryImageCreateInfo& setHandleTypes( ExternalMemoryHandleTypeFlags handleTypes_ ) + { + handleTypes = handleTypes_; + return *this; + } + + operator VkExternalMemoryImageCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkExternalMemoryImageCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExternalMemoryImageCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExternalMemoryImageCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExternalMemoryImageCreateInfo; + + public: + const void* pNext = nullptr; + ExternalMemoryHandleTypeFlags handleTypes; + }; + static_assert( sizeof( ExternalMemoryImageCreateInfo ) == sizeof( VkExternalMemoryImageCreateInfo ), "struct and wrapper have different size!" ); + + using ExternalMemoryImageCreateInfoKHR = ExternalMemoryImageCreateInfo; + + struct ExternalMemoryBufferCreateInfo + { + ExternalMemoryBufferCreateInfo( ExternalMemoryHandleTypeFlags handleTypes_ = ExternalMemoryHandleTypeFlags() ) + : handleTypes( handleTypes_ ) + { + } + + ExternalMemoryBufferCreateInfo( VkExternalMemoryBufferCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExternalMemoryBufferCreateInfo ) ); + } + + ExternalMemoryBufferCreateInfo& operator=( VkExternalMemoryBufferCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExternalMemoryBufferCreateInfo ) ); + return *this; + } + ExternalMemoryBufferCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExternalMemoryBufferCreateInfo& setHandleTypes( ExternalMemoryHandleTypeFlags handleTypes_ ) + { + handleTypes = handleTypes_; + return *this; + } + + operator VkExternalMemoryBufferCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkExternalMemoryBufferCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExternalMemoryBufferCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExternalMemoryBufferCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExternalMemoryBufferCreateInfo; + + public: + const void* pNext = nullptr; + ExternalMemoryHandleTypeFlags handleTypes; + }; + static_assert( sizeof( ExternalMemoryBufferCreateInfo ) == sizeof( VkExternalMemoryBufferCreateInfo ), "struct and wrapper have different size!" 
); + + using ExternalMemoryBufferCreateInfoKHR = ExternalMemoryBufferCreateInfo; + + struct ExportMemoryAllocateInfo + { + ExportMemoryAllocateInfo( ExternalMemoryHandleTypeFlags handleTypes_ = ExternalMemoryHandleTypeFlags() ) + : handleTypes( handleTypes_ ) + { + } + + ExportMemoryAllocateInfo( VkExportMemoryAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportMemoryAllocateInfo ) ); + } + + ExportMemoryAllocateInfo& operator=( VkExportMemoryAllocateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportMemoryAllocateInfo ) ); + return *this; + } + ExportMemoryAllocateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExportMemoryAllocateInfo& setHandleTypes( ExternalMemoryHandleTypeFlags handleTypes_ ) + { + handleTypes = handleTypes_; + return *this; + } + + operator VkExportMemoryAllocateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkExportMemoryAllocateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExportMemoryAllocateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExportMemoryAllocateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExportMemoryAllocateInfo; + + public: + const void* pNext = nullptr; + ExternalMemoryHandleTypeFlags handleTypes; + }; + static_assert( sizeof( ExportMemoryAllocateInfo ) == sizeof( VkExportMemoryAllocateInfo ), "struct and wrapper have different size!" ); + + using ExportMemoryAllocateInfoKHR = ExportMemoryAllocateInfo; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ImportMemoryWin32HandleInfoKHR + { + ImportMemoryWin32HandleInfoKHR( ExternalMemoryHandleTypeFlagBits handleType_ = ExternalMemoryHandleTypeFlagBits::eOpaqueFd, + HANDLE handle_ = 0, + LPCWSTR name_ = 0 ) + : handleType( handleType_ ) + , handle( handle_ ) + , name( name_ ) + { + } + + ImportMemoryWin32HandleInfoKHR( VkImportMemoryWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportMemoryWin32HandleInfoKHR ) ); + } + + ImportMemoryWin32HandleInfoKHR& operator=( VkImportMemoryWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportMemoryWin32HandleInfoKHR ) ); + return *this; + } + ImportMemoryWin32HandleInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImportMemoryWin32HandleInfoKHR& setHandleType( ExternalMemoryHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + ImportMemoryWin32HandleInfoKHR& setHandle( HANDLE handle_ ) + { + handle = handle_; + return *this; + } + + ImportMemoryWin32HandleInfoKHR& setName( LPCWSTR name_ ) + { + name = name_; + return *this; + } + + operator VkImportMemoryWin32HandleInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkImportMemoryWin32HandleInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImportMemoryWin32HandleInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ) + && ( handle == rhs.handle ) + && ( name == rhs.name ); + } + + bool operator!=( ImportMemoryWin32HandleInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImportMemoryWin32HandleInfoKHR; + + public: + const void* pNext = nullptr; + ExternalMemoryHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; + }; + 
static_assert( sizeof( ImportMemoryWin32HandleInfoKHR ) == sizeof( VkImportMemoryWin32HandleInfoKHR ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct MemoryGetWin32HandleInfoKHR + { + MemoryGetWin32HandleInfoKHR( DeviceMemory memory_ = DeviceMemory(), + ExternalMemoryHandleTypeFlagBits handleType_ = ExternalMemoryHandleTypeFlagBits::eOpaqueFd ) + : memory( memory_ ) + , handleType( handleType_ ) + { + } + + MemoryGetWin32HandleInfoKHR( VkMemoryGetWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryGetWin32HandleInfoKHR ) ); + } + + MemoryGetWin32HandleInfoKHR& operator=( VkMemoryGetWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryGetWin32HandleInfoKHR ) ); + return *this; + } + MemoryGetWin32HandleInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + MemoryGetWin32HandleInfoKHR& setMemory( DeviceMemory memory_ ) + { + memory = memory_; + return *this; + } + + MemoryGetWin32HandleInfoKHR& setHandleType( ExternalMemoryHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkMemoryGetWin32HandleInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryGetWin32HandleInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryGetWin32HandleInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memory == rhs.memory ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( MemoryGetWin32HandleInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryGetWin32HandleInfoKHR; + + public: + const void* pNext = nullptr; + DeviceMemory memory; + ExternalMemoryHandleTypeFlagBits handleType; + }; + static_assert( sizeof( MemoryGetWin32HandleInfoKHR ) == sizeof( VkMemoryGetWin32HandleInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct ImportMemoryFdInfoKHR + { + ImportMemoryFdInfoKHR( ExternalMemoryHandleTypeFlagBits handleType_ = ExternalMemoryHandleTypeFlagBits::eOpaqueFd, + int fd_ = 0 ) + : handleType( handleType_ ) + , fd( fd_ ) + { + } + + ImportMemoryFdInfoKHR( VkImportMemoryFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportMemoryFdInfoKHR ) ); + } + + ImportMemoryFdInfoKHR& operator=( VkImportMemoryFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportMemoryFdInfoKHR ) ); + return *this; + } + ImportMemoryFdInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImportMemoryFdInfoKHR& setHandleType( ExternalMemoryHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + ImportMemoryFdInfoKHR& setFd( int fd_ ) + { + fd = fd_; + return *this; + } + + operator VkImportMemoryFdInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkImportMemoryFdInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImportMemoryFdInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ) + && ( fd == rhs.fd ); + } + + bool operator!=( ImportMemoryFdInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImportMemoryFdInfoKHR; + + public: + const void* pNext = nullptr; + ExternalMemoryHandleTypeFlagBits handleType; + int fd; + }; + static_assert( sizeof( ImportMemoryFdInfoKHR ) == sizeof( VkImportMemoryFdInfoKHR ), "struct and wrapper have different size!" ); + + struct MemoryGetFdInfoKHR + { + MemoryGetFdInfoKHR( DeviceMemory memory_ = DeviceMemory(), + ExternalMemoryHandleTypeFlagBits handleType_ = ExternalMemoryHandleTypeFlagBits::eOpaqueFd ) + : memory( memory_ ) + , handleType( handleType_ ) + { + } + + MemoryGetFdInfoKHR( VkMemoryGetFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryGetFdInfoKHR ) ); + } + + MemoryGetFdInfoKHR& operator=( VkMemoryGetFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryGetFdInfoKHR ) ); + return *this; + } + MemoryGetFdInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + MemoryGetFdInfoKHR& setMemory( DeviceMemory memory_ ) + { + memory = memory_; + return *this; + } + + MemoryGetFdInfoKHR& setHandleType( ExternalMemoryHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkMemoryGetFdInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryGetFdInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryGetFdInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memory == rhs.memory ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( MemoryGetFdInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryGetFdInfoKHR; + + public: + const void* pNext = nullptr; + DeviceMemory memory; + ExternalMemoryHandleTypeFlagBits handleType; + }; + static_assert( sizeof( MemoryGetFdInfoKHR ) == sizeof( VkMemoryGetFdInfoKHR ), "struct and wrapper have different size!" 
);
+
+  struct ImportMemoryHostPointerInfoEXT
+  {
+    ImportMemoryHostPointerInfoEXT( ExternalMemoryHandleTypeFlagBits handleType_ = ExternalMemoryHandleTypeFlagBits::eOpaqueFd,
+                                    void* pHostPointer_ = nullptr )
+      : handleType( handleType_ )
+      , pHostPointer( pHostPointer_ )
+    {
+    }
+
+    ImportMemoryHostPointerInfoEXT( VkImportMemoryHostPointerInfoEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( ImportMemoryHostPointerInfoEXT ) );
+    }
+
+    ImportMemoryHostPointerInfoEXT& operator=( VkImportMemoryHostPointerInfoEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( ImportMemoryHostPointerInfoEXT ) );
+      return *this;
+    }
+    ImportMemoryHostPointerInfoEXT& setPNext( const void* pNext_ )
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    ImportMemoryHostPointerInfoEXT& setHandleType( ExternalMemoryHandleTypeFlagBits handleType_ )
+    {
+      handleType = handleType_;
+      return *this;
+    }
+
+    ImportMemoryHostPointerInfoEXT& setPHostPointer( void* pHostPointer_ )
+    {
+      pHostPointer = pHostPointer_;
+      return *this;
+    }
+
+    operator VkImportMemoryHostPointerInfoEXT const&() const
+    {
+      return *reinterpret_cast<const VkImportMemoryHostPointerInfoEXT*>(this);
+    }
+
+    operator VkImportMemoryHostPointerInfoEXT &()
+    {
+      return *reinterpret_cast<VkImportMemoryHostPointerInfoEXT*>(this);
+    }
+
+    bool operator==( ImportMemoryHostPointerInfoEXT const& rhs ) const
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( handleType == rhs.handleType )
+          && ( pHostPointer == rhs.pHostPointer );
+    }
+
+    bool operator!=( ImportMemoryHostPointerInfoEXT const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+  private:
+    StructureType sType = StructureType::eImportMemoryHostPointerInfoEXT;
+
+  public:
+    const void* pNext = nullptr;
+    ExternalMemoryHandleTypeFlagBits handleType;
+    void* pHostPointer;
+  };
+  static_assert( sizeof( ImportMemoryHostPointerInfoEXT ) == sizeof( VkImportMemoryHostPointerInfoEXT ), "struct and wrapper have different size!"
); + + enum class ExternalMemoryFeatureFlagBits + { + eDedicatedOnly = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT, + eDedicatedOnlyKHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT, + eExportable = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT, + eExportableKHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT, + eImportable = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT, + eImportableKHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT + }; + + using ExternalMemoryFeatureFlags = Flags; + + VULKAN_HPP_INLINE ExternalMemoryFeatureFlags operator|( ExternalMemoryFeatureFlagBits bit0, ExternalMemoryFeatureFlagBits bit1 ) + { + return ExternalMemoryFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ExternalMemoryFeatureFlags operator~( ExternalMemoryFeatureFlagBits bits ) + { + return ~( ExternalMemoryFeatureFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ExternalMemoryFeatureFlagBits::eDedicatedOnly) | VkFlags(ExternalMemoryFeatureFlagBits::eExportable) | VkFlags(ExternalMemoryFeatureFlagBits::eImportable) + }; + }; + + using ExternalMemoryFeatureFlagsKHR = ExternalMemoryFeatureFlags; + + struct ExternalMemoryProperties + { + operator VkExternalMemoryProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkExternalMemoryProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExternalMemoryProperties const& rhs ) const + { + return ( externalMemoryFeatures == rhs.externalMemoryFeatures ) + && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes ) + && ( compatibleHandleTypes == rhs.compatibleHandleTypes ); + } + + bool operator!=( ExternalMemoryProperties const& rhs ) const + { + return !operator==( rhs ); + } + + ExternalMemoryFeatureFlags externalMemoryFeatures; + ExternalMemoryHandleTypeFlags exportFromImportedHandleTypes; + ExternalMemoryHandleTypeFlags compatibleHandleTypes; + }; + static_assert( sizeof( ExternalMemoryProperties ) == sizeof( VkExternalMemoryProperties ), "struct and wrapper have different size!" ); + + using ExternalMemoryPropertiesKHR = ExternalMemoryProperties; + + struct ExternalImageFormatProperties + { + operator VkExternalImageFormatProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkExternalImageFormatProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExternalImageFormatProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( externalMemoryProperties == rhs.externalMemoryProperties ); + } + + bool operator!=( ExternalImageFormatProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExternalImageFormatProperties; + + public: + void* pNext = nullptr; + ExternalMemoryProperties externalMemoryProperties; + }; + static_assert( sizeof( ExternalImageFormatProperties ) == sizeof( VkExternalImageFormatProperties ), "struct and wrapper have different size!" 
); + + using ExternalImageFormatPropertiesKHR = ExternalImageFormatProperties; + + struct ExternalBufferProperties + { + operator VkExternalBufferProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkExternalBufferProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExternalBufferProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( externalMemoryProperties == rhs.externalMemoryProperties ); + } + + bool operator!=( ExternalBufferProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExternalBufferProperties; + + public: + void* pNext = nullptr; + ExternalMemoryProperties externalMemoryProperties; + }; + static_assert( sizeof( ExternalBufferProperties ) == sizeof( VkExternalBufferProperties ), "struct and wrapper have different size!" ); + + using ExternalBufferPropertiesKHR = ExternalBufferProperties; + + enum class ExternalSemaphoreHandleTypeFlagBits + { + eOpaqueFd = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, + eOpaqueFdKHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, + eOpaqueWin32 = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + eOpaqueWin32KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + eOpaqueWin32Kmt = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + eOpaqueWin32KmtKHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + eD3D12Fence = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, + eD3D12FenceKHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, + eSyncFd = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, + eSyncFdKHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT + }; + + using ExternalSemaphoreHandleTypeFlags = Flags; + + VULKAN_HPP_INLINE ExternalSemaphoreHandleTypeFlags operator|( ExternalSemaphoreHandleTypeFlagBits bit0, ExternalSemaphoreHandleTypeFlagBits bit1 ) + { + return ExternalSemaphoreHandleTypeFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ExternalSemaphoreHandleTypeFlags operator~( ExternalSemaphoreHandleTypeFlagBits bits ) + { + return ~( ExternalSemaphoreHandleTypeFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd) | VkFlags(ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32) | VkFlags(ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32Kmt) | VkFlags(ExternalSemaphoreHandleTypeFlagBits::eD3D12Fence) | VkFlags(ExternalSemaphoreHandleTypeFlagBits::eSyncFd) + }; + }; + + using ExternalSemaphoreHandleTypeFlagsKHR = ExternalSemaphoreHandleTypeFlags; + + struct PhysicalDeviceExternalSemaphoreInfo + { + PhysicalDeviceExternalSemaphoreInfo( ExternalSemaphoreHandleTypeFlagBits handleType_ = ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd ) + : handleType( handleType_ ) + { + } + + PhysicalDeviceExternalSemaphoreInfo( VkPhysicalDeviceExternalSemaphoreInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExternalSemaphoreInfo ) ); + } + + PhysicalDeviceExternalSemaphoreInfo& operator=( VkPhysicalDeviceExternalSemaphoreInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExternalSemaphoreInfo ) ); + return *this; + } + PhysicalDeviceExternalSemaphoreInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExternalSemaphoreInfo& setHandleType( ExternalSemaphoreHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkPhysicalDeviceExternalSemaphoreInfo const&() const + { + 
return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceExternalSemaphoreInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceExternalSemaphoreInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( PhysicalDeviceExternalSemaphoreInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceExternalSemaphoreInfo; + + public: + const void* pNext = nullptr; + ExternalSemaphoreHandleTypeFlagBits handleType; + }; + static_assert( sizeof( PhysicalDeviceExternalSemaphoreInfo ) == sizeof( VkPhysicalDeviceExternalSemaphoreInfo ), "struct and wrapper have different size!" ); + + using PhysicalDeviceExternalSemaphoreInfoKHR = PhysicalDeviceExternalSemaphoreInfo; + + struct ExportSemaphoreCreateInfo + { + ExportSemaphoreCreateInfo( ExternalSemaphoreHandleTypeFlags handleTypes_ = ExternalSemaphoreHandleTypeFlags() ) + : handleTypes( handleTypes_ ) + { + } + + ExportSemaphoreCreateInfo( VkExportSemaphoreCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportSemaphoreCreateInfo ) ); + } + + ExportSemaphoreCreateInfo& operator=( VkExportSemaphoreCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportSemaphoreCreateInfo ) ); + return *this; + } + ExportSemaphoreCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExportSemaphoreCreateInfo& setHandleTypes( ExternalSemaphoreHandleTypeFlags handleTypes_ ) + { + handleTypes = handleTypes_; + return *this; + } + + operator VkExportSemaphoreCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkExportSemaphoreCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExportSemaphoreCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExportSemaphoreCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExportSemaphoreCreateInfo; + + public: + const void* pNext = nullptr; + ExternalSemaphoreHandleTypeFlags handleTypes; + }; + static_assert( sizeof( ExportSemaphoreCreateInfo ) == sizeof( VkExportSemaphoreCreateInfo ), "struct and wrapper have different size!" 
); + + using ExportSemaphoreCreateInfoKHR = ExportSemaphoreCreateInfo; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct SemaphoreGetWin32HandleInfoKHR + { + SemaphoreGetWin32HandleInfoKHR( Semaphore semaphore_ = Semaphore(), + ExternalSemaphoreHandleTypeFlagBits handleType_ = ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd ) + : semaphore( semaphore_ ) + , handleType( handleType_ ) + { + } + + SemaphoreGetWin32HandleInfoKHR( VkSemaphoreGetWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SemaphoreGetWin32HandleInfoKHR ) ); + } + + SemaphoreGetWin32HandleInfoKHR& operator=( VkSemaphoreGetWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SemaphoreGetWin32HandleInfoKHR ) ); + return *this; + } + SemaphoreGetWin32HandleInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SemaphoreGetWin32HandleInfoKHR& setSemaphore( Semaphore semaphore_ ) + { + semaphore = semaphore_; + return *this; + } + + SemaphoreGetWin32HandleInfoKHR& setHandleType( ExternalSemaphoreHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkSemaphoreGetWin32HandleInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSemaphoreGetWin32HandleInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SemaphoreGetWin32HandleInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphore == rhs.semaphore ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( SemaphoreGetWin32HandleInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSemaphoreGetWin32HandleInfoKHR; + + public: + const void* pNext = nullptr; + Semaphore semaphore; + ExternalSemaphoreHandleTypeFlagBits handleType; + }; + static_assert( sizeof( SemaphoreGetWin32HandleInfoKHR ) == sizeof( VkSemaphoreGetWin32HandleInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct SemaphoreGetFdInfoKHR + { + SemaphoreGetFdInfoKHR( Semaphore semaphore_ = Semaphore(), + ExternalSemaphoreHandleTypeFlagBits handleType_ = ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd ) + : semaphore( semaphore_ ) + , handleType( handleType_ ) + { + } + + SemaphoreGetFdInfoKHR( VkSemaphoreGetFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SemaphoreGetFdInfoKHR ) ); + } + + SemaphoreGetFdInfoKHR& operator=( VkSemaphoreGetFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SemaphoreGetFdInfoKHR ) ); + return *this; + } + SemaphoreGetFdInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SemaphoreGetFdInfoKHR& setSemaphore( Semaphore semaphore_ ) + { + semaphore = semaphore_; + return *this; + } + + SemaphoreGetFdInfoKHR& setHandleType( ExternalSemaphoreHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkSemaphoreGetFdInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSemaphoreGetFdInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SemaphoreGetFdInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphore == rhs.semaphore ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( SemaphoreGetFdInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSemaphoreGetFdInfoKHR; + + public: + const void* pNext = nullptr; + Semaphore semaphore; + ExternalSemaphoreHandleTypeFlagBits handleType; + }; + static_assert( sizeof( SemaphoreGetFdInfoKHR ) == sizeof( VkSemaphoreGetFdInfoKHR ), "struct and wrapper have different size!" ); + + enum class ExternalSemaphoreFeatureFlagBits + { + eExportable = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT, + eExportableKHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT, + eImportable = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT, + eImportableKHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT + }; + + using ExternalSemaphoreFeatureFlags = Flags; + + VULKAN_HPP_INLINE ExternalSemaphoreFeatureFlags operator|( ExternalSemaphoreFeatureFlagBits bit0, ExternalSemaphoreFeatureFlagBits bit1 ) + { + return ExternalSemaphoreFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ExternalSemaphoreFeatureFlags operator~( ExternalSemaphoreFeatureFlagBits bits ) + { + return ~( ExternalSemaphoreFeatureFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ExternalSemaphoreFeatureFlagBits::eExportable) | VkFlags(ExternalSemaphoreFeatureFlagBits::eImportable) + }; + }; + + using ExternalSemaphoreFeatureFlagsKHR = ExternalSemaphoreFeatureFlags; + + struct ExternalSemaphoreProperties + { + operator VkExternalSemaphoreProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkExternalSemaphoreProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExternalSemaphoreProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes ) + && ( compatibleHandleTypes == rhs.compatibleHandleTypes ) + && ( externalSemaphoreFeatures == rhs.externalSemaphoreFeatures ); + } + + bool operator!=( ExternalSemaphoreProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExternalSemaphoreProperties; + + public: + void* pNext = nullptr; + 
ExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes; + ExternalSemaphoreHandleTypeFlags compatibleHandleTypes; + ExternalSemaphoreFeatureFlags externalSemaphoreFeatures; + }; + static_assert( sizeof( ExternalSemaphoreProperties ) == sizeof( VkExternalSemaphoreProperties ), "struct and wrapper have different size!" ); + + using ExternalSemaphorePropertiesKHR = ExternalSemaphoreProperties; + + enum class SemaphoreImportFlagBits + { + eTemporary = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, + eTemporaryKHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT + }; + + using SemaphoreImportFlags = Flags; + + VULKAN_HPP_INLINE SemaphoreImportFlags operator|( SemaphoreImportFlagBits bit0, SemaphoreImportFlagBits bit1 ) + { + return SemaphoreImportFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE SemaphoreImportFlags operator~( SemaphoreImportFlagBits bits ) + { + return ~( SemaphoreImportFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(SemaphoreImportFlagBits::eTemporary) + }; + }; + + using SemaphoreImportFlagsKHR = SemaphoreImportFlags; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ImportSemaphoreWin32HandleInfoKHR + { + ImportSemaphoreWin32HandleInfoKHR( Semaphore semaphore_ = Semaphore(), + SemaphoreImportFlags flags_ = SemaphoreImportFlags(), + ExternalSemaphoreHandleTypeFlagBits handleType_ = ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, + HANDLE handle_ = 0, + LPCWSTR name_ = 0 ) + : semaphore( semaphore_ ) + , flags( flags_ ) + , handleType( handleType_ ) + , handle( handle_ ) + , name( name_ ) + { + } + + ImportSemaphoreWin32HandleInfoKHR( VkImportSemaphoreWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportSemaphoreWin32HandleInfoKHR ) ); + } + + ImportSemaphoreWin32HandleInfoKHR& operator=( VkImportSemaphoreWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportSemaphoreWin32HandleInfoKHR ) ); + return *this; + } + ImportSemaphoreWin32HandleInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR& setSemaphore( Semaphore semaphore_ ) + { + semaphore = semaphore_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR& setFlags( SemaphoreImportFlags flags_ ) + { + flags = flags_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR& setHandleType( ExternalSemaphoreHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR& setHandle( HANDLE handle_ ) + { + handle = handle_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR& setName( LPCWSTR name_ ) + { + name = name_; + return *this; + } + + operator VkImportSemaphoreWin32HandleInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkImportSemaphoreWin32HandleInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImportSemaphoreWin32HandleInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphore == rhs.semaphore ) + && ( flags == rhs.flags ) + && ( handleType == rhs.handleType ) + && ( handle == rhs.handle ) + && ( name == rhs.name ); + } + + bool operator!=( ImportSemaphoreWin32HandleInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImportSemaphoreWin32HandleInfoKHR; + + public: + const void* pNext = nullptr; + Semaphore semaphore; + SemaphoreImportFlags flags; + ExternalSemaphoreHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; + }; + static_assert( sizeof( 
ImportSemaphoreWin32HandleInfoKHR ) == sizeof( VkImportSemaphoreWin32HandleInfoKHR ), "struct and wrapper have different size!" ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct ImportSemaphoreFdInfoKHR + { + ImportSemaphoreFdInfoKHR( Semaphore semaphore_ = Semaphore(), + SemaphoreImportFlags flags_ = SemaphoreImportFlags(), + ExternalSemaphoreHandleTypeFlagBits handleType_ = ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, + int fd_ = 0 ) + : semaphore( semaphore_ ) + , flags( flags_ ) + , handleType( handleType_ ) + , fd( fd_ ) + { + } + + ImportSemaphoreFdInfoKHR( VkImportSemaphoreFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportSemaphoreFdInfoKHR ) ); + } + + ImportSemaphoreFdInfoKHR& operator=( VkImportSemaphoreFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportSemaphoreFdInfoKHR ) ); + return *this; + } + ImportSemaphoreFdInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImportSemaphoreFdInfoKHR& setSemaphore( Semaphore semaphore_ ) + { + semaphore = semaphore_; + return *this; + } + + ImportSemaphoreFdInfoKHR& setFlags( SemaphoreImportFlags flags_ ) + { + flags = flags_; + return *this; + } + + ImportSemaphoreFdInfoKHR& setHandleType( ExternalSemaphoreHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + ImportSemaphoreFdInfoKHR& setFd( int fd_ ) + { + fd = fd_; + return *this; + } + + operator VkImportSemaphoreFdInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkImportSemaphoreFdInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImportSemaphoreFdInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphore == rhs.semaphore ) + && ( flags == rhs.flags ) + && ( handleType == rhs.handleType ) + && ( fd == rhs.fd ); + } + + bool operator!=( ImportSemaphoreFdInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImportSemaphoreFdInfoKHR; + + public: + const void* pNext = nullptr; + Semaphore semaphore; + SemaphoreImportFlags flags; + ExternalSemaphoreHandleTypeFlagBits handleType; + int fd; + }; + static_assert( sizeof( ImportSemaphoreFdInfoKHR ) == sizeof( VkImportSemaphoreFdInfoKHR ), "struct and wrapper have different size!" 
); + + enum class ExternalFenceHandleTypeFlagBits + { + eOpaqueFd = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT, + eOpaqueFdKHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT, + eOpaqueWin32 = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + eOpaqueWin32KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + eOpaqueWin32Kmt = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + eOpaqueWin32KmtKHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + eSyncFd = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, + eSyncFdKHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT + }; + + using ExternalFenceHandleTypeFlags = Flags; + + VULKAN_HPP_INLINE ExternalFenceHandleTypeFlags operator|( ExternalFenceHandleTypeFlagBits bit0, ExternalFenceHandleTypeFlagBits bit1 ) + { + return ExternalFenceHandleTypeFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ExternalFenceHandleTypeFlags operator~( ExternalFenceHandleTypeFlagBits bits ) + { + return ~( ExternalFenceHandleTypeFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ExternalFenceHandleTypeFlagBits::eOpaqueFd) | VkFlags(ExternalFenceHandleTypeFlagBits::eOpaqueWin32) | VkFlags(ExternalFenceHandleTypeFlagBits::eOpaqueWin32Kmt) | VkFlags(ExternalFenceHandleTypeFlagBits::eSyncFd) + }; + }; + + using ExternalFenceHandleTypeFlagsKHR = ExternalFenceHandleTypeFlags; + + struct PhysicalDeviceExternalFenceInfo + { + PhysicalDeviceExternalFenceInfo( ExternalFenceHandleTypeFlagBits handleType_ = ExternalFenceHandleTypeFlagBits::eOpaqueFd ) + : handleType( handleType_ ) + { + } + + PhysicalDeviceExternalFenceInfo( VkPhysicalDeviceExternalFenceInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExternalFenceInfo ) ); + } + + PhysicalDeviceExternalFenceInfo& operator=( VkPhysicalDeviceExternalFenceInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PhysicalDeviceExternalFenceInfo ) ); + return *this; + } + PhysicalDeviceExternalFenceInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExternalFenceInfo& setHandleType( ExternalFenceHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkPhysicalDeviceExternalFenceInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceExternalFenceInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceExternalFenceInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( PhysicalDeviceExternalFenceInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceExternalFenceInfo; + + public: + const void* pNext = nullptr; + ExternalFenceHandleTypeFlagBits handleType; + }; + static_assert( sizeof( PhysicalDeviceExternalFenceInfo ) == sizeof( VkPhysicalDeviceExternalFenceInfo ), "struct and wrapper have different size!" 
); + + using PhysicalDeviceExternalFenceInfoKHR = PhysicalDeviceExternalFenceInfo; + + struct ExportFenceCreateInfo + { + ExportFenceCreateInfo( ExternalFenceHandleTypeFlags handleTypes_ = ExternalFenceHandleTypeFlags() ) + : handleTypes( handleTypes_ ) + { + } + + ExportFenceCreateInfo( VkExportFenceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportFenceCreateInfo ) ); + } + + ExportFenceCreateInfo& operator=( VkExportFenceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( ExportFenceCreateInfo ) ); + return *this; + } + ExportFenceCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ExportFenceCreateInfo& setHandleTypes( ExternalFenceHandleTypeFlags handleTypes_ ) + { + handleTypes = handleTypes_; + return *this; + } + + operator VkExportFenceCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkExportFenceCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExportFenceCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExportFenceCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExportFenceCreateInfo; + + public: + const void* pNext = nullptr; + ExternalFenceHandleTypeFlags handleTypes; + }; + static_assert( sizeof( ExportFenceCreateInfo ) == sizeof( VkExportFenceCreateInfo ), "struct and wrapper have different size!" ); + + using ExportFenceCreateInfoKHR = ExportFenceCreateInfo; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct FenceGetWin32HandleInfoKHR + { + FenceGetWin32HandleInfoKHR( Fence fence_ = Fence(), + ExternalFenceHandleTypeFlagBits handleType_ = ExternalFenceHandleTypeFlagBits::eOpaqueFd ) + : fence( fence_ ) + , handleType( handleType_ ) + { + } + + FenceGetWin32HandleInfoKHR( VkFenceGetWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( FenceGetWin32HandleInfoKHR ) ); + } + + FenceGetWin32HandleInfoKHR& operator=( VkFenceGetWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( FenceGetWin32HandleInfoKHR ) ); + return *this; + } + FenceGetWin32HandleInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + FenceGetWin32HandleInfoKHR& setFence( Fence fence_ ) + { + fence = fence_; + return *this; + } + + FenceGetWin32HandleInfoKHR& setHandleType( ExternalFenceHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkFenceGetWin32HandleInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkFenceGetWin32HandleInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( FenceGetWin32HandleInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fence == rhs.fence ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( FenceGetWin32HandleInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eFenceGetWin32HandleInfoKHR; + + public: + const void* pNext = nullptr; + Fence fence; + ExternalFenceHandleTypeFlagBits handleType; + }; + static_assert( sizeof( FenceGetWin32HandleInfoKHR ) == sizeof( VkFenceGetWin32HandleInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct FenceGetFdInfoKHR + { + FenceGetFdInfoKHR( Fence fence_ = Fence(), + ExternalFenceHandleTypeFlagBits handleType_ = ExternalFenceHandleTypeFlagBits::eOpaqueFd ) + : fence( fence_ ) + , handleType( handleType_ ) + { + } + + FenceGetFdInfoKHR( VkFenceGetFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( FenceGetFdInfoKHR ) ); + } + + FenceGetFdInfoKHR& operator=( VkFenceGetFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( FenceGetFdInfoKHR ) ); + return *this; + } + FenceGetFdInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + FenceGetFdInfoKHR& setFence( Fence fence_ ) + { + fence = fence_; + return *this; + } + + FenceGetFdInfoKHR& setHandleType( ExternalFenceHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + operator VkFenceGetFdInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkFenceGetFdInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( FenceGetFdInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fence == rhs.fence ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( FenceGetFdInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eFenceGetFdInfoKHR; + + public: + const void* pNext = nullptr; + Fence fence; + ExternalFenceHandleTypeFlagBits handleType; + }; + static_assert( sizeof( FenceGetFdInfoKHR ) == sizeof( VkFenceGetFdInfoKHR ), "struct and wrapper have different size!" ); + + enum class ExternalFenceFeatureFlagBits + { + eExportable = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT, + eExportableKHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT, + eImportable = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT, + eImportableKHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT + }; + + using ExternalFenceFeatureFlags = Flags; + + VULKAN_HPP_INLINE ExternalFenceFeatureFlags operator|( ExternalFenceFeatureFlagBits bit0, ExternalFenceFeatureFlagBits bit1 ) + { + return ExternalFenceFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ExternalFenceFeatureFlags operator~( ExternalFenceFeatureFlagBits bits ) + { + return ~( ExternalFenceFeatureFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ExternalFenceFeatureFlagBits::eExportable) | VkFlags(ExternalFenceFeatureFlagBits::eImportable) + }; + }; + + using ExternalFenceFeatureFlagsKHR = ExternalFenceFeatureFlags; + + struct ExternalFenceProperties + { + operator VkExternalFenceProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkExternalFenceProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( ExternalFenceProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes ) + && ( compatibleHandleTypes == rhs.compatibleHandleTypes ) + && ( externalFenceFeatures == rhs.externalFenceFeatures ); + } + + bool operator!=( ExternalFenceProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eExternalFenceProperties; + + public: + void* pNext = nullptr; + ExternalFenceHandleTypeFlags exportFromImportedHandleTypes; + ExternalFenceHandleTypeFlags compatibleHandleTypes; + ExternalFenceFeatureFlags externalFenceFeatures; + }; + static_assert( sizeof( ExternalFenceProperties ) == sizeof( 
VkExternalFenceProperties ), "struct and wrapper have different size!" ); + + using ExternalFencePropertiesKHR = ExternalFenceProperties; + + enum class FenceImportFlagBits + { + eTemporary = VK_FENCE_IMPORT_TEMPORARY_BIT, + eTemporaryKHR = VK_FENCE_IMPORT_TEMPORARY_BIT + }; + + using FenceImportFlags = Flags; + + VULKAN_HPP_INLINE FenceImportFlags operator|( FenceImportFlagBits bit0, FenceImportFlagBits bit1 ) + { + return FenceImportFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE FenceImportFlags operator~( FenceImportFlagBits bits ) + { + return ~( FenceImportFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(FenceImportFlagBits::eTemporary) + }; + }; + + using FenceImportFlagsKHR = FenceImportFlags; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ImportFenceWin32HandleInfoKHR + { + ImportFenceWin32HandleInfoKHR( Fence fence_ = Fence(), + FenceImportFlags flags_ = FenceImportFlags(), + ExternalFenceHandleTypeFlagBits handleType_ = ExternalFenceHandleTypeFlagBits::eOpaqueFd, + HANDLE handle_ = 0, + LPCWSTR name_ = 0 ) + : fence( fence_ ) + , flags( flags_ ) + , handleType( handleType_ ) + , handle( handle_ ) + , name( name_ ) + { + } + + ImportFenceWin32HandleInfoKHR( VkImportFenceWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportFenceWin32HandleInfoKHR ) ); + } + + ImportFenceWin32HandleInfoKHR& operator=( VkImportFenceWin32HandleInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportFenceWin32HandleInfoKHR ) ); + return *this; + } + ImportFenceWin32HandleInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImportFenceWin32HandleInfoKHR& setFence( Fence fence_ ) + { + fence = fence_; + return *this; + } + + ImportFenceWin32HandleInfoKHR& setFlags( FenceImportFlags flags_ ) + { + flags = flags_; + return *this; + } + + ImportFenceWin32HandleInfoKHR& setHandleType( ExternalFenceHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + ImportFenceWin32HandleInfoKHR& setHandle( HANDLE handle_ ) + { + handle = handle_; + return *this; + } + + ImportFenceWin32HandleInfoKHR& setName( LPCWSTR name_ ) + { + name = name_; + return *this; + } + + operator VkImportFenceWin32HandleInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkImportFenceWin32HandleInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImportFenceWin32HandleInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fence == rhs.fence ) + && ( flags == rhs.flags ) + && ( handleType == rhs.handleType ) + && ( handle == rhs.handle ) + && ( name == rhs.name ); + } + + bool operator!=( ImportFenceWin32HandleInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImportFenceWin32HandleInfoKHR; + + public: + const void* pNext = nullptr; + Fence fence; + FenceImportFlags flags; + ExternalFenceHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; + }; + static_assert( sizeof( ImportFenceWin32HandleInfoKHR ) == sizeof( VkImportFenceWin32HandleInfoKHR ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct ImportFenceFdInfoKHR + { + ImportFenceFdInfoKHR( Fence fence_ = Fence(), + FenceImportFlags flags_ = FenceImportFlags(), + ExternalFenceHandleTypeFlagBits handleType_ = ExternalFenceHandleTypeFlagBits::eOpaqueFd, + int fd_ = 0 ) + : fence( fence_ ) + , flags( flags_ ) + , handleType( handleType_ ) + , fd( fd_ ) + { + } + + ImportFenceFdInfoKHR( VkImportFenceFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportFenceFdInfoKHR ) ); + } + + ImportFenceFdInfoKHR& operator=( VkImportFenceFdInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( ImportFenceFdInfoKHR ) ); + return *this; + } + ImportFenceFdInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ImportFenceFdInfoKHR& setFence( Fence fence_ ) + { + fence = fence_; + return *this; + } + + ImportFenceFdInfoKHR& setFlags( FenceImportFlags flags_ ) + { + flags = flags_; + return *this; + } + + ImportFenceFdInfoKHR& setHandleType( ExternalFenceHandleTypeFlagBits handleType_ ) + { + handleType = handleType_; + return *this; + } + + ImportFenceFdInfoKHR& setFd( int fd_ ) + { + fd = fd_; + return *this; + } + + operator VkImportFenceFdInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkImportFenceFdInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( ImportFenceFdInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fence == rhs.fence ) + && ( flags == rhs.flags ) + && ( handleType == rhs.handleType ) + && ( fd == rhs.fd ); + } + + bool operator!=( ImportFenceFdInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eImportFenceFdInfoKHR; + + public: + const void* pNext = nullptr; + Fence fence; + FenceImportFlags flags; + ExternalFenceHandleTypeFlagBits handleType; + int fd; + }; + static_assert( sizeof( ImportFenceFdInfoKHR ) == sizeof( VkImportFenceFdInfoKHR ), "struct and wrapper have different size!" 
); + + enum class SurfaceCounterFlagBitsEXT + { + eVblank = VK_SURFACE_COUNTER_VBLANK_EXT + }; + + using SurfaceCounterFlagsEXT = Flags<SurfaceCounterFlagBitsEXT, VkSurfaceCounterFlagsEXT>; + + VULKAN_HPP_INLINE SurfaceCounterFlagsEXT operator|( SurfaceCounterFlagBitsEXT bit0, SurfaceCounterFlagBitsEXT bit1 ) + { + return SurfaceCounterFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE SurfaceCounterFlagsEXT operator~( SurfaceCounterFlagBitsEXT bits ) + { + return ~( SurfaceCounterFlagsEXT( bits ) ); + } + + template <> struct FlagTraits<SurfaceCounterFlagBitsEXT> + { + enum + { + allFlags = VkFlags(SurfaceCounterFlagBitsEXT::eVblank) + }; + }; + + struct SurfaceCapabilities2EXT + { + operator VkSurfaceCapabilities2EXT const&() const + { + return *reinterpret_cast<const VkSurfaceCapabilities2EXT*>(this); + } + + operator VkSurfaceCapabilities2EXT &() + { + return *reinterpret_cast<VkSurfaceCapabilities2EXT*>(this); + } + + bool operator==( SurfaceCapabilities2EXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( minImageCount == rhs.minImageCount ) + && ( maxImageCount == rhs.maxImageCount ) + && ( currentExtent == rhs.currentExtent ) + && ( minImageExtent == rhs.minImageExtent ) + && ( maxImageExtent == rhs.maxImageExtent ) + && ( maxImageArrayLayers == rhs.maxImageArrayLayers ) + && ( supportedTransforms == rhs.supportedTransforms ) + && ( currentTransform == rhs.currentTransform ) + && ( supportedCompositeAlpha == rhs.supportedCompositeAlpha ) + && ( supportedUsageFlags == rhs.supportedUsageFlags ) + && ( supportedSurfaceCounters == rhs.supportedSurfaceCounters ); + } + + bool operator!=( SurfaceCapabilities2EXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSurfaceCapabilities2EXT; + + public: + void* pNext = nullptr; + uint32_t minImageCount; + uint32_t maxImageCount; + Extent2D currentExtent; + Extent2D minImageExtent; + Extent2D maxImageExtent; + uint32_t maxImageArrayLayers; + SurfaceTransformFlagsKHR supportedTransforms; + SurfaceTransformFlagBitsKHR currentTransform; + CompositeAlphaFlagsKHR supportedCompositeAlpha; + ImageUsageFlags supportedUsageFlags; + SurfaceCounterFlagsEXT supportedSurfaceCounters; + }; + static_assert( sizeof( SurfaceCapabilities2EXT ) == sizeof( VkSurfaceCapabilities2EXT ), "struct and wrapper have different size!"
); + + struct SwapchainCounterCreateInfoEXT + { + SwapchainCounterCreateInfoEXT( SurfaceCounterFlagsEXT surfaceCounters_ = SurfaceCounterFlagsEXT() ) + : surfaceCounters( surfaceCounters_ ) + { + } + + SwapchainCounterCreateInfoEXT( VkSwapchainCounterCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SwapchainCounterCreateInfoEXT ) ); + } + + SwapchainCounterCreateInfoEXT& operator=( VkSwapchainCounterCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SwapchainCounterCreateInfoEXT ) ); + return *this; + } + SwapchainCounterCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SwapchainCounterCreateInfoEXT& setSurfaceCounters( SurfaceCounterFlagsEXT surfaceCounters_ ) + { + surfaceCounters = surfaceCounters_; + return *this; + } + + operator VkSwapchainCounterCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkSwapchainCounterCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( SwapchainCounterCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( surfaceCounters == rhs.surfaceCounters ); + } + + bool operator!=( SwapchainCounterCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSwapchainCounterCreateInfoEXT; + + public: + const void* pNext = nullptr; + SurfaceCounterFlagsEXT surfaceCounters; + }; + static_assert( sizeof( SwapchainCounterCreateInfoEXT ) == sizeof( VkSwapchainCounterCreateInfoEXT ), "struct and wrapper have different size!" ); + + enum class DisplayPowerStateEXT + { + eOff = VK_DISPLAY_POWER_STATE_OFF_EXT, + eSuspend = VK_DISPLAY_POWER_STATE_SUSPEND_EXT, + eOn = VK_DISPLAY_POWER_STATE_ON_EXT + }; + + struct DisplayPowerInfoEXT + { + DisplayPowerInfoEXT( DisplayPowerStateEXT powerState_ = DisplayPowerStateEXT::eOff ) + : powerState( powerState_ ) + { + } + + DisplayPowerInfoEXT( VkDisplayPowerInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayPowerInfoEXT ) ); + } + + DisplayPowerInfoEXT& operator=( VkDisplayPowerInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayPowerInfoEXT ) ); + return *this; + } + DisplayPowerInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DisplayPowerInfoEXT& setPowerState( DisplayPowerStateEXT powerState_ ) + { + powerState = powerState_; + return *this; + } + + operator VkDisplayPowerInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayPowerInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayPowerInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( powerState == rhs.powerState ); + } + + bool operator!=( DisplayPowerInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplayPowerInfoEXT; + + public: + const void* pNext = nullptr; + DisplayPowerStateEXT powerState; + }; + static_assert( sizeof( DisplayPowerInfoEXT ) == sizeof( VkDisplayPowerInfoEXT ), "struct and wrapper have different size!" 
); + + enum class DeviceEventTypeEXT + { + eDisplayHotplug = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT + }; + + struct DeviceEventInfoEXT + { + DeviceEventInfoEXT( DeviceEventTypeEXT deviceEvent_ = DeviceEventTypeEXT::eDisplayHotplug ) + : deviceEvent( deviceEvent_ ) + { + } + + DeviceEventInfoEXT( VkDeviceEventInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceEventInfoEXT ) ); + } + + DeviceEventInfoEXT& operator=( VkDeviceEventInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceEventInfoEXT ) ); + return *this; + } + DeviceEventInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceEventInfoEXT& setDeviceEvent( DeviceEventTypeEXT deviceEvent_ ) + { + deviceEvent = deviceEvent_; + return *this; + } + + operator VkDeviceEventInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceEventInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceEventInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceEvent == rhs.deviceEvent ); + } + + bool operator!=( DeviceEventInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceEventInfoEXT; + + public: + const void* pNext = nullptr; + DeviceEventTypeEXT deviceEvent; + }; + static_assert( sizeof( DeviceEventInfoEXT ) == sizeof( VkDeviceEventInfoEXT ), "struct and wrapper have different size!" ); + + enum class DisplayEventTypeEXT + { + eFirstPixelOut = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT + }; + + struct DisplayEventInfoEXT + { + DisplayEventInfoEXT( DisplayEventTypeEXT displayEvent_ = DisplayEventTypeEXT::eFirstPixelOut ) + : displayEvent( displayEvent_ ) + { + } + + DisplayEventInfoEXT( VkDisplayEventInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayEventInfoEXT ) ); + } + + DisplayEventInfoEXT& operator=( VkDisplayEventInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DisplayEventInfoEXT ) ); + return *this; + } + DisplayEventInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DisplayEventInfoEXT& setDisplayEvent( DisplayEventTypeEXT displayEvent_ ) + { + displayEvent = displayEvent_; + return *this; + } + + operator VkDisplayEventInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDisplayEventInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DisplayEventInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( displayEvent == rhs.displayEvent ); + } + + bool operator!=( DisplayEventInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDisplayEventInfoEXT; + + public: + const void* pNext = nullptr; + DisplayEventTypeEXT displayEvent; + }; + static_assert( sizeof( DisplayEventInfoEXT ) == sizeof( VkDisplayEventInfoEXT ), "struct and wrapper have different size!" 
); + + enum class PeerMemoryFeatureFlagBits + { + eCopySrc = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT, + eCopySrcKHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT, + eCopyDst = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT, + eCopyDstKHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT, + eGenericSrc = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT, + eGenericSrcKHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT, + eGenericDst = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT, + eGenericDstKHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT + }; + + using PeerMemoryFeatureFlags = Flags; + + VULKAN_HPP_INLINE PeerMemoryFeatureFlags operator|( PeerMemoryFeatureFlagBits bit0, PeerMemoryFeatureFlagBits bit1 ) + { + return PeerMemoryFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE PeerMemoryFeatureFlags operator~( PeerMemoryFeatureFlagBits bits ) + { + return ~( PeerMemoryFeatureFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(PeerMemoryFeatureFlagBits::eCopySrc) | VkFlags(PeerMemoryFeatureFlagBits::eCopyDst) | VkFlags(PeerMemoryFeatureFlagBits::eGenericSrc) | VkFlags(PeerMemoryFeatureFlagBits::eGenericDst) + }; + }; + + using PeerMemoryFeatureFlagsKHR = PeerMemoryFeatureFlags; + + enum class MemoryAllocateFlagBits + { + eDeviceMask = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, + eDeviceMaskKHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT + }; + + using MemoryAllocateFlags = Flags; + + VULKAN_HPP_INLINE MemoryAllocateFlags operator|( MemoryAllocateFlagBits bit0, MemoryAllocateFlagBits bit1 ) + { + return MemoryAllocateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE MemoryAllocateFlags operator~( MemoryAllocateFlagBits bits ) + { + return ~( MemoryAllocateFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(MemoryAllocateFlagBits::eDeviceMask) + }; + }; + + using MemoryAllocateFlagsKHR = MemoryAllocateFlags; + + struct MemoryAllocateFlagsInfo + { + MemoryAllocateFlagsInfo( MemoryAllocateFlags flags_ = MemoryAllocateFlags(), + uint32_t deviceMask_ = 0 ) + : flags( flags_ ) + , deviceMask( deviceMask_ ) + { + } + + MemoryAllocateFlagsInfo( VkMemoryAllocateFlagsInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryAllocateFlagsInfo ) ); + } + + MemoryAllocateFlagsInfo& operator=( VkMemoryAllocateFlagsInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( MemoryAllocateFlagsInfo ) ); + return *this; + } + MemoryAllocateFlagsInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + MemoryAllocateFlagsInfo& setFlags( MemoryAllocateFlags flags_ ) + { + flags = flags_; + return *this; + } + + MemoryAllocateFlagsInfo& setDeviceMask( uint32_t deviceMask_ ) + { + deviceMask = deviceMask_; + return *this; + } + + operator VkMemoryAllocateFlagsInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkMemoryAllocateFlagsInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( MemoryAllocateFlagsInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( deviceMask == rhs.deviceMask ); + } + + bool operator!=( MemoryAllocateFlagsInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eMemoryAllocateFlagsInfo; + + public: + const void* pNext = nullptr; + MemoryAllocateFlags flags; + uint32_t deviceMask; + }; + static_assert( sizeof( MemoryAllocateFlagsInfo ) == sizeof( VkMemoryAllocateFlagsInfo ), "struct and wrapper have different size!" 
); + + using MemoryAllocateFlagsInfoKHR = MemoryAllocateFlagsInfo; + + enum class DeviceGroupPresentModeFlagBitsKHR + { + eLocal = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR, + eRemote = VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR, + eSum = VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR, + eLocalMultiDevice = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR + }; + + using DeviceGroupPresentModeFlagsKHR = Flags<DeviceGroupPresentModeFlagBitsKHR, VkDeviceGroupPresentModeFlagsKHR>; + + VULKAN_HPP_INLINE DeviceGroupPresentModeFlagsKHR operator|( DeviceGroupPresentModeFlagBitsKHR bit0, DeviceGroupPresentModeFlagBitsKHR bit1 ) + { + return DeviceGroupPresentModeFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE DeviceGroupPresentModeFlagsKHR operator~( DeviceGroupPresentModeFlagBitsKHR bits ) + { + return ~( DeviceGroupPresentModeFlagsKHR( bits ) ); + } + + template <> struct FlagTraits<DeviceGroupPresentModeFlagBitsKHR> + { + enum + { + allFlags = VkFlags(DeviceGroupPresentModeFlagBitsKHR::eLocal) | VkFlags(DeviceGroupPresentModeFlagBitsKHR::eRemote) | VkFlags(DeviceGroupPresentModeFlagBitsKHR::eSum) | VkFlags(DeviceGroupPresentModeFlagBitsKHR::eLocalMultiDevice) + }; + }; + + struct DeviceGroupPresentCapabilitiesKHR + { + operator VkDeviceGroupPresentCapabilitiesKHR const&() const + { + return *reinterpret_cast<const VkDeviceGroupPresentCapabilitiesKHR*>(this); + } + + operator VkDeviceGroupPresentCapabilitiesKHR &() + { + return *reinterpret_cast<VkDeviceGroupPresentCapabilitiesKHR*>(this); + } + + bool operator==( DeviceGroupPresentCapabilitiesKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memcmp( presentMask, rhs.presentMask, VK_MAX_DEVICE_GROUP_SIZE * sizeof( uint32_t ) ) == 0 ) + && ( modes == rhs.modes ); + } + + bool operator!=( DeviceGroupPresentCapabilitiesKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGroupPresentCapabilitiesKHR; + + public: + const void* pNext = nullptr; + uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE]; + DeviceGroupPresentModeFlagsKHR modes; + }; + static_assert( sizeof( DeviceGroupPresentCapabilitiesKHR ) == sizeof( VkDeviceGroupPresentCapabilitiesKHR ), "struct and wrapper have different size!"
); + + struct DeviceGroupPresentInfoKHR + { + DeviceGroupPresentInfoKHR( uint32_t swapchainCount_ = 0, + const uint32_t* pDeviceMasks_ = nullptr, + DeviceGroupPresentModeFlagBitsKHR mode_ = DeviceGroupPresentModeFlagBitsKHR::eLocal ) + : swapchainCount( swapchainCount_ ) + , pDeviceMasks( pDeviceMasks_ ) + , mode( mode_ ) + { + } + + DeviceGroupPresentInfoKHR( VkDeviceGroupPresentInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupPresentInfoKHR ) ); + } + + DeviceGroupPresentInfoKHR& operator=( VkDeviceGroupPresentInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupPresentInfoKHR ) ); + return *this; + } + DeviceGroupPresentInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceGroupPresentInfoKHR& setSwapchainCount( uint32_t swapchainCount_ ) + { + swapchainCount = swapchainCount_; + return *this; + } + + DeviceGroupPresentInfoKHR& setPDeviceMasks( const uint32_t* pDeviceMasks_ ) + { + pDeviceMasks = pDeviceMasks_; + return *this; + } + + DeviceGroupPresentInfoKHR& setMode( DeviceGroupPresentModeFlagBitsKHR mode_ ) + { + mode = mode_; + return *this; + } + + operator VkDeviceGroupPresentInfoKHR const&() const + { + return *reinterpret_cast<const VkDeviceGroupPresentInfoKHR*>(this); + } + + operator VkDeviceGroupPresentInfoKHR &() + { + return *reinterpret_cast<VkDeviceGroupPresentInfoKHR*>(this); + } + + bool operator==( DeviceGroupPresentInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchainCount == rhs.swapchainCount ) + && ( pDeviceMasks == rhs.pDeviceMasks ) + && ( mode == rhs.mode ); + } + + bool operator!=( DeviceGroupPresentInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGroupPresentInfoKHR; + + public: + const void* pNext = nullptr; + uint32_t swapchainCount; + const uint32_t* pDeviceMasks; + DeviceGroupPresentModeFlagBitsKHR mode; + }; + static_assert( sizeof( DeviceGroupPresentInfoKHR ) == sizeof( VkDeviceGroupPresentInfoKHR ), "struct and wrapper have different size!"
); + + struct DeviceGroupSwapchainCreateInfoKHR + { + DeviceGroupSwapchainCreateInfoKHR( DeviceGroupPresentModeFlagsKHR modes_ = DeviceGroupPresentModeFlagsKHR() ) + : modes( modes_ ) + { + } + + DeviceGroupSwapchainCreateInfoKHR( VkDeviceGroupSwapchainCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupSwapchainCreateInfoKHR ) ); + } + + DeviceGroupSwapchainCreateInfoKHR& operator=( VkDeviceGroupSwapchainCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupSwapchainCreateInfoKHR ) ); + return *this; + } + DeviceGroupSwapchainCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceGroupSwapchainCreateInfoKHR& setModes( DeviceGroupPresentModeFlagsKHR modes_ ) + { + modes = modes_; + return *this; + } + + operator VkDeviceGroupSwapchainCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceGroupSwapchainCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceGroupSwapchainCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( modes == rhs.modes ); + } + + bool operator!=( DeviceGroupSwapchainCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGroupSwapchainCreateInfoKHR; + + public: + const void* pNext = nullptr; + DeviceGroupPresentModeFlagsKHR modes; + }; + static_assert( sizeof( DeviceGroupSwapchainCreateInfoKHR ) == sizeof( VkDeviceGroupSwapchainCreateInfoKHR ), "struct and wrapper have different size!" ); + + enum class SwapchainCreateFlagBitsKHR + { + eSplitInstanceBindRegions = VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR, + eProtected = VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR + }; + + using SwapchainCreateFlagsKHR = Flags; + + VULKAN_HPP_INLINE SwapchainCreateFlagsKHR operator|( SwapchainCreateFlagBitsKHR bit0, SwapchainCreateFlagBitsKHR bit1 ) + { + return SwapchainCreateFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE SwapchainCreateFlagsKHR operator~( SwapchainCreateFlagBitsKHR bits ) + { + return ~( SwapchainCreateFlagsKHR( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(SwapchainCreateFlagBitsKHR::eSplitInstanceBindRegions) | VkFlags(SwapchainCreateFlagBitsKHR::eProtected) + }; + }; + + struct SwapchainCreateInfoKHR + { + SwapchainCreateInfoKHR( SwapchainCreateFlagsKHR flags_ = SwapchainCreateFlagsKHR(), + SurfaceKHR surface_ = SurfaceKHR(), + uint32_t minImageCount_ = 0, + Format imageFormat_ = Format::eUndefined, + ColorSpaceKHR imageColorSpace_ = ColorSpaceKHR::eSrgbNonlinear, + Extent2D imageExtent_ = Extent2D(), + uint32_t imageArrayLayers_ = 0, + ImageUsageFlags imageUsage_ = ImageUsageFlags(), + SharingMode imageSharingMode_ = SharingMode::eExclusive, + uint32_t queueFamilyIndexCount_ = 0, + const uint32_t* pQueueFamilyIndices_ = nullptr, + SurfaceTransformFlagBitsKHR preTransform_ = SurfaceTransformFlagBitsKHR::eIdentity, + CompositeAlphaFlagBitsKHR compositeAlpha_ = CompositeAlphaFlagBitsKHR::eOpaque, + PresentModeKHR presentMode_ = PresentModeKHR::eImmediate, + Bool32 clipped_ = 0, + SwapchainKHR oldSwapchain_ = SwapchainKHR() ) + : flags( flags_ ) + , surface( surface_ ) + , minImageCount( minImageCount_ ) + , imageFormat( imageFormat_ ) + , imageColorSpace( imageColorSpace_ ) + , imageExtent( imageExtent_ ) + , imageArrayLayers( imageArrayLayers_ ) + , imageUsage( imageUsage_ ) + , imageSharingMode( imageSharingMode_ ) + , queueFamilyIndexCount( 
queueFamilyIndexCount_ ) + , pQueueFamilyIndices( pQueueFamilyIndices_ ) + , preTransform( preTransform_ ) + , compositeAlpha( compositeAlpha_ ) + , presentMode( presentMode_ ) + , clipped( clipped_ ) + , oldSwapchain( oldSwapchain_ ) + { + } + + SwapchainCreateInfoKHR( VkSwapchainCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SwapchainCreateInfoKHR ) ); + } + + SwapchainCreateInfoKHR& operator=( VkSwapchainCreateInfoKHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SwapchainCreateInfoKHR ) ); + return *this; + } + SwapchainCreateInfoKHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SwapchainCreateInfoKHR& setFlags( SwapchainCreateFlagsKHR flags_ ) + { + flags = flags_; + return *this; + } + + SwapchainCreateInfoKHR& setSurface( SurfaceKHR surface_ ) + { + surface = surface_; + return *this; + } + + SwapchainCreateInfoKHR& setMinImageCount( uint32_t minImageCount_ ) + { + minImageCount = minImageCount_; + return *this; + } + + SwapchainCreateInfoKHR& setImageFormat( Format imageFormat_ ) + { + imageFormat = imageFormat_; + return *this; + } + + SwapchainCreateInfoKHR& setImageColorSpace( ColorSpaceKHR imageColorSpace_ ) + { + imageColorSpace = imageColorSpace_; + return *this; + } + + SwapchainCreateInfoKHR& setImageExtent( Extent2D imageExtent_ ) + { + imageExtent = imageExtent_; + return *this; + } + + SwapchainCreateInfoKHR& setImageArrayLayers( uint32_t imageArrayLayers_ ) + { + imageArrayLayers = imageArrayLayers_; + return *this; + } + + SwapchainCreateInfoKHR& setImageUsage( ImageUsageFlags imageUsage_ ) + { + imageUsage = imageUsage_; + return *this; + } + + SwapchainCreateInfoKHR& setImageSharingMode( SharingMode imageSharingMode_ ) + { + imageSharingMode = imageSharingMode_; + return *this; + } + + SwapchainCreateInfoKHR& setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ ) + { + queueFamilyIndexCount = queueFamilyIndexCount_; + return *this; + } + + SwapchainCreateInfoKHR& setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ ) + { + pQueueFamilyIndices = pQueueFamilyIndices_; + return *this; + } + + SwapchainCreateInfoKHR& setPreTransform( SurfaceTransformFlagBitsKHR preTransform_ ) + { + preTransform = preTransform_; + return *this; + } + + SwapchainCreateInfoKHR& setCompositeAlpha( CompositeAlphaFlagBitsKHR compositeAlpha_ ) + { + compositeAlpha = compositeAlpha_; + return *this; + } + + SwapchainCreateInfoKHR& setPresentMode( PresentModeKHR presentMode_ ) + { + presentMode = presentMode_; + return *this; + } + + SwapchainCreateInfoKHR& setClipped( Bool32 clipped_ ) + { + clipped = clipped_; + return *this; + } + + SwapchainCreateInfoKHR& setOldSwapchain( SwapchainKHR oldSwapchain_ ) + { + oldSwapchain = oldSwapchain_; + return *this; + } + + operator VkSwapchainCreateInfoKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSwapchainCreateInfoKHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SwapchainCreateInfoKHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( surface == rhs.surface ) + && ( minImageCount == rhs.minImageCount ) + && ( imageFormat == rhs.imageFormat ) + && ( imageColorSpace == rhs.imageColorSpace ) + && ( imageExtent == rhs.imageExtent ) + && ( imageArrayLayers == rhs.imageArrayLayers ) + && ( imageUsage == rhs.imageUsage ) + && ( imageSharingMode == rhs.imageSharingMode ) + && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount ) + && ( pQueueFamilyIndices == rhs.pQueueFamilyIndices 
) + && ( preTransform == rhs.preTransform ) + && ( compositeAlpha == rhs.compositeAlpha ) + && ( presentMode == rhs.presentMode ) + && ( clipped == rhs.clipped ) + && ( oldSwapchain == rhs.oldSwapchain ); + } + + bool operator!=( SwapchainCreateInfoKHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSwapchainCreateInfoKHR; + + public: + const void* pNext = nullptr; + SwapchainCreateFlagsKHR flags; + SurfaceKHR surface; + uint32_t minImageCount; + Format imageFormat; + ColorSpaceKHR imageColorSpace; + Extent2D imageExtent; + uint32_t imageArrayLayers; + ImageUsageFlags imageUsage; + SharingMode imageSharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + SurfaceTransformFlagBitsKHR preTransform; + CompositeAlphaFlagBitsKHR compositeAlpha; + PresentModeKHR presentMode; + Bool32 clipped; + SwapchainKHR oldSwapchain; + }; + static_assert( sizeof( SwapchainCreateInfoKHR ) == sizeof( VkSwapchainCreateInfoKHR ), "struct and wrapper have different size!" ); + + enum class ViewportCoordinateSwizzleNV + { + ePositiveX = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV, + eNegativeX = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV, + ePositiveY = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV, + eNegativeY = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV, + ePositiveZ = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV, + eNegativeZ = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV, + ePositiveW = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV, + eNegativeW = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV + }; + + struct ViewportSwizzleNV + { + ViewportSwizzleNV( ViewportCoordinateSwizzleNV x_ = ViewportCoordinateSwizzleNV::ePositiveX, + ViewportCoordinateSwizzleNV y_ = ViewportCoordinateSwizzleNV::ePositiveX, + ViewportCoordinateSwizzleNV z_ = ViewportCoordinateSwizzleNV::ePositiveX, + ViewportCoordinateSwizzleNV w_ = ViewportCoordinateSwizzleNV::ePositiveX ) + : x( x_ ) + , y( y_ ) + , z( z_ ) + , w( w_ ) + { + } + + ViewportSwizzleNV( VkViewportSwizzleNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ViewportSwizzleNV ) ); + } + + ViewportSwizzleNV& operator=( VkViewportSwizzleNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ViewportSwizzleNV ) ); + return *this; + } + ViewportSwizzleNV& setX( ViewportCoordinateSwizzleNV x_ ) + { + x = x_; + return *this; + } + + ViewportSwizzleNV& setY( ViewportCoordinateSwizzleNV y_ ) + { + y = y_; + return *this; + } + + ViewportSwizzleNV& setZ( ViewportCoordinateSwizzleNV z_ ) + { + z = z_; + return *this; + } + + ViewportSwizzleNV& setW( ViewportCoordinateSwizzleNV w_ ) + { + w = w_; + return *this; + } + + operator VkViewportSwizzleNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkViewportSwizzleNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( ViewportSwizzleNV const& rhs ) const + { + return ( x == rhs.x ) + && ( y == rhs.y ) + && ( z == rhs.z ) + && ( w == rhs.w ); + } + + bool operator!=( ViewportSwizzleNV const& rhs ) const + { + return !operator==( rhs ); + } + + ViewportCoordinateSwizzleNV x; + ViewportCoordinateSwizzleNV y; + ViewportCoordinateSwizzleNV z; + ViewportCoordinateSwizzleNV w; + }; + static_assert( sizeof( ViewportSwizzleNV ) == sizeof( VkViewportSwizzleNV ), "struct and wrapper have different size!" 
); + + struct PipelineViewportSwizzleStateCreateInfoNV + { + PipelineViewportSwizzleStateCreateInfoNV( PipelineViewportSwizzleStateCreateFlagsNV flags_ = PipelineViewportSwizzleStateCreateFlagsNV(), + uint32_t viewportCount_ = 0, + const ViewportSwizzleNV* pViewportSwizzles_ = nullptr ) + : flags( flags_ ) + , viewportCount( viewportCount_ ) + , pViewportSwizzles( pViewportSwizzles_ ) + { + } + + PipelineViewportSwizzleStateCreateInfoNV( VkPipelineViewportSwizzleStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportSwizzleStateCreateInfoNV ) ); + } + + PipelineViewportSwizzleStateCreateInfoNV& operator=( VkPipelineViewportSwizzleStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportSwizzleStateCreateInfoNV ) ); + return *this; + } + PipelineViewportSwizzleStateCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineViewportSwizzleStateCreateInfoNV& setFlags( PipelineViewportSwizzleStateCreateFlagsNV flags_ ) + { + flags = flags_; + return *this; + } + + PipelineViewportSwizzleStateCreateInfoNV& setViewportCount( uint32_t viewportCount_ ) + { + viewportCount = viewportCount_; + return *this; + } + + PipelineViewportSwizzleStateCreateInfoNV& setPViewportSwizzles( const ViewportSwizzleNV* pViewportSwizzles_ ) + { + pViewportSwizzles = pViewportSwizzles_; + return *this; + } + + operator VkPipelineViewportSwizzleStateCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineViewportSwizzleStateCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineViewportSwizzleStateCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( viewportCount == rhs.viewportCount ) + && ( pViewportSwizzles == rhs.pViewportSwizzles ); + } + + bool operator!=( PipelineViewportSwizzleStateCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineViewportSwizzleStateCreateInfoNV; + + public: + const void* pNext = nullptr; + PipelineViewportSwizzleStateCreateFlagsNV flags; + uint32_t viewportCount; + const ViewportSwizzleNV* pViewportSwizzles; + }; + static_assert( sizeof( PipelineViewportSwizzleStateCreateInfoNV ) == sizeof( VkPipelineViewportSwizzleStateCreateInfoNV ), "struct and wrapper have different size!" 
); + + enum class DiscardRectangleModeEXT + { + eInclusive = VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT, + eExclusive = VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT + }; + + struct PipelineDiscardRectangleStateCreateInfoEXT + { + PipelineDiscardRectangleStateCreateInfoEXT( PipelineDiscardRectangleStateCreateFlagsEXT flags_ = PipelineDiscardRectangleStateCreateFlagsEXT(), + DiscardRectangleModeEXT discardRectangleMode_ = DiscardRectangleModeEXT::eInclusive, + uint32_t discardRectangleCount_ = 0, + const Rect2D* pDiscardRectangles_ = nullptr ) + : flags( flags_ ) + , discardRectangleMode( discardRectangleMode_ ) + , discardRectangleCount( discardRectangleCount_ ) + , pDiscardRectangles( pDiscardRectangles_ ) + { + } + + PipelineDiscardRectangleStateCreateInfoEXT( VkPipelineDiscardRectangleStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineDiscardRectangleStateCreateInfoEXT ) ); + } + + PipelineDiscardRectangleStateCreateInfoEXT& operator=( VkPipelineDiscardRectangleStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineDiscardRectangleStateCreateInfoEXT ) ); + return *this; + } + PipelineDiscardRectangleStateCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT& setFlags( PipelineDiscardRectangleStateCreateFlagsEXT flags_ ) + { + flags = flags_; + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT& setDiscardRectangleMode( DiscardRectangleModeEXT discardRectangleMode_ ) + { + discardRectangleMode = discardRectangleMode_; + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT& setDiscardRectangleCount( uint32_t discardRectangleCount_ ) + { + discardRectangleCount = discardRectangleCount_; + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT& setPDiscardRectangles( const Rect2D* pDiscardRectangles_ ) + { + pDiscardRectangles = pDiscardRectangles_; + return *this; + } + + operator VkPipelineDiscardRectangleStateCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineDiscardRectangleStateCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineDiscardRectangleStateCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( discardRectangleMode == rhs.discardRectangleMode ) + && ( discardRectangleCount == rhs.discardRectangleCount ) + && ( pDiscardRectangles == rhs.pDiscardRectangles ); + } + + bool operator!=( PipelineDiscardRectangleStateCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineDiscardRectangleStateCreateInfoEXT; + + public: + const void* pNext = nullptr; + PipelineDiscardRectangleStateCreateFlagsEXT flags; + DiscardRectangleModeEXT discardRectangleMode; + uint32_t discardRectangleCount; + const Rect2D* pDiscardRectangles; + }; + static_assert( sizeof( PipelineDiscardRectangleStateCreateInfoEXT ) == sizeof( VkPipelineDiscardRectangleStateCreateInfoEXT ), "struct and wrapper have different size!" 
); + + enum class SubpassDescriptionFlagBits + { + ePerViewAttributesNVX = VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX, + ePerViewPositionXOnlyNVX = VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX + }; + + using SubpassDescriptionFlags = Flags; + + VULKAN_HPP_INLINE SubpassDescriptionFlags operator|( SubpassDescriptionFlagBits bit0, SubpassDescriptionFlagBits bit1 ) + { + return SubpassDescriptionFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE SubpassDescriptionFlags operator~( SubpassDescriptionFlagBits bits ) + { + return ~( SubpassDescriptionFlags( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(SubpassDescriptionFlagBits::ePerViewAttributesNVX) | VkFlags(SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX) + }; + }; + + struct SubpassDescription + { + SubpassDescription( SubpassDescriptionFlags flags_ = SubpassDescriptionFlags(), + PipelineBindPoint pipelineBindPoint_ = PipelineBindPoint::eGraphics, + uint32_t inputAttachmentCount_ = 0, + const AttachmentReference* pInputAttachments_ = nullptr, + uint32_t colorAttachmentCount_ = 0, + const AttachmentReference* pColorAttachments_ = nullptr, + const AttachmentReference* pResolveAttachments_ = nullptr, + const AttachmentReference* pDepthStencilAttachment_ = nullptr, + uint32_t preserveAttachmentCount_ = 0, + const uint32_t* pPreserveAttachments_ = nullptr ) + : flags( flags_ ) + , pipelineBindPoint( pipelineBindPoint_ ) + , inputAttachmentCount( inputAttachmentCount_ ) + , pInputAttachments( pInputAttachments_ ) + , colorAttachmentCount( colorAttachmentCount_ ) + , pColorAttachments( pColorAttachments_ ) + , pResolveAttachments( pResolveAttachments_ ) + , pDepthStencilAttachment( pDepthStencilAttachment_ ) + , preserveAttachmentCount( preserveAttachmentCount_ ) + , pPreserveAttachments( pPreserveAttachments_ ) + { + } + + SubpassDescription( VkSubpassDescription const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassDescription ) ); + } + + SubpassDescription& operator=( VkSubpassDescription const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassDescription ) ); + return *this; + } + SubpassDescription& setFlags( SubpassDescriptionFlags flags_ ) + { + flags = flags_; + return *this; + } + + SubpassDescription& setPipelineBindPoint( PipelineBindPoint pipelineBindPoint_ ) + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + SubpassDescription& setInputAttachmentCount( uint32_t inputAttachmentCount_ ) + { + inputAttachmentCount = inputAttachmentCount_; + return *this; + } + + SubpassDescription& setPInputAttachments( const AttachmentReference* pInputAttachments_ ) + { + pInputAttachments = pInputAttachments_; + return *this; + } + + SubpassDescription& setColorAttachmentCount( uint32_t colorAttachmentCount_ ) + { + colorAttachmentCount = colorAttachmentCount_; + return *this; + } + + SubpassDescription& setPColorAttachments( const AttachmentReference* pColorAttachments_ ) + { + pColorAttachments = pColorAttachments_; + return *this; + } + + SubpassDescription& setPResolveAttachments( const AttachmentReference* pResolveAttachments_ ) + { + pResolveAttachments = pResolveAttachments_; + return *this; + } + + SubpassDescription& setPDepthStencilAttachment( const AttachmentReference* pDepthStencilAttachment_ ) + { + pDepthStencilAttachment = pDepthStencilAttachment_; + return *this; + } + + SubpassDescription& setPreserveAttachmentCount( uint32_t preserveAttachmentCount_ ) + { + preserveAttachmentCount = preserveAttachmentCount_; + return *this; + } + + SubpassDescription& 
setPPreserveAttachments( const uint32_t* pPreserveAttachments_ ) + { + pPreserveAttachments = pPreserveAttachments_; + return *this; + } + + operator VkSubpassDescription const&() const + { + return *reinterpret_cast(this); + } + + operator VkSubpassDescription &() + { + return *reinterpret_cast(this); + } + + bool operator==( SubpassDescription const& rhs ) const + { + return ( flags == rhs.flags ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( inputAttachmentCount == rhs.inputAttachmentCount ) + && ( pInputAttachments == rhs.pInputAttachments ) + && ( colorAttachmentCount == rhs.colorAttachmentCount ) + && ( pColorAttachments == rhs.pColorAttachments ) + && ( pResolveAttachments == rhs.pResolveAttachments ) + && ( pDepthStencilAttachment == rhs.pDepthStencilAttachment ) + && ( preserveAttachmentCount == rhs.preserveAttachmentCount ) + && ( pPreserveAttachments == rhs.pPreserveAttachments ); + } + + bool operator!=( SubpassDescription const& rhs ) const + { + return !operator==( rhs ); + } + + SubpassDescriptionFlags flags; + PipelineBindPoint pipelineBindPoint; + uint32_t inputAttachmentCount; + const AttachmentReference* pInputAttachments; + uint32_t colorAttachmentCount; + const AttachmentReference* pColorAttachments; + const AttachmentReference* pResolveAttachments; + const AttachmentReference* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; + }; + static_assert( sizeof( SubpassDescription ) == sizeof( VkSubpassDescription ), "struct and wrapper have different size!" ); + + struct RenderPassCreateInfo + { + RenderPassCreateInfo( RenderPassCreateFlags flags_ = RenderPassCreateFlags(), + uint32_t attachmentCount_ = 0, + const AttachmentDescription* pAttachments_ = nullptr, + uint32_t subpassCount_ = 0, + const SubpassDescription* pSubpasses_ = nullptr, + uint32_t dependencyCount_ = 0, + const SubpassDependency* pDependencies_ = nullptr ) + : flags( flags_ ) + , attachmentCount( attachmentCount_ ) + , pAttachments( pAttachments_ ) + , subpassCount( subpassCount_ ) + , pSubpasses( pSubpasses_ ) + , dependencyCount( dependencyCount_ ) + , pDependencies( pDependencies_ ) + { + } + + RenderPassCreateInfo( VkRenderPassCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassCreateInfo ) ); + } + + RenderPassCreateInfo& operator=( VkRenderPassCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassCreateInfo ) ); + return *this; + } + RenderPassCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + RenderPassCreateInfo& setFlags( RenderPassCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + RenderPassCreateInfo& setAttachmentCount( uint32_t attachmentCount_ ) + { + attachmentCount = attachmentCount_; + return *this; + } + + RenderPassCreateInfo& setPAttachments( const AttachmentDescription* pAttachments_ ) + { + pAttachments = pAttachments_; + return *this; + } + + RenderPassCreateInfo& setSubpassCount( uint32_t subpassCount_ ) + { + subpassCount = subpassCount_; + return *this; + } + + RenderPassCreateInfo& setPSubpasses( const SubpassDescription* pSubpasses_ ) + { + pSubpasses = pSubpasses_; + return *this; + } + + RenderPassCreateInfo& setDependencyCount( uint32_t dependencyCount_ ) + { + dependencyCount = dependencyCount_; + return *this; + } + + RenderPassCreateInfo& setPDependencies( const SubpassDependency* pDependencies_ ) + { + pDependencies = pDependencies_; + return *this; + } + + operator VkRenderPassCreateInfo const&() const + { + return 
*reinterpret_cast(this); + } + + operator VkRenderPassCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( RenderPassCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( attachmentCount == rhs.attachmentCount ) + && ( pAttachments == rhs.pAttachments ) + && ( subpassCount == rhs.subpassCount ) + && ( pSubpasses == rhs.pSubpasses ) + && ( dependencyCount == rhs.dependencyCount ) + && ( pDependencies == rhs.pDependencies ); + } + + bool operator!=( RenderPassCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eRenderPassCreateInfo; + + public: + const void* pNext = nullptr; + RenderPassCreateFlags flags; + uint32_t attachmentCount; + const AttachmentDescription* pAttachments; + uint32_t subpassCount; + const SubpassDescription* pSubpasses; + uint32_t dependencyCount; + const SubpassDependency* pDependencies; + }; + static_assert( sizeof( RenderPassCreateInfo ) == sizeof( VkRenderPassCreateInfo ), "struct and wrapper have different size!" ); + + struct SubpassDescription2KHR + { + SubpassDescription2KHR( SubpassDescriptionFlags flags_ = SubpassDescriptionFlags(), + PipelineBindPoint pipelineBindPoint_ = PipelineBindPoint::eGraphics, + uint32_t viewMask_ = 0, + uint32_t inputAttachmentCount_ = 0, + const AttachmentReference2KHR* pInputAttachments_ = nullptr, + uint32_t colorAttachmentCount_ = 0, + const AttachmentReference2KHR* pColorAttachments_ = nullptr, + const AttachmentReference2KHR* pResolveAttachments_ = nullptr, + const AttachmentReference2KHR* pDepthStencilAttachment_ = nullptr, + uint32_t preserveAttachmentCount_ = 0, + const uint32_t* pPreserveAttachments_ = nullptr ) + : flags( flags_ ) + , pipelineBindPoint( pipelineBindPoint_ ) + , viewMask( viewMask_ ) + , inputAttachmentCount( inputAttachmentCount_ ) + , pInputAttachments( pInputAttachments_ ) + , colorAttachmentCount( colorAttachmentCount_ ) + , pColorAttachments( pColorAttachments_ ) + , pResolveAttachments( pResolveAttachments_ ) + , pDepthStencilAttachment( pDepthStencilAttachment_ ) + , preserveAttachmentCount( preserveAttachmentCount_ ) + , pPreserveAttachments( pPreserveAttachments_ ) + { + } + + SubpassDescription2KHR( VkSubpassDescription2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassDescription2KHR ) ); + } + + SubpassDescription2KHR& operator=( VkSubpassDescription2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( SubpassDescription2KHR ) ); + return *this; + } + SubpassDescription2KHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SubpassDescription2KHR& setFlags( SubpassDescriptionFlags flags_ ) + { + flags = flags_; + return *this; + } + + SubpassDescription2KHR& setPipelineBindPoint( PipelineBindPoint pipelineBindPoint_ ) + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + SubpassDescription2KHR& setViewMask( uint32_t viewMask_ ) + { + viewMask = viewMask_; + return *this; + } + + SubpassDescription2KHR& setInputAttachmentCount( uint32_t inputAttachmentCount_ ) + { + inputAttachmentCount = inputAttachmentCount_; + return *this; + } + + SubpassDescription2KHR& setPInputAttachments( const AttachmentReference2KHR* pInputAttachments_ ) + { + pInputAttachments = pInputAttachments_; + return *this; + } + + SubpassDescription2KHR& setColorAttachmentCount( uint32_t colorAttachmentCount_ ) + { + colorAttachmentCount = colorAttachmentCount_; + return *this; + } + + SubpassDescription2KHR& 
setPColorAttachments( const AttachmentReference2KHR* pColorAttachments_ ) + { + pColorAttachments = pColorAttachments_; + return *this; + } + + SubpassDescription2KHR& setPResolveAttachments( const AttachmentReference2KHR* pResolveAttachments_ ) + { + pResolveAttachments = pResolveAttachments_; + return *this; + } + + SubpassDescription2KHR& setPDepthStencilAttachment( const AttachmentReference2KHR* pDepthStencilAttachment_ ) + { + pDepthStencilAttachment = pDepthStencilAttachment_; + return *this; + } + + SubpassDescription2KHR& setPreserveAttachmentCount( uint32_t preserveAttachmentCount_ ) + { + preserveAttachmentCount = preserveAttachmentCount_; + return *this; + } + + SubpassDescription2KHR& setPPreserveAttachments( const uint32_t* pPreserveAttachments_ ) + { + pPreserveAttachments = pPreserveAttachments_; + return *this; + } + + operator VkSubpassDescription2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkSubpassDescription2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( SubpassDescription2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( viewMask == rhs.viewMask ) + && ( inputAttachmentCount == rhs.inputAttachmentCount ) + && ( pInputAttachments == rhs.pInputAttachments ) + && ( colorAttachmentCount == rhs.colorAttachmentCount ) + && ( pColorAttachments == rhs.pColorAttachments ) + && ( pResolveAttachments == rhs.pResolveAttachments ) + && ( pDepthStencilAttachment == rhs.pDepthStencilAttachment ) + && ( preserveAttachmentCount == rhs.preserveAttachmentCount ) + && ( pPreserveAttachments == rhs.pPreserveAttachments ); + } + + bool operator!=( SubpassDescription2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSubpassDescription2KHR; + + public: + const void* pNext = nullptr; + SubpassDescriptionFlags flags; + PipelineBindPoint pipelineBindPoint; + uint32_t viewMask; + uint32_t inputAttachmentCount; + const AttachmentReference2KHR* pInputAttachments; + uint32_t colorAttachmentCount; + const AttachmentReference2KHR* pColorAttachments; + const AttachmentReference2KHR* pResolveAttachments; + const AttachmentReference2KHR* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; + }; + static_assert( sizeof( SubpassDescription2KHR ) == sizeof( VkSubpassDescription2KHR ), "struct and wrapper have different size!" 
); + + struct RenderPassCreateInfo2KHR + { + RenderPassCreateInfo2KHR( RenderPassCreateFlags flags_ = RenderPassCreateFlags(), + uint32_t attachmentCount_ = 0, + const AttachmentDescription2KHR* pAttachments_ = nullptr, + uint32_t subpassCount_ = 0, + const SubpassDescription2KHR* pSubpasses_ = nullptr, + uint32_t dependencyCount_ = 0, + const SubpassDependency2KHR* pDependencies_ = nullptr, + uint32_t correlatedViewMaskCount_ = 0, + const uint32_t* pCorrelatedViewMasks_ = nullptr ) + : flags( flags_ ) + , attachmentCount( attachmentCount_ ) + , pAttachments( pAttachments_ ) + , subpassCount( subpassCount_ ) + , pSubpasses( pSubpasses_ ) + , dependencyCount( dependencyCount_ ) + , pDependencies( pDependencies_ ) + , correlatedViewMaskCount( correlatedViewMaskCount_ ) + , pCorrelatedViewMasks( pCorrelatedViewMasks_ ) + { + } + + RenderPassCreateInfo2KHR( VkRenderPassCreateInfo2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassCreateInfo2KHR ) ); + } + + RenderPassCreateInfo2KHR& operator=( VkRenderPassCreateInfo2KHR const & rhs ) + { + memcpy( this, &rhs, sizeof( RenderPassCreateInfo2KHR ) ); + return *this; + } + RenderPassCreateInfo2KHR& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + RenderPassCreateInfo2KHR& setFlags( RenderPassCreateFlags flags_ ) + { + flags = flags_; + return *this; + } + + RenderPassCreateInfo2KHR& setAttachmentCount( uint32_t attachmentCount_ ) + { + attachmentCount = attachmentCount_; + return *this; + } + + RenderPassCreateInfo2KHR& setPAttachments( const AttachmentDescription2KHR* pAttachments_ ) + { + pAttachments = pAttachments_; + return *this; + } + + RenderPassCreateInfo2KHR& setSubpassCount( uint32_t subpassCount_ ) + { + subpassCount = subpassCount_; + return *this; + } + + RenderPassCreateInfo2KHR& setPSubpasses( const SubpassDescription2KHR* pSubpasses_ ) + { + pSubpasses = pSubpasses_; + return *this; + } + + RenderPassCreateInfo2KHR& setDependencyCount( uint32_t dependencyCount_ ) + { + dependencyCount = dependencyCount_; + return *this; + } + + RenderPassCreateInfo2KHR& setPDependencies( const SubpassDependency2KHR* pDependencies_ ) + { + pDependencies = pDependencies_; + return *this; + } + + RenderPassCreateInfo2KHR& setCorrelatedViewMaskCount( uint32_t correlatedViewMaskCount_ ) + { + correlatedViewMaskCount = correlatedViewMaskCount_; + return *this; + } + + RenderPassCreateInfo2KHR& setPCorrelatedViewMasks( const uint32_t* pCorrelatedViewMasks_ ) + { + pCorrelatedViewMasks = pCorrelatedViewMasks_; + return *this; + } + + operator VkRenderPassCreateInfo2KHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkRenderPassCreateInfo2KHR &() + { + return *reinterpret_cast(this); + } + + bool operator==( RenderPassCreateInfo2KHR const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( attachmentCount == rhs.attachmentCount ) + && ( pAttachments == rhs.pAttachments ) + && ( subpassCount == rhs.subpassCount ) + && ( pSubpasses == rhs.pSubpasses ) + && ( dependencyCount == rhs.dependencyCount ) + && ( pDependencies == rhs.pDependencies ) + && ( correlatedViewMaskCount == rhs.correlatedViewMaskCount ) + && ( pCorrelatedViewMasks == rhs.pCorrelatedViewMasks ); + } + + bool operator!=( RenderPassCreateInfo2KHR const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eRenderPassCreateInfo2KHR; + + public: + const void* pNext = nullptr; + RenderPassCreateFlags flags; + uint32_t 
attachmentCount; + const AttachmentDescription2KHR* pAttachments; + uint32_t subpassCount; + const SubpassDescription2KHR* pSubpasses; + uint32_t dependencyCount; + const SubpassDependency2KHR* pDependencies; + uint32_t correlatedViewMaskCount; + const uint32_t* pCorrelatedViewMasks; + }; + static_assert( sizeof( RenderPassCreateInfo2KHR ) == sizeof( VkRenderPassCreateInfo2KHR ), "struct and wrapper have different size!" ); + + enum class PointClippingBehavior + { + eAllClipPlanes = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES, + eAllClipPlanesKHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES, + eUserClipPlanesOnly = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY, + eUserClipPlanesOnlyKHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY + }; + + struct PhysicalDevicePointClippingProperties + { + operator VkPhysicalDevicePointClippingProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDevicePointClippingProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDevicePointClippingProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pointClippingBehavior == rhs.pointClippingBehavior ); + } + + bool operator!=( PhysicalDevicePointClippingProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDevicePointClippingProperties; + + public: + void* pNext = nullptr; + PointClippingBehavior pointClippingBehavior; + }; + static_assert( sizeof( PhysicalDevicePointClippingProperties ) == sizeof( VkPhysicalDevicePointClippingProperties ), "struct and wrapper have different size!" ); + + using PhysicalDevicePointClippingPropertiesKHR = PhysicalDevicePointClippingProperties; + + enum class SamplerReductionModeEXT + { + eWeightedAverage = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT, + eMin = VK_SAMPLER_REDUCTION_MODE_MIN_EXT, + eMax = VK_SAMPLER_REDUCTION_MODE_MAX_EXT + }; + + struct SamplerReductionModeCreateInfoEXT + { + SamplerReductionModeCreateInfoEXT( SamplerReductionModeEXT reductionMode_ = SamplerReductionModeEXT::eWeightedAverage ) + : reductionMode( reductionMode_ ) + { + } + + SamplerReductionModeCreateInfoEXT( VkSamplerReductionModeCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SamplerReductionModeCreateInfoEXT ) ); + } + + SamplerReductionModeCreateInfoEXT& operator=( VkSamplerReductionModeCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( SamplerReductionModeCreateInfoEXT ) ); + return *this; + } + SamplerReductionModeCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SamplerReductionModeCreateInfoEXT& setReductionMode( SamplerReductionModeEXT reductionMode_ ) + { + reductionMode = reductionMode_; + return *this; + } + + operator VkSamplerReductionModeCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkSamplerReductionModeCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( SamplerReductionModeCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( reductionMode == rhs.reductionMode ); + } + + bool operator!=( SamplerReductionModeCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSamplerReductionModeCreateInfoEXT; + + public: + const void* pNext = nullptr; + SamplerReductionModeEXT reductionMode; + }; + static_assert( sizeof( SamplerReductionModeCreateInfoEXT ) 
== sizeof( VkSamplerReductionModeCreateInfoEXT ), "struct and wrapper have different size!" ); + + enum class TessellationDomainOrigin + { + eUpperLeft = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT, + eUpperLeftKHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT, + eLowerLeft = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT, + eLowerLeftKHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT + }; + + struct PipelineTessellationDomainOriginStateCreateInfo + { + PipelineTessellationDomainOriginStateCreateInfo( TessellationDomainOrigin domainOrigin_ = TessellationDomainOrigin::eUpperLeft ) + : domainOrigin( domainOrigin_ ) + { + } + + PipelineTessellationDomainOriginStateCreateInfo( VkPipelineTessellationDomainOriginStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineTessellationDomainOriginStateCreateInfo ) ); + } + + PipelineTessellationDomainOriginStateCreateInfo& operator=( VkPipelineTessellationDomainOriginStateCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineTessellationDomainOriginStateCreateInfo ) ); + return *this; + } + PipelineTessellationDomainOriginStateCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineTessellationDomainOriginStateCreateInfo& setDomainOrigin( TessellationDomainOrigin domainOrigin_ ) + { + domainOrigin = domainOrigin_; + return *this; + } + + operator VkPipelineTessellationDomainOriginStateCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineTessellationDomainOriginStateCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineTessellationDomainOriginStateCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( domainOrigin == rhs.domainOrigin ); + } + + bool operator!=( PipelineTessellationDomainOriginStateCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineTessellationDomainOriginStateCreateInfo; + + public: + const void* pNext = nullptr; + TessellationDomainOrigin domainOrigin; + }; + static_assert( sizeof( PipelineTessellationDomainOriginStateCreateInfo ) == sizeof( VkPipelineTessellationDomainOriginStateCreateInfo ), "struct and wrapper have different size!" 
); + + using PipelineTessellationDomainOriginStateCreateInfoKHR = PipelineTessellationDomainOriginStateCreateInfo; + + enum class SamplerYcbcrModelConversion + { + eRgbIdentity = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + eRgbIdentityKHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + eYcbcrIdentity = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + eYcbcrIdentityKHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + eYcbcr709 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709, + eYcbcr709KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709, + eYcbcr601 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601, + eYcbcr601KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601, + eYcbcr2020 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020, + eYcbcr2020KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 + }; + + enum class SamplerYcbcrRange + { + eItuFull = VK_SAMPLER_YCBCR_RANGE_ITU_FULL, + eItuFullKHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL, + eItuNarrow = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW, + eItuNarrowKHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW + }; + + enum class ChromaLocation + { + eCositedEven = VK_CHROMA_LOCATION_COSITED_EVEN, + eCositedEvenKHR = VK_CHROMA_LOCATION_COSITED_EVEN, + eMidpoint = VK_CHROMA_LOCATION_MIDPOINT, + eMidpointKHR = VK_CHROMA_LOCATION_MIDPOINT + }; + + struct SamplerYcbcrConversionCreateInfo + { + SamplerYcbcrConversionCreateInfo( Format format_ = Format::eUndefined, + SamplerYcbcrModelConversion ycbcrModel_ = SamplerYcbcrModelConversion::eRgbIdentity, + SamplerYcbcrRange ycbcrRange_ = SamplerYcbcrRange::eItuFull, + ComponentMapping components_ = ComponentMapping(), + ChromaLocation xChromaOffset_ = ChromaLocation::eCositedEven, + ChromaLocation yChromaOffset_ = ChromaLocation::eCositedEven, + Filter chromaFilter_ = Filter::eNearest, + Bool32 forceExplicitReconstruction_ = 0 ) + : format( format_ ) + , ycbcrModel( ycbcrModel_ ) + , ycbcrRange( ycbcrRange_ ) + , components( components_ ) + , xChromaOffset( xChromaOffset_ ) + , yChromaOffset( yChromaOffset_ ) + , chromaFilter( chromaFilter_ ) + , forceExplicitReconstruction( forceExplicitReconstruction_ ) + { + } + + SamplerYcbcrConversionCreateInfo( VkSamplerYcbcrConversionCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SamplerYcbcrConversionCreateInfo ) ); + } + + SamplerYcbcrConversionCreateInfo& operator=( VkSamplerYcbcrConversionCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SamplerYcbcrConversionCreateInfo ) ); + return *this; + } + SamplerYcbcrConversionCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SamplerYcbcrConversionCreateInfo& setFormat( Format format_ ) + { + format = format_; + return *this; + } + + SamplerYcbcrConversionCreateInfo& setYcbcrModel( SamplerYcbcrModelConversion ycbcrModel_ ) + { + ycbcrModel = ycbcrModel_; + return *this; + } + + SamplerYcbcrConversionCreateInfo& setYcbcrRange( SamplerYcbcrRange ycbcrRange_ ) + { + ycbcrRange = ycbcrRange_; + return *this; + } + + SamplerYcbcrConversionCreateInfo& setComponents( ComponentMapping components_ ) + { + components = components_; + return *this; + } + + SamplerYcbcrConversionCreateInfo& setXChromaOffset( ChromaLocation xChromaOffset_ ) + { + xChromaOffset = xChromaOffset_; + return *this; + } + + SamplerYcbcrConversionCreateInfo& setYChromaOffset( ChromaLocation yChromaOffset_ ) + { + yChromaOffset = yChromaOffset_; + return *this; + } + + SamplerYcbcrConversionCreateInfo& setChromaFilter( Filter chromaFilter_ ) + { + chromaFilter = chromaFilter_; + return *this; + } + + 
SamplerYcbcrConversionCreateInfo& setForceExplicitReconstruction( Bool32 forceExplicitReconstruction_ ) + { + forceExplicitReconstruction = forceExplicitReconstruction_; + return *this; + } + + operator VkSamplerYcbcrConversionCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkSamplerYcbcrConversionCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( SamplerYcbcrConversionCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( format == rhs.format ) + && ( ycbcrModel == rhs.ycbcrModel ) + && ( ycbcrRange == rhs.ycbcrRange ) + && ( components == rhs.components ) + && ( xChromaOffset == rhs.xChromaOffset ) + && ( yChromaOffset == rhs.yChromaOffset ) + && ( chromaFilter == rhs.chromaFilter ) + && ( forceExplicitReconstruction == rhs.forceExplicitReconstruction ); + } + + bool operator!=( SamplerYcbcrConversionCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSamplerYcbcrConversionCreateInfo; + + public: + const void* pNext = nullptr; + Format format; + SamplerYcbcrModelConversion ycbcrModel; + SamplerYcbcrRange ycbcrRange; + ComponentMapping components; + ChromaLocation xChromaOffset; + ChromaLocation yChromaOffset; + Filter chromaFilter; + Bool32 forceExplicitReconstruction; + }; + static_assert( sizeof( SamplerYcbcrConversionCreateInfo ) == sizeof( VkSamplerYcbcrConversionCreateInfo ), "struct and wrapper have different size!" ); + + using SamplerYcbcrConversionCreateInfoKHR = SamplerYcbcrConversionCreateInfo; + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + struct AndroidHardwareBufferFormatPropertiesANDROID + { + operator VkAndroidHardwareBufferFormatPropertiesANDROID const&() const + { + return *reinterpret_cast(this); + } + + operator VkAndroidHardwareBufferFormatPropertiesANDROID &() + { + return *reinterpret_cast(this); + } + + bool operator==( AndroidHardwareBufferFormatPropertiesANDROID const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( format == rhs.format ) + && ( externalFormat == rhs.externalFormat ) + && ( formatFeatures == rhs.formatFeatures ) + && ( samplerYcbcrConversionComponents == rhs.samplerYcbcrConversionComponents ) + && ( suggestedYcbcrModel == rhs.suggestedYcbcrModel ) + && ( suggestedYcbcrRange == rhs.suggestedYcbcrRange ) + && ( suggestedXChromaOffset == rhs.suggestedXChromaOffset ) + && ( suggestedYChromaOffset == rhs.suggestedYChromaOffset ); + } + + bool operator!=( AndroidHardwareBufferFormatPropertiesANDROID const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eAndroidHardwareBufferFormatPropertiesANDROID; + + public: + void* pNext = nullptr; + Format format; + uint64_t externalFormat; + FormatFeatureFlags formatFeatures; + ComponentMapping samplerYcbcrConversionComponents; + SamplerYcbcrModelConversion suggestedYcbcrModel; + SamplerYcbcrRange suggestedYcbcrRange; + ChromaLocation suggestedXChromaOffset; + ChromaLocation suggestedYChromaOffset; + }; + static_assert( sizeof( AndroidHardwareBufferFormatPropertiesANDROID ) == sizeof( VkAndroidHardwareBufferFormatPropertiesANDROID ), "struct and wrapper have different size!" 
); +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + + enum class BlendOverlapEXT + { + eUncorrelated = VK_BLEND_OVERLAP_UNCORRELATED_EXT, + eDisjoint = VK_BLEND_OVERLAP_DISJOINT_EXT, + eConjoint = VK_BLEND_OVERLAP_CONJOINT_EXT + }; + + struct PipelineColorBlendAdvancedStateCreateInfoEXT + { + PipelineColorBlendAdvancedStateCreateInfoEXT( Bool32 srcPremultiplied_ = 0, + Bool32 dstPremultiplied_ = 0, + BlendOverlapEXT blendOverlap_ = BlendOverlapEXT::eUncorrelated ) + : srcPremultiplied( srcPremultiplied_ ) + , dstPremultiplied( dstPremultiplied_ ) + , blendOverlap( blendOverlap_ ) + { + } + + PipelineColorBlendAdvancedStateCreateInfoEXT( VkPipelineColorBlendAdvancedStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineColorBlendAdvancedStateCreateInfoEXT ) ); + } + + PipelineColorBlendAdvancedStateCreateInfoEXT& operator=( VkPipelineColorBlendAdvancedStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineColorBlendAdvancedStateCreateInfoEXT ) ); + return *this; + } + PipelineColorBlendAdvancedStateCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineColorBlendAdvancedStateCreateInfoEXT& setSrcPremultiplied( Bool32 srcPremultiplied_ ) + { + srcPremultiplied = srcPremultiplied_; + return *this; + } + + PipelineColorBlendAdvancedStateCreateInfoEXT& setDstPremultiplied( Bool32 dstPremultiplied_ ) + { + dstPremultiplied = dstPremultiplied_; + return *this; + } + + PipelineColorBlendAdvancedStateCreateInfoEXT& setBlendOverlap( BlendOverlapEXT blendOverlap_ ) + { + blendOverlap = blendOverlap_; + return *this; + } + + operator VkPipelineColorBlendAdvancedStateCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineColorBlendAdvancedStateCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineColorBlendAdvancedStateCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcPremultiplied == rhs.srcPremultiplied ) + && ( dstPremultiplied == rhs.dstPremultiplied ) + && ( blendOverlap == rhs.blendOverlap ); + } + + bool operator!=( PipelineColorBlendAdvancedStateCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineColorBlendAdvancedStateCreateInfoEXT; + + public: + const void* pNext = nullptr; + Bool32 srcPremultiplied; + Bool32 dstPremultiplied; + BlendOverlapEXT blendOverlap; + }; + static_assert( sizeof( PipelineColorBlendAdvancedStateCreateInfoEXT ) == sizeof( VkPipelineColorBlendAdvancedStateCreateInfoEXT ), "struct and wrapper have different size!" 
); + + enum class CoverageModulationModeNV + { + eNone = VK_COVERAGE_MODULATION_MODE_NONE_NV, + eRgb = VK_COVERAGE_MODULATION_MODE_RGB_NV, + eAlpha = VK_COVERAGE_MODULATION_MODE_ALPHA_NV, + eRgba = VK_COVERAGE_MODULATION_MODE_RGBA_NV + }; + + struct PipelineCoverageModulationStateCreateInfoNV + { + PipelineCoverageModulationStateCreateInfoNV( PipelineCoverageModulationStateCreateFlagsNV flags_ = PipelineCoverageModulationStateCreateFlagsNV(), + CoverageModulationModeNV coverageModulationMode_ = CoverageModulationModeNV::eNone, + Bool32 coverageModulationTableEnable_ = 0, + uint32_t coverageModulationTableCount_ = 0, + const float* pCoverageModulationTable_ = nullptr ) + : flags( flags_ ) + , coverageModulationMode( coverageModulationMode_ ) + , coverageModulationTableEnable( coverageModulationTableEnable_ ) + , coverageModulationTableCount( coverageModulationTableCount_ ) + , pCoverageModulationTable( pCoverageModulationTable_ ) + { + } + + PipelineCoverageModulationStateCreateInfoNV( VkPipelineCoverageModulationStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineCoverageModulationStateCreateInfoNV ) ); + } + + PipelineCoverageModulationStateCreateInfoNV& operator=( VkPipelineCoverageModulationStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineCoverageModulationStateCreateInfoNV ) ); + return *this; + } + PipelineCoverageModulationStateCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV& setFlags( PipelineCoverageModulationStateCreateFlagsNV flags_ ) + { + flags = flags_; + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV& setCoverageModulationMode( CoverageModulationModeNV coverageModulationMode_ ) + { + coverageModulationMode = coverageModulationMode_; + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV& setCoverageModulationTableEnable( Bool32 coverageModulationTableEnable_ ) + { + coverageModulationTableEnable = coverageModulationTableEnable_; + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV& setCoverageModulationTableCount( uint32_t coverageModulationTableCount_ ) + { + coverageModulationTableCount = coverageModulationTableCount_; + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV& setPCoverageModulationTable( const float* pCoverageModulationTable_ ) + { + pCoverageModulationTable = pCoverageModulationTable_; + return *this; + } + + operator VkPipelineCoverageModulationStateCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineCoverageModulationStateCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineCoverageModulationStateCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( coverageModulationMode == rhs.coverageModulationMode ) + && ( coverageModulationTableEnable == rhs.coverageModulationTableEnable ) + && ( coverageModulationTableCount == rhs.coverageModulationTableCount ) + && ( pCoverageModulationTable == rhs.pCoverageModulationTable ); + } + + bool operator!=( PipelineCoverageModulationStateCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineCoverageModulationStateCreateInfoNV; + + public: + const void* pNext = nullptr; + PipelineCoverageModulationStateCreateFlagsNV flags; + CoverageModulationModeNV coverageModulationMode; + Bool32 
coverageModulationTableEnable; + uint32_t coverageModulationTableCount; + const float* pCoverageModulationTable; + }; + static_assert( sizeof( PipelineCoverageModulationStateCreateInfoNV ) == sizeof( VkPipelineCoverageModulationStateCreateInfoNV ), "struct and wrapper have different size!" ); + + enum class ValidationCacheHeaderVersionEXT + { + eOne = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT + }; + + enum class ShaderInfoTypeAMD + { + eStatistics = VK_SHADER_INFO_TYPE_STATISTICS_AMD, + eBinary = VK_SHADER_INFO_TYPE_BINARY_AMD, + eDisassembly = VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD + }; + + enum class QueueGlobalPriorityEXT + { + eLow = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT, + eMedium = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT, + eHigh = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT, + eRealtime = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT + }; + + struct DeviceQueueGlobalPriorityCreateInfoEXT + { + DeviceQueueGlobalPriorityCreateInfoEXT( QueueGlobalPriorityEXT globalPriority_ = QueueGlobalPriorityEXT::eLow ) + : globalPriority( globalPriority_ ) + { + } + + DeviceQueueGlobalPriorityCreateInfoEXT( VkDeviceQueueGlobalPriorityCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceQueueGlobalPriorityCreateInfoEXT ) ); + } + + DeviceQueueGlobalPriorityCreateInfoEXT& operator=( VkDeviceQueueGlobalPriorityCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceQueueGlobalPriorityCreateInfoEXT ) ); + return *this; + } + DeviceQueueGlobalPriorityCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceQueueGlobalPriorityCreateInfoEXT& setGlobalPriority( QueueGlobalPriorityEXT globalPriority_ ) + { + globalPriority = globalPriority_; + return *this; + } + + operator VkDeviceQueueGlobalPriorityCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceQueueGlobalPriorityCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceQueueGlobalPriorityCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( globalPriority == rhs.globalPriority ); + } + + bool operator!=( DeviceQueueGlobalPriorityCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceQueueGlobalPriorityCreateInfoEXT; + + public: + const void* pNext = nullptr; + QueueGlobalPriorityEXT globalPriority; + }; + static_assert( sizeof( DeviceQueueGlobalPriorityCreateInfoEXT ) == sizeof( VkDeviceQueueGlobalPriorityCreateInfoEXT ), "struct and wrapper have different size!" 
);
+
+  enum class DebugUtilsMessageSeverityFlagBitsEXT
+  {
+    eVerbose = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT,
+    eInfo = VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT,
+    eWarning = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT,
+    eError = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT
+  };
+
+  using DebugUtilsMessageSeverityFlagsEXT = Flags<DebugUtilsMessageSeverityFlagBitsEXT, VkDebugUtilsMessageSeverityFlagsEXT>;
+
+  VULKAN_HPP_INLINE DebugUtilsMessageSeverityFlagsEXT operator|( DebugUtilsMessageSeverityFlagBitsEXT bit0, DebugUtilsMessageSeverityFlagBitsEXT bit1 )
+  {
+    return DebugUtilsMessageSeverityFlagsEXT( bit0 ) | bit1;
+  }
+
+  VULKAN_HPP_INLINE DebugUtilsMessageSeverityFlagsEXT operator~( DebugUtilsMessageSeverityFlagBitsEXT bits )
+  {
+    return ~( DebugUtilsMessageSeverityFlagsEXT( bits ) );
+  }
+
+  template <> struct FlagTraits<DebugUtilsMessageSeverityFlagBitsEXT>
+  {
+    enum
+    {
+      allFlags = VkFlags(DebugUtilsMessageSeverityFlagBitsEXT::eVerbose) | VkFlags(DebugUtilsMessageSeverityFlagBitsEXT::eInfo) | VkFlags(DebugUtilsMessageSeverityFlagBitsEXT::eWarning) | VkFlags(DebugUtilsMessageSeverityFlagBitsEXT::eError)
+    };
+  };
+
+  enum class DebugUtilsMessageTypeFlagBitsEXT
+  {
+    eGeneral = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT,
+    eValidation = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT,
+    ePerformance = VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT
+  };
+
+  using DebugUtilsMessageTypeFlagsEXT = Flags<DebugUtilsMessageTypeFlagBitsEXT, VkDebugUtilsMessageTypeFlagsEXT>;
+
+  VULKAN_HPP_INLINE DebugUtilsMessageTypeFlagsEXT operator|( DebugUtilsMessageTypeFlagBitsEXT bit0, DebugUtilsMessageTypeFlagBitsEXT bit1 )
+  {
+    return DebugUtilsMessageTypeFlagsEXT( bit0 ) | bit1;
+  }
+
+  VULKAN_HPP_INLINE DebugUtilsMessageTypeFlagsEXT operator~( DebugUtilsMessageTypeFlagBitsEXT bits )
+  {
+    return ~( DebugUtilsMessageTypeFlagsEXT( bits ) );
+  }
+
+  template <> struct FlagTraits<DebugUtilsMessageTypeFlagBitsEXT>
+  {
+    enum
+    {
+      allFlags = VkFlags(DebugUtilsMessageTypeFlagBitsEXT::eGeneral) | VkFlags(DebugUtilsMessageTypeFlagBitsEXT::eValidation) | VkFlags(DebugUtilsMessageTypeFlagBitsEXT::ePerformance)
+    };
+  };
+
+  struct DebugUtilsMessengerCreateInfoEXT
+  {
+    DebugUtilsMessengerCreateInfoEXT( DebugUtilsMessengerCreateFlagsEXT flags_ = DebugUtilsMessengerCreateFlagsEXT(),
+                                      DebugUtilsMessageSeverityFlagsEXT messageSeverity_ = DebugUtilsMessageSeverityFlagsEXT(),
+                                      DebugUtilsMessageTypeFlagsEXT messageType_ = DebugUtilsMessageTypeFlagsEXT(),
+                                      PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback_ = nullptr,
+                                      void* pUserData_ = nullptr )
+      : flags( flags_ )
+      , messageSeverity( messageSeverity_ )
+      , messageType( messageType_ )
+      , pfnUserCallback( pfnUserCallback_ )
+      , pUserData( pUserData_ )
+    {
+    }
+
+    DebugUtilsMessengerCreateInfoEXT( VkDebugUtilsMessengerCreateInfoEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( DebugUtilsMessengerCreateInfoEXT ) );
+    }
+
+    DebugUtilsMessengerCreateInfoEXT& operator=( VkDebugUtilsMessengerCreateInfoEXT const & rhs )
+    {
+      memcpy( this, &rhs, sizeof( DebugUtilsMessengerCreateInfoEXT ) );
+      return *this;
+    }
+    DebugUtilsMessengerCreateInfoEXT& setPNext( const void* pNext_ )
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    DebugUtilsMessengerCreateInfoEXT& setFlags( DebugUtilsMessengerCreateFlagsEXT flags_ )
+    {
+      flags = flags_;
+      return *this;
+    }
+
+    DebugUtilsMessengerCreateInfoEXT& setMessageSeverity( DebugUtilsMessageSeverityFlagsEXT messageSeverity_ )
+    {
+      messageSeverity = messageSeverity_;
+      return *this;
+    }
+
+    DebugUtilsMessengerCreateInfoEXT& setMessageType( DebugUtilsMessageTypeFlagsEXT messageType_ )
+    {
+      messageType = messageType_;
+      return *this;
+    }
+
+    DebugUtilsMessengerCreateInfoEXT& setPfnUserCallback(
PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback_ ) + { + pfnUserCallback = pfnUserCallback_; + return *this; + } + + DebugUtilsMessengerCreateInfoEXT& setPUserData( void* pUserData_ ) + { + pUserData = pUserData_; + return *this; + } + + operator VkDebugUtilsMessengerCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDebugUtilsMessengerCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( DebugUtilsMessengerCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( messageSeverity == rhs.messageSeverity ) + && ( messageType == rhs.messageType ) + && ( pfnUserCallback == rhs.pfnUserCallback ) + && ( pUserData == rhs.pUserData ); + } + + bool operator!=( DebugUtilsMessengerCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDebugUtilsMessengerCreateInfoEXT; + + public: + const void* pNext = nullptr; + DebugUtilsMessengerCreateFlagsEXT flags; + DebugUtilsMessageSeverityFlagsEXT messageSeverity; + DebugUtilsMessageTypeFlagsEXT messageType; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; + void* pUserData; + }; + static_assert( sizeof( DebugUtilsMessengerCreateInfoEXT ) == sizeof( VkDebugUtilsMessengerCreateInfoEXT ), "struct and wrapper have different size!" ); + + enum class ConservativeRasterizationModeEXT + { + eDisabled = VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT, + eOverestimate = VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT, + eUnderestimate = VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT + }; + + struct PipelineRasterizationConservativeStateCreateInfoEXT + { + PipelineRasterizationConservativeStateCreateInfoEXT( PipelineRasterizationConservativeStateCreateFlagsEXT flags_ = PipelineRasterizationConservativeStateCreateFlagsEXT(), + ConservativeRasterizationModeEXT conservativeRasterizationMode_ = ConservativeRasterizationModeEXT::eDisabled, + float extraPrimitiveOverestimationSize_ = 0 ) + : flags( flags_ ) + , conservativeRasterizationMode( conservativeRasterizationMode_ ) + , extraPrimitiveOverestimationSize( extraPrimitiveOverestimationSize_ ) + { + } + + PipelineRasterizationConservativeStateCreateInfoEXT( VkPipelineRasterizationConservativeStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineRasterizationConservativeStateCreateInfoEXT ) ); + } + + PipelineRasterizationConservativeStateCreateInfoEXT& operator=( VkPipelineRasterizationConservativeStateCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineRasterizationConservativeStateCreateInfoEXT ) ); + return *this; + } + PipelineRasterizationConservativeStateCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineRasterizationConservativeStateCreateInfoEXT& setFlags( PipelineRasterizationConservativeStateCreateFlagsEXT flags_ ) + { + flags = flags_; + return *this; + } + + PipelineRasterizationConservativeStateCreateInfoEXT& setConservativeRasterizationMode( ConservativeRasterizationModeEXT conservativeRasterizationMode_ ) + { + conservativeRasterizationMode = conservativeRasterizationMode_; + return *this; + } + + PipelineRasterizationConservativeStateCreateInfoEXT& setExtraPrimitiveOverestimationSize( float extraPrimitiveOverestimationSize_ ) + { + extraPrimitiveOverestimationSize = extraPrimitiveOverestimationSize_; + return *this; + } + + operator VkPipelineRasterizationConservativeStateCreateInfoEXT const&() 
const + { + return *reinterpret_cast(this); + } + + operator VkPipelineRasterizationConservativeStateCreateInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineRasterizationConservativeStateCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( conservativeRasterizationMode == rhs.conservativeRasterizationMode ) + && ( extraPrimitiveOverestimationSize == rhs.extraPrimitiveOverestimationSize ); + } + + bool operator!=( PipelineRasterizationConservativeStateCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineRasterizationConservativeStateCreateInfoEXT; + + public: + const void* pNext = nullptr; + PipelineRasterizationConservativeStateCreateFlagsEXT flags; + ConservativeRasterizationModeEXT conservativeRasterizationMode; + float extraPrimitiveOverestimationSize; + }; + static_assert( sizeof( PipelineRasterizationConservativeStateCreateInfoEXT ) == sizeof( VkPipelineRasterizationConservativeStateCreateInfoEXT ), "struct and wrapper have different size!" ); + + enum class DescriptorBindingFlagBitsEXT + { + eUpdateAfterBind = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT, + eUpdateUnusedWhilePending = VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT, + ePartiallyBound = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT, + eVariableDescriptorCount = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT + }; + + using DescriptorBindingFlagsEXT = Flags; + + VULKAN_HPP_INLINE DescriptorBindingFlagsEXT operator|( DescriptorBindingFlagBitsEXT bit0, DescriptorBindingFlagBitsEXT bit1 ) + { + return DescriptorBindingFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE DescriptorBindingFlagsEXT operator~( DescriptorBindingFlagBitsEXT bits ) + { + return ~( DescriptorBindingFlagsEXT( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(DescriptorBindingFlagBitsEXT::eUpdateAfterBind) | VkFlags(DescriptorBindingFlagBitsEXT::eUpdateUnusedWhilePending) | VkFlags(DescriptorBindingFlagBitsEXT::ePartiallyBound) | VkFlags(DescriptorBindingFlagBitsEXT::eVariableDescriptorCount) + }; + }; + + struct DescriptorSetLayoutBindingFlagsCreateInfoEXT + { + DescriptorSetLayoutBindingFlagsCreateInfoEXT( uint32_t bindingCount_ = 0, + const DescriptorBindingFlagsEXT* pBindingFlags_ = nullptr ) + : bindingCount( bindingCount_ ) + , pBindingFlags( pBindingFlags_ ) + { + } + + DescriptorSetLayoutBindingFlagsCreateInfoEXT( VkDescriptorSetLayoutBindingFlagsCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetLayoutBindingFlagsCreateInfoEXT ) ); + } + + DescriptorSetLayoutBindingFlagsCreateInfoEXT& operator=( VkDescriptorSetLayoutBindingFlagsCreateInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( DescriptorSetLayoutBindingFlagsCreateInfoEXT ) ); + return *this; + } + DescriptorSetLayoutBindingFlagsCreateInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DescriptorSetLayoutBindingFlagsCreateInfoEXT& setBindingCount( uint32_t bindingCount_ ) + { + bindingCount = bindingCount_; + return *this; + } + + DescriptorSetLayoutBindingFlagsCreateInfoEXT& setPBindingFlags( const DescriptorBindingFlagsEXT* pBindingFlags_ ) + { + pBindingFlags = pBindingFlags_; + return *this; + } + + operator VkDescriptorSetLayoutBindingFlagsCreateInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkDescriptorSetLayoutBindingFlagsCreateInfoEXT &() + { + 
return *reinterpret_cast(this); + } + + bool operator==( DescriptorSetLayoutBindingFlagsCreateInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( bindingCount == rhs.bindingCount ) + && ( pBindingFlags == rhs.pBindingFlags ); + } + + bool operator!=( DescriptorSetLayoutBindingFlagsCreateInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDescriptorSetLayoutBindingFlagsCreateInfoEXT; + + public: + const void* pNext = nullptr; + uint32_t bindingCount; + const DescriptorBindingFlagsEXT* pBindingFlags; + }; + static_assert( sizeof( DescriptorSetLayoutBindingFlagsCreateInfoEXT ) == sizeof( VkDescriptorSetLayoutBindingFlagsCreateInfoEXT ), "struct and wrapper have different size!" ); + + enum class VendorId + { + eViv = VK_VENDOR_ID_VIV, + eVsi = VK_VENDOR_ID_VSI, + eKazan = VK_VENDOR_ID_KAZAN + }; + + enum class ConditionalRenderingFlagBitsEXT + { + eInverted = VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT + }; + + using ConditionalRenderingFlagsEXT = Flags; + + VULKAN_HPP_INLINE ConditionalRenderingFlagsEXT operator|( ConditionalRenderingFlagBitsEXT bit0, ConditionalRenderingFlagBitsEXT bit1 ) + { + return ConditionalRenderingFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE ConditionalRenderingFlagsEXT operator~( ConditionalRenderingFlagBitsEXT bits ) + { + return ~( ConditionalRenderingFlagsEXT( bits ) ); + } + + template <> struct FlagTraits + { + enum + { + allFlags = VkFlags(ConditionalRenderingFlagBitsEXT::eInverted) + }; + }; + + struct ConditionalRenderingBeginInfoEXT + { + ConditionalRenderingBeginInfoEXT( Buffer buffer_ = Buffer(), + DeviceSize offset_ = 0, + ConditionalRenderingFlagsEXT flags_ = ConditionalRenderingFlagsEXT() ) + : buffer( buffer_ ) + , offset( offset_ ) + , flags( flags_ ) + { + } + + ConditionalRenderingBeginInfoEXT( VkConditionalRenderingBeginInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ConditionalRenderingBeginInfoEXT ) ); + } + + ConditionalRenderingBeginInfoEXT& operator=( VkConditionalRenderingBeginInfoEXT const & rhs ) + { + memcpy( this, &rhs, sizeof( ConditionalRenderingBeginInfoEXT ) ); + return *this; + } + ConditionalRenderingBeginInfoEXT& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + ConditionalRenderingBeginInfoEXT& setBuffer( Buffer buffer_ ) + { + buffer = buffer_; + return *this; + } + + ConditionalRenderingBeginInfoEXT& setOffset( DeviceSize offset_ ) + { + offset = offset_; + return *this; + } + + ConditionalRenderingBeginInfoEXT& setFlags( ConditionalRenderingFlagsEXT flags_ ) + { + flags = flags_; + return *this; + } + + operator VkConditionalRenderingBeginInfoEXT const&() const + { + return *reinterpret_cast(this); + } + + operator VkConditionalRenderingBeginInfoEXT &() + { + return *reinterpret_cast(this); + } + + bool operator==( ConditionalRenderingBeginInfoEXT const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( buffer == rhs.buffer ) + && ( offset == rhs.offset ) + && ( flags == rhs.flags ); + } + + bool operator!=( ConditionalRenderingBeginInfoEXT const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eConditionalRenderingBeginInfoEXT; + + public: + const void* pNext = nullptr; + Buffer buffer; + DeviceSize offset; + ConditionalRenderingFlagsEXT flags; + }; + static_assert( sizeof( ConditionalRenderingBeginInfoEXT ) == sizeof( VkConditionalRenderingBeginInfoEXT ), "struct and wrapper have different 
size!" ); + + enum class ShadingRatePaletteEntryNV + { + eNoInvocations = VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV, + e16InvocationsPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV, + e8InvocationsPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV, + e4InvocationsPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV, + e2InvocationsPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV, + e1InvocationPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV, + e1InvocationPer2X1Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV, + e1InvocationPer1X2Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, + e1InvocationPer2X2Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV, + e1InvocationPer4X2Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV, + e1InvocationPer2X4Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV, + e1InvocationPer4X4Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV + }; + + struct ShadingRatePaletteNV + { + ShadingRatePaletteNV( uint32_t shadingRatePaletteEntryCount_ = 0, + const ShadingRatePaletteEntryNV* pShadingRatePaletteEntries_ = nullptr ) + : shadingRatePaletteEntryCount( shadingRatePaletteEntryCount_ ) + , pShadingRatePaletteEntries( pShadingRatePaletteEntries_ ) + { + } + + ShadingRatePaletteNV( VkShadingRatePaletteNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ShadingRatePaletteNV ) ); + } + + ShadingRatePaletteNV& operator=( VkShadingRatePaletteNV const & rhs ) + { + memcpy( this, &rhs, sizeof( ShadingRatePaletteNV ) ); + return *this; + } + ShadingRatePaletteNV& setShadingRatePaletteEntryCount( uint32_t shadingRatePaletteEntryCount_ ) + { + shadingRatePaletteEntryCount = shadingRatePaletteEntryCount_; + return *this; + } + + ShadingRatePaletteNV& setPShadingRatePaletteEntries( const ShadingRatePaletteEntryNV* pShadingRatePaletteEntries_ ) + { + pShadingRatePaletteEntries = pShadingRatePaletteEntries_; + return *this; + } + + operator VkShadingRatePaletteNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkShadingRatePaletteNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( ShadingRatePaletteNV const& rhs ) const + { + return ( shadingRatePaletteEntryCount == rhs.shadingRatePaletteEntryCount ) + && ( pShadingRatePaletteEntries == rhs.pShadingRatePaletteEntries ); + } + + bool operator!=( ShadingRatePaletteNV const& rhs ) const + { + return !operator==( rhs ); + } + + uint32_t shadingRatePaletteEntryCount; + const ShadingRatePaletteEntryNV* pShadingRatePaletteEntries; + }; + static_assert( sizeof( ShadingRatePaletteNV ) == sizeof( VkShadingRatePaletteNV ), "struct and wrapper have different size!" 
); + + struct PipelineViewportShadingRateImageStateCreateInfoNV + { + PipelineViewportShadingRateImageStateCreateInfoNV( Bool32 shadingRateImageEnable_ = 0, + uint32_t viewportCount_ = 0, + const ShadingRatePaletteNV* pShadingRatePalettes_ = nullptr ) + : shadingRateImageEnable( shadingRateImageEnable_ ) + , viewportCount( viewportCount_ ) + , pShadingRatePalettes( pShadingRatePalettes_ ) + { + } + + PipelineViewportShadingRateImageStateCreateInfoNV( VkPipelineViewportShadingRateImageStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportShadingRateImageStateCreateInfoNV ) ); + } + + PipelineViewportShadingRateImageStateCreateInfoNV& operator=( VkPipelineViewportShadingRateImageStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportShadingRateImageStateCreateInfoNV ) ); + return *this; + } + PipelineViewportShadingRateImageStateCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineViewportShadingRateImageStateCreateInfoNV& setShadingRateImageEnable( Bool32 shadingRateImageEnable_ ) + { + shadingRateImageEnable = shadingRateImageEnable_; + return *this; + } + + PipelineViewportShadingRateImageStateCreateInfoNV& setViewportCount( uint32_t viewportCount_ ) + { + viewportCount = viewportCount_; + return *this; + } + + PipelineViewportShadingRateImageStateCreateInfoNV& setPShadingRatePalettes( const ShadingRatePaletteNV* pShadingRatePalettes_ ) + { + pShadingRatePalettes = pShadingRatePalettes_; + return *this; + } + + operator VkPipelineViewportShadingRateImageStateCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineViewportShadingRateImageStateCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineViewportShadingRateImageStateCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shadingRateImageEnable == rhs.shadingRateImageEnable ) + && ( viewportCount == rhs.viewportCount ) + && ( pShadingRatePalettes == rhs.pShadingRatePalettes ); + } + + bool operator!=( PipelineViewportShadingRateImageStateCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineViewportShadingRateImageStateCreateInfoNV; + + public: + const void* pNext = nullptr; + Bool32 shadingRateImageEnable; + uint32_t viewportCount; + const ShadingRatePaletteNV* pShadingRatePalettes; + }; + static_assert( sizeof( PipelineViewportShadingRateImageStateCreateInfoNV ) == sizeof( VkPipelineViewportShadingRateImageStateCreateInfoNV ), "struct and wrapper have different size!" 
); + + struct CoarseSampleOrderCustomNV + { + CoarseSampleOrderCustomNV( ShadingRatePaletteEntryNV shadingRate_ = ShadingRatePaletteEntryNV::eNoInvocations, + uint32_t sampleCount_ = 0, + uint32_t sampleLocationCount_ = 0, + const CoarseSampleLocationNV* pSampleLocations_ = nullptr ) + : shadingRate( shadingRate_ ) + , sampleCount( sampleCount_ ) + , sampleLocationCount( sampleLocationCount_ ) + , pSampleLocations( pSampleLocations_ ) + { + } + + CoarseSampleOrderCustomNV( VkCoarseSampleOrderCustomNV const & rhs ) + { + memcpy( this, &rhs, sizeof( CoarseSampleOrderCustomNV ) ); + } + + CoarseSampleOrderCustomNV& operator=( VkCoarseSampleOrderCustomNV const & rhs ) + { + memcpy( this, &rhs, sizeof( CoarseSampleOrderCustomNV ) ); + return *this; + } + CoarseSampleOrderCustomNV& setShadingRate( ShadingRatePaletteEntryNV shadingRate_ ) + { + shadingRate = shadingRate_; + return *this; + } + + CoarseSampleOrderCustomNV& setSampleCount( uint32_t sampleCount_ ) + { + sampleCount = sampleCount_; + return *this; + } + + CoarseSampleOrderCustomNV& setSampleLocationCount( uint32_t sampleLocationCount_ ) + { + sampleLocationCount = sampleLocationCount_; + return *this; + } + + CoarseSampleOrderCustomNV& setPSampleLocations( const CoarseSampleLocationNV* pSampleLocations_ ) + { + pSampleLocations = pSampleLocations_; + return *this; + } + + operator VkCoarseSampleOrderCustomNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkCoarseSampleOrderCustomNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( CoarseSampleOrderCustomNV const& rhs ) const + { + return ( shadingRate == rhs.shadingRate ) + && ( sampleCount == rhs.sampleCount ) + && ( sampleLocationCount == rhs.sampleLocationCount ) + && ( pSampleLocations == rhs.pSampleLocations ); + } + + bool operator!=( CoarseSampleOrderCustomNV const& rhs ) const + { + return !operator==( rhs ); + } + + ShadingRatePaletteEntryNV shadingRate; + uint32_t sampleCount; + uint32_t sampleLocationCount; + const CoarseSampleLocationNV* pSampleLocations; + }; + static_assert( sizeof( CoarseSampleOrderCustomNV ) == sizeof( VkCoarseSampleOrderCustomNV ), "struct and wrapper have different size!" 
); + + enum class CoarseSampleOrderTypeNV + { + eDefault = VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV, + eCustom = VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV, + ePixelMajor = VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV, + eSampleMajor = VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV + }; + + struct PipelineViewportCoarseSampleOrderStateCreateInfoNV + { + PipelineViewportCoarseSampleOrderStateCreateInfoNV( CoarseSampleOrderTypeNV sampleOrderType_ = CoarseSampleOrderTypeNV::eDefault, + uint32_t customSampleOrderCount_ = 0, + const CoarseSampleOrderCustomNV* pCustomSampleOrders_ = nullptr ) + : sampleOrderType( sampleOrderType_ ) + , customSampleOrderCount( customSampleOrderCount_ ) + , pCustomSampleOrders( pCustomSampleOrders_ ) + { + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV( VkPipelineViewportCoarseSampleOrderStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportCoarseSampleOrderStateCreateInfoNV ) ); + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV& operator=( VkPipelineViewportCoarseSampleOrderStateCreateInfoNV const & rhs ) + { + memcpy( this, &rhs, sizeof( PipelineViewportCoarseSampleOrderStateCreateInfoNV ) ); + return *this; + } + PipelineViewportCoarseSampleOrderStateCreateInfoNV& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV& setSampleOrderType( CoarseSampleOrderTypeNV sampleOrderType_ ) + { + sampleOrderType = sampleOrderType_; + return *this; + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV& setCustomSampleOrderCount( uint32_t customSampleOrderCount_ ) + { + customSampleOrderCount = customSampleOrderCount_; + return *this; + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV& setPCustomSampleOrders( const CoarseSampleOrderCustomNV* pCustomSampleOrders_ ) + { + pCustomSampleOrders = pCustomSampleOrders_; + return *this; + } + + operator VkPipelineViewportCoarseSampleOrderStateCreateInfoNV const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineViewportCoarseSampleOrderStateCreateInfoNV &() + { + return *reinterpret_cast(this); + } + + bool operator==( PipelineViewportCoarseSampleOrderStateCreateInfoNV const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleOrderType == rhs.sampleOrderType ) + && ( customSampleOrderCount == rhs.customSampleOrderCount ) + && ( pCustomSampleOrders == rhs.pCustomSampleOrders ); + } + + bool operator!=( PipelineViewportCoarseSampleOrderStateCreateInfoNV const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePipelineViewportCoarseSampleOrderStateCreateInfoNV; + + public: + const void* pNext = nullptr; + CoarseSampleOrderTypeNV sampleOrderType; + uint32_t customSampleOrderCount; + const CoarseSampleOrderCustomNV* pCustomSampleOrders; + }; + static_assert( sizeof( PipelineViewportCoarseSampleOrderStateCreateInfoNV ) == sizeof( VkPipelineViewportCoarseSampleOrderStateCreateInfoNV ), "struct and wrapper have different size!" 
);
+
+  enum class GeometryInstanceFlagBitsNVX
+  {
+    eTriangleCullDisable = VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NVX,
+    eTriangleCullFlipWinding = VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_FLIP_WINDING_BIT_NVX,
+    eForceOpaque = VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NVX,
+    eForceNoOpaque = VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NVX
+  };
+
+  using GeometryInstanceFlagsNVX = Flags<GeometryInstanceFlagBitsNVX, VkGeometryInstanceFlagsNVX>;
+
+  VULKAN_HPP_INLINE GeometryInstanceFlagsNVX operator|( GeometryInstanceFlagBitsNVX bit0, GeometryInstanceFlagBitsNVX bit1 )
+  {
+    return GeometryInstanceFlagsNVX( bit0 ) | bit1;
+  }
+
+  VULKAN_HPP_INLINE GeometryInstanceFlagsNVX operator~( GeometryInstanceFlagBitsNVX bits )
+  {
+    return ~( GeometryInstanceFlagsNVX( bits ) );
+  }
+
+  template <> struct FlagTraits<GeometryInstanceFlagBitsNVX>
+  {
+    enum
+    {
+      allFlags = VkFlags(GeometryInstanceFlagBitsNVX::eTriangleCullDisable) | VkFlags(GeometryInstanceFlagBitsNVX::eTriangleCullFlipWinding) | VkFlags(GeometryInstanceFlagBitsNVX::eForceOpaque) | VkFlags(GeometryInstanceFlagBitsNVX::eForceNoOpaque)
+    };
+  };
+
+  enum class GeometryFlagBitsNVX
+  {
+    eOpaque = VK_GEOMETRY_OPAQUE_BIT_NVX,
+    eNoDuplicateAnyHitInvocation = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NVX
+  };
+
+  using GeometryFlagsNVX = Flags<GeometryFlagBitsNVX, VkGeometryFlagsNVX>;
+
+  VULKAN_HPP_INLINE GeometryFlagsNVX operator|( GeometryFlagBitsNVX bit0, GeometryFlagBitsNVX bit1 )
+  {
+    return GeometryFlagsNVX( bit0 ) | bit1;
+  }
+
+  VULKAN_HPP_INLINE GeometryFlagsNVX operator~( GeometryFlagBitsNVX bits )
+  {
+    return ~( GeometryFlagsNVX( bits ) );
+  }
+
+  template <> struct FlagTraits<GeometryFlagBitsNVX>
+  {
+    enum
+    {
+      allFlags = VkFlags(GeometryFlagBitsNVX::eOpaque) | VkFlags(GeometryFlagBitsNVX::eNoDuplicateAnyHitInvocation)
+    };
+  };
+
+  enum class BuildAccelerationStructureFlagBitsNVX
+  {
+    eAllowUpdate = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NVX,
+    eAllowCompaction = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NVX,
+    ePreferFastTrace = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NVX,
+    ePreferFastBuild = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NVX,
+    eLowMemory = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NVX
+  };
+
+  using BuildAccelerationStructureFlagsNVX = Flags<BuildAccelerationStructureFlagBitsNVX, VkBuildAccelerationStructureFlagsNVX>;
+
+  VULKAN_HPP_INLINE BuildAccelerationStructureFlagsNVX operator|( BuildAccelerationStructureFlagBitsNVX bit0, BuildAccelerationStructureFlagBitsNVX bit1 )
+  {
+    return BuildAccelerationStructureFlagsNVX( bit0 ) | bit1;
+  }
+
+  VULKAN_HPP_INLINE BuildAccelerationStructureFlagsNVX operator~( BuildAccelerationStructureFlagBitsNVX bits )
+  {
+    return ~( BuildAccelerationStructureFlagsNVX( bits ) );
+  }
+
+  template <> struct FlagTraits<BuildAccelerationStructureFlagBitsNVX>
+  {
+    enum
+    {
+      allFlags = VkFlags(BuildAccelerationStructureFlagBitsNVX::eAllowUpdate) | VkFlags(BuildAccelerationStructureFlagBitsNVX::eAllowCompaction) | VkFlags(BuildAccelerationStructureFlagBitsNVX::ePreferFastTrace) | VkFlags(BuildAccelerationStructureFlagBitsNVX::ePreferFastBuild) | VkFlags(BuildAccelerationStructureFlagBitsNVX::eLowMemory)
+    };
+  };
+
+  enum class CopyAccelerationStructureModeNVX
+  {
+    eClone = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NVX,
+    eCompact = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NVX
+  };
+
+  enum class AccelerationStructureTypeNVX
+  {
+    eTopLevel = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NVX,
+    eBottomLevel = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NVX
+  };
+
+  enum class GeometryTypeNVX
+  {
+    eTriangles = VK_GEOMETRY_TYPE_TRIANGLES_NVX,
+    eAabbs = VK_GEOMETRY_TYPE_AABBS_NVX
+  };
+
+  struct GeometryNVX
+  {
+    GeometryNVX( GeometryTypeNVX geometryType_ = GeometryTypeNVX::eTriangles,
GeometryDataNVX geometry_ = GeometryDataNVX(), + GeometryFlagsNVX flags_ = GeometryFlagsNVX() ) + : geometryType( geometryType_ ) + , geometry( geometry_ ) + , flags( flags_ ) + { + } + + GeometryNVX( VkGeometryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( GeometryNVX ) ); + } + + GeometryNVX& operator=( VkGeometryNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( GeometryNVX ) ); + return *this; + } + GeometryNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + GeometryNVX& setGeometryType( GeometryTypeNVX geometryType_ ) + { + geometryType = geometryType_; + return *this; + } + + GeometryNVX& setGeometry( GeometryDataNVX geometry_ ) + { + geometry = geometry_; + return *this; + } + + GeometryNVX& setFlags( GeometryFlagsNVX flags_ ) + { + flags = flags_; + return *this; + } + + operator VkGeometryNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkGeometryNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( GeometryNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( geometryType == rhs.geometryType ) + && ( geometry == rhs.geometry ) + && ( flags == rhs.flags ); + } + + bool operator!=( GeometryNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eGeometryNVX; + + public: + const void* pNext = nullptr; + GeometryTypeNVX geometryType; + GeometryDataNVX geometry; + GeometryFlagsNVX flags; + }; + static_assert( sizeof( GeometryNVX ) == sizeof( VkGeometryNVX ), "struct and wrapper have different size!" ); + + struct AccelerationStructureCreateInfoNVX + { + AccelerationStructureCreateInfoNVX( AccelerationStructureTypeNVX type_ = AccelerationStructureTypeNVX::eTopLevel, + BuildAccelerationStructureFlagsNVX flags_ = BuildAccelerationStructureFlagsNVX(), + DeviceSize compactedSize_ = 0, + uint32_t instanceCount_ = 0, + uint32_t geometryCount_ = 0, + const GeometryNVX* pGeometries_ = nullptr ) + : type( type_ ) + , flags( flags_ ) + , compactedSize( compactedSize_ ) + , instanceCount( instanceCount_ ) + , geometryCount( geometryCount_ ) + , pGeometries( pGeometries_ ) + { + } + + AccelerationStructureCreateInfoNVX( VkAccelerationStructureCreateInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( AccelerationStructureCreateInfoNVX ) ); + } + + AccelerationStructureCreateInfoNVX& operator=( VkAccelerationStructureCreateInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( AccelerationStructureCreateInfoNVX ) ); + return *this; + } + AccelerationStructureCreateInfoNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + AccelerationStructureCreateInfoNVX& setType( AccelerationStructureTypeNVX type_ ) + { + type = type_; + return *this; + } + + AccelerationStructureCreateInfoNVX& setFlags( BuildAccelerationStructureFlagsNVX flags_ ) + { + flags = flags_; + return *this; + } + + AccelerationStructureCreateInfoNVX& setCompactedSize( DeviceSize compactedSize_ ) + { + compactedSize = compactedSize_; + return *this; + } + + AccelerationStructureCreateInfoNVX& setInstanceCount( uint32_t instanceCount_ ) + { + instanceCount = instanceCount_; + return *this; + } + + AccelerationStructureCreateInfoNVX& setGeometryCount( uint32_t geometryCount_ ) + { + geometryCount = geometryCount_; + return *this; + } + + AccelerationStructureCreateInfoNVX& setPGeometries( const GeometryNVX* pGeometries_ ) + { + pGeometries = pGeometries_; + return *this; + } + + operator VkAccelerationStructureCreateInfoNVX const&() 
const
+    {
+      return *reinterpret_cast<const VkAccelerationStructureCreateInfoNVX*>(this);
+    }
+
+    operator VkAccelerationStructureCreateInfoNVX &()
+    {
+      return *reinterpret_cast<VkAccelerationStructureCreateInfoNVX*>(this);
+    }
+
+    bool operator==( AccelerationStructureCreateInfoNVX const& rhs ) const
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( type == rhs.type )
+          && ( flags == rhs.flags )
+          && ( compactedSize == rhs.compactedSize )
+          && ( instanceCount == rhs.instanceCount )
+          && ( geometryCount == rhs.geometryCount )
+          && ( pGeometries == rhs.pGeometries );
+    }
+
+    bool operator!=( AccelerationStructureCreateInfoNVX const& rhs ) const
+    {
+      return !operator==( rhs );
+    }
+
+  private:
+    StructureType sType = StructureType::eAccelerationStructureCreateInfoNVX;
+
+  public:
+    const void* pNext = nullptr;
+    AccelerationStructureTypeNVX type;
+    BuildAccelerationStructureFlagsNVX flags;
+    DeviceSize compactedSize;
+    uint32_t instanceCount;
+    uint32_t geometryCount;
+    const GeometryNVX* pGeometries;
+  };
+  static_assert( sizeof( AccelerationStructureCreateInfoNVX ) == sizeof( VkAccelerationStructureCreateInfoNVX ), "struct and wrapper have different size!" );
+
+  template <typename Dispatch = DispatchLoaderStatic>
+  Result enumerateInstanceVersion( uint32_t* pApiVersion, Dispatch const &d = Dispatch() );
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch = DispatchLoaderStatic>
+  ResultValueType<uint32_t>::type enumerateInstanceVersion(Dispatch const &d = Dispatch() );
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Result enumerateInstanceVersion( uint32_t* pApiVersion, Dispatch const &d)
+  {
+    return static_cast<Result>( d.vkEnumerateInstanceVersion( pApiVersion ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE ResultValueType<uint32_t>::type enumerateInstanceVersion(Dispatch const &d )
+  {
+    uint32_t apiVersion;
+    Result result = static_cast<Result>( d.vkEnumerateInstanceVersion( &apiVersion ) );
+    return createResultValue( result, apiVersion, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceVersion" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch = DispatchLoaderStatic>
+  Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, LayerProperties* pProperties, Dispatch const &d = Dispatch() );
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Allocator = std::allocator<LayerProperties>, typename Dispatch = DispatchLoaderStatic>
+  typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateInstanceLayerProperties(Dispatch const &d = Dispatch() );
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, LayerProperties* pProperties, Dispatch const &d)
+  {
+    return static_cast<Result>( d.vkEnumerateInstanceLayerProperties( pPropertyCount, reinterpret_cast<VkLayerProperties*>( pProperties ) ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Allocator, typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties,Allocator>>::type enumerateInstanceLayerProperties(Dispatch const &d )
+  {
+    std::vector<LayerProperties,Allocator> properties;
+    uint32_t propertyCount;
+    Result result;
+    do
+    {
+      result = static_cast<Result>( d.vkEnumerateInstanceLayerProperties( &propertyCount, nullptr ) );
+      if ( ( result == Result::eSuccess ) && propertyCount )
+      {
+        properties.resize( propertyCount );
+        result = static_cast<Result>( d.vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast<VkLayerProperties*>( properties.data() ) ) );
+      }
+    } while ( result == Result::eIncomplete );
+    VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
+    properties.resize( propertyCount );
+    return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceLayerProperties" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch = DispatchLoaderStatic>
+  Result
enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, ExtensionProperties* pProperties, Dispatch const &d = Dispatch() ); +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type enumerateInstanceExtensionProperties( Optional layerName = nullptr, Dispatch const &d = Dispatch() ); +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, ExtensionProperties* pProperties, Dispatch const &d) + { + return static_cast( d.vkEnumerateInstanceExtensionProperties( pLayerName, pPropertyCount, reinterpret_cast( pProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type enumerateInstanceExtensionProperties( Optional layerName, Dispatch const &d ) + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkEnumerateInstanceExtensionProperties( layerName ? layerName->c_str() : nullptr, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkEnumerateInstanceExtensionProperties( layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + properties.resize( propertyCount ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceExtensionProperties" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + // forward declarations + struct CmdProcessCommandsInfoNVX; + + class CommandBuffer + { + public: + VULKAN_HPP_CONSTEXPR CommandBuffer() + : m_commandBuffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR CommandBuffer( std::nullptr_t ) + : m_commandBuffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT CommandBuffer( VkCommandBuffer commandBuffer ) + : m_commandBuffer( commandBuffer ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + CommandBuffer & operator=(VkCommandBuffer commandBuffer) + { + m_commandBuffer = commandBuffer; + return *this; + } +#endif + + CommandBuffer & operator=( std::nullptr_t ) + { + m_commandBuffer = VK_NULL_HANDLE; + return *this; + } + + bool operator==( CommandBuffer const & rhs ) const + { + return m_commandBuffer == rhs.m_commandBuffer; + } + + bool operator!=(CommandBuffer const & rhs ) const + { + return m_commandBuffer != rhs.m_commandBuffer; + } + + bool operator<(CommandBuffer const & rhs ) const + { + return m_commandBuffer < rhs.m_commandBuffer; + } + + template + Result begin( const CommandBufferBeginInfo* pBeginInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type begin( const CommandBufferBeginInfo & beginInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result end(Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type end(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result reset( CommandBufferResetFlags flags, Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type reset( CommandBufferResetFlags flags, Dispatch const &d = 
Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void bindPipeline( PipelineBindPoint pipelineBindPoint, Pipeline pipeline, Dispatch const &d = Dispatch() ) const; + + template + void setViewport( uint32_t firstViewport, uint32_t viewportCount, const Viewport* pViewports, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setViewport( uint32_t firstViewport, ArrayProxy viewports, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void setScissor( uint32_t firstScissor, uint32_t scissorCount, const Rect2D* pScissors, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setScissor( uint32_t firstScissor, ArrayProxy scissors, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void setLineWidth( float lineWidth, Dispatch const &d = Dispatch() ) const; + + template + void setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const &d = Dispatch() ) const; + + template + void setBlendConstants( const float blendConstants[4], Dispatch const &d = Dispatch() ) const; + + template + void setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const &d = Dispatch() ) const; + + template + void setStencilCompareMask( StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const &d = Dispatch() ) const; + + template + void setStencilWriteMask( StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const &d = Dispatch() ) const; + + template + void setStencilReference( StencilFaceFlags faceMask, uint32_t reference, Dispatch const &d = Dispatch() ) const; + + template + void bindDescriptorSets( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void bindDescriptorSets( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t firstSet, ArrayProxy descriptorSets, ArrayProxy dynamicOffsets, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void bindIndexBuffer( Buffer buffer, DeviceSize offset, IndexType indexType, Dispatch const &d = Dispatch() ) const; + + template + void bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const Buffer* pBuffers, const DeviceSize* pOffsets, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void bindVertexBuffers( uint32_t firstBinding, ArrayProxy buffers, ArrayProxy offsets, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const &d = Dispatch() ) const; + + template + void drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const &d = Dispatch() ) const; + + template + void drawIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d = Dispatch() ) const; + + template + void drawIndexedIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d = Dispatch() ) const; + + template + void dispatch( 
uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d = Dispatch() ) const; + + template + void dispatchIndirect( Buffer buffer, DeviceSize offset, Dispatch const &d = Dispatch() ) const; + + template + void copyBuffer( Buffer srcBuffer, Buffer dstBuffer, uint32_t regionCount, const BufferCopy* pRegions, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyBuffer( Buffer srcBuffer, Buffer dstBuffer, ArrayProxy regions, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void copyImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageCopy* pRegions, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy regions, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void blitImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageBlit* pRegions, Filter filter, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void blitImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy regions, Filter filter, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void copyBufferToImage( Buffer srcBuffer, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const BufferImageCopy* pRegions, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyBufferToImage( Buffer srcBuffer, Image dstImage, ImageLayout dstImageLayout, ArrayProxy regions, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void copyImageToBuffer( Image srcImage, ImageLayout srcImageLayout, Buffer dstBuffer, uint32_t regionCount, const BufferImageCopy* pRegions, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyImageToBuffer( Image srcImage, ImageLayout srcImageLayout, Buffer dstBuffer, ArrayProxy regions, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void updateBuffer( Buffer dstBuffer, DeviceSize dstOffset, DeviceSize dataSize, const void* pData, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void updateBuffer( Buffer dstBuffer, DeviceSize dstOffset, ArrayProxy data, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void fillBuffer( Buffer dstBuffer, DeviceSize dstOffset, DeviceSize size, uint32_t data, Dispatch const &d = Dispatch() ) const; + + template + void clearColorImage( Image image, ImageLayout imageLayout, const ClearColorValue* pColor, uint32_t rangeCount, const ImageSubresourceRange* pRanges, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void clearColorImage( Image image, ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy ranges, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void clearDepthStencilImage( Image image, ImageLayout imageLayout, const ClearDepthStencilValue* pDepthStencil, uint32_t 
rangeCount, const ImageSubresourceRange* pRanges, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void clearDepthStencilImage( Image image, ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy ranges, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void clearAttachments( uint32_t attachmentCount, const ClearAttachment* pAttachments, uint32_t rectCount, const ClearRect* pRects, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void clearAttachments( ArrayProxy attachments, ArrayProxy rects, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void resolveImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageResolve* pRegions, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void resolveImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy regions, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void setEvent( Event event, PipelineStageFlags stageMask, Dispatch const &d = Dispatch() ) const; + + template + void resetEvent( Event event, PipelineStageFlags stageMask, Dispatch const &d = Dispatch() ) const; + + template + void waitEvents( uint32_t eventCount, const Event* pEvents, PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void waitEvents( ArrayProxy events, PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, ArrayProxy memoryBarriers, ArrayProxy bufferMemoryBarriers, ArrayProxy imageMemoryBarriers, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void pipelineBarrier( PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pipelineBarrier( PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, DependencyFlags dependencyFlags, ArrayProxy memoryBarriers, ArrayProxy bufferMemoryBarriers, ArrayProxy imageMemoryBarriers, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void beginQuery( QueryPool queryPool, uint32_t query, QueryControlFlags flags, Dispatch const &d = Dispatch() ) const; + + template + void endQuery( QueryPool queryPool, uint32_t query, Dispatch const &d = Dispatch() ) const; + + template + void beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin, Dispatch const &d = 
Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void endConditionalRenderingEXT(Dispatch const &d = Dispatch() ) const; + + template + void resetQueryPool( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d = Dispatch() ) const; + + template + void writeTimestamp( PipelineStageFlagBits pipelineStage, QueryPool queryPool, uint32_t query, Dispatch const &d = Dispatch() ) const; + + template + void copyQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Buffer dstBuffer, DeviceSize dstOffset, DeviceSize stride, QueryResultFlags flags, Dispatch const &d = Dispatch() ) const; + + template + void pushConstants( PipelineLayout layout, ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pushConstants( PipelineLayout layout, ShaderStageFlags stageFlags, uint32_t offset, ArrayProxy values, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void beginRenderPass( const RenderPassBeginInfo* pRenderPassBegin, SubpassContents contents, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginRenderPass( const RenderPassBeginInfo & renderPassBegin, SubpassContents contents, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void nextSubpass( SubpassContents contents, Dispatch const &d = Dispatch() ) const; + + template + void endRenderPass(Dispatch const &d = Dispatch() ) const; + + template + void executeCommands( uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void executeCommands( ArrayProxy commandBuffers, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void debugMarkerEndEXT(Dispatch const &d = Dispatch() ) const; + + template + void debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void drawIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d = Dispatch() ) const; + + template + void drawIndexedIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d = Dispatch() ) const; + + template + void processCommandsNVX( const CmdProcessCommandsInfoNVX* pProcessCommandsInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void processCommandsNVX( const CmdProcessCommandsInfoNVX & processCommandsInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void 
reserveSpaceForCommandsNVX( const CmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void reserveSpaceForCommandsNVX( const CmdReserveSpaceForCommandsInfoNVX & reserveSpaceInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void pushDescriptorSetKHR( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const WriteDescriptorSet* pDescriptorWrites, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pushDescriptorSetKHR( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t set, ArrayProxy descriptorWrites, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void setDeviceMask( uint32_t deviceMask, Dispatch const &d = Dispatch() ) const; + + template + void setDeviceMaskKHR( uint32_t deviceMask, Dispatch const &d = Dispatch() ) const; + + template + void dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d = Dispatch() ) const; + + template + void dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d = Dispatch() ) const; + + template + void pushDescriptorSetWithTemplateKHR( DescriptorUpdateTemplate descriptorUpdateTemplate, PipelineLayout layout, uint32_t set, const void* pData, Dispatch const &d = Dispatch() ) const; + + template + void setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const ViewportWScalingNV* pViewportWScalings, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setViewportWScalingNV( uint32_t firstViewport, ArrayProxy viewportWScalings, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const Rect2D* pDiscardRectangles, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy discardRectangles, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void setSampleLocationsEXT( const SampleLocationsInfoEXT* pSampleLocationsInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setSampleLocationsEXT( const SampleLocationsInfoEXT & sampleLocationsInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void endDebugUtilsLabelEXT(Dispatch const &d = Dispatch() ) const; + + template + void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d = Dispatch() ) const; +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void writeBufferMarkerAMD( PipelineStageFlagBits pipelineStage, Buffer dstBuffer, DeviceSize dstOffset, uint32_t marker, Dispatch const &d = Dispatch() ) const; + + template + void beginRenderPass2KHR( const RenderPassBeginInfo* pRenderPassBegin, const SubpassBeginInfoKHR* pSubpassBeginInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginRenderPass2KHR( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfoKHR & subpassBeginInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void nextSubpass2KHR( const SubpassBeginInfoKHR* pSubpassBeginInfo, const SubpassEndInfoKHR* pSubpassEndInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void nextSubpass2KHR( const SubpassBeginInfoKHR & subpassBeginInfo, const SubpassEndInfoKHR & subpassEndInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void endRenderPass2KHR( const SubpassEndInfoKHR* pSubpassEndInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void endRenderPass2KHR( const SubpassEndInfoKHR & subpassEndInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void drawIndirectCountKHR( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d = Dispatch() ) const; + + template + void drawIndexedIndirectCountKHR( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d = Dispatch() ) const; + + template + void setCheckpointNV( const void* pCheckpointMarker, Dispatch const &d = Dispatch() ) const; + + template + void setExclusiveScissorNV( uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const Rect2D* pExclusiveScissors, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setExclusiveScissorNV( uint32_t firstExclusiveScissor, ArrayProxy exclusiveScissors, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void bindShadingRateImageNV( ImageView imageView, ImageLayout imageLayout, Dispatch const &d = Dispatch() ) const; + + template + void setViewportShadingRatePaletteNV( uint32_t firstViewport, uint32_t viewportCount, const ShadingRatePaletteNV* pShadingRatePalettes, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setViewportShadingRatePaletteNV( uint32_t firstViewport, ArrayProxy shadingRatePalettes, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void setCoarseSampleOrderNV( CoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const CoarseSampleOrderCustomNV* pCustomSampleOrders, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setCoarseSampleOrderNV( CoarseSampleOrderTypeNV sampleOrderType, ArrayProxy customSampleOrders, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const &d = Dispatch() ) const; + + template + void drawMeshTasksIndirectNV( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t 
stride, Dispatch const &d = Dispatch() ) const; + + template + void drawMeshTasksIndirectCountNV( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d = Dispatch() ) const; + + template + void copyAccelerationStructureNVX( AccelerationStructureNVX dst, AccelerationStructureNVX src, CopyAccelerationStructureModeNVX mode, Dispatch const &d = Dispatch() ) const; + + template + void writeAccelerationStructurePropertiesNVX( AccelerationStructureNVX accelerationStructure, QueryType queryType, QueryPool queryPool, uint32_t query, Dispatch const &d = Dispatch() ) const; + + template + void buildAccelerationStructureNVX( AccelerationStructureTypeNVX type, uint32_t instanceCount, Buffer instanceData, DeviceSize instanceOffset, uint32_t geometryCount, const GeometryNVX* pGeometries, BuildAccelerationStructureFlagsNVX flags, Bool32 update, AccelerationStructureNVX dst, AccelerationStructureNVX src, Buffer scratch, DeviceSize scratchOffset, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void buildAccelerationStructureNVX( AccelerationStructureTypeNVX type, uint32_t instanceCount, Buffer instanceData, DeviceSize instanceOffset, ArrayProxy geometries, BuildAccelerationStructureFlagsNVX flags, Bool32 update, AccelerationStructureNVX dst, AccelerationStructureNVX src, Buffer scratch, DeviceSize scratchOffset, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void traceRaysNVX( Buffer raygenShaderBindingTableBuffer, DeviceSize raygenShaderBindingOffset, Buffer missShaderBindingTableBuffer, DeviceSize missShaderBindingOffset, DeviceSize missShaderBindingStride, Buffer hitShaderBindingTableBuffer, DeviceSize hitShaderBindingOffset, DeviceSize hitShaderBindingStride, uint32_t width, uint32_t height, Dispatch const &d = Dispatch() ) const; + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCommandBuffer() const + { + return m_commandBuffer; + } + + explicit operator bool() const + { + return m_commandBuffer != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_commandBuffer == VK_NULL_HANDLE; + } + + private: + VkCommandBuffer m_commandBuffer; + }; + + static_assert( sizeof( CommandBuffer ) == sizeof( VkCommandBuffer ), "handle and wrapper have different size!" 
);
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Result CommandBuffer::begin( const CommandBufferBeginInfo* pBeginInfo, Dispatch const &d) const
+  {
+    return static_cast<Result>( d.vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast<const VkCommandBufferBeginInfo*>( pBeginInfo ) ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE ResultValueType<void>::type CommandBuffer::begin( const CommandBufferBeginInfo & beginInfo, Dispatch const &d ) const
+  {
+    Result result = static_cast<Result>( d.vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast<const VkCommandBufferBeginInfo*>( &beginInfo ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::CommandBuffer::begin" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Result CommandBuffer::end(Dispatch const &d) const
+  {
+    return static_cast<Result>( d.vkEndCommandBuffer( m_commandBuffer ) );
+  }
+#else
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE ResultValueType<void>::type CommandBuffer::end(Dispatch const &d ) const
+  {
+    Result result = static_cast<Result>( d.vkEndCommandBuffer( m_commandBuffer ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::CommandBuffer::end" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Result CommandBuffer::reset( CommandBufferResetFlags flags, Dispatch const &d) const
+  {
+    return static_cast<Result>( d.vkResetCommandBuffer( m_commandBuffer, static_cast<VkCommandBufferResetFlags>( flags ) ) );
+  }
+#else
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE ResultValueType<void>::type CommandBuffer::reset( CommandBufferResetFlags flags, Dispatch const &d ) const
+  {
+    Result result = static_cast<Result>( d.vkResetCommandBuffer( m_commandBuffer, static_cast<VkCommandBufferResetFlags>( flags ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::CommandBuffer::reset" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::bindPipeline( PipelineBindPoint pipelineBindPoint, Pipeline pipeline, Dispatch const &d) const
+  {
+    d.vkCmdBindPipeline( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipeline>( pipeline ) );
+  }
+#else
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::bindPipeline( PipelineBindPoint pipelineBindPoint, Pipeline pipeline, Dispatch const &d ) const
+  {
+    d.vkCmdBindPipeline( m_commandBuffer, static_cast<VkPipelineBindPoint>( pipelineBindPoint ), static_cast<VkPipeline>( pipeline ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, uint32_t viewportCount, const Viewport* pViewports, Dispatch const &d) const
+  {
+    d.vkCmdSetViewport( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast<const VkViewport*>( pViewports ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, ArrayProxy<const Viewport> viewports, Dispatch const &d ) const
+  {
+    d.vkCmdSetViewport( m_commandBuffer, firstViewport, viewports.size() , reinterpret_cast<const VkViewport*>( viewports.data() ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setScissor( uint32_t firstScissor, uint32_t scissorCount, const Rect2D* pScissors, Dispatch const &d) const
+  {
+    d.vkCmdSetScissor( m_commandBuffer, firstScissor, scissorCount, reinterpret_cast<const VkRect2D*>( pScissors ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setScissor( uint32_t firstScissor, ArrayProxy<const Rect2D> scissors, Dispatch const &d ) const
+  {
+    d.vkCmdSetScissor( m_commandBuffer, firstScissor, scissors.size() , reinterpret_cast<const VkRect2D*>( scissors.data() ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setLineWidth( float lineWidth, Dispatch const &d) const
+  {
+    d.vkCmdSetLineWidth( m_commandBuffer, lineWidth );
+  }
+#else
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setLineWidth( float lineWidth, Dispatch const &d ) const
+  {
+    d.vkCmdSetLineWidth( m_commandBuffer, lineWidth );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const &d) const
+  {
+    d.vkCmdSetDepthBias( m_commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor );
+  }
+#else
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const &d ) const
+  {
+    d.vkCmdSetDepthBias( m_commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setBlendConstants( const float blendConstants[4], Dispatch const &d) const
+  {
+    d.vkCmdSetBlendConstants( m_commandBuffer, blendConstants );
+  }
+#else
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setBlendConstants( const float blendConstants[4], Dispatch const &d ) const
+  {
+    d.vkCmdSetBlendConstants( m_commandBuffer, blendConstants );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const &d) const
+  {
+    d.vkCmdSetDepthBounds( m_commandBuffer, minDepthBounds, maxDepthBounds );
+  }
+#else
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const &d ) const
+  {
+    d.vkCmdSetDepthBounds( m_commandBuffer, minDepthBounds, maxDepthBounds );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setStencilCompareMask( StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const &d) const
+  {
+    d.vkCmdSetStencilCompareMask( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), compareMask );
+  }
+#else
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setStencilCompareMask( StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const &d ) const
+  {
+    d.vkCmdSetStencilCompareMask( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), compareMask );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setStencilWriteMask( StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const &d) const
+  {
+    d.vkCmdSetStencilWriteMask( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), writeMask );
+  }
+#else
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setStencilWriteMask( StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const &d ) const
+  {
+    d.vkCmdSetStencilWriteMask( m_commandBuffer, static_cast<VkStencilFaceFlags>( faceMask ), writeMask );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::setStencilReference( StencilFaceFlags faceMask, uint32_t
reference, Dispatch const &d) const + { + d.vkCmdSetStencilReference( m_commandBuffer, static_cast( faceMask ), reference ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::setStencilReference( StencilFaceFlags faceMask, uint32_t reference, Dispatch const &d ) const + { + d.vkCmdSetStencilReference( m_commandBuffer, static_cast( faceMask ), reference ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets, Dispatch const &d) const + { + d.vkCmdBindDescriptorSets( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( layout ), firstSet, descriptorSetCount, reinterpret_cast( pDescriptorSets ), dynamicOffsetCount, pDynamicOffsets ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t firstSet, ArrayProxy descriptorSets, ArrayProxy dynamicOffsets, Dispatch const &d ) const + { + d.vkCmdBindDescriptorSets( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( layout ), firstSet, descriptorSets.size() , reinterpret_cast( descriptorSets.data() ), dynamicOffsets.size() , dynamicOffsets.data() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::bindIndexBuffer( Buffer buffer, DeviceSize offset, IndexType indexType, Dispatch const &d) const + { + d.vkCmdBindIndexBuffer( m_commandBuffer, static_cast( buffer ), offset, static_cast( indexType ) ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::bindIndexBuffer( Buffer buffer, DeviceSize offset, IndexType indexType, Dispatch const &d ) const + { + d.vkCmdBindIndexBuffer( m_commandBuffer, static_cast( buffer ), offset, static_cast( indexType ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const Buffer* pBuffers, const DeviceSize* pOffsets, Dispatch const &d) const + { + d.vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast( pBuffers ), pOffsets ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, ArrayProxy buffers, ArrayProxy offsets, Dispatch const &d ) const + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); +#else + if ( buffers.size() != offsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindVertexBuffers: buffers.size() != offsets.size()" ); + } +#endif // VULKAN_HPP_NO_EXCEPTIONS + d.vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, buffers.size() , reinterpret_cast( buffers.data() ), offsets.data() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const &d) const + { + d.vkCmdDraw( m_commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t 
firstVertex, uint32_t firstInstance, Dispatch const &d ) const + { + d.vkCmdDraw( m_commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const &d) const + { + d.vkCmdDrawIndexed( m_commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const &d ) const + { + d.vkCmdDrawIndexed( m_commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d) const + { + d.vkCmdDrawIndirect( m_commandBuffer, static_cast( buffer ), offset, drawCount, stride ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d ) const + { + d.vkCmdDrawIndirect( m_commandBuffer, static_cast( buffer ), offset, drawCount, stride ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d) const + { + d.vkCmdDrawIndexedIndirect( m_commandBuffer, static_cast( buffer ), offset, drawCount, stride ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirect( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d ) const + { + d.vkCmdDrawIndexedIndirect( m_commandBuffer, static_cast( buffer ), offset, drawCount, stride ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d) const + { + d.vkCmdDispatch( m_commandBuffer, groupCountX, groupCountY, groupCountZ ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d ) const + { + d.vkCmdDispatch( m_commandBuffer, groupCountX, groupCountY, groupCountZ ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::dispatchIndirect( Buffer buffer, DeviceSize offset, Dispatch const &d) const + { + d.vkCmdDispatchIndirect( m_commandBuffer, static_cast( buffer ), offset ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::dispatchIndirect( Buffer buffer, DeviceSize offset, Dispatch const &d ) const + { + d.vkCmdDispatchIndirect( m_commandBuffer, static_cast( buffer ), offset ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( Buffer srcBuffer, Buffer dstBuffer, uint32_t regionCount, const BufferCopy* pRegions, Dispatch const &d) const + { + d.vkCmdCopyBuffer( m_commandBuffer, static_cast( srcBuffer ), static_cast( 
dstBuffer ), regionCount, reinterpret_cast( pRegions ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( Buffer srcBuffer, Buffer dstBuffer, ArrayProxy regions, Dispatch const &d ) const + { + d.vkCmdCopyBuffer( m_commandBuffer, static_cast( srcBuffer ), static_cast( dstBuffer ), regions.size() , reinterpret_cast( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::copyImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageCopy* pRegions, Dispatch const &d) const + { + d.vkCmdCopyImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regionCount, reinterpret_cast( pRegions ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy regions, Dispatch const &d ) const + { + d.vkCmdCopyImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regions.size() , reinterpret_cast( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::blitImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageBlit* pRegions, Filter filter, Dispatch const &d) const + { + d.vkCmdBlitImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regionCount, reinterpret_cast( pRegions ), static_cast( filter ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::blitImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy regions, Filter filter, Dispatch const &d ) const + { + d.vkCmdBlitImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regions.size() , reinterpret_cast( regions.data() ), static_cast( filter ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( Buffer srcBuffer, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const BufferImageCopy* pRegions, Dispatch const &d) const + { + d.vkCmdCopyBufferToImage( m_commandBuffer, static_cast( srcBuffer ), static_cast( dstImage ), static_cast( dstImageLayout ), regionCount, reinterpret_cast( pRegions ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( Buffer srcBuffer, Image dstImage, ImageLayout dstImageLayout, ArrayProxy regions, Dispatch const &d ) const + { + d.vkCmdCopyBufferToImage( m_commandBuffer, static_cast( srcBuffer ), static_cast( dstImage ), static_cast( dstImageLayout ), regions.size() , reinterpret_cast( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( Image srcImage, ImageLayout srcImageLayout, Buffer dstBuffer, uint32_t regionCount, const BufferImageCopy* pRegions, Dispatch const &d) const + { + d.vkCmdCopyImageToBuffer( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( 
dstBuffer ), regionCount, reinterpret_cast( pRegions ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( Image srcImage, ImageLayout srcImageLayout, Buffer dstBuffer, ArrayProxy regions, Dispatch const &d ) const + { + d.vkCmdCopyImageToBuffer( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstBuffer ), regions.size() , reinterpret_cast( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( Buffer dstBuffer, DeviceSize dstOffset, DeviceSize dataSize, const void* pData, Dispatch const &d) const + { + d.vkCmdUpdateBuffer( m_commandBuffer, static_cast( dstBuffer ), dstOffset, dataSize, pData ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( Buffer dstBuffer, DeviceSize dstOffset, ArrayProxy data, Dispatch const &d ) const + { + d.vkCmdUpdateBuffer( m_commandBuffer, static_cast( dstBuffer ), dstOffset, data.size() * sizeof( T ) , reinterpret_cast( data.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::fillBuffer( Buffer dstBuffer, DeviceSize dstOffset, DeviceSize size, uint32_t data, Dispatch const &d) const + { + d.vkCmdFillBuffer( m_commandBuffer, static_cast( dstBuffer ), dstOffset, size, data ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::fillBuffer( Buffer dstBuffer, DeviceSize dstOffset, DeviceSize size, uint32_t data, Dispatch const &d ) const + { + d.vkCmdFillBuffer( m_commandBuffer, static_cast( dstBuffer ), dstOffset, size, data ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( Image image, ImageLayout imageLayout, const ClearColorValue* pColor, uint32_t rangeCount, const ImageSubresourceRange* pRanges, Dispatch const &d) const + { + d.vkCmdClearColorImage( m_commandBuffer, static_cast( image ), static_cast( imageLayout ), reinterpret_cast( pColor ), rangeCount, reinterpret_cast( pRanges ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( Image image, ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy ranges, Dispatch const &d ) const + { + d.vkCmdClearColorImage( m_commandBuffer, static_cast( image ), static_cast( imageLayout ), reinterpret_cast( &color ), ranges.size() , reinterpret_cast( ranges.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( Image image, ImageLayout imageLayout, const ClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const ImageSubresourceRange* pRanges, Dispatch const &d) const + { + d.vkCmdClearDepthStencilImage( m_commandBuffer, static_cast( image ), static_cast( imageLayout ), reinterpret_cast( pDepthStencil ), rangeCount, reinterpret_cast( pRanges ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( Image image, ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy ranges, Dispatch const &d ) const + { + d.vkCmdClearDepthStencilImage( m_commandBuffer, static_cast( image ), static_cast( imageLayout ), reinterpret_cast( &depthStencil ), ranges.size() , reinterpret_cast( ranges.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + 
template + VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( uint32_t attachmentCount, const ClearAttachment* pAttachments, uint32_t rectCount, const ClearRect* pRects, Dispatch const &d) const + { + d.vkCmdClearAttachments( m_commandBuffer, attachmentCount, reinterpret_cast( pAttachments ), rectCount, reinterpret_cast( pRects ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( ArrayProxy attachments, ArrayProxy rects, Dispatch const &d ) const + { + d.vkCmdClearAttachments( m_commandBuffer, attachments.size() , reinterpret_cast( attachments.data() ), rects.size() , reinterpret_cast( rects.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::resolveImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, uint32_t regionCount, const ImageResolve* pRegions, Dispatch const &d) const + { + d.vkCmdResolveImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regionCount, reinterpret_cast( pRegions ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::resolveImage( Image srcImage, ImageLayout srcImageLayout, Image dstImage, ImageLayout dstImageLayout, ArrayProxy regions, Dispatch const &d ) const + { + d.vkCmdResolveImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regions.size() , reinterpret_cast( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setEvent( Event event, PipelineStageFlags stageMask, Dispatch const &d) const + { + d.vkCmdSetEvent( m_commandBuffer, static_cast( event ), static_cast( stageMask ) ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::setEvent( Event event, PipelineStageFlags stageMask, Dispatch const &d ) const + { + d.vkCmdSetEvent( m_commandBuffer, static_cast( event ), static_cast( stageMask ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::resetEvent( Event event, PipelineStageFlags stageMask, Dispatch const &d) const + { + d.vkCmdResetEvent( m_commandBuffer, static_cast( event ), static_cast( stageMask ) ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::resetEvent( Event event, PipelineStageFlags stageMask, Dispatch const &d ) const + { + d.vkCmdResetEvent( m_commandBuffer, static_cast( event ), static_cast( stageMask ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::waitEvents( uint32_t eventCount, const Event* pEvents, PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const &d) const + { + d.vkCmdWaitEvents( m_commandBuffer, eventCount, reinterpret_cast( pEvents ), static_cast( srcStageMask ), static_cast( dstStageMask ), memoryBarrierCount, reinterpret_cast( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast( pImageMemoryBarriers ) ); + } +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::waitEvents( ArrayProxy events, PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, ArrayProxy memoryBarriers, ArrayProxy bufferMemoryBarriers, ArrayProxy imageMemoryBarriers, Dispatch const &d ) const + { + d.vkCmdWaitEvents( m_commandBuffer, events.size() , reinterpret_cast( events.data() ), static_cast( srcStageMask ), static_cast( dstStageMask ), memoryBarriers.size() , reinterpret_cast( memoryBarriers.data() ), bufferMemoryBarriers.size() , reinterpret_cast( bufferMemoryBarriers.data() ), imageMemoryBarriers.size() , reinterpret_cast( imageMemoryBarriers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const &d) const + { + d.vkCmdPipelineBarrier( m_commandBuffer, static_cast( srcStageMask ), static_cast( dstStageMask ), static_cast( dependencyFlags ), memoryBarrierCount, reinterpret_cast( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast( pImageMemoryBarriers ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( PipelineStageFlags srcStageMask, PipelineStageFlags dstStageMask, DependencyFlags dependencyFlags, ArrayProxy memoryBarriers, ArrayProxy bufferMemoryBarriers, ArrayProxy imageMemoryBarriers, Dispatch const &d ) const + { + d.vkCmdPipelineBarrier( m_commandBuffer, static_cast( srcStageMask ), static_cast( dstStageMask ), static_cast( dependencyFlags ), memoryBarriers.size() , reinterpret_cast( memoryBarriers.data() ), bufferMemoryBarriers.size() , reinterpret_cast( bufferMemoryBarriers.data() ), imageMemoryBarriers.size() , reinterpret_cast( imageMemoryBarriers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::beginQuery( QueryPool queryPool, uint32_t query, QueryControlFlags flags, Dispatch const &d) const + { + d.vkCmdBeginQuery( m_commandBuffer, static_cast( queryPool ), query, static_cast( flags ) ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::beginQuery( QueryPool queryPool, uint32_t query, QueryControlFlags flags, Dispatch const &d ) const + { + d.vkCmdBeginQuery( m_commandBuffer, static_cast( queryPool ), query, static_cast( flags ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::endQuery( QueryPool queryPool, uint32_t query, Dispatch const &d) const + { + d.vkCmdEndQuery( m_commandBuffer, static_cast( queryPool ), query ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::endQuery( QueryPool queryPool, uint32_t query, Dispatch const &d ) const + { + d.vkCmdEndQuery( m_commandBuffer, static_cast( queryPool ), query ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin, Dispatch const &d) const + { + d.vkCmdBeginConditionalRenderingEXT( 
m_commandBuffer, reinterpret_cast( pConditionalRenderingBegin ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin, Dispatch const &d ) const + { + d.vkCmdBeginConditionalRenderingEXT( m_commandBuffer, reinterpret_cast( &conditionalRenderingBegin ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::endConditionalRenderingEXT(Dispatch const &d) const + { + d.vkCmdEndConditionalRenderingEXT( m_commandBuffer ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::endConditionalRenderingEXT(Dispatch const &d ) const + { + d.vkCmdEndConditionalRenderingEXT( m_commandBuffer ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::resetQueryPool( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d) const + { + d.vkCmdResetQueryPool( m_commandBuffer, static_cast( queryPool ), firstQuery, queryCount ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::resetQueryPool( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const &d ) const + { + d.vkCmdResetQueryPool( m_commandBuffer, static_cast( queryPool ), firstQuery, queryCount ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::writeTimestamp( PipelineStageFlagBits pipelineStage, QueryPool queryPool, uint32_t query, Dispatch const &d) const + { + d.vkCmdWriteTimestamp( m_commandBuffer, static_cast( pipelineStage ), static_cast( queryPool ), query ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::writeTimestamp( PipelineStageFlagBits pipelineStage, QueryPool queryPool, uint32_t query, Dispatch const &d ) const + { + d.vkCmdWriteTimestamp( m_commandBuffer, static_cast( pipelineStage ), static_cast( queryPool ), query ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Buffer dstBuffer, DeviceSize dstOffset, DeviceSize stride, QueryResultFlags flags, Dispatch const &d) const + { + d.vkCmdCopyQueryPoolResults( m_commandBuffer, static_cast( queryPool ), firstQuery, queryCount, static_cast( dstBuffer ), dstOffset, stride, static_cast( flags ) ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::copyQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Buffer dstBuffer, DeviceSize dstOffset, DeviceSize stride, QueryResultFlags flags, Dispatch const &d ) const + { + d.vkCmdCopyQueryPoolResults( m_commandBuffer, static_cast( queryPool ), firstQuery, queryCount, static_cast( dstBuffer ), dstOffset, stride, static_cast( flags ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::pushConstants( PipelineLayout layout, ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues, Dispatch const &d) const + { + d.vkCmdPushConstants( m_commandBuffer, static_cast( layout ), static_cast( stageFlags ), offset, size, pValues ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pushConstants( PipelineLayout layout, ShaderStageFlags 
stageFlags, uint32_t offset, ArrayProxy values, Dispatch const &d ) const + { + d.vkCmdPushConstants( m_commandBuffer, static_cast( layout ), static_cast( stageFlags ), offset, values.size() * sizeof( T ) , reinterpret_cast( values.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const RenderPassBeginInfo* pRenderPassBegin, SubpassContents contents, Dispatch const &d) const + { + d.vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast( pRenderPassBegin ), static_cast( contents ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const RenderPassBeginInfo & renderPassBegin, SubpassContents contents, Dispatch const &d ) const + { + d.vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast( &renderPassBegin ), static_cast( contents ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass( SubpassContents contents, Dispatch const &d) const + { + d.vkCmdNextSubpass( m_commandBuffer, static_cast( contents ) ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass( SubpassContents contents, Dispatch const &d ) const + { + d.vkCmdNextSubpass( m_commandBuffer, static_cast( contents ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass(Dispatch const &d) const + { + d.vkCmdEndRenderPass( m_commandBuffer ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass(Dispatch const &d ) const + { + d.vkCmdEndRenderPass( m_commandBuffer ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::executeCommands( uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers, Dispatch const &d) const + { + d.vkCmdExecuteCommands( m_commandBuffer, commandBufferCount, reinterpret_cast( pCommandBuffers ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::executeCommands( ArrayProxy commandBuffers, Dispatch const &d ) const + { + d.vkCmdExecuteCommands( m_commandBuffer, commandBuffers.size() , reinterpret_cast( commandBuffers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const &d) const + { + d.vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast( pMarkerInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const &d ) const + { + d.vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast( &markerInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerEndEXT(Dispatch const &d) const + { + d.vkCmdDebugMarkerEndEXT( m_commandBuffer ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerEndEXT(Dispatch const &d ) const + { + d.vkCmdDebugMarkerEndEXT( m_commandBuffer ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const &d) const + { + d.vkCmdDebugMarkerInsertEXT( m_commandBuffer, 
reinterpret_cast( pMarkerInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const &d ) const + { + d.vkCmdDebugMarkerInsertEXT( m_commandBuffer, reinterpret_cast( &markerInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const + { + d.vkCmdDrawIndirectCountAMD( m_commandBuffer, static_cast( buffer ), offset, static_cast( countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const + { + d.vkCmdDrawIndirectCountAMD( m_commandBuffer, static_cast( buffer ), offset, static_cast( countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const + { + d.vkCmdDrawIndexedIndirectCountAMD( m_commandBuffer, static_cast( buffer ), offset, static_cast( countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountAMD( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const + { + d.vkCmdDrawIndexedIndirectCountAMD( m_commandBuffer, static_cast( buffer ), offset, static_cast( countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::processCommandsNVX( const CmdProcessCommandsInfoNVX* pProcessCommandsInfo, Dispatch const &d) const + { + d.vkCmdProcessCommandsNVX( m_commandBuffer, reinterpret_cast( pProcessCommandsInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::processCommandsNVX( const CmdProcessCommandsInfoNVX & processCommandsInfo, Dispatch const &d ) const + { + d.vkCmdProcessCommandsNVX( m_commandBuffer, reinterpret_cast( &processCommandsInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::reserveSpaceForCommandsNVX( const CmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo, Dispatch const &d) const + { + d.vkCmdReserveSpaceForCommandsNVX( m_commandBuffer, reinterpret_cast( pReserveSpaceInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::reserveSpaceForCommandsNVX( const CmdReserveSpaceForCommandsInfoNVX & reserveSpaceInfo, Dispatch const &d ) const + { + d.vkCmdReserveSpaceForCommandsNVX( m_commandBuffer, reinterpret_cast( &reserveSpaceInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const WriteDescriptorSet* pDescriptorWrites, Dispatch const 
&d) const + { + d.vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( layout ), set, descriptorWriteCount, reinterpret_cast( pDescriptorWrites ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( PipelineBindPoint pipelineBindPoint, PipelineLayout layout, uint32_t set, ArrayProxy descriptorWrites, Dispatch const &d ) const + { + d.vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( layout ), set, descriptorWrites.size() , reinterpret_cast( descriptorWrites.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setDeviceMask( uint32_t deviceMask, Dispatch const &d) const + { + d.vkCmdSetDeviceMask( m_commandBuffer, deviceMask ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::setDeviceMask( uint32_t deviceMask, Dispatch const &d ) const + { + d.vkCmdSetDeviceMask( m_commandBuffer, deviceMask ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setDeviceMaskKHR( uint32_t deviceMask, Dispatch const &d) const + { + d.vkCmdSetDeviceMaskKHR( m_commandBuffer, deviceMask ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::setDeviceMaskKHR( uint32_t deviceMask, Dispatch const &d ) const + { + d.vkCmdSetDeviceMaskKHR( m_commandBuffer, deviceMask ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d) const + { + d.vkCmdDispatchBase( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d ) const + { + d.vkCmdDispatchBase( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d) const + { + d.vkCmdDispatchBaseKHR( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const &d ) const + { + d.vkCmdDispatchBaseKHR( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplateKHR( DescriptorUpdateTemplate descriptorUpdateTemplate, PipelineLayout layout, uint32_t set, const void* pData, Dispatch const &d) const + { + d.vkCmdPushDescriptorSetWithTemplateKHR( m_commandBuffer, static_cast( descriptorUpdateTemplate ), static_cast( layout ), set, pData 
); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplateKHR( DescriptorUpdateTemplate descriptorUpdateTemplate, PipelineLayout layout, uint32_t set, const void* pData, Dispatch const &d ) const + { + d.vkCmdPushDescriptorSetWithTemplateKHR( m_commandBuffer, static_cast( descriptorUpdateTemplate ), static_cast( layout ), set, pData ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const ViewportWScalingNV* pViewportWScalings, Dispatch const &d) const + { + d.vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast( pViewportWScalings ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, ArrayProxy viewportWScalings, Dispatch const &d ) const + { + d.vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportWScalings.size() , reinterpret_cast( viewportWScalings.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const Rect2D* pDiscardRectangles, Dispatch const &d) const + { + d.vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangleCount, reinterpret_cast( pDiscardRectangles ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy discardRectangles, Dispatch const &d ) const + { + d.vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangles.size() , reinterpret_cast( discardRectangles.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::setSampleLocationsEXT( const SampleLocationsInfoEXT* pSampleLocationsInfo, Dispatch const &d) const + { + d.vkCmdSetSampleLocationsEXT( m_commandBuffer, reinterpret_cast( pSampleLocationsInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setSampleLocationsEXT( const SampleLocationsInfoEXT & sampleLocationsInfo, Dispatch const &d ) const + { + d.vkCmdSetSampleLocationsEXT( m_commandBuffer, reinterpret_cast( &sampleLocationsInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d) const + { + d.vkCmdBeginDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast( pLabelInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d ) const + { + d.vkCmdBeginDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast( &labelInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::endDebugUtilsLabelEXT(Dispatch const &d) const + { + d.vkCmdEndDebugUtilsLabelEXT( m_commandBuffer ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::endDebugUtilsLabelEXT(Dispatch const &d ) const + { + d.vkCmdEndDebugUtilsLabelEXT( m_commandBuffer ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT* 
pLabelInfo, Dispatch const &d) const + { + d.vkCmdInsertDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast( pLabelInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d ) const + { + d.vkCmdInsertDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast( &labelInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::writeBufferMarkerAMD( PipelineStageFlagBits pipelineStage, Buffer dstBuffer, DeviceSize dstOffset, uint32_t marker, Dispatch const &d) const + { + d.vkCmdWriteBufferMarkerAMD( m_commandBuffer, static_cast( pipelineStage ), static_cast( dstBuffer ), dstOffset, marker ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::writeBufferMarkerAMD( PipelineStageFlagBits pipelineStage, Buffer dstBuffer, DeviceSize dstOffset, uint32_t marker, Dispatch const &d ) const + { + d.vkCmdWriteBufferMarkerAMD( m_commandBuffer, static_cast( pipelineStage ), static_cast( dstBuffer ), dstOffset, marker ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2KHR( const RenderPassBeginInfo* pRenderPassBegin, const SubpassBeginInfoKHR* pSubpassBeginInfo, Dispatch const &d) const + { + d.vkCmdBeginRenderPass2KHR( m_commandBuffer, reinterpret_cast( pRenderPassBegin ), reinterpret_cast( pSubpassBeginInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2KHR( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfoKHR & subpassBeginInfo, Dispatch const &d ) const + { + d.vkCmdBeginRenderPass2KHR( m_commandBuffer, reinterpret_cast( &renderPassBegin ), reinterpret_cast( &subpassBeginInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2KHR( const SubpassBeginInfoKHR* pSubpassBeginInfo, const SubpassEndInfoKHR* pSubpassEndInfo, Dispatch const &d) const + { + d.vkCmdNextSubpass2KHR( m_commandBuffer, reinterpret_cast( pSubpassBeginInfo ), reinterpret_cast( pSubpassEndInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2KHR( const SubpassBeginInfoKHR & subpassBeginInfo, const SubpassEndInfoKHR & subpassEndInfo, Dispatch const &d ) const + { + d.vkCmdNextSubpass2KHR( m_commandBuffer, reinterpret_cast( &subpassBeginInfo ), reinterpret_cast( &subpassEndInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2KHR( const SubpassEndInfoKHR* pSubpassEndInfo, Dispatch const &d) const + { + d.vkCmdEndRenderPass2KHR( m_commandBuffer, reinterpret_cast( pSubpassEndInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2KHR( const SubpassEndInfoKHR & subpassEndInfo, Dispatch const &d ) const + { + d.vkCmdEndRenderPass2KHR( m_commandBuffer, reinterpret_cast( &subpassEndInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountKHR( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const + { + d.vkCmdDrawIndirectCountKHR( m_commandBuffer, static_cast( buffer ), offset, static_cast( 
countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountKHR( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const + { + d.vkCmdDrawIndirectCountKHR( m_commandBuffer, static_cast( buffer ), offset, static_cast( countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountKHR( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const + { + d.vkCmdDrawIndexedIndirectCountKHR( m_commandBuffer, static_cast( buffer ), offset, static_cast( countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountKHR( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const + { + d.vkCmdDrawIndexedIndirectCountKHR( m_commandBuffer, static_cast( buffer ), offset, static_cast( countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setCheckpointNV( const void* pCheckpointMarker, Dispatch const &d) const + { + d.vkCmdSetCheckpointNV( m_commandBuffer, pCheckpointMarker ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::setCheckpointNV( const void* pCheckpointMarker, Dispatch const &d ) const + { + d.vkCmdSetCheckpointNV( m_commandBuffer, pCheckpointMarker ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::setExclusiveScissorNV( uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const Rect2D* pExclusiveScissors, Dispatch const &d) const + { + d.vkCmdSetExclusiveScissorNV( m_commandBuffer, firstExclusiveScissor, exclusiveScissorCount, reinterpret_cast( pExclusiveScissors ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setExclusiveScissorNV( uint32_t firstExclusiveScissor, ArrayProxy exclusiveScissors, Dispatch const &d ) const + { + d.vkCmdSetExclusiveScissorNV( m_commandBuffer, firstExclusiveScissor, exclusiveScissors.size() , reinterpret_cast( exclusiveScissors.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::bindShadingRateImageNV( ImageView imageView, ImageLayout imageLayout, Dispatch const &d) const + { + d.vkCmdBindShadingRateImageNV( m_commandBuffer, static_cast( imageView ), static_cast( imageLayout ) ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::bindShadingRateImageNV( ImageView imageView, ImageLayout imageLayout, Dispatch const &d ) const + { + d.vkCmdBindShadingRateImageNV( m_commandBuffer, static_cast( imageView ), static_cast( imageLayout ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportShadingRatePaletteNV( uint32_t firstViewport, uint32_t viewportCount, const ShadingRatePaletteNV* pShadingRatePalettes, Dispatch const &d) const + { + d.vkCmdSetViewportShadingRatePaletteNV( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast( 
pShadingRatePalettes ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportShadingRatePaletteNV( uint32_t firstViewport, ArrayProxy shadingRatePalettes, Dispatch const &d ) const + { + d.vkCmdSetViewportShadingRatePaletteNV( m_commandBuffer, firstViewport, shadingRatePalettes.size() , reinterpret_cast( shadingRatePalettes.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::setCoarseSampleOrderNV( CoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const CoarseSampleOrderCustomNV* pCustomSampleOrders, Dispatch const &d) const + { + d.vkCmdSetCoarseSampleOrderNV( m_commandBuffer, static_cast( sampleOrderType ), customSampleOrderCount, reinterpret_cast( pCustomSampleOrders ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setCoarseSampleOrderNV( CoarseSampleOrderTypeNV sampleOrderType, ArrayProxy customSampleOrders, Dispatch const &d ) const + { + d.vkCmdSetCoarseSampleOrderNV( m_commandBuffer, static_cast( sampleOrderType ), customSampleOrders.size() , reinterpret_cast( customSampleOrders.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const &d) const + { + d.vkCmdDrawMeshTasksNV( m_commandBuffer, taskCount, firstTask ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const &d ) const + { + d.vkCmdDrawMeshTasksNV( m_commandBuffer, taskCount, firstTask ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectNV( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d) const + { + d.vkCmdDrawMeshTasksIndirectNV( m_commandBuffer, static_cast( buffer ), offset, drawCount, stride ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectNV( Buffer buffer, DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const &d ) const + { + d.vkCmdDrawMeshTasksIndirectNV( m_commandBuffer, static_cast( buffer ), offset, drawCount, stride ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectCountNV( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d) const + { + d.vkCmdDrawMeshTasksIndirectCountNV( m_commandBuffer, static_cast( buffer ), offset, static_cast( countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectCountNV( Buffer buffer, DeviceSize offset, Buffer countBuffer, DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const &d ) const + { + d.vkCmdDrawMeshTasksIndirectCountNV( m_commandBuffer, static_cast( buffer ), offset, static_cast( countBuffer ), countBufferOffset, maxDrawCount, stride ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureNVX( AccelerationStructureNVX dst, AccelerationStructureNVX src, CopyAccelerationStructureModeNVX mode, Dispatch 
const &d) const + { + d.vkCmdCopyAccelerationStructureNVX( m_commandBuffer, static_cast( dst ), static_cast( src ), static_cast( mode ) ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureNVX( AccelerationStructureNVX dst, AccelerationStructureNVX src, CopyAccelerationStructureModeNVX mode, Dispatch const &d ) const + { + d.vkCmdCopyAccelerationStructureNVX( m_commandBuffer, static_cast( dst ), static_cast( src ), static_cast( mode ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructurePropertiesNVX( AccelerationStructureNVX accelerationStructure, QueryType queryType, QueryPool queryPool, uint32_t query, Dispatch const &d) const + { + d.vkCmdWriteAccelerationStructurePropertiesNVX( m_commandBuffer, static_cast( accelerationStructure ), static_cast( queryType ), static_cast( queryPool ), query ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructurePropertiesNVX( AccelerationStructureNVX accelerationStructure, QueryType queryType, QueryPool queryPool, uint32_t query, Dispatch const &d ) const + { + d.vkCmdWriteAccelerationStructurePropertiesNVX( m_commandBuffer, static_cast( accelerationStructure ), static_cast( queryType ), static_cast( queryPool ), query ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNVX( AccelerationStructureTypeNVX type, uint32_t instanceCount, Buffer instanceData, DeviceSize instanceOffset, uint32_t geometryCount, const GeometryNVX* pGeometries, BuildAccelerationStructureFlagsNVX flags, Bool32 update, AccelerationStructureNVX dst, AccelerationStructureNVX src, Buffer scratch, DeviceSize scratchOffset, Dispatch const &d) const + { + d.vkCmdBuildAccelerationStructureNVX( m_commandBuffer, static_cast( type ), instanceCount, static_cast( instanceData ), instanceOffset, geometryCount, reinterpret_cast( pGeometries ), static_cast( flags ), update, static_cast( dst ), static_cast( src ), static_cast( scratch ), scratchOffset ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNVX( AccelerationStructureTypeNVX type, uint32_t instanceCount, Buffer instanceData, DeviceSize instanceOffset, ArrayProxy geometries, BuildAccelerationStructureFlagsNVX flags, Bool32 update, AccelerationStructureNVX dst, AccelerationStructureNVX src, Buffer scratch, DeviceSize scratchOffset, Dispatch const &d ) const + { + d.vkCmdBuildAccelerationStructureNVX( m_commandBuffer, static_cast( type ), instanceCount, static_cast( instanceData ), instanceOffset, geometries.size() , reinterpret_cast( geometries.data() ), static_cast( flags ), update, static_cast( dst ), static_cast( src ), static_cast( scratch ), scratchOffset ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::traceRaysNVX( Buffer raygenShaderBindingTableBuffer, DeviceSize raygenShaderBindingOffset, Buffer missShaderBindingTableBuffer, DeviceSize missShaderBindingOffset, DeviceSize missShaderBindingStride, Buffer hitShaderBindingTableBuffer, DeviceSize hitShaderBindingOffset, DeviceSize hitShaderBindingStride, uint32_t width, uint32_t height, Dispatch const &d) const + { + d.vkCmdTraceRaysNVX( m_commandBuffer, static_cast( raygenShaderBindingTableBuffer ), raygenShaderBindingOffset, static_cast( 
missShaderBindingTableBuffer ), missShaderBindingOffset, missShaderBindingStride, static_cast( hitShaderBindingTableBuffer ), hitShaderBindingOffset, hitShaderBindingStride, width, height ); + } +#else + template + VULKAN_HPP_INLINE void CommandBuffer::traceRaysNVX( Buffer raygenShaderBindingTableBuffer, DeviceSize raygenShaderBindingOffset, Buffer missShaderBindingTableBuffer, DeviceSize missShaderBindingOffset, DeviceSize missShaderBindingStride, Buffer hitShaderBindingTableBuffer, DeviceSize hitShaderBindingOffset, DeviceSize hitShaderBindingStride, uint32_t width, uint32_t height, Dispatch const &d ) const + { + d.vkCmdTraceRaysNVX( m_commandBuffer, static_cast( raygenShaderBindingTableBuffer ), raygenShaderBindingOffset, static_cast( missShaderBindingTableBuffer ), missShaderBindingOffset, missShaderBindingStride, static_cast( hitShaderBindingTableBuffer ), hitShaderBindingOffset, hitShaderBindingStride, width, height ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + struct SubmitInfo + { + SubmitInfo( uint32_t waitSemaphoreCount_ = 0, + const Semaphore* pWaitSemaphores_ = nullptr, + const PipelineStageFlags* pWaitDstStageMask_ = nullptr, + uint32_t commandBufferCount_ = 0, + const CommandBuffer* pCommandBuffers_ = nullptr, + uint32_t signalSemaphoreCount_ = 0, + const Semaphore* pSignalSemaphores_ = nullptr ) + : waitSemaphoreCount( waitSemaphoreCount_ ) + , pWaitSemaphores( pWaitSemaphores_ ) + , pWaitDstStageMask( pWaitDstStageMask_ ) + , commandBufferCount( commandBufferCount_ ) + , pCommandBuffers( pCommandBuffers_ ) + , signalSemaphoreCount( signalSemaphoreCount_ ) + , pSignalSemaphores( pSignalSemaphores_ ) + { + } + + SubmitInfo( VkSubmitInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SubmitInfo ) ); + } + + SubmitInfo& operator=( VkSubmitInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( SubmitInfo ) ); + return *this; + } + SubmitInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + SubmitInfo& setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ ) + { + waitSemaphoreCount = waitSemaphoreCount_; + return *this; + } + + SubmitInfo& setPWaitSemaphores( const Semaphore* pWaitSemaphores_ ) + { + pWaitSemaphores = pWaitSemaphores_; + return *this; + } + + SubmitInfo& setPWaitDstStageMask( const PipelineStageFlags* pWaitDstStageMask_ ) + { + pWaitDstStageMask = pWaitDstStageMask_; + return *this; + } + + SubmitInfo& setCommandBufferCount( uint32_t commandBufferCount_ ) + { + commandBufferCount = commandBufferCount_; + return *this; + } + + SubmitInfo& setPCommandBuffers( const CommandBuffer* pCommandBuffers_ ) + { + pCommandBuffers = pCommandBuffers_; + return *this; + } + + SubmitInfo& setSignalSemaphoreCount( uint32_t signalSemaphoreCount_ ) + { + signalSemaphoreCount = signalSemaphoreCount_; + return *this; + } + + SubmitInfo& setPSignalSemaphores( const Semaphore* pSignalSemaphores_ ) + { + pSignalSemaphores = pSignalSemaphores_; + return *this; + } + + operator VkSubmitInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkSubmitInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( SubmitInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreCount == rhs.waitSemaphoreCount ) + && ( pWaitSemaphores == rhs.pWaitSemaphores ) + && ( pWaitDstStageMask == rhs.pWaitDstStageMask ) + && ( commandBufferCount == rhs.commandBufferCount ) + && ( pCommandBuffers == rhs.pCommandBuffers ) + && ( signalSemaphoreCount == rhs.signalSemaphoreCount ) + && ( 
pSignalSemaphores == rhs.pSignalSemaphores ); + } + + bool operator!=( SubmitInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eSubmitInfo; + + public: + const void* pNext = nullptr; + uint32_t waitSemaphoreCount; + const Semaphore* pWaitSemaphores; + const PipelineStageFlags* pWaitDstStageMask; + uint32_t commandBufferCount; + const CommandBuffer* pCommandBuffers; + uint32_t signalSemaphoreCount; + const Semaphore* pSignalSemaphores; + }; + static_assert( sizeof( SubmitInfo ) == sizeof( VkSubmitInfo ), "struct and wrapper have different size!" ); + + class Queue + { + public: + VULKAN_HPP_CONSTEXPR Queue() + : m_queue(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Queue( std::nullptr_t ) + : m_queue(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Queue( VkQueue queue ) + : m_queue( queue ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Queue & operator=(VkQueue queue) + { + m_queue = queue; + return *this; + } +#endif + + Queue & operator=( std::nullptr_t ) + { + m_queue = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Queue const & rhs ) const + { + return m_queue == rhs.m_queue; + } + + bool operator!=(Queue const & rhs ) const + { + return m_queue != rhs.m_queue; + } + + bool operator<(Queue const & rhs ) const + { + return m_queue < rhs.m_queue; + } + + template + Result submit( uint32_t submitCount, const SubmitInfo* pSubmits, Fence fence, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type submit( ArrayProxy submits, Fence fence, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result waitIdle(Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type waitIdle(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result bindSparse( uint32_t bindInfoCount, const BindSparseInfo* pBindInfo, Fence fence, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type bindSparse( ArrayProxy bindInfo, Fence fence, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result presentKHR( const PresentInfoKHR* pPresentInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result presentKHR( const PresentInfoKHR & presentInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void endDebugUtilsLabelEXT(Dispatch const &d = Dispatch() ) const; + + template + void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getCheckpointDataNV( uint32_t* pCheckpointDataCount, CheckpointDataNV* pCheckpointData, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , 
typename Dispatch = DispatchLoaderStatic> + std::vector getCheckpointDataNV(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkQueue() const + { + return m_queue; + } + + explicit operator bool() const + { + return m_queue != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_queue == VK_NULL_HANDLE; + } + + private: + VkQueue m_queue; + }; + + static_assert( sizeof( Queue ) == sizeof( VkQueue ), "handle and wrapper have different size!" ); + + template + VULKAN_HPP_INLINE Result Queue::submit( uint32_t submitCount, const SubmitInfo* pSubmits, Fence fence, Dispatch const &d) const + { + return static_cast( d.vkQueueSubmit( m_queue, submitCount, reinterpret_cast( pSubmits ), static_cast( fence ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Queue::submit( ArrayProxy submits, Fence fence, Dispatch const &d ) const + { + Result result = static_cast( d.vkQueueSubmit( m_queue, submits.size() , reinterpret_cast( submits.data() ), static_cast( fence ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Queue::submit" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Queue::waitIdle(Dispatch const &d) const + { + return static_cast( d.vkQueueWaitIdle( m_queue ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type Queue::waitIdle(Dispatch const &d ) const + { + Result result = static_cast( d.vkQueueWaitIdle( m_queue ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Queue::waitIdle" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Queue::bindSparse( uint32_t bindInfoCount, const BindSparseInfo* pBindInfo, Fence fence, Dispatch const &d) const + { + return static_cast( d.vkQueueBindSparse( m_queue, bindInfoCount, reinterpret_cast( pBindInfo ), static_cast( fence ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Queue::bindSparse( ArrayProxy bindInfo, Fence fence, Dispatch const &d ) const + { + Result result = static_cast( d.vkQueueBindSparse( m_queue, bindInfo.size() , reinterpret_cast( bindInfo.data() ), static_cast( fence ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Queue::bindSparse" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Queue::presentKHR( const PresentInfoKHR* pPresentInfo, Dispatch const &d) const + { + return static_cast( d.vkQueuePresentKHR( m_queue, reinterpret_cast( pPresentInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Queue::presentKHR( const PresentInfoKHR & presentInfo, Dispatch const &d ) const + { + Result result = static_cast( d.vkQueuePresentKHR( m_queue, reinterpret_cast( &presentInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Queue::presentKHR", { Result::eSuccess, Result::eSuboptimalKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Queue::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d) const + { + d.vkQueueBeginDebugUtilsLabelEXT( m_queue, reinterpret_cast( pLabelInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Queue::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d ) const + { + 
d.vkQueueBeginDebugUtilsLabelEXT( m_queue, reinterpret_cast( &labelInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Queue::endDebugUtilsLabelEXT(Dispatch const &d) const + { + d.vkQueueEndDebugUtilsLabelEXT( m_queue ); + } +#else + template + VULKAN_HPP_INLINE void Queue::endDebugUtilsLabelEXT(Dispatch const &d ) const + { + d.vkQueueEndDebugUtilsLabelEXT( m_queue ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Queue::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT* pLabelInfo, Dispatch const &d) const + { + d.vkQueueInsertDebugUtilsLabelEXT( m_queue, reinterpret_cast( pLabelInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Queue::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const &d ) const + { + d.vkQueueInsertDebugUtilsLabelEXT( m_queue, reinterpret_cast( &labelInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Queue::getCheckpointDataNV( uint32_t* pCheckpointDataCount, CheckpointDataNV* pCheckpointData, Dispatch const &d) const + { + d.vkGetQueueCheckpointDataNV( m_queue, pCheckpointDataCount, reinterpret_cast( pCheckpointData ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE std::vector Queue::getCheckpointDataNV(Dispatch const &d ) const + { + std::vector checkpointData; + uint32_t checkpointDataCount; + d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, nullptr ); + checkpointData.resize( checkpointDataCount ); + d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, reinterpret_cast( checkpointData.data() ) ); + return checkpointData; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + class Device; + + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueAccelerationStructureNVX = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueBuffer = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueBufferView = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = PoolFree; }; + using UniqueCommandBuffer = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueCommandPool = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueDescriptorPool = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = PoolFree; }; + using UniqueDescriptorSet = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueDescriptorSetLayout = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueDescriptorUpdateTemplate = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectFree; }; + using UniqueDeviceMemory = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueEvent = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueFence = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueFramebuffer = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueImage = 
UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueImageView = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueIndirectCommandsLayoutNVX = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueObjectTableNVX = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniquePipeline = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniquePipelineCache = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniquePipelineLayout = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueQueryPool = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueRenderPass = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueSampler = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueSamplerYcbcrConversion = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueSemaphore = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueShaderModule = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueSwapchainKHR = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueValidationCacheEXT = UniqueHandle; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + + class Device + { + public: + VULKAN_HPP_CONSTEXPR Device() + : m_device(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Device( std::nullptr_t ) + : m_device(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Device( VkDevice device ) + : m_device( device ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Device & operator=(VkDevice device) + { + m_device = device; + return *this; + } +#endif + + Device & operator=( std::nullptr_t ) + { + m_device = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Device const & rhs ) const + { + return m_device == rhs.m_device; + } + + bool operator!=(Device const & rhs ) const + { + return m_device != rhs.m_device; + } + + bool operator<(Device const & rhs ) const + { + return m_device < rhs.m_device; + } + + template + PFN_vkVoidFunction getProcAddr( const char* pName, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PFN_vkVoidFunction getProcAddr( const std::string & name, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Queue* pQueue, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Queue getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result 
waitIdle(Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type waitIdle(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result allocateMemory( const MemoryAllocateInfo* pAllocateInfo, const AllocationCallbacks* pAllocator, DeviceMemory* pMemory, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void freeMemory( DeviceMemory memory, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void freeMemory( DeviceMemory memory, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void free( DeviceMemory memory, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void free( DeviceMemory memory, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result mapMemory( DeviceMemory memory, DeviceSize offset, DeviceSize size, MemoryMapFlags flags, void** ppData, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type mapMemory( DeviceMemory memory, DeviceSize offset, DeviceSize size, MemoryMapFlags flags = MemoryMapFlags(), Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void unmapMemory( DeviceMemory memory, Dispatch const &d = Dispatch() ) const; + + template + Result flushMappedMemoryRanges( uint32_t memoryRangeCount, const MappedMemoryRange* pMemoryRanges, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type flushMappedMemoryRanges( ArrayProxy memoryRanges, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const MappedMemoryRange* pMemoryRanges, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type invalidateMappedMemoryRanges( ArrayProxy memoryRanges, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getMemoryCommitment( DeviceMemory memory, DeviceSize* pCommittedMemoryInBytes, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + DeviceSize getMemoryCommitment( DeviceMemory memory, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getBufferMemoryRequirements( Buffer buffer, MemoryRequirements* pMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + MemoryRequirements getBufferMemoryRequirements( Buffer buffer, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result 
bindBufferMemory( Buffer buffer, DeviceMemory memory, DeviceSize memoryOffset, Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type bindBufferMemory( Buffer buffer, DeviceMemory memory, DeviceSize memoryOffset, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getImageMemoryRequirements( Image image, MemoryRequirements* pMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + MemoryRequirements getImageMemoryRequirements( Image image, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result bindImageMemory( Image image, DeviceMemory memory, DeviceSize memoryOffset, Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type bindImageMemory( Image image, DeviceMemory memory, DeviceSize memoryOffset, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getImageSparseMemoryRequirements( Image image, uint32_t* pSparseMemoryRequirementCount, SparseImageMemoryRequirements* pSparseMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + std::vector getImageSparseMemoryRequirements( Image image, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createFence( const FenceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Fence* pFence, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createFence( const FenceCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createFenceUnique( const FenceCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyFence( Fence fence, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyFence( Fence fence, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( Fence fence, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Fence fence, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result resetFences( uint32_t fenceCount, const Fence* pFences, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type resetFences( ArrayProxy fences, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getFenceStatus( Fence fence, Dispatch const &d = Dispatch() ) const; + + template + Result waitForFences( uint32_t fenceCount, const Fence* pFences, Bool32 waitAll, uint64_t timeout, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result waitForFences( ArrayProxy fences, Bool32 waitAll, uint64_t timeout, Dispatch const &d = Dispatch() ) const; +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createSemaphore( const SemaphoreCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Semaphore* pSemaphore, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createSemaphore( const SemaphoreCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroySemaphore( Semaphore semaphore, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySemaphore( Semaphore semaphore, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( Semaphore semaphore, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Semaphore semaphore, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createEvent( const EventCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Event* pEvent, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createEvent( const EventCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createEventUnique( const EventCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyEvent( Event event, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyEvent( Event event, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( Event event, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Event event, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getEventStatus( Event event, Dispatch const &d = Dispatch() ) const; + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result setEvent( Event event, Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type setEvent( Event event, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result resetEvent( Event event, Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type resetEvent( Event event, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createQueryPool( const QueryPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, QueryPool* pQueryPool, Dispatch const &d = Dispatch() ) const; +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createQueryPool( const QueryPoolCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyQueryPool( QueryPool queryPool, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyQueryPool( QueryPool queryPool, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( QueryPool queryPool, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( QueryPool queryPool, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, DeviceSize stride, QueryResultFlags flags, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result getQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, ArrayProxy data, DeviceSize stride, QueryResultFlags flags, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createBuffer( const BufferCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Buffer* pBuffer, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createBuffer( const BufferCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createBufferUnique( const BufferCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyBuffer( Buffer buffer, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyBuffer( Buffer buffer, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( Buffer buffer, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Buffer buffer, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createBufferView( const BufferViewCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, BufferView* pView, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createBufferView( const BufferViewCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createBufferViewUnique( const BufferViewCreateInfo & createInfo, Optional allocator 
= nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyBufferView( BufferView bufferView, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyBufferView( BufferView bufferView, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( BufferView bufferView, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( BufferView bufferView, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createImage( const ImageCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Image* pImage, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createImage( const ImageCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createImageUnique( const ImageCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyImage( Image image, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyImage( Image image, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( Image image, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Image image, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getImageSubresourceLayout( Image image, const ImageSubresource* pSubresource, SubresourceLayout* pLayout, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + SubresourceLayout getImageSubresourceLayout( Image image, const ImageSubresource & subresource, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createImageView( const ImageViewCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, ImageView* pView, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createImageView( const ImageViewCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyImageView( ImageView imageView, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyImageView( ImageView imageView, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( ImageView imageView, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( ImageView imageView, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createShaderModule( const ShaderModuleCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, ShaderModule* pShaderModule, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyShaderModule( ShaderModule shaderModule, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyShaderModule( ShaderModule shaderModule, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( ShaderModule shaderModule, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( ShaderModule shaderModule, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createPipelineCache( const PipelineCacheCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, PipelineCache* pPipelineCache, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyPipelineCache( PipelineCache pipelineCache, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyPipelineCache( PipelineCache pipelineCache, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( PipelineCache pipelineCache, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( PipelineCache pipelineCache, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getPipelineCacheData( PipelineCache pipelineCache, size_t* pDataSize, void* pData, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getPipelineCacheData( PipelineCache 
pipelineCache, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result mergePipelineCaches( PipelineCache dstCache, uint32_t srcCacheCount, const PipelineCache* pSrcCaches, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type mergePipelineCaches( PipelineCache dstCache, ArrayProxy srcCaches, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createGraphicsPipelines( PipelineCache pipelineCache, uint32_t createInfoCount, const GraphicsPipelineCreateInfo* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type createGraphicsPipelines( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; + template , typename Dispatch = DispatchLoaderStatic> + ResultValueType::type createGraphicsPipeline( PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType,Allocator>>::type createGraphicsPipelinesUnique( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type createGraphicsPipelineUnique( PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createComputePipelines( PipelineCache pipelineCache, uint32_t createInfoCount, const ComputePipelineCreateInfo* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type createComputePipelines( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; + template , typename Dispatch = DispatchLoaderStatic> + ResultValueType::type createComputePipeline( PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType,Allocator>>::type createComputePipelinesUnique( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type createComputePipelineUnique( PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyPipeline( Pipeline pipeline, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyPipeline( Pipeline pipeline, 
Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( Pipeline pipeline, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Pipeline pipeline, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createPipelineLayout( const PipelineLayoutCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, PipelineLayout* pPipelineLayout, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyPipelineLayout( PipelineLayout pipelineLayout, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyPipelineLayout( PipelineLayout pipelineLayout, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( PipelineLayout pipelineLayout, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( PipelineLayout pipelineLayout, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createSampler( const SamplerCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Sampler* pSampler, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createSampler( const SamplerCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createSamplerUnique( const SamplerCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroySampler( Sampler sampler, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySampler( Sampler sampler, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( Sampler sampler, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Sampler sampler, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorSetLayout* pSetLayout, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createDescriptorSetLayout( 
const DescriptorSetLayoutCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( DescriptorSetLayout descriptorSetLayout, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( DescriptorSetLayout descriptorSetLayout, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createDescriptorPool( const DescriptorPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorPool* pDescriptorPool, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyDescriptorPool( DescriptorPool descriptorPool, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDescriptorPool( DescriptorPool descriptorPool, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( DescriptorPool descriptorPool, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( DescriptorPool descriptorPool, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result resetDescriptorPool( DescriptorPool descriptorPool, DescriptorPoolResetFlags flags = DescriptorPoolResetFlags(), Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type resetDescriptorPool( DescriptorPool descriptorPool, DescriptorPoolResetFlags flags = DescriptorPoolResetFlags(), Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result allocateDescriptorSets( const DescriptorSetAllocateInfo* pAllocateInfo, DescriptorSet* pDescriptorSets, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const &d = Dispatch() ) const; 
+#ifndef VULKAN_HPP_NO_SMART_HANDLE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType,Allocator>>::type allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result freeDescriptorSets( DescriptorPool descriptorPool, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type freeDescriptorSets( DescriptorPool descriptorPool, ArrayProxy descriptorSets, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result free( DescriptorPool descriptorPool, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type free( DescriptorPool descriptorPool, ArrayProxy descriptorSets, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void updateDescriptorSets( uint32_t descriptorWriteCount, const WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const CopyDescriptorSet* pDescriptorCopies, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void updateDescriptorSets( ArrayProxy descriptorWrites, ArrayProxy descriptorCopies, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createFramebuffer( const FramebufferCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Framebuffer* pFramebuffer, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createFramebuffer( const FramebufferCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyFramebuffer( Framebuffer framebuffer, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyFramebuffer( Framebuffer framebuffer, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( Framebuffer framebuffer, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Framebuffer framebuffer, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createRenderPass( const RenderPassCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, RenderPass* pRenderPass, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createRenderPass( const RenderPassCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createRenderPassUnique( const RenderPassCreateInfo & createInfo, 
Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyRenderPass( RenderPass renderPass, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyRenderPass( RenderPass renderPass, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( RenderPass renderPass, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( RenderPass renderPass, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getRenderAreaGranularity( RenderPass renderPass, Extent2D* pGranularity, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Extent2D getRenderAreaGranularity( RenderPass renderPass, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createCommandPool( const CommandPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, CommandPool* pCommandPool, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createCommandPool( const CommandPoolCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyCommandPool( CommandPool commandPool, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyCommandPool( CommandPool commandPool, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( CommandPool commandPool, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( CommandPool commandPool, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result resetCommandPool( CommandPool commandPool, CommandPoolResetFlags flags, Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type resetCommandPool( CommandPool commandPool, CommandPoolResetFlags flags, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result allocateCommandBuffers( const CommandBufferAllocateInfo* pAllocateInfo, CommandBuffer* pCommandBuffers, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType,Allocator>>::type allocateCommandBuffersUnique( const 
CommandBufferAllocateInfo & allocateInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void freeCommandBuffers( CommandPool commandPool, uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void freeCommandBuffers( CommandPool commandPool, ArrayProxy commandBuffers, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void free( CommandPool commandPool, uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void free( CommandPool commandPool, ArrayProxy commandBuffers, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createSharedSwapchainsKHR( uint32_t swapchainCount, const SwapchainCreateInfoKHR* pCreateInfos, const AllocationCallbacks* pAllocator, SwapchainKHR* pSwapchains, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type createSharedSwapchainsKHR( ArrayProxy createInfos, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; + template , typename Dispatch = DispatchLoaderStatic> + ResultValueType::type createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType,Allocator>>::type createSharedSwapchainsKHRUnique( ArrayProxy createInfos, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createSwapchainKHR( const SwapchainCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SwapchainKHR* pSwapchain, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroySwapchainKHR( SwapchainKHR swapchain, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySwapchainKHR( SwapchainKHR swapchain, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( SwapchainKHR swapchain, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( SwapchainKHR swapchain, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; 
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getSwapchainImagesKHR( SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, Image* pSwapchainImages, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getSwapchainImagesKHR( SwapchainKHR swapchain, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result acquireNextImageKHR( SwapchainKHR swapchain, uint64_t timeout, Semaphore semaphore, Fence fence, uint32_t* pImageIndex, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValue acquireNextImageKHR( SwapchainKHR swapchain, uint64_t timeout, Semaphore semaphore, Fence fence, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT* pNameInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT* pTagInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_WIN32_NV + template + Result getMemoryWin32HandleNV( DeviceMemory memory, ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getMemoryWin32HandleNV( DeviceMemory memory, ExternalMemoryHandleTypeFlagsNV handleType, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_NV*/ + + template + Result createIndirectCommandsLayoutNVX( const IndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, IndirectCommandsLayoutNVX* pIndirectCommandsLayout, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createIndirectCommandsLayoutNVX( const IndirectCommandsLayoutCreateInfoNVX & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createIndirectCommandsLayoutNVXUnique( const IndirectCommandsLayoutCreateInfoNVX & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyIndirectCommandsLayoutNVX( IndirectCommandsLayoutNVX indirectCommandsLayout, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyIndirectCommandsLayoutNVX( IndirectCommandsLayoutNVX indirectCommandsLayout, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( IndirectCommandsLayoutNVX indirectCommandsLayout, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; 
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( IndirectCommandsLayoutNVX indirectCommandsLayout, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createObjectTableNVX( const ObjectTableCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, ObjectTableNVX* pObjectTable, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createObjectTableNVX( const ObjectTableCreateInfoNVX & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createObjectTableNVXUnique( const ObjectTableCreateInfoNVX & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyObjectTableNVX( ObjectTableNVX objectTable, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyObjectTableNVX( ObjectTableNVX objectTable, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( ObjectTableNVX objectTable, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( ObjectTableNVX objectTable, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result registerObjectsNVX( ObjectTableNVX objectTable, uint32_t objectCount, const ObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type registerObjectsNVX( ObjectTableNVX objectTable, ArrayProxy pObjectTableEntries, ArrayProxy objectIndices, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result unregisterObjectsNVX( ObjectTableNVX objectTable, uint32_t objectCount, const ObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type unregisterObjectsNVX( ObjectTableNVX objectTable, ArrayProxy objectEntryTypes, ArrayProxy objectIndices, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void trimCommandPool( CommandPool commandPool, CommandPoolTrimFlags flags = CommandPoolTrimFlags(), Dispatch const &d = Dispatch() ) const; + + template + void trimCommandPoolKHR( CommandPool commandPool, CommandPoolTrimFlags flags = CommandPoolTrimFlags(), Dispatch const &d = Dispatch() ) const; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + Result getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + Result getMemoryWin32HandlePropertiesKHR( 
ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, MemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getMemoryWin32HandlePropertiesKHR( ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + template + Result getMemoryFdKHR( const MemoryGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getMemoryFdPropertiesKHR( ExternalMemoryHandleTypeFlagBits handleType, int fd, MemoryFdPropertiesKHR* pMemoryFdProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getMemoryFdPropertiesKHR( ExternalMemoryHandleTypeFlagBits handleType, int fd, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + Result getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + Result importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + template + Result getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR & getFdInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + Result getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + Result importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR* 
pImportFenceWin32HandleInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + template + Result getFenceFdKHR( const FenceGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result importFenceFdKHR( const ImportFenceFdInfoKHR* pImportFenceFdInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result displayPowerControlEXT( DisplayKHR display, const DisplayPowerInfoEXT* pDisplayPowerInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type displayPowerControlEXT( DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result registerEventEXT( const DeviceEventInfoEXT* pDeviceEventInfo, const AllocationCallbacks* pAllocator, Fence* pFence, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result registerDisplayEventEXT( DisplayKHR display, const DisplayEventInfoEXT* pDisplayEventInfo, const AllocationCallbacks* pAllocator, Fence* pFence, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type registerDisplayEventEXT( DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getSwapchainCounterEXT( SwapchainKHR swapchain, SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getSwapchainCounterEXT( SwapchainKHR swapchain, SurfaceCounterFlagBitsEXT counter, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PeerMemoryFeatureFlags getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + 
PeerMemoryFeatureFlags getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result bindBufferMemory2( uint32_t bindInfoCount, const BindBufferMemoryInfo* pBindInfos, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type bindBufferMemory2( ArrayProxy bindInfos, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result bindBufferMemory2KHR( uint32_t bindInfoCount, const BindBufferMemoryInfo* pBindInfos, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type bindBufferMemory2KHR( ArrayProxy bindInfos, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result bindImageMemory2( uint32_t bindInfoCount, const BindImageMemoryInfo* pBindInfos, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type bindImageMemory2( ArrayProxy bindInfos, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result bindImageMemory2KHR( uint32_t bindInfoCount, const BindImageMemoryInfo* pBindInfos, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type bindImageMemory2KHR( ArrayProxy bindInfos, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getGroupPresentCapabilitiesKHR( DeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getGroupPresentCapabilitiesKHR(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getGroupSurfacePresentModesKHR( SurfaceKHR surface, DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getGroupSurfacePresentModesKHR( SurfaceKHR surface, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result acquireNextImage2KHR( const AcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValue acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createDescriptorUpdateTemplateUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result 
createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyDescriptorUpdateTemplate( DescriptorUpdateTemplate descriptorUpdateTemplate, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDescriptorUpdateTemplate( DescriptorUpdateTemplate descriptorUpdateTemplate, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( DescriptorUpdateTemplate descriptorUpdateTemplate, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( DescriptorUpdateTemplate descriptorUpdateTemplate, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyDescriptorUpdateTemplateKHR( DescriptorUpdateTemplate descriptorUpdateTemplate, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDescriptorUpdateTemplateKHR( DescriptorUpdateTemplate descriptorUpdateTemplate, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void updateDescriptorSetWithTemplate( DescriptorSet descriptorSet, DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d = Dispatch() ) const; + + template + void updateDescriptorSetWithTemplateKHR( DescriptorSet descriptorSet, DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d = Dispatch() ) const; + + template + void setHdrMetadataEXT( uint32_t swapchainCount, const SwapchainKHR* pSwapchains, const HdrMetadataEXT* pMetadata, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setHdrMetadataEXT( ArrayProxy swapchains, ArrayProxy metadata, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getSwapchainStatusKHR( SwapchainKHR swapchain, Dispatch const &d = Dispatch() ) const; + + template + Result getRefreshCycleDurationGOOGLE( SwapchainKHR swapchain, RefreshCycleDurationGOOGLE* pDisplayTimingProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getRefreshCycleDurationGOOGLE( SwapchainKHR swapchain, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getPastPresentationTimingGOOGLE( SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, PastPresentationTimingGOOGLE* pPresentationTimings, Dispatch const &d = Dispatch() ) const; +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getPastPresentationTimingGOOGLE( SwapchainKHR swapchain, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2* pInfo, MemoryRequirements2* pMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + MemoryRequirements2 getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; + template + StructureChain getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2* pInfo, MemoryRequirements2* pMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + MemoryRequirements2 getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; + template + StructureChain getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2* pInfo, MemoryRequirements2* pMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + MemoryRequirements2 getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; + template + StructureChain getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2* pInfo, MemoryRequirements2* pMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + MemoryRequirements2 getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; + template + StructureChain getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + std::vector getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + std::vector getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result 
createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, SamplerYcbcrConversion* pYcbcrConversion, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createSamplerYcbcrConversionUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, SamplerYcbcrConversion* pYcbcrConversion, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createSamplerYcbcrConversionKHRUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroySamplerYcbcrConversion( SamplerYcbcrConversion ycbcrConversion, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySamplerYcbcrConversion( SamplerYcbcrConversion ycbcrConversion, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( SamplerYcbcrConversion ycbcrConversion, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( SamplerYcbcrConversion ycbcrConversion, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroySamplerYcbcrConversionKHR( SamplerYcbcrConversion ycbcrConversion, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySamplerYcbcrConversionKHR( SamplerYcbcrConversion ycbcrConversion, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getQueue2( const DeviceQueueInfo2* pQueueInfo, Queue* pQueue, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Queue getQueue2( const DeviceQueueInfo2 & queueInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createValidationCacheEXT( const ValidationCacheCreateInfoEXT* pCreateInfo, const AllocationCallbacks* pAllocator, ValidationCacheEXT* pValidationCache, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createValidationCacheEXT( const ValidationCacheCreateInfoEXT & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template 
+ typename ResultValueType>::type createValidationCacheEXTUnique( const ValidationCacheCreateInfoEXT & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyValidationCacheEXT( ValidationCacheEXT validationCache, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyValidationCacheEXT( ValidationCacheEXT validationCache, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( ValidationCacheEXT validationCache, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( ValidationCacheEXT validationCache, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getValidationCacheDataEXT( ValidationCacheEXT validationCache, size_t* pDataSize, void* pData, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getValidationCacheDataEXT( ValidationCacheEXT validationCache, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result mergeValidationCachesEXT( ValidationCacheEXT dstCache, uint32_t srcCacheCount, const ValidationCacheEXT* pSrcCaches, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type mergeValidationCachesEXT( ValidationCacheEXT dstCache, ArrayProxy srcCaches, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo* pCreateInfo, DescriptorSetLayoutSupport* pSupport, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + DescriptorSetLayoutSupport getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d = Dispatch() ) const; + template + StructureChain getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo* pCreateInfo, DescriptorSetLayoutSupport* pSupport, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + DescriptorSetLayoutSupport getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d = Dispatch() ) const; + template + StructureChain getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getShaderInfoAMD( Pipeline pipeline, ShaderStageFlagBits shaderStage, ShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getShaderInfoAMD( Pipeline pipeline, ShaderStageFlagBits shaderStage, ShaderInfoTypeAMD infoType, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ 
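// --- Usage sketch (editorial illustration; not part of the vendored vulkan.hpp patch) ---
// The declarations above come in pairs: a pointer form that mirrors the C entry point, and an
// enhanced form that returns the value directly. A minimal sketch using the core Vulkan 1.1
// descriptor-set-layout support query declared above; `device` is assumed to be a Vulkan 1.1
// vk::Device and `layoutInfo` a populated vk::DescriptorSetLayoutCreateInfo created elsewhere.
#include <vulkan/vulkan.hpp>

bool layoutIsSupported( vk::Device device, const vk::DescriptorSetLayoutCreateInfo &layoutInfo )
{
    // Pointer form: the caller supplies the output structure, exactly like vkGetDescriptorSetLayoutSupport.
    vk::DescriptorSetLayoutSupport support;
    device.getDescriptorSetLayoutSupport( &layoutInfo, &support );

    // Enhanced form: the wrapper fills the structure and returns it by value.
    vk::DescriptorSetLayoutSupport same = device.getDescriptorSetLayoutSupport( layoutInfo );

    return support.supported == VK_TRUE && same.supported == VK_TRUE;
}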
+ + template + Result setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT* pNameInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT* pTagInfo, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getMemoryHostPointerPropertiesEXT( ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, MemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getMemoryHostPointerPropertiesEXT( ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createRenderPass2KHR( const RenderPassCreateInfo2KHR* pCreateInfo, const AllocationCallbacks* pAllocator, RenderPass* pRenderPass, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createRenderPass2KHR( const RenderPassCreateInfo2KHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createRenderPass2KHRUnique( const RenderPassCreateInfo2KHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + template + Result getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer* buffer, AndroidHardwareBufferPropertiesANDROID* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const &d = Dispatch() ) const; + template + typename ResultValueType>::type getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + template + Result getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result compileDeferredNVX( Pipeline pipeline, uint32_t shader, Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type compileDeferredNVX( Pipeline pipeline, uint32_t shader, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result 
createAccelerationStructureNVX( const AccelerationStructureCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, AccelerationStructureNVX* pAccelerationStructure, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createAccelerationStructureNVX( const AccelerationStructureCreateInfoNVX & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createAccelerationStructureNVXUnique( const AccelerationStructureCreateInfoNVX & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyAccelerationStructureNVX( AccelerationStructureNVX accelerationStructure, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyAccelerationStructureNVX( AccelerationStructureNVX accelerationStructure, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( AccelerationStructureNVX accelerationStructure, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( AccelerationStructureNVX accelerationStructure, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getAccelerationStructureMemoryRequirementsNVX( const AccelerationStructureMemoryRequirementsInfoNVX* pInfo, MemoryRequirements2KHR* pMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + MemoryRequirements2KHR getAccelerationStructureMemoryRequirementsNVX( const AccelerationStructureMemoryRequirementsInfoNVX & info, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getAccelerationStructureScratchMemoryRequirementsNVX( const AccelerationStructureMemoryRequirementsInfoNVX* pInfo, MemoryRequirements2KHR* pMemoryRequirements, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + MemoryRequirements2KHR getAccelerationStructureScratchMemoryRequirementsNVX( const AccelerationStructureMemoryRequirementsInfoNVX & info, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result bindAccelerationStructureMemoryNVX( uint32_t bindInfoCount, const BindAccelerationStructureMemoryInfoNVX* pBindInfos, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type bindAccelerationStructureMemoryNVX( ArrayProxy bindInfos, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getRaytracingShaderHandlesNVX( Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getRaytracingShaderHandlesNVX( Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getAccelerationStructureHandleNVX( 
AccelerationStructureNVX accelerationStructure, size_t dataSize, void* pData, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getAccelerationStructureHandleNVX( AccelerationStructureNVX accelerationStructure, size_t dataSize, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createRaytracingPipelinesNVX( PipelineCache pipelineCache, uint32_t createInfoCount, const RaytracingPipelineCreateInfoNVX* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type createRaytracingPipelinesNVX( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; + template , typename Dispatch = DispatchLoaderStatic> + ResultValueType::type createRaytracingPipelineNVX( PipelineCache pipelineCache, const RaytracingPipelineCreateInfoNVX & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType,Allocator>>::type createRaytracingPipelinesNVXUnique( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type createRaytracingPipelineNVXUnique( PipelineCache pipelineCache, const RaytracingPipelineCreateInfoNVX & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDevice() const + { + return m_device; + } + + explicit operator bool() const + { + return m_device != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_device == VK_NULL_HANDLE; + } + + private: + VkDevice m_device; + }; + + static_assert( sizeof( Device ) == sizeof( VkDevice ), "handle and wrapper have different size!" 
); + + template + VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const char* pName, Dispatch const &d) const + { + return d.vkGetDeviceProcAddr( m_device, pName ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const std::string & name, Dispatch const &d ) const + { + return d.vkGetDeviceProcAddr( m_device, name.c_str() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDevice( m_device, reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDevice( m_device, reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Queue* pQueue, Dispatch const &d) const + { + d.vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast( pQueue ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Queue Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Dispatch const &d ) const + { + Queue queue; + d.vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast( &queue ) ); + return queue; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::waitIdle(Dispatch const &d) const + { + return static_cast( d.vkDeviceWaitIdle( m_device ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type Device::waitIdle(Dispatch const &d ) const + { + Result result = static_cast( d.vkDeviceWaitIdle( m_device ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::waitIdle" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::allocateMemory( const MemoryAllocateInfo* pAllocateInfo, const AllocationCallbacks* pAllocator, DeviceMemory* pMemory, Dispatch const &d) const + { + return static_cast( d.vkAllocateMemory( m_device, reinterpret_cast( pAllocateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pMemory ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional allocator, Dispatch const &d ) const + { + DeviceMemory memory; + Result result = static_cast( d.vkAllocateMemory( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &memory ) ) ); + return createResultValue( result, memory, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateMemory" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional allocator, Dispatch const &d ) const + { + DeviceMemory memory; + Result result = static_cast( d.vkAllocateMemory( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &memory ) ) ); + + ObjectFree deleter( *this, allocator, d ); + return createResultValue( result, memory, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateMemoryUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + 
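// Illustrative aside, not part of this patch: the VK_NVX_raytracing wrappers declared above
// (create/bind/getMemoryRequirements for acceleration structures) are the entry points a path
// tracer needs before it can build its acceleration structures. The renderer added by this
// patch is plain C and presumably drives the equivalent vkXxxNVX functions from vulkan.h
// directly; the vk:: sketch below only shows the calling pattern. `findMemoryType` is a
// hypothetical helper, not something vulkan.hpp provides.

#include <vulkan/vulkan.hpp>

extern uint32_t findMemoryType( vk::PhysicalDevice gpu, uint32_t typeBits, vk::MemoryPropertyFlags props );

vk::DeviceMemory bindAccelerationStructureStorage( vk::PhysicalDevice gpu, vk::Device device,
                                                   vk::AccelerationStructureNVX accel )
{
    // Ask how much device memory the freshly created acceleration structure needs.
    vk::AccelerationStructureMemoryRequirementsInfoNVX reqInfo;
    reqInfo.accelerationStructure = accel;
    vk::MemoryRequirements2KHR reqs = device.getAccelerationStructureMemoryRequirementsNVX( reqInfo );

    // Allocate a device-local block of that size and bind it to the structure.
    vk::MemoryAllocateInfo alloc;
    alloc.allocationSize  = reqs.memoryRequirements.size;
    alloc.memoryTypeIndex = findMemoryType( gpu, reqs.memoryRequirements.memoryTypeBits,
                                            vk::MemoryPropertyFlagBits::eDeviceLocal );
    vk::DeviceMemory memory = device.allocateMemory( alloc );

    vk::BindAccelerationStructureMemoryInfoNVX bind;
    bind.accelerationStructure = accel;
    bind.memory                = memory;
    device.bindAccelerationStructureMemoryNVX( bind );
    return memory;
}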
VULKAN_HPP_INLINE void Device::freeMemory( DeviceMemory memory, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkFreeMemory( m_device, static_cast( memory ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::freeMemory( DeviceMemory memory, Optional allocator, Dispatch const &d ) const + { + d.vkFreeMemory( m_device, static_cast( memory ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::free( DeviceMemory memory, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkFreeMemory( m_device, static_cast( memory ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::free( DeviceMemory memory, Optional allocator, Dispatch const &d ) const + { + d.vkFreeMemory( m_device, static_cast( memory ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::mapMemory( DeviceMemory memory, DeviceSize offset, DeviceSize size, MemoryMapFlags flags, void** ppData, Dispatch const &d) const + { + return static_cast( d.vkMapMemory( m_device, static_cast( memory ), offset, size, static_cast( flags ), ppData ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::mapMemory( DeviceMemory memory, DeviceSize offset, DeviceSize size, MemoryMapFlags flags, Dispatch const &d ) const + { + void* pData; + Result result = static_cast( d.vkMapMemory( m_device, static_cast( memory ), offset, size, static_cast( flags ), &pData ) ); + return createResultValue( result, pData, VULKAN_HPP_NAMESPACE_STRING"::Device::mapMemory" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::unmapMemory( DeviceMemory memory, Dispatch const &d) const + { + d.vkUnmapMemory( m_device, static_cast( memory ) ); + } +#else + template + VULKAN_HPP_INLINE void Device::unmapMemory( DeviceMemory memory, Dispatch const &d ) const + { + d.vkUnmapMemory( m_device, static_cast( memory ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::flushMappedMemoryRanges( uint32_t memoryRangeCount, const MappedMemoryRange* pMemoryRanges, Dispatch const &d) const + { + return static_cast( d.vkFlushMappedMemoryRanges( m_device, memoryRangeCount, reinterpret_cast( pMemoryRanges ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::flushMappedMemoryRanges( ArrayProxy memoryRanges, Dispatch const &d ) const + { + Result result = static_cast( d.vkFlushMappedMemoryRanges( m_device, memoryRanges.size() , reinterpret_cast( memoryRanges.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::flushMappedMemoryRanges" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const MappedMemoryRange* pMemoryRanges, Dispatch const &d) const + { + return static_cast( d.vkInvalidateMappedMemoryRanges( m_device, memoryRangeCount, reinterpret_cast( pMemoryRanges ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::invalidateMappedMemoryRanges( ArrayProxy memoryRanges, Dispatch const 
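// Illustrative aside, not part of this patch: mapMemory / flushMappedMemoryRanges / unmapMemory
// above form the usual CPU-to-GPU upload path for host-visible allocations (per-frame uniform
// or staging data). The sketch assumes `memory` was allocated from a HOST_VISIBLE memory type
// and that `size` bytes fit into it.

#include <cstring>
#include <vulkan/vulkan.hpp>

void uploadToHostVisibleMemory( vk::Device device, vk::DeviceMemory memory,
                                const void* src, vk::DeviceSize size )
{
    // The enhanced overload returns the mapped pointer directly (and throws on failure).
    void* dst = device.mapMemory( memory, 0, size, vk::MemoryMapFlags() );
    std::memcpy( dst, src, static_cast<size_t>( size ) );

    // Only required for memory types without HOST_COHERENT, but harmless otherwise.
    vk::MappedMemoryRange range;
    range.memory = memory;
    range.offset = 0;
    range.size   = VK_WHOLE_SIZE;
    device.flushMappedMemoryRanges( range );

    device.unmapMemory( memory );
}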
&d ) const + { + Result result = static_cast( d.vkInvalidateMappedMemoryRanges( m_device, memoryRanges.size() , reinterpret_cast( memoryRanges.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::invalidateMappedMemoryRanges" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getMemoryCommitment( DeviceMemory memory, DeviceSize* pCommittedMemoryInBytes, Dispatch const &d) const + { + d.vkGetDeviceMemoryCommitment( m_device, static_cast( memory ), pCommittedMemoryInBytes ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE DeviceSize Device::getMemoryCommitment( DeviceMemory memory, Dispatch const &d ) const + { + DeviceSize committedMemoryInBytes; + d.vkGetDeviceMemoryCommitment( m_device, static_cast( memory ), &committedMemoryInBytes ); + return committedMemoryInBytes; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements( Buffer buffer, MemoryRequirements* pMemoryRequirements, Dispatch const &d) const + { + d.vkGetBufferMemoryRequirements( m_device, static_cast( buffer ), reinterpret_cast( pMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE MemoryRequirements Device::getBufferMemoryRequirements( Buffer buffer, Dispatch const &d ) const + { + MemoryRequirements memoryRequirements; + d.vkGetBufferMemoryRequirements( m_device, static_cast( buffer ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::bindBufferMemory( Buffer buffer, DeviceMemory memory, DeviceSize memoryOffset, Dispatch const &d) const + { + return static_cast( d.vkBindBufferMemory( m_device, static_cast( buffer ), static_cast( memory ), memoryOffset ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type Device::bindBufferMemory( Buffer buffer, DeviceMemory memory, DeviceSize memoryOffset, Dispatch const &d ) const + { + Result result = static_cast( d.vkBindBufferMemory( m_device, static_cast( buffer ), static_cast( memory ), memoryOffset ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindBufferMemory" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getImageMemoryRequirements( Image image, MemoryRequirements* pMemoryRequirements, Dispatch const &d) const + { + d.vkGetImageMemoryRequirements( m_device, static_cast( image ), reinterpret_cast( pMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE MemoryRequirements Device::getImageMemoryRequirements( Image image, Dispatch const &d ) const + { + MemoryRequirements memoryRequirements; + d.vkGetImageMemoryRequirements( m_device, static_cast( image ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::bindImageMemory( Image image, DeviceMemory memory, DeviceSize memoryOffset, Dispatch const &d) const + { + return static_cast( d.vkBindImageMemory( m_device, static_cast( image ), static_cast( memory ), memoryOffset ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type Device::bindImageMemory( Image image, DeviceMemory memory, DeviceSize memoryOffset, Dispatch const &d ) const + { + Result 
result = static_cast( d.vkBindImageMemory( m_device, static_cast( image ), static_cast( memory ), memoryOffset ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindImageMemory" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements( Image image, uint32_t* pSparseMemoryRequirementCount, SparseImageMemoryRequirements* pSparseMemoryRequirements, Dispatch const &d) const + { + d.vkGetImageSparseMemoryRequirements( m_device, static_cast( image ), pSparseMemoryRequirementCount, reinterpret_cast( pSparseMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE std::vector Device::getImageSparseMemoryRequirements( Image image, Dispatch const &d ) const + { + std::vector sparseMemoryRequirements; + uint32_t sparseMemoryRequirementCount; + d.vkGetImageSparseMemoryRequirements( m_device, static_cast( image ), &sparseMemoryRequirementCount, nullptr ); + sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); + d.vkGetImageSparseMemoryRequirements( m_device, static_cast( image ), &sparseMemoryRequirementCount, reinterpret_cast( sparseMemoryRequirements.data() ) ); + return sparseMemoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createFence( const FenceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Fence* pFence, Dispatch const &d) const + { + return static_cast( d.vkCreateFence( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pFence ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createFence( const FenceCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Fence fence; + Result result = static_cast( d.vkCreateFence( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::createFence" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createFenceUnique( const FenceCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Fence fence; + Result result = static_cast( d.vkCreateFence( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::createFenceUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyFence( Fence fence, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyFence( m_device, static_cast( fence ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyFence( Fence fence, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyFence( m_device, static_cast( fence ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( Fence fence, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyFence( m_device, static_cast( fence ), reinterpret_cast( pAllocator ) ); + } +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( Fence fence, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyFence( m_device, static_cast( fence ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::resetFences( uint32_t fenceCount, const Fence* pFences, Dispatch const &d) const + { + return static_cast( d.vkResetFences( m_device, fenceCount, reinterpret_cast( pFences ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::resetFences( ArrayProxy fences, Dispatch const &d ) const + { + Result result = static_cast( d.vkResetFences( m_device, fences.size() , reinterpret_cast( fences.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::resetFences" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::getFenceStatus( Fence fence, Dispatch const &d) const + { + return static_cast( d.vkGetFenceStatus( m_device, static_cast( fence ) ) ); + } +#else + template + VULKAN_HPP_INLINE Result Device::getFenceStatus( Fence fence, Dispatch const &d ) const + { + Result result = static_cast( d.vkGetFenceStatus( m_device, static_cast( fence ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getFenceStatus", { Result::eSuccess, Result::eNotReady } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::waitForFences( uint32_t fenceCount, const Fence* pFences, Bool32 waitAll, uint64_t timeout, Dispatch const &d) const + { + return static_cast( d.vkWaitForFences( m_device, fenceCount, reinterpret_cast( pFences ), waitAll, timeout ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::waitForFences( ArrayProxy fences, Bool32 waitAll, uint64_t timeout, Dispatch const &d ) const + { + Result result = static_cast( d.vkWaitForFences( m_device, fences.size() , reinterpret_cast( fences.data() ), waitAll, timeout ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::waitForFences", { Result::eSuccess, Result::eTimeout } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createSemaphore( const SemaphoreCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Semaphore* pSemaphore, Dispatch const &d) const + { + return static_cast( d.vkCreateSemaphore( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSemaphore ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createSemaphore( const SemaphoreCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Semaphore semaphore; + Result result = static_cast( d.vkCreateSemaphore( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &semaphore ) ) ); + return createResultValue( result, semaphore, VULKAN_HPP_NAMESPACE_STRING"::Device::createSemaphore" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Semaphore semaphore; + Result result = static_cast( d.vkCreateSemaphore( m_device, reinterpret_cast( &createInfo ), 
reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &semaphore ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, semaphore, VULKAN_HPP_NAMESPACE_STRING"::Device::createSemaphoreUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroySemaphore( Semaphore semaphore, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySemaphore( m_device, static_cast( semaphore ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroySemaphore( Semaphore semaphore, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySemaphore( m_device, static_cast( semaphore ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( Semaphore semaphore, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySemaphore( m_device, static_cast( semaphore ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( Semaphore semaphore, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySemaphore( m_device, static_cast( semaphore ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createEvent( const EventCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Event* pEvent, Dispatch const &d) const + { + return static_cast( d.vkCreateEvent( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pEvent ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createEvent( const EventCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Event event; + Result result = static_cast( d.vkCreateEvent( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &event ) ) ); + return createResultValue( result, event, VULKAN_HPP_NAMESPACE_STRING"::Device::createEvent" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createEventUnique( const EventCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Event event; + Result result = static_cast( d.vkCreateEvent( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &event ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, event, VULKAN_HPP_NAMESPACE_STRING"::Device::createEventUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyEvent( Event event, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyEvent( m_device, static_cast( event ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyEvent( Event event, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyEvent( m_device, static_cast( event ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( Event 
event, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyEvent( m_device, static_cast( event ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( Event event, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyEvent( m_device, static_cast( event ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::getEventStatus( Event event, Dispatch const &d) const + { + return static_cast( d.vkGetEventStatus( m_device, static_cast( event ) ) ); + } +#else + template + VULKAN_HPP_INLINE Result Device::getEventStatus( Event event, Dispatch const &d ) const + { + Result result = static_cast( d.vkGetEventStatus( m_device, static_cast( event ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getEventStatus", { Result::eEventSet, Result::eEventReset } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::setEvent( Event event, Dispatch const &d) const + { + return static_cast( d.vkSetEvent( m_device, static_cast( event ) ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type Device::setEvent( Event event, Dispatch const &d ) const + { + Result result = static_cast( d.vkSetEvent( m_device, static_cast( event ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::setEvent" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::resetEvent( Event event, Dispatch const &d) const + { + return static_cast( d.vkResetEvent( m_device, static_cast( event ) ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type Device::resetEvent( Event event, Dispatch const &d ) const + { + Result result = static_cast( d.vkResetEvent( m_device, static_cast( event ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::resetEvent" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createQueryPool( const QueryPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, QueryPool* pQueryPool, Dispatch const &d) const + { + return static_cast( d.vkCreateQueryPool( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pQueryPool ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createQueryPool( const QueryPoolCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + QueryPool queryPool; + Result result = static_cast( d.vkCreateQueryPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &queryPool ) ) ); + return createResultValue( result, queryPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createQueryPool" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + QueryPool queryPool; + Result result = static_cast( d.vkCreateQueryPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &queryPool ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + 
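// Illustrative aside, not part of this patch: the fence and semaphore helpers above
// (createFence, waitForFences, resetFences, createSemaphore) are typically bundled into a
// small per-frame synchronization object so the CPU never overwrites data the GPU is still
// reading. Sketch only; assumes enhanced mode with exceptions enabled.

#include <cstdint>
#include <vulkan/vulkan.hpp>

struct FrameSync
{
    vk::Fence     renderDone;      // signalled by the queue submit of this frame
    vk::Semaphore imageAvailable;  // signalled when the swapchain image is ready
};

FrameSync createFrameSync( vk::Device device )
{
    FrameSync s;
    // Created signalled so the very first wait below returns immediately.
    s.renderDone     = device.createFence( vk::FenceCreateInfo( vk::FenceCreateFlagBits::eSignaled ) );
    s.imageAvailable = device.createSemaphore( vk::SemaphoreCreateInfo() );
    return s;
}

void beginFrame( vk::Device device, const FrameSync& s )
{
    // Block until the previous use of this frame slot finished, then re-arm the fence.
    device.waitForFences( s.renderDone, VK_TRUE, UINT64_MAX );
    device.resetFences( s.renderDone );
}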
return createResultValue( result, queryPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createQueryPoolUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyQueryPool( QueryPool queryPool, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyQueryPool( m_device, static_cast( queryPool ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyQueryPool( QueryPool queryPool, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyQueryPool( m_device, static_cast( queryPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( QueryPool queryPool, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyQueryPool( m_device, static_cast( queryPool ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( QueryPool queryPool, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyQueryPool( m_device, static_cast( queryPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, DeviceSize stride, QueryResultFlags flags, Dispatch const &d) const + { + return static_cast( d.vkGetQueryPoolResults( m_device, static_cast( queryPool ), firstQuery, queryCount, dataSize, pData, stride, static_cast( flags ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::getQueryPoolResults( QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, ArrayProxy data, DeviceSize stride, QueryResultFlags flags, Dispatch const &d ) const + { + Result result = static_cast( d.vkGetQueryPoolResults( m_device, static_cast( queryPool ), firstQuery, queryCount, data.size() * sizeof( T ) , reinterpret_cast( data.data() ), stride, static_cast( flags ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getQueryPoolResults", { Result::eSuccess, Result::eNotReady } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createBuffer( const BufferCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Buffer* pBuffer, Dispatch const &d) const + { + return static_cast( d.vkCreateBuffer( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pBuffer ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createBuffer( const BufferCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Buffer buffer; + Result result = static_cast( d.vkCreateBuffer( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &buffer ) ) ); + return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING"::Device::createBuffer" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createBufferUnique( const BufferCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Buffer buffer; + Result result = static_cast( d.vkCreateBuffer( m_device, reinterpret_cast( 
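// Illustrative aside, not part of this patch: the ArrayProxy overload of getQueryPoolResults
// above is the readback half of GPU timestamp profiling (the profiler unit added by this patch
// presumably does the C equivalent). `timestampPeriodNs` is the nanoseconds-per-tick value
// from the physical-device limits; the query pool is assumed to hold two timestamps written
// with vkCmdWriteTimestamp around the pass being measured.

#include <array>
#include <vulkan/vulkan.hpp>

double readGpuPassMilliseconds( vk::Device device, vk::QueryPool pool, float timestampPeriodNs )
{
    std::array<uint64_t, 2> ticks{};
    // e64 selects 64-bit results, eWait blocks until both timestamps are available.
    device.getQueryPoolResults( pool, 0, 2, vk::ArrayProxy<uint64_t>( ticks ),
                                sizeof( uint64_t ),
                                vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait );
    return double( ticks[1] - ticks[0] ) * timestampPeriodNs * 1e-6;
}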
&createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &buffer ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING"::Device::createBufferUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyBuffer( Buffer buffer, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyBuffer( m_device, static_cast( buffer ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyBuffer( Buffer buffer, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyBuffer( m_device, static_cast( buffer ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( Buffer buffer, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyBuffer( m_device, static_cast( buffer ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( Buffer buffer, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyBuffer( m_device, static_cast( buffer ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createBufferView( const BufferViewCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, BufferView* pView, Dispatch const &d) const + { + return static_cast( d.vkCreateBufferView( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pView ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createBufferView( const BufferViewCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + BufferView view; + Result result = static_cast( d.vkCreateBufferView( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING"::Device::createBufferView" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createBufferViewUnique( const BufferViewCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + BufferView view; + Result result = static_cast( d.vkCreateBufferView( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING"::Device::createBufferViewUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyBufferView( BufferView bufferView, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyBufferView( m_device, static_cast( bufferView ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyBufferView( BufferView bufferView, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyBufferView( m_device, static_cast( bufferView ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( BufferView bufferView, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyBufferView( m_device, static_cast( bufferView ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( BufferView bufferView, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyBufferView( m_device, static_cast( bufferView ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createImage( const ImageCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Image* pImage, Dispatch const &d) const + { + return static_cast( d.vkCreateImage( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pImage ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createImage( const ImageCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Image image; + Result result = static_cast( d.vkCreateImage( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &image ) ) ); + return createResultValue( result, image, VULKAN_HPP_NAMESPACE_STRING"::Device::createImage" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createImageUnique( const ImageCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Image image; + Result result = static_cast( d.vkCreateImage( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &image ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, image, VULKAN_HPP_NAMESPACE_STRING"::Device::createImageUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyImage( Image image, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyImage( m_device, static_cast( image ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyImage( Image image, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyImage( m_device, static_cast( image ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( Image image, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyImage( m_device, static_cast( image ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( Image image, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyImage( m_device, static_cast( image ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getImageSubresourceLayout( Image image, const ImageSubresource* pSubresource, SubresourceLayout* pLayout, Dispatch const &d) const + { + d.vkGetImageSubresourceLayout( m_device, static_cast( image ), reinterpret_cast( pSubresource ), reinterpret_cast( pLayout ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE SubresourceLayout 
Device::getImageSubresourceLayout( Image image, const ImageSubresource & subresource, Dispatch const &d ) const + { + SubresourceLayout layout; + d.vkGetImageSubresourceLayout( m_device, static_cast( image ), reinterpret_cast( &subresource ), reinterpret_cast( &layout ) ); + return layout; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createImageView( const ImageViewCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, ImageView* pView, Dispatch const &d) const + { + return static_cast( d.vkCreateImageView( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pView ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createImageView( const ImageViewCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + ImageView view; + Result result = static_cast( d.vkCreateImageView( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING"::Device::createImageView" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + ImageView view; + Result result = static_cast( d.vkCreateImageView( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING"::Device::createImageViewUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyImageView( ImageView imageView, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyImageView( m_device, static_cast( imageView ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyImageView( ImageView imageView, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyImageView( m_device, static_cast( imageView ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( ImageView imageView, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyImageView( m_device, static_cast( imageView ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( ImageView imageView, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyImageView( m_device, static_cast( imageView ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createShaderModule( const ShaderModuleCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, ShaderModule* pShaderModule, Dispatch const &d) const + { + return static_cast( d.vkCreateShaderModule( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pShaderModule ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional 
allocator, Dispatch const &d ) const + { + ShaderModule shaderModule; + Result result = static_cast( d.vkCreateShaderModule( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &shaderModule ) ) ); + return createResultValue( result, shaderModule, VULKAN_HPP_NAMESPACE_STRING"::Device::createShaderModule" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + ShaderModule shaderModule; + Result result = static_cast( d.vkCreateShaderModule( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &shaderModule ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, shaderModule, VULKAN_HPP_NAMESPACE_STRING"::Device::createShaderModuleUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyShaderModule( ShaderModule shaderModule, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyShaderModule( m_device, static_cast( shaderModule ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyShaderModule( ShaderModule shaderModule, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyShaderModule( m_device, static_cast( shaderModule ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( ShaderModule shaderModule, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyShaderModule( m_device, static_cast( shaderModule ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( ShaderModule shaderModule, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyShaderModule( m_device, static_cast( shaderModule ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createPipelineCache( const PipelineCacheCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, PipelineCache* pPipelineCache, Dispatch const &d) const + { + return static_cast( d.vkCreatePipelineCache( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pPipelineCache ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + PipelineCache pipelineCache; + Result result = static_cast( d.vkCreatePipelineCache( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipelineCache ) ) ); + return createResultValue( result, pipelineCache, VULKAN_HPP_NAMESPACE_STRING"::Device::createPipelineCache" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + PipelineCache pipelineCache; + Result result = static_cast( d.vkCreatePipelineCache( m_device, reinterpret_cast( 
&createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipelineCache ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipelineCache, VULKAN_HPP_NAMESPACE_STRING"::Device::createPipelineCacheUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyPipelineCache( PipelineCache pipelineCache, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyPipelineCache( m_device, static_cast( pipelineCache ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyPipelineCache( PipelineCache pipelineCache, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyPipelineCache( m_device, static_cast( pipelineCache ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( PipelineCache pipelineCache, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyPipelineCache( m_device, static_cast( pipelineCache ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( PipelineCache pipelineCache, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyPipelineCache( m_device, static_cast( pipelineCache ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getPipelineCacheData( PipelineCache pipelineCache, size_t* pDataSize, void* pData, Dispatch const &d) const + { + return static_cast( d.vkGetPipelineCacheData( m_device, static_cast( pipelineCache ), pDataSize, pData ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPipelineCacheData( PipelineCache pipelineCache, Dispatch const &d ) const + { + std::vector data; + size_t dataSize; + Result result; + do + { + result = static_cast( d.vkGetPipelineCacheData( m_device, static_cast( pipelineCache ), &dataSize, nullptr ) ); + if ( ( result == Result::eSuccess ) && dataSize ) + { + data.resize( dataSize ); + result = static_cast( d.vkGetPipelineCacheData( m_device, static_cast( pipelineCache ), &dataSize, reinterpret_cast( data.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); + data.resize( dataSize ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineCacheData" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::mergePipelineCaches( PipelineCache dstCache, uint32_t srcCacheCount, const PipelineCache* pSrcCaches, Dispatch const &d) const + { + return static_cast( d.vkMergePipelineCaches( m_device, static_cast( dstCache ), srcCacheCount, reinterpret_cast( pSrcCaches ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::mergePipelineCaches( PipelineCache dstCache, ArrayProxy srcCaches, Dispatch const &d ) const + { + Result result = static_cast( d.vkMergePipelineCaches( m_device, static_cast( dstCache ), srcCaches.size() , reinterpret_cast( srcCaches.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::mergePipelineCaches" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + 
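// Illustrative aside, not part of this patch: the size-query/retry loop inside
// getPipelineCacheData above exists so that a pipeline cache can be serialized safely even if
// it grows between the two calls. The sketch shows the usual save/restore round trip; file
// handling is deliberately minimal.

#include <fstream>
#include <vector>
#include <vulkan/vulkan.hpp>

void savePipelineCache( vk::Device device, vk::PipelineCache cache, const char* path )
{
    std::vector<uint8_t> blob = device.getPipelineCacheData( cache );
    std::ofstream( path, std::ios::binary )
        .write( reinterpret_cast<const char*>( blob.data() ),
                static_cast<std::streamsize>( blob.size() ) );
}

vk::PipelineCache loadPipelineCache( vk::Device device, const std::vector<uint8_t>& blob )
{
    vk::PipelineCacheCreateInfo info;
    info.initialDataSize = blob.size();   // zero on a first run is fine
    info.pInitialData    = blob.data();   // drivers validate and reject stale blobs themselves
    return device.createPipelineCache( info );
}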
template + VULKAN_HPP_INLINE Result Device::createGraphicsPipelines( PipelineCache pipelineCache, uint32_t createInfoCount, const GraphicsPipelineCreateInfo* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines, Dispatch const &d) const + { + return static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), createInfoCount, reinterpret_cast( pCreateInfos ), reinterpret_cast( pAllocator ), reinterpret_cast( pPipelines ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createGraphicsPipelines( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator, Dispatch const &d ) const + { + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), createInfos.size() , reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createGraphicsPipelines" ); + } + template + VULKAN_HPP_INLINE ResultValueType::type Device::createGraphicsPipeline( PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), 1 , reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createGraphicsPipeline" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType,Allocator>>::type Device::createGraphicsPipelinesUnique( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator, Dispatch const &d ) const + { + static_assert( sizeof( Pipeline ) <= sizeof( UniquePipeline ), "Pipeline is greater than UniquePipeline!" 
); + std::vector pipelines; + pipelines.reserve( createInfos.size() ); + Pipeline* buffer = reinterpret_cast( reinterpret_cast( pipelines.data() ) + createInfos.size() * ( sizeof( UniquePipeline ) - sizeof( Pipeline ) ) ); + Result result = static_cast(d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), createInfos.size() , reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( buffer ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0 ; i + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createGraphicsPipelineUnique( PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), 1 , reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createGraphicsPipelineUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createComputePipelines( PipelineCache pipelineCache, uint32_t createInfoCount, const ComputePipelineCreateInfo* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines, Dispatch const &d) const + { + return static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), createInfoCount, reinterpret_cast( pCreateInfos ), reinterpret_cast( pAllocator ), reinterpret_cast( pPipelines ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createComputePipelines( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator, Dispatch const &d ) const + { + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), createInfos.size() , reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createComputePipelines" ); + } + template + VULKAN_HPP_INLINE ResultValueType::type Device::createComputePipeline( PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), 1 , reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createComputePipeline" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType,Allocator>>::type Device::createComputePipelinesUnique( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator, Dispatch const &d ) const + { + static_assert( sizeof( Pipeline ) <= sizeof( UniquePipeline ), "Pipeline is greater than UniquePipeline!" 
); + std::vector pipelines; + pipelines.reserve( createInfos.size() ); + Pipeline* buffer = reinterpret_cast( reinterpret_cast( pipelines.data() ) + createInfos.size() * ( sizeof( UniquePipeline ) - sizeof( Pipeline ) ) ); + Result result = static_cast(d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), createInfos.size() , reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( buffer ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0 ; i + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createComputePipelineUnique( PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), 1 , reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createComputePipelineUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyPipeline( Pipeline pipeline, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyPipeline( m_device, static_cast( pipeline ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyPipeline( Pipeline pipeline, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyPipeline( m_device, static_cast( pipeline ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( Pipeline pipeline, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyPipeline( m_device, static_cast( pipeline ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( Pipeline pipeline, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyPipeline( m_device, static_cast( pipeline ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createPipelineLayout( const PipelineLayoutCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, PipelineLayout* pPipelineLayout, Dispatch const &d) const + { + return static_cast( d.vkCreatePipelineLayout( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pPipelineLayout ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + PipelineLayout pipelineLayout; + Result result = static_cast( d.vkCreatePipelineLayout( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipelineLayout ) ) ); + return createResultValue( result, pipelineLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createPipelineLayout" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + 
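// Illustrative aside, not part of this patch: createShaderModule and createComputePipeline
// above are the path every compiled compute shader (*.comp.spv) takes before it can be
// dispatched. The sketch assumes the SPIR-V words were already loaded from disk and that a
// compatible pipeline layout exists.

#include <vector>
#include <vulkan/vulkan.hpp>

vk::Pipeline createComputePipelineFromSpirv( vk::Device device, vk::PipelineLayout layout,
                                             const std::vector<uint32_t>& spirv )
{
    vk::ShaderModuleCreateInfo moduleInfo;
    moduleInfo.codeSize = spirv.size() * sizeof( uint32_t );  // codeSize is in bytes
    moduleInfo.pCode    = spirv.data();
    vk::ShaderModule module = device.createShaderModule( moduleInfo );

    vk::ComputePipelineCreateInfo pipeInfo;
    pipeInfo.stage  = vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eCompute,
                                                         module, "main" );
    pipeInfo.layout = layout;
    vk::Pipeline pipeline = device.createComputePipeline( nullptr, pipeInfo );

    // The module is no longer needed once the pipeline has been created.
    device.destroyShaderModule( module );
    return pipeline;
}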
PipelineLayout pipelineLayout; + Result result = static_cast( d.vkCreatePipelineLayout( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipelineLayout ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipelineLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createPipelineLayoutUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyPipelineLayout( PipelineLayout pipelineLayout, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyPipelineLayout( m_device, static_cast( pipelineLayout ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyPipelineLayout( PipelineLayout pipelineLayout, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyPipelineLayout( m_device, static_cast( pipelineLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( PipelineLayout pipelineLayout, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyPipelineLayout( m_device, static_cast( pipelineLayout ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( PipelineLayout pipelineLayout, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyPipelineLayout( m_device, static_cast( pipelineLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createSampler( const SamplerCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Sampler* pSampler, Dispatch const &d) const + { + return static_cast( d.vkCreateSampler( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSampler ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createSampler( const SamplerCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Sampler sampler; + Result result = static_cast( d.vkCreateSampler( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &sampler ) ) ); + return createResultValue( result, sampler, VULKAN_HPP_NAMESPACE_STRING"::Device::createSampler" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSamplerUnique( const SamplerCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Sampler sampler; + Result result = static_cast( d.vkCreateSampler( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &sampler ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, sampler, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroySampler( Sampler sampler, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySampler( m_device, static_cast( sampler ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void 
Device::destroySampler( Sampler sampler, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySampler( m_device, static_cast( sampler ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( Sampler sampler, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySampler( m_device, static_cast( sampler ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( Sampler sampler, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySampler( m_device, static_cast( sampler ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorSetLayout* pSetLayout, Dispatch const &d) const + { + return static_cast( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSetLayout ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + DescriptorSetLayout setLayout; + Result result = static_cast( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &setLayout ) ) ); + return createResultValue( result, setLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorSetLayout" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + DescriptorSetLayout setLayout; + Result result = static_cast( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &setLayout ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, setLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorSetLayoutUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDescriptorSetLayout( m_device, static_cast( descriptorSetLayout ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( DescriptorSetLayout descriptorSetLayout, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDescriptorSetLayout( m_device, static_cast( descriptorSetLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( DescriptorSetLayout descriptorSetLayout, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDescriptorSetLayout( m_device, static_cast( descriptorSetLayout ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( 
DescriptorSetLayout descriptorSetLayout, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDescriptorSetLayout( m_device, static_cast( descriptorSetLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createDescriptorPool( const DescriptorPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorPool* pDescriptorPool, Dispatch const &d) const + { + return static_cast( d.vkCreateDescriptorPool( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pDescriptorPool ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + DescriptorPool descriptorPool; + Result result = static_cast( d.vkCreateDescriptorPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &descriptorPool ) ) ); + return createResultValue( result, descriptorPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorPool" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + DescriptorPool descriptorPool; + Result result = static_cast( d.vkCreateDescriptorPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &descriptorPool ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, descriptorPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorPoolUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyDescriptorPool( DescriptorPool descriptorPool, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDescriptorPool( m_device, static_cast( descriptorPool ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyDescriptorPool( DescriptorPool descriptorPool, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDescriptorPool( m_device, static_cast( descriptorPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( DescriptorPool descriptorPool, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDescriptorPool( m_device, static_cast( descriptorPool ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( DescriptorPool descriptorPool, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDescriptorPool( m_device, static_cast( descriptorPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::resetDescriptorPool( DescriptorPool descriptorPool, DescriptorPoolResetFlags flags, Dispatch const &d) const + { + return static_cast( d.vkResetDescriptorPool( m_device, static_cast( descriptorPool ), static_cast( flags ) ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type 
Device::resetDescriptorPool( DescriptorPool descriptorPool, DescriptorPoolResetFlags flags, Dispatch const &d ) const + { + Result result = static_cast( d.vkResetDescriptorPool( m_device, static_cast( descriptorPool ), static_cast( flags ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::resetDescriptorPool" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::allocateDescriptorSets( const DescriptorSetAllocateInfo* pAllocateInfo, DescriptorSet* pDescriptorSets, Dispatch const &d) const + { + return static_cast( d.vkAllocateDescriptorSets( m_device, reinterpret_cast( pAllocateInfo ), reinterpret_cast( pDescriptorSets ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const &d ) const + { + std::vector descriptorSets( allocateInfo.descriptorSetCount ); + Result result = static_cast( d.vkAllocateDescriptorSets( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( descriptorSets.data() ) ) ); + return createResultValue( result, descriptorSets, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateDescriptorSets" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType,Allocator>>::type Device::allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const &d ) const + { + static_assert( sizeof( DescriptorSet ) <= sizeof( UniqueDescriptorSet ), "DescriptorSet is greater than UniqueDescriptorSet!" ); + std::vector descriptorSets; + descriptorSets.reserve( allocateInfo.descriptorSetCount ); + DescriptorSet* buffer = reinterpret_cast( reinterpret_cast( descriptorSets.data() ) + allocateInfo.descriptorSetCount * ( sizeof( UniqueDescriptorSet ) - sizeof( DescriptorSet ) ) ); + Result result = static_cast(d.vkAllocateDescriptorSets( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( buffer ) ) ); + + PoolFree deleter( *this, allocateInfo.descriptorPool, d ); + for ( size_t i=0 ; i + VULKAN_HPP_INLINE Result Device::freeDescriptorSets( DescriptorPool descriptorPool, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets, Dispatch const &d) const + { + return static_cast( d.vkFreeDescriptorSets( m_device, static_cast( descriptorPool ), descriptorSetCount, reinterpret_cast( pDescriptorSets ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::freeDescriptorSets( DescriptorPool descriptorPool, ArrayProxy descriptorSets, Dispatch const &d ) const + { + Result result = static_cast( d.vkFreeDescriptorSets( m_device, static_cast( descriptorPool ), descriptorSets.size() , reinterpret_cast( descriptorSets.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::freeDescriptorSets" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::free( DescriptorPool descriptorPool, uint32_t descriptorSetCount, const DescriptorSet* pDescriptorSets, Dispatch const &d) const + { + return static_cast( d.vkFreeDescriptorSets( m_device, static_cast( descriptorPool ), descriptorSetCount, reinterpret_cast( pDescriptorSets ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::free( DescriptorPool descriptorPool, ArrayProxy descriptorSets, Dispatch const &d ) const + { + Result result = static_cast( 
d.vkFreeDescriptorSets( m_device, static_cast( descriptorPool ), descriptorSets.size() , reinterpret_cast( descriptorSets.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::free" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::updateDescriptorSets( uint32_t descriptorWriteCount, const WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const CopyDescriptorSet* pDescriptorCopies, Dispatch const &d) const + { + d.vkUpdateDescriptorSets( m_device, descriptorWriteCount, reinterpret_cast( pDescriptorWrites ), descriptorCopyCount, reinterpret_cast( pDescriptorCopies ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::updateDescriptorSets( ArrayProxy descriptorWrites, ArrayProxy descriptorCopies, Dispatch const &d ) const + { + d.vkUpdateDescriptorSets( m_device, descriptorWrites.size() , reinterpret_cast( descriptorWrites.data() ), descriptorCopies.size() , reinterpret_cast( descriptorCopies.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createFramebuffer( const FramebufferCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Framebuffer* pFramebuffer, Dispatch const &d) const + { + return static_cast( d.vkCreateFramebuffer( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pFramebuffer ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createFramebuffer( const FramebufferCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Framebuffer framebuffer; + Result result = static_cast( d.vkCreateFramebuffer( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &framebuffer ) ) ); + return createResultValue( result, framebuffer, VULKAN_HPP_NAMESPACE_STRING"::Device::createFramebuffer" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + Framebuffer framebuffer; + Result result = static_cast( d.vkCreateFramebuffer( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &framebuffer ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, framebuffer, VULKAN_HPP_NAMESPACE_STRING"::Device::createFramebufferUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyFramebuffer( Framebuffer framebuffer, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyFramebuffer( m_device, static_cast( framebuffer ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyFramebuffer( Framebuffer framebuffer, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyFramebuffer( m_device, static_cast( framebuffer ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( Framebuffer framebuffer, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyFramebuffer( m_device, static_cast( framebuffer ), reinterpret_cast( pAllocator ) ); + } 
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void Device::destroy( Framebuffer framebuffer, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const
+  {
+    d.vkDestroyFramebuffer( m_device, static_cast<VkFramebuffer>( framebuffer ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Result Device::createRenderPass( const RenderPassCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, RenderPass* pRenderPass, Dispatch const &d) const
+  {
+    return static_cast<Result>( d.vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkRenderPass*>( pRenderPass ) ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE ResultValueType<RenderPass>::type Device::createRenderPass( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const
+  {
+    RenderPass renderPass;
+    Result result = static_cast<Result>( d.vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass*>( &renderPass ) ) );
+    return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPass" );
+  }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<RenderPass,Dispatch>>::type Device::createRenderPassUnique( const RenderPassCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const
+  {
+    RenderPass renderPass;
+    Result result = static_cast<Result>( d.vkCreateRenderPass( m_device, reinterpret_cast<const VkRenderPassCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkRenderPass*>( &renderPass ) ) );
+
+    ObjectDestroy<Device,Dispatch> deleter( *this, allocator, d );
+    return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPassUnique", deleter );
+  }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void Device::destroyRenderPass( RenderPass renderPass, const AllocationCallbacks* pAllocator, Dispatch const &d) const
+  {
+    d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void Device::destroyRenderPass( RenderPass renderPass, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const
+  {
+    d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void Device::destroy( RenderPass renderPass, const AllocationCallbacks* pAllocator, Dispatch const &d) const
+  {
+    d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void Device::destroy( RenderPass renderPass, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const
+  {
+    d.vkDestroyRenderPass( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void Device::getRenderAreaGranularity( RenderPass renderPass, Extent2D* pGranularity, Dispatch const &d) const
+  {
+    d.vkGetRenderAreaGranularity( m_device, static_cast<VkRenderPass>( renderPass ), reinterpret_cast<VkExtent2D*>( pGranularity ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Extent2D Device::getRenderAreaGranularity( RenderPass renderPass, Dispatch const &d ) const
+  {
+    Extent2D granularity;
+    d.vkGetRenderAreaGranularity(
m_device, static_cast( renderPass ), reinterpret_cast( &granularity ) ); + return granularity; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createCommandPool( const CommandPoolCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, CommandPool* pCommandPool, Dispatch const &d) const + { + return static_cast( d.vkCreateCommandPool( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pCommandPool ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createCommandPool( const CommandPoolCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + CommandPool commandPool; + Result result = static_cast( d.vkCreateCommandPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &commandPool ) ) ); + return createResultValue( result, commandPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createCommandPool" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + CommandPool commandPool; + Result result = static_cast( d.vkCreateCommandPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &commandPool ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, commandPool, VULKAN_HPP_NAMESPACE_STRING"::Device::createCommandPoolUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyCommandPool( CommandPool commandPool, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyCommandPool( m_device, static_cast( commandPool ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyCommandPool( CommandPool commandPool, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyCommandPool( m_device, static_cast( commandPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( CommandPool commandPool, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyCommandPool( m_device, static_cast( commandPool ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( CommandPool commandPool, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyCommandPool( m_device, static_cast( commandPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::resetCommandPool( CommandPool commandPool, CommandPoolResetFlags flags, Dispatch const &d) const + { + return static_cast( d.vkResetCommandPool( m_device, static_cast( commandPool ), static_cast( flags ) ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type Device::resetCommandPool( CommandPool commandPool, CommandPoolResetFlags flags, Dispatch const &d ) const + { + Result result = static_cast( d.vkResetCommandPool( m_device, static_cast( commandPool ), static_cast( flags ) ) ); + return createResultValue( result, 
VULKAN_HPP_NAMESPACE_STRING"::Device::resetCommandPool" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::allocateCommandBuffers( const CommandBufferAllocateInfo* pAllocateInfo, CommandBuffer* pCommandBuffers, Dispatch const &d) const + { + return static_cast( d.vkAllocateCommandBuffers( m_device, reinterpret_cast( pAllocateInfo ), reinterpret_cast( pCommandBuffers ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Dispatch const &d ) const + { + std::vector commandBuffers( allocateInfo.commandBufferCount ); + Result result = static_cast( d.vkAllocateCommandBuffers( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( commandBuffers.data() ) ) ); + return createResultValue( result, commandBuffers, VULKAN_HPP_NAMESPACE_STRING"::Device::allocateCommandBuffers" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType,Allocator>>::type Device::allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, Dispatch const &d ) const + { + static_assert( sizeof( CommandBuffer ) <= sizeof( UniqueCommandBuffer ), "CommandBuffer is greater than UniqueCommandBuffer!" ); + std::vector commandBuffers; + commandBuffers.reserve( allocateInfo.commandBufferCount ); + CommandBuffer* buffer = reinterpret_cast( reinterpret_cast( commandBuffers.data() ) + allocateInfo.commandBufferCount * ( sizeof( UniqueCommandBuffer ) - sizeof( CommandBuffer ) ) ); + Result result = static_cast(d.vkAllocateCommandBuffers( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( buffer ) ) ); + + PoolFree deleter( *this, allocateInfo.commandPool, d ); + for ( size_t i=0 ; i + VULKAN_HPP_INLINE void Device::freeCommandBuffers( CommandPool commandPool, uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers, Dispatch const &d) const + { + d.vkFreeCommandBuffers( m_device, static_cast( commandPool ), commandBufferCount, reinterpret_cast( pCommandBuffers ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::freeCommandBuffers( CommandPool commandPool, ArrayProxy commandBuffers, Dispatch const &d ) const + { + d.vkFreeCommandBuffers( m_device, static_cast( commandPool ), commandBuffers.size() , reinterpret_cast( commandBuffers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::free( CommandPool commandPool, uint32_t commandBufferCount, const CommandBuffer* pCommandBuffers, Dispatch const &d) const + { + d.vkFreeCommandBuffers( m_device, static_cast( commandPool ), commandBufferCount, reinterpret_cast( pCommandBuffers ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::free( CommandPool commandPool, ArrayProxy commandBuffers, Dispatch const &d ) const + { + d.vkFreeCommandBuffers( m_device, static_cast( commandPool ), commandBuffers.size() , reinterpret_cast( commandBuffers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createSharedSwapchainsKHR( uint32_t swapchainCount, const SwapchainCreateInfoKHR* pCreateInfos, const AllocationCallbacks* pAllocator, SwapchainKHR* pSwapchains, Dispatch const &d) const + { + return static_cast( d.vkCreateSharedSwapchainsKHR( m_device, swapchainCount, reinterpret_cast( pCreateInfos ), reinterpret_cast( pAllocator ), 
reinterpret_cast( pSwapchains ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSharedSwapchainsKHR( ArrayProxy createInfos, Optional allocator, Dispatch const &d ) const + { + std::vector swapchains( createInfos.size() ); + Result result = static_cast( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size() , reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( swapchains.data() ) ) ); + return createResultValue( result, swapchains, VULKAN_HPP_NAMESPACE_STRING"::Device::createSharedSwapchainsKHR" ); + } + template + VULKAN_HPP_INLINE ResultValueType::type Device::createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SwapchainKHR swapchain; + Result result = static_cast( d.vkCreateSharedSwapchainsKHR( m_device, 1 , reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &swapchain ) ) ); + return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING"::Device::createSharedSwapchainKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType,Allocator>>::type Device::createSharedSwapchainsKHRUnique( ArrayProxy createInfos, Optional allocator, Dispatch const &d ) const + { + static_assert( sizeof( SwapchainKHR ) <= sizeof( UniqueSwapchainKHR ), "SwapchainKHR is greater than UniqueSwapchainKHR!" ); + std::vector swapchainKHRs; + swapchainKHRs.reserve( createInfos.size() ); + SwapchainKHR* buffer = reinterpret_cast( reinterpret_cast( swapchainKHRs.data() ) + createInfos.size() * ( sizeof( UniqueSwapchainKHR ) - sizeof( SwapchainKHR ) ) ); + Result result = static_cast(d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size() , reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( buffer ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0 ; i + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SwapchainKHR swapchain; + Result result = static_cast( d.vkCreateSharedSwapchainsKHR( m_device, 1 , reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &swapchain ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING"::Device::createSharedSwapchainKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createSwapchainKHR( const SwapchainCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SwapchainKHR* pSwapchain, Dispatch const &d) const + { + return static_cast( d.vkCreateSwapchainKHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSwapchain ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SwapchainKHR swapchain; + Result result = static_cast( d.vkCreateSwapchainKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &swapchain ) ) ); + return createResultValue( result, swapchain, 
VULKAN_HPP_NAMESPACE_STRING"::Device::createSwapchainKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SwapchainKHR swapchain; + Result result = static_cast( d.vkCreateSwapchainKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &swapchain ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING"::Device::createSwapchainKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroySwapchainKHR( SwapchainKHR swapchain, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySwapchainKHR( m_device, static_cast( swapchain ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroySwapchainKHR( SwapchainKHR swapchain, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySwapchainKHR( m_device, static_cast( swapchain ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( SwapchainKHR swapchain, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySwapchainKHR( m_device, static_cast( swapchain ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( SwapchainKHR swapchain, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySwapchainKHR( m_device, static_cast( swapchain ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getSwapchainImagesKHR( SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, Image* pSwapchainImages, Dispatch const &d) const + { + return static_cast( d.vkGetSwapchainImagesKHR( m_device, static_cast( swapchain ), pSwapchainImageCount, reinterpret_cast( pSwapchainImages ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::getSwapchainImagesKHR( SwapchainKHR swapchain, Dispatch const &d ) const + { + std::vector swapchainImages; + uint32_t swapchainImageCount; + Result result; + do + { + result = static_cast( d.vkGetSwapchainImagesKHR( m_device, static_cast( swapchain ), &swapchainImageCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && swapchainImageCount ) + { + swapchainImages.resize( swapchainImageCount ); + result = static_cast( d.vkGetSwapchainImagesKHR( m_device, static_cast( swapchain ), &swapchainImageCount, reinterpret_cast( swapchainImages.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() ); + swapchainImages.resize( swapchainImageCount ); + return createResultValue( result, swapchainImages, VULKAN_HPP_NAMESPACE_STRING"::Device::getSwapchainImagesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::acquireNextImageKHR( SwapchainKHR swapchain, uint64_t timeout, Semaphore semaphore, Fence fence, uint32_t* pImageIndex, Dispatch const &d) const + { + return static_cast( d.vkAcquireNextImageKHR( m_device, static_cast( swapchain 
), timeout, static_cast( semaphore ), static_cast( fence ), pImageIndex ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValue Device::acquireNextImageKHR( SwapchainKHR swapchain, uint64_t timeout, Semaphore semaphore, Fence fence, Dispatch const &d ) const + { + uint32_t imageIndex; + Result result = static_cast( d.vkAcquireNextImageKHR( m_device, static_cast( swapchain ), timeout, static_cast( semaphore ), static_cast( fence ), &imageIndex ) ); + return createResultValue( result, imageIndex, VULKAN_HPP_NAMESPACE_STRING"::Device::acquireNextImageKHR", { Result::eSuccess, Result::eTimeout, Result::eNotReady, Result::eSuboptimalKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT* pNameInfo, Dispatch const &d) const + { + return static_cast( d.vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast( pNameInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo, Dispatch const &d ) const + { + Result result = static_cast( d.vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast( &nameInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::debugMarkerSetObjectNameEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT* pTagInfo, Dispatch const &d) const + { + return static_cast( d.vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast( pTagInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo, Dispatch const &d ) const + { + Result result = static_cast( d.vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast( &tagInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::debugMarkerSetObjectTagEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_WIN32_NV + template + VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleNV( DeviceMemory memory, ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle, Dispatch const &d) const + { + return static_cast( d.vkGetMemoryWin32HandleNV( m_device, static_cast( memory ), static_cast( handleType ), pHandle ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getMemoryWin32HandleNV( DeviceMemory memory, ExternalMemoryHandleTypeFlagsNV handleType, Dispatch const &d ) const + { + HANDLE handle; + Result result = static_cast( d.vkGetMemoryWin32HandleNV( m_device, static_cast( memory ), static_cast( handleType ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryWin32HandleNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_NV*/ + + template + VULKAN_HPP_INLINE Result Device::createIndirectCommandsLayoutNVX( const IndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, IndirectCommandsLayoutNVX* pIndirectCommandsLayout, Dispatch const &d) const + { + return static_cast( d.vkCreateIndirectCommandsLayoutNVX( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pIndirectCommandsLayout ) ) ); + } +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createIndirectCommandsLayoutNVX( const IndirectCommandsLayoutCreateInfoNVX & createInfo, Optional allocator, Dispatch const &d ) const + { + IndirectCommandsLayoutNVX indirectCommandsLayout; + Result result = static_cast( d.vkCreateIndirectCommandsLayoutNVX( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &indirectCommandsLayout ) ) ); + return createResultValue( result, indirectCommandsLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createIndirectCommandsLayoutNVX" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createIndirectCommandsLayoutNVXUnique( const IndirectCommandsLayoutCreateInfoNVX & createInfo, Optional allocator, Dispatch const &d ) const + { + IndirectCommandsLayoutNVX indirectCommandsLayout; + Result result = static_cast( d.vkCreateIndirectCommandsLayoutNVX( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &indirectCommandsLayout ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, indirectCommandsLayout, VULKAN_HPP_NAMESPACE_STRING"::Device::createIndirectCommandsLayoutNVXUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNVX( IndirectCommandsLayoutNVX indirectCommandsLayout, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyIndirectCommandsLayoutNVX( m_device, static_cast( indirectCommandsLayout ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNVX( IndirectCommandsLayoutNVX indirectCommandsLayout, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyIndirectCommandsLayoutNVX( m_device, static_cast( indirectCommandsLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( IndirectCommandsLayoutNVX indirectCommandsLayout, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyIndirectCommandsLayoutNVX( m_device, static_cast( indirectCommandsLayout ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( IndirectCommandsLayoutNVX indirectCommandsLayout, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyIndirectCommandsLayoutNVX( m_device, static_cast( indirectCommandsLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createObjectTableNVX( const ObjectTableCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, ObjectTableNVX* pObjectTable, Dispatch const &d) const + { + return static_cast( d.vkCreateObjectTableNVX( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pObjectTable ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createObjectTableNVX( const ObjectTableCreateInfoNVX & createInfo, Optional allocator, Dispatch const &d ) const + { + ObjectTableNVX objectTable; + Result result = static_cast( d.vkCreateObjectTableNVX( m_device, reinterpret_cast( 
&createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &objectTable ) ) ); + return createResultValue( result, objectTable, VULKAN_HPP_NAMESPACE_STRING"::Device::createObjectTableNVX" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createObjectTableNVXUnique( const ObjectTableCreateInfoNVX & createInfo, Optional allocator, Dispatch const &d ) const + { + ObjectTableNVX objectTable; + Result result = static_cast( d.vkCreateObjectTableNVX( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &objectTable ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, objectTable, VULKAN_HPP_NAMESPACE_STRING"::Device::createObjectTableNVXUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyObjectTableNVX( ObjectTableNVX objectTable, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyObjectTableNVX( m_device, static_cast( objectTable ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyObjectTableNVX( ObjectTableNVX objectTable, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyObjectTableNVX( m_device, static_cast( objectTable ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( ObjectTableNVX objectTable, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyObjectTableNVX( m_device, static_cast( objectTable ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( ObjectTableNVX objectTable, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyObjectTableNVX( m_device, static_cast( objectTable ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::registerObjectsNVX( ObjectTableNVX objectTable, uint32_t objectCount, const ObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices, Dispatch const &d) const + { + return static_cast( d.vkRegisterObjectsNVX( m_device, static_cast( objectTable ), objectCount, reinterpret_cast( ppObjectTableEntries ), pObjectIndices ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::registerObjectsNVX( ObjectTableNVX objectTable, ArrayProxy pObjectTableEntries, ArrayProxy objectIndices, Dispatch const &d ) const + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( pObjectTableEntries.size() == objectIndices.size() ); +#else + if ( pObjectTableEntries.size() != objectIndices.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Device::registerObjectsNVX: pObjectTableEntries.size() != objectIndices.size()" ); + } +#endif // VULKAN_HPP_NO_EXCEPTIONS + Result result = static_cast( d.vkRegisterObjectsNVX( m_device, static_cast( objectTable ), pObjectTableEntries.size() , reinterpret_cast( pObjectTableEntries.data() ), objectIndices.data() ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::registerObjectsNVX" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::unregisterObjectsNVX( ObjectTableNVX 
objectTable, uint32_t objectCount, const ObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices, Dispatch const &d) const + { + return static_cast( d.vkUnregisterObjectsNVX( m_device, static_cast( objectTable ), objectCount, reinterpret_cast( pObjectEntryTypes ), pObjectIndices ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::unregisterObjectsNVX( ObjectTableNVX objectTable, ArrayProxy objectEntryTypes, ArrayProxy objectIndices, Dispatch const &d ) const + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( objectEntryTypes.size() == objectIndices.size() ); +#else + if ( objectEntryTypes.size() != objectIndices.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Device::unregisterObjectsNVX: objectEntryTypes.size() != objectIndices.size()" ); + } +#endif // VULKAN_HPP_NO_EXCEPTIONS + Result result = static_cast( d.vkUnregisterObjectsNVX( m_device, static_cast( objectTable ), objectEntryTypes.size() , reinterpret_cast( objectEntryTypes.data() ), objectIndices.data() ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::unregisterObjectsNVX" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::trimCommandPool( CommandPool commandPool, CommandPoolTrimFlags flags, Dispatch const &d) const + { + d.vkTrimCommandPool( m_device, static_cast( commandPool ), static_cast( flags ) ); + } +#else + template + VULKAN_HPP_INLINE void Device::trimCommandPool( CommandPool commandPool, CommandPoolTrimFlags flags, Dispatch const &d ) const + { + d.vkTrimCommandPool( m_device, static_cast( commandPool ), static_cast( flags ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::trimCommandPoolKHR( CommandPool commandPool, CommandPoolTrimFlags flags, Dispatch const &d) const + { + d.vkTrimCommandPoolKHR( m_device, static_cast( commandPool ), static_cast( flags ) ); + } +#else + template + VULKAN_HPP_INLINE void Device::trimCommandPoolKHR( CommandPool commandPool, CommandPoolTrimFlags flags, Dispatch const &d ) const + { + d.vkTrimCommandPoolKHR( m_device, static_cast( commandPool ), static_cast( flags ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d) const + { + return static_cast( d.vkGetMemoryWin32HandleKHR( m_device, reinterpret_cast( pGetWin32HandleInfo ), pHandle ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d ) const + { + HANDLE handle; + Result result = static_cast( d.vkGetMemoryWin32HandleKHR( m_device, reinterpret_cast( &getWin32HandleInfo ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_INLINE Result Device::getMemoryWin32HandlePropertiesKHR( ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, MemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties, Dispatch const &d) const + { + return static_cast( 
d.vkGetMemoryWin32HandlePropertiesKHR( m_device, static_cast( handleType ), handle, reinterpret_cast( pMemoryWin32HandleProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getMemoryWin32HandlePropertiesKHR( ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, Dispatch const &d ) const + { + MemoryWin32HandlePropertiesKHR memoryWin32HandleProperties; + Result result = static_cast( d.vkGetMemoryWin32HandlePropertiesKHR( m_device, static_cast( handleType ), handle, reinterpret_cast( &memoryWin32HandleProperties ) ) ); + return createResultValue( result, memoryWin32HandleProperties, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryWin32HandlePropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + template + VULKAN_HPP_INLINE Result Device::getMemoryFdKHR( const MemoryGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d) const + { + return static_cast( d.vkGetMemoryFdKHR( m_device, reinterpret_cast( pGetFdInfo ), pFd ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo, Dispatch const &d ) const + { + int fd; + Result result = static_cast( d.vkGetMemoryFdKHR( m_device, reinterpret_cast( &getFdInfo ), &fd ) ); + return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getMemoryFdPropertiesKHR( ExternalMemoryHandleTypeFlagBits handleType, int fd, MemoryFdPropertiesKHR* pMemoryFdProperties, Dispatch const &d) const + { + return static_cast( d.vkGetMemoryFdPropertiesKHR( m_device, static_cast( handleType ), fd, reinterpret_cast( pMemoryFdProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getMemoryFdPropertiesKHR( ExternalMemoryHandleTypeFlagBits handleType, int fd, Dispatch const &d ) const + { + MemoryFdPropertiesKHR memoryFdProperties; + Result result = static_cast( d.vkGetMemoryFdPropertiesKHR( m_device, static_cast( handleType ), fd, reinterpret_cast( &memoryFdProperties ) ) ); + return createResultValue( result, memoryFdProperties, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryFdPropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_INLINE Result Device::getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d) const + { + return static_cast( d.vkGetSemaphoreWin32HandleKHR( m_device, reinterpret_cast( pGetWin32HandleInfo ), pHandle ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d ) const + { + HANDLE handle; + Result result = static_cast( d.vkGetSemaphoreWin32HandleKHR( m_device, reinterpret_cast( &getWin32HandleInfo ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING"::Device::getSemaphoreWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_INLINE Result Device::importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo, Dispatch const &d) const + { + return 
static_cast( d.vkImportSemaphoreWin32HandleKHR( m_device, reinterpret_cast( pImportSemaphoreWin32HandleInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo, Dispatch const &d ) const + { + Result result = static_cast( d.vkImportSemaphoreWin32HandleKHR( m_device, reinterpret_cast( &importSemaphoreWin32HandleInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::importSemaphoreWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + template + VULKAN_HPP_INLINE Result Device::getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d) const + { + return static_cast( d.vkGetSemaphoreFdKHR( m_device, reinterpret_cast( pGetFdInfo ), pFd ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR & getFdInfo, Dispatch const &d ) const + { + int fd; + Result result = static_cast( d.vkGetSemaphoreFdKHR( m_device, reinterpret_cast( &getFdInfo ), &fd ) ); + return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING"::Device::getSemaphoreFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo, Dispatch const &d) const + { + return static_cast( d.vkImportSemaphoreFdKHR( m_device, reinterpret_cast( pImportSemaphoreFdInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo, Dispatch const &d ) const + { + Result result = static_cast( d.vkImportSemaphoreFdKHR( m_device, reinterpret_cast( &importSemaphoreFdInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::importSemaphoreFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_INLINE Result Device::getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const &d) const + { + return static_cast( d.vkGetFenceWin32HandleKHR( m_device, reinterpret_cast( pGetWin32HandleInfo ), pHandle ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const &d ) const + { + HANDLE handle; + Result result = static_cast( d.vkGetFenceWin32HandleKHR( m_device, reinterpret_cast( &getWin32HandleInfo ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING"::Device::getFenceWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_INLINE Result Device::importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo, Dispatch const &d) const + { + return static_cast( d.vkImportFenceWin32HandleKHR( m_device, reinterpret_cast( pImportFenceWin32HandleInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo, Dispatch const &d ) const + { + 
Result result = static_cast( d.vkImportFenceWin32HandleKHR( m_device, reinterpret_cast( &importFenceWin32HandleInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::importFenceWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + template + VULKAN_HPP_INLINE Result Device::getFenceFdKHR( const FenceGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const &d) const + { + return static_cast( d.vkGetFenceFdKHR( m_device, reinterpret_cast( pGetFdInfo ), pFd ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo, Dispatch const &d ) const + { + int fd; + Result result = static_cast( d.vkGetFenceFdKHR( m_device, reinterpret_cast( &getFdInfo ), &fd ) ); + return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING"::Device::getFenceFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::importFenceFdKHR( const ImportFenceFdInfoKHR* pImportFenceFdInfo, Dispatch const &d) const + { + return static_cast( d.vkImportFenceFdKHR( m_device, reinterpret_cast( pImportFenceFdInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo, Dispatch const &d ) const + { + Result result = static_cast( d.vkImportFenceFdKHR( m_device, reinterpret_cast( &importFenceFdInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::importFenceFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::displayPowerControlEXT( DisplayKHR display, const DisplayPowerInfoEXT* pDisplayPowerInfo, Dispatch const &d) const + { + return static_cast( d.vkDisplayPowerControlEXT( m_device, static_cast( display ), reinterpret_cast( pDisplayPowerInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::displayPowerControlEXT( DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo, Dispatch const &d ) const + { + Result result = static_cast( d.vkDisplayPowerControlEXT( m_device, static_cast( display ), reinterpret_cast( &displayPowerInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::displayPowerControlEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::registerEventEXT( const DeviceEventInfoEXT* pDeviceEventInfo, const AllocationCallbacks* pAllocator, Fence* pFence, Dispatch const &d) const + { + return static_cast( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast( pDeviceEventInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pFence ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, Optional allocator, Dispatch const &d ) const + { + Fence fence; + Result result = static_cast( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast( &deviceEventInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::registerEventEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::registerDisplayEventEXT( DisplayKHR display, const DisplayEventInfoEXT* 
pDisplayEventInfo, const AllocationCallbacks* pAllocator, Fence* pFence, Dispatch const &d) const + { + return static_cast( d.vkRegisterDisplayEventEXT( m_device, static_cast( display ), reinterpret_cast( pDisplayEventInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pFence ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::registerDisplayEventEXT( DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional allocator, Dispatch const &d ) const + { + Fence fence; + Result result = static_cast( d.vkRegisterDisplayEventEXT( m_device, static_cast( display ), reinterpret_cast( &displayEventInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING"::Device::registerDisplayEventEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getSwapchainCounterEXT( SwapchainKHR swapchain, SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue, Dispatch const &d) const + { + return static_cast( d.vkGetSwapchainCounterEXT( m_device, static_cast( swapchain ), static_cast( counter ), pCounterValue ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getSwapchainCounterEXT( SwapchainKHR swapchain, SurfaceCounterFlagBitsEXT counter, Dispatch const &d ) const + { + uint64_t counterValue; + Result result = static_cast( d.vkGetSwapchainCounterEXT( m_device, static_cast( swapchain ), static_cast( counter ), &counterValue ) ); + return createResultValue( result, counterValue, VULKAN_HPP_NAMESPACE_STRING"::Device::getSwapchainCounterEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const &d) const + { + d.vkGetDeviceGroupPeerMemoryFeatures( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast( pPeerMemoryFeatures ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PeerMemoryFeatureFlags Device::getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const &d ) const + { + PeerMemoryFeatureFlags peerMemoryFeatures; + d.vkGetDeviceGroupPeerMemoryFeatures( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast( &peerMemoryFeatures ) ); + return peerMemoryFeatures; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const &d) const + { + d.vkGetDeviceGroupPeerMemoryFeaturesKHR( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast( pPeerMemoryFeatures ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PeerMemoryFeatureFlags Device::getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const &d ) const + { + PeerMemoryFeatureFlags peerMemoryFeatures; + d.vkGetDeviceGroupPeerMemoryFeaturesKHR( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast( &peerMemoryFeatures ) ); + return peerMemoryFeatures; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + 
VULKAN_HPP_INLINE Result Device::bindBufferMemory2( uint32_t bindInfoCount, const BindBufferMemoryInfo* pBindInfos, Dispatch const &d) const + { + return static_cast( d.vkBindBufferMemory2( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::bindBufferMemory2( ArrayProxy bindInfos, Dispatch const &d ) const + { + Result result = static_cast( d.vkBindBufferMemory2( m_device, bindInfos.size() , reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindBufferMemory2" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::bindBufferMemory2KHR( uint32_t bindInfoCount, const BindBufferMemoryInfo* pBindInfos, Dispatch const &d) const + { + return static_cast( d.vkBindBufferMemory2KHR( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::bindBufferMemory2KHR( ArrayProxy bindInfos, Dispatch const &d ) const + { + Result result = static_cast( d.vkBindBufferMemory2KHR( m_device, bindInfos.size() , reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindBufferMemory2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::bindImageMemory2( uint32_t bindInfoCount, const BindImageMemoryInfo* pBindInfos, Dispatch const &d) const + { + return static_cast( d.vkBindImageMemory2( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::bindImageMemory2( ArrayProxy bindInfos, Dispatch const &d ) const + { + Result result = static_cast( d.vkBindImageMemory2( m_device, bindInfos.size() , reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindImageMemory2" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::bindImageMemory2KHR( uint32_t bindInfoCount, const BindImageMemoryInfo* pBindInfos, Dispatch const &d) const + { + return static_cast( d.vkBindImageMemory2KHR( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::bindImageMemory2KHR( ArrayProxy bindInfos, Dispatch const &d ) const + { + Result result = static_cast( d.vkBindImageMemory2KHR( m_device, bindInfos.size() , reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindImageMemory2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getGroupPresentCapabilitiesKHR( DeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities, Dispatch const &d) const + { + return static_cast( d.vkGetDeviceGroupPresentCapabilitiesKHR( m_device, reinterpret_cast( pDeviceGroupPresentCapabilities ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getGroupPresentCapabilitiesKHR(Dispatch const &d ) const + { + DeviceGroupPresentCapabilitiesKHR deviceGroupPresentCapabilities; + Result result = static_cast( d.vkGetDeviceGroupPresentCapabilitiesKHR( m_device, reinterpret_cast( &deviceGroupPresentCapabilities ) ) ); + return 
createResultValue( result, deviceGroupPresentCapabilities, VULKAN_HPP_NAMESPACE_STRING"::Device::getGroupPresentCapabilitiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getGroupSurfacePresentModesKHR( SurfaceKHR surface, DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const &d) const + { + return static_cast( d.vkGetDeviceGroupSurfacePresentModesKHR( m_device, static_cast( surface ), reinterpret_cast( pModes ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getGroupSurfacePresentModesKHR( SurfaceKHR surface, Dispatch const &d ) const + { + DeviceGroupPresentModeFlagsKHR modes; + Result result = static_cast( d.vkGetDeviceGroupSurfacePresentModesKHR( m_device, static_cast( surface ), reinterpret_cast( &modes ) ) ); + return createResultValue( result, modes, VULKAN_HPP_NAMESPACE_STRING"::Device::getGroupSurfacePresentModesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::acquireNextImage2KHR( const AcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex, Dispatch const &d) const + { + return static_cast( d.vkAcquireNextImage2KHR( m_device, reinterpret_cast( pAcquireInfo ), pImageIndex ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValue Device::acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo, Dispatch const &d ) const + { + uint32_t imageIndex; + Result result = static_cast( d.vkAcquireNextImage2KHR( m_device, reinterpret_cast( &acquireInfo ), &imageIndex ) ); + return createResultValue( result, imageIndex, VULKAN_HPP_NAMESPACE_STRING"::Device::acquireNextImage2KHR", { Result::eSuccess, Result::eTimeout, Result::eNotReady, Result::eSuboptimalKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const &d) const + { + return static_cast( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pDescriptorUpdateTemplate ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + DescriptorUpdateTemplate descriptorUpdateTemplate; + Result result = static_cast( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &descriptorUpdateTemplate ) ) ); + return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorUpdateTemplate" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createDescriptorUpdateTemplateUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + DescriptorUpdateTemplate descriptorUpdateTemplate; + Result result = static_cast( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &descriptorUpdateTemplate ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, 
descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorUpdateTemplateUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const &d) const + { + return static_cast( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pDescriptorUpdateTemplate ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + DescriptorUpdateTemplate descriptorUpdateTemplate; + Result result = static_cast( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &descriptorUpdateTemplate ) ) ); + return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorUpdateTemplateKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + DescriptorUpdateTemplate descriptorUpdateTemplate; + Result result = static_cast( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &descriptorUpdateTemplate ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING"::Device::createDescriptorUpdateTemplateKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplate( DescriptorUpdateTemplate descriptorUpdateTemplate, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplate( DescriptorUpdateTemplate descriptorUpdateTemplate, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( DescriptorUpdateTemplate descriptorUpdateTemplate, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( DescriptorUpdateTemplate descriptorUpdateTemplate, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + 
VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( DescriptorUpdateTemplate descriptorUpdateTemplate, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( DescriptorUpdateTemplate descriptorUpdateTemplate, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplate( DescriptorSet descriptorSet, DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d) const + { + d.vkUpdateDescriptorSetWithTemplate( m_device, static_cast( descriptorSet ), static_cast( descriptorUpdateTemplate ), pData ); + } +#else + template + VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplate( DescriptorSet descriptorSet, DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d ) const + { + d.vkUpdateDescriptorSetWithTemplate( m_device, static_cast( descriptorSet ), static_cast( descriptorUpdateTemplate ), pData ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplateKHR( DescriptorSet descriptorSet, DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d) const + { + d.vkUpdateDescriptorSetWithTemplateKHR( m_device, static_cast( descriptorSet ), static_cast( descriptorUpdateTemplate ), pData ); + } +#else + template + VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplateKHR( DescriptorSet descriptorSet, DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const &d ) const + { + d.vkUpdateDescriptorSetWithTemplateKHR( m_device, static_cast( descriptorSet ), static_cast( descriptorUpdateTemplate ), pData ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( uint32_t swapchainCount, const SwapchainKHR* pSwapchains, const HdrMetadataEXT* pMetadata, Dispatch const &d) const + { + d.vkSetHdrMetadataEXT( m_device, swapchainCount, reinterpret_cast( pSwapchains ), reinterpret_cast( pMetadata ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( ArrayProxy swapchains, ArrayProxy metadata, Dispatch const &d ) const + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( swapchains.size() == metadata.size() ); +#else + if ( swapchains.size() != metadata.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Device::setHdrMetadataEXT: swapchains.size() != metadata.size()" ); + } +#endif // VULKAN_HPP_NO_EXCEPTIONS + d.vkSetHdrMetadataEXT( m_device, swapchains.size() , reinterpret_cast( swapchains.data() ), reinterpret_cast( metadata.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( SwapchainKHR swapchain, Dispatch const &d) const + { + return static_cast( d.vkGetSwapchainStatusKHR( m_device, static_cast( swapchain ) ) ); + } +#else + template + 
VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( SwapchainKHR swapchain, Dispatch const &d ) const + { + Result result = static_cast( d.vkGetSwapchainStatusKHR( m_device, static_cast( swapchain ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getSwapchainStatusKHR", { Result::eSuccess, Result::eSuboptimalKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getRefreshCycleDurationGOOGLE( SwapchainKHR swapchain, RefreshCycleDurationGOOGLE* pDisplayTimingProperties, Dispatch const &d) const + { + return static_cast( d.vkGetRefreshCycleDurationGOOGLE( m_device, static_cast( swapchain ), reinterpret_cast( pDisplayTimingProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getRefreshCycleDurationGOOGLE( SwapchainKHR swapchain, Dispatch const &d ) const + { + RefreshCycleDurationGOOGLE displayTimingProperties; + Result result = static_cast( d.vkGetRefreshCycleDurationGOOGLE( m_device, static_cast( swapchain ), reinterpret_cast( &displayTimingProperties ) ) ); + return createResultValue( result, displayTimingProperties, VULKAN_HPP_NAMESPACE_STRING"::Device::getRefreshCycleDurationGOOGLE" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getPastPresentationTimingGOOGLE( SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, PastPresentationTimingGOOGLE* pPresentationTimings, Dispatch const &d) const + { + return static_cast( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast( swapchain ), pPresentationTimingCount, reinterpret_cast( pPresentationTimings ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPastPresentationTimingGOOGLE( SwapchainKHR swapchain, Dispatch const &d ) const + { + std::vector presentationTimings; + uint32_t presentationTimingCount; + Result result; + do + { + result = static_cast( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast( swapchain ), &presentationTimingCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && presentationTimingCount ) + { + presentationTimings.resize( presentationTimingCount ); + result = static_cast( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast( swapchain ), &presentationTimingCount, reinterpret_cast( presentationTimings.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() ); + presentationTimings.resize( presentationTimingCount ); + return createResultValue( result, presentationTimings, VULKAN_HPP_NAMESPACE_STRING"::Device::getPastPresentationTimingGOOGLE" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2* pInfo, MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const + { + d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast( pInfo ), reinterpret_cast( pMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE MemoryRequirements2 Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + MemoryRequirements2 memoryRequirements; + d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + template + VULKAN_HPP_INLINE 
StructureChain Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + StructureChain structureChain; + MemoryRequirements2& memoryRequirements = structureChain.template get(); + d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2* pInfo, MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const + { + d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast( pInfo ), reinterpret_cast( pMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE MemoryRequirements2 Device::getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + MemoryRequirements2 memoryRequirements; + d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + template + VULKAN_HPP_INLINE StructureChain Device::getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + StructureChain structureChain; + MemoryRequirements2& memoryRequirements = structureChain.template get(); + d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2* pInfo, MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const + { + d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast( pInfo ), reinterpret_cast( pMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE MemoryRequirements2 Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + MemoryRequirements2 memoryRequirements; + d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + template + VULKAN_HPP_INLINE StructureChain Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + StructureChain structureChain; + MemoryRequirements2& memoryRequirements = structureChain.template get(); + d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2* pInfo, MemoryRequirements2* pMemoryRequirements, Dispatch const &d) const + { + d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast( pInfo ), reinterpret_cast( pMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE MemoryRequirements2 Device::getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + MemoryRequirements2 memoryRequirements; + d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + template + VULKAN_HPP_INLINE StructureChain 
Device::getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + StructureChain structureChain; + MemoryRequirements2& memoryRequirements = structureChain.template get(); + d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const &d) const + { + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast( pInfo ), pSparseMemoryRequirementCount, reinterpret_cast( pSparseMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE std::vector Device::getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + std::vector sparseMemoryRequirements; + uint32_t sparseMemoryRequirementCount; + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, nullptr ); + sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, reinterpret_cast( sparseMemoryRequirements.data() ) ); + return sparseMemoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const &d) const + { + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast( pInfo ), pSparseMemoryRequirementCount, reinterpret_cast( pSparseMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE std::vector Device::getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const &d ) const + { + std::vector sparseMemoryRequirements; + uint32_t sparseMemoryRequirementCount; + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, nullptr ); + sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, reinterpret_cast( sparseMemoryRequirements.data() ) ); + return sparseMemoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, SamplerYcbcrConversion* pYcbcrConversion, Dispatch const &d) const + { + return static_cast( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pYcbcrConversion ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + SamplerYcbcrConversion ycbcrConversion; + Result result = static_cast( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( 
allocator ) ), reinterpret_cast( &ycbcrConversion ) ) ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerYcbcrConversion" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSamplerYcbcrConversionUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + SamplerYcbcrConversion ycbcrConversion; + Result result = static_cast( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &ycbcrConversion ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerYcbcrConversionUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, SamplerYcbcrConversion* pYcbcrConversion, Dispatch const &d) const + { + return static_cast( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pYcbcrConversion ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + SamplerYcbcrConversion ycbcrConversion; + Result result = static_cast( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &ycbcrConversion ) ) ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerYcbcrConversionKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSamplerYcbcrConversionKHRUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator, Dispatch const &d ) const + { + SamplerYcbcrConversion ycbcrConversion; + Result result = static_cast( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &ycbcrConversion ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING"::Device::createSamplerYcbcrConversionKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversion( SamplerYcbcrConversion ycbcrConversion, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySamplerYcbcrConversion( m_device, static_cast( ycbcrConversion ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversion( SamplerYcbcrConversion ycbcrConversion, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySamplerYcbcrConversion( m_device, static_cast( ycbcrConversion ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( SamplerYcbcrConversion ycbcrConversion, const 
AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySamplerYcbcrConversion( m_device, static_cast( ycbcrConversion ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( SamplerYcbcrConversion ycbcrConversion, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySamplerYcbcrConversion( m_device, static_cast( ycbcrConversion ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversionKHR( SamplerYcbcrConversion ycbcrConversion, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySamplerYcbcrConversionKHR( m_device, static_cast( ycbcrConversion ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversionKHR( SamplerYcbcrConversion ycbcrConversion, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySamplerYcbcrConversionKHR( m_device, static_cast( ycbcrConversion ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getQueue2( const DeviceQueueInfo2* pQueueInfo, Queue* pQueue, Dispatch const &d) const + { + d.vkGetDeviceQueue2( m_device, reinterpret_cast( pQueueInfo ), reinterpret_cast( pQueue ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Queue Device::getQueue2( const DeviceQueueInfo2 & queueInfo, Dispatch const &d ) const + { + Queue queue; + d.vkGetDeviceQueue2( m_device, reinterpret_cast( &queueInfo ), reinterpret_cast( &queue ) ); + return queue; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createValidationCacheEXT( const ValidationCacheCreateInfoEXT* pCreateInfo, const AllocationCallbacks* pAllocator, ValidationCacheEXT* pValidationCache, Dispatch const &d) const + { + return static_cast( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pValidationCache ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createValidationCacheEXT( const ValidationCacheCreateInfoEXT & createInfo, Optional allocator, Dispatch const &d ) const + { + ValidationCacheEXT validationCache; + Result result = static_cast( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &validationCache ) ) ); + return createResultValue( result, validationCache, VULKAN_HPP_NAMESPACE_STRING"::Device::createValidationCacheEXT" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createValidationCacheEXTUnique( const ValidationCacheCreateInfoEXT & createInfo, Optional allocator, Dispatch const &d ) const + { + ValidationCacheEXT validationCache; + Result result = static_cast( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &validationCache ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, validationCache, VULKAN_HPP_NAMESPACE_STRING"::Device::createValidationCacheEXTUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + 
VULKAN_HPP_INLINE void Device::destroyValidationCacheEXT( ValidationCacheEXT validationCache, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyValidationCacheEXT( m_device, static_cast( validationCache ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyValidationCacheEXT( ValidationCacheEXT validationCache, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyValidationCacheEXT( m_device, static_cast( validationCache ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( ValidationCacheEXT validationCache, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyValidationCacheEXT( m_device, static_cast( validationCache ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( ValidationCacheEXT validationCache, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyValidationCacheEXT( m_device, static_cast( validationCache ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getValidationCacheDataEXT( ValidationCacheEXT validationCache, size_t* pDataSize, void* pData, Dispatch const &d) const + { + return static_cast( d.vkGetValidationCacheDataEXT( m_device, static_cast( validationCache ), pDataSize, pData ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::getValidationCacheDataEXT( ValidationCacheEXT validationCache, Dispatch const &d ) const + { + std::vector data; + size_t dataSize; + Result result; + do + { + result = static_cast( d.vkGetValidationCacheDataEXT( m_device, static_cast( validationCache ), &dataSize, nullptr ) ); + if ( ( result == Result::eSuccess ) && dataSize ) + { + data.resize( dataSize ); + result = static_cast( d.vkGetValidationCacheDataEXT( m_device, static_cast( validationCache ), &dataSize, reinterpret_cast( data.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); + data.resize( dataSize ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getValidationCacheDataEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::mergeValidationCachesEXT( ValidationCacheEXT dstCache, uint32_t srcCacheCount, const ValidationCacheEXT* pSrcCaches, Dispatch const &d) const + { + return static_cast( d.vkMergeValidationCachesEXT( m_device, static_cast( dstCache ), srcCacheCount, reinterpret_cast( pSrcCaches ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::mergeValidationCachesEXT( ValidationCacheEXT dstCache, ArrayProxy srcCaches, Dispatch const &d ) const + { + Result result = static_cast( d.vkMergeValidationCachesEXT( m_device, static_cast( dstCache ), srcCaches.size() , reinterpret_cast( srcCaches.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::mergeValidationCachesEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo* pCreateInfo, DescriptorSetLayoutSupport* pSupport, Dispatch const &d) const + { + d.vkGetDescriptorSetLayoutSupport( 
m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pSupport ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE DescriptorSetLayoutSupport Device::getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d ) const + { + DescriptorSetLayoutSupport support; + d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( &support ) ); + return support; + } + template + VULKAN_HPP_INLINE StructureChain Device::getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d ) const + { + StructureChain structureChain; + DescriptorSetLayoutSupport& support = structureChain.template get(); + d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( &support ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo* pCreateInfo, DescriptorSetLayoutSupport* pSupport, Dispatch const &d) const + { + d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pSupport ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE DescriptorSetLayoutSupport Device::getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d ) const + { + DescriptorSetLayoutSupport support; + d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( &support ) ); + return support; + } + template + VULKAN_HPP_INLINE StructureChain Device::getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const &d ) const + { + StructureChain structureChain; + DescriptorSetLayoutSupport& support = structureChain.template get(); + d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( &support ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getShaderInfoAMD( Pipeline pipeline, ShaderStageFlagBits shaderStage, ShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo, Dispatch const &d) const + { + return static_cast( d.vkGetShaderInfoAMD( m_device, static_cast( pipeline ), static_cast( shaderStage ), static_cast( infoType ), pInfoSize, pInfo ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::getShaderInfoAMD( Pipeline pipeline, ShaderStageFlagBits shaderStage, ShaderInfoTypeAMD infoType, Dispatch const &d ) const + { + std::vector info; + size_t infoSize; + Result result; + do + { + result = static_cast( d.vkGetShaderInfoAMD( m_device, static_cast( pipeline ), static_cast( shaderStage ), static_cast( infoType ), &infoSize, nullptr ) ); + if ( ( result == Result::eSuccess ) && infoSize ) + { + info.resize( infoSize ); + result = static_cast( d.vkGetShaderInfoAMD( m_device, static_cast( pipeline ), static_cast( shaderStage ), static_cast( infoType ), &infoSize, reinterpret_cast( info.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( infoSize <= info.size() ); + info.resize( infoSize ); + return createResultValue( result, info, VULKAN_HPP_NAMESPACE_STRING"::Device::getShaderInfoAMD" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::setDebugUtilsObjectNameEXT( const 
DebugUtilsObjectNameInfoEXT* pNameInfo, Dispatch const &d) const + { + return static_cast( d.vkSetDebugUtilsObjectNameEXT( m_device, reinterpret_cast( pNameInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo, Dispatch const &d ) const + { + Result result = static_cast( d.vkSetDebugUtilsObjectNameEXT( m_device, reinterpret_cast( &nameInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::setDebugUtilsObjectNameEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT* pTagInfo, Dispatch const &d) const + { + return static_cast( d.vkSetDebugUtilsObjectTagEXT( m_device, reinterpret_cast( pTagInfo ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo, Dispatch const &d ) const + { + Result result = static_cast( d.vkSetDebugUtilsObjectTagEXT( m_device, reinterpret_cast( &tagInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::setDebugUtilsObjectTagEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getMemoryHostPointerPropertiesEXT( ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, MemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties, Dispatch const &d) const + { + return static_cast( d.vkGetMemoryHostPointerPropertiesEXT( m_device, static_cast( handleType ), pHostPointer, reinterpret_cast( pMemoryHostPointerProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getMemoryHostPointerPropertiesEXT( ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, Dispatch const &d ) const + { + MemoryHostPointerPropertiesEXT memoryHostPointerProperties; + Result result = static_cast( d.vkGetMemoryHostPointerPropertiesEXT( m_device, static_cast( handleType ), pHostPointer, reinterpret_cast( &memoryHostPointerProperties ) ) ); + return createResultValue( result, memoryHostPointerProperties, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryHostPointerPropertiesEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createRenderPass2KHR( const RenderPassCreateInfo2KHR* pCreateInfo, const AllocationCallbacks* pAllocator, RenderPass* pRenderPass, Dispatch const &d) const + { + return static_cast( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pRenderPass ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createRenderPass2KHR( const RenderPassCreateInfo2KHR & createInfo, Optional allocator, Dispatch const &d ) const + { + RenderPass renderPass; + Result result = static_cast( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &renderPass ) ) ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPass2KHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createRenderPass2KHRUnique( const RenderPassCreateInfo2KHR & createInfo, Optional allocator, 
Dispatch const &d ) const + { + RenderPass renderPass; + Result result = static_cast( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &renderPass ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING"::Device::createRenderPass2KHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + template + VULKAN_HPP_INLINE Result Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer* buffer, AndroidHardwareBufferPropertiesANDROID* pProperties, Dispatch const &d) const + { + return static_cast( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, buffer, reinterpret_cast( pProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const &d ) const + { + AndroidHardwareBufferPropertiesANDROID properties; + Result result = static_cast( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, buffer, reinterpret_cast( &properties ) ) ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::Device::getAndroidHardwareBufferPropertiesANDROID" ); + } + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const &d ) const + { + StructureChain structureChain; + AndroidHardwareBufferPropertiesANDROID& properties = structureChain.template get(); + Result result = static_cast( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, buffer, reinterpret_cast( &properties ) ) ); + return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::Device::getAndroidHardwareBufferPropertiesANDROID" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + template + VULKAN_HPP_INLINE Result Device::getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer, Dispatch const &d) const + { + return static_cast( d.vkGetMemoryAndroidHardwareBufferANDROID( m_device, reinterpret_cast( pInfo ), pBuffer ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info, Dispatch const &d ) const + { + struct AHardwareBuffer* buffer; + Result result = static_cast( d.vkGetMemoryAndroidHardwareBufferANDROID( m_device, reinterpret_cast( &info ), &buffer ) ); + return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING"::Device::getMemoryAndroidHardwareBufferANDROID" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::compileDeferredNVX( Pipeline pipeline, uint32_t shader, Dispatch const &d) const + { + return static_cast( d.vkCompileDeferredNVX( m_device, static_cast( pipeline ), shader ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type Device::compileDeferredNVX( Pipeline pipeline, uint32_t shader, Dispatch const &d ) const + { + Result result = static_cast( d.vkCompileDeferredNVX( m_device, static_cast( 
pipeline ), shader ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::compileDeferredNVX" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createAccelerationStructureNVX( const AccelerationStructureCreateInfoNVX* pCreateInfo, const AllocationCallbacks* pAllocator, AccelerationStructureNVX* pAccelerationStructure, Dispatch const &d) const + { + return static_cast( d.vkCreateAccelerationStructureNVX( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pAccelerationStructure ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::createAccelerationStructureNVX( const AccelerationStructureCreateInfoNVX & createInfo, Optional allocator, Dispatch const &d ) const + { + AccelerationStructureNVX accelerationStructure; + Result result = static_cast( d.vkCreateAccelerationStructureNVX( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &accelerationStructure ) ) ); + return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING"::Device::createAccelerationStructureNVX" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createAccelerationStructureNVXUnique( const AccelerationStructureCreateInfoNVX & createInfo, Optional allocator, Dispatch const &d ) const + { + AccelerationStructureNVX accelerationStructure; + Result result = static_cast( d.vkCreateAccelerationStructureNVX( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &accelerationStructure ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING"::Device::createAccelerationStructureNVXUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyAccelerationStructureNVX( AccelerationStructureNVX accelerationStructure, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyAccelerationStructureNVX( m_device, static_cast( accelerationStructure ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyAccelerationStructureNVX( AccelerationStructureNVX accelerationStructure, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyAccelerationStructureNVX( m_device, static_cast( accelerationStructure ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroy( AccelerationStructureNVX accelerationStructure, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyAccelerationStructureNVX( m_device, static_cast( accelerationStructure ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( AccelerationStructureNVX accelerationStructure, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyAccelerationStructureNVX( m_device, static_cast( accelerationStructure ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getAccelerationStructureMemoryRequirementsNVX( const 
AccelerationStructureMemoryRequirementsInfoNVX* pInfo, MemoryRequirements2KHR* pMemoryRequirements, Dispatch const &d) const + { + d.vkGetAccelerationStructureMemoryRequirementsNVX( m_device, reinterpret_cast( pInfo ), reinterpret_cast( pMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE MemoryRequirements2KHR Device::getAccelerationStructureMemoryRequirementsNVX( const AccelerationStructureMemoryRequirementsInfoNVX & info, Dispatch const &d ) const + { + MemoryRequirements2KHR memoryRequirements; + d.vkGetAccelerationStructureMemoryRequirementsNVX( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getAccelerationStructureScratchMemoryRequirementsNVX( const AccelerationStructureMemoryRequirementsInfoNVX* pInfo, MemoryRequirements2KHR* pMemoryRequirements, Dispatch const &d) const + { + d.vkGetAccelerationStructureScratchMemoryRequirementsNVX( m_device, reinterpret_cast( pInfo ), reinterpret_cast( pMemoryRequirements ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE MemoryRequirements2KHR Device::getAccelerationStructureScratchMemoryRequirementsNVX( const AccelerationStructureMemoryRequirementsInfoNVX & info, Dispatch const &d ) const + { + MemoryRequirements2KHR memoryRequirements; + d.vkGetAccelerationStructureScratchMemoryRequirementsNVX( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::bindAccelerationStructureMemoryNVX( uint32_t bindInfoCount, const BindAccelerationStructureMemoryInfoNVX* pBindInfos, Dispatch const &d) const + { + return static_cast( d.vkBindAccelerationStructureMemoryNVX( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Device::bindAccelerationStructureMemoryNVX( ArrayProxy bindInfos, Dispatch const &d ) const + { + Result result = static_cast( d.vkBindAccelerationStructureMemoryNVX( m_device, bindInfos.size() , reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::bindAccelerationStructureMemoryNVX" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getRaytracingShaderHandlesNVX( Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const &d) const + { + return static_cast( d.vkGetRaytracingShaderHandlesNVX( m_device, static_cast( pipeline ), firstGroup, groupCount, dataSize, pData ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::getRaytracingShaderHandlesNVX( Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const &d ) const + { + std::vector data( dataSize ); + Result result = static_cast( d.vkGetRaytracingShaderHandlesNVX( m_device, static_cast( pipeline ), firstGroup, groupCount, data.size() * sizeof( uint8_t ) , reinterpret_cast( data.data() ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getRaytracingShaderHandlesNVX" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::getAccelerationStructureHandleNVX( 
AccelerationStructureNVX accelerationStructure, size_t dataSize, void* pData, Dispatch const &d) const + { + return static_cast( d.vkGetAccelerationStructureHandleNVX( m_device, static_cast( accelerationStructure ), dataSize, pData ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::getAccelerationStructureHandleNVX( AccelerationStructureNVX accelerationStructure, size_t dataSize, Dispatch const &d ) const + { + std::vector data( dataSize ); + Result result = static_cast( d.vkGetAccelerationStructureHandleNVX( m_device, static_cast( accelerationStructure ), data.size() * sizeof( uint8_t ) , reinterpret_cast( data.data() ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getAccelerationStructureHandleNVX" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Device::createRaytracingPipelinesNVX( PipelineCache pipelineCache, uint32_t createInfoCount, const RaytracingPipelineCreateInfoNVX* pCreateInfos, const AllocationCallbacks* pAllocator, Pipeline* pPipelines, Dispatch const &d) const + { + return static_cast( d.vkCreateRaytracingPipelinesNVX( m_device, static_cast( pipelineCache ), createInfoCount, reinterpret_cast( pCreateInfos ), reinterpret_cast( pAllocator ), reinterpret_cast( pPipelines ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createRaytracingPipelinesNVX( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator, Dispatch const &d ) const + { + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateRaytracingPipelinesNVX( m_device, static_cast( pipelineCache ), createInfos.size() , reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING"::Device::createRaytracingPipelinesNVX" ); + } + template + VULKAN_HPP_INLINE ResultValueType::type Device::createRaytracingPipelineNVX( PipelineCache pipelineCache, const RaytracingPipelineCreateInfoNVX & createInfo, Optional allocator, Dispatch const &d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateRaytracingPipelinesNVX( m_device, static_cast( pipelineCache ), 1 , reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createRaytracingPipelineNVX" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType,Allocator>>::type Device::createRaytracingPipelinesNVXUnique( PipelineCache pipelineCache, ArrayProxy createInfos, Optional allocator, Dispatch const &d ) const + { + static_assert( sizeof( Pipeline ) <= sizeof( UniquePipeline ), "Pipeline is greater than UniquePipeline!" 
); + std::vector pipelines; + pipelines.reserve( createInfos.size() ); + Pipeline* buffer = reinterpret_cast( reinterpret_cast( pipelines.data() ) + createInfos.size() * ( sizeof( UniquePipeline ) - sizeof( Pipeline ) ) ); + Result result = static_cast(d.vkCreateRaytracingPipelinesNVX( m_device, static_cast( pipelineCache ), createInfos.size() , reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( buffer ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0 ; i + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createRaytracingPipelineNVXUnique( PipelineCache pipelineCache, const RaytracingPipelineCreateInfoNVX & createInfo, Optional allocator, Dispatch const &d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateRaytracingPipelinesNVX( m_device, static_cast( pipelineCache ), 1 , reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING"::Device::createRaytracingPipelineNVXUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueDevice = UniqueHandle; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + + class PhysicalDevice + { + public: + VULKAN_HPP_CONSTEXPR PhysicalDevice() + : m_physicalDevice(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevice( std::nullptr_t ) + : m_physicalDevice(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT PhysicalDevice( VkPhysicalDevice physicalDevice ) + : m_physicalDevice( physicalDevice ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + PhysicalDevice & operator=(VkPhysicalDevice physicalDevice) + { + m_physicalDevice = physicalDevice; + return *this; + } +#endif + + PhysicalDevice & operator=( std::nullptr_t ) + { + m_physicalDevice = VK_NULL_HANDLE; + return *this; + } + + bool operator==( PhysicalDevice const & rhs ) const + { + return m_physicalDevice == rhs.m_physicalDevice; + } + + bool operator!=(PhysicalDevice const & rhs ) const + { + return m_physicalDevice != rhs.m_physicalDevice; + } + + bool operator<(PhysicalDevice const & rhs ) const + { + return m_physicalDevice < rhs.m_physicalDevice; + } + + template + void getProperties( PhysicalDeviceProperties* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PhysicalDeviceProperties getProperties(Dispatch const &d = Dispatch() ) const; + template + StructureChain getProperties(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties* pQueueFamilyProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + std::vector getQueueFamilyProperties(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getMemoryProperties( PhysicalDeviceMemoryProperties* pMemoryProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PhysicalDeviceMemoryProperties getMemoryProperties(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + 
void getFeatures( PhysicalDeviceFeatures* pFeatures, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PhysicalDeviceFeatures getFeatures(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getFormatProperties( Format format, FormatProperties* pFormatProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + FormatProperties getFormatProperties( Format format, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getImageFormatProperties( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ImageFormatProperties* pImageFormatProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getImageFormatProperties( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createDevice( const DeviceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Device* pDevice, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createDevice( const DeviceCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createDeviceUnique( const DeviceCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result enumerateDeviceLayerProperties( uint32_t* pPropertyCount, LayerProperties* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type enumerateDeviceLayerProperties(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, ExtensionProperties* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type enumerateDeviceExtensionProperties( Optional layerName = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getSparseImageFormatProperties( Format format, ImageType type, SampleCountFlagBits samples, ImageUsageFlags usage, ImageTiling tiling, uint32_t* pPropertyCount, SparseImageFormatProperties* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + std::vector getSparseImageFormatProperties( Format format, ImageType type, SampleCountFlagBits samples, ImageUsageFlags usage, ImageTiling tiling, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getDisplayPropertiesKHR( uint32_t* pPropertyCount, DisplayPropertiesKHR* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getDisplayPropertiesKHR(Dispatch const &d = 
Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, DisplayPlanePropertiesKHR* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getDisplayPlanePropertiesKHR(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, DisplayKHR* pDisplays, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getDisplayModePropertiesKHR( DisplayKHR display, uint32_t* pPropertyCount, DisplayModePropertiesKHR* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getDisplayModePropertiesKHR( DisplayKHR display, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result createDisplayModeKHR( DisplayKHR display, const DisplayModeCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, DisplayModeKHR* pMode, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createDisplayModeKHR( DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getDisplayPlaneCapabilitiesKHR( DisplayModeKHR mode, uint32_t planeIndex, DisplayPlaneCapabilitiesKHR* pCapabilities, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getDisplayPlaneCapabilitiesKHR( DisplayModeKHR mode, uint32_t planeIndex, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_MIR_KHR + template + Bool32 getMirPresentationSupportKHR( uint32_t queueFamilyIndex, MirConnection* connection, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Bool32 getMirPresentationSupportKHR( uint32_t queueFamilyIndex, MirConnection & connection, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + + template + Result getSurfaceSupportKHR( uint32_t queueFamilyIndex, SurfaceKHR surface, Bool32* pSupported, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getSurfaceSupportKHR( uint32_t queueFamilyIndex, SurfaceKHR surface, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getSurfaceCapabilitiesKHR( SurfaceKHR surface, SurfaceCapabilitiesKHR* pSurfaceCapabilities, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getSurfaceCapabilitiesKHR( SurfaceKHR surface, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getSurfaceFormatsKHR( SurfaceKHR surface, uint32_t* pSurfaceFormatCount, 
SurfaceFormatKHR* pSurfaceFormats, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getSurfaceFormatsKHR( SurfaceKHR surface, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getSurfacePresentModesKHR( SurfaceKHR surface, uint32_t* pPresentModeCount, PresentModeKHR* pPresentModes, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getSurfacePresentModesKHR( SurfaceKHR surface, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + template + Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display* display, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + Bool32 getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const &d = Dispatch() ) const; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + template + Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, VisualID visualID, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + template + Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + + template + Result getExternalImageFormatPropertiesNV( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ExternalMemoryHandleTypeFlagsNV externalHandleType, ExternalImageFormatPropertiesNV* pExternalImageFormatProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getExternalImageFormatPropertiesNV( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ExternalMemoryHandleTypeFlagsNV externalHandleType, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getGeneratedCommandsPropertiesNVX( DeviceGeneratedCommandsFeaturesNVX* pFeatures, DeviceGeneratedCommandsLimitsNVX* pLimits, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + DeviceGeneratedCommandsLimitsNVX getGeneratedCommandsPropertiesNVX( DeviceGeneratedCommandsFeaturesNVX & features, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getFeatures2( PhysicalDeviceFeatures2* 
pFeatures, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PhysicalDeviceFeatures2 getFeatures2(Dispatch const &d = Dispatch() ) const; + template + StructureChain getFeatures2(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getFeatures2KHR( PhysicalDeviceFeatures2* pFeatures, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PhysicalDeviceFeatures2 getFeatures2KHR(Dispatch const &d = Dispatch() ) const; + template + StructureChain getFeatures2KHR(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getProperties2( PhysicalDeviceProperties2* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PhysicalDeviceProperties2 getProperties2(Dispatch const &d = Dispatch() ) const; + template + StructureChain getProperties2(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getProperties2KHR( PhysicalDeviceProperties2* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PhysicalDeviceProperties2 getProperties2KHR(Dispatch const &d = Dispatch() ) const; + template + StructureChain getProperties2KHR(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getFormatProperties2( Format format, FormatProperties2* pFormatProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + FormatProperties2 getFormatProperties2( Format format, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getFormatProperties2KHR( Format format, FormatProperties2* pFormatProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + FormatProperties2 getFormatProperties2KHR( Format format, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2* pImageFormatInfo, ImageFormatProperties2* pImageFormatProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d = Dispatch() ) const; + template + typename ResultValueType>::type getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2* pImageFormatInfo, ImageFormatProperties2* pImageFormatProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d = Dispatch() ) const; + template + typename ResultValueType>::type getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getQueueFamilyProperties2( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const &d = Dispatch() ) const; +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + std::vector getQueueFamilyProperties2(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + std::vector getQueueFamilyProperties2KHR(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getMemoryProperties2( PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PhysicalDeviceMemoryProperties2 getMemoryProperties2(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getMemoryProperties2KHR( PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PhysicalDeviceMemoryProperties2 getMemoryProperties2KHR(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, SparseImageFormatProperties2* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + std::vector getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, SparseImageFormatProperties2* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + std::vector getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, ExternalBufferProperties* pExternalBufferProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ExternalBufferProperties getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, ExternalBufferProperties* pExternalBufferProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ExternalBufferProperties getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getExternalSemaphoreProperties( const PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ExternalSemaphoreProperties getExternalSemaphoreProperties( const 
PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ExternalSemaphoreProperties getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, ExternalFenceProperties* pExternalFenceProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ExternalFenceProperties getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, ExternalFenceProperties* pExternalFenceProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ExternalFenceProperties getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result releaseDisplayEXT( DisplayKHR display, Dispatch const &d = Dispatch() ) const; +#else + template + ResultValueType::type releaseDisplayEXT( DisplayKHR display, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + template + Result acquireXlibDisplayEXT( Display* dpy, DisplayKHR display, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type acquireXlibDisplayEXT( DisplayKHR display, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + template + Result getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, DisplayKHR* pDisplay, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + + template + Result getSurfaceCapabilities2EXT( SurfaceKHR surface, SurfaceCapabilities2EXT* pSurfaceCapabilities, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getSurfaceCapabilities2EXT( SurfaceKHR surface, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getPresentRectanglesKHR( SurfaceKHR surface, uint32_t* pRectCount, Rect2D* pRects, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getPresentRectanglesKHR( SurfaceKHR surface, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getMultisamplePropertiesEXT( SampleCountFlagBits samples, 
MultisamplePropertiesEXT* pMultisampleProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + MultisamplePropertiesEXT getMultisamplePropertiesEXT( SampleCountFlagBits samples, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, SurfaceCapabilities2KHR* pSurfaceCapabilities, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d = Dispatch() ) const; + template + typename ResultValueType>::type getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, SurfaceFormat2KHR* pSurfaceFormats, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getDisplayProperties2KHR( uint32_t* pPropertyCount, DisplayProperties2KHR* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getDisplayProperties2KHR(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getDisplayPlaneProperties2KHR( uint32_t* pPropertyCount, DisplayPlaneProperties2KHR* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getDisplayPlaneProperties2KHR(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getDisplayModeProperties2KHR( DisplayKHR display, uint32_t* pPropertyCount, DisplayModeProperties2KHR* pProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type getDisplayModeProperties2KHR( DisplayKHR display, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR* pDisplayPlaneInfo, DisplayPlaneCapabilities2KHR* pCapabilities, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPhysicalDevice() const + { + return m_physicalDevice; + } + + explicit operator bool() const + { + return m_physicalDevice != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_physicalDevice == VK_NULL_HANDLE; + } + + private: + VkPhysicalDevice m_physicalDevice; + }; + + static_assert( sizeof( PhysicalDevice ) == sizeof( VkPhysicalDevice ), "handle and wrapper have different size!" 
);
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getProperties( PhysicalDeviceProperties* pProperties, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties*>( pProperties ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE PhysicalDeviceProperties PhysicalDevice::getProperties(Dispatch const &d ) const
+  {
+    PhysicalDeviceProperties properties;
+    d.vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties*>( &properties ) );
+    return properties;
+  }
+  template <typename X, typename Y, typename ...Z, typename Dispatch>
+  VULKAN_HPP_INLINE StructureChain<X, Y, Z...> PhysicalDevice::getProperties(Dispatch const &d ) const
+  {
+    StructureChain<X, Y, Z...> structureChain;
+    PhysicalDeviceProperties& properties = structureChain.template get<PhysicalDeviceProperties>();
+    d.vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceProperties*>( &properties ) );
+    return structureChain;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties* pQueueFamilyProperties, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties*>( pQueueFamilyProperties ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Allocator, typename Dispatch>
+  VULKAN_HPP_INLINE std::vector<QueueFamilyProperties,Allocator> PhysicalDevice::getQueueFamilyProperties(Dispatch const &d ) const
+  {
+    std::vector<QueueFamilyProperties,Allocator> queueFamilyProperties;
+    uint32_t queueFamilyPropertyCount;
+    d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, nullptr );
+    queueFamilyProperties.resize( queueFamilyPropertyCount );
+    d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast<VkQueueFamilyProperties*>( queueFamilyProperties.data() ) );
+    return queueFamilyProperties;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties( PhysicalDeviceMemoryProperties* pMemoryProperties, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties*>( pMemoryProperties ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE PhysicalDeviceMemoryProperties PhysicalDevice::getMemoryProperties(Dispatch const &d ) const
+  {
+    PhysicalDeviceMemoryProperties memoryProperties;
+    d.vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties*>( &memoryProperties ) );
+    return memoryProperties;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getFeatures( PhysicalDeviceFeatures* pFeatures, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures*>( pFeatures ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE PhysicalDeviceFeatures PhysicalDevice::getFeatures(Dispatch const &d ) const
+  {
+    PhysicalDeviceFeatures features;
+    d.vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceFeatures*>( &features ) );
+    return features;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties( Format format, FormatProperties* pFormatProperties, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties*>( pFormatProperties ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE FormatProperties PhysicalDevice::getFormatProperties( Format format, Dispatch const &d ) const
+  {
+    FormatProperties formatProperties;
+    d.vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), reinterpret_cast<VkFormatProperties*>( &formatProperties ) );
+    return formatProperties;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ImageFormatProperties* pImageFormatProperties, Dispatch const &d) const
+  {
+    return static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), reinterpret_cast<VkImageFormatProperties*>( pImageFormatProperties ) ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE ResultValueType<ImageFormatProperties>::type PhysicalDevice::getImageFormatProperties( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, Dispatch const &d ) const
+  {
+    ImageFormatProperties imageFormatProperties;
+    Result result = static_cast<Result>( d.vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkImageTiling>( tiling ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageCreateFlags>( flags ), reinterpret_cast<VkImageFormatProperties*>( &imageFormatProperties ) ) );
+    return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Result PhysicalDevice::createDevice( const DeviceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Device* pDevice, Dispatch const &d) const
+  {
+    return static_cast<Result>( d.vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo*>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks*>( pAllocator ), reinterpret_cast<VkDevice*>( pDevice ) ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE ResultValueType<Device>::type PhysicalDevice::createDevice( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const
+  {
+    Device device;
+    Result result = static_cast<Result>( d.vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDevice*>( &device ) ) );
+    return createResultValue( result, device, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::createDevice" );
+  }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<Device,Dispatch>>::type PhysicalDevice::createDeviceUnique( const DeviceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const &d ) const
+  {
+    Device device;
+    Result result = static_cast<Result>( d.vkCreateDevice( m_physicalDevice, reinterpret_cast<const VkDeviceCreateInfo*>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks*>( static_cast<const AllocationCallbacks*>( allocator ) ), reinterpret_cast<VkDevice*>( &device ) ) );
+
+    ObjectDestroy<NoParent,Dispatch> deleter( allocator, d );
+    return createResultValue( result, device, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::createDeviceUnique", deleter );
+  }
+#endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceLayerProperties( uint32_t* pPropertyCount, LayerProperties* pProperties, Dispatch const &d) const
+  {
+    return static_cast<Result>( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, pPropertyCount, reinterpret_cast<VkLayerProperties*>( pProperties ) ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Allocator, typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<std::vector<LayerProperties,Allocator>>::type PhysicalDevice::enumerateDeviceLayerProperties(Dispatch const &d ) const
+  {
+    std::vector<LayerProperties,Allocator> properties;
+    uint32_t propertyCount;
+    Result result;
+    do
+    {
+      result = static_cast<Result>( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, nullptr ) );
+      if ( ( result == Result::eSuccess ) && propertyCount )
+      {
+        properties.resize( propertyCount );
+        result = static_cast<Result>( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, reinterpret_cast<VkLayerProperties*>( properties.data() ) ) );
+      }
+    } while ( result == Result::eIncomplete );
+    VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
+    properties.resize( propertyCount );
+    return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceLayerProperties" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, ExtensionProperties* pProperties, Dispatch const &d) const
+  {
+    return static_cast<Result>( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, pLayerName, pPropertyCount, reinterpret_cast<VkExtensionProperties*>( pProperties ) ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Allocator, typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties,Allocator>>::type PhysicalDevice::enumerateDeviceExtensionProperties( Optional<const std::string> layerName, Dispatch const &d ) const
+  {
+    std::vector<ExtensionProperties,Allocator> properties;
+    uint32_t propertyCount;
+    Result result;
+    do
+    {
+      result = static_cast<Result>( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, nullptr ) );
+      if ( ( result == Result::eSuccess ) && propertyCount )
+      {
+        properties.resize( propertyCount );
+        result = static_cast<Result>( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast<VkExtensionProperties*>( properties.data() ) ) );
+      }
+    } while ( result == Result::eIncomplete );
+    VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
+    properties.resize( propertyCount );
+    return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceExtensionProperties" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties( Format format, ImageType type, SampleCountFlagBits samples, ImageUsageFlags usage, ImageTiling tiling, uint32_t* pPropertyCount, SparseImageFormatProperties* pProperties, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), pPropertyCount, reinterpret_cast<VkSparseImageFormatProperties*>( pProperties ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Allocator, typename Dispatch>
+  VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties,Allocator> PhysicalDevice::getSparseImageFormatProperties( Format format, ImageType type, SampleCountFlagBits samples, ImageUsageFlags usage, ImageTiling tiling, Dispatch const &d ) const
+  {
+    std::vector<SparseImageFormatProperties,Allocator> properties;
+    uint32_t propertyCount;
+    d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, nullptr );
+    properties.resize( propertyCount );
+    d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast<VkFormat>( format ), static_cast<VkImageType>( type ), static_cast<VkSampleCountFlagBits>( samples ), static_cast<VkImageUsageFlags>( usage ), static_cast<VkImageTiling>( tiling ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties*>( properties.data() ) );
+    return properties;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE Result
PhysicalDevice::getDisplayPropertiesKHR( uint32_t* pPropertyCount, DisplayPropertiesKHR* pProperties, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast( pProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPropertiesKHR(Dispatch const &d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + properties.resize( propertyCount ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, DisplayPlanePropertiesKHR* pProperties, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast( pProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPlanePropertiesKHR(Dispatch const &d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + properties.resize( propertyCount ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlanePropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, DisplayKHR* pDisplays, Dispatch const &d) const + { + return static_cast( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, pDisplayCount, reinterpret_cast( pDisplays ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Dispatch const &d ) const + { + std::vector displays; + uint32_t displayCount; + Result result; + do + { + result = static_cast( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && displayCount ) + { + displays.resize( displayCount ); + result = static_cast( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, reinterpret_cast( displays.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( displayCount <= 
displays.size() ); + displays.resize( displayCount ); + return createResultValue( result, displays, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayModePropertiesKHR( DisplayKHR display, uint32_t* pPropertyCount, DisplayModePropertiesKHR* pProperties, Dispatch const &d) const + { + return static_cast( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast( display ), pPropertyCount, reinterpret_cast( pProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayModePropertiesKHR( DisplayKHR display, Dispatch const &d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast( display ), &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast( display ), &propertyCount, reinterpret_cast( properties.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + properties.resize( propertyCount ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModePropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::createDisplayModeKHR( DisplayKHR display, const DisplayModeCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, DisplayModeKHR* pMode, Dispatch const &d) const + { + return static_cast( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast( display ), reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pMode ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::createDisplayModeKHR( DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + DisplayModeKHR mode; + Result result = static_cast( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast( display ), reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &mode ) ) ); + return createResultValue( result, mode, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::createDisplayModeKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneCapabilitiesKHR( DisplayModeKHR mode, uint32_t planeIndex, DisplayPlaneCapabilitiesKHR* pCapabilities, Dispatch const &d) const + { + return static_cast( d.vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast( mode ), planeIndex, reinterpret_cast( pCapabilities ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::getDisplayPlaneCapabilitiesKHR( DisplayModeKHR mode, uint32_t planeIndex, Dispatch const &d ) const + { + DisplayPlaneCapabilitiesKHR capabilities; + Result result = static_cast( d.vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast( mode ), planeIndex, reinterpret_cast( &capabilities ) ) ); + return createResultValue( result, capabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneCapabilitiesKHR" ); + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_MIR_KHR + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getMirPresentationSupportKHR( uint32_t queueFamilyIndex, MirConnection* connection, Dispatch const &d) const + { + return d.vkGetPhysicalDeviceMirPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, connection ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getMirPresentationSupportKHR( uint32_t queueFamilyIndex, MirConnection & connection, Dispatch const &d ) const + { + return d.vkGetPhysicalDeviceMirPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &connection ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, SurfaceKHR surface, Bool32* pSupported, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast( surface ), pSupported ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, SurfaceKHR surface, Dispatch const &d ) const + { + Bool32 supported; + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast( surface ), &supported ) ); + return createResultValue( result, supported, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceSupportKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilitiesKHR( SurfaceKHR surface, SurfaceCapabilitiesKHR* pSurfaceCapabilities, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast( surface ), reinterpret_cast( pSurfaceCapabilities ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::getSurfaceCapabilitiesKHR( SurfaceKHR surface, Dispatch const &d ) const + { + SurfaceCapabilitiesKHR surfaceCapabilities; + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast( surface ), reinterpret_cast( &surfaceCapabilities ) ) ); + return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceCapabilitiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormatsKHR( SurfaceKHR surface, uint32_t* pSurfaceFormatCount, SurfaceFormatKHR* pSurfaceFormats, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast( surface ), pSurfaceFormatCount, reinterpret_cast( pSurfaceFormats ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfaceFormatsKHR( SurfaceKHR surface, Dispatch const &d ) const + { + std::vector surfaceFormats; + uint32_t surfaceFormatCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast( surface ), &surfaceFormatCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && surfaceFormatCount ) + { + surfaceFormats.resize( surfaceFormatCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast( surface ), &surfaceFormatCount, 
reinterpret_cast( surfaceFormats.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); + surfaceFormats.resize( surfaceFormatCount ); + return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormatsKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getSurfacePresentModesKHR( SurfaceKHR surface, uint32_t* pPresentModeCount, PresentModeKHR* pPresentModes, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast( surface ), pPresentModeCount, reinterpret_cast( pPresentModes ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfacePresentModesKHR( SurfaceKHR surface, Dispatch const &d ) const + { + std::vector presentModes; + uint32_t presentModeCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast( surface ), &presentModeCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && presentModeCount ) + { + presentModes.resize( presentModeCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast( surface ), &presentModeCount, reinterpret_cast( presentModes.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); + presentModes.resize( presentModeCount ); + return createResultValue( result, presentModes, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfacePresentModesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display* display, Dispatch const &d) const + { + return d.vkGetPhysicalDeviceWaylandPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, display ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display, Dispatch const &d ) const + { + return d.vkGetPhysicalDeviceWaylandPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &display ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const &d) const + { + return d.vkGetPhysicalDeviceWin32PresentationSupportKHR( m_physicalDevice, queueFamilyIndex ); + } +#else + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const &d ) const + { + return d.vkGetPhysicalDeviceWin32PresentationSupportKHR( m_physicalDevice, queueFamilyIndex ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, VisualID visualID, Dispatch const &d) const + { + return d.vkGetPhysicalDeviceXlibPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, dpy, visualID ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + 
VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID, Dispatch const &d ) const + { + return d.vkGetPhysicalDeviceXlibPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &dpy, visualID ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id, Dispatch const &d) const + { + return d.vkGetPhysicalDeviceXcbPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, connection, visual_id ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id, Dispatch const &d ) const + { + return d.vkGetPhysicalDeviceXcbPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &connection, visual_id ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getExternalImageFormatPropertiesNV( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ExternalMemoryHandleTypeFlagsNV externalHandleType, ExternalImageFormatPropertiesNV* pExternalImageFormatProperties, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( tiling ), static_cast( usage ), static_cast( flags ), static_cast( externalHandleType ), reinterpret_cast( pExternalImageFormatProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::getExternalImageFormatPropertiesNV( Format format, ImageType type, ImageTiling tiling, ImageUsageFlags usage, ImageCreateFlags flags, ExternalMemoryHandleTypeFlagsNV externalHandleType, Dispatch const &d ) const + { + ExternalImageFormatPropertiesNV externalImageFormatProperties; + Result result = static_cast( d.vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( tiling ), static_cast( usage ), static_cast( flags ), static_cast( externalHandleType ), reinterpret_cast( &externalImageFormatProperties ) ) ); + return createResultValue( result, externalImageFormatProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getExternalImageFormatPropertiesNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getGeneratedCommandsPropertiesNVX( DeviceGeneratedCommandsFeaturesNVX* pFeatures, DeviceGeneratedCommandsLimitsNVX* pLimits, Dispatch const &d) const + { + d.vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX( m_physicalDevice, reinterpret_cast( pFeatures ), reinterpret_cast( pLimits ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE DeviceGeneratedCommandsLimitsNVX PhysicalDevice::getGeneratedCommandsPropertiesNVX( DeviceGeneratedCommandsFeaturesNVX & features, Dispatch const &d ) const + { + DeviceGeneratedCommandsLimitsNVX limits; + d.vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX( m_physicalDevice, reinterpret_cast( &features ), reinterpret_cast( &limits ) ); + return limits; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void 
PhysicalDevice::getFeatures2( PhysicalDeviceFeatures2* pFeatures, Dispatch const &d) const + { + d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast( pFeatures ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PhysicalDeviceFeatures2 PhysicalDevice::getFeatures2(Dispatch const &d ) const + { + PhysicalDeviceFeatures2 features; + d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast( &features ) ); + return features; + } + template + VULKAN_HPP_INLINE StructureChain PhysicalDevice::getFeatures2(Dispatch const &d ) const + { + StructureChain structureChain; + PhysicalDeviceFeatures2& features = structureChain.template get(); + d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast( &features ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getFeatures2KHR( PhysicalDeviceFeatures2* pFeatures, Dispatch const &d) const + { + d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast( pFeatures ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PhysicalDeviceFeatures2 PhysicalDevice::getFeatures2KHR(Dispatch const &d ) const + { + PhysicalDeviceFeatures2 features; + d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast( &features ) ); + return features; + } + template + VULKAN_HPP_INLINE StructureChain PhysicalDevice::getFeatures2KHR(Dispatch const &d ) const + { + StructureChain structureChain; + PhysicalDeviceFeatures2& features = structureChain.template get(); + d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast( &features ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getProperties2( PhysicalDeviceProperties2* pProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast( pProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PhysicalDeviceProperties2 PhysicalDevice::getProperties2(Dispatch const &d ) const + { + PhysicalDeviceProperties2 properties; + d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast( &properties ) ); + return properties; + } + template + VULKAN_HPP_INLINE StructureChain PhysicalDevice::getProperties2(Dispatch const &d ) const + { + StructureChain structureChain; + PhysicalDeviceProperties2& properties = structureChain.template get(); + d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast( &properties ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getProperties2KHR( PhysicalDeviceProperties2* pProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast( pProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PhysicalDeviceProperties2 PhysicalDevice::getProperties2KHR(Dispatch const &d ) const + { + PhysicalDeviceProperties2 properties; + d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast( &properties ) ); + return properties; + } + template + VULKAN_HPP_INLINE StructureChain PhysicalDevice::getProperties2KHR(Dispatch const &d ) const + { + StructureChain structureChain; + PhysicalDeviceProperties2& properties = structureChain.template get(); + d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast( &properties ) ); + return 
structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties2( Format format, FormatProperties2* pFormatProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast( format ), reinterpret_cast( pFormatProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE FormatProperties2 PhysicalDevice::getFormatProperties2( Format format, Dispatch const &d ) const + { + FormatProperties2 formatProperties; + d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast( format ), reinterpret_cast( &formatProperties ) ); + return formatProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties2KHR( Format format, FormatProperties2* pFormatProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast( format ), reinterpret_cast( pFormatProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE FormatProperties2 PhysicalDevice::getFormatProperties2KHR( Format format, Dispatch const &d ) const + { + FormatProperties2 formatProperties; + d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast( format ), reinterpret_cast( &formatProperties ) ); + return formatProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2* pImageFormatInfo, ImageFormatProperties2* pImageFormatProperties, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast( pImageFormatInfo ), reinterpret_cast( pImageFormatProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d ) const + { + ImageFormatProperties2 imageFormatProperties; + Result result = static_cast( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) ); + return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2" ); + } + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d ) const + { + StructureChain structureChain; + ImageFormatProperties2& imageFormatProperties = structureChain.template get(); + Result result = static_cast( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) ); + return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2* pImageFormatInfo, ImageFormatProperties2* pImageFormatProperties, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( pImageFormatInfo ), reinterpret_cast( pImageFormatProperties ) ) ); + } +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d ) const + { + ImageFormatProperties2 imageFormatProperties; + Result result = static_cast( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) ); + return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2KHR" ); + } + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const &d ) const + { + StructureChain structureChain; + ImageFormatProperties2& imageFormatProperties = structureChain.template get(); + Result result = static_cast( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) ); + return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties2( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast( pQueueFamilyProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2(Dispatch const &d ) const + { + std::vector queueFamilyProperties; + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + queueFamilyProperties.resize( queueFamilyPropertyCount ); + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + return queueFamilyProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast( pQueueFamilyProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2KHR(Dispatch const &d ) const + { + std::vector queueFamilyProperties; + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + queueFamilyProperties.resize( queueFamilyPropertyCount ); + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + return queueFamilyProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties2( PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast( pMemoryProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE 
PhysicalDeviceMemoryProperties2 PhysicalDevice::getMemoryProperties2(Dispatch const &d ) const
+  {
+    PhysicalDeviceMemoryProperties2 memoryProperties;
+    d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2*>( &memoryProperties ) );
+    return memoryProperties;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties2KHR( PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2*>( pMemoryProperties ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE PhysicalDeviceMemoryProperties2 PhysicalDevice::getMemoryProperties2KHR(Dispatch const &d ) const
+  {
+    PhysicalDeviceMemoryProperties2 memoryProperties;
+    d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast<VkPhysicalDeviceMemoryProperties2*>( &memoryProperties ) );
+    return memoryProperties;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, SparseImageFormatProperties2* pProperties, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( pFormatInfo ), pPropertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( pProperties ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Allocator, typename Dispatch>
+  VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2,Allocator> PhysicalDevice::getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const &d ) const
+  {
+    std::vector<SparseImageFormatProperties2,Allocator> properties;
+    uint32_t propertyCount;
+    d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, nullptr );
+    properties.resize( propertyCount );
+    d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( properties.data() ) );
+    return properties;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, SparseImageFormatProperties2* pProperties, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( pFormatInfo ), pPropertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( pProperties ) );
+  }
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template<typename Allocator, typename Dispatch>
+  VULKAN_HPP_INLINE std::vector<SparseImageFormatProperties2,Allocator> PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const &d ) const
+  {
+    std::vector<SparseImageFormatProperties2,Allocator> properties;
+    uint32_t propertyCount;
+    d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, nullptr );
+    properties.resize( propertyCount );
+    d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceSparseImageFormatInfo2*>( &formatInfo ), &propertyCount, reinterpret_cast<VkSparseImageFormatProperties2*>( properties.data() ) );
+    return properties;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template<typename Dispatch>
+  VULKAN_HPP_INLINE void PhysicalDevice::getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, ExternalBufferProperties* pExternalBufferProperties, Dispatch const &d) const
+  {
+    d.vkGetPhysicalDeviceExternalBufferProperties( m_physicalDevice, reinterpret_cast<const VkPhysicalDeviceExternalBufferInfo*>( pExternalBufferInfo ), reinterpret_cast<VkExternalBufferProperties*>( pExternalBufferProperties ) );
+  }
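Editor's note on the wrapper layer above: the pointer overloads forward straight to the C entry points after reinterpret_casting the C++ wrapper structs, the enhanced-mode overloads return the queried structs by value (retrying on VK_INCOMPLETE for enumerations), and StructureChain wires up pNext so extension structs can be queried in one call. The following is a minimal usage sketch, not part of the vendored header or of the q2vkpt sources; it assumes the header is reachable as <vulkan/vulkan.hpp> with the default vk namespace, a Vulkan 1.1 loader, and it does not check that VK_EXT_descriptor_indexing is actually supported before reading the chained struct.

// caps_probe.cpp -- illustrative sketch of calling the PhysicalDevice query wrappers
#include <vulkan/vulkan.hpp>
#include <cstdio>

int main()
{
    vk::ApplicationInfo app_info( "vkpt-caps-probe", 1, nullptr, 0, VK_API_VERSION_1_1 );
    vk::InstanceCreateInfo instance_info( {}, &app_info );
    vk::Instance instance = vk::createInstance( instance_info );

    // Enhanced mode hides the count/fill VK_INCOMPLETE loop seen in this header.
    std::vector<vk::PhysicalDevice> devices = instance.enumeratePhysicalDevices();

    for ( vk::PhysicalDevice dev : devices )
    {
        // Vulkan 1.1 "2" variants return the filled structs by value.
        vk::PhysicalDeviceProperties2       props = dev.getProperties2();
        vk::PhysicalDeviceMemoryProperties2 mem   = dev.getMemoryProperties2();
        std::printf( "%s (%u heaps)\n", props.properties.deviceName,
                     mem.memoryProperties.memoryHeapCount );

        // StructureChain chains an extension feature struct through pNext.
        auto chain = dev.getFeatures2<vk::PhysicalDeviceFeatures2,
                                      vk::PhysicalDeviceDescriptorIndexingFeaturesEXT>();
        const auto& indexing = chain.get<vk::PhysicalDeviceDescriptorIndexingFeaturesEXT>();
        std::printf( "  runtimeDescriptorArray: %u\n", indexing.runtimeDescriptorArray );
    }

    instance.destroy();
    return 0;
}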
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ExternalBufferProperties PhysicalDevice::getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const &d ) const + { + ExternalBufferProperties externalBufferProperties; + d.vkGetPhysicalDeviceExternalBufferProperties( m_physicalDevice, reinterpret_cast( &externalBufferInfo ), reinterpret_cast( &externalBufferProperties ) ); + return externalBufferProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, ExternalBufferProperties* pExternalBufferProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceExternalBufferPropertiesKHR( m_physicalDevice, reinterpret_cast( pExternalBufferInfo ), reinterpret_cast( pExternalBufferProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ExternalBufferProperties PhysicalDevice::getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const &d ) const + { + ExternalBufferProperties externalBufferProperties; + d.vkGetPhysicalDeviceExternalBufferPropertiesKHR( m_physicalDevice, reinterpret_cast( &externalBufferInfo ), reinterpret_cast( &externalBufferProperties ) ); + return externalBufferProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalSemaphoreProperties( const PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceExternalSemaphoreProperties( m_physicalDevice, reinterpret_cast( pExternalSemaphoreInfo ), reinterpret_cast( pExternalSemaphoreProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ExternalSemaphoreProperties PhysicalDevice::getExternalSemaphoreProperties( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const &d ) const + { + ExternalSemaphoreProperties externalSemaphoreProperties; + d.vkGetPhysicalDeviceExternalSemaphoreProperties( m_physicalDevice, reinterpret_cast( &externalSemaphoreInfo ), reinterpret_cast( &externalSemaphoreProperties ) ); + return externalSemaphoreProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( m_physicalDevice, reinterpret_cast( pExternalSemaphoreInfo ), reinterpret_cast( pExternalSemaphoreProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ExternalSemaphoreProperties PhysicalDevice::getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const &d ) const + { + ExternalSemaphoreProperties externalSemaphoreProperties; + d.vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( m_physicalDevice, reinterpret_cast( &externalSemaphoreInfo ), reinterpret_cast( &externalSemaphoreProperties ) ); + return externalSemaphoreProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, 
ExternalFenceProperties* pExternalFenceProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceExternalFenceProperties( m_physicalDevice, reinterpret_cast( pExternalFenceInfo ), reinterpret_cast( pExternalFenceProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ExternalFenceProperties PhysicalDevice::getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const &d ) const + { + ExternalFenceProperties externalFenceProperties; + d.vkGetPhysicalDeviceExternalFenceProperties( m_physicalDevice, reinterpret_cast( &externalFenceInfo ), reinterpret_cast( &externalFenceProperties ) ); + return externalFenceProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, ExternalFenceProperties* pExternalFenceProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceExternalFencePropertiesKHR( m_physicalDevice, reinterpret_cast( pExternalFenceInfo ), reinterpret_cast( pExternalFenceProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ExternalFenceProperties PhysicalDevice::getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const &d ) const + { + ExternalFenceProperties externalFenceProperties; + d.vkGetPhysicalDeviceExternalFencePropertiesKHR( m_physicalDevice, reinterpret_cast( &externalFenceInfo ), reinterpret_cast( &externalFenceProperties ) ); + return externalFenceProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result PhysicalDevice::releaseDisplayEXT( DisplayKHR display, Dispatch const &d) const + { + return static_cast( d.vkReleaseDisplayEXT( m_physicalDevice, static_cast( display ) ) ); + } +#else + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::releaseDisplayEXT( DisplayKHR display, Dispatch const &d ) const + { + Result result = static_cast( d.vkReleaseDisplayEXT( m_physicalDevice, static_cast( display ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::releaseDisplayEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + template + VULKAN_HPP_INLINE Result PhysicalDevice::acquireXlibDisplayEXT( Display* dpy, DisplayKHR display, Dispatch const &d) const + { + return static_cast( d.vkAcquireXlibDisplayEXT( m_physicalDevice, dpy, static_cast( display ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::acquireXlibDisplayEXT( DisplayKHR display, Dispatch const &d ) const + { + Display dpy; + Result result = static_cast( d.vkAcquireXlibDisplayEXT( m_physicalDevice, &dpy, static_cast( display ) ) ); + return createResultValue( result, dpy, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::acquireXlibDisplayEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + template + VULKAN_HPP_INLINE Result PhysicalDevice::getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, DisplayKHR* pDisplay, Dispatch const &d) const + { + return static_cast( d.vkGetRandROutputDisplayEXT( m_physicalDevice, dpy, rrOutput, reinterpret_cast( pDisplay ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type 
PhysicalDevice::getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput, Dispatch const &d ) const + { + DisplayKHR display; + Result result = static_cast( d.vkGetRandROutputDisplayEXT( m_physicalDevice, &dpy, rrOutput, reinterpret_cast( &display ) ) ); + return createResultValue( result, display, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getRandROutputDisplayEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2EXT( SurfaceKHR surface, SurfaceCapabilities2EXT* pSurfaceCapabilities, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast( surface ), reinterpret_cast( pSurfaceCapabilities ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::getSurfaceCapabilities2EXT( SurfaceKHR surface, Dispatch const &d ) const + { + SurfaceCapabilities2EXT surfaceCapabilities; + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast( surface ), reinterpret_cast( &surfaceCapabilities ) ) ); + return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceCapabilities2EXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getPresentRectanglesKHR( SurfaceKHR surface, uint32_t* pRectCount, Rect2D* pRects, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast( surface ), pRectCount, reinterpret_cast( pRects ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getPresentRectanglesKHR( SurfaceKHR surface, Dispatch const &d ) const + { + std::vector rects; + uint32_t rectCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast( surface ), &rectCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && rectCount ) + { + rects.resize( rectCount ); + result = static_cast( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast( surface ), &rectCount, reinterpret_cast( rects.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( rectCount <= rects.size() ); + rects.resize( rectCount ); + return createResultValue( result, rects, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getPresentRectanglesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getMultisamplePropertiesEXT( SampleCountFlagBits samples, MultisamplePropertiesEXT* pMultisampleProperties, Dispatch const &d) const + { + d.vkGetPhysicalDeviceMultisamplePropertiesEXT( m_physicalDevice, static_cast( samples ), reinterpret_cast( pMultisampleProperties ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE MultisamplePropertiesEXT PhysicalDevice::getMultisamplePropertiesEXT( SampleCountFlagBits samples, Dispatch const &d ) const + { + MultisamplePropertiesEXT multisampleProperties; + d.vkGetPhysicalDeviceMultisamplePropertiesEXT( m_physicalDevice, static_cast( samples ), reinterpret_cast( &multisampleProperties ) ); + return multisampleProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2KHR( const 
PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, SurfaceCapabilities2KHR* pSurfaceCapabilities, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast( pSurfaceInfo ), reinterpret_cast( pSurfaceCapabilities ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d ) const + { + SurfaceCapabilities2KHR surfaceCapabilities; + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), reinterpret_cast( &surfaceCapabilities ) ) ); + return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceCapabilities2KHR" ); + } + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d ) const + { + StructureChain structureChain; + SurfaceCapabilities2KHR& surfaceCapabilities = structureChain.template get(); + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), reinterpret_cast( &surfaceCapabilities ) ) ); + return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceCapabilities2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, SurfaceFormat2KHR* pSurfaceFormats, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast( pSurfaceInfo ), pSurfaceFormatCount, reinterpret_cast( pSurfaceFormats ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const &d ) const + { + std::vector surfaceFormats; + uint32_t surfaceFormatCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &surfaceFormatCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && surfaceFormatCount ) + { + surfaceFormats.resize( surfaceFormatCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &surfaceFormatCount, reinterpret_cast( surfaceFormats.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); + surfaceFormats.resize( surfaceFormatCount ); + return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormats2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayProperties2KHR( uint32_t* pPropertyCount, DisplayProperties2KHR* pProperties, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, pPropertyCount, reinterpret_cast( pProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayProperties2KHR(Dispatch const &d ) const + { + std::vector properties; + 
uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + properties.resize( propertyCount ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayProperties2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneProperties2KHR( uint32_t* pPropertyCount, DisplayPlaneProperties2KHR* pProperties, Dispatch const &d) const + { + return static_cast( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, pPropertyCount, reinterpret_cast( pProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPlaneProperties2KHR(Dispatch const &d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + properties.resize( propertyCount ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneProperties2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayModeProperties2KHR( DisplayKHR display, uint32_t* pPropertyCount, DisplayModeProperties2KHR* pProperties, Dispatch const &d) const + { + return static_cast( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast( display ), pPropertyCount, reinterpret_cast( pProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayModeProperties2KHR( DisplayKHR display, Dispatch const &d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast( display ), &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast( display ), &propertyCount, reinterpret_cast( properties.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + properties.resize( propertyCount ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModeProperties2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR* pDisplayPlaneInfo, DisplayPlaneCapabilities2KHR* pCapabilities, Dispatch const &d) 
const + { + return static_cast( d.vkGetDisplayPlaneCapabilities2KHR( m_physicalDevice, reinterpret_cast( pDisplayPlaneInfo ), reinterpret_cast( pCapabilities ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type PhysicalDevice::getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo, Dispatch const &d ) const + { + DisplayPlaneCapabilities2KHR capabilities; + Result result = static_cast( d.vkGetDisplayPlaneCapabilities2KHR( m_physicalDevice, reinterpret_cast( &displayPlaneInfo ), reinterpret_cast( &capabilities ) ) ); + return createResultValue( result, capabilities, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneCapabilities2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + struct CmdProcessCommandsInfoNVX + { + CmdProcessCommandsInfoNVX( ObjectTableNVX objectTable_ = ObjectTableNVX(), + IndirectCommandsLayoutNVX indirectCommandsLayout_ = IndirectCommandsLayoutNVX(), + uint32_t indirectCommandsTokenCount_ = 0, + const IndirectCommandsTokenNVX* pIndirectCommandsTokens_ = nullptr, + uint32_t maxSequencesCount_ = 0, + CommandBuffer targetCommandBuffer_ = CommandBuffer(), + Buffer sequencesCountBuffer_ = Buffer(), + DeviceSize sequencesCountOffset_ = 0, + Buffer sequencesIndexBuffer_ = Buffer(), + DeviceSize sequencesIndexOffset_ = 0 ) + : objectTable( objectTable_ ) + , indirectCommandsLayout( indirectCommandsLayout_ ) + , indirectCommandsTokenCount( indirectCommandsTokenCount_ ) + , pIndirectCommandsTokens( pIndirectCommandsTokens_ ) + , maxSequencesCount( maxSequencesCount_ ) + , targetCommandBuffer( targetCommandBuffer_ ) + , sequencesCountBuffer( sequencesCountBuffer_ ) + , sequencesCountOffset( sequencesCountOffset_ ) + , sequencesIndexBuffer( sequencesIndexBuffer_ ) + , sequencesIndexOffset( sequencesIndexOffset_ ) + { + } + + CmdProcessCommandsInfoNVX( VkCmdProcessCommandsInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( CmdProcessCommandsInfoNVX ) ); + } + + CmdProcessCommandsInfoNVX& operator=( VkCmdProcessCommandsInfoNVX const & rhs ) + { + memcpy( this, &rhs, sizeof( CmdProcessCommandsInfoNVX ) ); + return *this; + } + CmdProcessCommandsInfoNVX& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + CmdProcessCommandsInfoNVX& setObjectTable( ObjectTableNVX objectTable_ ) + { + objectTable = objectTable_; + return *this; + } + + CmdProcessCommandsInfoNVX& setIndirectCommandsLayout( IndirectCommandsLayoutNVX indirectCommandsLayout_ ) + { + indirectCommandsLayout = indirectCommandsLayout_; + return *this; + } + + CmdProcessCommandsInfoNVX& setIndirectCommandsTokenCount( uint32_t indirectCommandsTokenCount_ ) + { + indirectCommandsTokenCount = indirectCommandsTokenCount_; + return *this; + } + + CmdProcessCommandsInfoNVX& setPIndirectCommandsTokens( const IndirectCommandsTokenNVX* pIndirectCommandsTokens_ ) + { + pIndirectCommandsTokens = pIndirectCommandsTokens_; + return *this; + } + + CmdProcessCommandsInfoNVX& setMaxSequencesCount( uint32_t maxSequencesCount_ ) + { + maxSequencesCount = maxSequencesCount_; + return *this; + } + + CmdProcessCommandsInfoNVX& setTargetCommandBuffer( CommandBuffer targetCommandBuffer_ ) + { + targetCommandBuffer = targetCommandBuffer_; + return *this; + } + + CmdProcessCommandsInfoNVX& setSequencesCountBuffer( Buffer sequencesCountBuffer_ ) + { + sequencesCountBuffer = sequencesCountBuffer_; + return *this; + } + + CmdProcessCommandsInfoNVX& setSequencesCountOffset( DeviceSize sequencesCountOffset_ ) + { + 
sequencesCountOffset = sequencesCountOffset_; + return *this; + } + + CmdProcessCommandsInfoNVX& setSequencesIndexBuffer( Buffer sequencesIndexBuffer_ ) + { + sequencesIndexBuffer = sequencesIndexBuffer_; + return *this; + } + + CmdProcessCommandsInfoNVX& setSequencesIndexOffset( DeviceSize sequencesIndexOffset_ ) + { + sequencesIndexOffset = sequencesIndexOffset_; + return *this; + } + + operator VkCmdProcessCommandsInfoNVX const&() const + { + return *reinterpret_cast(this); + } + + operator VkCmdProcessCommandsInfoNVX &() + { + return *reinterpret_cast(this); + } + + bool operator==( CmdProcessCommandsInfoNVX const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectTable == rhs.objectTable ) + && ( indirectCommandsLayout == rhs.indirectCommandsLayout ) + && ( indirectCommandsTokenCount == rhs.indirectCommandsTokenCount ) + && ( pIndirectCommandsTokens == rhs.pIndirectCommandsTokens ) + && ( maxSequencesCount == rhs.maxSequencesCount ) + && ( targetCommandBuffer == rhs.targetCommandBuffer ) + && ( sequencesCountBuffer == rhs.sequencesCountBuffer ) + && ( sequencesCountOffset == rhs.sequencesCountOffset ) + && ( sequencesIndexBuffer == rhs.sequencesIndexBuffer ) + && ( sequencesIndexOffset == rhs.sequencesIndexOffset ); + } + + bool operator!=( CmdProcessCommandsInfoNVX const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eCmdProcessCommandsInfoNVX; + + public: + const void* pNext = nullptr; + ObjectTableNVX objectTable; + IndirectCommandsLayoutNVX indirectCommandsLayout; + uint32_t indirectCommandsTokenCount; + const IndirectCommandsTokenNVX* pIndirectCommandsTokens; + uint32_t maxSequencesCount; + CommandBuffer targetCommandBuffer; + Buffer sequencesCountBuffer; + DeviceSize sequencesCountOffset; + Buffer sequencesIndexBuffer; + DeviceSize sequencesIndexOffset; + }; + static_assert( sizeof( CmdProcessCommandsInfoNVX ) == sizeof( VkCmdProcessCommandsInfoNVX ), "struct and wrapper have different size!" ); + + struct PhysicalDeviceGroupProperties + { + operator VkPhysicalDeviceGroupProperties const&() const + { + return *reinterpret_cast(this); + } + + operator VkPhysicalDeviceGroupProperties &() + { + return *reinterpret_cast(this); + } + + bool operator==( PhysicalDeviceGroupProperties const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( physicalDeviceCount == rhs.physicalDeviceCount ) + && ( memcmp( physicalDevices, rhs.physicalDevices, VK_MAX_DEVICE_GROUP_SIZE * sizeof( PhysicalDevice ) ) == 0 ) + && ( subsetAllocation == rhs.subsetAllocation ); + } + + bool operator!=( PhysicalDeviceGroupProperties const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::ePhysicalDeviceGroupProperties; + + public: + void* pNext = nullptr; + uint32_t physicalDeviceCount; + PhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE]; + Bool32 subsetAllocation; + }; + static_assert( sizeof( PhysicalDeviceGroupProperties ) == sizeof( VkPhysicalDeviceGroupProperties ), "struct and wrapper have different size!" 
); + + using PhysicalDeviceGroupPropertiesKHR = PhysicalDeviceGroupProperties; + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + class Instance; + + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueDebugReportCallbackEXT = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueDebugUtilsMessengerEXT = UniqueHandle; + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueSurfaceKHR = UniqueHandle; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + + class Instance + { + public: + VULKAN_HPP_CONSTEXPR Instance() + : m_instance(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Instance( std::nullptr_t ) + : m_instance(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Instance( VkInstance instance ) + : m_instance( instance ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Instance & operator=(VkInstance instance) + { + m_instance = instance; + return *this; + } +#endif + + Instance & operator=( std::nullptr_t ) + { + m_instance = VK_NULL_HANDLE; + return *this; + } + + bool operator==( Instance const & rhs ) const + { + return m_instance == rhs.m_instance; + } + + bool operator!=(Instance const & rhs ) const + { + return m_instance != rhs.m_instance; + } + + bool operator<(Instance const & rhs ) const + { + return m_instance < rhs.m_instance; + } + + template + void destroy( const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, PhysicalDevice* pPhysicalDevices, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type enumeratePhysicalDevices(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + PFN_vkVoidFunction getProcAddr( const char* pName, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PFN_vkVoidFunction getProcAddr( const std::string & name, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template + Result createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + template + Result createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional allocator = 
nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_MIR_KHR + template + Result createMirSurfaceKHR( const MirSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createMirSurfaceKHR( const MirSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createMirSurfaceKHRUnique( const MirSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + + template + void destroySurfaceKHR( SurfaceKHR surface, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySurfaceKHR( SurfaceKHR surface, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( SurfaceKHR surface, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( SurfaceKHR surface, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_VI_NN + template + Result createViSurfaceNN( const ViSurfaceCreateInfoNN* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + template + Result createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + Result createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, 
SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + template + Result createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + template + Result createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + + template + Result createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT* pCreateInfo, const AllocationCallbacks* pAllocator, DebugReportCallbackEXT* pCallback, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyDebugReportCallbackEXT( DebugReportCallbackEXT callback, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDebugReportCallbackEXT( DebugReportCallbackEXT callback, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( DebugReportCallbackEXT callback, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + 
void destroy( DebugReportCallbackEXT callback, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void debugReportMessageEXT( DebugReportFlagsEXT flags, DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void debugReportMessageEXT( DebugReportFlagsEXT flags, DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result enumeratePhysicalDeviceGroups( uint32_t* pPhysicalDeviceGroupCount, PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type enumeratePhysicalDeviceGroups(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + Result enumeratePhysicalDeviceGroupsKHR( uint32_t* pPhysicalDeviceGroupCount, PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = DispatchLoaderStatic> + typename ResultValueType>::type enumeratePhysicalDeviceGroupsKHR(Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_IOS_MVK + template + Result createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + +#ifdef VK_USE_PLATFORM_MACOS_MVK + template + Result createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + template + Result createDebugUtilsMessengerEXT( const DebugUtilsMessengerCreateInfoEXT* pCreateInfo, const AllocationCallbacks* pAllocator, DebugUtilsMessengerEXT* pMessenger, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createDebugUtilsMessengerEXT( const 
DebugUtilsMessengerCreateInfoEXT & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createDebugUtilsMessengerEXTUnique( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyDebugUtilsMessengerEXT( DebugUtilsMessengerEXT messenger, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDebugUtilsMessengerEXT( DebugUtilsMessengerEXT messenger, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroy( DebugUtilsMessengerEXT messenger, const AllocationCallbacks* pAllocator, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( DebugUtilsMessengerEXT messenger, Optional allocator = nullptr, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void submitDebugUtilsMessageEXT( DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, DebugUtilsMessageTypeFlagsEXT messageTypes, const DebugUtilsMessengerCallbackDataEXT* pCallbackData, Dispatch const &d = Dispatch() ) const; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void submitDebugUtilsMessageEXT( DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, DebugUtilsMessageTypeFlagsEXT messageTypes, const DebugUtilsMessengerCallbackDataEXT & callbackData, Dispatch const &d = Dispatch() ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkInstance() const + { + return m_instance; + } + + explicit operator bool() const + { + return m_instance != VK_NULL_HANDLE; + } + + bool operator!() const + { + return m_instance == VK_NULL_HANDLE; + } + + private: + VkInstance m_instance; + }; + + static_assert( sizeof( Instance ) == sizeof( VkInstance ), "handle and wrapper have different size!" 
); + + template + VULKAN_HPP_INLINE void Instance::destroy( const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyInstance( m_instance, reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroy( Optional allocator, Dispatch const &d ) const + { + d.vkDestroyInstance( m_instance, reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, PhysicalDevice* pPhysicalDevices, Dispatch const &d) const + { + return static_cast( d.vkEnumeratePhysicalDevices( m_instance, pPhysicalDeviceCount, reinterpret_cast( pPhysicalDevices ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::enumeratePhysicalDevices(Dispatch const &d ) const + { + std::vector physicalDevices; + uint32_t physicalDeviceCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && physicalDeviceCount ) + { + physicalDevices.resize( physicalDeviceCount ); + result = static_cast( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, reinterpret_cast( physicalDevices.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); + physicalDevices.resize( physicalDeviceCount ); + return createResultValue( result, physicalDevices, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDevices" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const char* pName, Dispatch const &d) const + { + return d.vkGetInstanceProcAddr( m_instance, pName ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const std::string & name, Dispatch const &d ) const + { + return d.vkGetInstanceProcAddr( m_instance, name.c_str() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template + VULKAN_HPP_INLINE Result Instance::createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createAndroidSurfaceKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( 
static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createAndroidSurfaceKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + template + VULKAN_HPP_INLINE Result Instance::createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDisplayPlaneSurfaceKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDisplayPlaneSurfaceKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_MIR_KHR + template + VULKAN_HPP_INLINE Result Instance::createMirSurfaceKHR( const MirSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateMirSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createMirSurfaceKHR( const MirSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateMirSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createMirSurfaceKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createMirSurfaceKHRUnique( const MirSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateMirSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, 
VULKAN_HPP_NAMESPACE_STRING"::Instance::createMirSurfaceKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + + template + VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( SurfaceKHR surface, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySurfaceKHR( m_instance, static_cast( surface ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( SurfaceKHR surface, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySurfaceKHR( m_instance, static_cast( surface ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Instance::destroy( SurfaceKHR surface, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroySurfaceKHR( m_instance, static_cast( surface ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroy( SurfaceKHR surface, Optional allocator, Dispatch const &d ) const + { + d.vkDestroySurfaceKHR( m_instance, static_cast( surface ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_VI_NN + template + VULKAN_HPP_INLINE Result Instance::createViSurfaceNN( const ViSurfaceCreateInfoNN* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createViSurfaceNN" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createViSurfaceNNUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + template + VULKAN_HPP_INLINE Result Instance::createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createWaylandSurfaceKHR( const 
WaylandSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createWaylandSurfaceKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createWaylandSurfaceKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_INLINE Result Instance::createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createWin32SurfaceKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createWin32SurfaceKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + template + VULKAN_HPP_INLINE Result Instance::createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( 
d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createXlibSurfaceKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createXlibSurfaceKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + template + VULKAN_HPP_INLINE Result Instance::createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createXcbSurfaceKHR" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createXcbSurfaceKHRUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + + template + VULKAN_HPP_INLINE Result Instance::createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT* pCreateInfo, const AllocationCallbacks* pAllocator, DebugReportCallbackEXT* pCallback, Dispatch const &d) const + { + return static_cast( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pCallback ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional allocator, Dispatch const &d ) const + { + DebugReportCallbackEXT callback; + Result result = static_cast( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &callback ) ) ); + return 
createResultValue( result, callback, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDebugReportCallbackEXT" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional allocator, Dispatch const &d ) const + { + DebugReportCallbackEXT callback; + Result result = static_cast( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &callback ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, callback, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDebugReportCallbackEXTUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( DebugReportCallbackEXT callback, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast( callback ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( DebugReportCallbackEXT callback, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast( callback ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Instance::destroy( DebugReportCallbackEXT callback, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast( callback ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroy( DebugReportCallbackEXT callback, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast( callback ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( DebugReportFlagsEXT flags, DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage, Dispatch const &d) const + { + d.vkDebugReportMessageEXT( m_instance, static_cast( flags ), static_cast( objectType ), object, location, messageCode, pLayerPrefix, pMessage ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( DebugReportFlagsEXT flags, DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message, Dispatch const &d ) const + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( layerPrefix.size() == message.size() ); +#else + if ( layerPrefix.size() != message.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Instance::debugReportMessageEXT: layerPrefix.size() != message.size()" ); + } +#endif // VULKAN_HPP_NO_EXCEPTIONS + d.vkDebugReportMessageEXT( m_instance, static_cast( flags ), static_cast( objectType ), object, location, messageCode, layerPrefix.c_str(), message.c_str() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDeviceGroups( uint32_t* pPhysicalDeviceGroupCount, 
PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const &d) const + { + return static_cast( d.vkEnumeratePhysicalDeviceGroups( m_instance, pPhysicalDeviceGroupCount, reinterpret_cast( pPhysicalDeviceGroupProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::enumeratePhysicalDeviceGroups(Dispatch const &d ) const + { + std::vector physicalDeviceGroupProperties; + uint32_t physicalDeviceGroupCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, reinterpret_cast( physicalDeviceGroupProperties.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroups" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDeviceGroupsKHR( uint32_t* pPhysicalDeviceGroupCount, PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const &d) const + { + return static_cast( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, pPhysicalDeviceGroupCount, reinterpret_cast( pPhysicalDeviceGroupProperties ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::enumeratePhysicalDeviceGroupsKHR(Dispatch const &d ) const + { + std::vector physicalDeviceGroupProperties; + uint32_t physicalDeviceGroupCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, reinterpret_cast( physicalDeviceGroupProperties.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroupsKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_IOS_MVK + template + VULKAN_HPP_INLINE Result Instance::createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast( &createInfo ), 
reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createIOSSurfaceMVK" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createIOSSurfaceMVKUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + +#ifdef VK_USE_PLATFORM_MACOS_MVK + template + VULKAN_HPP_INLINE Result Instance::createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK* pCreateInfo, const AllocationCallbacks* pAllocator, SurfaceKHR* pSurface, Dispatch const &d) const + { + return static_cast( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pSurface ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createMacOSSurfaceMVK" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional allocator, Dispatch const &d ) const + { + SurfaceKHR surface; + Result result = static_cast( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING"::Instance::createMacOSSurfaceMVKUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + template + VULKAN_HPP_INLINE Result Instance::createDebugUtilsMessengerEXT( const DebugUtilsMessengerCreateInfoEXT* pCreateInfo, const AllocationCallbacks* pAllocator, DebugUtilsMessengerEXT* pMessenger, Dispatch const &d) const + { + return static_cast( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pMessenger ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type Instance::createDebugUtilsMessengerEXT( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional allocator, Dispatch const &d ) const + { + DebugUtilsMessengerEXT messenger; + Result result = static_cast( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &messenger ) ) ); + return createResultValue( result, messenger, 
VULKAN_HPP_NAMESPACE_STRING"::Instance::createDebugUtilsMessengerEXT" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createDebugUtilsMessengerEXTUnique( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional allocator, Dispatch const &d ) const + { + DebugUtilsMessengerEXT messenger; + Result result = static_cast( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &messenger ) ) ); + + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, messenger, VULKAN_HPP_NAMESPACE_STRING"::Instance::createDebugUtilsMessengerEXTUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Instance::destroyDebugUtilsMessengerEXT( DebugUtilsMessengerEXT messenger, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast( messenger ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroyDebugUtilsMessengerEXT( DebugUtilsMessengerEXT messenger, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast( messenger ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Instance::destroy( DebugUtilsMessengerEXT messenger, const AllocationCallbacks* pAllocator, Dispatch const &d) const + { + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast( messenger ), reinterpret_cast( pAllocator ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroy( DebugUtilsMessengerEXT messenger, Optional allocator, Dispatch const &d ) const + { + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast( messenger ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Instance::submitDebugUtilsMessageEXT( DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, DebugUtilsMessageTypeFlagsEXT messageTypes, const DebugUtilsMessengerCallbackDataEXT* pCallbackData, Dispatch const &d) const + { + d.vkSubmitDebugUtilsMessageEXT( m_instance, static_cast( messageSeverity ), static_cast( messageTypes ), reinterpret_cast( pCallbackData ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::submitDebugUtilsMessageEXT( DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, DebugUtilsMessageTypeFlagsEXT messageTypes, const DebugUtilsMessengerCallbackDataEXT & callbackData, Dispatch const &d ) const + { + d.vkSubmitDebugUtilsMessageEXT( m_instance, static_cast( messageSeverity ), static_cast( messageTypes ), reinterpret_cast( &callbackData ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + struct DeviceGroupDeviceCreateInfo + { + DeviceGroupDeviceCreateInfo( uint32_t physicalDeviceCount_ = 0, + const PhysicalDevice* pPhysicalDevices_ = nullptr ) + : physicalDeviceCount( physicalDeviceCount_ ) + , pPhysicalDevices( pPhysicalDevices_ ) + { + } + + DeviceGroupDeviceCreateInfo( VkDeviceGroupDeviceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( DeviceGroupDeviceCreateInfo ) ); + } + + DeviceGroupDeviceCreateInfo& operator=( VkDeviceGroupDeviceCreateInfo const & rhs ) + { + memcpy( this, &rhs, sizeof( 
DeviceGroupDeviceCreateInfo ) ); + return *this; + } + DeviceGroupDeviceCreateInfo& setPNext( const void* pNext_ ) + { + pNext = pNext_; + return *this; + } + + DeviceGroupDeviceCreateInfo& setPhysicalDeviceCount( uint32_t physicalDeviceCount_ ) + { + physicalDeviceCount = physicalDeviceCount_; + return *this; + } + + DeviceGroupDeviceCreateInfo& setPPhysicalDevices( const PhysicalDevice* pPhysicalDevices_ ) + { + pPhysicalDevices = pPhysicalDevices_; + return *this; + } + + operator VkDeviceGroupDeviceCreateInfo const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceGroupDeviceCreateInfo &() + { + return *reinterpret_cast(this); + } + + bool operator==( DeviceGroupDeviceCreateInfo const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( physicalDeviceCount == rhs.physicalDeviceCount ) + && ( pPhysicalDevices == rhs.pPhysicalDevices ); + } + + bool operator!=( DeviceGroupDeviceCreateInfo const& rhs ) const + { + return !operator==( rhs ); + } + + private: + StructureType sType = StructureType::eDeviceGroupDeviceCreateInfo; + + public: + const void* pNext = nullptr; + uint32_t physicalDeviceCount; + const PhysicalDevice* pPhysicalDevices; + }; + static_assert( sizeof( DeviceGroupDeviceCreateInfo ) == sizeof( VkDeviceGroupDeviceCreateInfo ), "struct and wrapper have different size!" ); + + using DeviceGroupDeviceCreateInfoKHR = DeviceGroupDeviceCreateInfo; + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + + template class UniqueHandleTraits {public: using deleter = ObjectDestroy; }; + using UniqueInstance = UniqueHandle; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + + template + Result createInstance( const InstanceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Instance* pInstance, Dispatch const &d = Dispatch() ); +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + ResultValueType::type createInstance( const InstanceCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ); +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + typename ResultValueType>::type createInstanceUnique( const InstanceCreateInfo & createInfo, Optional allocator = nullptr, Dispatch const &d = Dispatch() ); +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE Result createInstance( const InstanceCreateInfo* pCreateInfo, const AllocationCallbacks* pAllocator, Instance* pInstance, Dispatch const &d) + { + return static_cast( d.vkCreateInstance( reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast( pInstance ) ) ); + } +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE ResultValueType::type createInstance( const InstanceCreateInfo & createInfo, Optional allocator, Dispatch const &d ) + { + Instance instance; + Result result = static_cast( d.vkCreateInstance( reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &instance ) ) ); + return createResultValue( result, instance, VULKAN_HPP_NAMESPACE_STRING"::createInstance" ); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type createInstanceUnique( const InstanceCreateInfo & createInfo, Optional allocator, Dispatch const &d ) + { + Instance instance; + Result result = static_cast( d.vkCreateInstance( reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &instance ) ) ); + + ObjectDestroy deleter( allocator, d ); + return createResultValue( 
result, instance, VULKAN_HPP_NAMESPACE_STRING"::createInstanceUnique", deleter ); + } +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + struct BaseOutStructure + { + BaseOutStructure( ) + { + } + + BaseOutStructure( VkBaseOutStructure const & rhs ) + { + memcpy( this, &rhs, sizeof( BaseOutStructure ) ); + } + + BaseOutStructure& operator=( VkBaseOutStructure const & rhs ) + { + memcpy( this, &rhs, sizeof( BaseOutStructure ) ); + return *this; + } + BaseOutStructure& setPNext( struct BaseOutStructure* pNext_ ) + { + pNext = pNext_; + return *this; + } + + operator VkBaseOutStructure const&() const + { + return *reinterpret_cast(this); + } + + operator VkBaseOutStructure &() + { + return *reinterpret_cast(this); + } + + bool operator==( BaseOutStructure const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ); + } + + bool operator!=( BaseOutStructure const& rhs ) const + { + return !operator==( rhs ); + } + + StructureType sType; + struct BaseOutStructure* pNext = nullptr; + }; + static_assert( sizeof( BaseOutStructure ) == sizeof( VkBaseOutStructure ), "struct and wrapper have different size!" ); + + struct BaseInStructure + { + BaseInStructure( ) + { + } + + BaseInStructure( VkBaseInStructure const & rhs ) + { + memcpy( this, &rhs, sizeof( BaseInStructure ) ); + } + + BaseInStructure& operator=( VkBaseInStructure const & rhs ) + { + memcpy( this, &rhs, sizeof( BaseInStructure ) ); + return *this; + } + BaseInStructure& setPNext( const struct BaseInStructure* pNext_ ) + { + pNext = pNext_; + return *this; + } + + operator VkBaseInStructure const&() const + { + return *reinterpret_cast(this); + } + + operator VkBaseInStructure &() + { + return *reinterpret_cast(this); + } + + bool operator==( BaseInStructure const& rhs ) const + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ); + } + + bool operator!=( BaseInStructure const& rhs ) const + { + return !operator==( rhs ); + } + + StructureType sType; + const struct BaseInStructure* pNext = nullptr; + }; + static_assert( sizeof( BaseInStructure ) == sizeof( VkBaseInStructure ), "struct and wrapper have different size!" 
); + + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_NV + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_NV*/ +#ifdef VK_USE_PLATFORM_WIN32_NV + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_NV*/ + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct 
isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + template <> struct isStructureChainValid{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct 
isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_NV + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_NV*/ + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + 
template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + template <> struct isStructureChainValid{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + template <> struct isStructureChainValid{ enum { value = true }; }; + VULKAN_HPP_INLINE std::string to_string(FramebufferCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(FramebufferCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(QueryPoolCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(QueryPoolCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(RenderPassCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(RenderPassCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(SamplerCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(SamplerCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineLayoutCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineLayoutCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineCacheCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineCacheCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineDepthStencilStateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineDepthStencilStateCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineDynamicStateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineDynamicStateCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineColorBlendStateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineColorBlendStateCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineMultisampleStateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineMultisampleStateCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineRasterizationStateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineRasterizationStateCreateFlags) + { + return "{}"; + } 
+ + VULKAN_HPP_INLINE std::string to_string(PipelineViewportStateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineViewportStateCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineTessellationStateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineTessellationStateCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineInputAssemblyStateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineInputAssemblyStateCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineVertexInputStateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineVertexInputStateCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineShaderStageCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineShaderStageCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(BufferViewCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(BufferViewCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(InstanceCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(InstanceCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(DeviceCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(DeviceCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(ImageViewCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(ImageViewCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(SemaphoreCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(SemaphoreCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(ShaderModuleCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(ShaderModuleCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(EventCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(EventCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(MemoryMapFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(MemoryMapFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorPoolResetFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorPoolResetFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorUpdateTemplateCreateFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorUpdateTemplateCreateFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(DisplayModeCreateFlagBitsKHR) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(DisplayModeCreateFlagsKHR) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(DisplaySurfaceCreateFlagBitsKHR) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(DisplaySurfaceCreateFlagsKHR) + { + return "{}"; + } + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + VULKAN_HPP_INLINE std::string to_string(AndroidSurfaceCreateFlagBitsKHR) + { + return "(void)"; + } +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + 
VULKAN_HPP_INLINE std::string to_string(AndroidSurfaceCreateFlagsKHR) + { + return "{}"; + } +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#ifdef VK_USE_PLATFORM_MIR_KHR + VULKAN_HPP_INLINE std::string to_string(MirSurfaceCreateFlagBitsKHR) + { + return "(void)"; + } +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + +#ifdef VK_USE_PLATFORM_MIR_KHR + VULKAN_HPP_INLINE std::string to_string(MirSurfaceCreateFlagsKHR) + { + return "{}"; + } +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + +#ifdef VK_USE_PLATFORM_VI_NN + VULKAN_HPP_INLINE std::string to_string(ViSurfaceCreateFlagBitsNN) + { + return "(void)"; + } +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_VI_NN + VULKAN_HPP_INLINE std::string to_string(ViSurfaceCreateFlagsNN) + { + return "{}"; + } +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + VULKAN_HPP_INLINE std::string to_string(WaylandSurfaceCreateFlagBitsKHR) + { + return "(void)"; + } +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + VULKAN_HPP_INLINE std::string to_string(WaylandSurfaceCreateFlagsKHR) + { + return "{}"; + } +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_INLINE std::string to_string(Win32SurfaceCreateFlagBitsKHR) + { + return "(void)"; + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VULKAN_HPP_INLINE std::string to_string(Win32SurfaceCreateFlagsKHR) + { + return "{}"; + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + VULKAN_HPP_INLINE std::string to_string(XlibSurfaceCreateFlagBitsKHR) + { + return "(void)"; + } +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + VULKAN_HPP_INLINE std::string to_string(XlibSurfaceCreateFlagsKHR) + { + return "{}"; + } +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + VULKAN_HPP_INLINE std::string to_string(XcbSurfaceCreateFlagBitsKHR) + { + return "(void)"; + } +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + VULKAN_HPP_INLINE std::string to_string(XcbSurfaceCreateFlagsKHR) + { + return "{}"; + } +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + +#ifdef VK_USE_PLATFORM_IOS_MVK + VULKAN_HPP_INLINE std::string to_string(IOSSurfaceCreateFlagBitsMVK) + { + return "(void)"; + } +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + +#ifdef VK_USE_PLATFORM_IOS_MVK + VULKAN_HPP_INLINE std::string to_string(IOSSurfaceCreateFlagsMVK) + { + return "{}"; + } +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + +#ifdef VK_USE_PLATFORM_MACOS_MVK + VULKAN_HPP_INLINE std::string to_string(MacOSSurfaceCreateFlagBitsMVK) + { + return "(void)"; + } +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + +#ifdef VK_USE_PLATFORM_MACOS_MVK + VULKAN_HPP_INLINE std::string to_string(MacOSSurfaceCreateFlagsMVK) + { + return "{}"; + } +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + VULKAN_HPP_INLINE std::string to_string(CommandPoolTrimFlagBits) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(CommandPoolTrimFlags) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineViewportSwizzleStateCreateFlagBitsNV) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineViewportSwizzleStateCreateFlagsNV) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineDiscardRectangleStateCreateFlagBitsEXT) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineDiscardRectangleStateCreateFlagsEXT) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineCoverageToColorStateCreateFlagBitsNV) + { + return 
"(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineCoverageToColorStateCreateFlagsNV) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineCoverageModulationStateCreateFlagBitsNV) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineCoverageModulationStateCreateFlagsNV) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(ValidationCacheCreateFlagBitsEXT) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(ValidationCacheCreateFlagsEXT) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(DebugUtilsMessengerCreateFlagBitsEXT) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(DebugUtilsMessengerCreateFlagsEXT) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(DebugUtilsMessengerCallbackDataFlagBitsEXT) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(DebugUtilsMessengerCallbackDataFlagsEXT) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineRasterizationConservativeStateCreateFlagBitsEXT) + { + return "(void)"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineRasterizationConservativeStateCreateFlagsEXT) + { + return "{}"; + } + + VULKAN_HPP_INLINE std::string to_string(ImageLayout value) + { + switch (value) + { + case ImageLayout::eUndefined: return "Undefined"; + case ImageLayout::eGeneral: return "General"; + case ImageLayout::eColorAttachmentOptimal: return "ColorAttachmentOptimal"; + case ImageLayout::eDepthStencilAttachmentOptimal: return "DepthStencilAttachmentOptimal"; + case ImageLayout::eDepthStencilReadOnlyOptimal: return "DepthStencilReadOnlyOptimal"; + case ImageLayout::eShaderReadOnlyOptimal: return "ShaderReadOnlyOptimal"; + case ImageLayout::eTransferSrcOptimal: return "TransferSrcOptimal"; + case ImageLayout::eTransferDstOptimal: return "TransferDstOptimal"; + case ImageLayout::ePreinitialized: return "Preinitialized"; + case ImageLayout::eDepthReadOnlyStencilAttachmentOptimal: return "DepthReadOnlyStencilAttachmentOptimal"; + case ImageLayout::eDepthAttachmentStencilReadOnlyOptimal: return "DepthAttachmentStencilReadOnlyOptimal"; + case ImageLayout::ePresentSrcKHR: return "PresentSrcKHR"; + case ImageLayout::eSharedPresentKHR: return "SharedPresentKHR"; + case ImageLayout::eShadingRateOptimalNV: return "ShadingRateOptimalNV"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(AttachmentLoadOp value) + { + switch (value) + { + case AttachmentLoadOp::eLoad: return "Load"; + case AttachmentLoadOp::eClear: return "Clear"; + case AttachmentLoadOp::eDontCare: return "DontCare"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(AttachmentStoreOp value) + { + switch (value) + { + case AttachmentStoreOp::eStore: return "Store"; + case AttachmentStoreOp::eDontCare: return "DontCare"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ImageType value) + { + switch (value) + { + case ImageType::e1D: return "1D"; + case ImageType::e2D: return "2D"; + case ImageType::e3D: return "3D"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ImageTiling value) + { + switch (value) + { + case ImageTiling::eOptimal: return "Optimal"; + case ImageTiling::eLinear: return "Linear"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ImageViewType value) + { + switch (value) + { + case ImageViewType::e1D: return "1D"; + case ImageViewType::e2D: return 
"2D"; + case ImageViewType::e3D: return "3D"; + case ImageViewType::eCube: return "Cube"; + case ImageViewType::e1DArray: return "1DArray"; + case ImageViewType::e2DArray: return "2DArray"; + case ImageViewType::eCubeArray: return "CubeArray"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CommandBufferLevel value) + { + switch (value) + { + case CommandBufferLevel::ePrimary: return "Primary"; + case CommandBufferLevel::eSecondary: return "Secondary"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ComponentSwizzle value) + { + switch (value) + { + case ComponentSwizzle::eIdentity: return "Identity"; + case ComponentSwizzle::eZero: return "Zero"; + case ComponentSwizzle::eOne: return "One"; + case ComponentSwizzle::eR: return "R"; + case ComponentSwizzle::eG: return "G"; + case ComponentSwizzle::eB: return "B"; + case ComponentSwizzle::eA: return "A"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorType value) + { + switch (value) + { + case DescriptorType::eSampler: return "Sampler"; + case DescriptorType::eCombinedImageSampler: return "CombinedImageSampler"; + case DescriptorType::eSampledImage: return "SampledImage"; + case DescriptorType::eStorageImage: return "StorageImage"; + case DescriptorType::eUniformTexelBuffer: return "UniformTexelBuffer"; + case DescriptorType::eStorageTexelBuffer: return "StorageTexelBuffer"; + case DescriptorType::eUniformBuffer: return "UniformBuffer"; + case DescriptorType::eStorageBuffer: return "StorageBuffer"; + case DescriptorType::eUniformBufferDynamic: return "UniformBufferDynamic"; + case DescriptorType::eStorageBufferDynamic: return "StorageBufferDynamic"; + case DescriptorType::eInputAttachment: return "InputAttachment"; + case DescriptorType::eInlineUniformBlockEXT: return "InlineUniformBlockEXT"; + case DescriptorType::eAccelerationStructureNVX: return "AccelerationStructureNVX"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(QueryType value) + { + switch (value) + { + case QueryType::eOcclusion: return "Occlusion"; + case QueryType::ePipelineStatistics: return "PipelineStatistics"; + case QueryType::eTimestamp: return "Timestamp"; + case QueryType::eCompactedSizeNVX: return "CompactedSizeNVX"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(BorderColor value) + { + switch (value) + { + case BorderColor::eFloatTransparentBlack: return "FloatTransparentBlack"; + case BorderColor::eIntTransparentBlack: return "IntTransparentBlack"; + case BorderColor::eFloatOpaqueBlack: return "FloatOpaqueBlack"; + case BorderColor::eIntOpaqueBlack: return "IntOpaqueBlack"; + case BorderColor::eFloatOpaqueWhite: return "FloatOpaqueWhite"; + case BorderColor::eIntOpaqueWhite: return "IntOpaqueWhite"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(PipelineBindPoint value) + { + switch (value) + { + case PipelineBindPoint::eGraphics: return "Graphics"; + case PipelineBindPoint::eCompute: return "Compute"; + case PipelineBindPoint::eRaytracingNVX: return "RaytracingNVX"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(PipelineCacheHeaderVersion value) + { + switch (value) + { + case PipelineCacheHeaderVersion::eOne: return "One"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(PrimitiveTopology value) + { + switch (value) + { + case PrimitiveTopology::ePointList: return "PointList"; + case 
PrimitiveTopology::eLineList: return "LineList"; + case PrimitiveTopology::eLineStrip: return "LineStrip"; + case PrimitiveTopology::eTriangleList: return "TriangleList"; + case PrimitiveTopology::eTriangleStrip: return "TriangleStrip"; + case PrimitiveTopology::eTriangleFan: return "TriangleFan"; + case PrimitiveTopology::eLineListWithAdjacency: return "LineListWithAdjacency"; + case PrimitiveTopology::eLineStripWithAdjacency: return "LineStripWithAdjacency"; + case PrimitiveTopology::eTriangleListWithAdjacency: return "TriangleListWithAdjacency"; + case PrimitiveTopology::eTriangleStripWithAdjacency: return "TriangleStripWithAdjacency"; + case PrimitiveTopology::ePatchList: return "PatchList"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SharingMode value) + { + switch (value) + { + case SharingMode::eExclusive: return "Exclusive"; + case SharingMode::eConcurrent: return "Concurrent"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(IndexType value) + { + switch (value) + { + case IndexType::eUint16: return "Uint16"; + case IndexType::eUint32: return "Uint32"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(Filter value) + { + switch (value) + { + case Filter::eNearest: return "Nearest"; + case Filter::eLinear: return "Linear"; + case Filter::eCubicIMG: return "CubicIMG"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SamplerMipmapMode value) + { + switch (value) + { + case SamplerMipmapMode::eNearest: return "Nearest"; + case SamplerMipmapMode::eLinear: return "Linear"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SamplerAddressMode value) + { + switch (value) + { + case SamplerAddressMode::eRepeat: return "Repeat"; + case SamplerAddressMode::eMirroredRepeat: return "MirroredRepeat"; + case SamplerAddressMode::eClampToEdge: return "ClampToEdge"; + case SamplerAddressMode::eClampToBorder: return "ClampToBorder"; + case SamplerAddressMode::eMirrorClampToEdge: return "MirrorClampToEdge"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CompareOp value) + { + switch (value) + { + case CompareOp::eNever: return "Never"; + case CompareOp::eLess: return "Less"; + case CompareOp::eEqual: return "Equal"; + case CompareOp::eLessOrEqual: return "LessOrEqual"; + case CompareOp::eGreater: return "Greater"; + case CompareOp::eNotEqual: return "NotEqual"; + case CompareOp::eGreaterOrEqual: return "GreaterOrEqual"; + case CompareOp::eAlways: return "Always"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(PolygonMode value) + { + switch (value) + { + case PolygonMode::eFill: return "Fill"; + case PolygonMode::eLine: return "Line"; + case PolygonMode::ePoint: return "Point"; + case PolygonMode::eFillRectangleNV: return "FillRectangleNV"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CullModeFlagBits value) + { + switch (value) + { + case CullModeFlagBits::eNone: return "None"; + case CullModeFlagBits::eFront: return "Front"; + case CullModeFlagBits::eBack: return "Back"; + case CullModeFlagBits::eFrontAndBack: return "FrontAndBack"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CullModeFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & CullModeFlagBits::eNone) result += "None | "; + if (value & CullModeFlagBits::eFront) result += "Front | "; + if (value & 
CullModeFlagBits::eBack) result += "Back | "; + if (value & CullModeFlagBits::eFrontAndBack) result += "FrontAndBack | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(FrontFace value) + { + switch (value) + { + case FrontFace::eCounterClockwise: return "CounterClockwise"; + case FrontFace::eClockwise: return "Clockwise"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(BlendFactor value) + { + switch (value) + { + case BlendFactor::eZero: return "Zero"; + case BlendFactor::eOne: return "One"; + case BlendFactor::eSrcColor: return "SrcColor"; + case BlendFactor::eOneMinusSrcColor: return "OneMinusSrcColor"; + case BlendFactor::eDstColor: return "DstColor"; + case BlendFactor::eOneMinusDstColor: return "OneMinusDstColor"; + case BlendFactor::eSrcAlpha: return "SrcAlpha"; + case BlendFactor::eOneMinusSrcAlpha: return "OneMinusSrcAlpha"; + case BlendFactor::eDstAlpha: return "DstAlpha"; + case BlendFactor::eOneMinusDstAlpha: return "OneMinusDstAlpha"; + case BlendFactor::eConstantColor: return "ConstantColor"; + case BlendFactor::eOneMinusConstantColor: return "OneMinusConstantColor"; + case BlendFactor::eConstantAlpha: return "ConstantAlpha"; + case BlendFactor::eOneMinusConstantAlpha: return "OneMinusConstantAlpha"; + case BlendFactor::eSrcAlphaSaturate: return "SrcAlphaSaturate"; + case BlendFactor::eSrc1Color: return "Src1Color"; + case BlendFactor::eOneMinusSrc1Color: return "OneMinusSrc1Color"; + case BlendFactor::eSrc1Alpha: return "Src1Alpha"; + case BlendFactor::eOneMinusSrc1Alpha: return "OneMinusSrc1Alpha"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(BlendOp value) + { + switch (value) + { + case BlendOp::eAdd: return "Add"; + case BlendOp::eSubtract: return "Subtract"; + case BlendOp::eReverseSubtract: return "ReverseSubtract"; + case BlendOp::eMin: return "Min"; + case BlendOp::eMax: return "Max"; + case BlendOp::eZeroEXT: return "ZeroEXT"; + case BlendOp::eSrcEXT: return "SrcEXT"; + case BlendOp::eDstEXT: return "DstEXT"; + case BlendOp::eSrcOverEXT: return "SrcOverEXT"; + case BlendOp::eDstOverEXT: return "DstOverEXT"; + case BlendOp::eSrcInEXT: return "SrcInEXT"; + case BlendOp::eDstInEXT: return "DstInEXT"; + case BlendOp::eSrcOutEXT: return "SrcOutEXT"; + case BlendOp::eDstOutEXT: return "DstOutEXT"; + case BlendOp::eSrcAtopEXT: return "SrcAtopEXT"; + case BlendOp::eDstAtopEXT: return "DstAtopEXT"; + case BlendOp::eXorEXT: return "XorEXT"; + case BlendOp::eMultiplyEXT: return "MultiplyEXT"; + case BlendOp::eScreenEXT: return "ScreenEXT"; + case BlendOp::eOverlayEXT: return "OverlayEXT"; + case BlendOp::eDarkenEXT: return "DarkenEXT"; + case BlendOp::eLightenEXT: return "LightenEXT"; + case BlendOp::eColordodgeEXT: return "ColordodgeEXT"; + case BlendOp::eColorburnEXT: return "ColorburnEXT"; + case BlendOp::eHardlightEXT: return "HardlightEXT"; + case BlendOp::eSoftlightEXT: return "SoftlightEXT"; + case BlendOp::eDifferenceEXT: return "DifferenceEXT"; + case BlendOp::eExclusionEXT: return "ExclusionEXT"; + case BlendOp::eInvertEXT: return "InvertEXT"; + case BlendOp::eInvertRgbEXT: return "InvertRgbEXT"; + case BlendOp::eLineardodgeEXT: return "LineardodgeEXT"; + case BlendOp::eLinearburnEXT: return "LinearburnEXT"; + case BlendOp::eVividlightEXT: return "VividlightEXT"; + case BlendOp::eLinearlightEXT: return "LinearlightEXT"; + case BlendOp::ePinlightEXT: return "PinlightEXT"; + case BlendOp::eHardmixEXT: return "HardmixEXT"; + case 
BlendOp::eHslHueEXT: return "HslHueEXT"; + case BlendOp::eHslSaturationEXT: return "HslSaturationEXT"; + case BlendOp::eHslColorEXT: return "HslColorEXT"; + case BlendOp::eHslLuminosityEXT: return "HslLuminosityEXT"; + case BlendOp::ePlusEXT: return "PlusEXT"; + case BlendOp::ePlusClampedEXT: return "PlusClampedEXT"; + case BlendOp::ePlusClampedAlphaEXT: return "PlusClampedAlphaEXT"; + case BlendOp::ePlusDarkerEXT: return "PlusDarkerEXT"; + case BlendOp::eMinusEXT: return "MinusEXT"; + case BlendOp::eMinusClampedEXT: return "MinusClampedEXT"; + case BlendOp::eContrastEXT: return "ContrastEXT"; + case BlendOp::eInvertOvgEXT: return "InvertOvgEXT"; + case BlendOp::eRedEXT: return "RedEXT"; + case BlendOp::eGreenEXT: return "GreenEXT"; + case BlendOp::eBlueEXT: return "BlueEXT"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(StencilOp value) + { + switch (value) + { + case StencilOp::eKeep: return "Keep"; + case StencilOp::eZero: return "Zero"; + case StencilOp::eReplace: return "Replace"; + case StencilOp::eIncrementAndClamp: return "IncrementAndClamp"; + case StencilOp::eDecrementAndClamp: return "DecrementAndClamp"; + case StencilOp::eInvert: return "Invert"; + case StencilOp::eIncrementAndWrap: return "IncrementAndWrap"; + case StencilOp::eDecrementAndWrap: return "DecrementAndWrap"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(LogicOp value) + { + switch (value) + { + case LogicOp::eClear: return "Clear"; + case LogicOp::eAnd: return "And"; + case LogicOp::eAndReverse: return "AndReverse"; + case LogicOp::eCopy: return "Copy"; + case LogicOp::eAndInverted: return "AndInverted"; + case LogicOp::eNoOp: return "NoOp"; + case LogicOp::eXor: return "Xor"; + case LogicOp::eOr: return "Or"; + case LogicOp::eNor: return "Nor"; + case LogicOp::eEquivalent: return "Equivalent"; + case LogicOp::eInvert: return "Invert"; + case LogicOp::eOrReverse: return "OrReverse"; + case LogicOp::eCopyInverted: return "CopyInverted"; + case LogicOp::eOrInverted: return "OrInverted"; + case LogicOp::eNand: return "Nand"; + case LogicOp::eSet: return "Set"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(InternalAllocationType value) + { + switch (value) + { + case InternalAllocationType::eExecutable: return "Executable"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SystemAllocationScope value) + { + switch (value) + { + case SystemAllocationScope::eCommand: return "Command"; + case SystemAllocationScope::eObject: return "Object"; + case SystemAllocationScope::eCache: return "Cache"; + case SystemAllocationScope::eDevice: return "Device"; + case SystemAllocationScope::eInstance: return "Instance"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(PhysicalDeviceType value) + { + switch (value) + { + case PhysicalDeviceType::eOther: return "Other"; + case PhysicalDeviceType::eIntegratedGpu: return "IntegratedGpu"; + case PhysicalDeviceType::eDiscreteGpu: return "DiscreteGpu"; + case PhysicalDeviceType::eVirtualGpu: return "VirtualGpu"; + case PhysicalDeviceType::eCpu: return "Cpu"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(VertexInputRate value) + { + switch (value) + { + case VertexInputRate::eVertex: return "Vertex"; + case VertexInputRate::eInstance: return "Instance"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(Format value) + { + switch (value) + { + case 
Format::eUndefined: return "Undefined"; + case Format::eR4G4UnormPack8: return "R4G4UnormPack8"; + case Format::eR4G4B4A4UnormPack16: return "R4G4B4A4UnormPack16"; + case Format::eB4G4R4A4UnormPack16: return "B4G4R4A4UnormPack16"; + case Format::eR5G6B5UnormPack16: return "R5G6B5UnormPack16"; + case Format::eB5G6R5UnormPack16: return "B5G6R5UnormPack16"; + case Format::eR5G5B5A1UnormPack16: return "R5G5B5A1UnormPack16"; + case Format::eB5G5R5A1UnormPack16: return "B5G5R5A1UnormPack16"; + case Format::eA1R5G5B5UnormPack16: return "A1R5G5B5UnormPack16"; + case Format::eR8Unorm: return "R8Unorm"; + case Format::eR8Snorm: return "R8Snorm"; + case Format::eR8Uscaled: return "R8Uscaled"; + case Format::eR8Sscaled: return "R8Sscaled"; + case Format::eR8Uint: return "R8Uint"; + case Format::eR8Sint: return "R8Sint"; + case Format::eR8Srgb: return "R8Srgb"; + case Format::eR8G8Unorm: return "R8G8Unorm"; + case Format::eR8G8Snorm: return "R8G8Snorm"; + case Format::eR8G8Uscaled: return "R8G8Uscaled"; + case Format::eR8G8Sscaled: return "R8G8Sscaled"; + case Format::eR8G8Uint: return "R8G8Uint"; + case Format::eR8G8Sint: return "R8G8Sint"; + case Format::eR8G8Srgb: return "R8G8Srgb"; + case Format::eR8G8B8Unorm: return "R8G8B8Unorm"; + case Format::eR8G8B8Snorm: return "R8G8B8Snorm"; + case Format::eR8G8B8Uscaled: return "R8G8B8Uscaled"; + case Format::eR8G8B8Sscaled: return "R8G8B8Sscaled"; + case Format::eR8G8B8Uint: return "R8G8B8Uint"; + case Format::eR8G8B8Sint: return "R8G8B8Sint"; + case Format::eR8G8B8Srgb: return "R8G8B8Srgb"; + case Format::eB8G8R8Unorm: return "B8G8R8Unorm"; + case Format::eB8G8R8Snorm: return "B8G8R8Snorm"; + case Format::eB8G8R8Uscaled: return "B8G8R8Uscaled"; + case Format::eB8G8R8Sscaled: return "B8G8R8Sscaled"; + case Format::eB8G8R8Uint: return "B8G8R8Uint"; + case Format::eB8G8R8Sint: return "B8G8R8Sint"; + case Format::eB8G8R8Srgb: return "B8G8R8Srgb"; + case Format::eR8G8B8A8Unorm: return "R8G8B8A8Unorm"; + case Format::eR8G8B8A8Snorm: return "R8G8B8A8Snorm"; + case Format::eR8G8B8A8Uscaled: return "R8G8B8A8Uscaled"; + case Format::eR8G8B8A8Sscaled: return "R8G8B8A8Sscaled"; + case Format::eR8G8B8A8Uint: return "R8G8B8A8Uint"; + case Format::eR8G8B8A8Sint: return "R8G8B8A8Sint"; + case Format::eR8G8B8A8Srgb: return "R8G8B8A8Srgb"; + case Format::eB8G8R8A8Unorm: return "B8G8R8A8Unorm"; + case Format::eB8G8R8A8Snorm: return "B8G8R8A8Snorm"; + case Format::eB8G8R8A8Uscaled: return "B8G8R8A8Uscaled"; + case Format::eB8G8R8A8Sscaled: return "B8G8R8A8Sscaled"; + case Format::eB8G8R8A8Uint: return "B8G8R8A8Uint"; + case Format::eB8G8R8A8Sint: return "B8G8R8A8Sint"; + case Format::eB8G8R8A8Srgb: return "B8G8R8A8Srgb"; + case Format::eA8B8G8R8UnormPack32: return "A8B8G8R8UnormPack32"; + case Format::eA8B8G8R8SnormPack32: return "A8B8G8R8SnormPack32"; + case Format::eA8B8G8R8UscaledPack32: return "A8B8G8R8UscaledPack32"; + case Format::eA8B8G8R8SscaledPack32: return "A8B8G8R8SscaledPack32"; + case Format::eA8B8G8R8UintPack32: return "A8B8G8R8UintPack32"; + case Format::eA8B8G8R8SintPack32: return "A8B8G8R8SintPack32"; + case Format::eA8B8G8R8SrgbPack32: return "A8B8G8R8SrgbPack32"; + case Format::eA2R10G10B10UnormPack32: return "A2R10G10B10UnormPack32"; + case Format::eA2R10G10B10SnormPack32: return "A2R10G10B10SnormPack32"; + case Format::eA2R10G10B10UscaledPack32: return "A2R10G10B10UscaledPack32"; + case Format::eA2R10G10B10SscaledPack32: return "A2R10G10B10SscaledPack32"; + case Format::eA2R10G10B10UintPack32: return "A2R10G10B10UintPack32"; + case 
Format::eA2R10G10B10SintPack32: return "A2R10G10B10SintPack32"; + case Format::eA2B10G10R10UnormPack32: return "A2B10G10R10UnormPack32"; + case Format::eA2B10G10R10SnormPack32: return "A2B10G10R10SnormPack32"; + case Format::eA2B10G10R10UscaledPack32: return "A2B10G10R10UscaledPack32"; + case Format::eA2B10G10R10SscaledPack32: return "A2B10G10R10SscaledPack32"; + case Format::eA2B10G10R10UintPack32: return "A2B10G10R10UintPack32"; + case Format::eA2B10G10R10SintPack32: return "A2B10G10R10SintPack32"; + case Format::eR16Unorm: return "R16Unorm"; + case Format::eR16Snorm: return "R16Snorm"; + case Format::eR16Uscaled: return "R16Uscaled"; + case Format::eR16Sscaled: return "R16Sscaled"; + case Format::eR16Uint: return "R16Uint"; + case Format::eR16Sint: return "R16Sint"; + case Format::eR16Sfloat: return "R16Sfloat"; + case Format::eR16G16Unorm: return "R16G16Unorm"; + case Format::eR16G16Snorm: return "R16G16Snorm"; + case Format::eR16G16Uscaled: return "R16G16Uscaled"; + case Format::eR16G16Sscaled: return "R16G16Sscaled"; + case Format::eR16G16Uint: return "R16G16Uint"; + case Format::eR16G16Sint: return "R16G16Sint"; + case Format::eR16G16Sfloat: return "R16G16Sfloat"; + case Format::eR16G16B16Unorm: return "R16G16B16Unorm"; + case Format::eR16G16B16Snorm: return "R16G16B16Snorm"; + case Format::eR16G16B16Uscaled: return "R16G16B16Uscaled"; + case Format::eR16G16B16Sscaled: return "R16G16B16Sscaled"; + case Format::eR16G16B16Uint: return "R16G16B16Uint"; + case Format::eR16G16B16Sint: return "R16G16B16Sint"; + case Format::eR16G16B16Sfloat: return "R16G16B16Sfloat"; + case Format::eR16G16B16A16Unorm: return "R16G16B16A16Unorm"; + case Format::eR16G16B16A16Snorm: return "R16G16B16A16Snorm"; + case Format::eR16G16B16A16Uscaled: return "R16G16B16A16Uscaled"; + case Format::eR16G16B16A16Sscaled: return "R16G16B16A16Sscaled"; + case Format::eR16G16B16A16Uint: return "R16G16B16A16Uint"; + case Format::eR16G16B16A16Sint: return "R16G16B16A16Sint"; + case Format::eR16G16B16A16Sfloat: return "R16G16B16A16Sfloat"; + case Format::eR32Uint: return "R32Uint"; + case Format::eR32Sint: return "R32Sint"; + case Format::eR32Sfloat: return "R32Sfloat"; + case Format::eR32G32Uint: return "R32G32Uint"; + case Format::eR32G32Sint: return "R32G32Sint"; + case Format::eR32G32Sfloat: return "R32G32Sfloat"; + case Format::eR32G32B32Uint: return "R32G32B32Uint"; + case Format::eR32G32B32Sint: return "R32G32B32Sint"; + case Format::eR32G32B32Sfloat: return "R32G32B32Sfloat"; + case Format::eR32G32B32A32Uint: return "R32G32B32A32Uint"; + case Format::eR32G32B32A32Sint: return "R32G32B32A32Sint"; + case Format::eR32G32B32A32Sfloat: return "R32G32B32A32Sfloat"; + case Format::eR64Uint: return "R64Uint"; + case Format::eR64Sint: return "R64Sint"; + case Format::eR64Sfloat: return "R64Sfloat"; + case Format::eR64G64Uint: return "R64G64Uint"; + case Format::eR64G64Sint: return "R64G64Sint"; + case Format::eR64G64Sfloat: return "R64G64Sfloat"; + case Format::eR64G64B64Uint: return "R64G64B64Uint"; + case Format::eR64G64B64Sint: return "R64G64B64Sint"; + case Format::eR64G64B64Sfloat: return "R64G64B64Sfloat"; + case Format::eR64G64B64A64Uint: return "R64G64B64A64Uint"; + case Format::eR64G64B64A64Sint: return "R64G64B64A64Sint"; + case Format::eR64G64B64A64Sfloat: return "R64G64B64A64Sfloat"; + case Format::eB10G11R11UfloatPack32: return "B10G11R11UfloatPack32"; + case Format::eE5B9G9R9UfloatPack32: return "E5B9G9R9UfloatPack32"; + case Format::eD16Unorm: return "D16Unorm"; + case Format::eX8D24UnormPack32: return 
"X8D24UnormPack32"; + case Format::eD32Sfloat: return "D32Sfloat"; + case Format::eS8Uint: return "S8Uint"; + case Format::eD16UnormS8Uint: return "D16UnormS8Uint"; + case Format::eD24UnormS8Uint: return "D24UnormS8Uint"; + case Format::eD32SfloatS8Uint: return "D32SfloatS8Uint"; + case Format::eBc1RgbUnormBlock: return "Bc1RgbUnormBlock"; + case Format::eBc1RgbSrgbBlock: return "Bc1RgbSrgbBlock"; + case Format::eBc1RgbaUnormBlock: return "Bc1RgbaUnormBlock"; + case Format::eBc1RgbaSrgbBlock: return "Bc1RgbaSrgbBlock"; + case Format::eBc2UnormBlock: return "Bc2UnormBlock"; + case Format::eBc2SrgbBlock: return "Bc2SrgbBlock"; + case Format::eBc3UnormBlock: return "Bc3UnormBlock"; + case Format::eBc3SrgbBlock: return "Bc3SrgbBlock"; + case Format::eBc4UnormBlock: return "Bc4UnormBlock"; + case Format::eBc4SnormBlock: return "Bc4SnormBlock"; + case Format::eBc5UnormBlock: return "Bc5UnormBlock"; + case Format::eBc5SnormBlock: return "Bc5SnormBlock"; + case Format::eBc6HUfloatBlock: return "Bc6HUfloatBlock"; + case Format::eBc6HSfloatBlock: return "Bc6HSfloatBlock"; + case Format::eBc7UnormBlock: return "Bc7UnormBlock"; + case Format::eBc7SrgbBlock: return "Bc7SrgbBlock"; + case Format::eEtc2R8G8B8UnormBlock: return "Etc2R8G8B8UnormBlock"; + case Format::eEtc2R8G8B8SrgbBlock: return "Etc2R8G8B8SrgbBlock"; + case Format::eEtc2R8G8B8A1UnormBlock: return "Etc2R8G8B8A1UnormBlock"; + case Format::eEtc2R8G8B8A1SrgbBlock: return "Etc2R8G8B8A1SrgbBlock"; + case Format::eEtc2R8G8B8A8UnormBlock: return "Etc2R8G8B8A8UnormBlock"; + case Format::eEtc2R8G8B8A8SrgbBlock: return "Etc2R8G8B8A8SrgbBlock"; + case Format::eEacR11UnormBlock: return "EacR11UnormBlock"; + case Format::eEacR11SnormBlock: return "EacR11SnormBlock"; + case Format::eEacR11G11UnormBlock: return "EacR11G11UnormBlock"; + case Format::eEacR11G11SnormBlock: return "EacR11G11SnormBlock"; + case Format::eAstc4x4UnormBlock: return "Astc4x4UnormBlock"; + case Format::eAstc4x4SrgbBlock: return "Astc4x4SrgbBlock"; + case Format::eAstc5x4UnormBlock: return "Astc5x4UnormBlock"; + case Format::eAstc5x4SrgbBlock: return "Astc5x4SrgbBlock"; + case Format::eAstc5x5UnormBlock: return "Astc5x5UnormBlock"; + case Format::eAstc5x5SrgbBlock: return "Astc5x5SrgbBlock"; + case Format::eAstc6x5UnormBlock: return "Astc6x5UnormBlock"; + case Format::eAstc6x5SrgbBlock: return "Astc6x5SrgbBlock"; + case Format::eAstc6x6UnormBlock: return "Astc6x6UnormBlock"; + case Format::eAstc6x6SrgbBlock: return "Astc6x6SrgbBlock"; + case Format::eAstc8x5UnormBlock: return "Astc8x5UnormBlock"; + case Format::eAstc8x5SrgbBlock: return "Astc8x5SrgbBlock"; + case Format::eAstc8x6UnormBlock: return "Astc8x6UnormBlock"; + case Format::eAstc8x6SrgbBlock: return "Astc8x6SrgbBlock"; + case Format::eAstc8x8UnormBlock: return "Astc8x8UnormBlock"; + case Format::eAstc8x8SrgbBlock: return "Astc8x8SrgbBlock"; + case Format::eAstc10x5UnormBlock: return "Astc10x5UnormBlock"; + case Format::eAstc10x5SrgbBlock: return "Astc10x5SrgbBlock"; + case Format::eAstc10x6UnormBlock: return "Astc10x6UnormBlock"; + case Format::eAstc10x6SrgbBlock: return "Astc10x6SrgbBlock"; + case Format::eAstc10x8UnormBlock: return "Astc10x8UnormBlock"; + case Format::eAstc10x8SrgbBlock: return "Astc10x8SrgbBlock"; + case Format::eAstc10x10UnormBlock: return "Astc10x10UnormBlock"; + case Format::eAstc10x10SrgbBlock: return "Astc10x10SrgbBlock"; + case Format::eAstc12x10UnormBlock: return "Astc12x10UnormBlock"; + case Format::eAstc12x10SrgbBlock: return "Astc12x10SrgbBlock"; + case Format::eAstc12x12UnormBlock: return 
"Astc12x12UnormBlock"; + case Format::eAstc12x12SrgbBlock: return "Astc12x12SrgbBlock"; + case Format::eG8B8G8R8422Unorm: return "G8B8G8R8422Unorm"; + case Format::eB8G8R8G8422Unorm: return "B8G8R8G8422Unorm"; + case Format::eG8B8R83Plane420Unorm: return "G8B8R83Plane420Unorm"; + case Format::eG8B8R82Plane420Unorm: return "G8B8R82Plane420Unorm"; + case Format::eG8B8R83Plane422Unorm: return "G8B8R83Plane422Unorm"; + case Format::eG8B8R82Plane422Unorm: return "G8B8R82Plane422Unorm"; + case Format::eG8B8R83Plane444Unorm: return "G8B8R83Plane444Unorm"; + case Format::eR10X6UnormPack16: return "R10X6UnormPack16"; + case Format::eR10X6G10X6Unorm2Pack16: return "R10X6G10X6Unorm2Pack16"; + case Format::eR10X6G10X6B10X6A10X6Unorm4Pack16: return "R10X6G10X6B10X6A10X6Unorm4Pack16"; + case Format::eG10X6B10X6G10X6R10X6422Unorm4Pack16: return "G10X6B10X6G10X6R10X6422Unorm4Pack16"; + case Format::eB10X6G10X6R10X6G10X6422Unorm4Pack16: return "B10X6G10X6R10X6G10X6422Unorm4Pack16"; + case Format::eG10X6B10X6R10X63Plane420Unorm3Pack16: return "G10X6B10X6R10X63Plane420Unorm3Pack16"; + case Format::eG10X6B10X6R10X62Plane420Unorm3Pack16: return "G10X6B10X6R10X62Plane420Unorm3Pack16"; + case Format::eG10X6B10X6R10X63Plane422Unorm3Pack16: return "G10X6B10X6R10X63Plane422Unorm3Pack16"; + case Format::eG10X6B10X6R10X62Plane422Unorm3Pack16: return "G10X6B10X6R10X62Plane422Unorm3Pack16"; + case Format::eG10X6B10X6R10X63Plane444Unorm3Pack16: return "G10X6B10X6R10X63Plane444Unorm3Pack16"; + case Format::eR12X4UnormPack16: return "R12X4UnormPack16"; + case Format::eR12X4G12X4Unorm2Pack16: return "R12X4G12X4Unorm2Pack16"; + case Format::eR12X4G12X4B12X4A12X4Unorm4Pack16: return "R12X4G12X4B12X4A12X4Unorm4Pack16"; + case Format::eG12X4B12X4G12X4R12X4422Unorm4Pack16: return "G12X4B12X4G12X4R12X4422Unorm4Pack16"; + case Format::eB12X4G12X4R12X4G12X4422Unorm4Pack16: return "B12X4G12X4R12X4G12X4422Unorm4Pack16"; + case Format::eG12X4B12X4R12X43Plane420Unorm3Pack16: return "G12X4B12X4R12X43Plane420Unorm3Pack16"; + case Format::eG12X4B12X4R12X42Plane420Unorm3Pack16: return "G12X4B12X4R12X42Plane420Unorm3Pack16"; + case Format::eG12X4B12X4R12X43Plane422Unorm3Pack16: return "G12X4B12X4R12X43Plane422Unorm3Pack16"; + case Format::eG12X4B12X4R12X42Plane422Unorm3Pack16: return "G12X4B12X4R12X42Plane422Unorm3Pack16"; + case Format::eG12X4B12X4R12X43Plane444Unorm3Pack16: return "G12X4B12X4R12X43Plane444Unorm3Pack16"; + case Format::eG16B16G16R16422Unorm: return "G16B16G16R16422Unorm"; + case Format::eB16G16R16G16422Unorm: return "B16G16R16G16422Unorm"; + case Format::eG16B16R163Plane420Unorm: return "G16B16R163Plane420Unorm"; + case Format::eG16B16R162Plane420Unorm: return "G16B16R162Plane420Unorm"; + case Format::eG16B16R163Plane422Unorm: return "G16B16R163Plane422Unorm"; + case Format::eG16B16R162Plane422Unorm: return "G16B16R162Plane422Unorm"; + case Format::eG16B16R163Plane444Unorm: return "G16B16R163Plane444Unorm"; + case Format::ePvrtc12BppUnormBlockIMG: return "Pvrtc12BppUnormBlockIMG"; + case Format::ePvrtc14BppUnormBlockIMG: return "Pvrtc14BppUnormBlockIMG"; + case Format::ePvrtc22BppUnormBlockIMG: return "Pvrtc22BppUnormBlockIMG"; + case Format::ePvrtc24BppUnormBlockIMG: return "Pvrtc24BppUnormBlockIMG"; + case Format::ePvrtc12BppSrgbBlockIMG: return "Pvrtc12BppSrgbBlockIMG"; + case Format::ePvrtc14BppSrgbBlockIMG: return "Pvrtc14BppSrgbBlockIMG"; + case Format::ePvrtc22BppSrgbBlockIMG: return "Pvrtc22BppSrgbBlockIMG"; + case Format::ePvrtc24BppSrgbBlockIMG: return "Pvrtc24BppSrgbBlockIMG"; + default: return "invalid"; + } + 
} + + VULKAN_HPP_INLINE std::string to_string(StructureType value) + { + switch (value) + { + case StructureType::eApplicationInfo: return "ApplicationInfo"; + case StructureType::eInstanceCreateInfo: return "InstanceCreateInfo"; + case StructureType::eDeviceQueueCreateInfo: return "DeviceQueueCreateInfo"; + case StructureType::eDeviceCreateInfo: return "DeviceCreateInfo"; + case StructureType::eSubmitInfo: return "SubmitInfo"; + case StructureType::eMemoryAllocateInfo: return "MemoryAllocateInfo"; + case StructureType::eMappedMemoryRange: return "MappedMemoryRange"; + case StructureType::eBindSparseInfo: return "BindSparseInfo"; + case StructureType::eFenceCreateInfo: return "FenceCreateInfo"; + case StructureType::eSemaphoreCreateInfo: return "SemaphoreCreateInfo"; + case StructureType::eEventCreateInfo: return "EventCreateInfo"; + case StructureType::eQueryPoolCreateInfo: return "QueryPoolCreateInfo"; + case StructureType::eBufferCreateInfo: return "BufferCreateInfo"; + case StructureType::eBufferViewCreateInfo: return "BufferViewCreateInfo"; + case StructureType::eImageCreateInfo: return "ImageCreateInfo"; + case StructureType::eImageViewCreateInfo: return "ImageViewCreateInfo"; + case StructureType::eShaderModuleCreateInfo: return "ShaderModuleCreateInfo"; + case StructureType::ePipelineCacheCreateInfo: return "PipelineCacheCreateInfo"; + case StructureType::ePipelineShaderStageCreateInfo: return "PipelineShaderStageCreateInfo"; + case StructureType::ePipelineVertexInputStateCreateInfo: return "PipelineVertexInputStateCreateInfo"; + case StructureType::ePipelineInputAssemblyStateCreateInfo: return "PipelineInputAssemblyStateCreateInfo"; + case StructureType::ePipelineTessellationStateCreateInfo: return "PipelineTessellationStateCreateInfo"; + case StructureType::ePipelineViewportStateCreateInfo: return "PipelineViewportStateCreateInfo"; + case StructureType::ePipelineRasterizationStateCreateInfo: return "PipelineRasterizationStateCreateInfo"; + case StructureType::ePipelineMultisampleStateCreateInfo: return "PipelineMultisampleStateCreateInfo"; + case StructureType::ePipelineDepthStencilStateCreateInfo: return "PipelineDepthStencilStateCreateInfo"; + case StructureType::ePipelineColorBlendStateCreateInfo: return "PipelineColorBlendStateCreateInfo"; + case StructureType::ePipelineDynamicStateCreateInfo: return "PipelineDynamicStateCreateInfo"; + case StructureType::eGraphicsPipelineCreateInfo: return "GraphicsPipelineCreateInfo"; + case StructureType::eComputePipelineCreateInfo: return "ComputePipelineCreateInfo"; + case StructureType::ePipelineLayoutCreateInfo: return "PipelineLayoutCreateInfo"; + case StructureType::eSamplerCreateInfo: return "SamplerCreateInfo"; + case StructureType::eDescriptorSetLayoutCreateInfo: return "DescriptorSetLayoutCreateInfo"; + case StructureType::eDescriptorPoolCreateInfo: return "DescriptorPoolCreateInfo"; + case StructureType::eDescriptorSetAllocateInfo: return "DescriptorSetAllocateInfo"; + case StructureType::eWriteDescriptorSet: return "WriteDescriptorSet"; + case StructureType::eCopyDescriptorSet: return "CopyDescriptorSet"; + case StructureType::eFramebufferCreateInfo: return "FramebufferCreateInfo"; + case StructureType::eRenderPassCreateInfo: return "RenderPassCreateInfo"; + case StructureType::eCommandPoolCreateInfo: return "CommandPoolCreateInfo"; + case StructureType::eCommandBufferAllocateInfo: return "CommandBufferAllocateInfo"; + case StructureType::eCommandBufferInheritanceInfo: return "CommandBufferInheritanceInfo"; + case 
StructureType::eCommandBufferBeginInfo: return "CommandBufferBeginInfo"; + case StructureType::eRenderPassBeginInfo: return "RenderPassBeginInfo"; + case StructureType::eBufferMemoryBarrier: return "BufferMemoryBarrier"; + case StructureType::eImageMemoryBarrier: return "ImageMemoryBarrier"; + case StructureType::eMemoryBarrier: return "MemoryBarrier"; + case StructureType::eLoaderInstanceCreateInfo: return "LoaderInstanceCreateInfo"; + case StructureType::eLoaderDeviceCreateInfo: return "LoaderDeviceCreateInfo"; + case StructureType::ePhysicalDeviceSubgroupProperties: return "PhysicalDeviceSubgroupProperties"; + case StructureType::eBindBufferMemoryInfo: return "BindBufferMemoryInfo"; + case StructureType::eBindImageMemoryInfo: return "BindImageMemoryInfo"; + case StructureType::ePhysicalDevice16BitStorageFeatures: return "PhysicalDevice16BitStorageFeatures"; + case StructureType::eMemoryDedicatedRequirements: return "MemoryDedicatedRequirements"; + case StructureType::eMemoryDedicatedAllocateInfo: return "MemoryDedicatedAllocateInfo"; + case StructureType::eMemoryAllocateFlagsInfo: return "MemoryAllocateFlagsInfo"; + case StructureType::eDeviceGroupRenderPassBeginInfo: return "DeviceGroupRenderPassBeginInfo"; + case StructureType::eDeviceGroupCommandBufferBeginInfo: return "DeviceGroupCommandBufferBeginInfo"; + case StructureType::eDeviceGroupSubmitInfo: return "DeviceGroupSubmitInfo"; + case StructureType::eDeviceGroupBindSparseInfo: return "DeviceGroupBindSparseInfo"; + case StructureType::eBindBufferMemoryDeviceGroupInfo: return "BindBufferMemoryDeviceGroupInfo"; + case StructureType::eBindImageMemoryDeviceGroupInfo: return "BindImageMemoryDeviceGroupInfo"; + case StructureType::ePhysicalDeviceGroupProperties: return "PhysicalDeviceGroupProperties"; + case StructureType::eDeviceGroupDeviceCreateInfo: return "DeviceGroupDeviceCreateInfo"; + case StructureType::eBufferMemoryRequirementsInfo2: return "BufferMemoryRequirementsInfo2"; + case StructureType::eImageMemoryRequirementsInfo2: return "ImageMemoryRequirementsInfo2"; + case StructureType::eImageSparseMemoryRequirementsInfo2: return "ImageSparseMemoryRequirementsInfo2"; + case StructureType::eMemoryRequirements2: return "MemoryRequirements2"; + case StructureType::eSparseImageMemoryRequirements2: return "SparseImageMemoryRequirements2"; + case StructureType::ePhysicalDeviceFeatures2: return "PhysicalDeviceFeatures2"; + case StructureType::ePhysicalDeviceProperties2: return "PhysicalDeviceProperties2"; + case StructureType::eFormatProperties2: return "FormatProperties2"; + case StructureType::eImageFormatProperties2: return "ImageFormatProperties2"; + case StructureType::ePhysicalDeviceImageFormatInfo2: return "PhysicalDeviceImageFormatInfo2"; + case StructureType::eQueueFamilyProperties2: return "QueueFamilyProperties2"; + case StructureType::ePhysicalDeviceMemoryProperties2: return "PhysicalDeviceMemoryProperties2"; + case StructureType::eSparseImageFormatProperties2: return "SparseImageFormatProperties2"; + case StructureType::ePhysicalDeviceSparseImageFormatInfo2: return "PhysicalDeviceSparseImageFormatInfo2"; + case StructureType::ePhysicalDevicePointClippingProperties: return "PhysicalDevicePointClippingProperties"; + case StructureType::eRenderPassInputAttachmentAspectCreateInfo: return "RenderPassInputAttachmentAspectCreateInfo"; + case StructureType::eImageViewUsageCreateInfo: return "ImageViewUsageCreateInfo"; + case StructureType::ePipelineTessellationDomainOriginStateCreateInfo: return 
"PipelineTessellationDomainOriginStateCreateInfo"; + case StructureType::eRenderPassMultiviewCreateInfo: return "RenderPassMultiviewCreateInfo"; + case StructureType::ePhysicalDeviceMultiviewFeatures: return "PhysicalDeviceMultiviewFeatures"; + case StructureType::ePhysicalDeviceMultiviewProperties: return "PhysicalDeviceMultiviewProperties"; + case StructureType::ePhysicalDeviceVariablePointerFeatures: return "PhysicalDeviceVariablePointerFeatures"; + case StructureType::eProtectedSubmitInfo: return "ProtectedSubmitInfo"; + case StructureType::ePhysicalDeviceProtectedMemoryFeatures: return "PhysicalDeviceProtectedMemoryFeatures"; + case StructureType::ePhysicalDeviceProtectedMemoryProperties: return "PhysicalDeviceProtectedMemoryProperties"; + case StructureType::eDeviceQueueInfo2: return "DeviceQueueInfo2"; + case StructureType::eSamplerYcbcrConversionCreateInfo: return "SamplerYcbcrConversionCreateInfo"; + case StructureType::eSamplerYcbcrConversionInfo: return "SamplerYcbcrConversionInfo"; + case StructureType::eBindImagePlaneMemoryInfo: return "BindImagePlaneMemoryInfo"; + case StructureType::eImagePlaneMemoryRequirementsInfo: return "ImagePlaneMemoryRequirementsInfo"; + case StructureType::ePhysicalDeviceSamplerYcbcrConversionFeatures: return "PhysicalDeviceSamplerYcbcrConversionFeatures"; + case StructureType::eSamplerYcbcrConversionImageFormatProperties: return "SamplerYcbcrConversionImageFormatProperties"; + case StructureType::eDescriptorUpdateTemplateCreateInfo: return "DescriptorUpdateTemplateCreateInfo"; + case StructureType::ePhysicalDeviceExternalImageFormatInfo: return "PhysicalDeviceExternalImageFormatInfo"; + case StructureType::eExternalImageFormatProperties: return "ExternalImageFormatProperties"; + case StructureType::ePhysicalDeviceExternalBufferInfo: return "PhysicalDeviceExternalBufferInfo"; + case StructureType::eExternalBufferProperties: return "ExternalBufferProperties"; + case StructureType::ePhysicalDeviceIdProperties: return "PhysicalDeviceIdProperties"; + case StructureType::eExternalMemoryBufferCreateInfo: return "ExternalMemoryBufferCreateInfo"; + case StructureType::eExternalMemoryImageCreateInfo: return "ExternalMemoryImageCreateInfo"; + case StructureType::eExportMemoryAllocateInfo: return "ExportMemoryAllocateInfo"; + case StructureType::ePhysicalDeviceExternalFenceInfo: return "PhysicalDeviceExternalFenceInfo"; + case StructureType::eExternalFenceProperties: return "ExternalFenceProperties"; + case StructureType::eExportFenceCreateInfo: return "ExportFenceCreateInfo"; + case StructureType::eExportSemaphoreCreateInfo: return "ExportSemaphoreCreateInfo"; + case StructureType::ePhysicalDeviceExternalSemaphoreInfo: return "PhysicalDeviceExternalSemaphoreInfo"; + case StructureType::eExternalSemaphoreProperties: return "ExternalSemaphoreProperties"; + case StructureType::ePhysicalDeviceMaintenance3Properties: return "PhysicalDeviceMaintenance3Properties"; + case StructureType::eDescriptorSetLayoutSupport: return "DescriptorSetLayoutSupport"; + case StructureType::ePhysicalDeviceShaderDrawParameterFeatures: return "PhysicalDeviceShaderDrawParameterFeatures"; + case StructureType::eSwapchainCreateInfoKHR: return "SwapchainCreateInfoKHR"; + case StructureType::ePresentInfoKHR: return "PresentInfoKHR"; + case StructureType::eDeviceGroupPresentCapabilitiesKHR: return "DeviceGroupPresentCapabilitiesKHR"; + case StructureType::eImageSwapchainCreateInfoKHR: return "ImageSwapchainCreateInfoKHR"; + case StructureType::eBindImageMemorySwapchainInfoKHR: return 
"BindImageMemorySwapchainInfoKHR"; + case StructureType::eAcquireNextImageInfoKHR: return "AcquireNextImageInfoKHR"; + case StructureType::eDeviceGroupPresentInfoKHR: return "DeviceGroupPresentInfoKHR"; + case StructureType::eDeviceGroupSwapchainCreateInfoKHR: return "DeviceGroupSwapchainCreateInfoKHR"; + case StructureType::eDisplayModeCreateInfoKHR: return "DisplayModeCreateInfoKHR"; + case StructureType::eDisplaySurfaceCreateInfoKHR: return "DisplaySurfaceCreateInfoKHR"; + case StructureType::eDisplayPresentInfoKHR: return "DisplayPresentInfoKHR"; + case StructureType::eXlibSurfaceCreateInfoKHR: return "XlibSurfaceCreateInfoKHR"; + case StructureType::eXcbSurfaceCreateInfoKHR: return "XcbSurfaceCreateInfoKHR"; + case StructureType::eWaylandSurfaceCreateInfoKHR: return "WaylandSurfaceCreateInfoKHR"; + case StructureType::eMirSurfaceCreateInfoKHR: return "MirSurfaceCreateInfoKHR"; + case StructureType::eAndroidSurfaceCreateInfoKHR: return "AndroidSurfaceCreateInfoKHR"; + case StructureType::eWin32SurfaceCreateInfoKHR: return "Win32SurfaceCreateInfoKHR"; + case StructureType::eDebugReportCallbackCreateInfoEXT: return "DebugReportCallbackCreateInfoEXT"; + case StructureType::ePipelineRasterizationStateRasterizationOrderAMD: return "PipelineRasterizationStateRasterizationOrderAMD"; + case StructureType::eDebugMarkerObjectNameInfoEXT: return "DebugMarkerObjectNameInfoEXT"; + case StructureType::eDebugMarkerObjectTagInfoEXT: return "DebugMarkerObjectTagInfoEXT"; + case StructureType::eDebugMarkerMarkerInfoEXT: return "DebugMarkerMarkerInfoEXT"; + case StructureType::eDedicatedAllocationImageCreateInfoNV: return "DedicatedAllocationImageCreateInfoNV"; + case StructureType::eDedicatedAllocationBufferCreateInfoNV: return "DedicatedAllocationBufferCreateInfoNV"; + case StructureType::eDedicatedAllocationMemoryAllocateInfoNV: return "DedicatedAllocationMemoryAllocateInfoNV"; + case StructureType::eTextureLodGatherFormatPropertiesAMD: return "TextureLodGatherFormatPropertiesAMD"; + case StructureType::ePhysicalDeviceCornerSampledImageFeaturesNV: return "PhysicalDeviceCornerSampledImageFeaturesNV"; + case StructureType::eExternalMemoryImageCreateInfoNV: return "ExternalMemoryImageCreateInfoNV"; + case StructureType::eExportMemoryAllocateInfoNV: return "ExportMemoryAllocateInfoNV"; + case StructureType::eImportMemoryWin32HandleInfoNV: return "ImportMemoryWin32HandleInfoNV"; + case StructureType::eExportMemoryWin32HandleInfoNV: return "ExportMemoryWin32HandleInfoNV"; + case StructureType::eWin32KeyedMutexAcquireReleaseInfoNV: return "Win32KeyedMutexAcquireReleaseInfoNV"; + case StructureType::eValidationFlagsEXT: return "ValidationFlagsEXT"; + case StructureType::eViSurfaceCreateInfoNN: return "ViSurfaceCreateInfoNN"; + case StructureType::eImageViewAstcDecodeModeEXT: return "ImageViewAstcDecodeModeEXT"; + case StructureType::ePhysicalDeviceAstcDecodeFeaturesEXT: return "PhysicalDeviceAstcDecodeFeaturesEXT"; + case StructureType::eImportMemoryWin32HandleInfoKHR: return "ImportMemoryWin32HandleInfoKHR"; + case StructureType::eExportMemoryWin32HandleInfoKHR: return "ExportMemoryWin32HandleInfoKHR"; + case StructureType::eMemoryWin32HandlePropertiesKHR: return "MemoryWin32HandlePropertiesKHR"; + case StructureType::eMemoryGetWin32HandleInfoKHR: return "MemoryGetWin32HandleInfoKHR"; + case StructureType::eImportMemoryFdInfoKHR: return "ImportMemoryFdInfoKHR"; + case StructureType::eMemoryFdPropertiesKHR: return "MemoryFdPropertiesKHR"; + case StructureType::eMemoryGetFdInfoKHR: return "MemoryGetFdInfoKHR"; 
+ case StructureType::eWin32KeyedMutexAcquireReleaseInfoKHR: return "Win32KeyedMutexAcquireReleaseInfoKHR"; + case StructureType::eImportSemaphoreWin32HandleInfoKHR: return "ImportSemaphoreWin32HandleInfoKHR"; + case StructureType::eExportSemaphoreWin32HandleInfoKHR: return "ExportSemaphoreWin32HandleInfoKHR"; + case StructureType::eD3D12FenceSubmitInfoKHR: return "D3D12FenceSubmitInfoKHR"; + case StructureType::eSemaphoreGetWin32HandleInfoKHR: return "SemaphoreGetWin32HandleInfoKHR"; + case StructureType::eImportSemaphoreFdInfoKHR: return "ImportSemaphoreFdInfoKHR"; + case StructureType::eSemaphoreGetFdInfoKHR: return "SemaphoreGetFdInfoKHR"; + case StructureType::ePhysicalDevicePushDescriptorPropertiesKHR: return "PhysicalDevicePushDescriptorPropertiesKHR"; + case StructureType::eCommandBufferInheritanceConditionalRenderingInfoEXT: return "CommandBufferInheritanceConditionalRenderingInfoEXT"; + case StructureType::ePhysicalDeviceConditionalRenderingFeaturesEXT: return "PhysicalDeviceConditionalRenderingFeaturesEXT"; + case StructureType::eConditionalRenderingBeginInfoEXT: return "ConditionalRenderingBeginInfoEXT"; + case StructureType::ePresentRegionsKHR: return "PresentRegionsKHR"; + case StructureType::eObjectTableCreateInfoNVX: return "ObjectTableCreateInfoNVX"; + case StructureType::eIndirectCommandsLayoutCreateInfoNVX: return "IndirectCommandsLayoutCreateInfoNVX"; + case StructureType::eCmdProcessCommandsInfoNVX: return "CmdProcessCommandsInfoNVX"; + case StructureType::eCmdReserveSpaceForCommandsInfoNVX: return "CmdReserveSpaceForCommandsInfoNVX"; + case StructureType::eDeviceGeneratedCommandsLimitsNVX: return "DeviceGeneratedCommandsLimitsNVX"; + case StructureType::eDeviceGeneratedCommandsFeaturesNVX: return "DeviceGeneratedCommandsFeaturesNVX"; + case StructureType::ePipelineViewportWScalingStateCreateInfoNV: return "PipelineViewportWScalingStateCreateInfoNV"; + case StructureType::eSurfaceCapabilities2EXT: return "SurfaceCapabilities2EXT"; + case StructureType::eDisplayPowerInfoEXT: return "DisplayPowerInfoEXT"; + case StructureType::eDeviceEventInfoEXT: return "DeviceEventInfoEXT"; + case StructureType::eDisplayEventInfoEXT: return "DisplayEventInfoEXT"; + case StructureType::eSwapchainCounterCreateInfoEXT: return "SwapchainCounterCreateInfoEXT"; + case StructureType::ePresentTimesInfoGOOGLE: return "PresentTimesInfoGOOGLE"; + case StructureType::ePhysicalDeviceMultiviewPerViewAttributesPropertiesNVX: return "PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX"; + case StructureType::ePipelineViewportSwizzleStateCreateInfoNV: return "PipelineViewportSwizzleStateCreateInfoNV"; + case StructureType::ePhysicalDeviceDiscardRectanglePropertiesEXT: return "PhysicalDeviceDiscardRectanglePropertiesEXT"; + case StructureType::ePipelineDiscardRectangleStateCreateInfoEXT: return "PipelineDiscardRectangleStateCreateInfoEXT"; + case StructureType::ePhysicalDeviceConservativeRasterizationPropertiesEXT: return "PhysicalDeviceConservativeRasterizationPropertiesEXT"; + case StructureType::ePipelineRasterizationConservativeStateCreateInfoEXT: return "PipelineRasterizationConservativeStateCreateInfoEXT"; + case StructureType::eHdrMetadataEXT: return "HdrMetadataEXT"; + case StructureType::eAttachmentDescription2KHR: return "AttachmentDescription2KHR"; + case StructureType::eAttachmentReference2KHR: return "AttachmentReference2KHR"; + case StructureType::eSubpassDescription2KHR: return "SubpassDescription2KHR"; + case StructureType::eSubpassDependency2KHR: return "SubpassDependency2KHR"; + case 
StructureType::eRenderPassCreateInfo2KHR: return "RenderPassCreateInfo2KHR"; + case StructureType::eSubpassBeginInfoKHR: return "SubpassBeginInfoKHR"; + case StructureType::eSubpassEndInfoKHR: return "SubpassEndInfoKHR"; + case StructureType::eSharedPresentSurfaceCapabilitiesKHR: return "SharedPresentSurfaceCapabilitiesKHR"; + case StructureType::eImportFenceWin32HandleInfoKHR: return "ImportFenceWin32HandleInfoKHR"; + case StructureType::eExportFenceWin32HandleInfoKHR: return "ExportFenceWin32HandleInfoKHR"; + case StructureType::eFenceGetWin32HandleInfoKHR: return "FenceGetWin32HandleInfoKHR"; + case StructureType::eImportFenceFdInfoKHR: return "ImportFenceFdInfoKHR"; + case StructureType::eFenceGetFdInfoKHR: return "FenceGetFdInfoKHR"; + case StructureType::ePhysicalDeviceSurfaceInfo2KHR: return "PhysicalDeviceSurfaceInfo2KHR"; + case StructureType::eSurfaceCapabilities2KHR: return "SurfaceCapabilities2KHR"; + case StructureType::eSurfaceFormat2KHR: return "SurfaceFormat2KHR"; + case StructureType::eDisplayProperties2KHR: return "DisplayProperties2KHR"; + case StructureType::eDisplayPlaneProperties2KHR: return "DisplayPlaneProperties2KHR"; + case StructureType::eDisplayModeProperties2KHR: return "DisplayModeProperties2KHR"; + case StructureType::eDisplayPlaneInfo2KHR: return "DisplayPlaneInfo2KHR"; + case StructureType::eDisplayPlaneCapabilities2KHR: return "DisplayPlaneCapabilities2KHR"; + case StructureType::eIosSurfaceCreateInfoMVK: return "IosSurfaceCreateInfoMVK"; + case StructureType::eMacosSurfaceCreateInfoMVK: return "MacosSurfaceCreateInfoMVK"; + case StructureType::eDebugUtilsObjectNameInfoEXT: return "DebugUtilsObjectNameInfoEXT"; + case StructureType::eDebugUtilsObjectTagInfoEXT: return "DebugUtilsObjectTagInfoEXT"; + case StructureType::eDebugUtilsLabelEXT: return "DebugUtilsLabelEXT"; + case StructureType::eDebugUtilsMessengerCallbackDataEXT: return "DebugUtilsMessengerCallbackDataEXT"; + case StructureType::eDebugUtilsMessengerCreateInfoEXT: return "DebugUtilsMessengerCreateInfoEXT"; + case StructureType::eAndroidHardwareBufferUsageANDROID: return "AndroidHardwareBufferUsageANDROID"; + case StructureType::eAndroidHardwareBufferPropertiesANDROID: return "AndroidHardwareBufferPropertiesANDROID"; + case StructureType::eAndroidHardwareBufferFormatPropertiesANDROID: return "AndroidHardwareBufferFormatPropertiesANDROID"; + case StructureType::eImportAndroidHardwareBufferInfoANDROID: return "ImportAndroidHardwareBufferInfoANDROID"; + case StructureType::eMemoryGetAndroidHardwareBufferInfoANDROID: return "MemoryGetAndroidHardwareBufferInfoANDROID"; + case StructureType::eExternalFormatANDROID: return "ExternalFormatANDROID"; + case StructureType::ePhysicalDeviceSamplerFilterMinmaxPropertiesEXT: return "PhysicalDeviceSamplerFilterMinmaxPropertiesEXT"; + case StructureType::eSamplerReductionModeCreateInfoEXT: return "SamplerReductionModeCreateInfoEXT"; + case StructureType::ePhysicalDeviceInlineUniformBlockFeaturesEXT: return "PhysicalDeviceInlineUniformBlockFeaturesEXT"; + case StructureType::ePhysicalDeviceInlineUniformBlockPropertiesEXT: return "PhysicalDeviceInlineUniformBlockPropertiesEXT"; + case StructureType::eWriteDescriptorSetInlineUniformBlockEXT: return "WriteDescriptorSetInlineUniformBlockEXT"; + case StructureType::eDescriptorPoolInlineUniformBlockCreateInfoEXT: return "DescriptorPoolInlineUniformBlockCreateInfoEXT"; + case StructureType::eSampleLocationsInfoEXT: return "SampleLocationsInfoEXT"; + case StructureType::eRenderPassSampleLocationsBeginInfoEXT: return 
"RenderPassSampleLocationsBeginInfoEXT"; + case StructureType::ePipelineSampleLocationsStateCreateInfoEXT: return "PipelineSampleLocationsStateCreateInfoEXT"; + case StructureType::ePhysicalDeviceSampleLocationsPropertiesEXT: return "PhysicalDeviceSampleLocationsPropertiesEXT"; + case StructureType::eMultisamplePropertiesEXT: return "MultisamplePropertiesEXT"; + case StructureType::eImageFormatListCreateInfoKHR: return "ImageFormatListCreateInfoKHR"; + case StructureType::ePhysicalDeviceBlendOperationAdvancedFeaturesEXT: return "PhysicalDeviceBlendOperationAdvancedFeaturesEXT"; + case StructureType::ePhysicalDeviceBlendOperationAdvancedPropertiesEXT: return "PhysicalDeviceBlendOperationAdvancedPropertiesEXT"; + case StructureType::ePipelineColorBlendAdvancedStateCreateInfoEXT: return "PipelineColorBlendAdvancedStateCreateInfoEXT"; + case StructureType::ePipelineCoverageToColorStateCreateInfoNV: return "PipelineCoverageToColorStateCreateInfoNV"; + case StructureType::ePipelineCoverageModulationStateCreateInfoNV: return "PipelineCoverageModulationStateCreateInfoNV"; + case StructureType::eValidationCacheCreateInfoEXT: return "ValidationCacheCreateInfoEXT"; + case StructureType::eShaderModuleValidationCacheCreateInfoEXT: return "ShaderModuleValidationCacheCreateInfoEXT"; + case StructureType::eDescriptorSetLayoutBindingFlagsCreateInfoEXT: return "DescriptorSetLayoutBindingFlagsCreateInfoEXT"; + case StructureType::ePhysicalDeviceDescriptorIndexingFeaturesEXT: return "PhysicalDeviceDescriptorIndexingFeaturesEXT"; + case StructureType::ePhysicalDeviceDescriptorIndexingPropertiesEXT: return "PhysicalDeviceDescriptorIndexingPropertiesEXT"; + case StructureType::eDescriptorSetVariableDescriptorCountAllocateInfoEXT: return "DescriptorSetVariableDescriptorCountAllocateInfoEXT"; + case StructureType::eDescriptorSetVariableDescriptorCountLayoutSupportEXT: return "DescriptorSetVariableDescriptorCountLayoutSupportEXT"; + case StructureType::ePipelineViewportShadingRateImageStateCreateInfoNV: return "PipelineViewportShadingRateImageStateCreateInfoNV"; + case StructureType::ePhysicalDeviceShadingRateImageFeaturesNV: return "PhysicalDeviceShadingRateImageFeaturesNV"; + case StructureType::ePhysicalDeviceShadingRateImagePropertiesNV: return "PhysicalDeviceShadingRateImagePropertiesNV"; + case StructureType::ePipelineViewportCoarseSampleOrderStateCreateInfoNV: return "PipelineViewportCoarseSampleOrderStateCreateInfoNV"; + case StructureType::eRaytracingPipelineCreateInfoNVX: return "RaytracingPipelineCreateInfoNVX"; + case StructureType::eAccelerationStructureCreateInfoNVX: return "AccelerationStructureCreateInfoNVX"; + case StructureType::eGeometryInstanceNVX: return "GeometryInstanceNVX"; + case StructureType::eGeometryNVX: return "GeometryNVX"; + case StructureType::eGeometryTrianglesNVX: return "GeometryTrianglesNVX"; + case StructureType::eGeometryAabbNVX: return "GeometryAabbNVX"; + case StructureType::eBindAccelerationStructureMemoryInfoNVX: return "BindAccelerationStructureMemoryInfoNVX"; + case StructureType::eDescriptorAccelerationStructureInfoNVX: return "DescriptorAccelerationStructureInfoNVX"; + case StructureType::eAccelerationStructureMemoryRequirementsInfoNVX: return "AccelerationStructureMemoryRequirementsInfoNVX"; + case StructureType::ePhysicalDeviceRaytracingPropertiesNVX: return "PhysicalDeviceRaytracingPropertiesNVX"; + case StructureType::eHitShaderModuleCreateInfoNVX: return "HitShaderModuleCreateInfoNVX"; + case StructureType::ePhysicalDeviceRepresentativeFragmentTestFeaturesNV: 
return "PhysicalDeviceRepresentativeFragmentTestFeaturesNV"; + case StructureType::ePipelineRepresentativeFragmentTestStateCreateInfoNV: return "PipelineRepresentativeFragmentTestStateCreateInfoNV"; + case StructureType::eDeviceQueueGlobalPriorityCreateInfoEXT: return "DeviceQueueGlobalPriorityCreateInfoEXT"; + case StructureType::ePhysicalDevice8BitStorageFeaturesKHR: return "PhysicalDevice8BitStorageFeaturesKHR"; + case StructureType::eImportMemoryHostPointerInfoEXT: return "ImportMemoryHostPointerInfoEXT"; + case StructureType::eMemoryHostPointerPropertiesEXT: return "MemoryHostPointerPropertiesEXT"; + case StructureType::ePhysicalDeviceExternalMemoryHostPropertiesEXT: return "PhysicalDeviceExternalMemoryHostPropertiesEXT"; + case StructureType::ePhysicalDeviceShaderCorePropertiesAMD: return "PhysicalDeviceShaderCorePropertiesAMD"; + case StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesEXT: return "PhysicalDeviceVertexAttributeDivisorPropertiesEXT"; + case StructureType::ePipelineVertexInputDivisorStateCreateInfoEXT: return "PipelineVertexInputDivisorStateCreateInfoEXT"; + case StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesEXT: return "PhysicalDeviceVertexAttributeDivisorFeaturesEXT"; + case StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesNV: return "PhysicalDeviceComputeShaderDerivativesFeaturesNV"; + case StructureType::ePhysicalDeviceMeshShaderFeaturesNV: return "PhysicalDeviceMeshShaderFeaturesNV"; + case StructureType::ePhysicalDeviceMeshShaderPropertiesNV: return "PhysicalDeviceMeshShaderPropertiesNV"; + case StructureType::ePhysicalDeviceFragmentShaderBarycentricFeaturesNV: return "PhysicalDeviceFragmentShaderBarycentricFeaturesNV"; + case StructureType::ePhysicalDeviceShaderImageFootprintFeaturesNV: return "PhysicalDeviceShaderImageFootprintFeaturesNV"; + case StructureType::ePipelineViewportExclusiveScissorStateCreateInfoNV: return "PipelineViewportExclusiveScissorStateCreateInfoNV"; + case StructureType::ePhysicalDeviceExclusiveScissorFeaturesNV: return "PhysicalDeviceExclusiveScissorFeaturesNV"; + case StructureType::eCheckpointDataNV: return "CheckpointDataNV"; + case StructureType::eQueueFamilyCheckpointPropertiesNV: return "QueueFamilyCheckpointPropertiesNV"; + case StructureType::ePhysicalDeviceVulkanMemoryModelFeaturesKHR: return "PhysicalDeviceVulkanMemoryModelFeaturesKHR"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SubpassContents value) + { + switch (value) + { + case SubpassContents::eInline: return "Inline"; + case SubpassContents::eSecondaryCommandBuffers: return "SecondaryCommandBuffers"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DynamicState value) + { + switch (value) + { + case DynamicState::eViewport: return "Viewport"; + case DynamicState::eScissor: return "Scissor"; + case DynamicState::eLineWidth: return "LineWidth"; + case DynamicState::eDepthBias: return "DepthBias"; + case DynamicState::eBlendConstants: return "BlendConstants"; + case DynamicState::eDepthBounds: return "DepthBounds"; + case DynamicState::eStencilCompareMask: return "StencilCompareMask"; + case DynamicState::eStencilWriteMask: return "StencilWriteMask"; + case DynamicState::eStencilReference: return "StencilReference"; + case DynamicState::eViewportWScalingNV: return "ViewportWScalingNV"; + case DynamicState::eDiscardRectangleEXT: return "DiscardRectangleEXT"; + case DynamicState::eSampleLocationsEXT: return "SampleLocationsEXT"; + case 
DynamicState::eViewportShadingRatePaletteNV: return "ViewportShadingRatePaletteNV"; + case DynamicState::eViewportCoarseSampleOrderNV: return "ViewportCoarseSampleOrderNV"; + case DynamicState::eExclusiveScissorNV: return "ExclusiveScissorNV"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorUpdateTemplateType value) + { + switch (value) + { + case DescriptorUpdateTemplateType::eDescriptorSet: return "DescriptorSet"; + case DescriptorUpdateTemplateType::ePushDescriptorsKHR: return "PushDescriptorsKHR"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ObjectType value) + { + switch (value) + { + case ObjectType::eUnknown: return "Unknown"; + case ObjectType::eInstance: return "Instance"; + case ObjectType::ePhysicalDevice: return "PhysicalDevice"; + case ObjectType::eDevice: return "Device"; + case ObjectType::eQueue: return "Queue"; + case ObjectType::eSemaphore: return "Semaphore"; + case ObjectType::eCommandBuffer: return "CommandBuffer"; + case ObjectType::eFence: return "Fence"; + case ObjectType::eDeviceMemory: return "DeviceMemory"; + case ObjectType::eBuffer: return "Buffer"; + case ObjectType::eImage: return "Image"; + case ObjectType::eEvent: return "Event"; + case ObjectType::eQueryPool: return "QueryPool"; + case ObjectType::eBufferView: return "BufferView"; + case ObjectType::eImageView: return "ImageView"; + case ObjectType::eShaderModule: return "ShaderModule"; + case ObjectType::ePipelineCache: return "PipelineCache"; + case ObjectType::ePipelineLayout: return "PipelineLayout"; + case ObjectType::eRenderPass: return "RenderPass"; + case ObjectType::ePipeline: return "Pipeline"; + case ObjectType::eDescriptorSetLayout: return "DescriptorSetLayout"; + case ObjectType::eSampler: return "Sampler"; + case ObjectType::eDescriptorPool: return "DescriptorPool"; + case ObjectType::eDescriptorSet: return "DescriptorSet"; + case ObjectType::eFramebuffer: return "Framebuffer"; + case ObjectType::eCommandPool: return "CommandPool"; + case ObjectType::eSamplerYcbcrConversion: return "SamplerYcbcrConversion"; + case ObjectType::eDescriptorUpdateTemplate: return "DescriptorUpdateTemplate"; + case ObjectType::eSurfaceKHR: return "SurfaceKHR"; + case ObjectType::eSwapchainKHR: return "SwapchainKHR"; + case ObjectType::eDisplayKHR: return "DisplayKHR"; + case ObjectType::eDisplayModeKHR: return "DisplayModeKHR"; + case ObjectType::eDebugReportCallbackEXT: return "DebugReportCallbackEXT"; + case ObjectType::eObjectTableNVX: return "ObjectTableNVX"; + case ObjectType::eIndirectCommandsLayoutNVX: return "IndirectCommandsLayoutNVX"; + case ObjectType::eDebugUtilsMessengerEXT: return "DebugUtilsMessengerEXT"; + case ObjectType::eValidationCacheEXT: return "ValidationCacheEXT"; + case ObjectType::eAccelerationStructureNVX: return "AccelerationStructureNVX"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(QueueFlagBits value) + { + switch (value) + { + case QueueFlagBits::eGraphics: return "Graphics"; + case QueueFlagBits::eCompute: return "Compute"; + case QueueFlagBits::eTransfer: return "Transfer"; + case QueueFlagBits::eSparseBinding: return "SparseBinding"; + case QueueFlagBits::eProtected: return "Protected"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(QueueFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & QueueFlagBits::eGraphics) result += "Graphics | "; + if (value & QueueFlagBits::eCompute) result += "Compute | "; + if 
(value & QueueFlagBits::eTransfer) result += "Transfer | "; + if (value & QueueFlagBits::eSparseBinding) result += "SparseBinding | "; + if (value & QueueFlagBits::eProtected) result += "Protected | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(DeviceQueueCreateFlagBits value) + { + switch (value) + { + case DeviceQueueCreateFlagBits::eProtected: return "Protected"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DeviceQueueCreateFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & DeviceQueueCreateFlagBits::eProtected) result += "Protected | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(MemoryPropertyFlagBits value) + { + switch (value) + { + case MemoryPropertyFlagBits::eDeviceLocal: return "DeviceLocal"; + case MemoryPropertyFlagBits::eHostVisible: return "HostVisible"; + case MemoryPropertyFlagBits::eHostCoherent: return "HostCoherent"; + case MemoryPropertyFlagBits::eHostCached: return "HostCached"; + case MemoryPropertyFlagBits::eLazilyAllocated: return "LazilyAllocated"; + case MemoryPropertyFlagBits::eProtected: return "Protected"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(MemoryPropertyFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & MemoryPropertyFlagBits::eDeviceLocal) result += "DeviceLocal | "; + if (value & MemoryPropertyFlagBits::eHostVisible) result += "HostVisible | "; + if (value & MemoryPropertyFlagBits::eHostCoherent) result += "HostCoherent | "; + if (value & MemoryPropertyFlagBits::eHostCached) result += "HostCached | "; + if (value & MemoryPropertyFlagBits::eLazilyAllocated) result += "LazilyAllocated | "; + if (value & MemoryPropertyFlagBits::eProtected) result += "Protected | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(MemoryHeapFlagBits value) + { + switch (value) + { + case MemoryHeapFlagBits::eDeviceLocal: return "DeviceLocal"; + case MemoryHeapFlagBits::eMultiInstance: return "MultiInstance"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(MemoryHeapFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & MemoryHeapFlagBits::eDeviceLocal) result += "DeviceLocal | "; + if (value & MemoryHeapFlagBits::eMultiInstance) result += "MultiInstance | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(AccessFlagBits value) + { + switch (value) + { + case AccessFlagBits::eIndirectCommandRead: return "IndirectCommandRead"; + case AccessFlagBits::eIndexRead: return "IndexRead"; + case AccessFlagBits::eVertexAttributeRead: return "VertexAttributeRead"; + case AccessFlagBits::eUniformRead: return "UniformRead"; + case AccessFlagBits::eInputAttachmentRead: return "InputAttachmentRead"; + case AccessFlagBits::eShaderRead: return "ShaderRead"; + case AccessFlagBits::eShaderWrite: return "ShaderWrite"; + case AccessFlagBits::eColorAttachmentRead: return "ColorAttachmentRead"; + case AccessFlagBits::eColorAttachmentWrite: return "ColorAttachmentWrite"; + case AccessFlagBits::eDepthStencilAttachmentRead: return "DepthStencilAttachmentRead"; + case AccessFlagBits::eDepthStencilAttachmentWrite: return "DepthStencilAttachmentWrite"; + case AccessFlagBits::eTransferRead: return "TransferRead"; + case AccessFlagBits::eTransferWrite: return "TransferWrite"; + 
case AccessFlagBits::eHostRead: return "HostRead"; + case AccessFlagBits::eHostWrite: return "HostWrite"; + case AccessFlagBits::eMemoryRead: return "MemoryRead"; + case AccessFlagBits::eMemoryWrite: return "MemoryWrite"; + case AccessFlagBits::eConditionalRenderingReadEXT: return "ConditionalRenderingReadEXT"; + case AccessFlagBits::eCommandProcessReadNVX: return "CommandProcessReadNVX"; + case AccessFlagBits::eCommandProcessWriteNVX: return "CommandProcessWriteNVX"; + case AccessFlagBits::eColorAttachmentReadNoncoherentEXT: return "ColorAttachmentReadNoncoherentEXT"; + case AccessFlagBits::eShadingRateImageReadNV: return "ShadingRateImageReadNV"; + case AccessFlagBits::eAccelerationStructureReadNVX: return "AccelerationStructureReadNVX"; + case AccessFlagBits::eAccelerationStructureWriteNVX: return "AccelerationStructureWriteNVX"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(AccessFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & AccessFlagBits::eIndirectCommandRead) result += "IndirectCommandRead | "; + if (value & AccessFlagBits::eIndexRead) result += "IndexRead | "; + if (value & AccessFlagBits::eVertexAttributeRead) result += "VertexAttributeRead | "; + if (value & AccessFlagBits::eUniformRead) result += "UniformRead | "; + if (value & AccessFlagBits::eInputAttachmentRead) result += "InputAttachmentRead | "; + if (value & AccessFlagBits::eShaderRead) result += "ShaderRead | "; + if (value & AccessFlagBits::eShaderWrite) result += "ShaderWrite | "; + if (value & AccessFlagBits::eColorAttachmentRead) result += "ColorAttachmentRead | "; + if (value & AccessFlagBits::eColorAttachmentWrite) result += "ColorAttachmentWrite | "; + if (value & AccessFlagBits::eDepthStencilAttachmentRead) result += "DepthStencilAttachmentRead | "; + if (value & AccessFlagBits::eDepthStencilAttachmentWrite) result += "DepthStencilAttachmentWrite | "; + if (value & AccessFlagBits::eTransferRead) result += "TransferRead | "; + if (value & AccessFlagBits::eTransferWrite) result += "TransferWrite | "; + if (value & AccessFlagBits::eHostRead) result += "HostRead | "; + if (value & AccessFlagBits::eHostWrite) result += "HostWrite | "; + if (value & AccessFlagBits::eMemoryRead) result += "MemoryRead | "; + if (value & AccessFlagBits::eMemoryWrite) result += "MemoryWrite | "; + if (value & AccessFlagBits::eConditionalRenderingReadEXT) result += "ConditionalRenderingReadEXT | "; + if (value & AccessFlagBits::eCommandProcessReadNVX) result += "CommandProcessReadNVX | "; + if (value & AccessFlagBits::eCommandProcessWriteNVX) result += "CommandProcessWriteNVX | "; + if (value & AccessFlagBits::eColorAttachmentReadNoncoherentEXT) result += "ColorAttachmentReadNoncoherentEXT | "; + if (value & AccessFlagBits::eShadingRateImageReadNV) result += "ShadingRateImageReadNV | "; + if (value & AccessFlagBits::eAccelerationStructureReadNVX) result += "AccelerationStructureReadNVX | "; + if (value & AccessFlagBits::eAccelerationStructureWriteNVX) result += "AccelerationStructureWriteNVX | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(BufferUsageFlagBits value) + { + switch (value) + { + case BufferUsageFlagBits::eTransferSrc: return "TransferSrc"; + case BufferUsageFlagBits::eTransferDst: return "TransferDst"; + case BufferUsageFlagBits::eUniformTexelBuffer: return "UniformTexelBuffer"; + case BufferUsageFlagBits::eStorageTexelBuffer: return "StorageTexelBuffer"; + case BufferUsageFlagBits::eUniformBuffer: 
return "UniformBuffer"; + case BufferUsageFlagBits::eStorageBuffer: return "StorageBuffer"; + case BufferUsageFlagBits::eIndexBuffer: return "IndexBuffer"; + case BufferUsageFlagBits::eVertexBuffer: return "VertexBuffer"; + case BufferUsageFlagBits::eIndirectBuffer: return "IndirectBuffer"; + case BufferUsageFlagBits::eConditionalRenderingEXT: return "ConditionalRenderingEXT"; + case BufferUsageFlagBits::eRaytracingNVX: return "RaytracingNVX"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(BufferUsageFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & BufferUsageFlagBits::eTransferSrc) result += "TransferSrc | "; + if (value & BufferUsageFlagBits::eTransferDst) result += "TransferDst | "; + if (value & BufferUsageFlagBits::eUniformTexelBuffer) result += "UniformTexelBuffer | "; + if (value & BufferUsageFlagBits::eStorageTexelBuffer) result += "StorageTexelBuffer | "; + if (value & BufferUsageFlagBits::eUniformBuffer) result += "UniformBuffer | "; + if (value & BufferUsageFlagBits::eStorageBuffer) result += "StorageBuffer | "; + if (value & BufferUsageFlagBits::eIndexBuffer) result += "IndexBuffer | "; + if (value & BufferUsageFlagBits::eVertexBuffer) result += "VertexBuffer | "; + if (value & BufferUsageFlagBits::eIndirectBuffer) result += "IndirectBuffer | "; + if (value & BufferUsageFlagBits::eConditionalRenderingEXT) result += "ConditionalRenderingEXT | "; + if (value & BufferUsageFlagBits::eRaytracingNVX) result += "RaytracingNVX | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(BufferCreateFlagBits value) + { + switch (value) + { + case BufferCreateFlagBits::eSparseBinding: return "SparseBinding"; + case BufferCreateFlagBits::eSparseResidency: return "SparseResidency"; + case BufferCreateFlagBits::eSparseAliased: return "SparseAliased"; + case BufferCreateFlagBits::eProtected: return "Protected"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(BufferCreateFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & BufferCreateFlagBits::eSparseBinding) result += "SparseBinding | "; + if (value & BufferCreateFlagBits::eSparseResidency) result += "SparseResidency | "; + if (value & BufferCreateFlagBits::eSparseAliased) result += "SparseAliased | "; + if (value & BufferCreateFlagBits::eProtected) result += "Protected | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ShaderStageFlagBits value) + { + switch (value) + { + case ShaderStageFlagBits::eVertex: return "Vertex"; + case ShaderStageFlagBits::eTessellationControl: return "TessellationControl"; + case ShaderStageFlagBits::eTessellationEvaluation: return "TessellationEvaluation"; + case ShaderStageFlagBits::eGeometry: return "Geometry"; + case ShaderStageFlagBits::eFragment: return "Fragment"; + case ShaderStageFlagBits::eCompute: return "Compute"; + case ShaderStageFlagBits::eAllGraphics: return "AllGraphics"; + case ShaderStageFlagBits::eAll: return "All"; + case ShaderStageFlagBits::eRaygenNVX: return "RaygenNVX"; + case ShaderStageFlagBits::eAnyHitNVX: return "AnyHitNVX"; + case ShaderStageFlagBits::eClosestHitNVX: return "ClosestHitNVX"; + case ShaderStageFlagBits::eMissNVX: return "MissNVX"; + case ShaderStageFlagBits::eIntersectionNVX: return "IntersectionNVX"; + case ShaderStageFlagBits::eCallableNVX: return "CallableNVX"; + case ShaderStageFlagBits::eTaskNV: return "TaskNV"; + case 
ShaderStageFlagBits::eMeshNV: return "MeshNV"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ShaderStageFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ShaderStageFlagBits::eVertex) result += "Vertex | "; + if (value & ShaderStageFlagBits::eTessellationControl) result += "TessellationControl | "; + if (value & ShaderStageFlagBits::eTessellationEvaluation) result += "TessellationEvaluation | "; + if (value & ShaderStageFlagBits::eGeometry) result += "Geometry | "; + if (value & ShaderStageFlagBits::eFragment) result += "Fragment | "; + if (value & ShaderStageFlagBits::eCompute) result += "Compute | "; + if (value & ShaderStageFlagBits::eAllGraphics) result += "AllGraphics | "; + if (value & ShaderStageFlagBits::eAll) result += "All | "; + if (value & ShaderStageFlagBits::eRaygenNVX) result += "RaygenNVX | "; + if (value & ShaderStageFlagBits::eAnyHitNVX) result += "AnyHitNVX | "; + if (value & ShaderStageFlagBits::eClosestHitNVX) result += "ClosestHitNVX | "; + if (value & ShaderStageFlagBits::eMissNVX) result += "MissNVX | "; + if (value & ShaderStageFlagBits::eIntersectionNVX) result += "IntersectionNVX | "; + if (value & ShaderStageFlagBits::eCallableNVX) result += "CallableNVX | "; + if (value & ShaderStageFlagBits::eTaskNV) result += "TaskNV | "; + if (value & ShaderStageFlagBits::eMeshNV) result += "MeshNV | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ImageUsageFlagBits value) + { + switch (value) + { + case ImageUsageFlagBits::eTransferSrc: return "TransferSrc"; + case ImageUsageFlagBits::eTransferDst: return "TransferDst"; + case ImageUsageFlagBits::eSampled: return "Sampled"; + case ImageUsageFlagBits::eStorage: return "Storage"; + case ImageUsageFlagBits::eColorAttachment: return "ColorAttachment"; + case ImageUsageFlagBits::eDepthStencilAttachment: return "DepthStencilAttachment"; + case ImageUsageFlagBits::eTransientAttachment: return "TransientAttachment"; + case ImageUsageFlagBits::eInputAttachment: return "InputAttachment"; + case ImageUsageFlagBits::eShadingRateImageNV: return "ShadingRateImageNV"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ImageUsageFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ImageUsageFlagBits::eTransferSrc) result += "TransferSrc | "; + if (value & ImageUsageFlagBits::eTransferDst) result += "TransferDst | "; + if (value & ImageUsageFlagBits::eSampled) result += "Sampled | "; + if (value & ImageUsageFlagBits::eStorage) result += "Storage | "; + if (value & ImageUsageFlagBits::eColorAttachment) result += "ColorAttachment | "; + if (value & ImageUsageFlagBits::eDepthStencilAttachment) result += "DepthStencilAttachment | "; + if (value & ImageUsageFlagBits::eTransientAttachment) result += "TransientAttachment | "; + if (value & ImageUsageFlagBits::eInputAttachment) result += "InputAttachment | "; + if (value & ImageUsageFlagBits::eShadingRateImageNV) result += "ShadingRateImageNV | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ImageCreateFlagBits value) + { + switch (value) + { + case ImageCreateFlagBits::eSparseBinding: return "SparseBinding"; + case ImageCreateFlagBits::eSparseResidency: return "SparseResidency"; + case ImageCreateFlagBits::eSparseAliased: return "SparseAliased"; + case ImageCreateFlagBits::eMutableFormat: return "MutableFormat"; + case ImageCreateFlagBits::eCubeCompatible: 
return "CubeCompatible"; + case ImageCreateFlagBits::eAlias: return "Alias"; + case ImageCreateFlagBits::eSplitInstanceBindRegions: return "SplitInstanceBindRegions"; + case ImageCreateFlagBits::e2DArrayCompatible: return "2DArrayCompatible"; + case ImageCreateFlagBits::eBlockTexelViewCompatible: return "BlockTexelViewCompatible"; + case ImageCreateFlagBits::eExtendedUsage: return "ExtendedUsage"; + case ImageCreateFlagBits::eProtected: return "Protected"; + case ImageCreateFlagBits::eDisjoint: return "Disjoint"; + case ImageCreateFlagBits::eCornerSampledNV: return "CornerSampledNV"; + case ImageCreateFlagBits::eSampleLocationsCompatibleDepthEXT: return "SampleLocationsCompatibleDepthEXT"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ImageCreateFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ImageCreateFlagBits::eSparseBinding) result += "SparseBinding | "; + if (value & ImageCreateFlagBits::eSparseResidency) result += "SparseResidency | "; + if (value & ImageCreateFlagBits::eSparseAliased) result += "SparseAliased | "; + if (value & ImageCreateFlagBits::eMutableFormat) result += "MutableFormat | "; + if (value & ImageCreateFlagBits::eCubeCompatible) result += "CubeCompatible | "; + if (value & ImageCreateFlagBits::eAlias) result += "Alias | "; + if (value & ImageCreateFlagBits::eSplitInstanceBindRegions) result += "SplitInstanceBindRegions | "; + if (value & ImageCreateFlagBits::e2DArrayCompatible) result += "2DArrayCompatible | "; + if (value & ImageCreateFlagBits::eBlockTexelViewCompatible) result += "BlockTexelViewCompatible | "; + if (value & ImageCreateFlagBits::eExtendedUsage) result += "ExtendedUsage | "; + if (value & ImageCreateFlagBits::eProtected) result += "Protected | "; + if (value & ImageCreateFlagBits::eDisjoint) result += "Disjoint | "; + if (value & ImageCreateFlagBits::eCornerSampledNV) result += "CornerSampledNV | "; + if (value & ImageCreateFlagBits::eSampleLocationsCompatibleDepthEXT) result += "SampleLocationsCompatibleDepthEXT | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineCreateFlagBits value) + { + switch (value) + { + case PipelineCreateFlagBits::eDisableOptimization: return "DisableOptimization"; + case PipelineCreateFlagBits::eAllowDerivatives: return "AllowDerivatives"; + case PipelineCreateFlagBits::eDerivative: return "Derivative"; + case PipelineCreateFlagBits::eViewIndexFromDeviceIndex: return "ViewIndexFromDeviceIndex"; + case PipelineCreateFlagBits::eDispatchBase: return "DispatchBase"; + case PipelineCreateFlagBits::eDeferCompileNVX: return "DeferCompileNVX"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(PipelineCreateFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & PipelineCreateFlagBits::eDisableOptimization) result += "DisableOptimization | "; + if (value & PipelineCreateFlagBits::eAllowDerivatives) result += "AllowDerivatives | "; + if (value & PipelineCreateFlagBits::eDerivative) result += "Derivative | "; + if (value & PipelineCreateFlagBits::eViewIndexFromDeviceIndex) result += "ViewIndexFromDeviceIndex | "; + if (value & PipelineCreateFlagBits::eDispatchBase) result += "DispatchBase | "; + if (value & PipelineCreateFlagBits::eDeferCompileNVX) result += "DeferCompileNVX | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ColorComponentFlagBits value) + { + switch (value) + { + case 
ColorComponentFlagBits::eR: return "R"; + case ColorComponentFlagBits::eG: return "G"; + case ColorComponentFlagBits::eB: return "B"; + case ColorComponentFlagBits::eA: return "A"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ColorComponentFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ColorComponentFlagBits::eR) result += "R | "; + if (value & ColorComponentFlagBits::eG) result += "G | "; + if (value & ColorComponentFlagBits::eB) result += "B | "; + if (value & ColorComponentFlagBits::eA) result += "A | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(FenceCreateFlagBits value) + { + switch (value) + { + case FenceCreateFlagBits::eSignaled: return "Signaled"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(FenceCreateFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & FenceCreateFlagBits::eSignaled) result += "Signaled | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(FormatFeatureFlagBits value) + { + switch (value) + { + case FormatFeatureFlagBits::eSampledImage: return "SampledImage"; + case FormatFeatureFlagBits::eStorageImage: return "StorageImage"; + case FormatFeatureFlagBits::eStorageImageAtomic: return "StorageImageAtomic"; + case FormatFeatureFlagBits::eUniformTexelBuffer: return "UniformTexelBuffer"; + case FormatFeatureFlagBits::eStorageTexelBuffer: return "StorageTexelBuffer"; + case FormatFeatureFlagBits::eStorageTexelBufferAtomic: return "StorageTexelBufferAtomic"; + case FormatFeatureFlagBits::eVertexBuffer: return "VertexBuffer"; + case FormatFeatureFlagBits::eColorAttachment: return "ColorAttachment"; + case FormatFeatureFlagBits::eColorAttachmentBlend: return "ColorAttachmentBlend"; + case FormatFeatureFlagBits::eDepthStencilAttachment: return "DepthStencilAttachment"; + case FormatFeatureFlagBits::eBlitSrc: return "BlitSrc"; + case FormatFeatureFlagBits::eBlitDst: return "BlitDst"; + case FormatFeatureFlagBits::eSampledImageFilterLinear: return "SampledImageFilterLinear"; + case FormatFeatureFlagBits::eTransferSrc: return "TransferSrc"; + case FormatFeatureFlagBits::eTransferDst: return "TransferDst"; + case FormatFeatureFlagBits::eMidpointChromaSamples: return "MidpointChromaSamples"; + case FormatFeatureFlagBits::eSampledImageYcbcrConversionLinearFilter: return "SampledImageYcbcrConversionLinearFilter"; + case FormatFeatureFlagBits::eSampledImageYcbcrConversionSeparateReconstructionFilter: return "SampledImageYcbcrConversionSeparateReconstructionFilter"; + case FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicit: return "SampledImageYcbcrConversionChromaReconstructionExplicit"; + case FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable: return "SampledImageYcbcrConversionChromaReconstructionExplicitForceable"; + case FormatFeatureFlagBits::eDisjoint: return "Disjoint"; + case FormatFeatureFlagBits::eCositedChromaSamples: return "CositedChromaSamples"; + case FormatFeatureFlagBits::eSampledImageFilterCubicIMG: return "SampledImageFilterCubicIMG"; + case FormatFeatureFlagBits::eSampledImageFilterMinmaxEXT: return "SampledImageFilterMinmaxEXT"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(FormatFeatureFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & FormatFeatureFlagBits::eSampledImage) result 
+= "SampledImage | "; + if (value & FormatFeatureFlagBits::eStorageImage) result += "StorageImage | "; + if (value & FormatFeatureFlagBits::eStorageImageAtomic) result += "StorageImageAtomic | "; + if (value & FormatFeatureFlagBits::eUniformTexelBuffer) result += "UniformTexelBuffer | "; + if (value & FormatFeatureFlagBits::eStorageTexelBuffer) result += "StorageTexelBuffer | "; + if (value & FormatFeatureFlagBits::eStorageTexelBufferAtomic) result += "StorageTexelBufferAtomic | "; + if (value & FormatFeatureFlagBits::eVertexBuffer) result += "VertexBuffer | "; + if (value & FormatFeatureFlagBits::eColorAttachment) result += "ColorAttachment | "; + if (value & FormatFeatureFlagBits::eColorAttachmentBlend) result += "ColorAttachmentBlend | "; + if (value & FormatFeatureFlagBits::eDepthStencilAttachment) result += "DepthStencilAttachment | "; + if (value & FormatFeatureFlagBits::eBlitSrc) result += "BlitSrc | "; + if (value & FormatFeatureFlagBits::eBlitDst) result += "BlitDst | "; + if (value & FormatFeatureFlagBits::eSampledImageFilterLinear) result += "SampledImageFilterLinear | "; + if (value & FormatFeatureFlagBits::eTransferSrc) result += "TransferSrc | "; + if (value & FormatFeatureFlagBits::eTransferDst) result += "TransferDst | "; + if (value & FormatFeatureFlagBits::eMidpointChromaSamples) result += "MidpointChromaSamples | "; + if (value & FormatFeatureFlagBits::eSampledImageYcbcrConversionLinearFilter) result += "SampledImageYcbcrConversionLinearFilter | "; + if (value & FormatFeatureFlagBits::eSampledImageYcbcrConversionSeparateReconstructionFilter) result += "SampledImageYcbcrConversionSeparateReconstructionFilter | "; + if (value & FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicit) result += "SampledImageYcbcrConversionChromaReconstructionExplicit | "; + if (value & FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable) result += "SampledImageYcbcrConversionChromaReconstructionExplicitForceable | "; + if (value & FormatFeatureFlagBits::eDisjoint) result += "Disjoint | "; + if (value & FormatFeatureFlagBits::eCositedChromaSamples) result += "CositedChromaSamples | "; + if (value & FormatFeatureFlagBits::eSampledImageFilterCubicIMG) result += "SampledImageFilterCubicIMG | "; + if (value & FormatFeatureFlagBits::eSampledImageFilterMinmaxEXT) result += "SampledImageFilterMinmaxEXT | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(QueryControlFlagBits value) + { + switch (value) + { + case QueryControlFlagBits::ePrecise: return "Precise"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(QueryControlFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & QueryControlFlagBits::ePrecise) result += "Precise | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(QueryResultFlagBits value) + { + switch (value) + { + case QueryResultFlagBits::e64: return "64"; + case QueryResultFlagBits::eWait: return "Wait"; + case QueryResultFlagBits::eWithAvailability: return "WithAvailability"; + case QueryResultFlagBits::ePartial: return "Partial"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(QueryResultFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & QueryResultFlagBits::e64) result += "64 | "; + if (value & QueryResultFlagBits::eWait) result += "Wait | "; + if (value & 
QueryResultFlagBits::eWithAvailability) result += "WithAvailability | "; + if (value & QueryResultFlagBits::ePartial) result += "Partial | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(CommandBufferUsageFlagBits value) + { + switch (value) + { + case CommandBufferUsageFlagBits::eOneTimeSubmit: return "OneTimeSubmit"; + case CommandBufferUsageFlagBits::eRenderPassContinue: return "RenderPassContinue"; + case CommandBufferUsageFlagBits::eSimultaneousUse: return "SimultaneousUse"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CommandBufferUsageFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & CommandBufferUsageFlagBits::eOneTimeSubmit) result += "OneTimeSubmit | "; + if (value & CommandBufferUsageFlagBits::eRenderPassContinue) result += "RenderPassContinue | "; + if (value & CommandBufferUsageFlagBits::eSimultaneousUse) result += "SimultaneousUse | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(QueryPipelineStatisticFlagBits value) + { + switch (value) + { + case QueryPipelineStatisticFlagBits::eInputAssemblyVertices: return "InputAssemblyVertices"; + case QueryPipelineStatisticFlagBits::eInputAssemblyPrimitives: return "InputAssemblyPrimitives"; + case QueryPipelineStatisticFlagBits::eVertexShaderInvocations: return "VertexShaderInvocations"; + case QueryPipelineStatisticFlagBits::eGeometryShaderInvocations: return "GeometryShaderInvocations"; + case QueryPipelineStatisticFlagBits::eGeometryShaderPrimitives: return "GeometryShaderPrimitives"; + case QueryPipelineStatisticFlagBits::eClippingInvocations: return "ClippingInvocations"; + case QueryPipelineStatisticFlagBits::eClippingPrimitives: return "ClippingPrimitives"; + case QueryPipelineStatisticFlagBits::eFragmentShaderInvocations: return "FragmentShaderInvocations"; + case QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches: return "TessellationControlShaderPatches"; + case QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations: return "TessellationEvaluationShaderInvocations"; + case QueryPipelineStatisticFlagBits::eComputeShaderInvocations: return "ComputeShaderInvocations"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(QueryPipelineStatisticFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & QueryPipelineStatisticFlagBits::eInputAssemblyVertices) result += "InputAssemblyVertices | "; + if (value & QueryPipelineStatisticFlagBits::eInputAssemblyPrimitives) result += "InputAssemblyPrimitives | "; + if (value & QueryPipelineStatisticFlagBits::eVertexShaderInvocations) result += "VertexShaderInvocations | "; + if (value & QueryPipelineStatisticFlagBits::eGeometryShaderInvocations) result += "GeometryShaderInvocations | "; + if (value & QueryPipelineStatisticFlagBits::eGeometryShaderPrimitives) result += "GeometryShaderPrimitives | "; + if (value & QueryPipelineStatisticFlagBits::eClippingInvocations) result += "ClippingInvocations | "; + if (value & QueryPipelineStatisticFlagBits::eClippingPrimitives) result += "ClippingPrimitives | "; + if (value & QueryPipelineStatisticFlagBits::eFragmentShaderInvocations) result += "FragmentShaderInvocations | "; + if (value & QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches) result += "TessellationControlShaderPatches | "; + if (value & 
QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations) result += "TessellationEvaluationShaderInvocations | "; + if (value & QueryPipelineStatisticFlagBits::eComputeShaderInvocations) result += "ComputeShaderInvocations | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ImageAspectFlagBits value) + { + switch (value) + { + case ImageAspectFlagBits::eColor: return "Color"; + case ImageAspectFlagBits::eDepth: return "Depth"; + case ImageAspectFlagBits::eStencil: return "Stencil"; + case ImageAspectFlagBits::eMetadata: return "Metadata"; + case ImageAspectFlagBits::ePlane0: return "Plane0"; + case ImageAspectFlagBits::ePlane1: return "Plane1"; + case ImageAspectFlagBits::ePlane2: return "Plane2"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ImageAspectFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ImageAspectFlagBits::eColor) result += "Color | "; + if (value & ImageAspectFlagBits::eDepth) result += "Depth | "; + if (value & ImageAspectFlagBits::eStencil) result += "Stencil | "; + if (value & ImageAspectFlagBits::eMetadata) result += "Metadata | "; + if (value & ImageAspectFlagBits::ePlane0) result += "Plane0 | "; + if (value & ImageAspectFlagBits::ePlane1) result += "Plane1 | "; + if (value & ImageAspectFlagBits::ePlane2) result += "Plane2 | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(SparseImageFormatFlagBits value) + { + switch (value) + { + case SparseImageFormatFlagBits::eSingleMiptail: return "SingleMiptail"; + case SparseImageFormatFlagBits::eAlignedMipSize: return "AlignedMipSize"; + case SparseImageFormatFlagBits::eNonstandardBlockSize: return "NonstandardBlockSize"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SparseImageFormatFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & SparseImageFormatFlagBits::eSingleMiptail) result += "SingleMiptail | "; + if (value & SparseImageFormatFlagBits::eAlignedMipSize) result += "AlignedMipSize | "; + if (value & SparseImageFormatFlagBits::eNonstandardBlockSize) result += "NonstandardBlockSize | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(SparseMemoryBindFlagBits value) + { + switch (value) + { + case SparseMemoryBindFlagBits::eMetadata: return "Metadata"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SparseMemoryBindFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & SparseMemoryBindFlagBits::eMetadata) result += "Metadata | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(PipelineStageFlagBits value) + { + switch (value) + { + case PipelineStageFlagBits::eTopOfPipe: return "TopOfPipe"; + case PipelineStageFlagBits::eDrawIndirect: return "DrawIndirect"; + case PipelineStageFlagBits::eVertexInput: return "VertexInput"; + case PipelineStageFlagBits::eVertexShader: return "VertexShader"; + case PipelineStageFlagBits::eTessellationControlShader: return "TessellationControlShader"; + case PipelineStageFlagBits::eTessellationEvaluationShader: return "TessellationEvaluationShader"; + case PipelineStageFlagBits::eGeometryShader: return "GeometryShader"; + case PipelineStageFlagBits::eFragmentShader: return "FragmentShader"; + case PipelineStageFlagBits::eEarlyFragmentTests: return 
"EarlyFragmentTests"; + case PipelineStageFlagBits::eLateFragmentTests: return "LateFragmentTests"; + case PipelineStageFlagBits::eColorAttachmentOutput: return "ColorAttachmentOutput"; + case PipelineStageFlagBits::eComputeShader: return "ComputeShader"; + case PipelineStageFlagBits::eTransfer: return "Transfer"; + case PipelineStageFlagBits::eBottomOfPipe: return "BottomOfPipe"; + case PipelineStageFlagBits::eHost: return "Host"; + case PipelineStageFlagBits::eAllGraphics: return "AllGraphics"; + case PipelineStageFlagBits::eAllCommands: return "AllCommands"; + case PipelineStageFlagBits::eConditionalRenderingEXT: return "ConditionalRenderingEXT"; + case PipelineStageFlagBits::eCommandProcessNVX: return "CommandProcessNVX"; + case PipelineStageFlagBits::eShadingRateImageNV: return "ShadingRateImageNV"; + case PipelineStageFlagBits::eRaytracingNVX: return "RaytracingNVX"; + case PipelineStageFlagBits::eTaskShaderNV: return "TaskShaderNV"; + case PipelineStageFlagBits::eMeshShaderNV: return "MeshShaderNV"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(PipelineStageFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & PipelineStageFlagBits::eTopOfPipe) result += "TopOfPipe | "; + if (value & PipelineStageFlagBits::eDrawIndirect) result += "DrawIndirect | "; + if (value & PipelineStageFlagBits::eVertexInput) result += "VertexInput | "; + if (value & PipelineStageFlagBits::eVertexShader) result += "VertexShader | "; + if (value & PipelineStageFlagBits::eTessellationControlShader) result += "TessellationControlShader | "; + if (value & PipelineStageFlagBits::eTessellationEvaluationShader) result += "TessellationEvaluationShader | "; + if (value & PipelineStageFlagBits::eGeometryShader) result += "GeometryShader | "; + if (value & PipelineStageFlagBits::eFragmentShader) result += "FragmentShader | "; + if (value & PipelineStageFlagBits::eEarlyFragmentTests) result += "EarlyFragmentTests | "; + if (value & PipelineStageFlagBits::eLateFragmentTests) result += "LateFragmentTests | "; + if (value & PipelineStageFlagBits::eColorAttachmentOutput) result += "ColorAttachmentOutput | "; + if (value & PipelineStageFlagBits::eComputeShader) result += "ComputeShader | "; + if (value & PipelineStageFlagBits::eTransfer) result += "Transfer | "; + if (value & PipelineStageFlagBits::eBottomOfPipe) result += "BottomOfPipe | "; + if (value & PipelineStageFlagBits::eHost) result += "Host | "; + if (value & PipelineStageFlagBits::eAllGraphics) result += "AllGraphics | "; + if (value & PipelineStageFlagBits::eAllCommands) result += "AllCommands | "; + if (value & PipelineStageFlagBits::eConditionalRenderingEXT) result += "ConditionalRenderingEXT | "; + if (value & PipelineStageFlagBits::eCommandProcessNVX) result += "CommandProcessNVX | "; + if (value & PipelineStageFlagBits::eShadingRateImageNV) result += "ShadingRateImageNV | "; + if (value & PipelineStageFlagBits::eRaytracingNVX) result += "RaytracingNVX | "; + if (value & PipelineStageFlagBits::eTaskShaderNV) result += "TaskShaderNV | "; + if (value & PipelineStageFlagBits::eMeshShaderNV) result += "MeshShaderNV | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(CommandPoolCreateFlagBits value) + { + switch (value) + { + case CommandPoolCreateFlagBits::eTransient: return "Transient"; + case CommandPoolCreateFlagBits::eResetCommandBuffer: return "ResetCommandBuffer"; + case CommandPoolCreateFlagBits::eProtected: return "Protected"; + default: 
return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CommandPoolCreateFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & CommandPoolCreateFlagBits::eTransient) result += "Transient | "; + if (value & CommandPoolCreateFlagBits::eResetCommandBuffer) result += "ResetCommandBuffer | "; + if (value & CommandPoolCreateFlagBits::eProtected) result += "Protected | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(CommandPoolResetFlagBits value) + { + switch (value) + { + case CommandPoolResetFlagBits::eReleaseResources: return "ReleaseResources"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CommandPoolResetFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & CommandPoolResetFlagBits::eReleaseResources) result += "ReleaseResources | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(CommandBufferResetFlagBits value) + { + switch (value) + { + case CommandBufferResetFlagBits::eReleaseResources: return "ReleaseResources"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CommandBufferResetFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & CommandBufferResetFlagBits::eReleaseResources) result += "ReleaseResources | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(SampleCountFlagBits value) + { + switch (value) + { + case SampleCountFlagBits::e1: return "1"; + case SampleCountFlagBits::e2: return "2"; + case SampleCountFlagBits::e4: return "4"; + case SampleCountFlagBits::e8: return "8"; + case SampleCountFlagBits::e16: return "16"; + case SampleCountFlagBits::e32: return "32"; + case SampleCountFlagBits::e64: return "64"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SampleCountFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & SampleCountFlagBits::e1) result += "1 | "; + if (value & SampleCountFlagBits::e2) result += "2 | "; + if (value & SampleCountFlagBits::e4) result += "4 | "; + if (value & SampleCountFlagBits::e8) result += "8 | "; + if (value & SampleCountFlagBits::e16) result += "16 | "; + if (value & SampleCountFlagBits::e32) result += "32 | "; + if (value & SampleCountFlagBits::e64) result += "64 | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(AttachmentDescriptionFlagBits value) + { + switch (value) + { + case AttachmentDescriptionFlagBits::eMayAlias: return "MayAlias"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(AttachmentDescriptionFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & AttachmentDescriptionFlagBits::eMayAlias) result += "MayAlias | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(StencilFaceFlagBits value) + { + switch (value) + { + case StencilFaceFlagBits::eFront: return "Front"; + case StencilFaceFlagBits::eBack: return "Back"; + case StencilFaceFlagBits::eVkStencilFrontAndBack: return "VkStencilFrontAndBack"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(StencilFaceFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & StencilFaceFlagBits::eFront) result += "Front | "; + if (value & StencilFaceFlagBits::eBack) result += "Back 
| "; + if (value & StencilFaceFlagBits::eVkStencilFrontAndBack) result += "VkStencilFrontAndBack | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorPoolCreateFlagBits value) + { + switch (value) + { + case DescriptorPoolCreateFlagBits::eFreeDescriptorSet: return "FreeDescriptorSet"; + case DescriptorPoolCreateFlagBits::eUpdateAfterBindEXT: return "UpdateAfterBindEXT"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorPoolCreateFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & DescriptorPoolCreateFlagBits::eFreeDescriptorSet) result += "FreeDescriptorSet | "; + if (value & DescriptorPoolCreateFlagBits::eUpdateAfterBindEXT) result += "UpdateAfterBindEXT | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(DependencyFlagBits value) + { + switch (value) + { + case DependencyFlagBits::eByRegion: return "ByRegion"; + case DependencyFlagBits::eDeviceGroup: return "DeviceGroup"; + case DependencyFlagBits::eViewLocal: return "ViewLocal"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DependencyFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & DependencyFlagBits::eByRegion) result += "ByRegion | "; + if (value & DependencyFlagBits::eDeviceGroup) result += "DeviceGroup | "; + if (value & DependencyFlagBits::eViewLocal) result += "ViewLocal | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(PresentModeKHR value) + { + switch (value) + { + case PresentModeKHR::eImmediate: return "Immediate"; + case PresentModeKHR::eMailbox: return "Mailbox"; + case PresentModeKHR::eFifo: return "Fifo"; + case PresentModeKHR::eFifoRelaxed: return "FifoRelaxed"; + case PresentModeKHR::eSharedDemandRefresh: return "SharedDemandRefresh"; + case PresentModeKHR::eSharedContinuousRefresh: return "SharedContinuousRefresh"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ColorSpaceKHR value) + { + switch (value) + { + case ColorSpaceKHR::eSrgbNonlinear: return "SrgbNonlinear"; + case ColorSpaceKHR::eDisplayP3NonlinearEXT: return "DisplayP3NonlinearEXT"; + case ColorSpaceKHR::eExtendedSrgbLinearEXT: return "ExtendedSrgbLinearEXT"; + case ColorSpaceKHR::eDciP3LinearEXT: return "DciP3LinearEXT"; + case ColorSpaceKHR::eDciP3NonlinearEXT: return "DciP3NonlinearEXT"; + case ColorSpaceKHR::eBt709LinearEXT: return "Bt709LinearEXT"; + case ColorSpaceKHR::eBt709NonlinearEXT: return "Bt709NonlinearEXT"; + case ColorSpaceKHR::eBt2020LinearEXT: return "Bt2020LinearEXT"; + case ColorSpaceKHR::eHdr10St2084EXT: return "Hdr10St2084EXT"; + case ColorSpaceKHR::eDolbyvisionEXT: return "DolbyvisionEXT"; + case ColorSpaceKHR::eHdr10HlgEXT: return "Hdr10HlgEXT"; + case ColorSpaceKHR::eAdobergbLinearEXT: return "AdobergbLinearEXT"; + case ColorSpaceKHR::eAdobergbNonlinearEXT: return "AdobergbNonlinearEXT"; + case ColorSpaceKHR::ePassThroughEXT: return "PassThroughEXT"; + case ColorSpaceKHR::eExtendedSrgbNonlinearEXT: return "ExtendedSrgbNonlinearEXT"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DisplayPlaneAlphaFlagBitsKHR value) + { + switch (value) + { + case DisplayPlaneAlphaFlagBitsKHR::eOpaque: return "Opaque"; + case DisplayPlaneAlphaFlagBitsKHR::eGlobal: return "Global"; + case DisplayPlaneAlphaFlagBitsKHR::ePerPixel: return "PerPixel"; + case 
DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied: return "PerPixelPremultiplied"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DisplayPlaneAlphaFlagsKHR value) + { + if (!value) return "{}"; + std::string result; + if (value & DisplayPlaneAlphaFlagBitsKHR::eOpaque) result += "Opaque | "; + if (value & DisplayPlaneAlphaFlagBitsKHR::eGlobal) result += "Global | "; + if (value & DisplayPlaneAlphaFlagBitsKHR::ePerPixel) result += "PerPixel | "; + if (value & DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied) result += "PerPixelPremultiplied | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(CompositeAlphaFlagBitsKHR value) + { + switch (value) + { + case CompositeAlphaFlagBitsKHR::eOpaque: return "Opaque"; + case CompositeAlphaFlagBitsKHR::ePreMultiplied: return "PreMultiplied"; + case CompositeAlphaFlagBitsKHR::ePostMultiplied: return "PostMultiplied"; + case CompositeAlphaFlagBitsKHR::eInherit: return "Inherit"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CompositeAlphaFlagsKHR value) + { + if (!value) return "{}"; + std::string result; + if (value & CompositeAlphaFlagBitsKHR::eOpaque) result += "Opaque | "; + if (value & CompositeAlphaFlagBitsKHR::ePreMultiplied) result += "PreMultiplied | "; + if (value & CompositeAlphaFlagBitsKHR::ePostMultiplied) result += "PostMultiplied | "; + if (value & CompositeAlphaFlagBitsKHR::eInherit) result += "Inherit | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(SurfaceTransformFlagBitsKHR value) + { + switch (value) + { + case SurfaceTransformFlagBitsKHR::eIdentity: return "Identity"; + case SurfaceTransformFlagBitsKHR::eRotate90: return "Rotate90"; + case SurfaceTransformFlagBitsKHR::eRotate180: return "Rotate180"; + case SurfaceTransformFlagBitsKHR::eRotate270: return "Rotate270"; + case SurfaceTransformFlagBitsKHR::eHorizontalMirror: return "HorizontalMirror"; + case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate90: return "HorizontalMirrorRotate90"; + case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180: return "HorizontalMirrorRotate180"; + case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270: return "HorizontalMirrorRotate270"; + case SurfaceTransformFlagBitsKHR::eInherit: return "Inherit"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SurfaceTransformFlagsKHR value) + { + if (!value) return "{}"; + std::string result; + if (value & SurfaceTransformFlagBitsKHR::eIdentity) result += "Identity | "; + if (value & SurfaceTransformFlagBitsKHR::eRotate90) result += "Rotate90 | "; + if (value & SurfaceTransformFlagBitsKHR::eRotate180) result += "Rotate180 | "; + if (value & SurfaceTransformFlagBitsKHR::eRotate270) result += "Rotate270 | "; + if (value & SurfaceTransformFlagBitsKHR::eHorizontalMirror) result += "HorizontalMirror | "; + if (value & SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate90) result += "HorizontalMirrorRotate90 | "; + if (value & SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180) result += "HorizontalMirrorRotate180 | "; + if (value & SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270) result += "HorizontalMirrorRotate270 | "; + if (value & SurfaceTransformFlagBitsKHR::eInherit) result += "Inherit | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(DebugReportFlagBitsEXT value) + { + switch (value) + { + 
case DebugReportFlagBitsEXT::eInformation: return "Information"; + case DebugReportFlagBitsEXT::eWarning: return "Warning"; + case DebugReportFlagBitsEXT::ePerformanceWarning: return "PerformanceWarning"; + case DebugReportFlagBitsEXT::eError: return "Error"; + case DebugReportFlagBitsEXT::eDebug: return "Debug"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DebugReportFlagsEXT value) + { + if (!value) return "{}"; + std::string result; + if (value & DebugReportFlagBitsEXT::eInformation) result += "Information | "; + if (value & DebugReportFlagBitsEXT::eWarning) result += "Warning | "; + if (value & DebugReportFlagBitsEXT::ePerformanceWarning) result += "PerformanceWarning | "; + if (value & DebugReportFlagBitsEXT::eError) result += "Error | "; + if (value & DebugReportFlagBitsEXT::eDebug) result += "Debug | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(DebugReportObjectTypeEXT value) + { + switch (value) + { + case DebugReportObjectTypeEXT::eUnknown: return "Unknown"; + case DebugReportObjectTypeEXT::eInstance: return "Instance"; + case DebugReportObjectTypeEXT::ePhysicalDevice: return "PhysicalDevice"; + case DebugReportObjectTypeEXT::eDevice: return "Device"; + case DebugReportObjectTypeEXT::eQueue: return "Queue"; + case DebugReportObjectTypeEXT::eSemaphore: return "Semaphore"; + case DebugReportObjectTypeEXT::eCommandBuffer: return "CommandBuffer"; + case DebugReportObjectTypeEXT::eFence: return "Fence"; + case DebugReportObjectTypeEXT::eDeviceMemory: return "DeviceMemory"; + case DebugReportObjectTypeEXT::eBuffer: return "Buffer"; + case DebugReportObjectTypeEXT::eImage: return "Image"; + case DebugReportObjectTypeEXT::eEvent: return "Event"; + case DebugReportObjectTypeEXT::eQueryPool: return "QueryPool"; + case DebugReportObjectTypeEXT::eBufferView: return "BufferView"; + case DebugReportObjectTypeEXT::eImageView: return "ImageView"; + case DebugReportObjectTypeEXT::eShaderModule: return "ShaderModule"; + case DebugReportObjectTypeEXT::ePipelineCache: return "PipelineCache"; + case DebugReportObjectTypeEXT::ePipelineLayout: return "PipelineLayout"; + case DebugReportObjectTypeEXT::eRenderPass: return "RenderPass"; + case DebugReportObjectTypeEXT::ePipeline: return "Pipeline"; + case DebugReportObjectTypeEXT::eDescriptorSetLayout: return "DescriptorSetLayout"; + case DebugReportObjectTypeEXT::eSampler: return "Sampler"; + case DebugReportObjectTypeEXT::eDescriptorPool: return "DescriptorPool"; + case DebugReportObjectTypeEXT::eDescriptorSet: return "DescriptorSet"; + case DebugReportObjectTypeEXT::eFramebuffer: return "Framebuffer"; + case DebugReportObjectTypeEXT::eCommandPool: return "CommandPool"; + case DebugReportObjectTypeEXT::eSurfaceKhr: return "SurfaceKhr"; + case DebugReportObjectTypeEXT::eSwapchainKhr: return "SwapchainKhr"; + case DebugReportObjectTypeEXT::eDebugReportCallbackExt: return "DebugReportCallbackExt"; + case DebugReportObjectTypeEXT::eDisplayKhr: return "DisplayKhr"; + case DebugReportObjectTypeEXT::eDisplayModeKhr: return "DisplayModeKhr"; + case DebugReportObjectTypeEXT::eObjectTableNvx: return "ObjectTableNvx"; + case DebugReportObjectTypeEXT::eIndirectCommandsLayoutNvx: return "IndirectCommandsLayoutNvx"; + case DebugReportObjectTypeEXT::eValidationCacheExt: return "ValidationCacheExt"; + case DebugReportObjectTypeEXT::eSamplerYcbcrConversion: return "SamplerYcbcrConversion"; + case DebugReportObjectTypeEXT::eDescriptorUpdateTemplate: return 
"DescriptorUpdateTemplate"; + case DebugReportObjectTypeEXT::eAccelerationStructureNVX: return "AccelerationStructureNVX"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(RasterizationOrderAMD value) + { + switch (value) + { + case RasterizationOrderAMD::eStrict: return "Strict"; + case RasterizationOrderAMD::eRelaxed: return "Relaxed"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ExternalMemoryHandleTypeFlagBitsNV value) + { + switch (value) + { + case ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32: return "OpaqueWin32"; + case ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt: return "OpaqueWin32Kmt"; + case ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image: return "D3D11Image"; + case ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt: return "D3D11ImageKmt"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ExternalMemoryHandleTypeFlagsNV value) + { + if (!value) return "{}"; + std::string result; + if (value & ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32) result += "OpaqueWin32 | "; + if (value & ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt) result += "OpaqueWin32Kmt | "; + if (value & ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image) result += "D3D11Image | "; + if (value & ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt) result += "D3D11ImageKmt | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ExternalMemoryFeatureFlagBitsNV value) + { + switch (value) + { + case ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly: return "DedicatedOnly"; + case ExternalMemoryFeatureFlagBitsNV::eExportable: return "Exportable"; + case ExternalMemoryFeatureFlagBitsNV::eImportable: return "Importable"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ExternalMemoryFeatureFlagsNV value) + { + if (!value) return "{}"; + std::string result; + if (value & ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly) result += "DedicatedOnly | "; + if (value & ExternalMemoryFeatureFlagBitsNV::eExportable) result += "Exportable | "; + if (value & ExternalMemoryFeatureFlagBitsNV::eImportable) result += "Importable | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ValidationCheckEXT value) + { + switch (value) + { + case ValidationCheckEXT::eAll: return "All"; + case ValidationCheckEXT::eShaders: return "Shaders"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SubgroupFeatureFlagBits value) + { + switch (value) + { + case SubgroupFeatureFlagBits::eBasic: return "Basic"; + case SubgroupFeatureFlagBits::eVote: return "Vote"; + case SubgroupFeatureFlagBits::eArithmetic: return "Arithmetic"; + case SubgroupFeatureFlagBits::eBallot: return "Ballot"; + case SubgroupFeatureFlagBits::eShuffle: return "Shuffle"; + case SubgroupFeatureFlagBits::eShuffleRelative: return "ShuffleRelative"; + case SubgroupFeatureFlagBits::eClustered: return "Clustered"; + case SubgroupFeatureFlagBits::eQuad: return "Quad"; + case SubgroupFeatureFlagBits::ePartitionedNV: return "PartitionedNV"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SubgroupFeatureFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & SubgroupFeatureFlagBits::eBasic) result += "Basic | "; + if (value & SubgroupFeatureFlagBits::eVote) result += "Vote | "; + if (value & SubgroupFeatureFlagBits::eArithmetic) result += 
"Arithmetic | "; + if (value & SubgroupFeatureFlagBits::eBallot) result += "Ballot | "; + if (value & SubgroupFeatureFlagBits::eShuffle) result += "Shuffle | "; + if (value & SubgroupFeatureFlagBits::eShuffleRelative) result += "ShuffleRelative | "; + if (value & SubgroupFeatureFlagBits::eClustered) result += "Clustered | "; + if (value & SubgroupFeatureFlagBits::eQuad) result += "Quad | "; + if (value & SubgroupFeatureFlagBits::ePartitionedNV) result += "PartitionedNV | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(IndirectCommandsLayoutUsageFlagBitsNVX value) + { + switch (value) + { + case IndirectCommandsLayoutUsageFlagBitsNVX::eUnorderedSequences: return "UnorderedSequences"; + case IndirectCommandsLayoutUsageFlagBitsNVX::eSparseSequences: return "SparseSequences"; + case IndirectCommandsLayoutUsageFlagBitsNVX::eEmptyExecutions: return "EmptyExecutions"; + case IndirectCommandsLayoutUsageFlagBitsNVX::eIndexedSequences: return "IndexedSequences"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(IndirectCommandsLayoutUsageFlagsNVX value) + { + if (!value) return "{}"; + std::string result; + if (value & IndirectCommandsLayoutUsageFlagBitsNVX::eUnorderedSequences) result += "UnorderedSequences | "; + if (value & IndirectCommandsLayoutUsageFlagBitsNVX::eSparseSequences) result += "SparseSequences | "; + if (value & IndirectCommandsLayoutUsageFlagBitsNVX::eEmptyExecutions) result += "EmptyExecutions | "; + if (value & IndirectCommandsLayoutUsageFlagBitsNVX::eIndexedSequences) result += "IndexedSequences | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ObjectEntryUsageFlagBitsNVX value) + { + switch (value) + { + case ObjectEntryUsageFlagBitsNVX::eGraphics: return "Graphics"; + case ObjectEntryUsageFlagBitsNVX::eCompute: return "Compute"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ObjectEntryUsageFlagsNVX value) + { + if (!value) return "{}"; + std::string result; + if (value & ObjectEntryUsageFlagBitsNVX::eGraphics) result += "Graphics | "; + if (value & ObjectEntryUsageFlagBitsNVX::eCompute) result += "Compute | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(IndirectCommandsTokenTypeNVX value) + { + switch (value) + { + case IndirectCommandsTokenTypeNVX::ePipeline: return "Pipeline"; + case IndirectCommandsTokenTypeNVX::eDescriptorSet: return "DescriptorSet"; + case IndirectCommandsTokenTypeNVX::eIndexBuffer: return "IndexBuffer"; + case IndirectCommandsTokenTypeNVX::eVertexBuffer: return "VertexBuffer"; + case IndirectCommandsTokenTypeNVX::ePushConstant: return "PushConstant"; + case IndirectCommandsTokenTypeNVX::eDrawIndexed: return "DrawIndexed"; + case IndirectCommandsTokenTypeNVX::eDraw: return "Draw"; + case IndirectCommandsTokenTypeNVX::eDispatch: return "Dispatch"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ObjectEntryTypeNVX value) + { + switch (value) + { + case ObjectEntryTypeNVX::eDescriptorSet: return "DescriptorSet"; + case ObjectEntryTypeNVX::ePipeline: return "Pipeline"; + case ObjectEntryTypeNVX::eIndexBuffer: return "IndexBuffer"; + case ObjectEntryTypeNVX::eVertexBuffer: return "VertexBuffer"; + case ObjectEntryTypeNVX::ePushConstant: return "PushConstant"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorSetLayoutCreateFlagBits value) + { 
+ switch (value) + { + case DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR: return "PushDescriptorKHR"; + case DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPoolEXT: return "UpdateAfterBindPoolEXT"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorSetLayoutCreateFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR) result += "PushDescriptorKHR | "; + if (value & DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPoolEXT) result += "UpdateAfterBindPoolEXT | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ExternalMemoryHandleTypeFlagBits value) + { + switch (value) + { + case ExternalMemoryHandleTypeFlagBits::eOpaqueFd: return "OpaqueFd"; + case ExternalMemoryHandleTypeFlagBits::eOpaqueWin32: return "OpaqueWin32"; + case ExternalMemoryHandleTypeFlagBits::eOpaqueWin32Kmt: return "OpaqueWin32Kmt"; + case ExternalMemoryHandleTypeFlagBits::eD3D11Texture: return "D3D11Texture"; + case ExternalMemoryHandleTypeFlagBits::eD3D11TextureKmt: return "D3D11TextureKmt"; + case ExternalMemoryHandleTypeFlagBits::eD3D12Heap: return "D3D12Heap"; + case ExternalMemoryHandleTypeFlagBits::eD3D12Resource: return "D3D12Resource"; + case ExternalMemoryHandleTypeFlagBits::eDmaBufEXT: return "DmaBufEXT"; + case ExternalMemoryHandleTypeFlagBits::eAndroidHardwareBufferANDROID: return "AndroidHardwareBufferANDROID"; + case ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT: return "HostAllocationEXT"; + case ExternalMemoryHandleTypeFlagBits::eHostMappedForeignMemoryEXT: return "HostMappedForeignMemoryEXT"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ExternalMemoryHandleTypeFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ExternalMemoryHandleTypeFlagBits::eOpaqueFd) result += "OpaqueFd | "; + if (value & ExternalMemoryHandleTypeFlagBits::eOpaqueWin32) result += "OpaqueWin32 | "; + if (value & ExternalMemoryHandleTypeFlagBits::eOpaqueWin32Kmt) result += "OpaqueWin32Kmt | "; + if (value & ExternalMemoryHandleTypeFlagBits::eD3D11Texture) result += "D3D11Texture | "; + if (value & ExternalMemoryHandleTypeFlagBits::eD3D11TextureKmt) result += "D3D11TextureKmt | "; + if (value & ExternalMemoryHandleTypeFlagBits::eD3D12Heap) result += "D3D12Heap | "; + if (value & ExternalMemoryHandleTypeFlagBits::eD3D12Resource) result += "D3D12Resource | "; + if (value & ExternalMemoryHandleTypeFlagBits::eDmaBufEXT) result += "DmaBufEXT | "; + if (value & ExternalMemoryHandleTypeFlagBits::eAndroidHardwareBufferANDROID) result += "AndroidHardwareBufferANDROID | "; + if (value & ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT) result += "HostAllocationEXT | "; + if (value & ExternalMemoryHandleTypeFlagBits::eHostMappedForeignMemoryEXT) result += "HostMappedForeignMemoryEXT | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ExternalMemoryFeatureFlagBits value) + { + switch (value) + { + case ExternalMemoryFeatureFlagBits::eDedicatedOnly: return "DedicatedOnly"; + case ExternalMemoryFeatureFlagBits::eExportable: return "Exportable"; + case ExternalMemoryFeatureFlagBits::eImportable: return "Importable"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ExternalMemoryFeatureFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & 
ExternalMemoryFeatureFlagBits::eDedicatedOnly) result += "DedicatedOnly | "; + if (value & ExternalMemoryFeatureFlagBits::eExportable) result += "Exportable | "; + if (value & ExternalMemoryFeatureFlagBits::eImportable) result += "Importable | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ExternalSemaphoreHandleTypeFlagBits value) + { + switch (value) + { + case ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd: return "OpaqueFd"; + case ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32: return "OpaqueWin32"; + case ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32Kmt: return "OpaqueWin32Kmt"; + case ExternalSemaphoreHandleTypeFlagBits::eD3D12Fence: return "D3D12Fence"; + case ExternalSemaphoreHandleTypeFlagBits::eSyncFd: return "SyncFd"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ExternalSemaphoreHandleTypeFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd) result += "OpaqueFd | "; + if (value & ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32) result += "OpaqueWin32 | "; + if (value & ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32Kmt) result += "OpaqueWin32Kmt | "; + if (value & ExternalSemaphoreHandleTypeFlagBits::eD3D12Fence) result += "D3D12Fence | "; + if (value & ExternalSemaphoreHandleTypeFlagBits::eSyncFd) result += "SyncFd | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ExternalSemaphoreFeatureFlagBits value) + { + switch (value) + { + case ExternalSemaphoreFeatureFlagBits::eExportable: return "Exportable"; + case ExternalSemaphoreFeatureFlagBits::eImportable: return "Importable"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ExternalSemaphoreFeatureFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ExternalSemaphoreFeatureFlagBits::eExportable) result += "Exportable | "; + if (value & ExternalSemaphoreFeatureFlagBits::eImportable) result += "Importable | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(SemaphoreImportFlagBits value) + { + switch (value) + { + case SemaphoreImportFlagBits::eTemporary: return "Temporary"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SemaphoreImportFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & SemaphoreImportFlagBits::eTemporary) result += "Temporary | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ExternalFenceHandleTypeFlagBits value) + { + switch (value) + { + case ExternalFenceHandleTypeFlagBits::eOpaqueFd: return "OpaqueFd"; + case ExternalFenceHandleTypeFlagBits::eOpaqueWin32: return "OpaqueWin32"; + case ExternalFenceHandleTypeFlagBits::eOpaqueWin32Kmt: return "OpaqueWin32Kmt"; + case ExternalFenceHandleTypeFlagBits::eSyncFd: return "SyncFd"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ExternalFenceHandleTypeFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ExternalFenceHandleTypeFlagBits::eOpaqueFd) result += "OpaqueFd | "; + if (value & ExternalFenceHandleTypeFlagBits::eOpaqueWin32) result += "OpaqueWin32 | "; + if (value & ExternalFenceHandleTypeFlagBits::eOpaqueWin32Kmt) result += "OpaqueWin32Kmt | "; + if (value & ExternalFenceHandleTypeFlagBits::eSyncFd) result += "SyncFd | "; + 
return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ExternalFenceFeatureFlagBits value) + { + switch (value) + { + case ExternalFenceFeatureFlagBits::eExportable: return "Exportable"; + case ExternalFenceFeatureFlagBits::eImportable: return "Importable"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ExternalFenceFeatureFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & ExternalFenceFeatureFlagBits::eExportable) result += "Exportable | "; + if (value & ExternalFenceFeatureFlagBits::eImportable) result += "Importable | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(FenceImportFlagBits value) + { + switch (value) + { + case FenceImportFlagBits::eTemporary: return "Temporary"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(FenceImportFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & FenceImportFlagBits::eTemporary) result += "Temporary | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(SurfaceCounterFlagBitsEXT value) + { + switch (value) + { + case SurfaceCounterFlagBitsEXT::eVblank: return "Vblank"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SurfaceCounterFlagsEXT value) + { + if (!value) return "{}"; + std::string result; + if (value & SurfaceCounterFlagBitsEXT::eVblank) result += "Vblank | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(DisplayPowerStateEXT value) + { + switch (value) + { + case DisplayPowerStateEXT::eOff: return "Off"; + case DisplayPowerStateEXT::eSuspend: return "Suspend"; + case DisplayPowerStateEXT::eOn: return "On"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DeviceEventTypeEXT value) + { + switch (value) + { + case DeviceEventTypeEXT::eDisplayHotplug: return "DisplayHotplug"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DisplayEventTypeEXT value) + { + switch (value) + { + case DisplayEventTypeEXT::eFirstPixelOut: return "FirstPixelOut"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(PeerMemoryFeatureFlagBits value) + { + switch (value) + { + case PeerMemoryFeatureFlagBits::eCopySrc: return "CopySrc"; + case PeerMemoryFeatureFlagBits::eCopyDst: return "CopyDst"; + case PeerMemoryFeatureFlagBits::eGenericSrc: return "GenericSrc"; + case PeerMemoryFeatureFlagBits::eGenericDst: return "GenericDst"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(PeerMemoryFeatureFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & PeerMemoryFeatureFlagBits::eCopySrc) result += "CopySrc | "; + if (value & PeerMemoryFeatureFlagBits::eCopyDst) result += "CopyDst | "; + if (value & PeerMemoryFeatureFlagBits::eGenericSrc) result += "GenericSrc | "; + if (value & PeerMemoryFeatureFlagBits::eGenericDst) result += "GenericDst | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(MemoryAllocateFlagBits value) + { + switch (value) + { + case MemoryAllocateFlagBits::eDeviceMask: return "DeviceMask"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(MemoryAllocateFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & 
MemoryAllocateFlagBits::eDeviceMask) result += "DeviceMask | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(DeviceGroupPresentModeFlagBitsKHR value) + { + switch (value) + { + case DeviceGroupPresentModeFlagBitsKHR::eLocal: return "Local"; + case DeviceGroupPresentModeFlagBitsKHR::eRemote: return "Remote"; + case DeviceGroupPresentModeFlagBitsKHR::eSum: return "Sum"; + case DeviceGroupPresentModeFlagBitsKHR::eLocalMultiDevice: return "LocalMultiDevice"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DeviceGroupPresentModeFlagsKHR value) + { + if (!value) return "{}"; + std::string result; + if (value & DeviceGroupPresentModeFlagBitsKHR::eLocal) result += "Local | "; + if (value & DeviceGroupPresentModeFlagBitsKHR::eRemote) result += "Remote | "; + if (value & DeviceGroupPresentModeFlagBitsKHR::eSum) result += "Sum | "; + if (value & DeviceGroupPresentModeFlagBitsKHR::eLocalMultiDevice) result += "LocalMultiDevice | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(SwapchainCreateFlagBitsKHR value) + { + switch (value) + { + case SwapchainCreateFlagBitsKHR::eSplitInstanceBindRegions: return "SplitInstanceBindRegions"; + case SwapchainCreateFlagBitsKHR::eProtected: return "Protected"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SwapchainCreateFlagsKHR value) + { + if (!value) return "{}"; + std::string result; + if (value & SwapchainCreateFlagBitsKHR::eSplitInstanceBindRegions) result += "SplitInstanceBindRegions | "; + if (value & SwapchainCreateFlagBitsKHR::eProtected) result += "Protected | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ViewportCoordinateSwizzleNV value) + { + switch (value) + { + case ViewportCoordinateSwizzleNV::ePositiveX: return "PositiveX"; + case ViewportCoordinateSwizzleNV::eNegativeX: return "NegativeX"; + case ViewportCoordinateSwizzleNV::ePositiveY: return "PositiveY"; + case ViewportCoordinateSwizzleNV::eNegativeY: return "NegativeY"; + case ViewportCoordinateSwizzleNV::ePositiveZ: return "PositiveZ"; + case ViewportCoordinateSwizzleNV::eNegativeZ: return "NegativeZ"; + case ViewportCoordinateSwizzleNV::ePositiveW: return "PositiveW"; + case ViewportCoordinateSwizzleNV::eNegativeW: return "NegativeW"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DiscardRectangleModeEXT value) + { + switch (value) + { + case DiscardRectangleModeEXT::eInclusive: return "Inclusive"; + case DiscardRectangleModeEXT::eExclusive: return "Exclusive"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SubpassDescriptionFlagBits value) + { + switch (value) + { + case SubpassDescriptionFlagBits::ePerViewAttributesNVX: return "PerViewAttributesNVX"; + case SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX: return "PerViewPositionXOnlyNVX"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SubpassDescriptionFlags value) + { + if (!value) return "{}"; + std::string result; + if (value & SubpassDescriptionFlagBits::ePerViewAttributesNVX) result += "PerViewAttributesNVX | "; + if (value & SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX) result += "PerViewPositionXOnlyNVX | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(PointClippingBehavior value) + { + switch (value) + { + case 
PointClippingBehavior::eAllClipPlanes: return "AllClipPlanes"; + case PointClippingBehavior::eUserClipPlanesOnly: return "UserClipPlanesOnly"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SamplerReductionModeEXT value) + { + switch (value) + { + case SamplerReductionModeEXT::eWeightedAverage: return "WeightedAverage"; + case SamplerReductionModeEXT::eMin: return "Min"; + case SamplerReductionModeEXT::eMax: return "Max"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(TessellationDomainOrigin value) + { + switch (value) + { + case TessellationDomainOrigin::eUpperLeft: return "UpperLeft"; + case TessellationDomainOrigin::eLowerLeft: return "LowerLeft"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SamplerYcbcrModelConversion value) + { + switch (value) + { + case SamplerYcbcrModelConversion::eRgbIdentity: return "RgbIdentity"; + case SamplerYcbcrModelConversion::eYcbcrIdentity: return "YcbcrIdentity"; + case SamplerYcbcrModelConversion::eYcbcr709: return "Ycbcr709"; + case SamplerYcbcrModelConversion::eYcbcr601: return "Ycbcr601"; + case SamplerYcbcrModelConversion::eYcbcr2020: return "Ycbcr2020"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(SamplerYcbcrRange value) + { + switch (value) + { + case SamplerYcbcrRange::eItuFull: return "ItuFull"; + case SamplerYcbcrRange::eItuNarrow: return "ItuNarrow"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ChromaLocation value) + { + switch (value) + { + case ChromaLocation::eCositedEven: return "CositedEven"; + case ChromaLocation::eMidpoint: return "Midpoint"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(BlendOverlapEXT value) + { + switch (value) + { + case BlendOverlapEXT::eUncorrelated: return "Uncorrelated"; + case BlendOverlapEXT::eDisjoint: return "Disjoint"; + case BlendOverlapEXT::eConjoint: return "Conjoint"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CoverageModulationModeNV value) + { + switch (value) + { + case CoverageModulationModeNV::eNone: return "None"; + case CoverageModulationModeNV::eRgb: return "Rgb"; + case CoverageModulationModeNV::eAlpha: return "Alpha"; + case CoverageModulationModeNV::eRgba: return "Rgba"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ValidationCacheHeaderVersionEXT value) + { + switch (value) + { + case ValidationCacheHeaderVersionEXT::eOne: return "One"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ShaderInfoTypeAMD value) + { + switch (value) + { + case ShaderInfoTypeAMD::eStatistics: return "Statistics"; + case ShaderInfoTypeAMD::eBinary: return "Binary"; + case ShaderInfoTypeAMD::eDisassembly: return "Disassembly"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(QueueGlobalPriorityEXT value) + { + switch (value) + { + case QueueGlobalPriorityEXT::eLow: return "Low"; + case QueueGlobalPriorityEXT::eMedium: return "Medium"; + case QueueGlobalPriorityEXT::eHigh: return "High"; + case QueueGlobalPriorityEXT::eRealtime: return "Realtime"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DebugUtilsMessageSeverityFlagBitsEXT value) + { + switch (value) + { + case DebugUtilsMessageSeverityFlagBitsEXT::eVerbose: return "Verbose"; + case DebugUtilsMessageSeverityFlagBitsEXT::eInfo: return "Info"; + case 
DebugUtilsMessageSeverityFlagBitsEXT::eWarning: return "Warning"; + case DebugUtilsMessageSeverityFlagBitsEXT::eError: return "Error"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DebugUtilsMessageSeverityFlagsEXT value) + { + if (!value) return "{}"; + std::string result; + if (value & DebugUtilsMessageSeverityFlagBitsEXT::eVerbose) result += "Verbose | "; + if (value & DebugUtilsMessageSeverityFlagBitsEXT::eInfo) result += "Info | "; + if (value & DebugUtilsMessageSeverityFlagBitsEXT::eWarning) result += "Warning | "; + if (value & DebugUtilsMessageSeverityFlagBitsEXT::eError) result += "Error | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(DebugUtilsMessageTypeFlagBitsEXT value) + { + switch (value) + { + case DebugUtilsMessageTypeFlagBitsEXT::eGeneral: return "General"; + case DebugUtilsMessageTypeFlagBitsEXT::eValidation: return "Validation"; + case DebugUtilsMessageTypeFlagBitsEXT::ePerformance: return "Performance"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DebugUtilsMessageTypeFlagsEXT value) + { + if (!value) return "{}"; + std::string result; + if (value & DebugUtilsMessageTypeFlagBitsEXT::eGeneral) result += "General | "; + if (value & DebugUtilsMessageTypeFlagBitsEXT::eValidation) result += "Validation | "; + if (value & DebugUtilsMessageTypeFlagBitsEXT::ePerformance) result += "Performance | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ConservativeRasterizationModeEXT value) + { + switch (value) + { + case ConservativeRasterizationModeEXT::eDisabled: return "Disabled"; + case ConservativeRasterizationModeEXT::eOverestimate: return "Overestimate"; + case ConservativeRasterizationModeEXT::eUnderestimate: return "Underestimate"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorBindingFlagBitsEXT value) + { + switch (value) + { + case DescriptorBindingFlagBitsEXT::eUpdateAfterBind: return "UpdateAfterBind"; + case DescriptorBindingFlagBitsEXT::eUpdateUnusedWhilePending: return "UpdateUnusedWhilePending"; + case DescriptorBindingFlagBitsEXT::ePartiallyBound: return "PartiallyBound"; + case DescriptorBindingFlagBitsEXT::eVariableDescriptorCount: return "VariableDescriptorCount"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(DescriptorBindingFlagsEXT value) + { + if (!value) return "{}"; + std::string result; + if (value & DescriptorBindingFlagBitsEXT::eUpdateAfterBind) result += "UpdateAfterBind | "; + if (value & DescriptorBindingFlagBitsEXT::eUpdateUnusedWhilePending) result += "UpdateUnusedWhilePending | "; + if (value & DescriptorBindingFlagBitsEXT::ePartiallyBound) result += "PartiallyBound | "; + if (value & DescriptorBindingFlagBitsEXT::eVariableDescriptorCount) result += "VariableDescriptorCount | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(VendorId value) + { + switch (value) + { + case VendorId::eViv: return "Viv"; + case VendorId::eVsi: return "Vsi"; + case VendorId::eKazan: return "Kazan"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ConditionalRenderingFlagBitsEXT value) + { + switch (value) + { + case ConditionalRenderingFlagBitsEXT::eInverted: return "Inverted"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(ConditionalRenderingFlagsEXT value) + { + if (!value) 
return "{}"; + std::string result; + if (value & ConditionalRenderingFlagBitsEXT::eInverted) result += "Inverted | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(ShadingRatePaletteEntryNV value) + { + switch (value) + { + case ShadingRatePaletteEntryNV::eNoInvocations: return "NoInvocations"; + case ShadingRatePaletteEntryNV::e16InvocationsPerPixel: return "16InvocationsPerPixel"; + case ShadingRatePaletteEntryNV::e8InvocationsPerPixel: return "8InvocationsPerPixel"; + case ShadingRatePaletteEntryNV::e4InvocationsPerPixel: return "4InvocationsPerPixel"; + case ShadingRatePaletteEntryNV::e2InvocationsPerPixel: return "2InvocationsPerPixel"; + case ShadingRatePaletteEntryNV::e1InvocationPerPixel: return "1InvocationPerPixel"; + case ShadingRatePaletteEntryNV::e1InvocationPer2X1Pixels: return "1InvocationPer2X1Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer1X2Pixels: return "1InvocationPer1X2Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer2X2Pixels: return "1InvocationPer2X2Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer4X2Pixels: return "1InvocationPer4X2Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer2X4Pixels: return "1InvocationPer2X4Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer4X4Pixels: return "1InvocationPer4X4Pixels"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(CoarseSampleOrderTypeNV value) + { + switch (value) + { + case CoarseSampleOrderTypeNV::eDefault: return "Default"; + case CoarseSampleOrderTypeNV::eCustom: return "Custom"; + case CoarseSampleOrderTypeNV::ePixelMajor: return "PixelMajor"; + case CoarseSampleOrderTypeNV::eSampleMajor: return "SampleMajor"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(GeometryInstanceFlagBitsNVX value) + { + switch (value) + { + case GeometryInstanceFlagBitsNVX::eTriangleCullDisable: return "TriangleCullDisable"; + case GeometryInstanceFlagBitsNVX::eTriangleCullFlipWinding: return "TriangleCullFlipWinding"; + case GeometryInstanceFlagBitsNVX::eForceOpaque: return "ForceOpaque"; + case GeometryInstanceFlagBitsNVX::eForceNoOpaque: return "ForceNoOpaque"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(GeometryInstanceFlagsNVX value) + { + if (!value) return "{}"; + std::string result; + if (value & GeometryInstanceFlagBitsNVX::eTriangleCullDisable) result += "TriangleCullDisable | "; + if (value & GeometryInstanceFlagBitsNVX::eTriangleCullFlipWinding) result += "TriangleCullFlipWinding | "; + if (value & GeometryInstanceFlagBitsNVX::eForceOpaque) result += "ForceOpaque | "; + if (value & GeometryInstanceFlagBitsNVX::eForceNoOpaque) result += "ForceNoOpaque | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(GeometryFlagBitsNVX value) + { + switch (value) + { + case GeometryFlagBitsNVX::eOpaque: return "Opaque"; + case GeometryFlagBitsNVX::eNoDuplicateAnyHitInvocation: return "NoDuplicateAnyHitInvocation"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(GeometryFlagsNVX value) + { + if (!value) return "{}"; + std::string result; + if (value & GeometryFlagBitsNVX::eOpaque) result += "Opaque | "; + if (value & GeometryFlagBitsNVX::eNoDuplicateAnyHitInvocation) result += "NoDuplicateAnyHitInvocation | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string 
to_string(BuildAccelerationStructureFlagBitsNVX value) + { + switch (value) + { + case BuildAccelerationStructureFlagBitsNVX::eAllowUpdate: return "AllowUpdate"; + case BuildAccelerationStructureFlagBitsNVX::eAllowCompaction: return "AllowCompaction"; + case BuildAccelerationStructureFlagBitsNVX::ePreferFastTrace: return "PreferFastTrace"; + case BuildAccelerationStructureFlagBitsNVX::ePreferFastBuild: return "PreferFastBuild"; + case BuildAccelerationStructureFlagBitsNVX::eLowMemory: return "LowMemory"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(BuildAccelerationStructureFlagsNVX value) + { + if (!value) return "{}"; + std::string result; + if (value & BuildAccelerationStructureFlagBitsNVX::eAllowUpdate) result += "AllowUpdate | "; + if (value & BuildAccelerationStructureFlagBitsNVX::eAllowCompaction) result += "AllowCompaction | "; + if (value & BuildAccelerationStructureFlagBitsNVX::ePreferFastTrace) result += "PreferFastTrace | "; + if (value & BuildAccelerationStructureFlagBitsNVX::ePreferFastBuild) result += "PreferFastBuild | "; + if (value & BuildAccelerationStructureFlagBitsNVX::eLowMemory) result += "LowMemory | "; + return "{" + result.substr(0, result.size() - 3) + "}"; + } + + VULKAN_HPP_INLINE std::string to_string(CopyAccelerationStructureModeNVX value) + { + switch (value) + { + case CopyAccelerationStructureModeNVX::eClone: return "Clone"; + case CopyAccelerationStructureModeNVX::eCompact: return "Compact"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(AccelerationStructureTypeNVX value) + { + switch (value) + { + case AccelerationStructureTypeNVX::eTopLevel: return "TopLevel"; + case AccelerationStructureTypeNVX::eBottomLevel: return "BottomLevel"; + default: return "invalid"; + } + } + + VULKAN_HPP_INLINE std::string to_string(GeometryTypeNVX value) + { + switch (value) + { + case GeometryTypeNVX::eTriangles: return "Triangles"; + case GeometryTypeNVX::eAabbs: return "Aabbs"; + default: return "invalid"; + } + } + + class DispatchLoaderDynamic + { + public: + PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR = 0; + PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR = 0; +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT = 0; +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers = 0; + PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets = 0; + PFN_vkAllocateMemory vkAllocateMemory = 0; + PFN_vkBeginCommandBuffer vkBeginCommandBuffer = 0; + PFN_vkBindAccelerationStructureMemoryNVX vkBindAccelerationStructureMemoryNVX = 0; + PFN_vkBindBufferMemory vkBindBufferMemory = 0; + PFN_vkBindBufferMemory2 vkBindBufferMemory2 = 0; + PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR = 0; + PFN_vkBindImageMemory vkBindImageMemory = 0; + PFN_vkBindImageMemory2 vkBindImageMemory2 = 0; + PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR = 0; + PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT = 0; + PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT = 0; + PFN_vkCmdBeginQuery vkCmdBeginQuery = 0; + PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass = 0; + PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR = 0; + PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets = 0; + PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer = 0; + PFN_vkCmdBindPipeline vkCmdBindPipeline = 0; + PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV = 0; + PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers = 0; + 
PFN_vkCmdBlitImage vkCmdBlitImage = 0; + PFN_vkCmdBuildAccelerationStructureNVX vkCmdBuildAccelerationStructureNVX = 0; + PFN_vkCmdClearAttachments vkCmdClearAttachments = 0; + PFN_vkCmdClearColorImage vkCmdClearColorImage = 0; + PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage = 0; + PFN_vkCmdCopyAccelerationStructureNVX vkCmdCopyAccelerationStructureNVX = 0; + PFN_vkCmdCopyBuffer vkCmdCopyBuffer = 0; + PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage = 0; + PFN_vkCmdCopyImage vkCmdCopyImage = 0; + PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer = 0; + PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults = 0; + PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT = 0; + PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT = 0; + PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT = 0; + PFN_vkCmdDispatch vkCmdDispatch = 0; + PFN_vkCmdDispatchBase vkCmdDispatchBase = 0; + PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR = 0; + PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect = 0; + PFN_vkCmdDraw vkCmdDraw = 0; + PFN_vkCmdDrawIndexed vkCmdDrawIndexed = 0; + PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect = 0; + PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD = 0; + PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR = 0; + PFN_vkCmdDrawIndirect vkCmdDrawIndirect = 0; + PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD = 0; + PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR = 0; + PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV = 0; + PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV = 0; + PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV = 0; + PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT = 0; + PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT = 0; + PFN_vkCmdEndQuery vkCmdEndQuery = 0; + PFN_vkCmdEndRenderPass vkCmdEndRenderPass = 0; + PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR = 0; + PFN_vkCmdExecuteCommands vkCmdExecuteCommands = 0; + PFN_vkCmdFillBuffer vkCmdFillBuffer = 0; + PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT = 0; + PFN_vkCmdNextSubpass vkCmdNextSubpass = 0; + PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR = 0; + PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier = 0; + PFN_vkCmdProcessCommandsNVX vkCmdProcessCommandsNVX = 0; + PFN_vkCmdPushConstants vkCmdPushConstants = 0; + PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR = 0; + PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR = 0; + PFN_vkCmdReserveSpaceForCommandsNVX vkCmdReserveSpaceForCommandsNVX = 0; + PFN_vkCmdResetEvent vkCmdResetEvent = 0; + PFN_vkCmdResetQueryPool vkCmdResetQueryPool = 0; + PFN_vkCmdResolveImage vkCmdResolveImage = 0; + PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants = 0; + PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV = 0; + PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV = 0; + PFN_vkCmdSetDepthBias vkCmdSetDepthBias = 0; + PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds = 0; + PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask = 0; + PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR = 0; + PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT = 0; + PFN_vkCmdSetEvent vkCmdSetEvent = 0; + PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV = 0; + PFN_vkCmdSetLineWidth vkCmdSetLineWidth = 0; + PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT = 0; + PFN_vkCmdSetScissor vkCmdSetScissor = 0; + PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask = 0; + PFN_vkCmdSetStencilReference 
vkCmdSetStencilReference = 0; + PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask = 0; + PFN_vkCmdSetViewport vkCmdSetViewport = 0; + PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV = 0; + PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV = 0; + PFN_vkCmdTraceRaysNVX vkCmdTraceRaysNVX = 0; + PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer = 0; + PFN_vkCmdWaitEvents vkCmdWaitEvents = 0; + PFN_vkCmdWriteAccelerationStructurePropertiesNVX vkCmdWriteAccelerationStructurePropertiesNVX = 0; + PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD = 0; + PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp = 0; + PFN_vkCompileDeferredNVX vkCompileDeferredNVX = 0; + PFN_vkCreateAccelerationStructureNVX vkCreateAccelerationStructureNVX = 0; +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + PFN_vkCreateBuffer vkCreateBuffer = 0; + PFN_vkCreateBufferView vkCreateBufferView = 0; + PFN_vkCreateCommandPool vkCreateCommandPool = 0; + PFN_vkCreateComputePipelines vkCreateComputePipelines = 0; + PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT = 0; + PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT = 0; + PFN_vkCreateDescriptorPool vkCreateDescriptorPool = 0; + PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout = 0; + PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate = 0; + PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR = 0; + PFN_vkCreateDevice vkCreateDevice = 0; + PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR = 0; + PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR = 0; + PFN_vkCreateEvent vkCreateEvent = 0; + PFN_vkCreateFence vkCreateFence = 0; + PFN_vkCreateFramebuffer vkCreateFramebuffer = 0; + PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines = 0; +#ifdef VK_USE_PLATFORM_IOS_MVK + PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK = 0; +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + PFN_vkCreateImage vkCreateImage = 0; + PFN_vkCreateImageView vkCreateImageView = 0; + PFN_vkCreateIndirectCommandsLayoutNVX vkCreateIndirectCommandsLayoutNVX = 0; + PFN_vkCreateInstance vkCreateInstance = 0; +#ifdef VK_USE_PLATFORM_MACOS_MVK + PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK = 0; +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ +#ifdef VK_USE_PLATFORM_MIR_KHR + PFN_vkCreateMirSurfaceKHR vkCreateMirSurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + PFN_vkCreateObjectTableNVX vkCreateObjectTableNVX = 0; + PFN_vkCreatePipelineCache vkCreatePipelineCache = 0; + PFN_vkCreatePipelineLayout vkCreatePipelineLayout = 0; + PFN_vkCreateQueryPool vkCreateQueryPool = 0; + PFN_vkCreateRaytracingPipelinesNVX vkCreateRaytracingPipelinesNVX = 0; + PFN_vkCreateRenderPass vkCreateRenderPass = 0; + PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = 0; + PFN_vkCreateSampler vkCreateSampler = 0; + PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion = 0; + PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR = 0; + PFN_vkCreateSemaphore vkCreateSemaphore = 0; + PFN_vkCreateShaderModule vkCreateShaderModule = 0; + PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR = 0; + PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR = 0; + PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT = 0; +#ifdef VK_USE_PLATFORM_VI_NN + PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN = 0; +#endif /*VK_USE_PLATFORM_VI_NN*/ +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + PFN_vkCreateWaylandSurfaceKHR 
vkCreateWaylandSurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT = 0; + PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT = 0; + PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT = 0; + PFN_vkDestroyAccelerationStructureNVX vkDestroyAccelerationStructureNVX = 0; + PFN_vkDestroyBuffer vkDestroyBuffer = 0; + PFN_vkDestroyBufferView vkDestroyBufferView = 0; + PFN_vkDestroyCommandPool vkDestroyCommandPool = 0; + PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT = 0; + PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT = 0; + PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool = 0; + PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout = 0; + PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate = 0; + PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR = 0; + PFN_vkDestroyDevice vkDestroyDevice = 0; + PFN_vkDestroyEvent vkDestroyEvent = 0; + PFN_vkDestroyFence vkDestroyFence = 0; + PFN_vkDestroyFramebuffer vkDestroyFramebuffer = 0; + PFN_vkDestroyImage vkDestroyImage = 0; + PFN_vkDestroyImageView vkDestroyImageView = 0; + PFN_vkDestroyIndirectCommandsLayoutNVX vkDestroyIndirectCommandsLayoutNVX = 0; + PFN_vkDestroyInstance vkDestroyInstance = 0; + PFN_vkDestroyObjectTableNVX vkDestroyObjectTableNVX = 0; + PFN_vkDestroyPipeline vkDestroyPipeline = 0; + PFN_vkDestroyPipelineCache vkDestroyPipelineCache = 0; + PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout = 0; + PFN_vkDestroyQueryPool vkDestroyQueryPool = 0; + PFN_vkDestroyRenderPass vkDestroyRenderPass = 0; + PFN_vkDestroySampler vkDestroySampler = 0; + PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion = 0; + PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR = 0; + PFN_vkDestroySemaphore vkDestroySemaphore = 0; + PFN_vkDestroyShaderModule vkDestroyShaderModule = 0; + PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR = 0; + PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR = 0; + PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT = 0; + PFN_vkDeviceWaitIdle vkDeviceWaitIdle = 0; + PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT = 0; + PFN_vkEndCommandBuffer vkEndCommandBuffer = 0; + PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties = 0; + PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties = 0; + PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties = 0; + PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties = 0; + PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion = 0; + PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups = 0; + PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR = 0; + PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices = 0; + PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges = 0; + PFN_vkFreeCommandBuffers vkFreeCommandBuffers = 0; + PFN_vkFreeDescriptorSets vkFreeDescriptorSets = 0; + PFN_vkFreeMemory vkFreeMemory = 0; + PFN_vkGetAccelerationStructureHandleNVX 
vkGetAccelerationStructureHandleNVX = 0; + PFN_vkGetAccelerationStructureMemoryRequirementsNVX vkGetAccelerationStructureMemoryRequirementsNVX = 0; + PFN_vkGetAccelerationStructureScratchMemoryRequirementsNVX vkGetAccelerationStructureScratchMemoryRequirementsNVX = 0; +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID = 0; +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements = 0; + PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2 = 0; + PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR = 0; + PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport = 0; + PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR = 0; + PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures = 0; + PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR = 0; + PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR = 0; + PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR = 0; + PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment = 0; + PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr = 0; + PFN_vkGetDeviceQueue vkGetDeviceQueue = 0; + PFN_vkGetDeviceQueue2 vkGetDeviceQueue2 = 0; + PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR = 0; + PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR = 0; + PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR = 0; + PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR = 0; + PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR = 0; + PFN_vkGetEventStatus vkGetEventStatus = 0; + PFN_vkGetFenceFdKHR vkGetFenceFdKHR = 0; + PFN_vkGetFenceStatus vkGetFenceStatus = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements = 0; + PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2 = 0; + PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR = 0; + PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements = 0; + PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2 = 0; + PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR = 0; + PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout = 0; + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = 0; +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID = 0; +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR = 0; + PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR = 0; + PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_NV + PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV = 0; +#endif /*VK_USE_PLATFORM_WIN32_NV*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE = 0; + PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR 
vkGetPhysicalDeviceDisplayPlaneProperties2KHR = 0; + PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR = 0; + PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR = 0; + PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR = 0; + PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties = 0; + PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR = 0; + PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties = 0; + PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR = 0; + PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV = 0; + PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties = 0; + PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = 0; + PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures = 0; + PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 = 0; + PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = 0; + PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties = 0; + PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2 = 0; + PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR = 0; + PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX = 0; + PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties = 0; + PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2 = 0; + PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR = 0; + PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties = 0; + PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2 = 0; + PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR = 0; +#ifdef VK_USE_PLATFORM_MIR_KHR + PFN_vkGetPhysicalDeviceMirPresentationSupportKHR vkGetPhysicalDeviceMirPresentationSupportKHR = 0; +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT = 0; + PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR = 0; + PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties = 0; + PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2 = 0; + PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = 0; + PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties = 0; + PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2 = 0; + PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR = 0; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties = 0; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2 = 0; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR = 0; + PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT = 0; + PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR 
vkGetPhysicalDeviceSurfaceCapabilities2KHR = 0; + PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR = 0; + PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR = 0; + PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR = 0; + PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR = 0; + PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR = 0; +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR = 0; +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR = 0; +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR = 0; +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + PFN_vkGetPipelineCacheData vkGetPipelineCacheData = 0; + PFN_vkGetQueryPoolResults vkGetQueryPoolResults = 0; + PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV = 0; +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT = 0; +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + PFN_vkGetRaytracingShaderHandlesNVX vkGetRaytracingShaderHandlesNVX = 0; + PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE = 0; + PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity = 0; + PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD = 0; + PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT = 0; + PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR = 0; + PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR = 0; + PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT = 0; + PFN_vkImportFenceFdKHR vkImportFenceFdKHR = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges = 0; + PFN_vkMapMemory vkMapMemory = 0; + PFN_vkMergePipelineCaches vkMergePipelineCaches = 0; + PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT = 0; + PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT = 0; + PFN_vkQueueBindSparse vkQueueBindSparse = 0; + PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT = 0; + PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT = 0; + PFN_vkQueuePresentKHR vkQueuePresentKHR = 0; + PFN_vkQueueSubmit vkQueueSubmit = 0; + PFN_vkQueueWaitIdle vkQueueWaitIdle = 0; + PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT = 0; + PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT = 0; + PFN_vkRegisterObjectsNVX vkRegisterObjectsNVX = 0; + PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT = 0; + PFN_vkResetCommandBuffer vkResetCommandBuffer = 0; + PFN_vkResetCommandPool 
vkResetCommandPool = 0; + PFN_vkResetDescriptorPool vkResetDescriptorPool = 0; + PFN_vkResetEvent vkResetEvent = 0; + PFN_vkResetFences vkResetFences = 0; + PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT = 0; + PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT = 0; + PFN_vkSetEvent vkSetEvent = 0; + PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT = 0; + PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT = 0; + PFN_vkTrimCommandPool vkTrimCommandPool = 0; + PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR = 0; + PFN_vkUnmapMemory vkUnmapMemory = 0; + PFN_vkUnregisterObjectsNVX vkUnregisterObjectsNVX = 0; + PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate = 0; + PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR = 0; + PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets = 0; + PFN_vkWaitForFences vkWaitForFences = 0; + public: + DispatchLoaderDynamic(Instance instance = Instance(), Device device = Device()) + { + if (instance) + { + init(instance, device); + } + } + + void init(Instance instance, Device device = Device()) + { + vkAcquireNextImage2KHR = PFN_vkAcquireNextImage2KHR(device ? device.getProcAddr( "vkAcquireNextImage2KHR") : instance.getProcAddr( "vkAcquireNextImage2KHR")); + vkAcquireNextImageKHR = PFN_vkAcquireNextImageKHR(device ? device.getProcAddr( "vkAcquireNextImageKHR") : instance.getProcAddr( "vkAcquireNextImageKHR")); +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + vkAcquireXlibDisplayEXT = PFN_vkAcquireXlibDisplayEXT(device ? device.getProcAddr( "vkAcquireXlibDisplayEXT") : instance.getProcAddr( "vkAcquireXlibDisplayEXT")); +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + vkAllocateCommandBuffers = PFN_vkAllocateCommandBuffers(device ? device.getProcAddr( "vkAllocateCommandBuffers") : instance.getProcAddr( "vkAllocateCommandBuffers")); + vkAllocateDescriptorSets = PFN_vkAllocateDescriptorSets(device ? device.getProcAddr( "vkAllocateDescriptorSets") : instance.getProcAddr( "vkAllocateDescriptorSets")); + vkAllocateMemory = PFN_vkAllocateMemory(device ? device.getProcAddr( "vkAllocateMemory") : instance.getProcAddr( "vkAllocateMemory")); + vkBeginCommandBuffer = PFN_vkBeginCommandBuffer(device ? device.getProcAddr( "vkBeginCommandBuffer") : instance.getProcAddr( "vkBeginCommandBuffer")); + vkBindAccelerationStructureMemoryNVX = PFN_vkBindAccelerationStructureMemoryNVX(device ? device.getProcAddr( "vkBindAccelerationStructureMemoryNVX") : instance.getProcAddr( "vkBindAccelerationStructureMemoryNVX")); + vkBindBufferMemory = PFN_vkBindBufferMemory(device ? device.getProcAddr( "vkBindBufferMemory") : instance.getProcAddr( "vkBindBufferMemory")); + vkBindBufferMemory2 = PFN_vkBindBufferMemory2(device ? device.getProcAddr( "vkBindBufferMemory2") : instance.getProcAddr( "vkBindBufferMemory2")); + vkBindBufferMemory2KHR = PFN_vkBindBufferMemory2KHR(device ? device.getProcAddr( "vkBindBufferMemory2KHR") : instance.getProcAddr( "vkBindBufferMemory2KHR")); + vkBindImageMemory = PFN_vkBindImageMemory(device ? device.getProcAddr( "vkBindImageMemory") : instance.getProcAddr( "vkBindImageMemory")); + vkBindImageMemory2 = PFN_vkBindImageMemory2(device ? device.getProcAddr( "vkBindImageMemory2") : instance.getProcAddr( "vkBindImageMemory2")); + vkBindImageMemory2KHR = PFN_vkBindImageMemory2KHR(device ? device.getProcAddr( "vkBindImageMemory2KHR") : instance.getProcAddr( "vkBindImageMemory2KHR")); + vkCmdBeginConditionalRenderingEXT = PFN_vkCmdBeginConditionalRenderingEXT(device ? 
device.getProcAddr( "vkCmdBeginConditionalRenderingEXT") : instance.getProcAddr( "vkCmdBeginConditionalRenderingEXT")); + vkCmdBeginDebugUtilsLabelEXT = PFN_vkCmdBeginDebugUtilsLabelEXT(device ? device.getProcAddr( "vkCmdBeginDebugUtilsLabelEXT") : instance.getProcAddr( "vkCmdBeginDebugUtilsLabelEXT")); + vkCmdBeginQuery = PFN_vkCmdBeginQuery(device ? device.getProcAddr( "vkCmdBeginQuery") : instance.getProcAddr( "vkCmdBeginQuery")); + vkCmdBeginRenderPass = PFN_vkCmdBeginRenderPass(device ? device.getProcAddr( "vkCmdBeginRenderPass") : instance.getProcAddr( "vkCmdBeginRenderPass")); + vkCmdBeginRenderPass2KHR = PFN_vkCmdBeginRenderPass2KHR(device ? device.getProcAddr( "vkCmdBeginRenderPass2KHR") : instance.getProcAddr( "vkCmdBeginRenderPass2KHR")); + vkCmdBindDescriptorSets = PFN_vkCmdBindDescriptorSets(device ? device.getProcAddr( "vkCmdBindDescriptorSets") : instance.getProcAddr( "vkCmdBindDescriptorSets")); + vkCmdBindIndexBuffer = PFN_vkCmdBindIndexBuffer(device ? device.getProcAddr( "vkCmdBindIndexBuffer") : instance.getProcAddr( "vkCmdBindIndexBuffer")); + vkCmdBindPipeline = PFN_vkCmdBindPipeline(device ? device.getProcAddr( "vkCmdBindPipeline") : instance.getProcAddr( "vkCmdBindPipeline")); + vkCmdBindShadingRateImageNV = PFN_vkCmdBindShadingRateImageNV(device ? device.getProcAddr( "vkCmdBindShadingRateImageNV") : instance.getProcAddr( "vkCmdBindShadingRateImageNV")); + vkCmdBindVertexBuffers = PFN_vkCmdBindVertexBuffers(device ? device.getProcAddr( "vkCmdBindVertexBuffers") : instance.getProcAddr( "vkCmdBindVertexBuffers")); + vkCmdBlitImage = PFN_vkCmdBlitImage(device ? device.getProcAddr( "vkCmdBlitImage") : instance.getProcAddr( "vkCmdBlitImage")); + vkCmdBuildAccelerationStructureNVX = PFN_vkCmdBuildAccelerationStructureNVX(device ? device.getProcAddr( "vkCmdBuildAccelerationStructureNVX") : instance.getProcAddr( "vkCmdBuildAccelerationStructureNVX")); + vkCmdClearAttachments = PFN_vkCmdClearAttachments(device ? device.getProcAddr( "vkCmdClearAttachments") : instance.getProcAddr( "vkCmdClearAttachments")); + vkCmdClearColorImage = PFN_vkCmdClearColorImage(device ? device.getProcAddr( "vkCmdClearColorImage") : instance.getProcAddr( "vkCmdClearColorImage")); + vkCmdClearDepthStencilImage = PFN_vkCmdClearDepthStencilImage(device ? device.getProcAddr( "vkCmdClearDepthStencilImage") : instance.getProcAddr( "vkCmdClearDepthStencilImage")); + vkCmdCopyAccelerationStructureNVX = PFN_vkCmdCopyAccelerationStructureNVX(device ? device.getProcAddr( "vkCmdCopyAccelerationStructureNVX") : instance.getProcAddr( "vkCmdCopyAccelerationStructureNVX")); + vkCmdCopyBuffer = PFN_vkCmdCopyBuffer(device ? device.getProcAddr( "vkCmdCopyBuffer") : instance.getProcAddr( "vkCmdCopyBuffer")); + vkCmdCopyBufferToImage = PFN_vkCmdCopyBufferToImage(device ? device.getProcAddr( "vkCmdCopyBufferToImage") : instance.getProcAddr( "vkCmdCopyBufferToImage")); + vkCmdCopyImage = PFN_vkCmdCopyImage(device ? device.getProcAddr( "vkCmdCopyImage") : instance.getProcAddr( "vkCmdCopyImage")); + vkCmdCopyImageToBuffer = PFN_vkCmdCopyImageToBuffer(device ? device.getProcAddr( "vkCmdCopyImageToBuffer") : instance.getProcAddr( "vkCmdCopyImageToBuffer")); + vkCmdCopyQueryPoolResults = PFN_vkCmdCopyQueryPoolResults(device ? device.getProcAddr( "vkCmdCopyQueryPoolResults") : instance.getProcAddr( "vkCmdCopyQueryPoolResults")); + vkCmdDebugMarkerBeginEXT = PFN_vkCmdDebugMarkerBeginEXT(device ? 
device.getProcAddr( "vkCmdDebugMarkerBeginEXT") : instance.getProcAddr( "vkCmdDebugMarkerBeginEXT")); + vkCmdDebugMarkerEndEXT = PFN_vkCmdDebugMarkerEndEXT(device ? device.getProcAddr( "vkCmdDebugMarkerEndEXT") : instance.getProcAddr( "vkCmdDebugMarkerEndEXT")); + vkCmdDebugMarkerInsertEXT = PFN_vkCmdDebugMarkerInsertEXT(device ? device.getProcAddr( "vkCmdDebugMarkerInsertEXT") : instance.getProcAddr( "vkCmdDebugMarkerInsertEXT")); + vkCmdDispatch = PFN_vkCmdDispatch(device ? device.getProcAddr( "vkCmdDispatch") : instance.getProcAddr( "vkCmdDispatch")); + vkCmdDispatchBase = PFN_vkCmdDispatchBase(device ? device.getProcAddr( "vkCmdDispatchBase") : instance.getProcAddr( "vkCmdDispatchBase")); + vkCmdDispatchBaseKHR = PFN_vkCmdDispatchBaseKHR(device ? device.getProcAddr( "vkCmdDispatchBaseKHR") : instance.getProcAddr( "vkCmdDispatchBaseKHR")); + vkCmdDispatchIndirect = PFN_vkCmdDispatchIndirect(device ? device.getProcAddr( "vkCmdDispatchIndirect") : instance.getProcAddr( "vkCmdDispatchIndirect")); + vkCmdDraw = PFN_vkCmdDraw(device ? device.getProcAddr( "vkCmdDraw") : instance.getProcAddr( "vkCmdDraw")); + vkCmdDrawIndexed = PFN_vkCmdDrawIndexed(device ? device.getProcAddr( "vkCmdDrawIndexed") : instance.getProcAddr( "vkCmdDrawIndexed")); + vkCmdDrawIndexedIndirect = PFN_vkCmdDrawIndexedIndirect(device ? device.getProcAddr( "vkCmdDrawIndexedIndirect") : instance.getProcAddr( "vkCmdDrawIndexedIndirect")); + vkCmdDrawIndexedIndirectCountAMD = PFN_vkCmdDrawIndexedIndirectCountAMD(device ? device.getProcAddr( "vkCmdDrawIndexedIndirectCountAMD") : instance.getProcAddr( "vkCmdDrawIndexedIndirectCountAMD")); + vkCmdDrawIndexedIndirectCountKHR = PFN_vkCmdDrawIndexedIndirectCountKHR(device ? device.getProcAddr( "vkCmdDrawIndexedIndirectCountKHR") : instance.getProcAddr( "vkCmdDrawIndexedIndirectCountKHR")); + vkCmdDrawIndirect = PFN_vkCmdDrawIndirect(device ? device.getProcAddr( "vkCmdDrawIndirect") : instance.getProcAddr( "vkCmdDrawIndirect")); + vkCmdDrawIndirectCountAMD = PFN_vkCmdDrawIndirectCountAMD(device ? device.getProcAddr( "vkCmdDrawIndirectCountAMD") : instance.getProcAddr( "vkCmdDrawIndirectCountAMD")); + vkCmdDrawIndirectCountKHR = PFN_vkCmdDrawIndirectCountKHR(device ? device.getProcAddr( "vkCmdDrawIndirectCountKHR") : instance.getProcAddr( "vkCmdDrawIndirectCountKHR")); + vkCmdDrawMeshTasksIndirectCountNV = PFN_vkCmdDrawMeshTasksIndirectCountNV(device ? device.getProcAddr( "vkCmdDrawMeshTasksIndirectCountNV") : instance.getProcAddr( "vkCmdDrawMeshTasksIndirectCountNV")); + vkCmdDrawMeshTasksIndirectNV = PFN_vkCmdDrawMeshTasksIndirectNV(device ? device.getProcAddr( "vkCmdDrawMeshTasksIndirectNV") : instance.getProcAddr( "vkCmdDrawMeshTasksIndirectNV")); + vkCmdDrawMeshTasksNV = PFN_vkCmdDrawMeshTasksNV(device ? device.getProcAddr( "vkCmdDrawMeshTasksNV") : instance.getProcAddr( "vkCmdDrawMeshTasksNV")); + vkCmdEndConditionalRenderingEXT = PFN_vkCmdEndConditionalRenderingEXT(device ? device.getProcAddr( "vkCmdEndConditionalRenderingEXT") : instance.getProcAddr( "vkCmdEndConditionalRenderingEXT")); + vkCmdEndDebugUtilsLabelEXT = PFN_vkCmdEndDebugUtilsLabelEXT(device ? device.getProcAddr( "vkCmdEndDebugUtilsLabelEXT") : instance.getProcAddr( "vkCmdEndDebugUtilsLabelEXT")); + vkCmdEndQuery = PFN_vkCmdEndQuery(device ? device.getProcAddr( "vkCmdEndQuery") : instance.getProcAddr( "vkCmdEndQuery")); + vkCmdEndRenderPass = PFN_vkCmdEndRenderPass(device ? 
device.getProcAddr( "vkCmdEndRenderPass") : instance.getProcAddr( "vkCmdEndRenderPass")); + vkCmdEndRenderPass2KHR = PFN_vkCmdEndRenderPass2KHR(device ? device.getProcAddr( "vkCmdEndRenderPass2KHR") : instance.getProcAddr( "vkCmdEndRenderPass2KHR")); + vkCmdExecuteCommands = PFN_vkCmdExecuteCommands(device ? device.getProcAddr( "vkCmdExecuteCommands") : instance.getProcAddr( "vkCmdExecuteCommands")); + vkCmdFillBuffer = PFN_vkCmdFillBuffer(device ? device.getProcAddr( "vkCmdFillBuffer") : instance.getProcAddr( "vkCmdFillBuffer")); + vkCmdInsertDebugUtilsLabelEXT = PFN_vkCmdInsertDebugUtilsLabelEXT(device ? device.getProcAddr( "vkCmdInsertDebugUtilsLabelEXT") : instance.getProcAddr( "vkCmdInsertDebugUtilsLabelEXT")); + vkCmdNextSubpass = PFN_vkCmdNextSubpass(device ? device.getProcAddr( "vkCmdNextSubpass") : instance.getProcAddr( "vkCmdNextSubpass")); + vkCmdNextSubpass2KHR = PFN_vkCmdNextSubpass2KHR(device ? device.getProcAddr( "vkCmdNextSubpass2KHR") : instance.getProcAddr( "vkCmdNextSubpass2KHR")); + vkCmdPipelineBarrier = PFN_vkCmdPipelineBarrier(device ? device.getProcAddr( "vkCmdPipelineBarrier") : instance.getProcAddr( "vkCmdPipelineBarrier")); + vkCmdProcessCommandsNVX = PFN_vkCmdProcessCommandsNVX(device ? device.getProcAddr( "vkCmdProcessCommandsNVX") : instance.getProcAddr( "vkCmdProcessCommandsNVX")); + vkCmdPushConstants = PFN_vkCmdPushConstants(device ? device.getProcAddr( "vkCmdPushConstants") : instance.getProcAddr( "vkCmdPushConstants")); + vkCmdPushDescriptorSetKHR = PFN_vkCmdPushDescriptorSetKHR(device ? device.getProcAddr( "vkCmdPushDescriptorSetKHR") : instance.getProcAddr( "vkCmdPushDescriptorSetKHR")); + vkCmdPushDescriptorSetWithTemplateKHR = PFN_vkCmdPushDescriptorSetWithTemplateKHR(device ? device.getProcAddr( "vkCmdPushDescriptorSetWithTemplateKHR") : instance.getProcAddr( "vkCmdPushDescriptorSetWithTemplateKHR")); + vkCmdReserveSpaceForCommandsNVX = PFN_vkCmdReserveSpaceForCommandsNVX(device ? device.getProcAddr( "vkCmdReserveSpaceForCommandsNVX") : instance.getProcAddr( "vkCmdReserveSpaceForCommandsNVX")); + vkCmdResetEvent = PFN_vkCmdResetEvent(device ? device.getProcAddr( "vkCmdResetEvent") : instance.getProcAddr( "vkCmdResetEvent")); + vkCmdResetQueryPool = PFN_vkCmdResetQueryPool(device ? device.getProcAddr( "vkCmdResetQueryPool") : instance.getProcAddr( "vkCmdResetQueryPool")); + vkCmdResolveImage = PFN_vkCmdResolveImage(device ? device.getProcAddr( "vkCmdResolveImage") : instance.getProcAddr( "vkCmdResolveImage")); + vkCmdSetBlendConstants = PFN_vkCmdSetBlendConstants(device ? device.getProcAddr( "vkCmdSetBlendConstants") : instance.getProcAddr( "vkCmdSetBlendConstants")); + vkCmdSetCheckpointNV = PFN_vkCmdSetCheckpointNV(device ? device.getProcAddr( "vkCmdSetCheckpointNV") : instance.getProcAddr( "vkCmdSetCheckpointNV")); + vkCmdSetCoarseSampleOrderNV = PFN_vkCmdSetCoarseSampleOrderNV(device ? device.getProcAddr( "vkCmdSetCoarseSampleOrderNV") : instance.getProcAddr( "vkCmdSetCoarseSampleOrderNV")); + vkCmdSetDepthBias = PFN_vkCmdSetDepthBias(device ? device.getProcAddr( "vkCmdSetDepthBias") : instance.getProcAddr( "vkCmdSetDepthBias")); + vkCmdSetDepthBounds = PFN_vkCmdSetDepthBounds(device ? device.getProcAddr( "vkCmdSetDepthBounds") : instance.getProcAddr( "vkCmdSetDepthBounds")); + vkCmdSetDeviceMask = PFN_vkCmdSetDeviceMask(device ? device.getProcAddr( "vkCmdSetDeviceMask") : instance.getProcAddr( "vkCmdSetDeviceMask")); + vkCmdSetDeviceMaskKHR = PFN_vkCmdSetDeviceMaskKHR(device ? 
device.getProcAddr( "vkCmdSetDeviceMaskKHR") : instance.getProcAddr( "vkCmdSetDeviceMaskKHR")); + vkCmdSetDiscardRectangleEXT = PFN_vkCmdSetDiscardRectangleEXT(device ? device.getProcAddr( "vkCmdSetDiscardRectangleEXT") : instance.getProcAddr( "vkCmdSetDiscardRectangleEXT")); + vkCmdSetEvent = PFN_vkCmdSetEvent(device ? device.getProcAddr( "vkCmdSetEvent") : instance.getProcAddr( "vkCmdSetEvent")); + vkCmdSetExclusiveScissorNV = PFN_vkCmdSetExclusiveScissorNV(device ? device.getProcAddr( "vkCmdSetExclusiveScissorNV") : instance.getProcAddr( "vkCmdSetExclusiveScissorNV")); + vkCmdSetLineWidth = PFN_vkCmdSetLineWidth(device ? device.getProcAddr( "vkCmdSetLineWidth") : instance.getProcAddr( "vkCmdSetLineWidth")); + vkCmdSetSampleLocationsEXT = PFN_vkCmdSetSampleLocationsEXT(device ? device.getProcAddr( "vkCmdSetSampleLocationsEXT") : instance.getProcAddr( "vkCmdSetSampleLocationsEXT")); + vkCmdSetScissor = PFN_vkCmdSetScissor(device ? device.getProcAddr( "vkCmdSetScissor") : instance.getProcAddr( "vkCmdSetScissor")); + vkCmdSetStencilCompareMask = PFN_vkCmdSetStencilCompareMask(device ? device.getProcAddr( "vkCmdSetStencilCompareMask") : instance.getProcAddr( "vkCmdSetStencilCompareMask")); + vkCmdSetStencilReference = PFN_vkCmdSetStencilReference(device ? device.getProcAddr( "vkCmdSetStencilReference") : instance.getProcAddr( "vkCmdSetStencilReference")); + vkCmdSetStencilWriteMask = PFN_vkCmdSetStencilWriteMask(device ? device.getProcAddr( "vkCmdSetStencilWriteMask") : instance.getProcAddr( "vkCmdSetStencilWriteMask")); + vkCmdSetViewport = PFN_vkCmdSetViewport(device ? device.getProcAddr( "vkCmdSetViewport") : instance.getProcAddr( "vkCmdSetViewport")); + vkCmdSetViewportShadingRatePaletteNV = PFN_vkCmdSetViewportShadingRatePaletteNV(device ? device.getProcAddr( "vkCmdSetViewportShadingRatePaletteNV") : instance.getProcAddr( "vkCmdSetViewportShadingRatePaletteNV")); + vkCmdSetViewportWScalingNV = PFN_vkCmdSetViewportWScalingNV(device ? device.getProcAddr( "vkCmdSetViewportWScalingNV") : instance.getProcAddr( "vkCmdSetViewportWScalingNV")); + vkCmdTraceRaysNVX = PFN_vkCmdTraceRaysNVX(device ? device.getProcAddr( "vkCmdTraceRaysNVX") : instance.getProcAddr( "vkCmdTraceRaysNVX")); + vkCmdUpdateBuffer = PFN_vkCmdUpdateBuffer(device ? device.getProcAddr( "vkCmdUpdateBuffer") : instance.getProcAddr( "vkCmdUpdateBuffer")); + vkCmdWaitEvents = PFN_vkCmdWaitEvents(device ? device.getProcAddr( "vkCmdWaitEvents") : instance.getProcAddr( "vkCmdWaitEvents")); + vkCmdWriteAccelerationStructurePropertiesNVX = PFN_vkCmdWriteAccelerationStructurePropertiesNVX(device ? device.getProcAddr( "vkCmdWriteAccelerationStructurePropertiesNVX") : instance.getProcAddr( "vkCmdWriteAccelerationStructurePropertiesNVX")); + vkCmdWriteBufferMarkerAMD = PFN_vkCmdWriteBufferMarkerAMD(device ? device.getProcAddr( "vkCmdWriteBufferMarkerAMD") : instance.getProcAddr( "vkCmdWriteBufferMarkerAMD")); + vkCmdWriteTimestamp = PFN_vkCmdWriteTimestamp(device ? device.getProcAddr( "vkCmdWriteTimestamp") : instance.getProcAddr( "vkCmdWriteTimestamp")); + vkCompileDeferredNVX = PFN_vkCompileDeferredNVX(device ? device.getProcAddr( "vkCompileDeferredNVX") : instance.getProcAddr( "vkCompileDeferredNVX")); + vkCreateAccelerationStructureNVX = PFN_vkCreateAccelerationStructureNVX(device ? 
device.getProcAddr( "vkCreateAccelerationStructureNVX") : instance.getProcAddr( "vkCreateAccelerationStructureNVX")); +#ifdef VK_USE_PLATFORM_ANDROID_KHR + vkCreateAndroidSurfaceKHR = PFN_vkCreateAndroidSurfaceKHR(instance.getProcAddr( "vkCreateAndroidSurfaceKHR")); +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + vkCreateBuffer = PFN_vkCreateBuffer(device ? device.getProcAddr( "vkCreateBuffer") : instance.getProcAddr( "vkCreateBuffer")); + vkCreateBufferView = PFN_vkCreateBufferView(device ? device.getProcAddr( "vkCreateBufferView") : instance.getProcAddr( "vkCreateBufferView")); + vkCreateCommandPool = PFN_vkCreateCommandPool(device ? device.getProcAddr( "vkCreateCommandPool") : instance.getProcAddr( "vkCreateCommandPool")); + vkCreateComputePipelines = PFN_vkCreateComputePipelines(device ? device.getProcAddr( "vkCreateComputePipelines") : instance.getProcAddr( "vkCreateComputePipelines")); + vkCreateDebugReportCallbackEXT = PFN_vkCreateDebugReportCallbackEXT(instance.getProcAddr( "vkCreateDebugReportCallbackEXT")); + vkCreateDebugUtilsMessengerEXT = PFN_vkCreateDebugUtilsMessengerEXT(instance.getProcAddr( "vkCreateDebugUtilsMessengerEXT")); + vkCreateDescriptorPool = PFN_vkCreateDescriptorPool(device ? device.getProcAddr( "vkCreateDescriptorPool") : instance.getProcAddr( "vkCreateDescriptorPool")); + vkCreateDescriptorSetLayout = PFN_vkCreateDescriptorSetLayout(device ? device.getProcAddr( "vkCreateDescriptorSetLayout") : instance.getProcAddr( "vkCreateDescriptorSetLayout")); + vkCreateDescriptorUpdateTemplate = PFN_vkCreateDescriptorUpdateTemplate(device ? device.getProcAddr( "vkCreateDescriptorUpdateTemplate") : instance.getProcAddr( "vkCreateDescriptorUpdateTemplate")); + vkCreateDescriptorUpdateTemplateKHR = PFN_vkCreateDescriptorUpdateTemplateKHR(device ? device.getProcAddr( "vkCreateDescriptorUpdateTemplateKHR") : instance.getProcAddr( "vkCreateDescriptorUpdateTemplateKHR")); + vkCreateDevice = PFN_vkCreateDevice(device ? device.getProcAddr( "vkCreateDevice") : instance.getProcAddr( "vkCreateDevice")); + vkCreateDisplayModeKHR = PFN_vkCreateDisplayModeKHR(device ? device.getProcAddr( "vkCreateDisplayModeKHR") : instance.getProcAddr( "vkCreateDisplayModeKHR")); + vkCreateDisplayPlaneSurfaceKHR = PFN_vkCreateDisplayPlaneSurfaceKHR(instance.getProcAddr( "vkCreateDisplayPlaneSurfaceKHR")); + vkCreateEvent = PFN_vkCreateEvent(device ? device.getProcAddr( "vkCreateEvent") : instance.getProcAddr( "vkCreateEvent")); + vkCreateFence = PFN_vkCreateFence(device ? device.getProcAddr( "vkCreateFence") : instance.getProcAddr( "vkCreateFence")); + vkCreateFramebuffer = PFN_vkCreateFramebuffer(device ? device.getProcAddr( "vkCreateFramebuffer") : instance.getProcAddr( "vkCreateFramebuffer")); + vkCreateGraphicsPipelines = PFN_vkCreateGraphicsPipelines(device ? device.getProcAddr( "vkCreateGraphicsPipelines") : instance.getProcAddr( "vkCreateGraphicsPipelines")); +#ifdef VK_USE_PLATFORM_IOS_MVK + vkCreateIOSSurfaceMVK = PFN_vkCreateIOSSurfaceMVK(instance.getProcAddr( "vkCreateIOSSurfaceMVK")); +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + vkCreateImage = PFN_vkCreateImage(device ? device.getProcAddr( "vkCreateImage") : instance.getProcAddr( "vkCreateImage")); + vkCreateImageView = PFN_vkCreateImageView(device ? device.getProcAddr( "vkCreateImageView") : instance.getProcAddr( "vkCreateImageView")); + vkCreateIndirectCommandsLayoutNVX = PFN_vkCreateIndirectCommandsLayoutNVX(device ? 
device.getProcAddr( "vkCreateIndirectCommandsLayoutNVX") : instance.getProcAddr( "vkCreateIndirectCommandsLayoutNVX")); + vkCreateInstance = PFN_vkCreateInstance(instance.getProcAddr( "vkCreateInstance")); +#ifdef VK_USE_PLATFORM_MACOS_MVK + vkCreateMacOSSurfaceMVK = PFN_vkCreateMacOSSurfaceMVK(instance.getProcAddr( "vkCreateMacOSSurfaceMVK")); +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ +#ifdef VK_USE_PLATFORM_MIR_KHR + vkCreateMirSurfaceKHR = PFN_vkCreateMirSurfaceKHR(instance.getProcAddr( "vkCreateMirSurfaceKHR")); +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + vkCreateObjectTableNVX = PFN_vkCreateObjectTableNVX(device ? device.getProcAddr( "vkCreateObjectTableNVX") : instance.getProcAddr( "vkCreateObjectTableNVX")); + vkCreatePipelineCache = PFN_vkCreatePipelineCache(device ? device.getProcAddr( "vkCreatePipelineCache") : instance.getProcAddr( "vkCreatePipelineCache")); + vkCreatePipelineLayout = PFN_vkCreatePipelineLayout(device ? device.getProcAddr( "vkCreatePipelineLayout") : instance.getProcAddr( "vkCreatePipelineLayout")); + vkCreateQueryPool = PFN_vkCreateQueryPool(device ? device.getProcAddr( "vkCreateQueryPool") : instance.getProcAddr( "vkCreateQueryPool")); + vkCreateRaytracingPipelinesNVX = PFN_vkCreateRaytracingPipelinesNVX(device ? device.getProcAddr( "vkCreateRaytracingPipelinesNVX") : instance.getProcAddr( "vkCreateRaytracingPipelinesNVX")); + vkCreateRenderPass = PFN_vkCreateRenderPass(device ? device.getProcAddr( "vkCreateRenderPass") : instance.getProcAddr( "vkCreateRenderPass")); + vkCreateRenderPass2KHR = PFN_vkCreateRenderPass2KHR(device ? device.getProcAddr( "vkCreateRenderPass2KHR") : instance.getProcAddr( "vkCreateRenderPass2KHR")); + vkCreateSampler = PFN_vkCreateSampler(device ? device.getProcAddr( "vkCreateSampler") : instance.getProcAddr( "vkCreateSampler")); + vkCreateSamplerYcbcrConversion = PFN_vkCreateSamplerYcbcrConversion(device ? device.getProcAddr( "vkCreateSamplerYcbcrConversion") : instance.getProcAddr( "vkCreateSamplerYcbcrConversion")); + vkCreateSamplerYcbcrConversionKHR = PFN_vkCreateSamplerYcbcrConversionKHR(device ? device.getProcAddr( "vkCreateSamplerYcbcrConversionKHR") : instance.getProcAddr( "vkCreateSamplerYcbcrConversionKHR")); + vkCreateSemaphore = PFN_vkCreateSemaphore(device ? device.getProcAddr( "vkCreateSemaphore") : instance.getProcAddr( "vkCreateSemaphore")); + vkCreateShaderModule = PFN_vkCreateShaderModule(device ? device.getProcAddr( "vkCreateShaderModule") : instance.getProcAddr( "vkCreateShaderModule")); + vkCreateSharedSwapchainsKHR = PFN_vkCreateSharedSwapchainsKHR(device ? device.getProcAddr( "vkCreateSharedSwapchainsKHR") : instance.getProcAddr( "vkCreateSharedSwapchainsKHR")); + vkCreateSwapchainKHR = PFN_vkCreateSwapchainKHR(device ? device.getProcAddr( "vkCreateSwapchainKHR") : instance.getProcAddr( "vkCreateSwapchainKHR")); + vkCreateValidationCacheEXT = PFN_vkCreateValidationCacheEXT(device ? 
device.getProcAddr( "vkCreateValidationCacheEXT") : instance.getProcAddr( "vkCreateValidationCacheEXT")); +#ifdef VK_USE_PLATFORM_VI_NN + vkCreateViSurfaceNN = PFN_vkCreateViSurfaceNN(instance.getProcAddr( "vkCreateViSurfaceNN")); +#endif /*VK_USE_PLATFORM_VI_NN*/ +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + vkCreateWaylandSurfaceKHR = PFN_vkCreateWaylandSurfaceKHR(instance.getProcAddr( "vkCreateWaylandSurfaceKHR")); +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkCreateWin32SurfaceKHR = PFN_vkCreateWin32SurfaceKHR(instance.getProcAddr( "vkCreateWin32SurfaceKHR")); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + vkCreateXcbSurfaceKHR = PFN_vkCreateXcbSurfaceKHR(instance.getProcAddr( "vkCreateXcbSurfaceKHR")); +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + vkCreateXlibSurfaceKHR = PFN_vkCreateXlibSurfaceKHR(instance.getProcAddr( "vkCreateXlibSurfaceKHR")); +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + vkDebugMarkerSetObjectNameEXT = PFN_vkDebugMarkerSetObjectNameEXT(device ? device.getProcAddr( "vkDebugMarkerSetObjectNameEXT") : instance.getProcAddr( "vkDebugMarkerSetObjectNameEXT")); + vkDebugMarkerSetObjectTagEXT = PFN_vkDebugMarkerSetObjectTagEXT(device ? device.getProcAddr( "vkDebugMarkerSetObjectTagEXT") : instance.getProcAddr( "vkDebugMarkerSetObjectTagEXT")); + vkDebugReportMessageEXT = PFN_vkDebugReportMessageEXT(instance.getProcAddr( "vkDebugReportMessageEXT")); + vkDestroyAccelerationStructureNVX = PFN_vkDestroyAccelerationStructureNVX(device ? device.getProcAddr( "vkDestroyAccelerationStructureNVX") : instance.getProcAddr( "vkDestroyAccelerationStructureNVX")); + vkDestroyBuffer = PFN_vkDestroyBuffer(device ? device.getProcAddr( "vkDestroyBuffer") : instance.getProcAddr( "vkDestroyBuffer")); + vkDestroyBufferView = PFN_vkDestroyBufferView(device ? device.getProcAddr( "vkDestroyBufferView") : instance.getProcAddr( "vkDestroyBufferView")); + vkDestroyCommandPool = PFN_vkDestroyCommandPool(device ? device.getProcAddr( "vkDestroyCommandPool") : instance.getProcAddr( "vkDestroyCommandPool")); + vkDestroyDebugReportCallbackEXT = PFN_vkDestroyDebugReportCallbackEXT(instance.getProcAddr( "vkDestroyDebugReportCallbackEXT")); + vkDestroyDebugUtilsMessengerEXT = PFN_vkDestroyDebugUtilsMessengerEXT(instance.getProcAddr( "vkDestroyDebugUtilsMessengerEXT")); + vkDestroyDescriptorPool = PFN_vkDestroyDescriptorPool(device ? device.getProcAddr( "vkDestroyDescriptorPool") : instance.getProcAddr( "vkDestroyDescriptorPool")); + vkDestroyDescriptorSetLayout = PFN_vkDestroyDescriptorSetLayout(device ? device.getProcAddr( "vkDestroyDescriptorSetLayout") : instance.getProcAddr( "vkDestroyDescriptorSetLayout")); + vkDestroyDescriptorUpdateTemplate = PFN_vkDestroyDescriptorUpdateTemplate(device ? device.getProcAddr( "vkDestroyDescriptorUpdateTemplate") : instance.getProcAddr( "vkDestroyDescriptorUpdateTemplate")); + vkDestroyDescriptorUpdateTemplateKHR = PFN_vkDestroyDescriptorUpdateTemplateKHR(device ? device.getProcAddr( "vkDestroyDescriptorUpdateTemplateKHR") : instance.getProcAddr( "vkDestroyDescriptorUpdateTemplateKHR")); + vkDestroyDevice = PFN_vkDestroyDevice(device ? device.getProcAddr( "vkDestroyDevice") : instance.getProcAddr( "vkDestroyDevice")); + vkDestroyEvent = PFN_vkDestroyEvent(device ? device.getProcAddr( "vkDestroyEvent") : instance.getProcAddr( "vkDestroyEvent")); + vkDestroyFence = PFN_vkDestroyFence(device ? 
device.getProcAddr( "vkDestroyFence") : instance.getProcAddr( "vkDestroyFence")); + vkDestroyFramebuffer = PFN_vkDestroyFramebuffer(device ? device.getProcAddr( "vkDestroyFramebuffer") : instance.getProcAddr( "vkDestroyFramebuffer")); + vkDestroyImage = PFN_vkDestroyImage(device ? device.getProcAddr( "vkDestroyImage") : instance.getProcAddr( "vkDestroyImage")); + vkDestroyImageView = PFN_vkDestroyImageView(device ? device.getProcAddr( "vkDestroyImageView") : instance.getProcAddr( "vkDestroyImageView")); + vkDestroyIndirectCommandsLayoutNVX = PFN_vkDestroyIndirectCommandsLayoutNVX(device ? device.getProcAddr( "vkDestroyIndirectCommandsLayoutNVX") : instance.getProcAddr( "vkDestroyIndirectCommandsLayoutNVX")); + vkDestroyInstance = PFN_vkDestroyInstance(instance.getProcAddr( "vkDestroyInstance")); + vkDestroyObjectTableNVX = PFN_vkDestroyObjectTableNVX(device ? device.getProcAddr( "vkDestroyObjectTableNVX") : instance.getProcAddr( "vkDestroyObjectTableNVX")); + vkDestroyPipeline = PFN_vkDestroyPipeline(device ? device.getProcAddr( "vkDestroyPipeline") : instance.getProcAddr( "vkDestroyPipeline")); + vkDestroyPipelineCache = PFN_vkDestroyPipelineCache(device ? device.getProcAddr( "vkDestroyPipelineCache") : instance.getProcAddr( "vkDestroyPipelineCache")); + vkDestroyPipelineLayout = PFN_vkDestroyPipelineLayout(device ? device.getProcAddr( "vkDestroyPipelineLayout") : instance.getProcAddr( "vkDestroyPipelineLayout")); + vkDestroyQueryPool = PFN_vkDestroyQueryPool(device ? device.getProcAddr( "vkDestroyQueryPool") : instance.getProcAddr( "vkDestroyQueryPool")); + vkDestroyRenderPass = PFN_vkDestroyRenderPass(device ? device.getProcAddr( "vkDestroyRenderPass") : instance.getProcAddr( "vkDestroyRenderPass")); + vkDestroySampler = PFN_vkDestroySampler(device ? device.getProcAddr( "vkDestroySampler") : instance.getProcAddr( "vkDestroySampler")); + vkDestroySamplerYcbcrConversion = PFN_vkDestroySamplerYcbcrConversion(device ? device.getProcAddr( "vkDestroySamplerYcbcrConversion") : instance.getProcAddr( "vkDestroySamplerYcbcrConversion")); + vkDestroySamplerYcbcrConversionKHR = PFN_vkDestroySamplerYcbcrConversionKHR(device ? device.getProcAddr( "vkDestroySamplerYcbcrConversionKHR") : instance.getProcAddr( "vkDestroySamplerYcbcrConversionKHR")); + vkDestroySemaphore = PFN_vkDestroySemaphore(device ? device.getProcAddr( "vkDestroySemaphore") : instance.getProcAddr( "vkDestroySemaphore")); + vkDestroyShaderModule = PFN_vkDestroyShaderModule(device ? device.getProcAddr( "vkDestroyShaderModule") : instance.getProcAddr( "vkDestroyShaderModule")); + vkDestroySurfaceKHR = PFN_vkDestroySurfaceKHR(instance.getProcAddr( "vkDestroySurfaceKHR")); + vkDestroySwapchainKHR = PFN_vkDestroySwapchainKHR(device ? device.getProcAddr( "vkDestroySwapchainKHR") : instance.getProcAddr( "vkDestroySwapchainKHR")); + vkDestroyValidationCacheEXT = PFN_vkDestroyValidationCacheEXT(device ? device.getProcAddr( "vkDestroyValidationCacheEXT") : instance.getProcAddr( "vkDestroyValidationCacheEXT")); + vkDeviceWaitIdle = PFN_vkDeviceWaitIdle(device ? device.getProcAddr( "vkDeviceWaitIdle") : instance.getProcAddr( "vkDeviceWaitIdle")); + vkDisplayPowerControlEXT = PFN_vkDisplayPowerControlEXT(device ? device.getProcAddr( "vkDisplayPowerControlEXT") : instance.getProcAddr( "vkDisplayPowerControlEXT")); + vkEndCommandBuffer = PFN_vkEndCommandBuffer(device ? device.getProcAddr( "vkEndCommandBuffer") : instance.getProcAddr( "vkEndCommandBuffer")); + vkEnumerateDeviceExtensionProperties = PFN_vkEnumerateDeviceExtensionProperties(device ? 
device.getProcAddr( "vkEnumerateDeviceExtensionProperties") : instance.getProcAddr( "vkEnumerateDeviceExtensionProperties")); + vkEnumerateDeviceLayerProperties = PFN_vkEnumerateDeviceLayerProperties(device ? device.getProcAddr( "vkEnumerateDeviceLayerProperties") : instance.getProcAddr( "vkEnumerateDeviceLayerProperties")); + vkEnumerateInstanceExtensionProperties = PFN_vkEnumerateInstanceExtensionProperties(instance.getProcAddr( "vkEnumerateInstanceExtensionProperties")); + vkEnumerateInstanceLayerProperties = PFN_vkEnumerateInstanceLayerProperties(instance.getProcAddr( "vkEnumerateInstanceLayerProperties")); + vkEnumerateInstanceVersion = PFN_vkEnumerateInstanceVersion(instance.getProcAddr( "vkEnumerateInstanceVersion")); + vkEnumeratePhysicalDeviceGroups = PFN_vkEnumeratePhysicalDeviceGroups(instance.getProcAddr( "vkEnumeratePhysicalDeviceGroups")); + vkEnumeratePhysicalDeviceGroupsKHR = PFN_vkEnumeratePhysicalDeviceGroupsKHR(instance.getProcAddr( "vkEnumeratePhysicalDeviceGroupsKHR")); + vkEnumeratePhysicalDevices = PFN_vkEnumeratePhysicalDevices(instance.getProcAddr( "vkEnumeratePhysicalDevices")); + vkFlushMappedMemoryRanges = PFN_vkFlushMappedMemoryRanges(device ? device.getProcAddr( "vkFlushMappedMemoryRanges") : instance.getProcAddr( "vkFlushMappedMemoryRanges")); + vkFreeCommandBuffers = PFN_vkFreeCommandBuffers(device ? device.getProcAddr( "vkFreeCommandBuffers") : instance.getProcAddr( "vkFreeCommandBuffers")); + vkFreeDescriptorSets = PFN_vkFreeDescriptorSets(device ? device.getProcAddr( "vkFreeDescriptorSets") : instance.getProcAddr( "vkFreeDescriptorSets")); + vkFreeMemory = PFN_vkFreeMemory(device ? device.getProcAddr( "vkFreeMemory") : instance.getProcAddr( "vkFreeMemory")); + vkGetAccelerationStructureHandleNVX = PFN_vkGetAccelerationStructureHandleNVX(device ? device.getProcAddr( "vkGetAccelerationStructureHandleNVX") : instance.getProcAddr( "vkGetAccelerationStructureHandleNVX")); + vkGetAccelerationStructureMemoryRequirementsNVX = PFN_vkGetAccelerationStructureMemoryRequirementsNVX(device ? device.getProcAddr( "vkGetAccelerationStructureMemoryRequirementsNVX") : instance.getProcAddr( "vkGetAccelerationStructureMemoryRequirementsNVX")); + vkGetAccelerationStructureScratchMemoryRequirementsNVX = PFN_vkGetAccelerationStructureScratchMemoryRequirementsNVX(device ? device.getProcAddr( "vkGetAccelerationStructureScratchMemoryRequirementsNVX") : instance.getProcAddr( "vkGetAccelerationStructureScratchMemoryRequirementsNVX")); +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + vkGetAndroidHardwareBufferPropertiesANDROID = PFN_vkGetAndroidHardwareBufferPropertiesANDROID(device ? device.getProcAddr( "vkGetAndroidHardwareBufferPropertiesANDROID") : instance.getProcAddr( "vkGetAndroidHardwareBufferPropertiesANDROID")); +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + vkGetBufferMemoryRequirements = PFN_vkGetBufferMemoryRequirements(device ? device.getProcAddr( "vkGetBufferMemoryRequirements") : instance.getProcAddr( "vkGetBufferMemoryRequirements")); + vkGetBufferMemoryRequirements2 = PFN_vkGetBufferMemoryRequirements2(device ? device.getProcAddr( "vkGetBufferMemoryRequirements2") : instance.getProcAddr( "vkGetBufferMemoryRequirements2")); + vkGetBufferMemoryRequirements2KHR = PFN_vkGetBufferMemoryRequirements2KHR(device ? device.getProcAddr( "vkGetBufferMemoryRequirements2KHR") : instance.getProcAddr( "vkGetBufferMemoryRequirements2KHR")); + vkGetDescriptorSetLayoutSupport = PFN_vkGetDescriptorSetLayoutSupport(device ? 
device.getProcAddr( "vkGetDescriptorSetLayoutSupport") : instance.getProcAddr( "vkGetDescriptorSetLayoutSupport")); + vkGetDescriptorSetLayoutSupportKHR = PFN_vkGetDescriptorSetLayoutSupportKHR(device ? device.getProcAddr( "vkGetDescriptorSetLayoutSupportKHR") : instance.getProcAddr( "vkGetDescriptorSetLayoutSupportKHR")); + vkGetDeviceGroupPeerMemoryFeatures = PFN_vkGetDeviceGroupPeerMemoryFeatures(device ? device.getProcAddr( "vkGetDeviceGroupPeerMemoryFeatures") : instance.getProcAddr( "vkGetDeviceGroupPeerMemoryFeatures")); + vkGetDeviceGroupPeerMemoryFeaturesKHR = PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR(device ? device.getProcAddr( "vkGetDeviceGroupPeerMemoryFeaturesKHR") : instance.getProcAddr( "vkGetDeviceGroupPeerMemoryFeaturesKHR")); + vkGetDeviceGroupPresentCapabilitiesKHR = PFN_vkGetDeviceGroupPresentCapabilitiesKHR(device ? device.getProcAddr( "vkGetDeviceGroupPresentCapabilitiesKHR") : instance.getProcAddr( "vkGetDeviceGroupPresentCapabilitiesKHR")); + vkGetDeviceGroupSurfacePresentModesKHR = PFN_vkGetDeviceGroupSurfacePresentModesKHR(device ? device.getProcAddr( "vkGetDeviceGroupSurfacePresentModesKHR") : instance.getProcAddr( "vkGetDeviceGroupSurfacePresentModesKHR")); + vkGetDeviceMemoryCommitment = PFN_vkGetDeviceMemoryCommitment(device ? device.getProcAddr( "vkGetDeviceMemoryCommitment") : instance.getProcAddr( "vkGetDeviceMemoryCommitment")); + vkGetDeviceProcAddr = PFN_vkGetDeviceProcAddr(device ? device.getProcAddr( "vkGetDeviceProcAddr") : instance.getProcAddr( "vkGetDeviceProcAddr")); + vkGetDeviceQueue = PFN_vkGetDeviceQueue(device ? device.getProcAddr( "vkGetDeviceQueue") : instance.getProcAddr( "vkGetDeviceQueue")); + vkGetDeviceQueue2 = PFN_vkGetDeviceQueue2(device ? device.getProcAddr( "vkGetDeviceQueue2") : instance.getProcAddr( "vkGetDeviceQueue2")); + vkGetDisplayModeProperties2KHR = PFN_vkGetDisplayModeProperties2KHR(device ? device.getProcAddr( "vkGetDisplayModeProperties2KHR") : instance.getProcAddr( "vkGetDisplayModeProperties2KHR")); + vkGetDisplayModePropertiesKHR = PFN_vkGetDisplayModePropertiesKHR(device ? device.getProcAddr( "vkGetDisplayModePropertiesKHR") : instance.getProcAddr( "vkGetDisplayModePropertiesKHR")); + vkGetDisplayPlaneCapabilities2KHR = PFN_vkGetDisplayPlaneCapabilities2KHR(device ? device.getProcAddr( "vkGetDisplayPlaneCapabilities2KHR") : instance.getProcAddr( "vkGetDisplayPlaneCapabilities2KHR")); + vkGetDisplayPlaneCapabilitiesKHR = PFN_vkGetDisplayPlaneCapabilitiesKHR(device ? device.getProcAddr( "vkGetDisplayPlaneCapabilitiesKHR") : instance.getProcAddr( "vkGetDisplayPlaneCapabilitiesKHR")); + vkGetDisplayPlaneSupportedDisplaysKHR = PFN_vkGetDisplayPlaneSupportedDisplaysKHR(device ? device.getProcAddr( "vkGetDisplayPlaneSupportedDisplaysKHR") : instance.getProcAddr( "vkGetDisplayPlaneSupportedDisplaysKHR")); + vkGetEventStatus = PFN_vkGetEventStatus(device ? device.getProcAddr( "vkGetEventStatus") : instance.getProcAddr( "vkGetEventStatus")); + vkGetFenceFdKHR = PFN_vkGetFenceFdKHR(device ? device.getProcAddr( "vkGetFenceFdKHR") : instance.getProcAddr( "vkGetFenceFdKHR")); + vkGetFenceStatus = PFN_vkGetFenceStatus(device ? device.getProcAddr( "vkGetFenceStatus") : instance.getProcAddr( "vkGetFenceStatus")); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetFenceWin32HandleKHR = PFN_vkGetFenceWin32HandleKHR(device ? device.getProcAddr( "vkGetFenceWin32HandleKHR") : instance.getProcAddr( "vkGetFenceWin32HandleKHR")); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetImageMemoryRequirements = PFN_vkGetImageMemoryRequirements(device ? 
device.getProcAddr( "vkGetImageMemoryRequirements") : instance.getProcAddr( "vkGetImageMemoryRequirements")); + vkGetImageMemoryRequirements2 = PFN_vkGetImageMemoryRequirements2(device ? device.getProcAddr( "vkGetImageMemoryRequirements2") : instance.getProcAddr( "vkGetImageMemoryRequirements2")); + vkGetImageMemoryRequirements2KHR = PFN_vkGetImageMemoryRequirements2KHR(device ? device.getProcAddr( "vkGetImageMemoryRequirements2KHR") : instance.getProcAddr( "vkGetImageMemoryRequirements2KHR")); + vkGetImageSparseMemoryRequirements = PFN_vkGetImageSparseMemoryRequirements(device ? device.getProcAddr( "vkGetImageSparseMemoryRequirements") : instance.getProcAddr( "vkGetImageSparseMemoryRequirements")); + vkGetImageSparseMemoryRequirements2 = PFN_vkGetImageSparseMemoryRequirements2(device ? device.getProcAddr( "vkGetImageSparseMemoryRequirements2") : instance.getProcAddr( "vkGetImageSparseMemoryRequirements2")); + vkGetImageSparseMemoryRequirements2KHR = PFN_vkGetImageSparseMemoryRequirements2KHR(device ? device.getProcAddr( "vkGetImageSparseMemoryRequirements2KHR") : instance.getProcAddr( "vkGetImageSparseMemoryRequirements2KHR")); + vkGetImageSubresourceLayout = PFN_vkGetImageSubresourceLayout(device ? device.getProcAddr( "vkGetImageSubresourceLayout") : instance.getProcAddr( "vkGetImageSubresourceLayout")); + vkGetInstanceProcAddr = PFN_vkGetInstanceProcAddr(instance.getProcAddr( "vkGetInstanceProcAddr")); +#ifdef VK_USE_PLATFORM_ANDROID_ANDROID + vkGetMemoryAndroidHardwareBufferANDROID = PFN_vkGetMemoryAndroidHardwareBufferANDROID(device ? device.getProcAddr( "vkGetMemoryAndroidHardwareBufferANDROID") : instance.getProcAddr( "vkGetMemoryAndroidHardwareBufferANDROID")); +#endif /*VK_USE_PLATFORM_ANDROID_ANDROID*/ + vkGetMemoryFdKHR = PFN_vkGetMemoryFdKHR(device ? device.getProcAddr( "vkGetMemoryFdKHR") : instance.getProcAddr( "vkGetMemoryFdKHR")); + vkGetMemoryFdPropertiesKHR = PFN_vkGetMemoryFdPropertiesKHR(device ? device.getProcAddr( "vkGetMemoryFdPropertiesKHR") : instance.getProcAddr( "vkGetMemoryFdPropertiesKHR")); + vkGetMemoryHostPointerPropertiesEXT = PFN_vkGetMemoryHostPointerPropertiesEXT(device ? device.getProcAddr( "vkGetMemoryHostPointerPropertiesEXT") : instance.getProcAddr( "vkGetMemoryHostPointerPropertiesEXT")); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetMemoryWin32HandleKHR = PFN_vkGetMemoryWin32HandleKHR(device ? device.getProcAddr( "vkGetMemoryWin32HandleKHR") : instance.getProcAddr( "vkGetMemoryWin32HandleKHR")); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_NV + vkGetMemoryWin32HandleNV = PFN_vkGetMemoryWin32HandleNV(device ? device.getProcAddr( "vkGetMemoryWin32HandleNV") : instance.getProcAddr( "vkGetMemoryWin32HandleNV")); +#endif /*VK_USE_PLATFORM_WIN32_NV*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetMemoryWin32HandlePropertiesKHR = PFN_vkGetMemoryWin32HandlePropertiesKHR(device ? device.getProcAddr( "vkGetMemoryWin32HandlePropertiesKHR") : instance.getProcAddr( "vkGetMemoryWin32HandlePropertiesKHR")); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetPastPresentationTimingGOOGLE = PFN_vkGetPastPresentationTimingGOOGLE(device ? device.getProcAddr( "vkGetPastPresentationTimingGOOGLE") : instance.getProcAddr( "vkGetPastPresentationTimingGOOGLE")); + vkGetPhysicalDeviceDisplayPlaneProperties2KHR = PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR(device ? 
device.getProcAddr( "vkGetPhysicalDeviceDisplayPlaneProperties2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceDisplayPlaneProperties2KHR")); + vkGetPhysicalDeviceDisplayPlanePropertiesKHR = PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceDisplayPlanePropertiesKHR") : instance.getProcAddr( "vkGetPhysicalDeviceDisplayPlanePropertiesKHR")); + vkGetPhysicalDeviceDisplayProperties2KHR = PFN_vkGetPhysicalDeviceDisplayProperties2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceDisplayProperties2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceDisplayProperties2KHR")); + vkGetPhysicalDeviceDisplayPropertiesKHR = PFN_vkGetPhysicalDeviceDisplayPropertiesKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceDisplayPropertiesKHR") : instance.getProcAddr( "vkGetPhysicalDeviceDisplayPropertiesKHR")); + vkGetPhysicalDeviceExternalBufferProperties = PFN_vkGetPhysicalDeviceExternalBufferProperties(device ? device.getProcAddr( "vkGetPhysicalDeviceExternalBufferProperties") : instance.getProcAddr( "vkGetPhysicalDeviceExternalBufferProperties")); + vkGetPhysicalDeviceExternalBufferPropertiesKHR = PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceExternalBufferPropertiesKHR") : instance.getProcAddr( "vkGetPhysicalDeviceExternalBufferPropertiesKHR")); + vkGetPhysicalDeviceExternalFenceProperties = PFN_vkGetPhysicalDeviceExternalFenceProperties(device ? device.getProcAddr( "vkGetPhysicalDeviceExternalFenceProperties") : instance.getProcAddr( "vkGetPhysicalDeviceExternalFenceProperties")); + vkGetPhysicalDeviceExternalFencePropertiesKHR = PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceExternalFencePropertiesKHR") : instance.getProcAddr( "vkGetPhysicalDeviceExternalFencePropertiesKHR")); + vkGetPhysicalDeviceExternalImageFormatPropertiesNV = PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV(device ? device.getProcAddr( "vkGetPhysicalDeviceExternalImageFormatPropertiesNV") : instance.getProcAddr( "vkGetPhysicalDeviceExternalImageFormatPropertiesNV")); + vkGetPhysicalDeviceExternalSemaphoreProperties = PFN_vkGetPhysicalDeviceExternalSemaphoreProperties(device ? device.getProcAddr( "vkGetPhysicalDeviceExternalSemaphoreProperties") : instance.getProcAddr( "vkGetPhysicalDeviceExternalSemaphoreProperties")); + vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR") : instance.getProcAddr( "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR")); + vkGetPhysicalDeviceFeatures = PFN_vkGetPhysicalDeviceFeatures(device ? device.getProcAddr( "vkGetPhysicalDeviceFeatures") : instance.getProcAddr( "vkGetPhysicalDeviceFeatures")); + vkGetPhysicalDeviceFeatures2 = PFN_vkGetPhysicalDeviceFeatures2(device ? device.getProcAddr( "vkGetPhysicalDeviceFeatures2") : instance.getProcAddr( "vkGetPhysicalDeviceFeatures2")); + vkGetPhysicalDeviceFeatures2KHR = PFN_vkGetPhysicalDeviceFeatures2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceFeatures2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceFeatures2KHR")); + vkGetPhysicalDeviceFormatProperties = PFN_vkGetPhysicalDeviceFormatProperties(device ? device.getProcAddr( "vkGetPhysicalDeviceFormatProperties") : instance.getProcAddr( "vkGetPhysicalDeviceFormatProperties")); + vkGetPhysicalDeviceFormatProperties2 = PFN_vkGetPhysicalDeviceFormatProperties2(device ? 
device.getProcAddr( "vkGetPhysicalDeviceFormatProperties2") : instance.getProcAddr( "vkGetPhysicalDeviceFormatProperties2")); + vkGetPhysicalDeviceFormatProperties2KHR = PFN_vkGetPhysicalDeviceFormatProperties2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceFormatProperties2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceFormatProperties2KHR")); + vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX = PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX(device ? device.getProcAddr( "vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX") : instance.getProcAddr( "vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX")); + vkGetPhysicalDeviceImageFormatProperties = PFN_vkGetPhysicalDeviceImageFormatProperties(device ? device.getProcAddr( "vkGetPhysicalDeviceImageFormatProperties") : instance.getProcAddr( "vkGetPhysicalDeviceImageFormatProperties")); + vkGetPhysicalDeviceImageFormatProperties2 = PFN_vkGetPhysicalDeviceImageFormatProperties2(device ? device.getProcAddr( "vkGetPhysicalDeviceImageFormatProperties2") : instance.getProcAddr( "vkGetPhysicalDeviceImageFormatProperties2")); + vkGetPhysicalDeviceImageFormatProperties2KHR = PFN_vkGetPhysicalDeviceImageFormatProperties2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceImageFormatProperties2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceImageFormatProperties2KHR")); + vkGetPhysicalDeviceMemoryProperties = PFN_vkGetPhysicalDeviceMemoryProperties(device ? device.getProcAddr( "vkGetPhysicalDeviceMemoryProperties") : instance.getProcAddr( "vkGetPhysicalDeviceMemoryProperties")); + vkGetPhysicalDeviceMemoryProperties2 = PFN_vkGetPhysicalDeviceMemoryProperties2(device ? device.getProcAddr( "vkGetPhysicalDeviceMemoryProperties2") : instance.getProcAddr( "vkGetPhysicalDeviceMemoryProperties2")); + vkGetPhysicalDeviceMemoryProperties2KHR = PFN_vkGetPhysicalDeviceMemoryProperties2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceMemoryProperties2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceMemoryProperties2KHR")); +#ifdef VK_USE_PLATFORM_MIR_KHR + vkGetPhysicalDeviceMirPresentationSupportKHR = PFN_vkGetPhysicalDeviceMirPresentationSupportKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceMirPresentationSupportKHR") : instance.getProcAddr( "vkGetPhysicalDeviceMirPresentationSupportKHR")); +#endif /*VK_USE_PLATFORM_MIR_KHR*/ + vkGetPhysicalDeviceMultisamplePropertiesEXT = PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT(device ? device.getProcAddr( "vkGetPhysicalDeviceMultisamplePropertiesEXT") : instance.getProcAddr( "vkGetPhysicalDeviceMultisamplePropertiesEXT")); + vkGetPhysicalDevicePresentRectanglesKHR = PFN_vkGetPhysicalDevicePresentRectanglesKHR(device ? device.getProcAddr( "vkGetPhysicalDevicePresentRectanglesKHR") : instance.getProcAddr( "vkGetPhysicalDevicePresentRectanglesKHR")); + vkGetPhysicalDeviceProperties = PFN_vkGetPhysicalDeviceProperties(device ? device.getProcAddr( "vkGetPhysicalDeviceProperties") : instance.getProcAddr( "vkGetPhysicalDeviceProperties")); + vkGetPhysicalDeviceProperties2 = PFN_vkGetPhysicalDeviceProperties2(device ? device.getProcAddr( "vkGetPhysicalDeviceProperties2") : instance.getProcAddr( "vkGetPhysicalDeviceProperties2")); + vkGetPhysicalDeviceProperties2KHR = PFN_vkGetPhysicalDeviceProperties2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceProperties2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceProperties2KHR")); + vkGetPhysicalDeviceQueueFamilyProperties = PFN_vkGetPhysicalDeviceQueueFamilyProperties(device ? 
device.getProcAddr( "vkGetPhysicalDeviceQueueFamilyProperties") : instance.getProcAddr( "vkGetPhysicalDeviceQueueFamilyProperties")); + vkGetPhysicalDeviceQueueFamilyProperties2 = PFN_vkGetPhysicalDeviceQueueFamilyProperties2(device ? device.getProcAddr( "vkGetPhysicalDeviceQueueFamilyProperties2") : instance.getProcAddr( "vkGetPhysicalDeviceQueueFamilyProperties2")); + vkGetPhysicalDeviceQueueFamilyProperties2KHR = PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceQueueFamilyProperties2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceQueueFamilyProperties2KHR")); + vkGetPhysicalDeviceSparseImageFormatProperties = PFN_vkGetPhysicalDeviceSparseImageFormatProperties(device ? device.getProcAddr( "vkGetPhysicalDeviceSparseImageFormatProperties") : instance.getProcAddr( "vkGetPhysicalDeviceSparseImageFormatProperties")); + vkGetPhysicalDeviceSparseImageFormatProperties2 = PFN_vkGetPhysicalDeviceSparseImageFormatProperties2(device ? device.getProcAddr( "vkGetPhysicalDeviceSparseImageFormatProperties2") : instance.getProcAddr( "vkGetPhysicalDeviceSparseImageFormatProperties2")); + vkGetPhysicalDeviceSparseImageFormatProperties2KHR = PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceSparseImageFormatProperties2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceSparseImageFormatProperties2KHR")); + vkGetPhysicalDeviceSurfaceCapabilities2EXT = PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT(device ? device.getProcAddr( "vkGetPhysicalDeviceSurfaceCapabilities2EXT") : instance.getProcAddr( "vkGetPhysicalDeviceSurfaceCapabilities2EXT")); + vkGetPhysicalDeviceSurfaceCapabilities2KHR = PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceSurfaceCapabilities2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceSurfaceCapabilities2KHR")); + vkGetPhysicalDeviceSurfaceCapabilitiesKHR = PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceSurfaceCapabilitiesKHR") : instance.getProcAddr( "vkGetPhysicalDeviceSurfaceCapabilitiesKHR")); + vkGetPhysicalDeviceSurfaceFormats2KHR = PFN_vkGetPhysicalDeviceSurfaceFormats2KHR(device ? device.getProcAddr( "vkGetPhysicalDeviceSurfaceFormats2KHR") : instance.getProcAddr( "vkGetPhysicalDeviceSurfaceFormats2KHR")); + vkGetPhysicalDeviceSurfaceFormatsKHR = PFN_vkGetPhysicalDeviceSurfaceFormatsKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceSurfaceFormatsKHR") : instance.getProcAddr( "vkGetPhysicalDeviceSurfaceFormatsKHR")); + vkGetPhysicalDeviceSurfacePresentModesKHR = PFN_vkGetPhysicalDeviceSurfacePresentModesKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceSurfacePresentModesKHR") : instance.getProcAddr( "vkGetPhysicalDeviceSurfacePresentModesKHR")); + vkGetPhysicalDeviceSurfaceSupportKHR = PFN_vkGetPhysicalDeviceSurfaceSupportKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceSurfaceSupportKHR") : instance.getProcAddr( "vkGetPhysicalDeviceSurfaceSupportKHR")); +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + vkGetPhysicalDeviceWaylandPresentationSupportKHR = PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceWaylandPresentationSupportKHR") : instance.getProcAddr( "vkGetPhysicalDeviceWaylandPresentationSupportKHR")); +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetPhysicalDeviceWin32PresentationSupportKHR = PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR(device ? 
device.getProcAddr( "vkGetPhysicalDeviceWin32PresentationSupportKHR") : instance.getProcAddr( "vkGetPhysicalDeviceWin32PresentationSupportKHR")); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + vkGetPhysicalDeviceXcbPresentationSupportKHR = PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceXcbPresentationSupportKHR") : instance.getProcAddr( "vkGetPhysicalDeviceXcbPresentationSupportKHR")); +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + vkGetPhysicalDeviceXlibPresentationSupportKHR = PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR(device ? device.getProcAddr( "vkGetPhysicalDeviceXlibPresentationSupportKHR") : instance.getProcAddr( "vkGetPhysicalDeviceXlibPresentationSupportKHR")); +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + vkGetPipelineCacheData = PFN_vkGetPipelineCacheData(device ? device.getProcAddr( "vkGetPipelineCacheData") : instance.getProcAddr( "vkGetPipelineCacheData")); + vkGetQueryPoolResults = PFN_vkGetQueryPoolResults(device ? device.getProcAddr( "vkGetQueryPoolResults") : instance.getProcAddr( "vkGetQueryPoolResults")); + vkGetQueueCheckpointDataNV = PFN_vkGetQueueCheckpointDataNV(device ? device.getProcAddr( "vkGetQueueCheckpointDataNV") : instance.getProcAddr( "vkGetQueueCheckpointDataNV")); +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_NV + vkGetRandROutputDisplayEXT = PFN_vkGetRandROutputDisplayEXT(device ? device.getProcAddr( "vkGetRandROutputDisplayEXT") : instance.getProcAddr( "vkGetRandROutputDisplayEXT")); +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_NV*/ + vkGetRaytracingShaderHandlesNVX = PFN_vkGetRaytracingShaderHandlesNVX(device ? device.getProcAddr( "vkGetRaytracingShaderHandlesNVX") : instance.getProcAddr( "vkGetRaytracingShaderHandlesNVX")); + vkGetRefreshCycleDurationGOOGLE = PFN_vkGetRefreshCycleDurationGOOGLE(device ? device.getProcAddr( "vkGetRefreshCycleDurationGOOGLE") : instance.getProcAddr( "vkGetRefreshCycleDurationGOOGLE")); + vkGetRenderAreaGranularity = PFN_vkGetRenderAreaGranularity(device ? device.getProcAddr( "vkGetRenderAreaGranularity") : instance.getProcAddr( "vkGetRenderAreaGranularity")); + vkGetSemaphoreFdKHR = PFN_vkGetSemaphoreFdKHR(device ? device.getProcAddr( "vkGetSemaphoreFdKHR") : instance.getProcAddr( "vkGetSemaphoreFdKHR")); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetSemaphoreWin32HandleKHR = PFN_vkGetSemaphoreWin32HandleKHR(device ? device.getProcAddr( "vkGetSemaphoreWin32HandleKHR") : instance.getProcAddr( "vkGetSemaphoreWin32HandleKHR")); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetShaderInfoAMD = PFN_vkGetShaderInfoAMD(device ? device.getProcAddr( "vkGetShaderInfoAMD") : instance.getProcAddr( "vkGetShaderInfoAMD")); + vkGetSwapchainCounterEXT = PFN_vkGetSwapchainCounterEXT(device ? device.getProcAddr( "vkGetSwapchainCounterEXT") : instance.getProcAddr( "vkGetSwapchainCounterEXT")); + vkGetSwapchainImagesKHR = PFN_vkGetSwapchainImagesKHR(device ? device.getProcAddr( "vkGetSwapchainImagesKHR") : instance.getProcAddr( "vkGetSwapchainImagesKHR")); + vkGetSwapchainStatusKHR = PFN_vkGetSwapchainStatusKHR(device ? device.getProcAddr( "vkGetSwapchainStatusKHR") : instance.getProcAddr( "vkGetSwapchainStatusKHR")); + vkGetValidationCacheDataEXT = PFN_vkGetValidationCacheDataEXT(device ? device.getProcAddr( "vkGetValidationCacheDataEXT") : instance.getProcAddr( "vkGetValidationCacheDataEXT")); + vkImportFenceFdKHR = PFN_vkImportFenceFdKHR(device ? 
device.getProcAddr( "vkImportFenceFdKHR") : instance.getProcAddr( "vkImportFenceFdKHR")); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkImportFenceWin32HandleKHR = PFN_vkImportFenceWin32HandleKHR(device ? device.getProcAddr( "vkImportFenceWin32HandleKHR") : instance.getProcAddr( "vkImportFenceWin32HandleKHR")); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkImportSemaphoreFdKHR = PFN_vkImportSemaphoreFdKHR(device ? device.getProcAddr( "vkImportSemaphoreFdKHR") : instance.getProcAddr( "vkImportSemaphoreFdKHR")); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkImportSemaphoreWin32HandleKHR = PFN_vkImportSemaphoreWin32HandleKHR(device ? device.getProcAddr( "vkImportSemaphoreWin32HandleKHR") : instance.getProcAddr( "vkImportSemaphoreWin32HandleKHR")); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkInvalidateMappedMemoryRanges = PFN_vkInvalidateMappedMemoryRanges(device ? device.getProcAddr( "vkInvalidateMappedMemoryRanges") : instance.getProcAddr( "vkInvalidateMappedMemoryRanges")); + vkMapMemory = PFN_vkMapMemory(device ? device.getProcAddr( "vkMapMemory") : instance.getProcAddr( "vkMapMemory")); + vkMergePipelineCaches = PFN_vkMergePipelineCaches(device ? device.getProcAddr( "vkMergePipelineCaches") : instance.getProcAddr( "vkMergePipelineCaches")); + vkMergeValidationCachesEXT = PFN_vkMergeValidationCachesEXT(device ? device.getProcAddr( "vkMergeValidationCachesEXT") : instance.getProcAddr( "vkMergeValidationCachesEXT")); + vkQueueBeginDebugUtilsLabelEXT = PFN_vkQueueBeginDebugUtilsLabelEXT(device ? device.getProcAddr( "vkQueueBeginDebugUtilsLabelEXT") : instance.getProcAddr( "vkQueueBeginDebugUtilsLabelEXT")); + vkQueueBindSparse = PFN_vkQueueBindSparse(device ? device.getProcAddr( "vkQueueBindSparse") : instance.getProcAddr( "vkQueueBindSparse")); + vkQueueEndDebugUtilsLabelEXT = PFN_vkQueueEndDebugUtilsLabelEXT(device ? device.getProcAddr( "vkQueueEndDebugUtilsLabelEXT") : instance.getProcAddr( "vkQueueEndDebugUtilsLabelEXT")); + vkQueueInsertDebugUtilsLabelEXT = PFN_vkQueueInsertDebugUtilsLabelEXT(device ? device.getProcAddr( "vkQueueInsertDebugUtilsLabelEXT") : instance.getProcAddr( "vkQueueInsertDebugUtilsLabelEXT")); + vkQueuePresentKHR = PFN_vkQueuePresentKHR(device ? device.getProcAddr( "vkQueuePresentKHR") : instance.getProcAddr( "vkQueuePresentKHR")); + vkQueueSubmit = PFN_vkQueueSubmit(device ? device.getProcAddr( "vkQueueSubmit") : instance.getProcAddr( "vkQueueSubmit")); + vkQueueWaitIdle = PFN_vkQueueWaitIdle(device ? device.getProcAddr( "vkQueueWaitIdle") : instance.getProcAddr( "vkQueueWaitIdle")); + vkRegisterDeviceEventEXT = PFN_vkRegisterDeviceEventEXT(device ? device.getProcAddr( "vkRegisterDeviceEventEXT") : instance.getProcAddr( "vkRegisterDeviceEventEXT")); + vkRegisterDisplayEventEXT = PFN_vkRegisterDisplayEventEXT(device ? device.getProcAddr( "vkRegisterDisplayEventEXT") : instance.getProcAddr( "vkRegisterDisplayEventEXT")); + vkRegisterObjectsNVX = PFN_vkRegisterObjectsNVX(device ? device.getProcAddr( "vkRegisterObjectsNVX") : instance.getProcAddr( "vkRegisterObjectsNVX")); + vkReleaseDisplayEXT = PFN_vkReleaseDisplayEXT(device ? device.getProcAddr( "vkReleaseDisplayEXT") : instance.getProcAddr( "vkReleaseDisplayEXT")); + vkResetCommandBuffer = PFN_vkResetCommandBuffer(device ? device.getProcAddr( "vkResetCommandBuffer") : instance.getProcAddr( "vkResetCommandBuffer")); + vkResetCommandPool = PFN_vkResetCommandPool(device ? device.getProcAddr( "vkResetCommandPool") : instance.getProcAddr( "vkResetCommandPool")); + vkResetDescriptorPool = PFN_vkResetDescriptorPool(device ? 
device.getProcAddr( "vkResetDescriptorPool") : instance.getProcAddr( "vkResetDescriptorPool")); + vkResetEvent = PFN_vkResetEvent(device ? device.getProcAddr( "vkResetEvent") : instance.getProcAddr( "vkResetEvent")); + vkResetFences = PFN_vkResetFences(device ? device.getProcAddr( "vkResetFences") : instance.getProcAddr( "vkResetFences")); + vkSetDebugUtilsObjectNameEXT = PFN_vkSetDebugUtilsObjectNameEXT(device ? device.getProcAddr( "vkSetDebugUtilsObjectNameEXT") : instance.getProcAddr( "vkSetDebugUtilsObjectNameEXT")); + vkSetDebugUtilsObjectTagEXT = PFN_vkSetDebugUtilsObjectTagEXT(device ? device.getProcAddr( "vkSetDebugUtilsObjectTagEXT") : instance.getProcAddr( "vkSetDebugUtilsObjectTagEXT")); + vkSetEvent = PFN_vkSetEvent(device ? device.getProcAddr( "vkSetEvent") : instance.getProcAddr( "vkSetEvent")); + vkSetHdrMetadataEXT = PFN_vkSetHdrMetadataEXT(device ? device.getProcAddr( "vkSetHdrMetadataEXT") : instance.getProcAddr( "vkSetHdrMetadataEXT")); + vkSubmitDebugUtilsMessageEXT = PFN_vkSubmitDebugUtilsMessageEXT(instance.getProcAddr( "vkSubmitDebugUtilsMessageEXT")); + vkTrimCommandPool = PFN_vkTrimCommandPool(device ? device.getProcAddr( "vkTrimCommandPool") : instance.getProcAddr( "vkTrimCommandPool")); + vkTrimCommandPoolKHR = PFN_vkTrimCommandPoolKHR(device ? device.getProcAddr( "vkTrimCommandPoolKHR") : instance.getProcAddr( "vkTrimCommandPoolKHR")); + vkUnmapMemory = PFN_vkUnmapMemory(device ? device.getProcAddr( "vkUnmapMemory") : instance.getProcAddr( "vkUnmapMemory")); + vkUnregisterObjectsNVX = PFN_vkUnregisterObjectsNVX(device ? device.getProcAddr( "vkUnregisterObjectsNVX") : instance.getProcAddr( "vkUnregisterObjectsNVX")); + vkUpdateDescriptorSetWithTemplate = PFN_vkUpdateDescriptorSetWithTemplate(device ? device.getProcAddr( "vkUpdateDescriptorSetWithTemplate") : instance.getProcAddr( "vkUpdateDescriptorSetWithTemplate")); + vkUpdateDescriptorSetWithTemplateKHR = PFN_vkUpdateDescriptorSetWithTemplateKHR(device ? device.getProcAddr( "vkUpdateDescriptorSetWithTemplateKHR") : instance.getProcAddr( "vkUpdateDescriptorSetWithTemplateKHR")); + vkUpdateDescriptorSets = PFN_vkUpdateDescriptorSets(device ? device.getProcAddr( "vkUpdateDescriptorSets") : instance.getProcAddr( "vkUpdateDescriptorSets")); + vkWaitForFences = PFN_vkWaitForFences(device ? device.getProcAddr( "vkWaitForFences") : instance.getProcAddr( "vkWaitForFences")); + } + }; +} // namespace VULKAN_HPP_NAMESPACE + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_android.h b/src/refresh/vkpt/include/vulkan/vulkan_android.h new file mode 100644 index 0000000..b9551b1 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_android.h @@ -0,0 +1,126 @@ +#ifndef VULKAN_ANDROID_H_ +#define VULKAN_ANDROID_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_KHR_android_surface 1 +struct ANativeWindow; + +#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6 +#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface" + +typedef VkFlags VkAndroidSurfaceCreateFlagsKHR; + +typedef struct VkAndroidSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAndroidSurfaceCreateFlagsKHR flags; + struct ANativeWindow* window; +} VkAndroidSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR( + VkInstance instance, + const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#define VK_ANDROID_external_memory_android_hardware_buffer 1 +struct AHardwareBuffer; + +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 3 +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer" + +typedef struct VkAndroidHardwareBufferUsageANDROID { + VkStructureType sType; + void* pNext; + uint64_t androidHardwareBufferUsage; +} VkAndroidHardwareBufferUsageANDROID; + +typedef struct VkAndroidHardwareBufferPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeBits; +} VkAndroidHardwareBufferPropertiesANDROID; + +typedef struct VkAndroidHardwareBufferFormatPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkFormat format; + uint64_t externalFormat; + VkFormatFeatureFlags formatFeatures; + VkComponentMapping samplerYcbcrConversionComponents; + VkSamplerYcbcrModelConversion suggestedYcbcrModel; + VkSamplerYcbcrRange suggestedYcbcrRange; + VkChromaLocation suggestedXChromaOffset; + VkChromaLocation suggestedYChromaOffset; +} VkAndroidHardwareBufferFormatPropertiesANDROID; + +typedef struct VkImportAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + struct AHardwareBuffer* buffer; +} VkImportAndroidHardwareBufferInfoANDROID; + +typedef struct VkMemoryGetAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkMemoryGetAndroidHardwareBufferInfoANDROID; + +typedef struct VkExternalFormatANDROID { + VkStructureType sType; + void* pNext; + uint64_t externalFormat; +} VkExternalFormatANDROID; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetAndroidHardwareBufferPropertiesANDROID( + VkDevice device, + const struct AHardwareBuffer* buffer, + VkAndroidHardwareBufferPropertiesANDROID* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryAndroidHardwareBufferANDROID( + VkDevice device, + const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, + struct AHardwareBuffer** pBuffer); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_core.h b/src/refresh/vkpt/include/vulkan/vulkan_core.h new file mode 100644 index 0000000..0f6d8f6 --- /dev/null 
+++ b/src/refresh/vkpt/include/vulkan/vulkan_core.h @@ -0,0 +1,8390 @@ +#ifndef VULKAN_CORE_H_ +#define VULKAN_CORE_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_VERSION_1_0 1 +#include "vk_platform.h" + +#define VK_MAKE_VERSION(major, minor, patch) \ + (((major) << 22) | ((minor) << 12) | (patch)) + +// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead. +//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0 + +// Vulkan 1.0 version number +#define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)// Patch version should always be set to 0 + +#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22) +#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff) +#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff) +// Version of this file +#define VK_HEADER_VERSION 84 + + +#define VK_NULL_HANDLE 0 + + + +#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; + + +#if !defined(VK_DEFINE_NON_DISPATCHABLE_HANDLE) +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object; +#else + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; +#endif +#endif + + + +typedef uint32_t VkFlags; +typedef uint32_t VkBool32; +typedef uint64_t VkDeviceSize; +typedef uint32_t VkSampleMask; + +VK_DEFINE_HANDLE(VkInstance) +VK_DEFINE_HANDLE(VkPhysicalDevice) +VK_DEFINE_HANDLE(VkDevice) +VK_DEFINE_HANDLE(VkQueue) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore) +VK_DEFINE_HANDLE(VkCommandBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool) + +#define VK_LOD_CLAMP_NONE 1000.0f +#define VK_REMAINING_MIP_LEVELS (~0U) 
+#define VK_REMAINING_ARRAY_LAYERS (~0U) +#define VK_WHOLE_SIZE (~0ULL) +#define VK_ATTACHMENT_UNUSED (~0U) +#define VK_TRUE 1 +#define VK_FALSE 0 +#define VK_QUEUE_FAMILY_IGNORED (~0U) +#define VK_SUBPASS_EXTERNAL (~0U) +#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256 +#define VK_UUID_SIZE 16 +#define VK_MAX_MEMORY_TYPES 32 +#define VK_MAX_MEMORY_HEAPS 16 +#define VK_MAX_EXTENSION_NAME_SIZE 256 +#define VK_MAX_DESCRIPTION_SIZE 256 + + +typedef enum VkPipelineCacheHeaderVersion { + VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, + VK_PIPELINE_CACHE_HEADER_VERSION_BEGIN_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE, + VK_PIPELINE_CACHE_HEADER_VERSION_END_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE, + VK_PIPELINE_CACHE_HEADER_VERSION_RANGE_SIZE = (VK_PIPELINE_CACHE_HEADER_VERSION_ONE - VK_PIPELINE_CACHE_HEADER_VERSION_ONE + 1), + VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheHeaderVersion; + +typedef enum VkResult { + VK_SUCCESS = 0, + VK_NOT_READY = 1, + VK_TIMEOUT = 2, + VK_EVENT_SET = 3, + VK_EVENT_RESET = 4, + VK_INCOMPLETE = 5, + VK_ERROR_OUT_OF_HOST_MEMORY = -1, + VK_ERROR_OUT_OF_DEVICE_MEMORY = -2, + VK_ERROR_INITIALIZATION_FAILED = -3, + VK_ERROR_DEVICE_LOST = -4, + VK_ERROR_MEMORY_MAP_FAILED = -5, + VK_ERROR_LAYER_NOT_PRESENT = -6, + VK_ERROR_EXTENSION_NOT_PRESENT = -7, + VK_ERROR_FEATURE_NOT_PRESENT = -8, + VK_ERROR_INCOMPATIBLE_DRIVER = -9, + VK_ERROR_TOO_MANY_OBJECTS = -10, + VK_ERROR_FORMAT_NOT_SUPPORTED = -11, + VK_ERROR_FRAGMENTED_POOL = -12, + VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000, + VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003, + VK_ERROR_SURFACE_LOST_KHR = -1000000000, + VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, + VK_SUBOPTIMAL_KHR = 1000001003, + VK_ERROR_OUT_OF_DATE_KHR = -1000001004, + VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, + VK_ERROR_VALIDATION_FAILED_EXT = -1000011001, + VK_ERROR_INVALID_SHADER_NV = -1000012000, + VK_ERROR_FRAGMENTATION_EXT = -1000161000, + VK_ERROR_NOT_PERMITTED_EXT = -1000174001, + VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, + VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, + VK_RESULT_BEGIN_RANGE = VK_ERROR_FRAGMENTED_POOL, + VK_RESULT_END_RANGE = VK_INCOMPLETE, + VK_RESULT_RANGE_SIZE = (VK_INCOMPLETE - VK_ERROR_FRAGMENTED_POOL + 1), + VK_RESULT_MAX_ENUM = 0x7FFFFFFF +} VkResult; + +typedef enum VkStructureType { + VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, + VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, + VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, + VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, + VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, + VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, + VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, + VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, + VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, + VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, + 
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, + VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, + VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, + VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, + VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, + VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, + VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002, + 
VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = 1000120000, + VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = 1000063000, + VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, + VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007, + VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009, + VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012, + VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000, + VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001, + VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000, + VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000, + VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, + VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, + VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000, + VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, + VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000, + 
VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001, + VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002, + VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000, + VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000, + VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, + VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, + VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, + VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000, + VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001, + VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001, + VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001, + VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = 1000082000, + VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000, + VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX = 1000086000, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX = 1000086001, + VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX = 1000086002, + VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX = 1000086003, + VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX = 1000086004, + VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX = 1000086005, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000, + VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000, + VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001, + VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002, + VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003, + VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000, + VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001, + VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR = 1000109000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR = 1000109001, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR = 1000109002, + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR = 1000109003, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR = 1000109004, + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR = 1000109005, + VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR = 1000109006, + VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000, + VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000, + VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001, + VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002, + VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000, + VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001, + VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002, + VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001, + VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004, + VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000, + VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001, + VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002, + VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003, + VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004, + VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = 1000130000, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = 1000130001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = 1000138000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = 1000138001, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = 1000138002, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = 1000138003, + VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000, + VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001, + VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003, + VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = 1000147000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, + VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000, + VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = 1000161000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = 1000161001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = 1000161002, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = 1000161003, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = 1000161004, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001, + VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, + VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, + VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, + VK_STRUCTURE_TYPE_HIT_SHADER_MODULE_CREATE_INFO_NV = 1000165010, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINTER_CAST_TO_POINTER_FEATURES_KHR = 1000176000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = 1000177000, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000, + VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = 1000180000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = 1000196000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = 1000197000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = 1000199000, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = 1000199001, + VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = 1000211000, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = 
VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, + VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO, + VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO, + VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1), + VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkStructureType; + +typedef enum VkSystemAllocationScope { + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1, + VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2, + 
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4, + VK_SYSTEM_ALLOCATION_SCOPE_BEGIN_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND, + VK_SYSTEM_ALLOCATION_SCOPE_END_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE, + VK_SYSTEM_ALLOCATION_SCOPE_RANGE_SIZE = (VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE - VK_SYSTEM_ALLOCATION_SCOPE_COMMAND + 1), + VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF +} VkSystemAllocationScope; + +typedef enum VkInternalAllocationType { + VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0, + VK_INTERNAL_ALLOCATION_TYPE_BEGIN_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE, + VK_INTERNAL_ALLOCATION_TYPE_END_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE, + VK_INTERNAL_ALLOCATION_TYPE_RANGE_SIZE = (VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE - VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + 1), + VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkInternalAllocationType; + +typedef enum VkFormat { + VK_FORMAT_UNDEFINED = 0, + VK_FORMAT_R4G4_UNORM_PACK8 = 1, + VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, + VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, + VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, + VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, + VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, + VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, + VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, + VK_FORMAT_R8_UNORM = 9, + VK_FORMAT_R8_SNORM = 10, + VK_FORMAT_R8_USCALED = 11, + VK_FORMAT_R8_SSCALED = 12, + VK_FORMAT_R8_UINT = 13, + VK_FORMAT_R8_SINT = 14, + VK_FORMAT_R8_SRGB = 15, + VK_FORMAT_R8G8_UNORM = 16, + VK_FORMAT_R8G8_SNORM = 17, + VK_FORMAT_R8G8_USCALED = 18, + VK_FORMAT_R8G8_SSCALED = 19, + VK_FORMAT_R8G8_UINT = 20, + VK_FORMAT_R8G8_SINT = 21, + VK_FORMAT_R8G8_SRGB = 22, + VK_FORMAT_R8G8B8_UNORM = 23, + VK_FORMAT_R8G8B8_SNORM = 24, + VK_FORMAT_R8G8B8_USCALED = 25, + VK_FORMAT_R8G8B8_SSCALED = 26, + VK_FORMAT_R8G8B8_UINT = 27, + VK_FORMAT_R8G8B8_SINT = 28, + VK_FORMAT_R8G8B8_SRGB = 29, + VK_FORMAT_B8G8R8_UNORM = 30, + VK_FORMAT_B8G8R8_SNORM = 31, + VK_FORMAT_B8G8R8_USCALED = 32, + VK_FORMAT_B8G8R8_SSCALED = 33, + VK_FORMAT_B8G8R8_UINT = 34, + VK_FORMAT_B8G8R8_SINT = 35, + VK_FORMAT_B8G8R8_SRGB = 36, + VK_FORMAT_R8G8B8A8_UNORM = 37, + VK_FORMAT_R8G8B8A8_SNORM = 38, + VK_FORMAT_R8G8B8A8_USCALED = 39, + VK_FORMAT_R8G8B8A8_SSCALED = 40, + VK_FORMAT_R8G8B8A8_UINT = 41, + VK_FORMAT_R8G8B8A8_SINT = 42, + VK_FORMAT_R8G8B8A8_SRGB = 43, + VK_FORMAT_B8G8R8A8_UNORM = 44, + VK_FORMAT_B8G8R8A8_SNORM = 45, + VK_FORMAT_B8G8R8A8_USCALED = 46, + VK_FORMAT_B8G8R8A8_SSCALED = 47, + VK_FORMAT_B8G8R8A8_UINT = 48, + VK_FORMAT_B8G8R8A8_SINT = 49, + VK_FORMAT_B8G8R8A8_SRGB = 50, + VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, + VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, + VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, + VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, + VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, + VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, + VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, + VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, + VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, + VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, + VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, + VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, + VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, + VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, + VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, + VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, + VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, + VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, + VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, + VK_FORMAT_R16_UNORM = 70, + VK_FORMAT_R16_SNORM = 71, + VK_FORMAT_R16_USCALED = 72, + VK_FORMAT_R16_SSCALED = 73, + VK_FORMAT_R16_UINT = 74, + VK_FORMAT_R16_SINT = 75, + VK_FORMAT_R16_SFLOAT = 
76, + VK_FORMAT_R16G16_UNORM = 77, + VK_FORMAT_R16G16_SNORM = 78, + VK_FORMAT_R16G16_USCALED = 79, + VK_FORMAT_R16G16_SSCALED = 80, + VK_FORMAT_R16G16_UINT = 81, + VK_FORMAT_R16G16_SINT = 82, + VK_FORMAT_R16G16_SFLOAT = 83, + VK_FORMAT_R16G16B16_UNORM = 84, + VK_FORMAT_R16G16B16_SNORM = 85, + VK_FORMAT_R16G16B16_USCALED = 86, + VK_FORMAT_R16G16B16_SSCALED = 87, + VK_FORMAT_R16G16B16_UINT = 88, + VK_FORMAT_R16G16B16_SINT = 89, + VK_FORMAT_R16G16B16_SFLOAT = 90, + VK_FORMAT_R16G16B16A16_UNORM = 91, + VK_FORMAT_R16G16B16A16_SNORM = 92, + VK_FORMAT_R16G16B16A16_USCALED = 93, + VK_FORMAT_R16G16B16A16_SSCALED = 94, + VK_FORMAT_R16G16B16A16_UINT = 95, + VK_FORMAT_R16G16B16A16_SINT = 96, + VK_FORMAT_R16G16B16A16_SFLOAT = 97, + VK_FORMAT_R32_UINT = 98, + VK_FORMAT_R32_SINT = 99, + VK_FORMAT_R32_SFLOAT = 100, + VK_FORMAT_R32G32_UINT = 101, + VK_FORMAT_R32G32_SINT = 102, + VK_FORMAT_R32G32_SFLOAT = 103, + VK_FORMAT_R32G32B32_UINT = 104, + VK_FORMAT_R32G32B32_SINT = 105, + VK_FORMAT_R32G32B32_SFLOAT = 106, + VK_FORMAT_R32G32B32A32_UINT = 107, + VK_FORMAT_R32G32B32A32_SINT = 108, + VK_FORMAT_R32G32B32A32_SFLOAT = 109, + VK_FORMAT_R64_UINT = 110, + VK_FORMAT_R64_SINT = 111, + VK_FORMAT_R64_SFLOAT = 112, + VK_FORMAT_R64G64_UINT = 113, + VK_FORMAT_R64G64_SINT = 114, + VK_FORMAT_R64G64_SFLOAT = 115, + VK_FORMAT_R64G64B64_UINT = 116, + VK_FORMAT_R64G64B64_SINT = 117, + VK_FORMAT_R64G64B64_SFLOAT = 118, + VK_FORMAT_R64G64B64A64_UINT = 119, + VK_FORMAT_R64G64B64A64_SINT = 120, + VK_FORMAT_R64G64B64A64_SFLOAT = 121, + VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, + VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, + VK_FORMAT_D16_UNORM = 124, + VK_FORMAT_X8_D24_UNORM_PACK32 = 125, + VK_FORMAT_D32_SFLOAT = 126, + VK_FORMAT_S8_UINT = 127, + VK_FORMAT_D16_UNORM_S8_UINT = 128, + VK_FORMAT_D24_UNORM_S8_UINT = 129, + VK_FORMAT_D32_SFLOAT_S8_UINT = 130, + VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, + VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, + VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, + VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, + VK_FORMAT_BC2_UNORM_BLOCK = 135, + VK_FORMAT_BC2_SRGB_BLOCK = 136, + VK_FORMAT_BC3_UNORM_BLOCK = 137, + VK_FORMAT_BC3_SRGB_BLOCK = 138, + VK_FORMAT_BC4_UNORM_BLOCK = 139, + VK_FORMAT_BC4_SNORM_BLOCK = 140, + VK_FORMAT_BC5_UNORM_BLOCK = 141, + VK_FORMAT_BC5_SNORM_BLOCK = 142, + VK_FORMAT_BC6H_UFLOAT_BLOCK = 143, + VK_FORMAT_BC6H_SFLOAT_BLOCK = 144, + VK_FORMAT_BC7_UNORM_BLOCK = 145, + VK_FORMAT_BC7_SRGB_BLOCK = 146, + VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147, + VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148, + VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149, + VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150, + VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151, + VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152, + VK_FORMAT_EAC_R11_UNORM_BLOCK = 153, + VK_FORMAT_EAC_R11_SNORM_BLOCK = 154, + VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155, + VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156, + VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157, + VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158, + VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159, + VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160, + VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161, + VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162, + VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163, + VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164, + VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165, + VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166, + VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167, + VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168, + VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169, + VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170, + VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171, + VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172, + VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173, + VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 
174, + VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175, + VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176, + VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177, + VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178, + VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179, + VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180, + VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181, + VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182, + VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183, + VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184, + VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000, + VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006, + VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016, + VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026, + VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027, + VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033, + VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000, + VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001, + VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002, + VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003, + VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004, + VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005, + VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, + VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, + VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM, + VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, + VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR 
= VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM, + VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, + VK_FORMAT_BEGIN_RANGE = VK_FORMAT_UNDEFINED, + VK_FORMAT_END_RANGE = VK_FORMAT_ASTC_12x12_SRGB_BLOCK, + VK_FORMAT_RANGE_SIZE = (VK_FORMAT_ASTC_12x12_SRGB_BLOCK - VK_FORMAT_UNDEFINED + 1), + VK_FORMAT_MAX_ENUM = 0x7FFFFFFF +} VkFormat; + +typedef enum VkImageType { + VK_IMAGE_TYPE_1D = 0, + VK_IMAGE_TYPE_2D = 1, + VK_IMAGE_TYPE_3D = 2, + VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_TYPE_1D, + VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_TYPE_3D, + VK_IMAGE_TYPE_RANGE_SIZE = (VK_IMAGE_TYPE_3D - VK_IMAGE_TYPE_1D + 1), + VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageType; + +typedef enum VkImageTiling { + VK_IMAGE_TILING_OPTIMAL = 0, + VK_IMAGE_TILING_LINEAR = 1, + VK_IMAGE_TILING_BEGIN_RANGE = VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_TILING_END_RANGE = VK_IMAGE_TILING_LINEAR, + VK_IMAGE_TILING_RANGE_SIZE = (VK_IMAGE_TILING_LINEAR - VK_IMAGE_TILING_OPTIMAL + 1), + VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF +} VkImageTiling; + +typedef enum VkPhysicalDeviceType { + VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, + VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1, + VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2, + 
VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3, + VK_PHYSICAL_DEVICE_TYPE_CPU = 4, + VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE = VK_PHYSICAL_DEVICE_TYPE_OTHER, + VK_PHYSICAL_DEVICE_TYPE_END_RANGE = VK_PHYSICAL_DEVICE_TYPE_CPU, + VK_PHYSICAL_DEVICE_TYPE_RANGE_SIZE = (VK_PHYSICAL_DEVICE_TYPE_CPU - VK_PHYSICAL_DEVICE_TYPE_OTHER + 1), + VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkPhysicalDeviceType; + +typedef enum VkQueryType { + VK_QUERY_TYPE_OCCLUSION = 0, + VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, + VK_QUERY_TYPE_TIMESTAMP = 2, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000, + VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION, + VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP, + VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1), + VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkQueryType; + +typedef enum VkSharingMode { + VK_SHARING_MODE_EXCLUSIVE = 0, + VK_SHARING_MODE_CONCURRENT = 1, + VK_SHARING_MODE_BEGIN_RANGE = VK_SHARING_MODE_EXCLUSIVE, + VK_SHARING_MODE_END_RANGE = VK_SHARING_MODE_CONCURRENT, + VK_SHARING_MODE_RANGE_SIZE = (VK_SHARING_MODE_CONCURRENT - VK_SHARING_MODE_EXCLUSIVE + 1), + VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSharingMode; + +typedef enum VkImageLayout { + VK_IMAGE_LAYOUT_UNDEFINED = 0, + VK_IMAGE_LAYOUT_GENERAL = 1, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7, + VK_IMAGE_LAYOUT_PREINITIALIZED = 8, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001, + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002, + VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_PREINITIALIZED, + VK_IMAGE_LAYOUT_RANGE_SIZE = (VK_IMAGE_LAYOUT_PREINITIALIZED - VK_IMAGE_LAYOUT_UNDEFINED + 1), + VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF +} VkImageLayout; + +typedef enum VkImageViewType { + VK_IMAGE_VIEW_TYPE_1D = 0, + VK_IMAGE_VIEW_TYPE_2D = 1, + VK_IMAGE_VIEW_TYPE_3D = 2, + VK_IMAGE_VIEW_TYPE_CUBE = 3, + VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4, + VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5, + VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6, + VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_TYPE_1D, + VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, + VK_IMAGE_VIEW_TYPE_RANGE_SIZE = (VK_IMAGE_VIEW_TYPE_CUBE_ARRAY - VK_IMAGE_VIEW_TYPE_1D + 1), + VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageViewType; + +typedef enum VkComponentSwizzle { + VK_COMPONENT_SWIZZLE_IDENTITY = 0, + VK_COMPONENT_SWIZZLE_ZERO = 1, + VK_COMPONENT_SWIZZLE_ONE = 2, + VK_COMPONENT_SWIZZLE_R = 3, + VK_COMPONENT_SWIZZLE_G = 4, + VK_COMPONENT_SWIZZLE_B = 5, + VK_COMPONENT_SWIZZLE_A = 6, + VK_COMPONENT_SWIZZLE_BEGIN_RANGE = VK_COMPONENT_SWIZZLE_IDENTITY, + VK_COMPONENT_SWIZZLE_END_RANGE = VK_COMPONENT_SWIZZLE_A, + VK_COMPONENT_SWIZZLE_RANGE_SIZE = (VK_COMPONENT_SWIZZLE_A - VK_COMPONENT_SWIZZLE_IDENTITY + 1), + VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF +} VkComponentSwizzle; + +typedef enum VkVertexInputRate { + 
VK_VERTEX_INPUT_RATE_VERTEX = 0, + VK_VERTEX_INPUT_RATE_INSTANCE = 1, + VK_VERTEX_INPUT_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_RATE_VERTEX, + VK_VERTEX_INPUT_RATE_END_RANGE = VK_VERTEX_INPUT_RATE_INSTANCE, + VK_VERTEX_INPUT_RATE_RANGE_SIZE = (VK_VERTEX_INPUT_RATE_INSTANCE - VK_VERTEX_INPUT_RATE_VERTEX + 1), + VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF +} VkVertexInputRate; + +typedef enum VkPrimitiveTopology { + VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, + VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10, + VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_PRIMITIVE_TOPOLOGY_POINT_LIST, + VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, + VK_PRIMITIVE_TOPOLOGY_RANGE_SIZE = (VK_PRIMITIVE_TOPOLOGY_PATCH_LIST - VK_PRIMITIVE_TOPOLOGY_POINT_LIST + 1), + VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF +} VkPrimitiveTopology; + +typedef enum VkPolygonMode { + VK_POLYGON_MODE_FILL = 0, + VK_POLYGON_MODE_LINE = 1, + VK_POLYGON_MODE_POINT = 2, + VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000, + VK_POLYGON_MODE_BEGIN_RANGE = VK_POLYGON_MODE_FILL, + VK_POLYGON_MODE_END_RANGE = VK_POLYGON_MODE_POINT, + VK_POLYGON_MODE_RANGE_SIZE = (VK_POLYGON_MODE_POINT - VK_POLYGON_MODE_FILL + 1), + VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF +} VkPolygonMode; + +typedef enum VkFrontFace { + VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, + VK_FRONT_FACE_CLOCKWISE = 1, + VK_FRONT_FACE_BEGIN_RANGE = VK_FRONT_FACE_COUNTER_CLOCKWISE, + VK_FRONT_FACE_END_RANGE = VK_FRONT_FACE_CLOCKWISE, + VK_FRONT_FACE_RANGE_SIZE = (VK_FRONT_FACE_CLOCKWISE - VK_FRONT_FACE_COUNTER_CLOCKWISE + 1), + VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF +} VkFrontFace; + +typedef enum VkCompareOp { + VK_COMPARE_OP_NEVER = 0, + VK_COMPARE_OP_LESS = 1, + VK_COMPARE_OP_EQUAL = 2, + VK_COMPARE_OP_LESS_OR_EQUAL = 3, + VK_COMPARE_OP_GREATER = 4, + VK_COMPARE_OP_NOT_EQUAL = 5, + VK_COMPARE_OP_GREATER_OR_EQUAL = 6, + VK_COMPARE_OP_ALWAYS = 7, + VK_COMPARE_OP_BEGIN_RANGE = VK_COMPARE_OP_NEVER, + VK_COMPARE_OP_END_RANGE = VK_COMPARE_OP_ALWAYS, + VK_COMPARE_OP_RANGE_SIZE = (VK_COMPARE_OP_ALWAYS - VK_COMPARE_OP_NEVER + 1), + VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF +} VkCompareOp; + +typedef enum VkStencilOp { + VK_STENCIL_OP_KEEP = 0, + VK_STENCIL_OP_ZERO = 1, + VK_STENCIL_OP_REPLACE = 2, + VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, + VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, + VK_STENCIL_OP_INVERT = 5, + VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, + VK_STENCIL_OP_DECREMENT_AND_WRAP = 7, + VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP, + VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DECREMENT_AND_WRAP, + VK_STENCIL_OP_RANGE_SIZE = (VK_STENCIL_OP_DECREMENT_AND_WRAP - VK_STENCIL_OP_KEEP + 1), + VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF +} VkStencilOp; + +typedef enum VkLogicOp { + VK_LOGIC_OP_CLEAR = 0, + VK_LOGIC_OP_AND = 1, + VK_LOGIC_OP_AND_REVERSE = 2, + VK_LOGIC_OP_COPY = 3, + VK_LOGIC_OP_AND_INVERTED = 4, + VK_LOGIC_OP_NO_OP = 5, + VK_LOGIC_OP_XOR = 6, + VK_LOGIC_OP_OR = 7, + VK_LOGIC_OP_NOR = 8, + VK_LOGIC_OP_EQUIVALENT = 9, + VK_LOGIC_OP_INVERT = 10, + VK_LOGIC_OP_OR_REVERSE = 11, + VK_LOGIC_OP_COPY_INVERTED = 12, + VK_LOGIC_OP_OR_INVERTED = 13, + VK_LOGIC_OP_NAND = 14, + VK_LOGIC_OP_SET = 15, + 
VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_CLEAR, + VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET, + VK_LOGIC_OP_RANGE_SIZE = (VK_LOGIC_OP_SET - VK_LOGIC_OP_CLEAR + 1), + VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF +} VkLogicOp; + +typedef enum VkBlendFactor { + VK_BLEND_FACTOR_ZERO = 0, + VK_BLEND_FACTOR_ONE = 1, + VK_BLEND_FACTOR_SRC_COLOR = 2, + VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, + VK_BLEND_FACTOR_DST_COLOR = 4, + VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, + VK_BLEND_FACTOR_SRC_ALPHA = 6, + VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, + VK_BLEND_FACTOR_DST_ALPHA = 8, + VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, + VK_BLEND_FACTOR_CONSTANT_COLOR = 10, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, + VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, + VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, + VK_BLEND_FACTOR_SRC1_COLOR = 15, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, + VK_BLEND_FACTOR_SRC1_ALPHA = 17, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18, + VK_BLEND_FACTOR_BEGIN_RANGE = VK_BLEND_FACTOR_ZERO, + VK_BLEND_FACTOR_END_RANGE = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA, + VK_BLEND_FACTOR_RANGE_SIZE = (VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA - VK_BLEND_FACTOR_ZERO + 1), + VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF +} VkBlendFactor; + +typedef enum VkBlendOp { + VK_BLEND_OP_ADD = 0, + VK_BLEND_OP_SUBTRACT = 1, + VK_BLEND_OP_REVERSE_SUBTRACT = 2, + VK_BLEND_OP_MIN = 3, + VK_BLEND_OP_MAX = 4, + VK_BLEND_OP_ZERO_EXT = 1000148000, + VK_BLEND_OP_SRC_EXT = 1000148001, + VK_BLEND_OP_DST_EXT = 1000148002, + VK_BLEND_OP_SRC_OVER_EXT = 1000148003, + VK_BLEND_OP_DST_OVER_EXT = 1000148004, + VK_BLEND_OP_SRC_IN_EXT = 1000148005, + VK_BLEND_OP_DST_IN_EXT = 1000148006, + VK_BLEND_OP_SRC_OUT_EXT = 1000148007, + VK_BLEND_OP_DST_OUT_EXT = 1000148008, + VK_BLEND_OP_SRC_ATOP_EXT = 1000148009, + VK_BLEND_OP_DST_ATOP_EXT = 1000148010, + VK_BLEND_OP_XOR_EXT = 1000148011, + VK_BLEND_OP_MULTIPLY_EXT = 1000148012, + VK_BLEND_OP_SCREEN_EXT = 1000148013, + VK_BLEND_OP_OVERLAY_EXT = 1000148014, + VK_BLEND_OP_DARKEN_EXT = 1000148015, + VK_BLEND_OP_LIGHTEN_EXT = 1000148016, + VK_BLEND_OP_COLORDODGE_EXT = 1000148017, + VK_BLEND_OP_COLORBURN_EXT = 1000148018, + VK_BLEND_OP_HARDLIGHT_EXT = 1000148019, + VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020, + VK_BLEND_OP_DIFFERENCE_EXT = 1000148021, + VK_BLEND_OP_EXCLUSION_EXT = 1000148022, + VK_BLEND_OP_INVERT_EXT = 1000148023, + VK_BLEND_OP_INVERT_RGB_EXT = 1000148024, + VK_BLEND_OP_LINEARDODGE_EXT = 1000148025, + VK_BLEND_OP_LINEARBURN_EXT = 1000148026, + VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027, + VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028, + VK_BLEND_OP_PINLIGHT_EXT = 1000148029, + VK_BLEND_OP_HARDMIX_EXT = 1000148030, + VK_BLEND_OP_HSL_HUE_EXT = 1000148031, + VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032, + VK_BLEND_OP_HSL_COLOR_EXT = 1000148033, + VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034, + VK_BLEND_OP_PLUS_EXT = 1000148035, + VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036, + VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037, + VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038, + VK_BLEND_OP_MINUS_EXT = 1000148039, + VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040, + VK_BLEND_OP_CONTRAST_EXT = 1000148041, + VK_BLEND_OP_INVERT_OVG_EXT = 1000148042, + VK_BLEND_OP_RED_EXT = 1000148043, + VK_BLEND_OP_GREEN_EXT = 1000148044, + VK_BLEND_OP_BLUE_EXT = 1000148045, + VK_BLEND_OP_BEGIN_RANGE = VK_BLEND_OP_ADD, + VK_BLEND_OP_END_RANGE = VK_BLEND_OP_MAX, + VK_BLEND_OP_RANGE_SIZE = (VK_BLEND_OP_MAX - VK_BLEND_OP_ADD + 1), + VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF +} VkBlendOp; + +typedef enum VkDynamicState { + 
VK_DYNAMIC_STATE_VIEWPORT = 0, + VK_DYNAMIC_STATE_SCISSOR = 1, + VK_DYNAMIC_STATE_LINE_WIDTH = 2, + VK_DYNAMIC_STATE_DEPTH_BIAS = 3, + VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, + VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, + VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, + VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, + VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8, + VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, + VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000, + VK_DYNAMIC_STATE_BEGIN_RANGE = VK_DYNAMIC_STATE_VIEWPORT, + VK_DYNAMIC_STATE_END_RANGE = VK_DYNAMIC_STATE_STENCIL_REFERENCE, + VK_DYNAMIC_STATE_RANGE_SIZE = (VK_DYNAMIC_STATE_STENCIL_REFERENCE - VK_DYNAMIC_STATE_VIEWPORT + 1), + VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF +} VkDynamicState; + +typedef enum VkFilter { + VK_FILTER_NEAREST = 0, + VK_FILTER_LINEAR = 1, + VK_FILTER_CUBIC_IMG = 1000015000, + VK_FILTER_BEGIN_RANGE = VK_FILTER_NEAREST, + VK_FILTER_END_RANGE = VK_FILTER_LINEAR, + VK_FILTER_RANGE_SIZE = (VK_FILTER_LINEAR - VK_FILTER_NEAREST + 1), + VK_FILTER_MAX_ENUM = 0x7FFFFFFF +} VkFilter; + +typedef enum VkSamplerMipmapMode { + VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, + VK_SAMPLER_MIPMAP_MODE_LINEAR = 1, + VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE = VK_SAMPLER_MIPMAP_MODE_NEAREST, + VK_SAMPLER_MIPMAP_MODE_END_RANGE = VK_SAMPLER_MIPMAP_MODE_LINEAR, + VK_SAMPLER_MIPMAP_MODE_RANGE_SIZE = (VK_SAMPLER_MIPMAP_MODE_LINEAR - VK_SAMPLER_MIPMAP_MODE_NEAREST + 1), + VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerMipmapMode; + +typedef enum VkSamplerAddressMode { + VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, + VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, + VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE = VK_SAMPLER_ADDRESS_MODE_REPEAT, + VK_SAMPLER_ADDRESS_MODE_END_RANGE = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, + VK_SAMPLER_ADDRESS_MODE_RANGE_SIZE = (VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER - VK_SAMPLER_ADDRESS_MODE_REPEAT + 1), + VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerAddressMode; + +typedef enum VkBorderColor { + VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, + VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, + VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, + VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, + VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, + VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5, + VK_BORDER_COLOR_BEGIN_RANGE = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, + VK_BORDER_COLOR_END_RANGE = VK_BORDER_COLOR_INT_OPAQUE_WHITE, + VK_BORDER_COLOR_RANGE_SIZE = (VK_BORDER_COLOR_INT_OPAQUE_WHITE - VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK + 1), + VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF +} VkBorderColor; + +typedef enum VkDescriptorType { + VK_DESCRIPTOR_TYPE_SAMPLER = 0, + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, + VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, + VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, + VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, + VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, + VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, + VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, + VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER, + VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, + VK_DESCRIPTOR_TYPE_RANGE_SIZE = 
(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1), + VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorType; + +typedef enum VkAttachmentLoadOp { + VK_ATTACHMENT_LOAD_OP_LOAD = 0, + VK_ATTACHMENT_LOAD_OP_CLEAR = 1, + VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, + VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD, + VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE, + VK_ATTACHMENT_LOAD_OP_RANGE_SIZE = (VK_ATTACHMENT_LOAD_OP_DONT_CARE - VK_ATTACHMENT_LOAD_OP_LOAD + 1), + VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentLoadOp; + +typedef enum VkAttachmentStoreOp { + VK_ATTACHMENT_STORE_OP_STORE = 0, + VK_ATTACHMENT_STORE_OP_DONT_CARE = 1, + VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE, + VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE, + VK_ATTACHMENT_STORE_OP_RANGE_SIZE = (VK_ATTACHMENT_STORE_OP_DONT_CARE - VK_ATTACHMENT_STORE_OP_STORE + 1), + VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentStoreOp; + +typedef enum VkPipelineBindPoint { + VK_PIPELINE_BIND_POINT_GRAPHICS = 0, + VK_PIPELINE_BIND_POINT_COMPUTE = 1, + VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = 1000165000, + VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS, + VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE, + VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1), + VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF +} VkPipelineBindPoint; + +typedef enum VkCommandBufferLevel { + VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, + VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1, + VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + VK_COMMAND_BUFFER_LEVEL_END_RANGE = VK_COMMAND_BUFFER_LEVEL_SECONDARY, + VK_COMMAND_BUFFER_LEVEL_RANGE_SIZE = (VK_COMMAND_BUFFER_LEVEL_SECONDARY - VK_COMMAND_BUFFER_LEVEL_PRIMARY + 1), + VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferLevel; + +typedef enum VkIndexType { + VK_INDEX_TYPE_UINT16 = 0, + VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_NONE_NV = 1000165000, + VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16, + VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32, + VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1), + VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkIndexType; + +typedef enum VkSubpassContents { + VK_SUBPASS_CONTENTS_INLINE = 0, + VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1, + VK_SUBPASS_CONTENTS_BEGIN_RANGE = VK_SUBPASS_CONTENTS_INLINE, + VK_SUBPASS_CONTENTS_END_RANGE = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, + VK_SUBPASS_CONTENTS_RANGE_SIZE = (VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS - VK_SUBPASS_CONTENTS_INLINE + 1), + VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassContents; + +typedef enum VkObjectType { + VK_OBJECT_TYPE_UNKNOWN = 0, + VK_OBJECT_TYPE_INSTANCE = 1, + VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, + VK_OBJECT_TYPE_DEVICE = 3, + VK_OBJECT_TYPE_QUEUE = 4, + VK_OBJECT_TYPE_SEMAPHORE = 5, + VK_OBJECT_TYPE_COMMAND_BUFFER = 6, + VK_OBJECT_TYPE_FENCE = 7, + VK_OBJECT_TYPE_DEVICE_MEMORY = 8, + VK_OBJECT_TYPE_BUFFER = 9, + VK_OBJECT_TYPE_IMAGE = 10, + VK_OBJECT_TYPE_EVENT = 11, + VK_OBJECT_TYPE_QUERY_POOL = 12, + VK_OBJECT_TYPE_BUFFER_VIEW = 13, + VK_OBJECT_TYPE_IMAGE_VIEW = 14, + VK_OBJECT_TYPE_SHADER_MODULE = 15, + VK_OBJECT_TYPE_PIPELINE_CACHE = 16, + VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, + VK_OBJECT_TYPE_RENDER_PASS = 18, + VK_OBJECT_TYPE_PIPELINE = 19, + VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, + VK_OBJECT_TYPE_SAMPLER = 21, + 
VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, + VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, + VK_OBJECT_TYPE_FRAMEBUFFER = 24, + VK_OBJECT_TYPE_COMMAND_POOL = 25, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000, + VK_OBJECT_TYPE_SURFACE_KHR = 1000000000, + VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000, + VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000, + VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001, + VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000, + VK_OBJECT_TYPE_OBJECT_TABLE_NVX = 1000086000, + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX = 1000086001, + VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000, + VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, + VK_OBJECT_TYPE_BEGIN_RANGE = VK_OBJECT_TYPE_UNKNOWN, + VK_OBJECT_TYPE_END_RANGE = VK_OBJECT_TYPE_COMMAND_POOL, + VK_OBJECT_TYPE_RANGE_SIZE = (VK_OBJECT_TYPE_COMMAND_POOL - VK_OBJECT_TYPE_UNKNOWN + 1), + VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkObjectType; + +typedef enum VkVendorId { + VK_VENDOR_ID_VIV = 0x10001, + VK_VENDOR_ID_VSI = 0x10002, + VK_VENDOR_ID_KAZAN = 0x10003, + VK_VENDOR_ID_BEGIN_RANGE = VK_VENDOR_ID_VIV, + VK_VENDOR_ID_END_RANGE = VK_VENDOR_ID_KAZAN, + VK_VENDOR_ID_RANGE_SIZE = (VK_VENDOR_ID_KAZAN - VK_VENDOR_ID_VIV + 1), + VK_VENDOR_ID_MAX_ENUM = 0x7FFFFFFF +} VkVendorId; + +typedef VkFlags VkInstanceCreateFlags; + +typedef enum VkFormatFeatureFlagBits { + VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001, + VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002, + VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004, + VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020, + VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100, + VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200, + VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400, + VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000, + VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = 0x00010000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = 
VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, + VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFormatFeatureFlagBits; +typedef VkFlags VkFormatFeatureFlags; + +typedef enum VkImageUsageFlagBits { + VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004, + VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010, + VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020, + VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, + VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageUsageFlagBits; +typedef VkFlags VkImageUsageFlags; + +typedef enum VkImageCreateFlagBits { + VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008, + VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010, + VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100, + VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800, + VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200, + VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, + VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT, + VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT, + VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageCreateFlagBits; +typedef VkFlags VkImageCreateFlags; + +typedef enum VkSampleCountFlagBits { + VK_SAMPLE_COUNT_1_BIT = 0x00000001, + VK_SAMPLE_COUNT_2_BIT = 0x00000002, + VK_SAMPLE_COUNT_4_BIT = 0x00000004, + VK_SAMPLE_COUNT_8_BIT = 0x00000008, + VK_SAMPLE_COUNT_16_BIT = 0x00000010, + VK_SAMPLE_COUNT_32_BIT = 0x00000020, + VK_SAMPLE_COUNT_64_BIT = 0x00000040, + VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSampleCountFlagBits; +typedef VkFlags VkSampleCountFlags; + +typedef enum VkQueueFlagBits { + VK_QUEUE_GRAPHICS_BIT = 0x00000001, + VK_QUEUE_COMPUTE_BIT = 0x00000002, + VK_QUEUE_TRANSFER_BIT = 0x00000004, + VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008, + VK_QUEUE_PROTECTED_BIT = 0x00000010, + VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueueFlagBits; +typedef VkFlags 
VkQueueFlags; + +typedef enum VkMemoryPropertyFlagBits { + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002, + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004, + VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008, + VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010, + VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020, + VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryPropertyFlagBits; +typedef VkFlags VkMemoryPropertyFlags; + +typedef enum VkMemoryHeapFlagBits { + VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT, + VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryHeapFlagBits; +typedef VkFlags VkMemoryHeapFlags; +typedef VkFlags VkDeviceCreateFlags; + +typedef enum VkDeviceQueueCreateFlagBits { + VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001, + VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDeviceQueueCreateFlagBits; +typedef VkFlags VkDeviceQueueCreateFlags; + +typedef enum VkPipelineStageFlagBits { + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001, + VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004, + VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008, + VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010, + VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020, + VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100, + VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800, + VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000, + VK_PIPELINE_STAGE_HOST_BIT = 0x00004000, + VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000, + VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000, + VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX = 0x00020000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = 0x00200000, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000, + VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000, + VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000, + VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineStageFlagBits; +typedef VkFlags VkPipelineStageFlags; +typedef VkFlags VkMemoryMapFlags; + +typedef enum VkImageAspectFlagBits { + VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001, + VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002, + VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004, + VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008, + VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010, + VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020, + VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040, + VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT, + VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT, + VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT, + VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageAspectFlagBits; +typedef VkFlags VkImageAspectFlags; + +typedef enum VkSparseImageFormatFlagBits { + VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001, + VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002, + VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004, + VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseImageFormatFlagBits; +typedef VkFlags 
VkSparseImageFormatFlags; + +typedef enum VkSparseMemoryBindFlagBits { + VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001, + VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseMemoryBindFlagBits; +typedef VkFlags VkSparseMemoryBindFlags; + +typedef enum VkFenceCreateFlagBits { + VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001, + VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceCreateFlagBits; +typedef VkFlags VkFenceCreateFlags; +typedef VkFlags VkSemaphoreCreateFlags; +typedef VkFlags VkEventCreateFlags; +typedef VkFlags VkQueryPoolCreateFlags; + +typedef enum VkQueryPipelineStatisticFlagBits { + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001, + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002, + VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040, + VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200, + VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400, + VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryPipelineStatisticFlagBits; +typedef VkFlags VkQueryPipelineStatisticFlags; + +typedef enum VkQueryResultFlagBits { + VK_QUERY_RESULT_64_BIT = 0x00000001, + VK_QUERY_RESULT_WAIT_BIT = 0x00000002, + VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004, + VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008, + VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryResultFlagBits; +typedef VkFlags VkQueryResultFlags; + +typedef enum VkBufferCreateFlagBits { + VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008, + VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferCreateFlagBits; +typedef VkFlags VkBufferCreateFlags; + +typedef enum VkBufferUsageFlagBits { + VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004, + VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008, + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010, + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020, + VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040, + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080, + VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100, + VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200, + VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = 0x00000400, + VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferUsageFlagBits; +typedef VkFlags VkBufferUsageFlags; +typedef VkFlags VkBufferViewCreateFlags; +typedef VkFlags VkImageViewCreateFlags; +typedef VkFlags VkShaderModuleCreateFlags; +typedef VkFlags VkPipelineCacheCreateFlags; + +typedef enum VkPipelineCreateFlagBits { + VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001, + VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002, + VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008, + VK_PIPELINE_CREATE_DISPATCH_BASE = 0x00000010, + 
VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE, + VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCreateFlagBits; +typedef VkFlags VkPipelineCreateFlags; +typedef VkFlags VkPipelineShaderStageCreateFlags; + +typedef enum VkShaderStageFlagBits { + VK_SHADER_STAGE_VERTEX_BIT = 0x00000001, + VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, + VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, + VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, + VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, + VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, + VK_SHADER_STAGE_ALL = 0x7FFFFFFF, + VK_SHADER_STAGE_RAYGEN_BIT_NV = 0x00000100, + VK_SHADER_STAGE_ANY_HIT_BIT_NV = 0x00000200, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = 0x00000400, + VK_SHADER_STAGE_MISS_BIT_NV = 0x00000800, + VK_SHADER_STAGE_INTERSECTION_BIT_NV = 0x00001000, + VK_SHADER_STAGE_CALLABLE_BIT_NV = 0x00002000, + VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040, + VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080, + VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderStageFlagBits; +typedef VkFlags VkPipelineVertexInputStateCreateFlags; +typedef VkFlags VkPipelineInputAssemblyStateCreateFlags; +typedef VkFlags VkPipelineTessellationStateCreateFlags; +typedef VkFlags VkPipelineViewportStateCreateFlags; +typedef VkFlags VkPipelineRasterizationStateCreateFlags; + +typedef enum VkCullModeFlagBits { + VK_CULL_MODE_NONE = 0, + VK_CULL_MODE_FRONT_BIT = 0x00000001, + VK_CULL_MODE_BACK_BIT = 0x00000002, + VK_CULL_MODE_FRONT_AND_BACK = 0x00000003, + VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCullModeFlagBits; +typedef VkFlags VkCullModeFlags; +typedef VkFlags VkPipelineMultisampleStateCreateFlags; +typedef VkFlags VkPipelineDepthStencilStateCreateFlags; +typedef VkFlags VkPipelineColorBlendStateCreateFlags; + +typedef enum VkColorComponentFlagBits { + VK_COLOR_COMPONENT_R_BIT = 0x00000001, + VK_COLOR_COMPONENT_G_BIT = 0x00000002, + VK_COLOR_COMPONENT_B_BIT = 0x00000004, + VK_COLOR_COMPONENT_A_BIT = 0x00000008, + VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkColorComponentFlagBits; +typedef VkFlags VkColorComponentFlags; +typedef VkFlags VkPipelineDynamicStateCreateFlags; +typedef VkFlags VkPipelineLayoutCreateFlags; +typedef VkFlags VkShaderStageFlags; +typedef VkFlags VkSamplerCreateFlags; + +typedef enum VkDescriptorSetLayoutCreateFlagBits { + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = 0x00000002, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorSetLayoutCreateFlagBits; +typedef VkFlags VkDescriptorSetLayoutCreateFlags; + +typedef enum VkDescriptorPoolCreateFlagBits { + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = 0x00000002, + VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorPoolCreateFlagBits; +typedef VkFlags VkDescriptorPoolCreateFlags; +typedef VkFlags VkDescriptorPoolResetFlags; +typedef VkFlags VkFramebufferCreateFlags; +typedef VkFlags VkRenderPassCreateFlags; + +typedef enum VkAttachmentDescriptionFlagBits { + VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001, + VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentDescriptionFlagBits; +typedef 
VkFlags VkAttachmentDescriptionFlags; + +typedef enum VkSubpassDescriptionFlagBits { + VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001, + VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002, + VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassDescriptionFlagBits; +typedef VkFlags VkSubpassDescriptionFlags; + +typedef enum VkAccessFlagBits { + VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001, + VK_ACCESS_INDEX_READ_BIT = 0x00000002, + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004, + VK_ACCESS_UNIFORM_READ_BIT = 0x00000008, + VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010, + VK_ACCESS_SHADER_READ_BIT = 0x00000020, + VK_ACCESS_SHADER_WRITE_BIT = 0x00000040, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400, + VK_ACCESS_TRANSFER_READ_BIT = 0x00000800, + VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000, + VK_ACCESS_HOST_READ_BIT = 0x00002000, + VK_ACCESS_HOST_WRITE_BIT = 0x00004000, + VK_ACCESS_MEMORY_READ_BIT = 0x00008000, + VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000, + VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000, + VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 0x00020000, + VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x00040000, + VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000, + VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAccessFlagBits; +typedef VkFlags VkAccessFlags; + +typedef enum VkDependencyFlagBits { + VK_DEPENDENCY_BY_REGION_BIT = 0x00000001, + VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004, + VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002, + VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT, + VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT, + VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDependencyFlagBits; +typedef VkFlags VkDependencyFlags; + +typedef enum VkCommandPoolCreateFlagBits { + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, + VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, + VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004, + VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolCreateFlagBits; +typedef VkFlags VkCommandPoolCreateFlags; + +typedef enum VkCommandPoolResetFlagBits { + VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolResetFlagBits; +typedef VkFlags VkCommandPoolResetFlags; + +typedef enum VkCommandBufferUsageFlagBits { + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001, + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002, + VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004, + VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferUsageFlagBits; +typedef VkFlags VkCommandBufferUsageFlags; + +typedef enum VkQueryControlFlagBits { + VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001, + VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryControlFlagBits; +typedef VkFlags VkQueryControlFlags; + +typedef enum VkCommandBufferResetFlagBits { + VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferResetFlagBits; +typedef VkFlags VkCommandBufferResetFlags; + +typedef enum VkStencilFaceFlagBits { + VK_STENCIL_FACE_FRONT_BIT = 
0x00000001, + VK_STENCIL_FACE_BACK_BIT = 0x00000002, + VK_STENCIL_FRONT_AND_BACK = 0x00000003, + VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkStencilFaceFlagBits; +typedef VkFlags VkStencilFaceFlags; + +typedef struct VkApplicationInfo { + VkStructureType sType; + const void* pNext; + const char* pApplicationName; + uint32_t applicationVersion; + const char* pEngineName; + uint32_t engineVersion; + uint32_t apiVersion; +} VkApplicationInfo; + +typedef struct VkInstanceCreateInfo { + VkStructureType sType; + const void* pNext; + VkInstanceCreateFlags flags; + const VkApplicationInfo* pApplicationInfo; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; +} VkInstanceCreateInfo; + +typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)( + void* pUserData, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)( + void* pUserData, + void* pOriginal, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkFreeFunction)( + void* pUserData, + void* pMemory); + +typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef struct VkAllocationCallbacks { + void* pUserData; + PFN_vkAllocationFunction pfnAllocation; + PFN_vkReallocationFunction pfnReallocation; + PFN_vkFreeFunction pfnFree; + PFN_vkInternalAllocationNotification pfnInternalAllocation; + PFN_vkInternalFreeNotification pfnInternalFree; +} VkAllocationCallbacks; + +typedef struct VkPhysicalDeviceFeatures { + VkBool32 robustBufferAccess; + VkBool32 fullDrawIndexUint32; + VkBool32 imageCubeArray; + VkBool32 independentBlend; + VkBool32 geometryShader; + VkBool32 tessellationShader; + VkBool32 sampleRateShading; + VkBool32 dualSrcBlend; + VkBool32 logicOp; + VkBool32 multiDrawIndirect; + VkBool32 drawIndirectFirstInstance; + VkBool32 depthClamp; + VkBool32 depthBiasClamp; + VkBool32 fillModeNonSolid; + VkBool32 depthBounds; + VkBool32 wideLines; + VkBool32 largePoints; + VkBool32 alphaToOne; + VkBool32 multiViewport; + VkBool32 samplerAnisotropy; + VkBool32 textureCompressionETC2; + VkBool32 textureCompressionASTC_LDR; + VkBool32 textureCompressionBC; + VkBool32 occlusionQueryPrecise; + VkBool32 pipelineStatisticsQuery; + VkBool32 vertexPipelineStoresAndAtomics; + VkBool32 fragmentStoresAndAtomics; + VkBool32 shaderTessellationAndGeometryPointSize; + VkBool32 shaderImageGatherExtended; + VkBool32 shaderStorageImageExtendedFormats; + VkBool32 shaderStorageImageMultisample; + VkBool32 shaderStorageImageReadWithoutFormat; + VkBool32 shaderStorageImageWriteWithoutFormat; + VkBool32 shaderUniformBufferArrayDynamicIndexing; + VkBool32 shaderSampledImageArrayDynamicIndexing; + VkBool32 shaderStorageBufferArrayDynamicIndexing; + VkBool32 shaderStorageImageArrayDynamicIndexing; + VkBool32 shaderClipDistance; + VkBool32 shaderCullDistance; + VkBool32 shaderFloat64; + VkBool32 shaderInt64; + VkBool32 shaderInt16; + VkBool32 shaderResourceResidency; + VkBool32 shaderResourceMinLod; + VkBool32 sparseBinding; + VkBool32 sparseResidencyBuffer; + VkBool32 sparseResidencyImage2D; + VkBool32 
sparseResidencyImage3D; + VkBool32 sparseResidency2Samples; + VkBool32 sparseResidency4Samples; + VkBool32 sparseResidency8Samples; + VkBool32 sparseResidency16Samples; + VkBool32 sparseResidencyAliased; + VkBool32 variableMultisampleRate; + VkBool32 inheritedQueries; +} VkPhysicalDeviceFeatures; + +typedef struct VkFormatProperties { + VkFormatFeatureFlags linearTilingFeatures; + VkFormatFeatureFlags optimalTilingFeatures; + VkFormatFeatureFlags bufferFeatures; +} VkFormatProperties; + +typedef struct VkExtent3D { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkExtent3D; + +typedef struct VkImageFormatProperties { + VkExtent3D maxExtent; + uint32_t maxMipLevels; + uint32_t maxArrayLayers; + VkSampleCountFlags sampleCounts; + VkDeviceSize maxResourceSize; +} VkImageFormatProperties; + +typedef struct VkPhysicalDeviceLimits { + uint32_t maxImageDimension1D; + uint32_t maxImageDimension2D; + uint32_t maxImageDimension3D; + uint32_t maxImageDimensionCube; + uint32_t maxImageArrayLayers; + uint32_t maxTexelBufferElements; + uint32_t maxUniformBufferRange; + uint32_t maxStorageBufferRange; + uint32_t maxPushConstantsSize; + uint32_t maxMemoryAllocationCount; + uint32_t maxSamplerAllocationCount; + VkDeviceSize bufferImageGranularity; + VkDeviceSize sparseAddressSpaceSize; + uint32_t maxBoundDescriptorSets; + uint32_t maxPerStageDescriptorSamplers; + uint32_t maxPerStageDescriptorUniformBuffers; + uint32_t maxPerStageDescriptorStorageBuffers; + uint32_t maxPerStageDescriptorSampledImages; + uint32_t maxPerStageDescriptorStorageImages; + uint32_t maxPerStageDescriptorInputAttachments; + uint32_t maxPerStageResources; + uint32_t maxDescriptorSetSamplers; + uint32_t maxDescriptorSetUniformBuffers; + uint32_t maxDescriptorSetUniformBuffersDynamic; + uint32_t maxDescriptorSetStorageBuffers; + uint32_t maxDescriptorSetStorageBuffersDynamic; + uint32_t maxDescriptorSetSampledImages; + uint32_t maxDescriptorSetStorageImages; + uint32_t maxDescriptorSetInputAttachments; + uint32_t maxVertexInputAttributes; + uint32_t maxVertexInputBindings; + uint32_t maxVertexInputAttributeOffset; + uint32_t maxVertexInputBindingStride; + uint32_t maxVertexOutputComponents; + uint32_t maxTessellationGenerationLevel; + uint32_t maxTessellationPatchSize; + uint32_t maxTessellationControlPerVertexInputComponents; + uint32_t maxTessellationControlPerVertexOutputComponents; + uint32_t maxTessellationControlPerPatchOutputComponents; + uint32_t maxTessellationControlTotalOutputComponents; + uint32_t maxTessellationEvaluationInputComponents; + uint32_t maxTessellationEvaluationOutputComponents; + uint32_t maxGeometryShaderInvocations; + uint32_t maxGeometryInputComponents; + uint32_t maxGeometryOutputComponents; + uint32_t maxGeometryOutputVertices; + uint32_t maxGeometryTotalOutputComponents; + uint32_t maxFragmentInputComponents; + uint32_t maxFragmentOutputAttachments; + uint32_t maxFragmentDualSrcAttachments; + uint32_t maxFragmentCombinedOutputResources; + uint32_t maxComputeSharedMemorySize; + uint32_t maxComputeWorkGroupCount[3]; + uint32_t maxComputeWorkGroupInvocations; + uint32_t maxComputeWorkGroupSize[3]; + uint32_t subPixelPrecisionBits; + uint32_t subTexelPrecisionBits; + uint32_t mipmapPrecisionBits; + uint32_t maxDrawIndexedIndexValue; + uint32_t maxDrawIndirectCount; + float maxSamplerLodBias; + float maxSamplerAnisotropy; + uint32_t maxViewports; + uint32_t maxViewportDimensions[2]; + float viewportBoundsRange[2]; + uint32_t viewportSubPixelBits; + size_t minMemoryMapAlignment; + VkDeviceSize 
minTexelBufferOffsetAlignment; + VkDeviceSize minUniformBufferOffsetAlignment; + VkDeviceSize minStorageBufferOffsetAlignment; + int32_t minTexelOffset; + uint32_t maxTexelOffset; + int32_t minTexelGatherOffset; + uint32_t maxTexelGatherOffset; + float minInterpolationOffset; + float maxInterpolationOffset; + uint32_t subPixelInterpolationOffsetBits; + uint32_t maxFramebufferWidth; + uint32_t maxFramebufferHeight; + uint32_t maxFramebufferLayers; + VkSampleCountFlags framebufferColorSampleCounts; + VkSampleCountFlags framebufferDepthSampleCounts; + VkSampleCountFlags framebufferStencilSampleCounts; + VkSampleCountFlags framebufferNoAttachmentsSampleCounts; + uint32_t maxColorAttachments; + VkSampleCountFlags sampledImageColorSampleCounts; + VkSampleCountFlags sampledImageIntegerSampleCounts; + VkSampleCountFlags sampledImageDepthSampleCounts; + VkSampleCountFlags sampledImageStencilSampleCounts; + VkSampleCountFlags storageImageSampleCounts; + uint32_t maxSampleMaskWords; + VkBool32 timestampComputeAndGraphics; + float timestampPeriod; + uint32_t maxClipDistances; + uint32_t maxCullDistances; + uint32_t maxCombinedClipAndCullDistances; + uint32_t discreteQueuePriorities; + float pointSizeRange[2]; + float lineWidthRange[2]; + float pointSizeGranularity; + float lineWidthGranularity; + VkBool32 strictLines; + VkBool32 standardSampleLocations; + VkDeviceSize optimalBufferCopyOffsetAlignment; + VkDeviceSize optimalBufferCopyRowPitchAlignment; + VkDeviceSize nonCoherentAtomSize; +} VkPhysicalDeviceLimits; + +typedef struct VkPhysicalDeviceSparseProperties { + VkBool32 residencyStandard2DBlockShape; + VkBool32 residencyStandard2DMultisampleBlockShape; + VkBool32 residencyStandard3DBlockShape; + VkBool32 residencyAlignedMipSize; + VkBool32 residencyNonResidentStrict; +} VkPhysicalDeviceSparseProperties; + +typedef struct VkPhysicalDeviceProperties { + uint32_t apiVersion; + uint32_t driverVersion; + uint32_t vendorID; + uint32_t deviceID; + VkPhysicalDeviceType deviceType; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; + uint8_t pipelineCacheUUID[VK_UUID_SIZE]; + VkPhysicalDeviceLimits limits; + VkPhysicalDeviceSparseProperties sparseProperties; +} VkPhysicalDeviceProperties; + +typedef struct VkQueueFamilyProperties { + VkQueueFlags queueFlags; + uint32_t queueCount; + uint32_t timestampValidBits; + VkExtent3D minImageTransferGranularity; +} VkQueueFamilyProperties; + +typedef struct VkMemoryType { + VkMemoryPropertyFlags propertyFlags; + uint32_t heapIndex; +} VkMemoryType; + +typedef struct VkMemoryHeap { + VkDeviceSize size; + VkMemoryHeapFlags flags; +} VkMemoryHeap; + +typedef struct VkPhysicalDeviceMemoryProperties { + uint32_t memoryTypeCount; + VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES]; + uint32_t memoryHeapCount; + VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryProperties; + +typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void); +typedef struct VkDeviceQueueCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueCount; + const float* pQueuePriorities; +} VkDeviceQueueCreateInfo; + +typedef struct VkDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceCreateFlags flags; + uint32_t queueCreateInfoCount; + const VkDeviceQueueCreateInfo* pQueueCreateInfos; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + const VkPhysicalDeviceFeatures* 
pEnabledFeatures; +} VkDeviceCreateInfo; + +typedef struct VkExtensionProperties { + char extensionName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; +} VkExtensionProperties; + +typedef struct VkLayerProperties { + char layerName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; + uint32_t implementationVersion; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkLayerProperties; + +typedef struct VkSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + const VkPipelineStageFlags* pWaitDstStageMask; + uint32_t commandBufferCount; + const VkCommandBuffer* pCommandBuffers; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkSubmitInfo; + +typedef struct VkMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeIndex; +} VkMemoryAllocateInfo; + +typedef struct VkMappedMemoryRange { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkDeviceSize offset; + VkDeviceSize size; +} VkMappedMemoryRange; + +typedef struct VkMemoryRequirements { + VkDeviceSize size; + VkDeviceSize alignment; + uint32_t memoryTypeBits; +} VkMemoryRequirements; + +typedef struct VkSparseImageFormatProperties { + VkImageAspectFlags aspectMask; + VkExtent3D imageGranularity; + VkSparseImageFormatFlags flags; +} VkSparseImageFormatProperties; + +typedef struct VkSparseImageMemoryRequirements { + VkSparseImageFormatProperties formatProperties; + uint32_t imageMipTailFirstLod; + VkDeviceSize imageMipTailSize; + VkDeviceSize imageMipTailOffset; + VkDeviceSize imageMipTailStride; +} VkSparseImageMemoryRequirements; + +typedef struct VkSparseMemoryBind { + VkDeviceSize resourceOffset; + VkDeviceSize size; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseMemoryBind; + +typedef struct VkSparseBufferMemoryBindInfo { + VkBuffer buffer; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseBufferMemoryBindInfo; + +typedef struct VkSparseImageOpaqueMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseImageOpaqueMemoryBindInfo; + +typedef struct VkImageSubresource { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t arrayLayer; +} VkImageSubresource; + +typedef struct VkOffset3D { + int32_t x; + int32_t y; + int32_t z; +} VkOffset3D; + +typedef struct VkSparseImageMemoryBind { + VkImageSubresource subresource; + VkOffset3D offset; + VkExtent3D extent; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseImageMemoryBind; + +typedef struct VkSparseImageMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseImageMemoryBind* pBinds; +} VkSparseImageMemoryBindInfo; + +typedef struct VkBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t bufferBindCount; + const VkSparseBufferMemoryBindInfo* pBufferBinds; + uint32_t imageOpaqueBindCount; + const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + uint32_t imageBindCount; + const VkSparseImageMemoryBindInfo* pImageBinds; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkBindSparseInfo; + +typedef struct VkFenceCreateInfo { + VkStructureType sType; + const void* pNext; + VkFenceCreateFlags flags; +} VkFenceCreateInfo; + +typedef struct VkSemaphoreCreateInfo { + VkStructureType sType; + const 
void* pNext; + VkSemaphoreCreateFlags flags; +} VkSemaphoreCreateInfo; + +typedef struct VkEventCreateInfo { + VkStructureType sType; + const void* pNext; + VkEventCreateFlags flags; +} VkEventCreateInfo; + +typedef struct VkQueryPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkQueryPoolCreateFlags flags; + VkQueryType queryType; + uint32_t queryCount; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkQueryPoolCreateInfo; + +typedef struct VkBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkDeviceSize size; + VkBufferUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkBufferCreateInfo; + +typedef struct VkBufferViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferViewCreateFlags flags; + VkBuffer buffer; + VkFormat format; + VkDeviceSize offset; + VkDeviceSize range; +} VkBufferViewCreateInfo; + +typedef struct VkImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageType imageType; + VkFormat format; + VkExtent3D extent; + uint32_t mipLevels; + uint32_t arrayLayers; + VkSampleCountFlagBits samples; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkImageLayout initialLayout; +} VkImageCreateInfo; + +typedef struct VkSubresourceLayout { + VkDeviceSize offset; + VkDeviceSize size; + VkDeviceSize rowPitch; + VkDeviceSize arrayPitch; + VkDeviceSize depthPitch; +} VkSubresourceLayout; + +typedef struct VkComponentMapping { + VkComponentSwizzle r; + VkComponentSwizzle g; + VkComponentSwizzle b; + VkComponentSwizzle a; +} VkComponentMapping; + +typedef struct VkImageSubresourceRange { + VkImageAspectFlags aspectMask; + uint32_t baseMipLevel; + uint32_t levelCount; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceRange; + +typedef struct VkImageViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageViewCreateFlags flags; + VkImage image; + VkImageViewType viewType; + VkFormat format; + VkComponentMapping components; + VkImageSubresourceRange subresourceRange; +} VkImageViewCreateInfo; + +typedef struct VkShaderModuleCreateInfo { + VkStructureType sType; + const void* pNext; + VkShaderModuleCreateFlags flags; + size_t codeSize; + const uint32_t* pCode; +} VkShaderModuleCreateInfo; + +typedef struct VkPipelineCacheCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCacheCreateFlags flags; + size_t initialDataSize; + const void* pInitialData; +} VkPipelineCacheCreateInfo; + +typedef struct VkSpecializationMapEntry { + uint32_t constantID; + uint32_t offset; + size_t size; +} VkSpecializationMapEntry; + +typedef struct VkSpecializationInfo { + uint32_t mapEntryCount; + const VkSpecializationMapEntry* pMapEntries; + size_t dataSize; + const void* pData; +} VkSpecializationInfo; + +typedef struct VkPipelineShaderStageCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineShaderStageCreateFlags flags; + VkShaderStageFlagBits stage; + VkShaderModule module; + const char* pName; + const VkSpecializationInfo* pSpecializationInfo; +} VkPipelineShaderStageCreateInfo; + +typedef struct VkVertexInputBindingDescription { + uint32_t binding; + uint32_t stride; + VkVertexInputRate inputRate; +} VkVertexInputBindingDescription; + +typedef struct VkVertexInputAttributeDescription { + uint32_t location; + uint32_t binding; + 
VkFormat format; + uint32_t offset; +} VkVertexInputAttributeDescription; + +typedef struct VkPipelineVertexInputStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineVertexInputStateCreateFlags flags; + uint32_t vertexBindingDescriptionCount; + const VkVertexInputBindingDescription* pVertexBindingDescriptions; + uint32_t vertexAttributeDescriptionCount; + const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; +} VkPipelineVertexInputStateCreateInfo; + +typedef struct VkPipelineInputAssemblyStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineInputAssemblyStateCreateFlags flags; + VkPrimitiveTopology topology; + VkBool32 primitiveRestartEnable; +} VkPipelineInputAssemblyStateCreateInfo; + +typedef struct VkPipelineTessellationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineTessellationStateCreateFlags flags; + uint32_t patchControlPoints; +} VkPipelineTessellationStateCreateInfo; + +typedef struct VkViewport { + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; +} VkViewport; + +typedef struct VkOffset2D { + int32_t x; + int32_t y; +} VkOffset2D; + +typedef struct VkExtent2D { + uint32_t width; + uint32_t height; +} VkExtent2D; + +typedef struct VkRect2D { + VkOffset2D offset; + VkExtent2D extent; +} VkRect2D; + +typedef struct VkPipelineViewportStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineViewportStateCreateFlags flags; + uint32_t viewportCount; + const VkViewport* pViewports; + uint32_t scissorCount; + const VkRect2D* pScissors; +} VkPipelineViewportStateCreateInfo; + +typedef struct VkPipelineRasterizationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateCreateFlags flags; + VkBool32 depthClampEnable; + VkBool32 rasterizerDiscardEnable; + VkPolygonMode polygonMode; + VkCullModeFlags cullMode; + VkFrontFace frontFace; + VkBool32 depthBiasEnable; + float depthBiasConstantFactor; + float depthBiasClamp; + float depthBiasSlopeFactor; + float lineWidth; +} VkPipelineRasterizationStateCreateInfo; + +typedef struct VkPipelineMultisampleStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineMultisampleStateCreateFlags flags; + VkSampleCountFlagBits rasterizationSamples; + VkBool32 sampleShadingEnable; + float minSampleShading; + const VkSampleMask* pSampleMask; + VkBool32 alphaToCoverageEnable; + VkBool32 alphaToOneEnable; +} VkPipelineMultisampleStateCreateInfo; + +typedef struct VkStencilOpState { + VkStencilOp failOp; + VkStencilOp passOp; + VkStencilOp depthFailOp; + VkCompareOp compareOp; + uint32_t compareMask; + uint32_t writeMask; + uint32_t reference; +} VkStencilOpState; + +typedef struct VkPipelineDepthStencilStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDepthStencilStateCreateFlags flags; + VkBool32 depthTestEnable; + VkBool32 depthWriteEnable; + VkCompareOp depthCompareOp; + VkBool32 depthBoundsTestEnable; + VkBool32 stencilTestEnable; + VkStencilOpState front; + VkStencilOpState back; + float minDepthBounds; + float maxDepthBounds; +} VkPipelineDepthStencilStateCreateInfo; + +typedef struct VkPipelineColorBlendAttachmentState { + VkBool32 blendEnable; + VkBlendFactor srcColorBlendFactor; + VkBlendFactor dstColorBlendFactor; + VkBlendOp colorBlendOp; + VkBlendFactor srcAlphaBlendFactor; + VkBlendFactor dstAlphaBlendFactor; + VkBlendOp alphaBlendOp; + VkColorComponentFlags colorWriteMask; +} VkPipelineColorBlendAttachmentState; + 
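[Illustrative aside, not part of the patch: the fixed-function state structs declared above are what a renderer fills in when it builds a graphics pipeline. A minimal sketch in C, assuming the standard VkBlendFactor, VkBlendOp, VkPolygonMode, VkFrontFace and VkStructureType enums declared earlier in this header; the field names are taken directly from the struct declarations above.]

#if 0 /* example only -- not part of vulkan_core.h */
/* Back-face culling, filled polygons, no depth bias. */
static const VkPipelineRasterizationStateCreateInfo example_raster_state = {
    .sType                   = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
    .depthClampEnable        = VK_FALSE,
    .rasterizerDiscardEnable = VK_FALSE,
    .polygonMode             = VK_POLYGON_MODE_FILL,
    .cullMode                = VK_CULL_MODE_BACK_BIT,
    .frontFace               = VK_FRONT_FACE_COUNTER_CLOCKWISE,
    .depthBiasEnable         = VK_FALSE,
    .lineWidth               = 1.0f,
};

/* Standard alpha blending on one color attachment, writing all channels. */
static const VkPipelineColorBlendAttachmentState example_blend_attachment = {
    .blendEnable         = VK_TRUE,
    .srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA,
    .dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
    .colorBlendOp        = VK_BLEND_OP_ADD,
    .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
    .dstAlphaBlendFactor = VK_BLEND_OP_ADD == VK_BLEND_OP_ADD ? VK_BLEND_FACTOR_ZERO : VK_BLEND_FACTOR_ZERO, /* plain VK_BLEND_FACTOR_ZERO; kept trivial */
    .alphaBlendOp        = VK_BLEND_OP_ADD,
    .colorWriteMask      = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT
                         | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
};
#endif

[The attachment state above is referenced by the VkPipelineColorBlendStateCreateInfo struct declared next, which in turn is pointed to from VkGraphicsPipelineCreateInfo further below.]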
+typedef struct VkPipelineColorBlendStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineColorBlendStateCreateFlags flags; + VkBool32 logicOpEnable; + VkLogicOp logicOp; + uint32_t attachmentCount; + const VkPipelineColorBlendAttachmentState* pAttachments; + float blendConstants[4]; +} VkPipelineColorBlendStateCreateInfo; + +typedef struct VkPipelineDynamicStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDynamicStateCreateFlags flags; + uint32_t dynamicStateCount; + const VkDynamicState* pDynamicStates; +} VkPipelineDynamicStateCreateInfo; + +typedef struct VkGraphicsPipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; + const VkPipelineTessellationStateCreateInfo* pTessellationState; + const VkPipelineViewportStateCreateInfo* pViewportState; + const VkPipelineRasterizationStateCreateInfo* pRasterizationState; + const VkPipelineMultisampleStateCreateInfo* pMultisampleState; + const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; + const VkPipelineColorBlendStateCreateInfo* pColorBlendState; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkRenderPass renderPass; + uint32_t subpass; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkGraphicsPipelineCreateInfo; + +typedef struct VkComputePipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + VkPipelineShaderStageCreateInfo stage; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkComputePipelineCreateInfo; + +typedef struct VkPushConstantRange { + VkShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; +} VkPushConstantRange; + +typedef struct VkPipelineLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineLayoutCreateFlags flags; + uint32_t setLayoutCount; + const VkDescriptorSetLayout* pSetLayouts; + uint32_t pushConstantRangeCount; + const VkPushConstantRange* pPushConstantRanges; +} VkPipelineLayoutCreateInfo; + +typedef struct VkSamplerCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerCreateFlags flags; + VkFilter magFilter; + VkFilter minFilter; + VkSamplerMipmapMode mipmapMode; + VkSamplerAddressMode addressModeU; + VkSamplerAddressMode addressModeV; + VkSamplerAddressMode addressModeW; + float mipLodBias; + VkBool32 anisotropyEnable; + float maxAnisotropy; + VkBool32 compareEnable; + VkCompareOp compareOp; + float minLod; + float maxLod; + VkBorderColor borderColor; + VkBool32 unnormalizedCoordinates; +} VkSamplerCreateInfo; + +typedef struct VkDescriptorSetLayoutBinding { + uint32_t binding; + VkDescriptorType descriptorType; + uint32_t descriptorCount; + VkShaderStageFlags stageFlags; + const VkSampler* pImmutableSamplers; +} VkDescriptorSetLayoutBinding; + +typedef struct VkDescriptorSetLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorSetLayoutCreateFlags flags; + uint32_t bindingCount; + const VkDescriptorSetLayoutBinding* pBindings; +} VkDescriptorSetLayoutCreateInfo; + +typedef struct VkDescriptorPoolSize { + VkDescriptorType type; + uint32_t descriptorCount; +} VkDescriptorPoolSize; + +typedef struct VkDescriptorPoolCreateInfo { + VkStructureType sType; + const void* pNext; + 
VkDescriptorPoolCreateFlags flags; + uint32_t maxSets; + uint32_t poolSizeCount; + const VkDescriptorPoolSize* pPoolSizes; +} VkDescriptorPoolCreateInfo; + +typedef struct VkDescriptorSetAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPool descriptorPool; + uint32_t descriptorSetCount; + const VkDescriptorSetLayout* pSetLayouts; +} VkDescriptorSetAllocateInfo; + +typedef struct VkDescriptorImageInfo { + VkSampler sampler; + VkImageView imageView; + VkImageLayout imageLayout; +} VkDescriptorImageInfo; + +typedef struct VkDescriptorBufferInfo { + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize range; +} VkDescriptorBufferInfo; + +typedef struct VkWriteDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + const VkDescriptorImageInfo* pImageInfo; + const VkDescriptorBufferInfo* pBufferInfo; + const VkBufferView* pTexelBufferView; +} VkWriteDescriptorSet; + +typedef struct VkCopyDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet srcSet; + uint32_t srcBinding; + uint32_t srcArrayElement; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; +} VkCopyDescriptorSet; + +typedef struct VkFramebufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkFramebufferCreateFlags flags; + VkRenderPass renderPass; + uint32_t attachmentCount; + const VkImageView* pAttachments; + uint32_t width; + uint32_t height; + uint32_t layers; +} VkFramebufferCreateInfo; + +typedef struct VkAttachmentDescription { + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription; + +typedef struct VkAttachmentReference { + uint32_t attachment; + VkImageLayout layout; +} VkAttachmentReference; + +typedef struct VkSubpassDescription { + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t inputAttachmentCount; + const VkAttachmentReference* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference* pColorAttachments; + const VkAttachmentReference* pResolveAttachments; + const VkAttachmentReference* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription; + +typedef struct VkSubpassDependency { + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; +} VkSubpassDependency; + +typedef struct VkRenderPassCreateInfo { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency* pDependencies; +} VkRenderPassCreateInfo; + +typedef struct VkCommandPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPoolCreateFlags flags; + uint32_t queueFamilyIndex; +} VkCommandPoolCreateInfo; + +typedef struct VkCommandBufferAllocateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPool commandPool; + 
VkCommandBufferLevel level; + uint32_t commandBufferCount; +} VkCommandBufferAllocateInfo; + +typedef struct VkCommandBufferInheritanceInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + uint32_t subpass; + VkFramebuffer framebuffer; + VkBool32 occlusionQueryEnable; + VkQueryControlFlags queryFlags; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkCommandBufferInheritanceInfo; + +typedef struct VkCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + VkCommandBufferUsageFlags flags; + const VkCommandBufferInheritanceInfo* pInheritanceInfo; +} VkCommandBufferBeginInfo; + +typedef struct VkBufferCopy { + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; +} VkBufferCopy; + +typedef struct VkImageSubresourceLayers { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceLayers; + +typedef struct VkImageCopy { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageCopy; + +typedef struct VkImageBlit { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffsets[2]; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffsets[2]; +} VkImageBlit; + +typedef struct VkBufferImageCopy { + VkDeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkBufferImageCopy; + +typedef union VkClearColorValue { + float float32[4]; + int32_t int32[4]; + uint32_t uint32[4]; +} VkClearColorValue; + +typedef struct VkClearDepthStencilValue { + float depth; + uint32_t stencil; +} VkClearDepthStencilValue; + +typedef union VkClearValue { + VkClearColorValue color; + VkClearDepthStencilValue depthStencil; +} VkClearValue; + +typedef struct VkClearAttachment { + VkImageAspectFlags aspectMask; + uint32_t colorAttachment; + VkClearValue clearValue; +} VkClearAttachment; + +typedef struct VkClearRect { + VkRect2D rect; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkClearRect; + +typedef struct VkImageResolve { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageResolve; + +typedef struct VkMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; +} VkMemoryBarrier; + +typedef struct VkBufferMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; +} VkBufferMemoryBarrier; + +typedef struct VkImageMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkImageLayout oldLayout; + VkImageLayout newLayout; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkImage image; + VkImageSubresourceRange subresourceRange; +} VkImageMemoryBarrier; + +typedef struct VkRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + VkFramebuffer framebuffer; + VkRect2D renderArea; + uint32_t clearValueCount; + const VkClearValue* pClearValues; +} VkRenderPassBeginInfo; + +typedef struct VkDispatchIndirectCommand { + uint32_t x; + uint32_t y; + uint32_t z; 
+} VkDispatchIndirectCommand; + +typedef struct VkDrawIndexedIndirectCommand { + uint32_t indexCount; + uint32_t instanceCount; + uint32_t firstIndex; + int32_t vertexOffset; + uint32_t firstInstance; +} VkDrawIndexedIndirectCommand; + +typedef struct VkDrawIndirectCommand { + uint32_t vertexCount; + uint32_t instanceCount; + uint32_t firstVertex; + uint32_t firstInstance; +} VkDrawIndirectCommand; + +typedef struct VkBaseOutStructure { + VkStructureType sType; + struct VkBaseOutStructure* pNext; +} VkBaseOutStructure; + +typedef struct VkBaseInStructure { + VkStructureType sType; + const struct VkBaseInStructure* pNext; +} VkBaseInStructure; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance); +typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice); +typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence); +typedef VkResult (VKAPI_PTR 
*PFN_vkQueueWaitIdle)(VkQueue queue); +typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory); +typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData); +typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory); +typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore); +typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent); +typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR 
*PFN_vkGetEventStatus)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool); +typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage); +typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule); +typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* 
pPipelines); +typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler); +typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets); +typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity); +typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool); +typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* 
pAllocateInfo, VkCommandBuffer* pCommandBuffers); +typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); +typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo); +typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports); +typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor); +typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); +typedef void (VKAPI_PTR 
*PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data); +typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); +typedef void (VKAPI_PTR 
*PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance( + const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance); + +VKAPI_ATTR void VKAPI_CALL vkDestroyInstance( + VkInstance instance, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices( + VkInstance instance, + uint32_t* pPhysicalDeviceCount, + VkPhysicalDevice* pPhysicalDevices); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkImageFormatProperties* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties* pMemoryProperties); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr( + VkInstance instance, + const char* pName); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr( + VkDevice device, + const char* pName); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice( + VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDevice* pDevice); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDevice( + VkDevice device, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* 
pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties( + VkPhysicalDevice physicalDevice, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue( + VkDevice device, + uint32_t queueFamilyIndex, + uint32_t queueIndex, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit( + VkQueue queue, + uint32_t submitCount, + const VkSubmitInfo* pSubmits, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle( + VkQueue queue); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory( + VkDevice device, + const VkMemoryAllocateInfo* pAllocateInfo, + const VkAllocationCallbacks* pAllocator, + VkDeviceMemory* pMemory); + +VKAPI_ATTR void VKAPI_CALL vkFreeMemory( + VkDevice device, + VkDeviceMemory memory, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize offset, + VkDeviceSize size, + VkMemoryMapFlags flags, + void** ppData); + +VKAPI_ATTR void VKAPI_CALL vkUnmapMemory( + VkDevice device, + VkDeviceMemory memory); + +VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize* pCommittedMemoryInBytes); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory( + VkDevice device, + VkBuffer buffer, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory( + VkDevice device, + VkImage image, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements( + VkDevice device, + VkBuffer buffer, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements( + VkDevice device, + VkImage image, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements( + VkDevice device, + VkImage image, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkSampleCountFlagBits samples, + VkImageUsageFlags usage, + VkImageTiling tiling, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse( + VkQueue queue, + uint32_t bindInfoCount, + const VkBindSparseInfo* pBindInfo, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence( + VkDevice device, + const VkFenceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFence( + VkDevice device, + VkFence fence, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult 
VKAPI_CALL vkResetFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus( + VkDevice device, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences, + VkBool32 waitAll, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore( + VkDevice device, + const VkSemaphoreCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSemaphore* pSemaphore); + +VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore( + VkDevice device, + VkSemaphore semaphore, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent( + VkDevice device, + const VkEventCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkEvent* pEvent); + +VKAPI_ATTR void VKAPI_CALL vkDestroyEvent( + VkDevice device, + VkEvent event, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool( + VkDevice device, + const VkQueryPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkQueryPool* pQueryPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool( + VkDevice device, + VkQueryPool queryPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + void* pData, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer( + VkDevice device, + const VkBufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBuffer* pBuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer( + VkDevice device, + VkBuffer buffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView( + VkDevice device, + const VkBufferViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBufferView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView( + VkDevice device, + VkBufferView bufferView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage( + VkDevice device, + const VkImageCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImage* pImage); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImage( + VkDevice device, + VkImage image, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout( + VkDevice device, + VkImage image, + const VkImageSubresource* pSubresource, + VkSubresourceLayout* pLayout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView( + VkDevice device, + const VkImageViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImageView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImageView( + VkDevice device, + VkImageView imageView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule( + VkDevice device, + const VkShaderModuleCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkShaderModule* pShaderModule); + +VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule( + VkDevice device, + VkShaderModule shaderModule, + const VkAllocationCallbacks* pAllocator); + 
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache( + VkDevice device, + const VkPipelineCacheCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineCache* pPipelineCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache( + VkDevice device, + VkPipelineCache pipelineCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData( + VkDevice device, + VkPipelineCache pipelineCache, + size_t* pDataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches( + VkDevice device, + VkPipelineCache dstCache, + uint32_t srcCacheCount, + const VkPipelineCache* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkGraphicsPipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkComputePipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline( + VkDevice device, + VkPipeline pipeline, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout( + VkDevice device, + const VkPipelineLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineLayout* pPipelineLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout( + VkDevice device, + VkPipelineLayout pipelineLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler( + VkDevice device, + const VkSamplerCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSampler* pSampler); + +VKAPI_ATTR void VKAPI_CALL vkDestroySampler( + VkDevice device, + VkSampler sampler, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorSetLayout* pSetLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout( + VkDevice device, + VkDescriptorSetLayout descriptorSetLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool( + VkDevice device, + const VkDescriptorPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorPool* pDescriptorPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + VkDescriptorPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets( + VkDevice device, + const VkDescriptorSetAllocateInfo* pAllocateInfo, + VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets( + VkDevice device, + VkDescriptorPool descriptorPool, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets( + VkDevice device, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites, + uint32_t descriptorCopyCount, + const VkCopyDescriptorSet* pDescriptorCopies); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer( + VkDevice device, + 
const VkFramebufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFramebuffer* pFramebuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer( + VkDevice device, + VkFramebuffer framebuffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass( + VkDevice device, + const VkRenderPassCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass( + VkDevice device, + VkRenderPass renderPass, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity( + VkDevice device, + VkRenderPass renderPass, + VkExtent2D* pGranularity); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool( + VkDevice device, + const VkCommandPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCommandPool* pCommandPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool( + VkDevice device, + VkCommandPool commandPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers( + VkDevice device, + const VkCommandBufferAllocateInfo* pAllocateInfo, + VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers( + VkDevice device, + VkCommandPool commandPool, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer( + VkCommandBuffer commandBuffer, + const VkCommandBufferBeginInfo* pBeginInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer( + VkCommandBuffer commandBuffer, + VkCommandBufferResetFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor( + VkCommandBuffer commandBuffer, + uint32_t firstScissor, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth( + VkCommandBuffer commandBuffer, + float lineWidth); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias( + VkCommandBuffer commandBuffer, + float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants( + VkCommandBuffer commandBuffer, + const float blendConstants[4]); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds( + VkCommandBuffer commandBuffer, + float minDepthBounds, + float maxDepthBounds); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t compareMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t writeMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t reference); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t firstSet, + uint32_t descriptorSetCount, + const 
VkDescriptorSet* pDescriptorSets, + uint32_t dynamicOffsetCount, + const uint32_t* pDynamicOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkIndexType indexType); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdDraw( + VkCommandBuffer commandBuffer, + uint32_t vertexCount, + uint32_t instanceCount, + uint32_t firstVertex, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed( + VkCommandBuffer commandBuffer, + uint32_t indexCount, + uint32_t instanceCount, + uint32_t firstIndex, + int32_t vertexOffset, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatch( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageBlit* pRegions, + VkFilter filter); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize dataSize, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize size, + uint32_t data); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearColorValue* pColor, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearDepthStencilValue* pDepthStencil, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments( + VkCommandBuffer commandBuffer, + uint32_t attachmentCount, + const VkClearAttachment* 
pAttachments, + uint32_t rectCount, + const VkClearRect* pRects); + +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageResolve* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + VkDependencyFlags dependencyFlags, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants( + VkCommandBuffer commandBuffer, + VkPipelineLayout layout, + VkShaderStageFlags stageFlags, + uint32_t offset, + uint32_t size, + const void* pValues); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + VkSubpassContents contents); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass( + VkCommandBuffer commandBuffer, + VkSubpassContents contents); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands( + VkCommandBuffer commandBuffer, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); +#endif + +#define VK_VERSION_1_1 1 +// Vulkan 1.1 version number +#define VK_API_VERSION_1_1 VK_MAKE_VERSION(1, 1, 0)// Patch version should always be set to 0 + + +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate) + +#define VK_MAX_DEVICE_GROUP_SIZE 32 +#define VK_LUID_SIZE 8 +#define VK_QUEUE_FAMILY_EXTERNAL (~0U-1) + + +typedef enum VkPointClippingBehavior { + VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0, + 
VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1, + VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES, + VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY, + VK_POINT_CLIPPING_BEHAVIOR_BEGIN_RANGE = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES, + VK_POINT_CLIPPING_BEHAVIOR_END_RANGE = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY, + VK_POINT_CLIPPING_BEHAVIOR_RANGE_SIZE = (VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY - VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES + 1), + VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF +} VkPointClippingBehavior; + +typedef enum VkTessellationDomainOrigin { + VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0, + VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1, + VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_BEGIN_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_END_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_RANGE_SIZE = (VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT - VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT + 1), + VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF +} VkTessellationDomainOrigin; + +typedef enum VkSamplerYcbcrModelConversion { + VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_BEGIN_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_END_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_RANGE_SIZE = (VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 - VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY + 1), + VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF +} VkSamplerYcbcrModelConversion; + +typedef enum VkSamplerYcbcrRange { + VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0, + VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1, + VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL, + VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW, + VK_SAMPLER_YCBCR_RANGE_BEGIN_RANGE = VK_SAMPLER_YCBCR_RANGE_ITU_FULL, + VK_SAMPLER_YCBCR_RANGE_END_RANGE = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW, + VK_SAMPLER_YCBCR_RANGE_RANGE_SIZE = (VK_SAMPLER_YCBCR_RANGE_ITU_NARROW - VK_SAMPLER_YCBCR_RANGE_ITU_FULL + 1), + VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerYcbcrRange; + +typedef enum VkChromaLocation { + VK_CHROMA_LOCATION_COSITED_EVEN = 0, + VK_CHROMA_LOCATION_MIDPOINT = 1, + VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN, + VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT, + VK_CHROMA_LOCATION_BEGIN_RANGE = VK_CHROMA_LOCATION_COSITED_EVEN, + 
VK_CHROMA_LOCATION_END_RANGE = VK_CHROMA_LOCATION_MIDPOINT, + VK_CHROMA_LOCATION_RANGE_SIZE = (VK_CHROMA_LOCATION_MIDPOINT - VK_CHROMA_LOCATION_COSITED_EVEN + 1), + VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF +} VkChromaLocation; + +typedef enum VkDescriptorUpdateTemplateType { + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_END_RANGE = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET - VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET + 1), + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorUpdateTemplateType; + + +typedef enum VkSubgroupFeatureFlagBits { + VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001, + VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002, + VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004, + VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008, + VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010, + VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020, + VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040, + VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080, + VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100, + VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubgroupFeatureFlagBits; +typedef VkFlags VkSubgroupFeatureFlags; + +typedef enum VkPeerMemoryFeatureFlagBits { + VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001, + VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002, + VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004, + VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008, + VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT, + VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT, + VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPeerMemoryFeatureFlagBits; +typedef VkFlags VkPeerMemoryFeatureFlags; + +typedef enum VkMemoryAllocateFlagBits { + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001, + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, + VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryAllocateFlagBits; +typedef VkFlags VkMemoryAllocateFlags; +typedef VkFlags VkCommandPoolTrimFlags; +typedef VkFlags VkDescriptorUpdateTemplateCreateFlags; + +typedef enum VkExternalMemoryHandleTypeFlagBits { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = 
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBits; +typedef VkFlags VkExternalMemoryHandleTypeFlags; + +typedef enum VkExternalMemoryFeatureFlagBits { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBits; +typedef VkFlags VkExternalMemoryFeatureFlags; + +typedef enum VkExternalFenceHandleTypeFlagBits { + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalFenceHandleTypeFlagBits; +typedef VkFlags VkExternalFenceHandleTypeFlags; + +typedef enum VkExternalFenceFeatureFlagBits { + VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001, + VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalFenceFeatureFlagBits; +typedef VkFlags VkExternalFenceFeatureFlags; + +typedef enum VkFenceImportFlagBits { + VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001, + VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT, + VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceImportFlagBits; +typedef VkFlags VkFenceImportFlags; + +typedef enum VkSemaphoreImportFlagBits { + VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001, + VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, + VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSemaphoreImportFlagBits; +typedef VkFlags VkSemaphoreImportFlags; + +typedef enum VkExternalSemaphoreHandleTypeFlagBits { + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + 
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalSemaphoreHandleTypeFlagBits; +typedef VkFlags VkExternalSemaphoreHandleTypeFlags; + +typedef enum VkExternalSemaphoreFeatureFlagBits { + VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001, + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalSemaphoreFeatureFlagBits; +typedef VkFlags VkExternalSemaphoreFeatureFlags; + +typedef struct VkPhysicalDeviceSubgroupProperties { + VkStructureType sType; + void* pNext; + uint32_t subgroupSize; + VkShaderStageFlags supportedStages; + VkSubgroupFeatureFlags supportedOperations; + VkBool32 quadOperationsInAllStages; +} VkPhysicalDeviceSubgroupProperties; + +typedef struct VkBindBufferMemoryInfo { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; +} VkBindBufferMemoryInfo; + +typedef struct VkBindImageMemoryInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; +} VkBindImageMemoryInfo; + +typedef struct VkPhysicalDevice16BitStorageFeatures { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer16BitAccess; + VkBool32 uniformAndStorageBuffer16BitAccess; + VkBool32 storagePushConstant16; + VkBool32 storageInputOutput16; +} VkPhysicalDevice16BitStorageFeatures; + +typedef struct VkMemoryDedicatedRequirements { + VkStructureType sType; + void* pNext; + VkBool32 prefersDedicatedAllocation; + VkBool32 requiresDedicatedAllocation; +} VkMemoryDedicatedRequirements; + +typedef struct VkMemoryDedicatedAllocateInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkMemoryDedicatedAllocateInfo; + +typedef struct VkMemoryAllocateFlagsInfo { + VkStructureType sType; + const void* pNext; + VkMemoryAllocateFlags flags; + uint32_t deviceMask; +} VkMemoryAllocateFlagsInfo; + +typedef struct VkDeviceGroupRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceMask; + uint32_t deviceRenderAreaCount; + const VkRect2D* pDeviceRenderAreas; +} VkDeviceGroupRenderPassBeginInfo; + +typedef struct VkDeviceGroupCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceMask; +} VkDeviceGroupCommandBufferBeginInfo; + +typedef struct VkDeviceGroupSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const uint32_t* pWaitSemaphoreDeviceIndices; + 
uint32_t commandBufferCount; + const uint32_t* pCommandBufferDeviceMasks; + uint32_t signalSemaphoreCount; + const uint32_t* pSignalSemaphoreDeviceIndices; +} VkDeviceGroupSubmitInfo; + +typedef struct VkDeviceGroupBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t resourceDeviceIndex; + uint32_t memoryDeviceIndex; +} VkDeviceGroupBindSparseInfo; + +typedef struct VkBindBufferMemoryDeviceGroupInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindBufferMemoryDeviceGroupInfo; + +typedef struct VkBindImageMemoryDeviceGroupInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; + uint32_t splitInstanceBindRegionCount; + const VkRect2D* pSplitInstanceBindRegions; +} VkBindImageMemoryDeviceGroupInfo; + +typedef struct VkPhysicalDeviceGroupProperties { + VkStructureType sType; + void* pNext; + uint32_t physicalDeviceCount; + VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE]; + VkBool32 subsetAllocation; +} VkPhysicalDeviceGroupProperties; + +typedef struct VkDeviceGroupDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t physicalDeviceCount; + const VkPhysicalDevice* pPhysicalDevices; +} VkDeviceGroupDeviceCreateInfo; + +typedef struct VkBufferMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; +} VkBufferMemoryRequirementsInfo2; + +typedef struct VkImageMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkImage image; +} VkImageMemoryRequirementsInfo2; + +typedef struct VkImageSparseMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkImage image; +} VkImageSparseMemoryRequirementsInfo2; + +typedef struct VkMemoryRequirements2 { + VkStructureType sType; + void* pNext; + VkMemoryRequirements memoryRequirements; +} VkMemoryRequirements2; + +typedef struct VkSparseImageMemoryRequirements2 { + VkStructureType sType; + void* pNext; + VkSparseImageMemoryRequirements memoryRequirements; +} VkSparseImageMemoryRequirements2; + +typedef struct VkPhysicalDeviceFeatures2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceFeatures features; +} VkPhysicalDeviceFeatures2; + +typedef struct VkPhysicalDeviceProperties2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceProperties properties; +} VkPhysicalDeviceProperties2; + +typedef struct VkFormatProperties2 { + VkStructureType sType; + void* pNext; + VkFormatProperties formatProperties; +} VkFormatProperties2; + +typedef struct VkImageFormatProperties2 { + VkStructureType sType; + void* pNext; + VkImageFormatProperties imageFormatProperties; +} VkImageFormatProperties2; + +typedef struct VkPhysicalDeviceImageFormatInfo2 { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkImageCreateFlags flags; +} VkPhysicalDeviceImageFormatInfo2; + +typedef struct VkQueueFamilyProperties2 { + VkStructureType sType; + void* pNext; + VkQueueFamilyProperties queueFamilyProperties; +} VkQueueFamilyProperties2; + +typedef struct VkPhysicalDeviceMemoryProperties2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceMemoryProperties memoryProperties; +} VkPhysicalDeviceMemoryProperties2; + +typedef struct VkSparseImageFormatProperties2 { + VkStructureType sType; + void* pNext; + VkSparseImageFormatProperties properties; +} VkSparseImageFormatProperties2; + +typedef struct 
VkPhysicalDeviceSparseImageFormatInfo2 { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkSampleCountFlagBits samples; + VkImageUsageFlags usage; + VkImageTiling tiling; +} VkPhysicalDeviceSparseImageFormatInfo2; + +typedef struct VkPhysicalDevicePointClippingProperties { + VkStructureType sType; + void* pNext; + VkPointClippingBehavior pointClippingBehavior; +} VkPhysicalDevicePointClippingProperties; + +typedef struct VkInputAttachmentAspectReference { + uint32_t subpass; + uint32_t inputAttachmentIndex; + VkImageAspectFlags aspectMask; +} VkInputAttachmentAspectReference; + +typedef struct VkRenderPassInputAttachmentAspectCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t aspectReferenceCount; + const VkInputAttachmentAspectReference* pAspectReferences; +} VkRenderPassInputAttachmentAspectCreateInfo; + +typedef struct VkImageViewUsageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageUsageFlags usage; +} VkImageViewUsageCreateInfo; + +typedef struct VkPipelineTessellationDomainOriginStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkTessellationDomainOrigin domainOrigin; +} VkPipelineTessellationDomainOriginStateCreateInfo; + +typedef struct VkRenderPassMultiviewCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t subpassCount; + const uint32_t* pViewMasks; + uint32_t dependencyCount; + const int32_t* pViewOffsets; + uint32_t correlationMaskCount; + const uint32_t* pCorrelationMasks; +} VkRenderPassMultiviewCreateInfo; + +typedef struct VkPhysicalDeviceMultiviewFeatures { + VkStructureType sType; + void* pNext; + VkBool32 multiview; + VkBool32 multiviewGeometryShader; + VkBool32 multiviewTessellationShader; +} VkPhysicalDeviceMultiviewFeatures; + +typedef struct VkPhysicalDeviceMultiviewProperties { + VkStructureType sType; + void* pNext; + uint32_t maxMultiviewViewCount; + uint32_t maxMultiviewInstanceIndex; +} VkPhysicalDeviceMultiviewProperties; + +typedef struct VkPhysicalDeviceVariablePointerFeatures { + VkStructureType sType; + void* pNext; + VkBool32 variablePointersStorageBuffer; + VkBool32 variablePointers; +} VkPhysicalDeviceVariablePointerFeatures; + +typedef struct VkPhysicalDeviceProtectedMemoryFeatures { + VkStructureType sType; + void* pNext; + VkBool32 protectedMemory; +} VkPhysicalDeviceProtectedMemoryFeatures; + +typedef struct VkPhysicalDeviceProtectedMemoryProperties { + VkStructureType sType; + void* pNext; + VkBool32 protectedNoFault; +} VkPhysicalDeviceProtectedMemoryProperties; + +typedef struct VkDeviceQueueInfo2 { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueIndex; +} VkDeviceQueueInfo2; + +typedef struct VkProtectedSubmitInfo { + VkStructureType sType; + const void* pNext; + VkBool32 protectedSubmit; +} VkProtectedSubmitInfo; + +typedef struct VkSamplerYcbcrConversionCreateInfo { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkSamplerYcbcrModelConversion ycbcrModel; + VkSamplerYcbcrRange ycbcrRange; + VkComponentMapping components; + VkChromaLocation xChromaOffset; + VkChromaLocation yChromaOffset; + VkFilter chromaFilter; + VkBool32 forceExplicitReconstruction; +} VkSamplerYcbcrConversionCreateInfo; + +typedef struct VkSamplerYcbcrConversionInfo { + VkStructureType sType; + const void* pNext; + VkSamplerYcbcrConversion conversion; +} VkSamplerYcbcrConversionInfo; + +typedef struct VkBindImagePlaneMemoryInfo { + VkStructureType sType; + const void* 
pNext; + VkImageAspectFlagBits planeAspect; +} VkBindImagePlaneMemoryInfo; + +typedef struct VkImagePlaneMemoryRequirementsInfo { + VkStructureType sType; + const void* pNext; + VkImageAspectFlagBits planeAspect; +} VkImagePlaneMemoryRequirementsInfo; + +typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures { + VkStructureType sType; + void* pNext; + VkBool32 samplerYcbcrConversion; +} VkPhysicalDeviceSamplerYcbcrConversionFeatures; + +typedef struct VkSamplerYcbcrConversionImageFormatProperties { + VkStructureType sType; + void* pNext; + uint32_t combinedImageSamplerDescriptorCount; +} VkSamplerYcbcrConversionImageFormatProperties; + +typedef struct VkDescriptorUpdateTemplateEntry { + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + size_t offset; + size_t stride; +} VkDescriptorUpdateTemplateEntry; + +typedef struct VkDescriptorUpdateTemplateCreateInfo { + VkStructureType sType; + void* pNext; + VkDescriptorUpdateTemplateCreateFlags flags; + uint32_t descriptorUpdateEntryCount; + const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries; + VkDescriptorUpdateTemplateType templateType; + VkDescriptorSetLayout descriptorSetLayout; + VkPipelineBindPoint pipelineBindPoint; + VkPipelineLayout pipelineLayout; + uint32_t set; +} VkDescriptorUpdateTemplateCreateInfo; + +typedef struct VkExternalMemoryProperties { + VkExternalMemoryFeatureFlags externalMemoryFeatures; + VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlags compatibleHandleTypes; +} VkExternalMemoryProperties; + +typedef struct VkPhysicalDeviceExternalImageFormatInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalImageFormatInfo; + +typedef struct VkExternalImageFormatProperties { + VkStructureType sType; + void* pNext; + VkExternalMemoryProperties externalMemoryProperties; +} VkExternalImageFormatProperties; + +typedef struct VkPhysicalDeviceExternalBufferInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkBufferUsageFlags usage; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalBufferInfo; + +typedef struct VkExternalBufferProperties { + VkStructureType sType; + void* pNext; + VkExternalMemoryProperties externalMemoryProperties; +} VkExternalBufferProperties; + +typedef struct VkPhysicalDeviceIDProperties { + VkStructureType sType; + void* pNext; + uint8_t deviceUUID[VK_UUID_SIZE]; + uint8_t driverUUID[VK_UUID_SIZE]; + uint8_t deviceLUID[VK_LUID_SIZE]; + uint32_t deviceNodeMask; + VkBool32 deviceLUIDValid; +} VkPhysicalDeviceIDProperties; + +typedef struct VkExternalMemoryImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExternalMemoryImageCreateInfo; + +typedef struct VkExternalMemoryBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExternalMemoryBufferCreateInfo; + +typedef struct VkExportMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExportMemoryAllocateInfo; + +typedef struct VkPhysicalDeviceExternalFenceInfo { + VkStructureType sType; + const void* pNext; + VkExternalFenceHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalFenceInfo; + +typedef struct VkExternalFenceProperties { + VkStructureType sType; + void* pNext; + VkExternalFenceHandleTypeFlags 
exportFromImportedHandleTypes; + VkExternalFenceHandleTypeFlags compatibleHandleTypes; + VkExternalFenceFeatureFlags externalFenceFeatures; +} VkExternalFenceProperties; + +typedef struct VkExportFenceCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalFenceHandleTypeFlags handleTypes; +} VkExportFenceCreateInfo; + +typedef struct VkExportSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlags handleTypes; +} VkExportSemaphoreCreateInfo; + +typedef struct VkPhysicalDeviceExternalSemaphoreInfo { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalSemaphoreInfo; + +typedef struct VkExternalSemaphoreProperties { + VkStructureType sType; + void* pNext; + VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes; + VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes; + VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures; +} VkExternalSemaphoreProperties; + +typedef struct VkPhysicalDeviceMaintenance3Properties { + VkStructureType sType; + void* pNext; + uint32_t maxPerSetDescriptors; + VkDeviceSize maxMemoryAllocationSize; +} VkPhysicalDeviceMaintenance3Properties; + +typedef struct VkDescriptorSetLayoutSupport { + VkStructureType sType; + void* pNext; + VkBool32 supported; +} VkDescriptorSetLayoutSupport; + +typedef struct VkPhysicalDeviceShaderDrawParameterFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderDrawParameters; +} VkPhysicalDeviceShaderDrawParameterFeatures; + + +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion( + uint32_t* pApiVersion); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures( + VkDevice device, + uint32_t heapIndex, 
+ uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2( + VkDevice device, + const VkDeviceQueueInfo2* pQueueInfo, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate( + VkDevice device, + VkDescriptorUpdateTemplate 
descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + +#define VK_KHR_surface 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR) + +#define VK_KHR_SURFACE_SPEC_VERSION 25 +#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface" + + +typedef enum VkColorSpaceKHR { + VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0, + VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001, + VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002, + VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = 1000104003, + VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004, + VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005, + VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006, + VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007, + VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008, + VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009, + VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010, + VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, + VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012, + VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013, + VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014, + VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_BEGIN_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_END_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_RANGE_SIZE_KHR = (VK_COLOR_SPACE_SRGB_NONLINEAR_KHR - VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + 1), + VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkColorSpaceKHR; + +typedef enum VkPresentModeKHR { + VK_PRESENT_MODE_IMMEDIATE_KHR = 0, + VK_PRESENT_MODE_MAILBOX_KHR = 1, + VK_PRESENT_MODE_FIFO_KHR = 2, + VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3, + VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000, + VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001, + VK_PRESENT_MODE_BEGIN_RANGE_KHR = VK_PRESENT_MODE_IMMEDIATE_KHR, + VK_PRESENT_MODE_END_RANGE_KHR = VK_PRESENT_MODE_FIFO_RELAXED_KHR, + VK_PRESENT_MODE_RANGE_SIZE_KHR = (VK_PRESENT_MODE_FIFO_RELAXED_KHR - VK_PRESENT_MODE_IMMEDIATE_KHR + 1), + VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPresentModeKHR; + + +typedef enum VkSurfaceTransformFlagBitsKHR { + VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001, + VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002, + VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004, + VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020, + 
VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080, + VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100, + VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSurfaceTransformFlagBitsKHR; +typedef VkFlags VkSurfaceTransformFlagsKHR; + +typedef enum VkCompositeAlphaFlagBitsKHR { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002, + VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004, + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008, + VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCompositeAlphaFlagBitsKHR; +typedef VkFlags VkCompositeAlphaFlagsKHR; + +typedef struct VkSurfaceCapabilitiesKHR { + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; +} VkSurfaceCapabilitiesKHR; + +typedef struct VkSurfaceFormatKHR { + VkFormat format; + VkColorSpaceKHR colorSpace; +} VkSurfaceFormatKHR; + + +typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR( + VkInstance instance, + VkSurfaceKHR surface, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + VkSurfaceKHR surface, + VkBool32* pSupported); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormatKHR* pSurfaceFormats); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); +#endif + +#define VK_KHR_swapchain 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR) + +#define VK_KHR_SWAPCHAIN_SPEC_VERSION 70 +#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain" + + +typedef enum VkSwapchainCreateFlagBitsKHR { + VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001, + VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002, + VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR = 0x00000004, + 
VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSwapchainCreateFlagBitsKHR; +typedef VkFlags VkSwapchainCreateFlagsKHR; + +typedef enum VkDeviceGroupPresentModeFlagBitsKHR { + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001, + VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002, + VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004, + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008, + VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDeviceGroupPresentModeFlagBitsKHR; +typedef VkFlags VkDeviceGroupPresentModeFlagsKHR; + +typedef struct VkSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainCreateFlagsKHR flags; + VkSurfaceKHR surface; + uint32_t minImageCount; + VkFormat imageFormat; + VkColorSpaceKHR imageColorSpace; + VkExtent2D imageExtent; + uint32_t imageArrayLayers; + VkImageUsageFlags imageUsage; + VkSharingMode imageSharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkSurfaceTransformFlagBitsKHR preTransform; + VkCompositeAlphaFlagBitsKHR compositeAlpha; + VkPresentModeKHR presentMode; + VkBool32 clipped; + VkSwapchainKHR oldSwapchain; +} VkSwapchainCreateInfoKHR; + +typedef struct VkPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t swapchainCount; + const VkSwapchainKHR* pSwapchains; + const uint32_t* pImageIndices; + VkResult* pResults; +} VkPresentInfoKHR; + +typedef struct VkImageSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; +} VkImageSwapchainCreateInfoKHR; + +typedef struct VkBindImageMemorySwapchainInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint32_t imageIndex; +} VkBindImageMemorySwapchainInfoKHR; + +typedef struct VkAcquireNextImageInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint64_t timeout; + VkSemaphore semaphore; + VkFence fence; + uint32_t deviceMask; +} VkAcquireNextImageInfoKHR; + +typedef struct VkDeviceGroupPresentCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE]; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupPresentCapabilitiesKHR; + +typedef struct VkDeviceGroupPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const uint32_t* pDeviceMasks; + VkDeviceGroupPresentModeFlagBitsKHR mode; +} VkDeviceGroupPresentInfoKHR; + +typedef struct VkDeviceGroupSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupSwapchainCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); +typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); +typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo); +typedef VkResult (VKAPI_PTR 
*PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR( + VkDevice device, + const VkSwapchainCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchain); + +VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR( + VkDevice device, + VkSwapchainKHR swapchain, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pSwapchainImageCount, + VkImage* pSwapchainImages); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint64_t timeout, + VkSemaphore semaphore, + VkFence fence, + uint32_t* pImageIndex); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR( + VkQueue queue, + const VkPresentInfoKHR* pPresentInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR( + VkDevice device, + VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR( + VkDevice device, + const VkAcquireNextImageInfoKHR* pAcquireInfo, + uint32_t* pImageIndex); +#endif + +#define VK_KHR_display 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR) + +#define VK_KHR_DISPLAY_SPEC_VERSION 21 +#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display" + + +typedef enum VkDisplayPlaneAlphaFlagBitsKHR { + VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008, + VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDisplayPlaneAlphaFlagBitsKHR; +typedef VkFlags VkDisplayPlaneAlphaFlagsKHR; +typedef VkFlags VkDisplayModeCreateFlagsKHR; +typedef VkFlags VkDisplaySurfaceCreateFlagsKHR; + +typedef struct VkDisplayPropertiesKHR { + VkDisplayKHR display; + const char* displayName; + VkExtent2D physicalDimensions; + VkExtent2D physicalResolution; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkBool32 planeReorderPossible; + VkBool32 persistentContent; +} VkDisplayPropertiesKHR; + +typedef struct VkDisplayModeParametersKHR { + VkExtent2D visibleRegion; + uint32_t refreshRate; +} VkDisplayModeParametersKHR; + +typedef struct VkDisplayModePropertiesKHR { + VkDisplayModeKHR displayMode; + VkDisplayModeParametersKHR parameters; +} VkDisplayModePropertiesKHR; + +typedef struct VkDisplayModeCreateInfoKHR { + VkStructureType sType; + const void* pNext; + 
VkDisplayModeCreateFlagsKHR flags; + VkDisplayModeParametersKHR parameters; +} VkDisplayModeCreateInfoKHR; + +typedef struct VkDisplayPlaneCapabilitiesKHR { + VkDisplayPlaneAlphaFlagsKHR supportedAlpha; + VkOffset2D minSrcPosition; + VkOffset2D maxSrcPosition; + VkExtent2D minSrcExtent; + VkExtent2D maxSrcExtent; + VkOffset2D minDstPosition; + VkOffset2D maxDstPosition; + VkExtent2D minDstExtent; + VkExtent2D maxDstExtent; +} VkDisplayPlaneCapabilitiesKHR; + +typedef struct VkDisplayPlanePropertiesKHR { + VkDisplayKHR currentDisplay; + uint32_t currentStackIndex; +} VkDisplayPlanePropertiesKHR; + +typedef struct VkDisplaySurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplaySurfaceCreateFlagsKHR flags; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkDisplaySurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlanePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR( + VkPhysicalDevice physicalDevice, + uint32_t planeIndex, + uint32_t* pDisplayCount, + VkDisplayKHR* pDisplays); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDisplayModeKHR* pMode); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR* pCapabilities); + 
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR( + VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#define VK_KHR_display_swapchain 1 +#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 9 +#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain" + +typedef struct VkDisplayPresentInfoKHR { + VkStructureType sType; + const void* pNext; + VkRect2D srcRect; + VkRect2D dstRect; + VkBool32 persistent; +} VkDisplayPresentInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchains); +#endif + +#define VK_KHR_sampler_mirror_clamp_to_edge 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge" + + +#define VK_KHR_multiview 1 +#define VK_KHR_MULTIVIEW_SPEC_VERSION 1 +#define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview" + +typedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR; + +typedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR; + +typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesKHR; + + + +#define VK_KHR_get_physical_device_properties2 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2" + +typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR; + +typedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR; + +typedef VkFormatProperties2 VkFormatProperties2KHR; + +typedef VkImageFormatProperties2 VkImageFormatProperties2KHR; + +typedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR; + +typedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR; + +typedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR; + +typedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR; + +typedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef 
void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); +#endif + +#define VK_KHR_device_group 1 +#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 3 +#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group" + +typedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR; + +typedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR; + +typedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR; + +typedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR; + + +typedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR; + +typedef VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR; + +typedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR; + +typedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR; + +typedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR; + +typedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR; + +typedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + 
uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); +#endif + +#define VK_KHR_shader_draw_parameters 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters" + + +#define VK_KHR_maintenance1 1 +#define VK_KHR_MAINTENANCE1_SPEC_VERSION 2 +#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1" + +typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR; + + +typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); +#endif + +#define VK_KHR_device_group_creation 1 +#define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1 +#define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHR_device_group_creation" +#define VK_MAX_DEVICE_GROUP_SIZE_KHR VK_MAX_DEVICE_GROUP_SIZE + +typedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR; + +typedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +#endif + +#define VK_KHR_external_memory_capabilities 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities" +#define VK_LUID_SIZE_KHR VK_LUID_SIZE + +typedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR; + +typedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR; + +typedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR; + +typedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR; + + +typedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR; + +typedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR; + +typedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR; + +typedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR; + +typedef VkExternalBufferProperties VkExternalBufferPropertiesKHR; + +typedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); +#endif + +#define VK_KHR_external_memory 1 +#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory" +#define VK_QUEUE_FAMILY_EXTERNAL_KHR VK_QUEUE_FAMILY_EXTERNAL + +typedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR; + +typedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR; + +typedef VkExportMemoryAllocateInfo 
VkExportMemoryAllocateInfoKHR; + + + +#define VK_KHR_external_memory_fd 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd" + +typedef struct VkImportMemoryFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + int fd; +} VkImportMemoryFdInfoKHR; + +typedef struct VkMemoryFdPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryFdPropertiesKHR; + +typedef struct VkMemoryGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetFdInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR( + VkDevice device, + const VkMemoryGetFdInfoKHR* pGetFdInfo, + int* pFd); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + int fd, + VkMemoryFdPropertiesKHR* pMemoryFdProperties); +#endif + +#define VK_KHR_external_semaphore_capabilities 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities" + +typedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR; + +typedef VkExternalSemaphoreHandleTypeFlagBits VkExternalSemaphoreHandleTypeFlagBitsKHR; + +typedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR; + +typedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR; + + +typedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR; + +typedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +#endif + +#define VK_KHR_external_semaphore 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore" + +typedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR; + +typedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR; + + +typedef VkExportSemaphoreCreateInfo VkExportSemaphoreCreateInfoKHR; + + + +#define VK_KHR_external_semaphore_fd 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd" + +typedef struct VkImportSemaphoreFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + int fd; +} VkImportSemaphoreFdInfoKHR; + +typedef struct VkSemaphoreGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + 
VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetFdInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR( + VkDevice device, + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( + VkDevice device, + const VkSemaphoreGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + +#define VK_KHR_push_descriptor 1 +#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2 +#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" + +typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t maxPushDescriptors; +} VkPhysicalDevicePushDescriptorPropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); +#endif + +#define VK_KHR_shader_float16_int8 1 +#define VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION 1 +#define VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME "VK_KHR_shader_float16_int8" + +typedef struct VkPhysicalDeviceFloat16Int8FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderFloat16; + VkBool32 shaderInt8; +} VkPhysicalDeviceFloat16Int8FeaturesKHR; + + + +#define VK_KHR_16bit_storage 1 +#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage" + +typedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR; + + + +#define VK_KHR_incremental_present 1 +#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 1 +#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present" + +typedef struct VkRectLayerKHR { + VkOffset2D offset; + VkExtent2D extent; + uint32_t layer; +} VkRectLayerKHR; + +typedef struct VkPresentRegionKHR { + uint32_t rectangleCount; + const VkRectLayerKHR* pRectangles; +} VkPresentRegionKHR; + +typedef struct VkPresentRegionsKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentRegionKHR* pRegions; +} VkPresentRegionsKHR; + + + +#define VK_KHR_descriptor_update_template 1 +typedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR; + + +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1 +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template" + +typedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR; + + +typedef VkDescriptorUpdateTemplateCreateFlags 
VkDescriptorUpdateTemplateCreateFlagsKHR; + + +typedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR; + +typedef VkDescriptorUpdateTemplateCreateInfo VkDescriptorUpdateTemplateCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); +#endif + +#define VK_KHR_create_renderpass2 1 +#define VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION 1 +#define VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME "VK_KHR_create_renderpass2" + +typedef struct VkAttachmentDescription2KHR { + VkStructureType sType; + const void* pNext; + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription2KHR; + +typedef struct VkAttachmentReference2KHR { + VkStructureType sType; + const void* pNext; + uint32_t attachment; + VkImageLayout layout; + VkImageAspectFlags aspectMask; +} VkAttachmentReference2KHR; + +typedef struct VkSubpassDescription2KHR { + VkStructureType sType; + const void* pNext; + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t viewMask; + uint32_t inputAttachmentCount; + const VkAttachmentReference2KHR* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference2KHR* pColorAttachments; + const VkAttachmentReference2KHR* pResolveAttachments; + const VkAttachmentReference2KHR* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription2KHR; + +typedef struct VkSubpassDependency2KHR { + VkStructureType sType; + const void* pNext; + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; + int32_t viewOffset; +} VkSubpassDependency2KHR; + +typedef struct VkRenderPassCreateInfo2KHR { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription2KHR* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription2KHR* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency2KHR* 
pDependencies; + uint32_t correlatedViewMaskCount; + const uint32_t* pCorrelatedViewMasks; +} VkRenderPassCreateInfo2KHR; + +typedef struct VkSubpassBeginInfoKHR { + VkStructureType sType; + const void* pNext; + VkSubpassContents contents; +} VkSubpassBeginInfoKHR; + +typedef struct VkSubpassEndInfoKHR { + VkStructureType sType; + const void* pNext; +} VkSubpassEndInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2KHR)(VkDevice device, const VkRenderPassCreateInfo2KHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfoKHR* pSubpassBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo, const VkSubpassEndInfoKHR* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2KHR( + VkDevice device, + const VkRenderPassCreateInfo2KHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfoKHR* pSubpassBeginInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfoKHR* pSubpassBeginInfo, + const VkSubpassEndInfoKHR* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfoKHR* pSubpassEndInfo); +#endif + +#define VK_KHR_shared_presentable_image 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image" + +typedef struct VkSharedPresentSurfaceCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkImageUsageFlags sharedPresentSupportedUsageFlags; +} VkSharedPresentSurfaceCapabilitiesKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR( + VkDevice device, + VkSwapchainKHR swapchain); +#endif + +#define VK_KHR_external_fence_capabilities 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities" + +typedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR; + +typedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR; + +typedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR; + +typedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR; + + +typedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR; + +typedef VkExternalFenceProperties VkExternalFencePropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* 
pExternalFenceProperties); +#endif + +#define VK_KHR_external_fence 1 +#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence" + +typedef VkFenceImportFlags VkFenceImportFlagsKHR; + +typedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR; + + +typedef VkExportFenceCreateInfo VkExportFenceCreateInfoKHR; + + + +#define VK_KHR_external_fence_fd 1 +#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd" + +typedef struct VkImportFenceFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + int fd; +} VkImportFenceFdInfoKHR; + +typedef struct VkFenceGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetFdInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR( + VkDevice device, + const VkImportFenceFdInfoKHR* pImportFenceFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR( + VkDevice device, + const VkFenceGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + +#define VK_KHR_maintenance2 1 +#define VK_KHR_MAINTENANCE2_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE2_EXTENSION_NAME "VK_KHR_maintenance2" + +typedef VkPointClippingBehavior VkPointClippingBehaviorKHR; + +typedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR; + + +typedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR; + +typedef VkRenderPassInputAttachmentAspectCreateInfo VkRenderPassInputAttachmentAspectCreateInfoKHR; + +typedef VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR; + +typedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR; + +typedef VkPipelineTessellationDomainOriginStateCreateInfo VkPipelineTessellationDomainOriginStateCreateInfoKHR; + + + +#define VK_KHR_get_surface_capabilities2 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2" + +typedef struct VkPhysicalDeviceSurfaceInfo2KHR { + VkStructureType sType; + const void* pNext; + VkSurfaceKHR surface; +} VkPhysicalDeviceSurfaceInfo2KHR; + +typedef struct VkSurfaceCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceCapabilitiesKHR surfaceCapabilities; +} VkSurfaceCapabilities2KHR; + +typedef struct VkSurfaceFormat2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceFormatKHR surfaceFormat; +} VkSurfaceFormat2KHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkSurfaceCapabilities2KHR* pSurfaceCapabilities); + 
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormat2KHR* pSurfaceFormats); +#endif + +#define VK_KHR_variable_pointers 1 +#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1 +#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers" + +typedef VkPhysicalDeviceVariablePointerFeatures VkPhysicalDeviceVariablePointerFeaturesKHR; + + + +#define VK_KHR_get_display_properties2 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_display_properties2" + +typedef struct VkDisplayProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPropertiesKHR displayProperties; +} VkDisplayProperties2KHR; + +typedef struct VkDisplayPlaneProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlanePropertiesKHR displayPlaneProperties; +} VkDisplayPlaneProperties2KHR; + +typedef struct VkDisplayModeProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayModePropertiesKHR displayModeProperties; +} VkDisplayModeProperties2KHR; + +typedef struct VkDisplayPlaneInfo2KHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeKHR mode; + uint32_t planeIndex; +} VkDisplayPlaneInfo2KHR; + +typedef struct VkDisplayPlaneCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlaneCapabilitiesKHR capabilities; +} VkDisplayPlaneCapabilities2KHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModeProperties2KHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlaneProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModeProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, + VkDisplayPlaneCapabilities2KHR* pCapabilities); +#endif + +#define VK_KHR_dedicated_allocation 1 +#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3 +#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation" + +typedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR; + +typedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR; + + + +#define VK_KHR_storage_buffer_storage_class 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1 +#define 
VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class" + + +#define VK_KHR_relaxed_block_layout 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout" + + +#define VK_KHR_get_memory_requirements2 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2" + +typedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR; + +typedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR; + +typedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR; + +typedef VkMemoryRequirements2 VkMemoryRequirements2KHR; + +typedef VkSparseImageMemoryRequirements2 VkSparseImageMemoryRequirements2KHR; + + +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +#endif + +#define VK_KHR_image_format_list 1 +#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1 +#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list" + +typedef struct VkImageFormatListCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t viewFormatCount; + const VkFormat* pViewFormats; +} VkImageFormatListCreateInfoKHR; + + + +#define VK_KHR_sampler_ycbcr_conversion 1 +typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR; + + +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 1 +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion" + +typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR; + +typedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR; + +typedef VkChromaLocation VkChromaLocationKHR; + + +typedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR; + +typedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR; + +typedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR; + +typedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR; + +typedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR; + +typedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const 
VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); +#endif + +#define VK_KHR_bind_memory2 1 +#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1 +#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2" + +typedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR; + +typedef VkBindImageMemoryInfo VkBindImageMemoryInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); +#endif + +#define VK_KHR_maintenance3 1 +#define VK_KHR_MAINTENANCE3_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE3_EXTENSION_NAME "VK_KHR_maintenance3" + +typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR; + +typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + +#define VK_KHR_draw_indirect_count 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_KHR_draw_indirect_count" + +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer
countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + +#define VK_KHR_8bit_storage 1 +#define VK_KHR_8BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_8BIT_STORAGE_EXTENSION_NAME "VK_KHR_8bit_storage" + +typedef struct VkPhysicalDevice8BitStorageFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer8BitAccess; + VkBool32 uniformAndStorageBuffer8BitAccess; + VkBool32 storagePushConstant8; +} VkPhysicalDevice8BitStorageFeaturesKHR; + + + +#define VK_KHR_shader_atomic_int64 1 +#define VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME "VK_KHR_shader_atomic_int64" + +typedef struct VkPhysicalDeviceShaderAtomicInt64FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderBufferInt64Atomics; + VkBool32 shaderSharedInt64Atomics; +} VkPhysicalDeviceShaderAtomicInt64FeaturesKHR; + + + +#define VK_KHR_driver_properties 1 +#define VK_MAX_DRIVER_NAME_SIZE_KHR 256 +#define VK_MAX_DRIVER_INFO_SIZE_KHR 256 +#define VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME "VK_KHR_driver_properties" + + +typedef enum VkDriverIdKHR { + VK_DRIVER_ID_AMD_PROPRIETARY_KHR = 1, + VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = 2, + VK_DRIVER_ID_MESA_RADV_KHR = 3, + VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR = 4, + VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR = 5, + VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR = 6, + VK_DRIVER_ID_IMAGINATION_PROPRIETARY_KHR = 7, + VK_DRIVER_ID_QUALCOMM_PROPRIETARY_KHR = 8, + VK_DRIVER_ID_ARM_PROPRIETARY_KHR = 9, + VK_DRIVER_ID_BEGIN_RANGE_KHR = VK_DRIVER_ID_AMD_PROPRIETARY_KHR, + VK_DRIVER_ID_END_RANGE_KHR = VK_DRIVER_ID_ARM_PROPRIETARY_KHR, + VK_DRIVER_ID_RANGE_SIZE_KHR = (VK_DRIVER_ID_ARM_PROPRIETARY_KHR - VK_DRIVER_ID_AMD_PROPRIETARY_KHR + 1), + VK_DRIVER_ID_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDriverIdKHR; + +typedef struct VkConformanceVersionKHR { + uint8_t major; + uint8_t minor; + uint8_t subminor; + uint8_t patch; +} VkConformanceVersionKHR; + +typedef struct VkPhysicalDeviceDriverPropertiesKHR { + VkStructureType sType; + void* pNext; + VkDriverIdKHR driverID; + char driverName[VK_MAX_DRIVER_NAME_SIZE_KHR]; + char driverInfo[VK_MAX_DRIVER_INFO_SIZE_KHR]; + VkConformanceVersionKHR conformanceVersion; +} VkPhysicalDeviceDriverPropertiesKHR; + + + +#define VK_KHR_shader_float_controls 1 +#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 1 +#define VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME "VK_KHR_shader_float_controls" + +typedef struct VkPhysicalDeviceFloatControlsPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 separateDenormSettings; + VkBool32 separateRoundingModeSettings; + VkBool32 shaderSignedZeroInfNanPreserveFloat16; + VkBool32 shaderSignedZeroInfNanPreserveFloat32; + VkBool32 shaderSignedZeroInfNanPreserveFloat64; + VkBool32 shaderDenormPreserveFloat16; + VkBool32 shaderDenormPreserveFloat32; + VkBool32 
shaderDenormPreserveFloat64; + VkBool32 shaderDenormFlushToZeroFloat16; + VkBool32 shaderDenormFlushToZeroFloat32; + VkBool32 shaderDenormFlushToZeroFloat64; + VkBool32 shaderRoundingModeRTEFloat16; + VkBool32 shaderRoundingModeRTEFloat32; + VkBool32 shaderRoundingModeRTEFloat64; + VkBool32 shaderRoundingModeRTZFloat16; + VkBool32 shaderRoundingModeRTZFloat32; + VkBool32 shaderRoundingModeRTZFloat64; +} VkPhysicalDeviceFloatControlsPropertiesKHR; + + + +#define VK_KHR_depth_stencil_resolve 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME "VK_KHR_depth_stencil_resolve" + + +typedef enum VkResolveModeFlagBitsKHR { + VK_RESOLVE_MODE_NONE_KHR = 0, + VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR = 0x00000001, + VK_RESOLVE_MODE_AVERAGE_BIT_KHR = 0x00000002, + VK_RESOLVE_MODE_MIN_BIT_KHR = 0x00000004, + VK_RESOLVE_MODE_MAX_BIT_KHR = 0x00000008, + VK_RESOLVE_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkResolveModeFlagBitsKHR; +typedef VkFlags VkResolveModeFlagsKHR; + +typedef struct VkSubpassDescriptionDepthStencilResolveKHR { + VkStructureType sType; + const void* pNext; + VkResolveModeFlagBitsKHR depthResolveMode; + VkResolveModeFlagBitsKHR stencilResolveMode; + const VkAttachmentReference2KHR* pDepthStencilResolveAttachment; +} VkSubpassDescriptionDepthStencilResolveKHR; + +typedef struct VkPhysicalDeviceDepthStencilResolvePropertiesKHR { + VkStructureType sType; + void* pNext; + VkResolveModeFlagsKHR supportedDepthResolveModes; + VkResolveModeFlagsKHR supportedStencilResolveModes; + VkBool32 independentResolveNone; + VkBool32 independentResolve; +} VkPhysicalDeviceDepthStencilResolvePropertiesKHR; + + + +#define VK_KHR_swapchain_mutable_format 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_SPEC_VERSION 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME "VK_KHR_swapchain_mutable_format" + + +#define VK_KHR_vulkan_memory_model 1 +#define VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION 2 +#define VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME "VK_KHR_vulkan_memory_model" + +typedef struct VkPhysicalDeviceVulkanMemoryModelFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 vulkanMemoryModel; + VkBool32 vulkanMemoryModelDeviceScope; +} VkPhysicalDeviceVulkanMemoryModelFeaturesKHR; + + + +#define VK_EXT_debug_report 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT) + +#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 9 +#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report" + + +typedef enum VkDebugReportObjectTypeEXT { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0, + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1, + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3, + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4, + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6, + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10, + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11, + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14, + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17, + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19, + 
VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23, + VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25, + VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26, + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30, + VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT = 31, + VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT = 32, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_BEGIN_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_END_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_RANGE_SIZE_EXT = (VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT - VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT + 1), + VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportObjectTypeEXT; + + +typedef enum VkDebugReportFlagBitsEXT { + VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001, + VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002, + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004, + VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008, + VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010, + VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportFlagBitsEXT; +typedef VkFlags VkDebugReportFlagsEXT; + +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)( + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage, + void* pUserData); + +typedef struct VkDebugReportCallbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportFlagsEXT flags; + PFN_vkDebugReportCallbackEXT pfnCallback; + void* pUserData; +} VkDebugReportCallbackCreateInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT( + 
VkInstance instance, + const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugReportCallbackEXT* pCallback); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT( + VkInstance instance, + VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT( + VkInstance instance, + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage); +#endif + +#define VK_NV_glsl_shader 1 +#define VK_NV_GLSL_SHADER_SPEC_VERSION 1 +#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader" + + +#define VK_EXT_depth_range_unrestricted 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted" + + +#define VK_IMG_filter_cubic 1 +#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1 +#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic" + + +#define VK_AMD_rasterization_order 1 +#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1 +#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order" + + +typedef enum VkRasterizationOrderAMD { + VK_RASTERIZATION_ORDER_STRICT_AMD = 0, + VK_RASTERIZATION_ORDER_RELAXED_AMD = 1, + VK_RASTERIZATION_ORDER_BEGIN_RANGE_AMD = VK_RASTERIZATION_ORDER_STRICT_AMD, + VK_RASTERIZATION_ORDER_END_RANGE_AMD = VK_RASTERIZATION_ORDER_RELAXED_AMD, + VK_RASTERIZATION_ORDER_RANGE_SIZE_AMD = (VK_RASTERIZATION_ORDER_RELAXED_AMD - VK_RASTERIZATION_ORDER_STRICT_AMD + 1), + VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF +} VkRasterizationOrderAMD; + +typedef struct VkPipelineRasterizationStateRasterizationOrderAMD { + VkStructureType sType; + const void* pNext; + VkRasterizationOrderAMD rasterizationOrder; +} VkPipelineRasterizationStateRasterizationOrderAMD; + + + +#define VK_AMD_shader_trinary_minmax 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax" + + +#define VK_AMD_shader_explicit_vertex_parameter 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter" + + +#define VK_EXT_debug_marker 1 +#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4 +#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker" + +typedef struct VkDebugMarkerObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + const char* pObjectName; +} VkDebugMarkerObjectNameInfoEXT; + +typedef struct VkDebugMarkerObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugMarkerObjectTagInfoEXT; + +typedef struct VkDebugMarkerMarkerInfoEXT { + VkStructureType sType; + const void* pNext; + const char* pMarkerName; + float color[4]; +} VkDebugMarkerMarkerInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo); +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +typedef void 
(VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT( + VkDevice device, + const VkDebugMarkerObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT( + VkDevice device, + const VkDebugMarkerObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +#endif + +#define VK_AMD_gcn_shader 1 +#define VK_AMD_GCN_SHADER_SPEC_VERSION 1 +#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader" + + +#define VK_NV_dedicated_allocation 1 +#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation" + +typedef struct VkDedicatedAllocationImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationImageCreateInfoNV; + +typedef struct VkDedicatedAllocationBufferCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationBufferCreateInfoNV; + +typedef struct VkDedicatedAllocationMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkDedicatedAllocationMemoryAllocateInfoNV; + + + +#define VK_AMD_draw_indirect_count 1 +#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 1 +#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count" + +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + +#define VK_AMD_negative_viewport_height 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height" + + +#define VK_AMD_gpu_shader_half_float 1 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 1 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float" + + +#define VK_AMD_shader_ballot 1 +#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1 +#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot" + + +#define VK_AMD_texture_gather_bias_lod 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME 
"VK_AMD_texture_gather_bias_lod" + +typedef struct VkTextureLODGatherFormatPropertiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 supportsTextureGatherLODBiasAMD; +} VkTextureLODGatherFormatPropertiesAMD; + + + +#define VK_AMD_shader_info 1 +#define VK_AMD_SHADER_INFO_SPEC_VERSION 1 +#define VK_AMD_SHADER_INFO_EXTENSION_NAME "VK_AMD_shader_info" + + +typedef enum VkShaderInfoTypeAMD { + VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0, + VK_SHADER_INFO_TYPE_BINARY_AMD = 1, + VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2, + VK_SHADER_INFO_TYPE_BEGIN_RANGE_AMD = VK_SHADER_INFO_TYPE_STATISTICS_AMD, + VK_SHADER_INFO_TYPE_END_RANGE_AMD = VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, + VK_SHADER_INFO_TYPE_RANGE_SIZE_AMD = (VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD - VK_SHADER_INFO_TYPE_STATISTICS_AMD + 1), + VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderInfoTypeAMD; + +typedef struct VkShaderResourceUsageAMD { + uint32_t numUsedVgprs; + uint32_t numUsedSgprs; + uint32_t ldsSizePerLocalWorkGroup; + size_t ldsUsageSizeInBytes; + size_t scratchMemUsageInBytes; +} VkShaderResourceUsageAMD; + +typedef struct VkShaderStatisticsInfoAMD { + VkShaderStageFlags shaderStageMask; + VkShaderResourceUsageAMD resourceUsage; + uint32_t numPhysicalVgprs; + uint32_t numPhysicalSgprs; + uint32_t numAvailableVgprs; + uint32_t numAvailableSgprs; + uint32_t computeWorkGroupSize[3]; +} VkShaderStatisticsInfoAMD; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD( + VkDevice device, + VkPipeline pipeline, + VkShaderStageFlagBits shaderStage, + VkShaderInfoTypeAMD infoType, + size_t* pInfoSize, + void* pInfo); +#endif + +#define VK_AMD_shader_image_load_store_lod 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME "VK_AMD_shader_image_load_store_lod" + + +#define VK_IMG_format_pvrtc 1 +#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1 +#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc" + + +#define VK_NV_external_memory_capabilities 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities" + + +typedef enum VkExternalMemoryHandleTypeFlagBitsNV { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBitsNV; +typedef VkFlags VkExternalMemoryHandleTypeFlagsNV; + +typedef enum VkExternalMemoryFeatureFlagBitsNV { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBitsNV; +typedef VkFlags VkExternalMemoryFeatureFlagsNV; + +typedef struct VkExternalImageFormatPropertiesNV { + VkImageFormatProperties imageFormatProperties; + VkExternalMemoryFeatureFlagsNV externalMemoryFeatures; + VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes; +} 
VkExternalImageFormatPropertiesNV; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); +#endif + +#define VK_NV_external_memory 1 +#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory" + +typedef struct VkExternalMemoryImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExternalMemoryImageCreateInfoNV; + +typedef struct VkExportMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExportMemoryAllocateInfoNV; + + + +#define VK_EXT_validation_flags 1 +#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags" + + +typedef enum VkValidationCheckEXT { + VK_VALIDATION_CHECK_ALL_EXT = 0, + VK_VALIDATION_CHECK_SHADERS_EXT = 1, + VK_VALIDATION_CHECK_BEGIN_RANGE_EXT = VK_VALIDATION_CHECK_ALL_EXT, + VK_VALIDATION_CHECK_END_RANGE_EXT = VK_VALIDATION_CHECK_SHADERS_EXT, + VK_VALIDATION_CHECK_RANGE_SIZE_EXT = (VK_VALIDATION_CHECK_SHADERS_EXT - VK_VALIDATION_CHECK_ALL_EXT + 1), + VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCheckEXT; + +typedef struct VkValidationFlagsEXT { + VkStructureType sType; + const void* pNext; + uint32_t disabledValidationCheckCount; + const VkValidationCheckEXT* pDisabledValidationChecks; +} VkValidationFlagsEXT; + + + +#define VK_EXT_shader_subgroup_ballot 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot" + + +#define VK_EXT_shader_subgroup_vote 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote" + + +#define VK_EXT_astc_decode_mode 1 +#define VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION 1 +#define VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME "VK_EXT_astc_decode_mode" + +typedef struct VkImageViewASTCDecodeModeEXT { + VkStructureType sType; + const void* pNext; + VkFormat decodeMode; +} VkImageViewASTCDecodeModeEXT; + +typedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 decodeModeSharedExponent; +} VkPhysicalDeviceASTCDecodeFeaturesEXT; + + + +#define VK_EXT_conditional_rendering 1 +#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 1 +#define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME "VK_EXT_conditional_rendering" + + +typedef enum VkConditionalRenderingFlagBitsEXT { + VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT = 0x00000001, + VK_CONDITIONAL_RENDERING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConditionalRenderingFlagBitsEXT; +typedef VkFlags VkConditionalRenderingFlagsEXT; + +typedef struct VkConditionalRenderingBeginInfoEXT { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceSize offset; + 
VkConditionalRenderingFlagsEXT flags; +} VkConditionalRenderingBeginInfoEXT; + +typedef struct VkPhysicalDeviceConditionalRenderingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 conditionalRendering; + VkBool32 inheritedConditionalRendering; +} VkPhysicalDeviceConditionalRenderingFeaturesEXT; + +typedef struct VkCommandBufferInheritanceConditionalRenderingInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 conditionalRenderingEnable; +} VkCommandBufferInheritanceConditionalRenderingInfoEXT; + + +typedef void (VKAPI_PTR *PFN_vkCmdBeginConditionalRenderingEXT)(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); +typedef void (VKAPI_PTR *PFN_vkCmdEndConditionalRenderingEXT)(VkCommandBuffer commandBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginConditionalRenderingEXT( + VkCommandBuffer commandBuffer, + const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndConditionalRenderingEXT( + VkCommandBuffer commandBuffer); +#endif + +#define VK_NVX_device_generated_commands 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkObjectTableNVX) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNVX) + +#define VK_NVX_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3 +#define VK_NVX_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NVX_device_generated_commands" + + +typedef enum VkIndirectCommandsTokenTypeNVX { + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX = 0, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DESCRIPTOR_SET_NVX = 1, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NVX = 2, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NVX = 3, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NVX = 4, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NVX = 5, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NVX = 6, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX = 7, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_BEGIN_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_END_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_RANGE_SIZE_NVX = (VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX - VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX + 1), + VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF +} VkIndirectCommandsTokenTypeNVX; + +typedef enum VkObjectEntryTypeNVX { + VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX = 0, + VK_OBJECT_ENTRY_TYPE_PIPELINE_NVX = 1, + VK_OBJECT_ENTRY_TYPE_INDEX_BUFFER_NVX = 2, + VK_OBJECT_ENTRY_TYPE_VERTEX_BUFFER_NVX = 3, + VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX = 4, + VK_OBJECT_ENTRY_TYPE_BEGIN_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX, + VK_OBJECT_ENTRY_TYPE_END_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX, + VK_OBJECT_ENTRY_TYPE_RANGE_SIZE_NVX = (VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX - VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX + 1), + VK_OBJECT_ENTRY_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF +} VkObjectEntryTypeNVX; + + +typedef enum VkIndirectCommandsLayoutUsageFlagBitsNVX { + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX = 0x00000001, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX = 0x00000002, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX = 0x00000004, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX = 0x00000008, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF +} VkIndirectCommandsLayoutUsageFlagBitsNVX; +typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNVX; + +typedef enum VkObjectEntryUsageFlagBitsNVX { + 
VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX = 0x00000001, + VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX = 0x00000002, + VK_OBJECT_ENTRY_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF +} VkObjectEntryUsageFlagBitsNVX; +typedef VkFlags VkObjectEntryUsageFlagsNVX; + +typedef struct VkDeviceGeneratedCommandsFeaturesNVX { + VkStructureType sType; + const void* pNext; + VkBool32 computeBindingPointSupport; +} VkDeviceGeneratedCommandsFeaturesNVX; + +typedef struct VkDeviceGeneratedCommandsLimitsNVX { + VkStructureType sType; + const void* pNext; + uint32_t maxIndirectCommandsLayoutTokenCount; + uint32_t maxObjectEntryCounts; + uint32_t minSequenceCountBufferOffsetAlignment; + uint32_t minSequenceIndexBufferOffsetAlignment; + uint32_t minCommandsTokenBufferOffsetAlignment; +} VkDeviceGeneratedCommandsLimitsNVX; + +typedef struct VkIndirectCommandsTokenNVX { + VkIndirectCommandsTokenTypeNVX tokenType; + VkBuffer buffer; + VkDeviceSize offset; +} VkIndirectCommandsTokenNVX; + +typedef struct VkIndirectCommandsLayoutTokenNVX { + VkIndirectCommandsTokenTypeNVX tokenType; + uint32_t bindingUnit; + uint32_t dynamicCount; + uint32_t divisor; +} VkIndirectCommandsLayoutTokenNVX; + +typedef struct VkIndirectCommandsLayoutCreateInfoNVX { + VkStructureType sType; + const void* pNext; + VkPipelineBindPoint pipelineBindPoint; + VkIndirectCommandsLayoutUsageFlagsNVX flags; + uint32_t tokenCount; + const VkIndirectCommandsLayoutTokenNVX* pTokens; +} VkIndirectCommandsLayoutCreateInfoNVX; + +typedef struct VkCmdProcessCommandsInfoNVX { + VkStructureType sType; + const void* pNext; + VkObjectTableNVX objectTable; + VkIndirectCommandsLayoutNVX indirectCommandsLayout; + uint32_t indirectCommandsTokenCount; + const VkIndirectCommandsTokenNVX* pIndirectCommandsTokens; + uint32_t maxSequencesCount; + VkCommandBuffer targetCommandBuffer; + VkBuffer sequencesCountBuffer; + VkDeviceSize sequencesCountOffset; + VkBuffer sequencesIndexBuffer; + VkDeviceSize sequencesIndexOffset; +} VkCmdProcessCommandsInfoNVX; + +typedef struct VkCmdReserveSpaceForCommandsInfoNVX { + VkStructureType sType; + const void* pNext; + VkObjectTableNVX objectTable; + VkIndirectCommandsLayoutNVX indirectCommandsLayout; + uint32_t maxSequencesCount; +} VkCmdReserveSpaceForCommandsInfoNVX; + +typedef struct VkObjectTableCreateInfoNVX { + VkStructureType sType; + const void* pNext; + uint32_t objectCount; + const VkObjectEntryTypeNVX* pObjectEntryTypes; + const uint32_t* pObjectEntryCounts; + const VkObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags; + uint32_t maxUniformBuffersPerDescriptor; + uint32_t maxStorageBuffersPerDescriptor; + uint32_t maxStorageImagesPerDescriptor; + uint32_t maxSampledImagesPerDescriptor; + uint32_t maxPipelineLayouts; +} VkObjectTableCreateInfoNVX; + +typedef struct VkObjectTableEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; +} VkObjectTableEntryNVX; + +typedef struct VkObjectTablePipelineEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkPipeline pipeline; +} VkObjectTablePipelineEntryNVX; + +typedef struct VkObjectTableDescriptorSetEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkPipelineLayout pipelineLayout; + VkDescriptorSet descriptorSet; +} VkObjectTableDescriptorSetEntryNVX; + +typedef struct VkObjectTableVertexBufferEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkBuffer buffer; +} VkObjectTableVertexBufferEntryNVX; + +typedef struct VkObjectTableIndexBufferEntryNVX { + VkObjectEntryTypeNVX type; + 
VkObjectEntryUsageFlagsNVX flags; + VkBuffer buffer; + VkIndexType indexType; +} VkObjectTableIndexBufferEntryNVX; + +typedef struct VkObjectTablePushConstantEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkPipelineLayout pipelineLayout; + VkShaderStageFlags stageFlags; +} VkObjectTablePushConstantEntryNVX; + + +typedef void (VKAPI_PTR *PFN_vkCmdProcessCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdReserveSpaceForCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNVX)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNVX)(VkDevice device, VkIndirectCommandsLayoutNVX indirectCommandsLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateObjectTableNVX)(VkDevice device, const VkObjectTableCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkObjectTableNVX* pObjectTable); +typedef void (VKAPI_PTR *PFN_vkDestroyObjectTableNVX)(VkDevice device, VkObjectTableNVX objectTable, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices); +typedef VkResult (VKAPI_PTR *PFN_vkUnregisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX)(VkPhysicalDevice physicalDevice, VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, VkDeviceGeneratedCommandsLimitsNVX* pLimits); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdProcessCommandsNVX( + VkCommandBuffer commandBuffer, + const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdReserveSpaceForCommandsNVX( + VkCommandBuffer commandBuffer, + const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNVX( + VkDevice device, + const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNVX( + VkDevice device, + VkIndirectCommandsLayoutNVX indirectCommandsLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateObjectTableNVX( + VkDevice device, + const VkObjectTableCreateInfoNVX* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkObjectTableNVX* pObjectTable); + +VKAPI_ATTR void VKAPI_CALL vkDestroyObjectTableNVX( + VkDevice device, + VkObjectTableNVX objectTable, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterObjectsNVX( + VkDevice device, + VkObjectTableNVX objectTable, + uint32_t objectCount, + const VkObjectTableEntryNVX* const* ppObjectTableEntries, + const uint32_t* pObjectIndices); + +VKAPI_ATTR VkResult VKAPI_CALL vkUnregisterObjectsNVX( + VkDevice device, + VkObjectTableNVX objectTable, + uint32_t objectCount, + const VkObjectEntryTypeNVX* 
pObjectEntryTypes, + const uint32_t* pObjectIndices); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX( + VkPhysicalDevice physicalDevice, + VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, + VkDeviceGeneratedCommandsLimitsNVX* pLimits); +#endif + +#define VK_NV_clip_space_w_scaling 1 +#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1 +#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling" + +typedef struct VkViewportWScalingNV { + float xcoeff; + float ycoeff; +} VkViewportWScalingNV; + +typedef struct VkPipelineViewportWScalingStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 viewportWScalingEnable; + uint32_t viewportCount; + const VkViewportWScalingNV* pViewportWScalings; +} VkPipelineViewportWScalingStateCreateInfoNV; + + +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewportWScalingNV* pViewportWScalings); +#endif + +#define VK_EXT_direct_mode_display 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display" + +typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display); +#endif + +#define VK_EXT_display_surface_counter 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter" + + +typedef enum VkSurfaceCounterFlagBitsEXT { + VK_SURFACE_COUNTER_VBLANK_EXT = 0x00000001, + VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSurfaceCounterFlagBitsEXT; +typedef VkFlags VkSurfaceCounterFlagsEXT; + +typedef struct VkSurfaceCapabilities2EXT { + VkStructureType sType; + void* pNext; + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; + VkSurfaceCounterFlagsEXT supportedSurfaceCounters; +} VkSurfaceCapabilities2EXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT* pSurfaceCapabilities); +#endif + +#define VK_EXT_display_control 1 +#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control" + + +typedef enum VkDisplayPowerStateEXT { + VK_DISPLAY_POWER_STATE_OFF_EXT = 0, + VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1, + VK_DISPLAY_POWER_STATE_ON_EXT = 2, + VK_DISPLAY_POWER_STATE_BEGIN_RANGE_EXT = VK_DISPLAY_POWER_STATE_OFF_EXT, + VK_DISPLAY_POWER_STATE_END_RANGE_EXT = VK_DISPLAY_POWER_STATE_ON_EXT, + VK_DISPLAY_POWER_STATE_RANGE_SIZE_EXT = 
(VK_DISPLAY_POWER_STATE_ON_EXT - VK_DISPLAY_POWER_STATE_OFF_EXT + 1), + VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayPowerStateEXT; + +typedef enum VkDeviceEventTypeEXT { + VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0, + VK_DEVICE_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT, + VK_DEVICE_EVENT_TYPE_END_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT, + VK_DEVICE_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT - VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT + 1), + VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceEventTypeEXT; + +typedef enum VkDisplayEventTypeEXT { + VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0, + VK_DISPLAY_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT, + VK_DISPLAY_EVENT_TYPE_END_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT, + VK_DISPLAY_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT - VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT + 1), + VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayEventTypeEXT; + +typedef struct VkDisplayPowerInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayPowerStateEXT powerState; +} VkDisplayPowerInfoEXT; + +typedef struct VkDeviceEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceEventTypeEXT deviceEvent; +} VkDeviceEventInfoEXT; + +typedef struct VkDisplayEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayEventTypeEXT displayEvent; +} VkDisplayEventInfoEXT; + +typedef struct VkSwapchainCounterCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkSurfaceCounterFlagsEXT surfaceCounters; +} VkSwapchainCounterCreateInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayPowerInfoEXT* pDisplayPowerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT( + VkDevice device, + const VkDeviceEventInfoEXT* pDeviceEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayEventInfoEXT* pDisplayEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSurfaceCounterFlagBitsEXT counter, + uint64_t* pCounterValue); +#endif + +#define VK_GOOGLE_display_timing 1 +#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1 +#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing" + +typedef struct VkRefreshCycleDurationGOOGLE { + uint64_t refreshDuration; +} VkRefreshCycleDurationGOOGLE; + +typedef struct VkPastPresentationTimingGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; + 
uint64_t actualPresentTime; + uint64_t earliestPresentTime; + uint64_t presentMargin; +} VkPastPresentationTimingGOOGLE; + +typedef struct VkPresentTimeGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; +} VkPresentTimeGOOGLE; + +typedef struct VkPresentTimesInfoGOOGLE { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentTimeGOOGLE* pTimes; +} VkPresentTimesInfoGOOGLE; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pPresentationTimingCount, + VkPastPresentationTimingGOOGLE* pPresentationTimings); +#endif + +#define VK_NV_sample_mask_override_coverage 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage" + + +#define VK_NV_geometry_shader_passthrough 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough" + + +#define VK_NV_viewport_array2 1 +#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2" + + +#define VK_NVX_multiview_per_view_attributes 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes" + +typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + VkStructureType sType; + void* pNext; + VkBool32 perViewPositionAllComponents; +} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + + + +#define VK_NV_viewport_swizzle 1 +#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle" + + +typedef enum VkViewportCoordinateSwizzleNV { + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7, + VK_VIEWPORT_COORDINATE_SWIZZLE_BEGIN_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV, + VK_VIEWPORT_COORDINATE_SWIZZLE_END_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV, + VK_VIEWPORT_COORDINATE_SWIZZLE_RANGE_SIZE_NV = (VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV - VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV + 1), + VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF +} VkViewportCoordinateSwizzleNV; + +typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV; + +typedef struct VkViewportSwizzleNV { + VkViewportCoordinateSwizzleNV x; + VkViewportCoordinateSwizzleNV y; + VkViewportCoordinateSwizzleNV z; + VkViewportCoordinateSwizzleNV w; +} VkViewportSwizzleNV; + +typedef struct 
VkPipelineViewportSwizzleStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineViewportSwizzleStateCreateFlagsNV flags; + uint32_t viewportCount; + const VkViewportSwizzleNV* pViewportSwizzles; +} VkPipelineViewportSwizzleStateCreateInfoNV; + + + +#define VK_EXT_discard_rectangles 1 +#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1 +#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles" + + +typedef enum VkDiscardRectangleModeEXT { + VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0, + VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1, + VK_DISCARD_RECTANGLE_MODE_BEGIN_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT, + VK_DISCARD_RECTANGLE_MODE_END_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT, + VK_DISCARD_RECTANGLE_MODE_RANGE_SIZE_EXT = (VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT - VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT + 1), + VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDiscardRectangleModeEXT; + +typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT; + +typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxDiscardRectangles; +} VkPhysicalDeviceDiscardRectanglePropertiesEXT; + +typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineDiscardRectangleStateCreateFlagsEXT flags; + VkDiscardRectangleModeEXT discardRectangleMode; + uint32_t discardRectangleCount; + const VkRect2D* pDiscardRectangles; +} VkPipelineDiscardRectangleStateCreateInfoEXT; + + +typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT( + VkCommandBuffer commandBuffer, + uint32_t firstDiscardRectangle, + uint32_t discardRectangleCount, + const VkRect2D* pDiscardRectangles); +#endif + +#define VK_EXT_conservative_rasterization 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME "VK_EXT_conservative_rasterization" + + +typedef enum VkConservativeRasterizationModeEXT { + VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0, + VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1, + VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2, + VK_CONSERVATIVE_RASTERIZATION_MODE_BEGIN_RANGE_EXT = VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT, + VK_CONSERVATIVE_RASTERIZATION_MODE_END_RANGE_EXT = VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT, + VK_CONSERVATIVE_RASTERIZATION_MODE_RANGE_SIZE_EXT = (VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT - VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT + 1), + VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConservativeRasterizationModeEXT; + +typedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT; + +typedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + float primitiveOverestimationSize; + float maxExtraPrimitiveOverestimationSize; + float extraPrimitiveOverestimationSizeGranularity; + VkBool32 primitiveUnderestimation; + VkBool32 conservativePointAndLineRasterization; + VkBool32 degenerateTrianglesRasterized; + VkBool32 degenerateLinesRasterized; + VkBool32 fullyCoveredFragmentShaderInputVariable; + VkBool32 conservativeRasterizationPostDepthCoverage; +} VkPhysicalDeviceConservativeRasterizationPropertiesEXT; + 
+typedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationConservativeStateCreateFlagsEXT flags; + VkConservativeRasterizationModeEXT conservativeRasterizationMode; + float extraPrimitiveOverestimationSize; +} VkPipelineRasterizationConservativeStateCreateInfoEXT; + + + +#define VK_EXT_swapchain_colorspace 1 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 3 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace" + + +#define VK_EXT_hdr_metadata 1 +#define VK_EXT_HDR_METADATA_SPEC_VERSION 1 +#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata" + +typedef struct VkXYColorEXT { + float x; + float y; +} VkXYColorEXT; + +typedef struct VkHdrMetadataEXT { + VkStructureType sType; + const void* pNext; + VkXYColorEXT displayPrimaryRed; + VkXYColorEXT displayPrimaryGreen; + VkXYColorEXT displayPrimaryBlue; + VkXYColorEXT whitePoint; + float maxLuminance; + float minLuminance; + float maxContentLightLevel; + float maxFrameAverageLightLevel; +} VkHdrMetadataEXT; + + +typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainKHR* pSwapchains, + const VkHdrMetadataEXT* pMetadata); +#endif + +#define VK_EXT_external_memory_dma_buf 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME "VK_EXT_external_memory_dma_buf" + + +#define VK_EXT_queue_family_foreign 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME "VK_EXT_queue_family_foreign" +#define VK_QUEUE_FAMILY_FOREIGN_EXT (~0U-2) + + +#define VK_EXT_debug_utils 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT) + +#define VK_EXT_DEBUG_UTILS_SPEC_VERSION 1 +#define VK_EXT_DEBUG_UTILS_EXTENSION_NAME "VK_EXT_debug_utils" + +typedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT; +typedef VkFlags VkDebugUtilsMessengerCreateFlagsEXT; + +typedef enum VkDebugUtilsMessageSeverityFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageSeverityFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT; + +typedef enum VkDebugUtilsMessageTypeFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002, + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004, + VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageTypeFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageTypeFlagsEXT; + +typedef struct VkDebugUtilsObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + const char* pObjectName; +} VkDebugUtilsObjectNameInfoEXT; + +typedef struct VkDebugUtilsObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugUtilsObjectTagInfoEXT; + +typedef struct VkDebugUtilsLabelEXT { 
+ VkStructureType sType; + const void* pNext; + const char* pLabelName; + float color[4]; +} VkDebugUtilsLabelEXT; + +typedef struct VkDebugUtilsMessengerCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCallbackDataFlagsEXT flags; + const char* pMessageIdName; + int32_t messageIdNumber; + const char* pMessage; + uint32_t queueLabelCount; + VkDebugUtilsLabelEXT* pQueueLabels; + uint32_t cmdBufLabelCount; + VkDebugUtilsLabelEXT* pCmdBufLabels; + uint32_t objectCount; + VkDebugUtilsObjectNameInfoEXT* pObjects; +} VkDebugUtilsMessengerCallbackDataEXT; + +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageType, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDebugUtilsMessengerCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCreateFlagsEXT flags; + VkDebugUtilsMessageSeverityFlagsEXT messageSeverity; + VkDebugUtilsMessageTypeFlagsEXT messageType; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; + void* pUserData; +} VkDebugUtilsMessengerCreateInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo); +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo); +typedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue); +typedef void (VKAPI_PTR *PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectNameEXT( + VkDevice device, + const VkDebugUtilsObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectTagEXT( + VkDevice device, + const VkDebugUtilsObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT( + VkQueue queue); + +VKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL 
vkCmdEndDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT( + VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugUtilsMessengerEXT* pMessenger); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT( + VkInstance instance, + VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT( + VkInstance instance, + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); +#endif + +#define VK_EXT_sampler_filter_minmax 1 +#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 1 +#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax" + + +typedef enum VkSamplerReductionModeEXT { + VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = 0, + VK_SAMPLER_REDUCTION_MODE_MIN_EXT = 1, + VK_SAMPLER_REDUCTION_MODE_MAX_EXT = 2, + VK_SAMPLER_REDUCTION_MODE_BEGIN_RANGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT, + VK_SAMPLER_REDUCTION_MODE_END_RANGE_EXT = VK_SAMPLER_REDUCTION_MODE_MAX_EXT, + VK_SAMPLER_REDUCTION_MODE_RANGE_SIZE_EXT = (VK_SAMPLER_REDUCTION_MODE_MAX_EXT - VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT + 1), + VK_SAMPLER_REDUCTION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSamplerReductionModeEXT; + +typedef struct VkSamplerReductionModeCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkSamplerReductionModeEXT reductionMode; +} VkSamplerReductionModeCreateInfoEXT; + +typedef struct VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 filterMinmaxSingleComponentFormats; + VkBool32 filterMinmaxImageComponentMapping; +} VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT; + + + +#define VK_AMD_gpu_shader_int16 1 +#define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 1 +#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16" + + +#define VK_AMD_mixed_attachment_samples 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples" + + +#define VK_AMD_shader_fragment_mask 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask" + + +#define VK_EXT_inline_uniform_block 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME "VK_EXT_inline_uniform_block" + +typedef struct VkPhysicalDeviceInlineUniformBlockFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 inlineUniformBlock; + VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind; +} VkPhysicalDeviceInlineUniformBlockFeaturesEXT; + +typedef struct VkPhysicalDeviceInlineUniformBlockPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxInlineUniformBlockSize; + uint32_t maxPerStageDescriptorInlineUniformBlocks; + uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks; + uint32_t maxDescriptorSetInlineUniformBlocks; + uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks; +} VkPhysicalDeviceInlineUniformBlockPropertiesEXT; + +typedef struct VkWriteDescriptorSetInlineUniformBlockEXT { + VkStructureType sType; + const void* pNext; + 
uint32_t dataSize; + const void* pData; +} VkWriteDescriptorSetInlineUniformBlockEXT; + +typedef struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t maxInlineUniformBlockBindings; +} VkDescriptorPoolInlineUniformBlockCreateInfoEXT; + + + +#define VK_EXT_shader_stencil_export 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export" + + +#define VK_EXT_sample_locations 1 +#define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1 +#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations" + +typedef struct VkSampleLocationEXT { + float x; + float y; +} VkSampleLocationEXT; + +typedef struct VkSampleLocationsInfoEXT { + VkStructureType sType; + const void* pNext; + VkSampleCountFlagBits sampleLocationsPerPixel; + VkExtent2D sampleLocationGridSize; + uint32_t sampleLocationsCount; + const VkSampleLocationEXT* pSampleLocations; +} VkSampleLocationsInfoEXT; + +typedef struct VkAttachmentSampleLocationsEXT { + uint32_t attachmentIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkAttachmentSampleLocationsEXT; + +typedef struct VkSubpassSampleLocationsEXT { + uint32_t subpassIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkSubpassSampleLocationsEXT; + +typedef struct VkRenderPassSampleLocationsBeginInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t attachmentInitialSampleLocationsCount; + const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations; + uint32_t postSubpassSampleLocationsCount; + const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations; +} VkRenderPassSampleLocationsBeginInfoEXT; + +typedef struct VkPipelineSampleLocationsStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 sampleLocationsEnable; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkPipelineSampleLocationsStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleLocationSampleCounts; + VkExtent2D maxSampleLocationGridSize; + float sampleLocationCoordinateRange[2]; + uint32_t sampleLocationSubPixelBits; + VkBool32 variableSampleLocations; +} VkPhysicalDeviceSampleLocationsPropertiesEXT; + +typedef struct VkMultisamplePropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D maxSampleLocationGridSize; +} VkMultisamplePropertiesEXT; + + +typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties); +#endif + +#define VK_EXT_blend_operation_advanced 1 +#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2 +#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced" + + +typedef enum VkBlendOverlapEXT { + VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0, + VK_BLEND_OVERLAP_DISJOINT_EXT = 1, + VK_BLEND_OVERLAP_CONJOINT_EXT = 2, + 
VK_BLEND_OVERLAP_BEGIN_RANGE_EXT = VK_BLEND_OVERLAP_UNCORRELATED_EXT, + VK_BLEND_OVERLAP_END_RANGE_EXT = VK_BLEND_OVERLAP_CONJOINT_EXT, + VK_BLEND_OVERLAP_RANGE_SIZE_EXT = (VK_BLEND_OVERLAP_CONJOINT_EXT - VK_BLEND_OVERLAP_UNCORRELATED_EXT + 1), + VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF +} VkBlendOverlapEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 advancedBlendCoherentOperations; +} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t advancedBlendMaxColorAttachments; + VkBool32 advancedBlendIndependentBlend; + VkBool32 advancedBlendNonPremultipliedSrcColor; + VkBool32 advancedBlendNonPremultipliedDstColor; + VkBool32 advancedBlendCorrelatedOverlap; + VkBool32 advancedBlendAllOperations; +} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT; + +typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 srcPremultiplied; + VkBool32 dstPremultiplied; + VkBlendOverlapEXT blendOverlap; +} VkPipelineColorBlendAdvancedStateCreateInfoEXT; + + + +#define VK_NV_fragment_coverage_to_color 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color" + +typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV; + +typedef struct VkPipelineCoverageToColorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageToColorStateCreateFlagsNV flags; + VkBool32 coverageToColorEnable; + uint32_t coverageToColorLocation; +} VkPipelineCoverageToColorStateCreateInfoNV; + + + +#define VK_NV_framebuffer_mixed_samples 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples" + + +typedef enum VkCoverageModulationModeNV { + VK_COVERAGE_MODULATION_MODE_NONE_NV = 0, + VK_COVERAGE_MODULATION_MODE_RGB_NV = 1, + VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2, + VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3, + VK_COVERAGE_MODULATION_MODE_BEGIN_RANGE_NV = VK_COVERAGE_MODULATION_MODE_NONE_NV, + VK_COVERAGE_MODULATION_MODE_END_RANGE_NV = VK_COVERAGE_MODULATION_MODE_RGBA_NV, + VK_COVERAGE_MODULATION_MODE_RANGE_SIZE_NV = (VK_COVERAGE_MODULATION_MODE_RGBA_NV - VK_COVERAGE_MODULATION_MODE_NONE_NV + 1), + VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageModulationModeNV; + +typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV; + +typedef struct VkPipelineCoverageModulationStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageModulationStateCreateFlagsNV flags; + VkCoverageModulationModeNV coverageModulationMode; + VkBool32 coverageModulationTableEnable; + uint32_t coverageModulationTableCount; + const float* pCoverageModulationTable; +} VkPipelineCoverageModulationStateCreateInfoNV; + + + +#define VK_NV_fill_rectangle 1 +#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1 +#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle" + + +#define VK_EXT_post_depth_coverage 1 +#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1 +#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage" + + +#define VK_EXT_validation_cache 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT) + +#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache" 
+ + +typedef enum VkValidationCacheHeaderVersionEXT { + VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1, + VK_VALIDATION_CACHE_HEADER_VERSION_BEGIN_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT, + VK_VALIDATION_CACHE_HEADER_VERSION_END_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT, + VK_VALIDATION_CACHE_HEADER_VERSION_RANGE_SIZE_EXT = (VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT - VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT + 1), + VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCacheHeaderVersionEXT; + +typedef VkFlags VkValidationCacheCreateFlagsEXT; + +typedef struct VkValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheCreateFlagsEXT flags; + size_t initialDataSize; + const void* pInitialData; +} VkValidationCacheCreateInfoEXT; + +typedef struct VkShaderModuleValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheEXT validationCache; +} VkShaderModuleValidationCacheCreateInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); +typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT( + VkDevice device, + const VkValidationCacheCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkValidationCacheEXT* pValidationCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT( + VkDevice device, + VkValidationCacheEXT dstCache, + uint32_t srcCacheCount, + const VkValidationCacheEXT* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + size_t* pDataSize, + void* pData); +#endif + +#define VK_EXT_descriptor_indexing 1 +#define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2 +#define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME "VK_EXT_descriptor_indexing" + + +typedef enum VkDescriptorBindingFlagBitsEXT { + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = 0x00000001, + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = 0x00000002, + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = 0x00000004, + VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = 0x00000008, + VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDescriptorBindingFlagBitsEXT; +typedef VkFlags VkDescriptorBindingFlagsEXT; + +typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t bindingCount; + const VkDescriptorBindingFlagsEXT* pBindingFlags; +} VkDescriptorSetLayoutBindingFlagsCreateInfoEXT; + +typedef struct VkPhysicalDeviceDescriptorIndexingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderInputAttachmentArrayDynamicIndexing; + VkBool32 
shaderUniformTexelBufferArrayDynamicIndexing; + VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; + VkBool32 shaderUniformBufferArrayNonUniformIndexing; + VkBool32 shaderSampledImageArrayNonUniformIndexing; + VkBool32 shaderStorageBufferArrayNonUniformIndexing; + VkBool32 shaderStorageImageArrayNonUniformIndexing; + VkBool32 shaderInputAttachmentArrayNonUniformIndexing; + VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing; + VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; + VkBool32 descriptorBindingUniformBufferUpdateAfterBind; + VkBool32 descriptorBindingSampledImageUpdateAfterBind; + VkBool32 descriptorBindingStorageImageUpdateAfterBind; + VkBool32 descriptorBindingStorageBufferUpdateAfterBind; + VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingUpdateUnusedWhilePending; + VkBool32 descriptorBindingPartiallyBound; + VkBool32 descriptorBindingVariableDescriptorCount; + VkBool32 runtimeDescriptorArray; +} VkPhysicalDeviceDescriptorIndexingFeaturesEXT; + +typedef struct VkPhysicalDeviceDescriptorIndexingPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxUpdateAfterBindDescriptorsInAllPools; + VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; + VkBool32 shaderSampledImageArrayNonUniformIndexingNative; + VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; + VkBool32 shaderStorageImageArrayNonUniformIndexingNative; + VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; + VkBool32 robustBufferAccessUpdateAfterBind; + VkBool32 quadDivergentImplicitLod; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; + uint32_t maxPerStageUpdateAfterBindResources; + uint32_t maxDescriptorSetUpdateAfterBindSamplers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; +} VkPhysicalDeviceDescriptorIndexingPropertiesEXT; + +typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t descriptorSetCount; + const uint32_t* pDescriptorCounts; +} VkDescriptorSetVariableDescriptorCountAllocateInfoEXT; + +typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupportEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVariableDescriptorCount; +} VkDescriptorSetVariableDescriptorCountLayoutSupportEXT; + + + +#define VK_EXT_shader_viewport_index_layer 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer" + + + +#define VK_NV_ray_tracing 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV) + +#define VK_NV_RAY_TRACING_SPEC_VERSION 2 +#define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing" +#define VK_SHADER_UNUSED_NV (~0U) + + +typedef enum VkRayTracingShaderGroupTypeNV { + 
VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = 0, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = 1, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = 2, + VK_RAY_TRACING_SHADER_GROUP_TYPE_BEGIN_RANGE_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV, + VK_RAY_TRACING_SHADER_GROUP_TYPE_END_RANGE_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV, + VK_RAY_TRACING_SHADER_GROUP_TYPE_RANGE_SIZE_NV = (VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV - VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV + 1), + VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkRayTracingShaderGroupTypeNV; + +typedef enum VkGeometryTypeNV { + VK_GEOMETRY_TYPE_TRIANGLES_NV = 0, + VK_GEOMETRY_TYPE_AABBS_NV = 1, + VK_GEOMETRY_TYPE_BEGIN_RANGE_NV = VK_GEOMETRY_TYPE_TRIANGLES_NV, + VK_GEOMETRY_TYPE_END_RANGE_NV = VK_GEOMETRY_TYPE_AABBS_NV, + VK_GEOMETRY_TYPE_RANGE_SIZE_NV = (VK_GEOMETRY_TYPE_AABBS_NV - VK_GEOMETRY_TYPE_TRIANGLES_NV + 1), + VK_GEOMETRY_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkGeometryTypeNV; + +typedef enum VkAccelerationStructureTypeNV { + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = 0, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = 1, + VK_ACCELERATION_STRUCTURE_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV, + VK_ACCELERATION_STRUCTURE_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV, + VK_ACCELERATION_STRUCTURE_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV + 1), + VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureTypeNV; + +typedef enum VkCopyAccelerationStructureModeNV { + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = 0, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = 1, + VK_COPY_ACCELERATION_STRUCTURE_MODE_BEGIN_RANGE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV, + VK_COPY_ACCELERATION_STRUCTURE_MODE_END_RANGE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV, + VK_COPY_ACCELERATION_STRUCTURE_MODE_RANGE_SIZE_NV = (VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV - VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV + 1), + VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCopyAccelerationStructureModeNV; + +typedef enum VkAccelerationStructureMemoryRequirementsTypeNV { + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV + 1), + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureMemoryRequirementsTypeNV; + + +typedef enum VkGeometryFlagBitsNV { + VK_GEOMETRY_OPAQUE_BIT_NV = 0x00000001, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = 0x00000002, + VK_GEOMETRY_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkGeometryFlagBitsNV; +typedef VkFlags VkGeometryFlagsNV; + +typedef enum VkGeometryInstanceFlagBitsNV { + VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = 0x00000001, + 
VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = 0x00000002, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = 0x00000004, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = 0x00000008, + VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkGeometryInstanceFlagBitsNV; +typedef VkFlags VkGeometryInstanceFlagsNV; + +typedef enum VkBuildAccelerationStructureFlagBitsNV { + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = 0x00000001, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = 0x00000002, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = 0x00000004, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = 0x00000008, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = 0x00000010, + VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkBuildAccelerationStructureFlagBitsNV; +typedef VkFlags VkBuildAccelerationStructureFlagsNV; + +typedef struct VkRayTracingShaderGroupCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeNV type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; +} VkRayTracingShaderGroupCreateInfoNV; + +typedef struct VkRayTracingPipelineCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoNV* pGroups; + uint32_t maxRecursionDepth; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoNV; + +typedef struct VkGeometryTrianglesNV { + VkStructureType sType; + const void* pNext; + VkBuffer vertexData; + VkDeviceSize vertexOffset; + uint32_t vertexCount; + VkDeviceSize vertexStride; + VkFormat vertexFormat; + VkBuffer indexData; + VkDeviceSize indexOffset; + uint32_t indexCount; + VkIndexType indexType; + VkBuffer transformData; + VkDeviceSize transformOffset; +} VkGeometryTrianglesNV; + +typedef struct VkGeometryAABBNV { + VkStructureType sType; + const void* pNext; + VkBuffer aabbData; + uint32_t numAABBs; + uint32_t stride; + VkDeviceSize offset; +} VkGeometryAABBNV; + +typedef struct VkGeometryDataNV { + VkGeometryTrianglesNV triangles; + VkGeometryAABBNV aabbs; +} VkGeometryDataNV; + +typedef struct VkGeometryNV { + VkStructureType sType; + const void* pNext; + VkGeometryTypeNV geometryType; + VkGeometryDataNV geometry; + VkGeometryFlagsNV flags; +} VkGeometryNV; + +typedef struct VkAccelerationStructureInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeNV type; + VkBuildAccelerationStructureFlagsNV flags; + uint32_t instanceCount; + uint32_t geometryCount; + const VkGeometryNV* pGeometries; +} VkAccelerationStructureInfoNV; + +typedef struct VkAccelerationStructureCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkDeviceSize compactedSize; + VkAccelerationStructureInfoNV info; +} VkAccelerationStructureCreateInfoNV; + +typedef struct VkBindAccelerationStructureMemoryInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureNV accelerationStructure; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindAccelerationStructureMemoryInfoNV; + +typedef struct VkWriteDescriptorSetAccelerationStructureNV { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureNV* 
pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureNV; + +typedef struct VkAccelerationStructureMemoryRequirementsInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureMemoryRequirementsTypeNV type; + VkAccelerationStructureNV accelerationStructure; +} VkAccelerationStructureMemoryRequirementsInfoNV; + +typedef struct VkPhysicalDeviceRayTracingPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxTriangleCount; + uint32_t maxDescriptorSetAccelerationStructures; +} VkPhysicalDeviceRayTracingPropertiesNV; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); +typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeNV mode); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef VkResult (VKAPI_PTR 
*PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV( + VkDevice device, + const VkAccelerationStructureCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureNV* pAccelerationStructure); + +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( + VkDevice device, + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, + VkMemoryRequirements2KHR* pMemoryRequirements); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV( + VkDevice device, + uint32_t bindInfoCount, + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( + VkCommandBuffer commandBuffer, + const VkAccelerationStructureInfoNV* pInfo, + VkBuffer instanceData, + VkDeviceSize instanceOffset, + VkBool32 update, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkBuffer scratch, + VkDeviceSize scratchOffset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV( + VkCommandBuffer commandBuffer, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkCopyAccelerationStructureModeNV mode); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV( + VkCommandBuffer commandBuffer, + VkBuffer raygenShaderBindingTableBuffer, + VkDeviceSize raygenShaderBindingOffset, + VkBuffer missShaderBindingTableBuffer, + VkDeviceSize missShaderBindingOffset, + VkDeviceSize missShaderBindingStride, + VkBuffer hitShaderBindingTableBuffer, + VkDeviceSize hitShaderBindingOffset, + VkDeviceSize hitShaderBindingStride, + VkBuffer callableShaderBindingTableBuffer, + VkDeviceSize callableShaderBindingOffset, + VkDeviceSize callableShaderBindingStride, + uint32_t width, + uint32_t height, + uint32_t depth); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoNV* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + size_t dataSize, + void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureNV* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); + +VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV( + VkDevice device, + VkPipeline pipeline, + uint32_t shader); +#endif + + +#define VK_EXT_global_priority 1 +#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2 +#define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority" + + +typedef enum VkQueueGlobalPriorityEXT { + VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = 128, + VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = 256, + VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = 512, + VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = 1024, + VK_QUEUE_GLOBAL_PRIORITY_BEGIN_RANGE_EXT = 
VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT, + VK_QUEUE_GLOBAL_PRIORITY_END_RANGE_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT, + VK_QUEUE_GLOBAL_PRIORITY_RANGE_SIZE_EXT = (VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT - VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT + 1), + VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_EXT = 0x7FFFFFFF +} VkQueueGlobalPriorityEXT; + +typedef struct VkDeviceQueueGlobalPriorityCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkQueueGlobalPriorityEXT globalPriority; +} VkDeviceQueueGlobalPriorityCreateInfoEXT; + + + +#define VK_EXT_external_memory_host 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME "VK_EXT_external_memory_host" + +typedef struct VkImportMemoryHostPointerInfoEXT { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + void* pHostPointer; +} VkImportMemoryHostPointerInfoEXT; + +typedef struct VkMemoryHostPointerPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryHostPointerPropertiesEXT; + +typedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize minImportedHostPointerAlignment; +} VkPhysicalDeviceExternalMemoryHostPropertiesEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + const void* pHostPointer, + VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); +#endif + +#define VK_AMD_buffer_marker 1 +#define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1 +#define VK_AMD_BUFFER_MARKER_EXTENSION_NAME "VK_AMD_buffer_marker" + +typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker); +#endif + +#define VK_AMD_shader_core_properties 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties" + +typedef struct VkPhysicalDeviceShaderCorePropertiesAMD { + VkStructureType sType; + void* pNext; + uint32_t shaderEngineCount; + uint32_t shaderArraysPerEngineCount; + uint32_t computeUnitsPerShaderArray; + uint32_t simdPerComputeUnit; + uint32_t wavefrontsPerSimd; + uint32_t wavefrontSize; + uint32_t sgprsPerSimd; + uint32_t minSgprAllocation; + uint32_t maxSgprAllocation; + uint32_t sgprAllocationGranularity; + uint32_t vgprsPerSimd; + uint32_t minVgprAllocation; + uint32_t maxVgprAllocation; + uint32_t vgprAllocationGranularity; +} VkPhysicalDeviceShaderCorePropertiesAMD; + + + +#define VK_EXT_vertex_attribute_divisor 1 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" + +typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVertexAttribDivisor; +} VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT; + +typedef struct 
VkVertexInputBindingDivisorDescriptionEXT { + uint32_t binding; + uint32_t divisor; +} VkVertexInputBindingDivisorDescriptionEXT; + +typedef struct VkPipelineVertexInputDivisorStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t vertexBindingDivisorCount; + const VkVertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors; +} VkPipelineVertexInputDivisorStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 vertexAttributeInstanceRateDivisor; + VkBool32 vertexAttributeInstanceRateZeroDivisor; +} VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT; + + + +#define VK_NV_shader_subgroup_partitioned 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_NV_shader_subgroup_partitioned" + + +#define VK_NV_device_diagnostic_checkpoints 1 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION 2 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME "VK_NV_device_diagnostic_checkpoints" + +typedef struct VkQueueFamilyCheckpointPropertiesNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags checkpointExecutionStageMask; +} VkQueueFamilyCheckpointPropertiesNV; + +typedef struct VkCheckpointDataNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlagBits stage; + void* pCheckpointMarker; +} VkCheckpointDataNV; + + +typedef void (VKAPI_PTR *PFN_vkCmdSetCheckpointNV)(VkCommandBuffer commandBuffer, const void* pCheckpointMarker); +typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointDataNV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCheckpointNV( + VkCommandBuffer commandBuffer, + const void* pCheckpointMarker); + +VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointDataNV( + VkQueue queue, + uint32_t* pCheckpointDataCount, + VkCheckpointDataNV* pCheckpointData); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_ios.h b/src/refresh/vkpt/include/vulkan/vulkan_ios.h new file mode 100644 index 0000000..68aded8 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_ios.h @@ -0,0 +1,58 @@ +#ifndef VULKAN_IOS_H_ +#define VULKAN_IOS_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_MVK_ios_surface 1 +#define VK_MVK_IOS_SURFACE_SPEC_VERSION 2 +#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface" + +typedef VkFlags VkIOSSurfaceCreateFlagsMVK; + +typedef struct VkIOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkIOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkIOSSurfaceCreateInfoMVK; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK( + VkInstance instance, + const VkIOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_macos.h b/src/refresh/vkpt/include/vulkan/vulkan_macos.h new file mode 100644 index 0000000..3254f5d --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_macos.h @@ -0,0 +1,58 @@ +#ifndef VULKAN_MACOS_H_ +#define VULKAN_MACOS_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_MVK_macos_surface 1 +#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 2 +#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface" + +typedef VkFlags VkMacOSSurfaceCreateFlagsMVK; + +typedef struct VkMacOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkMacOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkMacOSSurfaceCreateInfoMVK; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK( + VkInstance instance, + const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_mir.h b/src/refresh/vkpt/include/vulkan/vulkan_mir.h new file mode 100644 index 0000000..19bc0a9 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_mir.h @@ -0,0 +1,65 @@ +#ifndef VULKAN_MIR_H_ +#define VULKAN_MIR_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
+** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_KHR_mir_surface 1 +#define VK_KHR_MIR_SURFACE_SPEC_VERSION 4 +#define VK_KHR_MIR_SURFACE_EXTENSION_NAME "VK_KHR_mir_surface" + +typedef VkFlags VkMirSurfaceCreateFlagsKHR; + +typedef struct VkMirSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkMirSurfaceCreateFlagsKHR flags; + MirConnection* connection; + MirSurface* mirSurface; +} VkMirSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMirSurfaceKHR)(VkInstance instance, const VkMirSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, MirConnection* connection); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR( + VkInstance instance, + const VkMirSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + MirConnection* connection); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_vi.h b/src/refresh/vkpt/include/vulkan/vulkan_vi.h new file mode 100644 index 0000000..58527f5 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_vi.h @@ -0,0 +1,58 @@ +#ifndef VULKAN_VI_H_ +#define VULKAN_VI_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_NN_vi_surface 1 +#define VK_NN_VI_SURFACE_SPEC_VERSION 1 +#define VK_NN_VI_SURFACE_EXTENSION_NAME "VK_NN_vi_surface" + +typedef VkFlags VkViSurfaceCreateFlagsNN; + +typedef struct VkViSurfaceCreateInfoNN { + VkStructureType sType; + const void* pNext; + VkViSurfaceCreateFlagsNN flags; + void* window; +} VkViSurfaceCreateInfoNN; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateViSurfaceNN)(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN( + VkInstance instance, + const VkViSurfaceCreateInfoNN* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_wayland.h b/src/refresh/vkpt/include/vulkan/vulkan_wayland.h new file mode 100644 index 0000000..5fdefc2 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_wayland.h @@ -0,0 +1,65 @@ +#ifndef VULKAN_WAYLAND_H_ +#define VULKAN_WAYLAND_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_KHR_wayland_surface 1 +#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface" + +typedef VkFlags VkWaylandSurfaceCreateFlagsKHR; + +typedef struct VkWaylandSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWaylandSurfaceCreateFlagsKHR flags; + struct wl_display* display; + struct wl_surface* surface; +} VkWaylandSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR( + VkInstance instance, + const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + struct wl_display* display); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_win32.h b/src/refresh/vkpt/include/vulkan/vulkan_win32.h new file mode 100644 index 0000000..b0cb3a3 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_win32.h @@ -0,0 +1,276 @@ +#ifndef VULKAN_WIN32_H_ +#define VULKAN_WIN32_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. 
+** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_KHR_win32_surface 1 +#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface" + +typedef VkFlags VkWin32SurfaceCreateFlagsKHR; + +typedef struct VkWin32SurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWin32SurfaceCreateFlagsKHR flags; + HINSTANCE hinstance; + HWND hwnd; +} VkWin32SurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR( + VkInstance instance, + const VkWin32SurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex); +#endif + +#define VK_KHR_external_memory_win32 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32" + +typedef struct VkImportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportMemoryWin32HandleInfoKHR; + +typedef struct VkExportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportMemoryWin32HandleInfoKHR; + +typedef struct VkMemoryWin32HandlePropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryWin32HandlePropertiesKHR; + +typedef struct VkMemoryGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetWin32HandleInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR( + VkDevice device, + const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + HANDLE handle, + VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); +#endif + +#define 
VK_KHR_win32_keyed_mutex 1 +#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1 +#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex" + +typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t acquireCount; + const VkDeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeouts; + uint32_t releaseCount; + const VkDeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; +} VkWin32KeyedMutexAcquireReleaseInfoKHR; + + + +#define VK_KHR_external_semaphore_win32 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32" + +typedef struct VkImportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportSemaphoreWin32HandleInfoKHR; + +typedef struct VkExportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportSemaphoreWin32HandleInfoKHR; + +typedef struct VkD3D12FenceSubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreValuesCount; + const uint64_t* pWaitSemaphoreValues; + uint32_t signalSemaphoreValuesCount; + const uint64_t* pSignalSemaphoreValues; +} VkD3D12FenceSubmitInfoKHR; + +typedef struct VkSemaphoreGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetWin32HandleInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR( + VkDevice device, + const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR( + VkDevice device, + const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); +#endif + +#define VK_KHR_external_fence_win32 1 +#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32" + +typedef struct VkImportFenceWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportFenceWin32HandleInfoKHR; + +typedef struct VkExportFenceWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportFenceWin32HandleInfoKHR; + +typedef struct VkFenceGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetWin32HandleInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); + 
+#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR( + VkDevice device, + const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR( + VkDevice device, + const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); +#endif + +#define VK_NV_external_memory_win32 1 +#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32" + +typedef struct VkImportMemoryWin32HandleInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleType; + HANDLE handle; +} VkImportMemoryWin32HandleInfoNV; + +typedef struct VkExportMemoryWin32HandleInfoNV { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; +} VkExportMemoryWin32HandleInfoNV; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV( + VkDevice device, + VkDeviceMemory memory, + VkExternalMemoryHandleTypeFlagsNV handleType, + HANDLE* pHandle); +#endif + +#define VK_NV_win32_keyed_mutex 1 +#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 1 +#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex" + +typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t acquireCount; + const VkDeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeoutMilliseconds; + uint32_t releaseCount; + const VkDeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; +} VkWin32KeyedMutexAcquireReleaseInfoNV; + + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_xcb.h b/src/refresh/vkpt/include/vulkan/vulkan_xcb.h new file mode 100644 index 0000000..4454882 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_xcb.h @@ -0,0 +1,66 @@ +#ifndef VULKAN_XCB_H_ +#define VULKAN_XCB_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_KHR_xcb_surface 1 +#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6 +#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface" + +typedef VkFlags VkXcbSurfaceCreateFlagsKHR; + +typedef struct VkXcbSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkXcbSurfaceCreateFlagsKHR flags; + xcb_connection_t* connection; + xcb_window_t window; +} VkXcbSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR( + VkInstance instance, + const VkXcbSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + xcb_connection_t* connection, + xcb_visualid_t visual_id); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_xlib.h b/src/refresh/vkpt/include/vulkan/vulkan_xlib.h new file mode 100644 index 0000000..08669cb --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_xlib.h @@ -0,0 +1,66 @@ +#ifndef VULKAN_XLIB_H_ +#define VULKAN_XLIB_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_KHR_xlib_surface 1 +#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6 +#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface" + +typedef VkFlags VkXlibSurfaceCreateFlagsKHR; + +typedef struct VkXlibSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkXlibSurfaceCreateFlagsKHR flags; + Display* dpy; + Window window; +} VkXlibSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR( + VkInstance instance, + const VkXlibSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + Display* dpy, + VisualID visualID); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/refresh/vkpt/include/vulkan/vulkan_xlib_xrandr.h b/src/refresh/vkpt/include/vulkan/vulkan_xlib_xrandr.h new file mode 100644 index 0000000..9df53e5 --- /dev/null +++ b/src/refresh/vkpt/include/vulkan/vulkan_xlib_xrandr.h @@ -0,0 +1,54 @@ +#ifndef VULKAN_XLIB_XRANDR_H_ +#define VULKAN_XLIB_XRANDR_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+**
+*/
+
+
+#define VK_EXT_acquire_xlib_display 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_xlib_display"
+
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireXlibDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display);
+typedef VkResult (VKAPI_PTR *PFN_vkGetRandROutputDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireXlibDisplayEXT(
+ VkPhysicalDevice physicalDevice,
+ Display* dpy,
+ VkDisplayKHR display);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRandROutputDisplayEXT(
+ VkPhysicalDevice physicalDevice,
+ Display* dpy,
+ RROutput rrOutput,
+ VkDisplayKHR* pDisplay);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/refresh/vkpt/light_hierarchy.c b/src/refresh/vkpt/light_hierarchy.c
new file mode 100644
index 0000000..4bcd0f6
--- /dev/null
+++ b/src/refresh/vkpt/light_hierarchy.c
@@ -0,0 +1,823 @@
+/*
+Copyright (C) 2018 Addis Dittebrandt
+Copyright (C) 2018 Christoph Schied
+Copyright (C) 2018 Florian Simon
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+#include "vkpt.h"
+#include "shader/light_hierarchy.h"
+#include "shader/global_textures.h"
+
+#include <float.h>
+#include <math.h>
+#include <assert.h>
+
+//#include <sys/time.h>
+
+#define FMINMAXF(A, MIN, MAX) fminf(fmaxf(A, MIN), MAX)
+#define BDOT(A, B) FMINMAXF(DotProduct(A, B), -1.0, 1.0)
+
+static VkDescriptorPool desc_pool_light_hierarchy;
+static BufferResource_t buf_light_hierarchy[MAX_SWAPCHAIN_IMAGES];
+static BufferResource_t buf_light_hierarchy_staging[MAX_SWAPCHAIN_IMAGES];
+
+typedef struct lh_bin_s {
+ int num_prims;
+ float c_aabb[6];
+ float energy;
+ float aabb[6];
+ lh_cone_t cone;
+} lh_bin_t;
+
+typedef struct lh_prim_s {
+ int index;
+ float c[3];
+ float energy;
+ float aabb[6];
+ lh_cone_t cone;
+} lh_prim_t;
+
+static inline float
+tofloat(uint32_t i)
+{
+ union { uint32_t i; float f; } u = {.i=i};
+ return u.f;
+}
+
+static inline uint32_t
+touint(float f)
+{
+ union { float f; uint32_t i; } u = {.f=f};
+ return u.i;
+}
+// 32-bit normal encoding from Journal of Computer Graphics Techniques Vol. 3, No. 2, 2014
+// A Survey of Efficient Representations for Independent Unit Vectors
+// almost like oct30, but our error is = 0.00077204 avg = 0.00010846 compared to oct32P 0.00246 0.00122
+// i'm thinking maybe because we use a fixed point quantization (only multiples of 2 are mul/divd here)
+// this also enables us to precisely encode (1 0 0) vectors.
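+// For reference, a sketch of the matching decode (assumed here, not part of this
+// file; the actual decoder is expected to live on the shader side):
+//   for each 16-bit half h:  s = h & 0x8000;  m = h & 0x7fff;
+//   e = tofloat(0x3f800000u | ((uint32_t)m << 8)) * 2.0f - 2.0f;  // undoes (|enc| + 2) / 2
+//   enc = s ? -e : e;
+// then the usual octahedral unfold: n = (enc0, enc1, 1 - |enc0| - |enc1|),
+// mirror x/y back if n.z < 0, and normalize.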
+static inline uint32_t +encode_normal(const float vec[3]) +{ + uint32_t projected0, projected1; + const float invL1Norm = 1.0f / (fabsf(vec[0]) + fabsf(vec[1]) + fabsf(vec[2])); + + // first find floating point values of octahedral map in [-1,1]: + float enc0, enc1; + if (vec[2] < 0.0f) + { + enc0 = (1.0f - fabsf(vec[1] * invL1Norm)) * ((vec[0] < 0.0f) ? -1.0f : 1.0f); + enc1 = (1.0f - fabsf(vec[0] * invL1Norm)) * ((vec[1] < 0.0f) ? -1.0f : 1.0f); + } + else + { + enc0 = vec[0] * invL1Norm; + enc1 = vec[1] * invL1Norm; + } + // then encode: + uint32_t enci0 = touint((fabsf(enc0) + 2.0f)/2.0f); + uint32_t enci1 = touint((fabsf(enc1) + 2.0f)/2.0f); + // copy over sign bit and truncated mantissa. could use rounding for increased precision here. + projected0 = ((touint(enc0) & 0x80000000)>>16) | ((enci0 & 0x7fffff)>>8); + projected1 = ((touint(enc1) & 0x80000000)>>16) | ((enci1 & 0x7fffff)>>8); + // avoid -0 cases: + if((projected0 & 0x7fff) == 0) projected0 = 0; + if((projected1 & 0x7fff) == 0) projected1 = 0; + return (projected1 << 16) | projected0; +} + +static inline int +is_leaf(lh_node_t* n) +{ + return n->c[1].i < 0; +} + +static inline int +prim_offset(lh_node_t *n) +{ + assert(-n->c[0].i >= 0); + return -n->c[0].i; +} + +static inline int +num_prims(lh_node_t *n) +{ + assert(-n->c[1].i > 0); + return -n->c[1].i; +} + +static inline void +set_prim_offset(lh_node_t *n, int offset) +{ + assert(offset >= 0); + n->c[0].i = -offset; +} + +static inline void +set_num_prims(lh_node_t *n, int num_prims) +{ + assert(num_prims > 0); + n->c[1].i = -num_prims; +} + +static lh_cone_t +lh_triangle_to_cone(const float* t0, const float* t1, const float* t2) +{ + lh_cone_t r; + vec3_t u, v; + VectorSubtract(t1, t0, u); + VectorSubtract(t2, t0, v); + CrossProduct(u, v, r.axis); + VectorNormalize(r.axis); + r.th_o = 0; + r.th_e = M_PI/2; + return r; +} + +static lh_cone_t +lh_cone_union(lh_cone_t a, lh_cone_t b) +{ + lh_cone_t r; + + if (b.th_o > a.th_o) + { + lh_cone_t t = b; + b = a; + a = t; + } + + float th_d = acosf(BDOT(a.axis, b.axis)); + r.th_o = (a.th_o + th_d + b.th_o) / 2.0f; + r.th_e = fmaxf(a.th_e, b.th_e); + + if (fminf(th_d + b.th_o, M_PI) <= a.th_o) + { + // a already covers b + r.th_o = a.th_o; + VectorCopy(a.axis, r.axis); + } + else if (M_PI <= r.th_o) + { + r.th_o = M_PI; + VectorCopy(a.axis, r.axis); + } + else + { + float th_r = r.th_o - a.th_o; + vec3_t axis_r; + CrossProduct(a.axis, b.axis, axis_r); + VectorNormalize(axis_r); + RotatePointAroundVector(r.axis, axis_r, a.axis, RAD2DEG(th_r)); + VectorNormalize(r.axis); + } + + return r; +} + +static inline void +lh_len(float aabb[6], float lengths[3]) +{ + for (int k = 0; k < 3; k++) + lengths[k] = aabb[k + 3] - aabb[k]; +} + +static inline float +lh_sur_m(float lengths[3]) +{ + float area = 0.0f; + for (int k = 0; k < 3; k++) + area += 2.0f * lengths[k] * lengths[(k + 1) % 3]; + return area; +} + +static inline float +lh_ori_m(lh_cone_t c) +{ + float th_w = fminf(c.th_o + c.th_e, M_PI); + return 2.0f * M_PI * (1.0f - cosf(c.th_o)) + 0.5f * M_PI * ( + 2.0f * th_w * sinf(c.th_o) + - cosf(c.th_o - 2.0f * th_w) + - 2.0f * c.th_o * sinf(c.th_o) + + cosf(c.th_o) + ); +} + +static inline float +lh_reg_m(float lengths[3], int dim) +{ + float max_length = fmaxf(fmaxf(lengths[0], lengths[1]), lengths[2]); + return max_length / lengths[dim]; +} + +static inline float +lh_cost_measure( + int dim, + float p_aabb[6], + lh_cone_t p_cone, + float energy[2], + float aabb[2][6], + lh_cone_t cone[2]) +{ + float p_lengths[3]; + lh_len(p_aabb, 
p_lengths); + float lengths[2][3]; + for (int k = 0; k < 2; k++) + lh_len(aabb[k], lengths[k]); + float reg, p_sur, p_ori, sur[2], ori[2]; + reg = lh_reg_m(p_lengths, dim); + p_sur = lh_sur_m(p_lengths); + p_ori = lh_ori_m(p_cone); + for (int k = 0; k < 2; k++) + { + sur[k] = lh_sur_m(lengths[k]); + ori[k] = lh_ori_m(cone[k]); + } + return reg * + (energy[0] * sur[0] * ori[0] + energy[1] * sur[1] * ori[1]) / + (p_sur * p_ori); +} + +static inline void +lh_init_aabb(float aabb[6]) +{ + for (int k = 0; k < 3; k++) + { + aabb[k] = FLT_MAX; + aabb[k + 3] = -FLT_MAX; + } +} + +static inline void +lh_enlarge_aabb_point(float aabb[6], const float p[3]) +{ + for (int k = 0; k < 3; k++) + { + aabb[k] = fminf(aabb[k], p[k]); + aabb[k+3] = fmaxf(aabb[k+3], p[k]); + } +} + +static inline void +lh_enlarge_aabb_points(float aabb[6], const float *p[], int num_p) +{ + for (int j = 0; j < num_p; j++) + lh_enlarge_aabb_point(aabb, p[j]); +} + +static inline void +lh_enlarge_aabb_aabb(float aabb[6], const float a_aabb[6]) +{ + for (int k = 0; k < 3; k++) + { + aabb[k] = fminf(aabb[k], a_aabb[k]); + aabb[k+3] = fmaxf(aabb[k+3], a_aabb[k+3]); + } +} + +void +lh_build_binned_rec( + light_hierarchy_t* lh, + lh_child_t* child, + int offset, + int num_prims, + lh_prim_t *prims, + int num_bins, + lh_bin_t *bins[3], + lh_bin_t *a_bins[3][2], + float c_aabb[6], + int level) +{ + if (lh->num_nodes == lh->max_num_nodes) { + Com_Error(ERR_FATAL, "not enough space for light hierarchy nodes\n"); + return; + } + + assert(num_prims > 0); + + float skip[3], k_0[3], k_1[3]; + for (int d = 0; d < 3; d++) + { + k_0[d] = c_aabb[d]; + float diff = c_aabb[d+3] - c_aabb[d]; + skip[d] = diff == 0; + + if (!skip[d]) + k_1[d] = (float)num_bins / (c_aabb[d+3] - c_aabb[d]); + } + + if (num_prims == 1 || (skip[0] && skip[1] && skip[2])) + { + child->i = lh->num_nodes++; + lh_init_aabb(child->aabb); + + for (int i = offset; i < offset + num_prims; i++) + { + lh_prim_t *prim = &prims[i]; + lh_enlarge_aabb_aabb(child->aabb, prim->aabb); + if (i == offset) + child->cone = prim->cone; + else + child->cone = lh_cone_union(child->cone, prim->cone); + child->energy += prim->energy; + } + + lh_node_t *node = &lh->nodes[child->i]; + + // we can currently only have leaves with 1 prim (no index array in shader), + // so split further with dumb strategy if necessary + // TODO remove at some point + if (num_prims == 1) + { + set_num_prims(node, num_prims); + lh_prim_t *prim = &prims[offset]; + set_prim_offset(node, prim->index); + } + else + { + int mid = offset + num_prims / 2; + int end = offset + num_prims; + lh_build_binned_rec(lh, &node->c[0], offset, mid - offset, prims, num_bins, bins, a_bins, c_aabb, level + 1); + lh_build_binned_rec(lh, &node->c[1], mid, end - mid, prims, num_bins, bins, a_bins, c_aabb, level + 1); + } + } + else + { + // initialize bins + for (int d = 0; d < 3; d++) + for (int b = 0; b < num_bins; b++) + { + lh_bin_t *bin = &bins[d][b]; + bin->num_prims = 0; + lh_init_aabb(bin->c_aabb); + bin->energy = 0.0f; + lh_init_aabb(bin->aabb); + } + + // populate bins + for (int i = offset; i < offset + num_prims; i++) + { + lh_prim_t *prim = &prims[i]; + for (int d = 0; d < 3; d++) + { + if (skip[d]) + continue; + + int bin_i = min(k_1[d] * (prim->c[d] - k_0[d]), num_bins - 1); + lh_bin_t *bin = &bins[d][bin_i]; + bin->num_prims++; + lh_enlarge_aabb_point(bin->c_aabb, prim->c); + bin->energy += prim->energy; + lh_enlarge_aabb_aabb(bin->aabb, prim->aabb); + if (bin->num_prims == 1) + bin->cone = prim->cone; + else + bin->cone = 
lh_cone_union(bin->cone, prim->cone); + } + } + + for (int d = 0; d < 3; d++) + assert(skip[d] || + (bins[d][0].num_prims > 0 && bins[d][num_bins - 1].num_prims > 0)); + + // initialize accumulated bins + for (int d = 0; d < 3; d++) + { + if (skip[d]) + continue; + for (int i = 0; i < 2; i++) + memcpy(a_bins[d][i], bins[d], num_bins * sizeof(lh_bin_t)); + } + + // accumulate bins from left and right + for (int d = 0; d < 3; d++) + { + if (skip[d]) + continue; + + for (int s = 0; s < 2; s++) + { + for (int b = 1; b < num_bins; b++) + { + int b_i = s == 0 ? b : num_bins - b - 1; + int prev_b_i = s == 0 ? b_i - 1 : b_i + 1; + + lh_bin_t *prev_bin = &a_bins[d][s][prev_b_i]; + lh_bin_t *bin = &a_bins[d][s][b_i]; + + if (bin->num_prims == 0) + memcpy(bin, prev_bin, sizeof(lh_bin_t)); + else + { + bin->num_prims += prev_bin->num_prims; + lh_enlarge_aabb_aabb(bin->c_aabb, prev_bin->c_aabb); + bin->energy += prev_bin->energy; + lh_enlarge_aabb_aabb(bin->aabb, prev_bin->aabb); + bin->cone = lh_cone_union(bin->cone, prev_bin->cone); + } + } + } + } + + // find best split candidate + float m_cost = FLT_MAX; + int m_d, m_s; + for (int d = 0; d < 3; d++) + { + if (skip[d]) + continue; + + float p_aabb[6]; + memcpy(p_aabb, a_bins[d][1][0].aabb, sizeof(p_aabb)); + lh_cone_t p_cone = a_bins[d][1][0].cone; + + for (int s = 0; s < num_bins - 1; s++) + { + lh_bin_t *a_bin[] = {&a_bins[d][0][s], &a_bins[d][1][s + 1]}; + assert(a_bin[0]->num_prims != 0 && a_bin[1]->num_prims != 0); + float energy[2] = {a_bin[0]->energy, a_bin[1]->energy}; + float aabb[2][6]; + for (int i = 0; i < 2; i++) + memcpy(aabb[i], a_bin[i]->aabb, sizeof(aabb[i])); + lh_cone_t cone[2] = {a_bin[0]->cone, a_bin[1]->cone}; + + float cost = lh_cost_measure( + d, p_aabb, p_cone, energy, aabb, cone); + + if (cost < m_cost) + { + m_cost = cost; + m_d = d; + m_s = s; + } + } + } + + assert(m_cost != FLT_MAX); + + int d = m_d; + int s = m_s; + + lh_bin_t *a_bin[] = {&a_bins[d][0][s], &a_bins[d][1][s + 1]}; + + // build child + child->i = lh->num_nodes++; + memcpy(child->aabb, a_bins[d][1][0].aabb, sizeof(child->aabb)); + child->cone = a_bins[d][1][0].cone; + child->energy = a_bins[d][1][0].energy; + + // rearrange primitives among subtrees + int left_start = offset; + int left_end = offset + a_bin[0]->num_prims; + int right_start = left_end; + int right_end = offset + num_prims; + int left = left_start; + int right = right_start; + int term = 0; + while (1) + { + for (; left < left_end; left++) + { + lh_prim_t *prim = &prims[left]; + int bin_i = k_1[d] * (prim->c[d] - k_0[d]); + if (s < bin_i) + break; + } + + if (left == left_end) + term = 1; + + for (; right < right_end; right++) + { + lh_prim_t *prim = &prims[right]; + int bin_i = k_1[d] * (prim->c[d] - k_0[d]); + if (bin_i <= s) + break; + } + + assert(term ? 
right == right_end : right < right_end); + + if (term) + break; + + lh_prim_t t = prims[left]; + prims[left] = prims[right]; + prims[right] = t; + } + + lh_node_t *node = &lh->nodes[child->i]; + + float c_aabb[2][6]; + for (int s = 0; s < 2; s++) + memcpy(c_aabb[s], a_bin[s]->c_aabb, sizeof(c_aabb[s])); + + // recurse + lh_build_binned_rec(lh, &node->c[0], left_start, left_end - left_start, prims, num_bins, bins, a_bins, c_aabb[0], level + 1); + lh_build_binned_rec(lh, &node->c[1], right_start, right_end - right_start, prims, num_bins, bins, a_bins, c_aabb[1], level + 1); + } +} + +void +lh_compactify(light_hierarchy_t *lh, compact_lh_node_t *compact_nodes, const float *positions, const uint32_t *colors) +{ + for(int k = 0; k < lh->num_nodes; k++) { + compact_lh_node_t *cn = compact_nodes + k; + lh_node_t *n = lh->nodes + k; + + if(is_leaf(n)) { + int light_idx = prim_offset(n); + memcpy(cn->c[0].aabb, positions + light_idx * 9, 9 * sizeof(float)); + memcpy(cn->c[0].aabb + 9, colors + light_idx, sizeof(uint32_t)); + } + else { + compact_lh_node_t cn_tmp; + for(int i = 0; i < 2; i++) { + memcpy(cn_tmp.c[i].aabb, n->c[i].aabb, sizeof(float) * 6); + cn_tmp.c[i].axis = encode_normal(n->c[i].cone.axis); + cn_tmp.c[i].th_o = n->c[i].cone.th_o; + //cn_tmp.c[i].energy = n->c[i].energy; + cn_tmp.idx[i] = is_leaf(&lh->nodes[n->c[i].i]) ? ~n->c[i].i : n->c[i].i; + } + memcpy(cn, &cn_tmp, sizeof(compact_lh_node_t)); + } + } +} + +int +lh_build_binned(void *dst, const float *positions, const uint32_t *colors, int num_prims, int num_bins) +{ + light_hierarchy_t light_hierarchy; + light_hierarchy_t *lh = &light_hierarchy; // fixme + + lh->max_num_nodes = 3 * num_prims; + lh->nodes = calloc(lh->max_num_nodes, sizeof(lh_node_t)); + lh->num_nodes = 0; + + float c_aabb[6]; + lh_init_aabb(c_aabb); + lh_prim_t *prims = calloc(num_prims, sizeof(lh_prim_t)); + for (int i = 0; i < num_prims; i++) + { + lh_prim_t *prim = &prims[i]; + prim->index = i; + lh_init_aabb(prim->aabb); + prim->energy = 0.5; // TODO use actual emitted energy + + const float *p[] = { + &positions[9 * i + 0], + &positions[9 * i + 3], + &positions[9 * i + 6], + }; + lh_enlarge_aabb_point(prim->aabb, p[0]); + lh_enlarge_aabb_point(prim->aabb, p[1]); + lh_enlarge_aabb_point(prim->aabb, p[2]); + + prim->c[0] = 0.5f * (prim->aabb[0] + prim->aabb[0 + 3]); + prim->c[1] = 0.5f * (prim->aabb[1] + prim->aabb[1 + 3]); + prim->c[2] = 0.5f * (prim->aabb[2] + prim->aabb[2 + 3]); + + lh_enlarge_aabb_point(c_aabb, prim->c); + + prim->cone = lh_triangle_to_cone(p[0], p[1], p[2]); + } + + + lh_bin_t *bins[3]; + lh_bin_t *a_bins[3][2]; + for (int d = 0; d < 3; d++) + { + bins[d] = calloc(num_bins, sizeof(lh_bin_t)); + for (int i = 0; i < 2; i++) + a_bins[d][i] = calloc(num_bins, sizeof(lh_bin_t)); + } + + lh_child_t child; + lh_build_binned_rec(lh, &child, 0, num_prims, prims, num_bins, bins, a_bins, c_aabb, 0); + + for (int d = 0; d < 3; d++) + { + free(bins[d]); + for (int i = 0; i < 2; i++) + free(a_bins[d][i]); + } + free(prims); + + lh_compactify(lh, dst, positions, colors); + + free(lh->nodes); + + return lh->num_nodes; +} + +void +lh_dump(light_hierarchy_t *lh, const char *path) +{ + FILE *f = fopen(path, "wb"); + int vcnt = 1; + for(int i = 0; i < lh->num_nodes; i++) + { + lh_node_t *n = lh->nodes + i; + + for(int j=0;j<8;j++) + { + fprintf(f, "v %f %f %f\n", n->c[0].aabb[0 + (j & 1) * 3], n->c[0].aabb[1 + 3 * !!(j & 2)], n->c[0].aabb[2 + 3 * !!(j & 4)]); + } + + fprintf(f, "f %d %d %d %d\n", vcnt , vcnt + 1, vcnt + 5, vcnt + 4);//bottom + fprintf(f, 
"f %d %d %d %d\n", vcnt + 2, vcnt + 3, vcnt + 7, vcnt + 6);//top + fprintf(f, "f %d %d %d %d\n", vcnt , vcnt + 1, vcnt + 3, vcnt + 2);//front + fprintf(f, "f %d %d %d %d\n", vcnt + 4, vcnt + 5, vcnt + 7, vcnt + 6);//back + fprintf(f, "f %d %d %d %d\n", vcnt , vcnt + 4, vcnt + 6, vcnt + 2);//left + fprintf(f, "f %d %d %d %d\n", vcnt + 1, vcnt + 5, vcnt + 7, vcnt + 3);//right + vcnt += 8; + + for(int j=0;j<8;j++) + { + fprintf(f, "v %f %f %f\n", n->c[1].aabb[0 + (j & 1) * 3], n->c[1].aabb[1 + 3 * !!(j & 2)], n->c[1].aabb[2 + 3 * !!(j & 4)]); + } + + fprintf(f, "f %d %d %d %d\n", vcnt , vcnt + 1, vcnt + 5, vcnt + 4);//bottom + fprintf(f, "f %d %d %d %d\n", vcnt + 2, vcnt + 3, vcnt + 7, vcnt + 6);//top + fprintf(f, "f %d %d %d %d\n", vcnt , vcnt + 1, vcnt + 3, vcnt + 2);//front + fprintf(f, "f %d %d %d %d\n", vcnt + 4, vcnt + 5, vcnt + 7, vcnt + 6);//back + fprintf(f, "f %d %d %d %d\n", vcnt , vcnt + 4, vcnt + 6, vcnt + 2);//left + fprintf(f, "f %d %d %d %d\n", vcnt + 1, vcnt + 5, vcnt + 7, vcnt + 3);//right + vcnt += 8; + } + fclose(f); +} + +VkResult +vkpt_lh_upload_staging(VkCommandBuffer cmd_buf, int num_nodes) +{ + assert(!qvk.buf_vertex_staging.is_mapped); +#if 0 + VkCommandBufferAllocateInfo cmd_alloc = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandPool = qvk.command_pool, + .commandBufferCount = 1, + }; + + VkCommandBuffer cmd_buf; + vkAllocateCommandBuffers(qvk.device, &cmd_alloc, &cmd_buf); + VkCommandBufferBeginInfo cmd_begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + vkBeginCommandBuffer(cmd_buf, &cmd_begin_info); +#endif + + VkBufferCopy copyRegion = { + .size = num_nodes * sizeof(compact_lh_node_t) + }; + vkCmdCopyBuffer(cmd_buf, + buf_light_hierarchy_staging[qvk.current_image_index].buffer, + buf_light_hierarchy[qvk.current_image_index].buffer, + 1, ©Region); + +#if 0 + vkEndCommandBuffer(cmd_buf); + VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = &cmd_buf, + }; + + vkQueueSubmit(qvk.queue_graphics, 1, &submit_info, VK_NULL_HANDLE); + + vkQueueWaitIdle(qvk.queue_graphics); +#endif + + return VK_SUCCESS; +} + +VkResult +vkpt_lh_update( + const float *positions, + const uint32_t *light_colors, + int num_primitives, + VkCommandBuffer cmd_buf) +{ + //struct timeval tv1, tv2; + //gettimeofday(&tv1, NULL); + void *lh = buffer_map(buf_light_hierarchy_staging + qvk.current_image_index); + int num_nodes = lh_build_binned(lh, positions, light_colors, num_primitives, 8); + lh = NULL; + buffer_unmap(buf_light_hierarchy_staging + qvk.current_image_index); + + VkResult res = vkpt_lh_upload_staging(cmd_buf, num_nodes); + +// gettimeofday(&tv2, NULL); +// +// printf ("Total time = %f seconds\n", +// (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + +// (double) (tv2.tv_sec - tv1.tv_sec)); +// + + return res; +} + +VkResult +vkpt_lh_initialize() +{ + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + _VK(buffer_create(buf_light_hierarchy_staging + i, + sizeof(lh_node_t) * MAX_LIGHT_HIERARCHY_NODES, + VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)); + _VK(buffer_create(buf_light_hierarchy + i, + sizeof(lh_node_t) * MAX_LIGHT_HIERARCHY_NODES, + VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)); + } + + + VkDescriptorSetLayoutBinding layout_bindings[] 
= { + { + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + .binding = 0, + .stageFlags = VK_SHADER_STAGE_ALL, + }, + }; + + VkDescriptorSetLayoutCreateInfo layout_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = LENGTH(layout_bindings), + .pBindings = layout_bindings, + }; + + _VK(vkCreateDescriptorSetLayout(qvk.device, &layout_info, NULL, + &qvk.desc_set_layout_light_hierarchy)); + + VkDescriptorPoolSize pool_size = { + .type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = MAX_SWAPCHAIN_IMAGES, + }; + + VkDescriptorPoolCreateInfo pool_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .poolSizeCount = 1, + .pPoolSizes = &pool_size, + .maxSets = MAX_SWAPCHAIN_IMAGES, + }; + + _VK(vkCreateDescriptorPool(qvk.device, &pool_info, NULL, &desc_pool_light_hierarchy)); + + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + VkDescriptorSetAllocateInfo descriptor_set_alloc_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + .descriptorPool = desc_pool_light_hierarchy, + .descriptorSetCount = 1, + .pSetLayouts = &qvk.desc_set_layout_light_hierarchy, + }; + + _VK(vkAllocateDescriptorSets(qvk.device, &descriptor_set_alloc_info, qvk.desc_set_light_hierarchy + i)); + + VkDescriptorBufferInfo buf_info = { + .buffer = buf_light_hierarchy[i].buffer, + .offset = 0, + .range = buf_light_hierarchy[i].size, + }; + + VkWriteDescriptorSet output_buf_write = { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = qvk.desc_set_light_hierarchy[i], + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + .pBufferInfo = &buf_info, + }; + + vkUpdateDescriptorSets(qvk.device, 1, &output_buf_write, 0, NULL); + } + return VK_SUCCESS; +} + +VkResult +vkpt_lh_destroy() +{ + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + buffer_destroy(buf_light_hierarchy_staging + i); + buffer_destroy(buf_light_hierarchy + i); + } + vkDestroyDescriptorPool(qvk.device, desc_pool_light_hierarchy, NULL); + vkDestroyDescriptorSetLayout(qvk.device, qvk.desc_set_layout_light_hierarchy, NULL); + return VK_SUCCESS; +} + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/main.c b/src/refresh/vkpt/main.c new file mode 100644 index 0000000..cb0ad4c --- /dev/null +++ b/src/refresh/vkpt/main.c @@ -0,0 +1,1645 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#include "shared/shared.h" +#include "common/bsp.h" +#include "common/cmd.h" +#include "common/common.h" +#include "common/cvar.h" +#include "common/files.h" +#include "common/math.h" +#include "client/video.h" +#include "client/client.h" +#include "refresh/refresh.h" +#include "refresh/images.h" +#include "refresh/models.h" +#include "system/hunk.h" +#include "vkpt.h" +#include "shader/light_hierarchy.h" + +#include "shader/vertex_buffer.h" + +#include +#include +#include + +#include +#include +#include +#include + +cvar_t *vkpt_reconstruction; +cvar_t *cvar_rtx; +cvar_t *vkpt_profiler; + +static bsp_t *bsp_world_model; + +typedef enum { + VKPT_INIT_DEFAULT = 0, + VKPT_INIT_SWAPCHAIN_RECREATE = (1 << 0), + VKPT_INIT_RELOAD_SHADER = (1 << 1), +} VkptInitFlags_t; + +typedef struct VkptInit_s { + const char *name; + VkResult (*initialize)(); + VkResult (*destroy)(); + VkptInitFlags_t flags; + int is_initialized; +} VkptInit_t; +VkptInit_t vkpt_initialization[] = { + { "profiler", vkpt_profiler_initialize, vkpt_profiler_destroy, VKPT_INIT_DEFAULT, 0 }, + { "shader", vkpt_load_shader_modules, vkpt_destroy_shader_modules, VKPT_INIT_RELOAD_SHADER, 0 }, + { "vbo", vkpt_vertex_buffer_create, vkpt_vertex_buffer_destroy, VKPT_INIT_DEFAULT, 0 }, + { "ubo", vkpt_uniform_buffer_create, vkpt_uniform_buffer_destroy, VKPT_INIT_DEFAULT, 0 }, + { "textures", vkpt_textures_initialize, vkpt_textures_destroy, VKPT_INIT_DEFAULT, 0 }, + { "images", vkpt_create_images, vkpt_destroy_images, VKPT_INIT_SWAPCHAIN_RECREATE, 0 }, + { "draw", vkpt_draw_initialize, vkpt_draw_destroy, VKPT_INIT_DEFAULT, 0 }, + { "lh", vkpt_lh_initialize, vkpt_lh_destroy, VKPT_INIT_DEFAULT, 0 }, + { "pt", vkpt_pt_init, vkpt_pt_destroy, VKPT_INIT_DEFAULT, 0 }, + { "pt|", vkpt_pt_create_pipelines, vkpt_pt_destroy_pipelines, VKPT_INIT_SWAPCHAIN_RECREATE + | VKPT_INIT_RELOAD_SHADER, 0 }, + { "draw|", vkpt_draw_create_pipelines, vkpt_draw_destroy_pipelines, VKPT_INIT_SWAPCHAIN_RECREATE + | VKPT_INIT_RELOAD_SHADER, 0 }, + { "vbo|", vkpt_vertex_buffer_create_pipelines, vkpt_vertex_buffer_destroy_pipelines, VKPT_INIT_RELOAD_SHADER, 0 }, + { "asvgf", vkpt_asvgf_initialize, vkpt_asvgf_destroy, VKPT_INIT_DEFAULT, 0 }, + { "asvgf|", vkpt_asvgf_create_pipelines, vkpt_asvgf_destroy_pipelines, VKPT_INIT_RELOAD_SHADER, 0 }, +}; + +VkResult +vkpt_initialize_all(VkptInitFlags_t init_flags) +{ + vkDeviceWaitIdle(qvk.device); + for(int i = 0; i < LENGTH(vkpt_initialization); i++) { + VkptInit_t *init = vkpt_initialization + i; + if((init->flags & init_flags) != init_flags) + continue; + Com_Printf("initializing %s\n", vkpt_initialization[i].name); + assert(!init->is_initialized); + init->is_initialized = init->initialize + ? (init->initialize() == VK_SUCCESS) + : 1; + assert(init->is_initialized); + } + return VK_SUCCESS; +} + +VkResult +vkpt_destroy_all(VkptInitFlags_t destroy_flags) +{ + vkDeviceWaitIdle(qvk.device); + for(int i = LENGTH(vkpt_initialization) - 1; i >= 0; i--) { + VkptInit_t *init = vkpt_initialization + i; + if((init->flags & destroy_flags) != destroy_flags) + continue; + Com_Printf("destroying %s\n", vkpt_initialization[i].name); + assert(init->is_initialized); + init->is_initialized = init->destroy + ? 
!(init->destroy() == VK_SUCCESS) + : 0; + assert(!init->is_initialized); + } + return VK_SUCCESS; +} + +void +vkpt_reload_shader() +{ + char buf[1024]; +#ifdef _WIN32 + FILE *f = _popen("bash -c \"make -C/home/cschied/quake2-pt compile_shaders\"", "r"); +#else + FILE *f = popen("make -j compile_shaders", "r"); +#endif + if(f) { + while(fgets(buf, sizeof buf, f)) { + Com_Printf("%s", buf); + } + fclose(f); + } + vkpt_destroy_all(VKPT_INIT_RELOAD_SHADER); + vkpt_initialize_all(VKPT_INIT_RELOAD_SHADER); +} + +refcfg_t r_config; +int registration_sequence; +vkpt_refdef_t vkpt_refdef = { + .z_near = 1.0f, + .z_far = 4096.0f, +}; + +QVK_t qvk = { + .win_width = 1920, + .win_height = 1080, + .frame_counter = 0, +}; + +#define _VK_EXTENSION_DO(a) PFN_##a q##a; +_VK_EXTENSION_LIST +#undef _VK_EXTENSION_DO + +const char *vk_requested_layers[] = { + "VK_LAYER_LUNARG_standard_validation" +}; + +const char *vk_requested_instance_extensions[] = { + VK_EXT_DEBUG_UTILS_EXTENSION_NAME, + VK_EXT_DEBUG_REPORT_EXTENSION_NAME, +}; + +const char *vk_requested_device_extensions[] = { + VK_NV_RAY_TRACING_EXTENSION_NAME, + VK_KHR_SWAPCHAIN_EXTENSION_NAME, + VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME, +#ifdef VKPT_ENABLE_VALIDATION + VK_EXT_DEBUG_MARKER_EXTENSION_NAME, +#endif +}; + +static const VkApplicationInfo vk_app_info = { + .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, + .pApplicationName = "quake 2 pathtracing", + .applicationVersion = VK_MAKE_VERSION(1, 0, 0), + .pEngineName = "vkpt", + .engineVersion = VK_MAKE_VERSION(1, 0, 0), + .apiVersion = VK_API_VERSION_1_1, +}; + +/* use this to override file names */ +static const char *shader_module_file_names[NUM_QVK_SHADER_MODULES]; + +int register_model_dirty; + +void +get_vk_extension_list( + const char *layer, + uint32_t *num_extensions, + VkExtensionProperties **ext) +{ + _VK(vkEnumerateInstanceExtensionProperties(layer, num_extensions, NULL)); + *ext = malloc(sizeof(**ext) * *num_extensions); + _VK(vkEnumerateInstanceExtensionProperties(layer, num_extensions, *ext)); +} + +void +get_vk_layer_list( + uint32_t *num_layers, + VkLayerProperties **ext) +{ + _VK(vkEnumerateInstanceLayerProperties(num_layers, NULL)); + *ext = malloc(sizeof(**ext) * *num_layers); + _VK(vkEnumerateInstanceLayerProperties(num_layers, *ext)); +} + +int +layer_supported(const char *name) +{ + assert(qvk.layers); + for(int i = 0; i < qvk.num_layers; i++) + if(!strcmp(name, qvk.layers[i].layerName)) + return 1; + return 0; +} + +int +layer_requested(const char *name) +{ + for(int i = 0; i < LENGTH(vk_requested_layers); i++) + if(!strcmp(name, vk_requested_layers[i])) + return 1; + return 0; +} + +static VKAPI_ATTR VkBool32 VKAPI_CALL +vk_debug_callback( + VkDebugUtilsMessageSeverityFlagBitsEXT severity, + VkDebugUtilsMessageTypeFlagsEXT type, + const VkDebugUtilsMessengerCallbackDataEXT* callback_data, + void *user_data) +{ + Com_EPrintf("validation layer: %s\n", callback_data->pMessage); + return VK_FALSE; +} + +VkResult +qvkCreateDebugUtilsMessengerEXT( + VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugUtilsMessengerEXT* pCallback) +{ + PFN_vkCreateDebugUtilsMessengerEXT func = (PFN_vkCreateDebugUtilsMessengerEXT) + vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT"); + if(func) + return func(instance, pCreateInfo, pAllocator, pCallback); + return VK_ERROR_EXTENSION_NOT_PRESENT; +} + +VkResult +qvkDestroyDebugUtilsMessengerEXT( + VkInstance instance, + VkDebugUtilsMessengerEXT callback, + 
const VkAllocationCallbacks* pAllocator) +{ + PFN_vkDestroyDebugUtilsMessengerEXT func = (PFN_vkDestroyDebugUtilsMessengerEXT) + vkGetInstanceProcAddr(instance, "vkDestroyDebugUtilsMessengerEXT"); + if(func) { + func(instance, callback, pAllocator); + return VK_SUCCESS; + } + return VK_ERROR_EXTENSION_NOT_PRESENT; +} + +VkResult +create_swapchain() +{ + /* create swapchain (query details and ignore them afterwards :-) )*/ + VkSurfaceCapabilitiesKHR surf_capabilities; + vkGetPhysicalDeviceSurfaceCapabilitiesKHR(qvk.physical_device, qvk.surface, &surf_capabilities); + + uint32_t num_formats = 0; + vkGetPhysicalDeviceSurfaceFormatsKHR(qvk.physical_device, qvk.surface, &num_formats, NULL); + VkSurfaceFormatKHR *avail_surface_formats = alloca(sizeof(VkSurfaceFormatKHR) * num_formats); + vkGetPhysicalDeviceSurfaceFormatsKHR(qvk.physical_device, qvk.surface, &num_formats, avail_surface_formats); + Com_Printf("num surface formats: %d\n", num_formats); + + Com_Printf("available surface formats:\n"); + for(int i = 0; i < num_formats; i++) + Com_Printf(" %s\n", vk_format_to_string(avail_surface_formats[i].format)); + + + VkFormat acceptable_formats[] = { + VK_FORMAT_R8G8B8A8_SRGB, VK_FORMAT_B8G8R8A8_SRGB, + }; + + //qvk.surf_format.format = VK_FORMAT_R8G8B8A8_SRGB; + //qvk.surf_format.format = VK_FORMAT_B8G8R8A8_SRGB; + for(int i = 0; i < LENGTH(acceptable_formats); i++) { + for(int j = 0; j < num_formats; j++) + if(acceptable_formats[i] == avail_surface_formats[j].format) { + qvk.surf_format = avail_surface_formats[j]; + goto out; + } + } +out:; + + uint32_t num_present_modes = 0; + vkGetPhysicalDeviceSurfacePresentModesKHR(qvk.physical_device, qvk.surface, &num_present_modes, NULL); + VkPresentModeKHR *avail_present_modes = alloca(sizeof(VkPresentModeKHR) * num_present_modes); + vkGetPhysicalDeviceSurfacePresentModesKHR(qvk.physical_device, qvk.surface, &num_present_modes, avail_present_modes); + + //qvk.present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR; + //qvk.present_mode = VK_PRESENT_MODE_FIFO_KHR; + qvk.present_mode = VK_PRESENT_MODE_MAILBOX_KHR; + //qvk.present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR; + + if(surf_capabilities.currentExtent.width != ~0u) { + qvk.extent = surf_capabilities.currentExtent; + } + else { + qvk.extent.width = MIN(surf_capabilities.maxImageExtent.width, qvk.win_width); + qvk.extent.height = MIN(surf_capabilities.maxImageExtent.height, qvk.win_height); + + qvk.extent.width = MAX(surf_capabilities.minImageExtent.width, qvk.extent.width); + qvk.extent.height = MAX(surf_capabilities.minImageExtent.height, qvk.extent.height); + } + + uint32_t num_images = 2; + //uint32_t num_images = surf_capabilities.minImageCount + 1; + if(surf_capabilities.maxImageCount > 0) + num_images = MIN(num_images, surf_capabilities.maxImageCount); + + VkSwapchainCreateInfoKHR swpch_create_info = { + .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + .surface = qvk.surface, + .minImageCount = num_images, + .imageFormat = qvk.surf_format.format, + .imageColorSpace = qvk.surf_format.colorSpace, + .imageExtent = qvk.extent, + .imageArrayLayers = 1, /* only needs to be changed for stereoscopic rendering */ + .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT + | VK_IMAGE_USAGE_TRANSFER_DST_BIT, + .imageSharingMode = VK_SHARING_MODE_EXCLUSIVE, /* VK_SHARING_MODE_CONCURRENT if not using same queue */ + .queueFamilyIndexCount = 0, + .pQueueFamilyIndices = NULL, + .preTransform = surf_capabilities.currentTransform, + .compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR, /* no alpha for window transparency 
*/ + .presentMode = qvk.present_mode, + .clipped = VK_FALSE, /* do not render pixels that are occluded by other windows */ + //.clipped = VK_TRUE, /* do not render pixels that are occluded by other windows */ + .oldSwapchain = VK_NULL_HANDLE, /* need to provide previous swapchain in case of window resize */ + }; + + if(vkCreateSwapchainKHR(qvk.device, &swpch_create_info, NULL, &qvk.swap_chain) != VK_SUCCESS) { + Com_EPrintf("error creating swapchain\n"); + return 1; + } + + vkGetSwapchainImagesKHR(qvk.device, qvk.swap_chain, &qvk.num_swap_chain_images, NULL); + //qvk.swap_chain_images = malloc(qvk.num_swap_chain_images * sizeof(*qvk.swap_chain_images)); + assert(qvk.num_swap_chain_images < MAX_SWAPCHAIN_IMAGES); + vkGetSwapchainImagesKHR(qvk.device, qvk.swap_chain, &qvk.num_swap_chain_images, qvk.swap_chain_images); + + //qvk.swap_chain_image_views = malloc(qvk.num_swap_chain_images * sizeof(*qvk.swap_chain_image_views)); + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + VkImageViewCreateInfo img_create_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = qvk.swap_chain_images[i], + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = qvk.surf_format.format, +#if 1 + .components = { + VK_COMPONENT_SWIZZLE_R, + VK_COMPONENT_SWIZZLE_G, + VK_COMPONENT_SWIZZLE_B, + VK_COMPONENT_SWIZZLE_A + }, +#endif + .subresourceRange = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + + if(vkCreateImageView(qvk.device, &img_create_info, NULL, qvk.swap_chain_image_views + i) != VK_SUCCESS) { + Com_EPrintf("error creating image view!"); + return 1; + } + } + + return VK_SUCCESS; +} + +VkResult +create_command_pool_and_fences() +{ + VkCommandPoolCreateInfo cmd_pool_create_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + .queueFamilyIndex = qvk.queue_idx_graphics, + .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, + }; + + /* command pool and buffers */ + _VK(vkCreateCommandPool(qvk.device, &cmd_pool_create_info, NULL, &qvk.command_pool)); + + qvk.num_command_buffers = qvk.num_swap_chain_images; + VkCommandBufferAllocateInfo cmd_buf_alloc_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .commandPool = qvk.command_pool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandBufferCount = qvk.num_command_buffers + }; + qvk.command_buffers = malloc(qvk.num_command_buffers * sizeof(*qvk.command_buffers)); + _VK(vkAllocateCommandBuffers(qvk.device, &cmd_buf_alloc_info, qvk.command_buffers)); + + + /* fences and semaphores */ + VkSemaphoreCreateInfo semaphore_info = { .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO }; + for(int i = 0; i < NUM_SEMAPHORES; i++) + _VK(vkCreateSemaphore(qvk.device, &semaphore_info, NULL, &qvk.semaphores[i])); + + VkFenceCreateInfo fence_info = { + .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + .flags = VK_FENCE_CREATE_SIGNALED_BIT, /* fence's initial state set to be signaled + to make program not hang */ + }; + for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + _VK(vkCreateFence(qvk.device, &fence_info, NULL, qvk.fences_frame_sync + i)); + } + + return VK_SUCCESS; +} + +int +init_vulkan() +{ + /* layers */ + get_vk_layer_list(&qvk.num_layers, &qvk.layers); + Com_Printf("available vulkan layers: \n"); + for(int i = 0; i < qvk.num_layers; i++) { + int requested = layer_requested(qvk.layers[i].layerName); + Com_Printf("%s%s, ", qvk.layers[i].layerName, requested ? 
" (requested)" : ""); + } + Com_Printf("\n"); + + /* instance extensions */ + int num_inst_ext_combined = qvk.num_sdl2_extensions + LENGTH(vk_requested_instance_extensions); + char **ext = alloca(sizeof(char *) * num_inst_ext_combined); + memcpy(ext, qvk.sdl2_extensions, qvk.num_sdl2_extensions * sizeof(*qvk.sdl2_extensions)); + memcpy(ext + qvk.num_sdl2_extensions, vk_requested_instance_extensions, sizeof(vk_requested_instance_extensions)); + + get_vk_extension_list(NULL, &qvk.num_extensions, &qvk.extensions); /* valid here? */ + Com_Printf("supported vulkan instance extensions: \n"); + for(int i = 0; i < qvk.num_extensions; i++) { + int requested = 0; + for(int j = 0; j < num_inst_ext_combined; j++) { + if(!strcmp(qvk.extensions[i].extensionName, ext[j])) { + requested = 1; + break; + } + } + Com_Printf("%s%s, ", qvk.extensions[i].extensionName, requested ? " (requested)" : ""); + } + Com_Printf("\n"); + + /* create instance */ + VkInstanceCreateInfo inst_create_info = { + .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + .pApplicationInfo = &vk_app_info, +#ifdef VKPT_ENABLE_VALIDATION + .enabledLayerCount = LENGTH(vk_requested_layers), + .ppEnabledLayerNames = vk_requested_layers, +#endif + .enabledExtensionCount = num_inst_ext_combined, + .ppEnabledExtensionNames = (const char * const*)ext, + }; + + _VK(vkCreateInstance(&inst_create_info, NULL, &qvk.instance)); + + /* setup debug callback */ + VkDebugUtilsMessengerCreateInfoEXT dbg_create_info = { + .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + .messageSeverity = + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT + | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT + | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT, + .messageType = + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT + | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT + | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, + .pfnUserCallback = vk_debug_callback, + .pUserData = NULL + }; + + _VK(qvkCreateDebugUtilsMessengerEXT(qvk.instance, &dbg_create_info, NULL, &qvk.dbg_messenger)); + + /* create surface */ + if(!SDL_Vulkan_CreateSurface(qvk.window, qvk.instance, &qvk.surface)) { + Com_EPrintf("[vkq2] could not create surface!\n"); + return 1; + } + + /* pick physical device (iterate over all but pick device 0 anyways) */ + uint32_t num_devices = 0; + _VK(vkEnumeratePhysicalDevices(qvk.instance, &num_devices, NULL)); + if(num_devices == 0) + return 1; + VkPhysicalDevice *devices = alloca(sizeof(VkPhysicalDevice) *num_devices); + _VK(vkEnumeratePhysicalDevices(qvk.instance, &num_devices, devices)); + + int picked_device = -1; + for(int i = 0; i < num_devices; i++) { + VkPhysicalDeviceProperties dev_properties; + VkPhysicalDeviceFeatures dev_features; + vkGetPhysicalDeviceProperties(devices[i], &dev_properties); + vkGetPhysicalDeviceFeatures (devices[i], &dev_features); + + Com_Printf("dev %d: %s\n", i, dev_properties.deviceName); + Com_Printf("max number of allocations %d\n", dev_properties.limits.maxMemoryAllocationCount); + uint32_t num_ext; + vkEnumerateDeviceExtensionProperties(devices[i], NULL, &num_ext, NULL); + + VkExtensionProperties *ext_properties = alloca(sizeof(VkExtensionProperties) * num_ext); + vkEnumerateDeviceExtensionProperties(devices[i], NULL, &num_ext, ext_properties); + + Com_Printf("supported extensions:\n"); + for(int j = 0; j < num_ext; j++) { + Com_Printf("%s, ", ext_properties[j].extensionName); + if(!strcmp(ext_properties[j].extensionName, VK_NV_RAY_TRACING_EXTENSION_NAME)) { + if(picked_device < 0) + picked_device = i; + } + } + 
Com_Printf("\n"); + } + + if(picked_device < 0) { + Com_Error(ERR_FATAL, "could not find any suitable device supporting " VK_NV_RAY_TRACING_EXTENSION_NAME"!"); + } + + Com_Printf("picked device %d\n", picked_device); + + qvk.physical_device = devices[picked_device]; + + vkGetPhysicalDeviceMemoryProperties(qvk.physical_device, &qvk.mem_properties); + + /* queue family and create physical device */ + uint32_t num_queue_families = 0; + vkGetPhysicalDeviceQueueFamilyProperties(qvk.physical_device, &num_queue_families, NULL); + VkQueueFamilyProperties *queue_families = alloca(sizeof(VkQueueFamilyProperties) * num_queue_families); + vkGetPhysicalDeviceQueueFamilyProperties(qvk.physical_device, &num_queue_families, queue_families); + + Com_Printf("num queue families: %d\n", num_queue_families); + + qvk.queue_idx_graphics = -1; + qvk.queue_idx_compute = -1; + qvk.queue_idx_transfer = -1; + + for(int i = 0; i < num_queue_families; i++) { + if(!queue_families[i].queueCount) + continue; + VkBool32 present_support = 0; + vkGetPhysicalDeviceSurfaceSupportKHR(qvk.physical_device, i, qvk.surface, &present_support); + if(!present_support) + continue; + if((queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT && qvk.queue_idx_graphics < 0) { + qvk.queue_idx_graphics = i; + } + if((queue_families[i].queueFlags & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT && qvk.queue_idx_compute < 0) { + qvk.queue_idx_compute = i; + } + if((queue_families[i].queueFlags & VK_QUEUE_TRANSFER_BIT) == VK_QUEUE_TRANSFER_BIT && qvk.queue_idx_transfer < 0) { + qvk.queue_idx_transfer = i; + } + } + + if(qvk.queue_idx_graphics < 0 || qvk.queue_idx_compute < 0 || qvk.queue_idx_transfer < 0) { + Com_Error(ERR_FATAL, "error: could not find suitable queue family!\n"); + return 1; + } + + float queue_priorities = 1.0f; + int num_create_queues = 0; + VkDeviceQueueCreateInfo queue_create_info[3]; + + { + VkDeviceQueueCreateInfo q = { + .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, + .queueCount = 1, + .pQueuePriorities = &queue_priorities, + .queueFamilyIndex = qvk.queue_idx_graphics, + }; + + queue_create_info[num_create_queues++] = q; + }; + if(qvk.queue_idx_compute != qvk.queue_idx_graphics) { + VkDeviceQueueCreateInfo q = { + .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, + .queueCount = 1, + .pQueuePriorities = &queue_priorities, + .queueFamilyIndex = qvk.queue_idx_compute, + }; + queue_create_info[num_create_queues++] = q; + }; + if(qvk.queue_idx_transfer != qvk.queue_idx_graphics && qvk.queue_idx_transfer != qvk.queue_idx_compute) { + VkDeviceQueueCreateInfo q = { + .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, + .queueCount = 1, + .pQueuePriorities = &queue_priorities, + .queueFamilyIndex = qvk.queue_idx_transfer, + }; + queue_create_info[num_create_queues++] = q; + }; + + VkPhysicalDeviceDescriptorIndexingFeaturesEXT idx_features = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT, + .runtimeDescriptorArray = 1, + .shaderSampledImageArrayNonUniformIndexing = 1, + }; + VkPhysicalDeviceFeatures2 device_features = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR, + .pNext = &idx_features, + + .features = { + .robustBufferAccess = 1, + .fullDrawIndexUint32 = 1, + .imageCubeArray = 1, + .independentBlend = 1, + .geometryShader = 1, + .tessellationShader = 1, + .sampleRateShading = 0, + .dualSrcBlend = 1, + .logicOp = 1, + .multiDrawIndirect = 1, + .drawIndirectFirstInstance = 1, + .depthClamp = 1, + .depthBiasClamp = 1, + .fillModeNonSolid = 0, + 
.depthBounds = 1, + .wideLines = 0, + .largePoints = 0, + .alphaToOne = 1, + .multiViewport = 0, + .samplerAnisotropy = 1, + .textureCompressionETC2 = 0, + .textureCompressionASTC_LDR = 0, + .textureCompressionBC = 0, + .occlusionQueryPrecise = 0, + .pipelineStatisticsQuery = 1, + .vertexPipelineStoresAndAtomics = 1, + .fragmentStoresAndAtomics = 1, + .shaderTessellationAndGeometryPointSize = 1, + .shaderImageGatherExtended = 1, + .shaderStorageImageExtendedFormats = 1, + .shaderStorageImageMultisample = 1, + .shaderStorageImageReadWithoutFormat = 1, + .shaderStorageImageWriteWithoutFormat = 1, + .shaderUniformBufferArrayDynamicIndexing = 1, + .shaderSampledImageArrayDynamicIndexing = 1, + .shaderStorageBufferArrayDynamicIndexing = 1, + .shaderStorageImageArrayDynamicIndexing = 1, + .shaderClipDistance = 1, + .shaderCullDistance = 1, + .shaderFloat64 = 1, + .shaderInt64 = 1, + .shaderInt16 = 1, + .shaderResourceResidency = 1, + .shaderResourceMinLod = 1, + .sparseBinding = 1, + .sparseResidencyBuffer = 1, + .sparseResidencyImage2D = 1, + .sparseResidencyImage3D = 1, + .sparseResidency2Samples = 1, + .sparseResidency4Samples = 1, + .sparseResidency8Samples = 1, + .sparseResidency16Samples = 1, + .sparseResidencyAliased = 1, + .variableMultisampleRate = 0, + .inheritedQueries = 1, + } + }; + VkDeviceCreateInfo dev_create_info = { + .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, + .pNext = &device_features, + .pQueueCreateInfos = queue_create_info, + .queueCreateInfoCount = num_create_queues, + .enabledExtensionCount = LENGTH(vk_requested_device_extensions), + .ppEnabledExtensionNames = vk_requested_device_extensions, + }; + + /* create device and queue */ + _VK(vkCreateDevice(qvk.physical_device, &dev_create_info, NULL, &qvk.device)); + + vkGetDeviceQueue(qvk.device, qvk.queue_idx_graphics, 0, &qvk.queue_graphics); + vkGetDeviceQueue(qvk.device, qvk.queue_idx_compute, 0, &qvk.queue_compute); + vkGetDeviceQueue(qvk.device, qvk.queue_idx_transfer, 0, &qvk.queue_transfer); + +#define _VK_EXTENSION_DO(a) \ + q##a = (PFN_##a) vkGetDeviceProcAddr(qvk.device, #a); \ + if(!q##a) { Com_EPrintf("warning: could not load function %s\n", #a); } + _VK_EXTENSION_LIST +#undef _VK_EXTENSION_DO + + return 0; +} + +qboolean +load_file(const char *path, char **data, size_t *s) +{ + *data = NULL; + FILE *f = fopen(path, "rb"); + if(!f) { + //Com_EPrintf("could not open %s\n", path); + Com_Error(ERR_FATAL, "could not open %s\n", path); + /* let's try not to crash everything */ + char *ret = malloc(1); + *s = 1; + ret[0] = 0; + return qfalse; + } + fseek(f, 0, SEEK_END); + *s = ftell(f); + rewind(f); + + *data = malloc(*s + 1); + //*data = aligned_alloc(4, *s + 1); // XXX lets hope malloc returns aligned memory + if(fread(*data, 1, *s, f) != *s) { + //Com_EPrintf("could not read file %s\n", path); + Com_Error(ERR_FATAL, "could not read file %s\n", path); + fclose(f); + *data[0] = 0; + return qfalse; + } + fclose(f); + return qtrue; +} + +static VkShaderModule +create_shader_module_from_file(const char *name, const char *enum_name) +{ + char *data; + size_t size; + + char path[1024]; + snprintf(path, sizeof path, SHADER_PATH_TEMPLATE, name ? 
name : (enum_name + 8)); + if(!name) { + int len = 0; + for(len = 0; path[len]; len++) + path[len] = tolower(path[len]); + while(--len >= 0) { + if(path[len] == '_') { + path[len] = '.'; + break; + } + } + } + + if(!load_file(path, &data, &size)) { + free(data); + return VK_NULL_HANDLE; + } + + VkShaderModule module; + + VkShaderModuleCreateInfo create_info = { + .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, + .codeSize = size, + .pCode = (uint32_t *) data, + }; + + _VK(vkCreateShaderModule(qvk.device, &create_info, NULL, &module)); + + free(data); + + return module; +} + +VkResult +vkpt_load_shader_modules() +{ + VkResult ret = VK_SUCCESS; +#define SHADER_MODULE_DO(a) do { \ + qvk.shader_modules[a] = create_shader_module_from_file(shader_module_file_names[a], #a); \ + ret = (ret == VK_SUCCESS && qvk.shader_modules[a]) ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED; \ + if(qvk.shader_modules[a]) { \ + ATTACH_LABEL_VARIABLE_NAME((uint64_t)qvk.shader_modules[a], SHADER_MODULE, #a); \ + }\ + } while(0); + + LIST_SHADER_MODULES + +#undef SHADER_MODULE_DO + return ret; +} + +VkResult +vkpt_destroy_shader_modules() +{ +#define SHADER_MODULE_DO(a) \ + vkDestroyShaderModule(qvk.device, qvk.shader_modules[a], NULL); \ + qvk.shader_modules[a] = VK_NULL_HANDLE; + + LIST_SHADER_MODULES + +#undef SHADER_MODULE_DO + + return VK_SUCCESS; +} + +VkResult +destroy_swapchain() +{ + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + vkDestroyImageView (qvk.device, qvk.swap_chain_image_views[i], NULL); + } + + vkDestroySwapchainKHR(qvk.device, qvk.swap_chain, NULL); + return VK_SUCCESS; +} + +int +destroy_vulkan() +{ + vkDeviceWaitIdle(qvk.device); + + destroy_swapchain(); + vkDestroySurfaceKHR (qvk.instance, qvk.surface, NULL); + + for(int i = 0; i < NUM_SEMAPHORES; i++) { + vkDestroySemaphore(qvk.device, qvk.semaphores[i], NULL); + } + + for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vkDestroyFence(qvk.device, qvk.fences_frame_sync[i], NULL); + } + + vkDestroyCommandPool (qvk.device, qvk.command_pool, NULL); + + vkDestroyDevice (qvk.device, NULL); + _VK(qvkDestroyDebugUtilsMessengerEXT(qvk.instance, qvk.dbg_messenger, NULL)); + vkDestroyInstance (qvk.instance, NULL); + + free(qvk.extensions); + qvk.extensions = NULL; + qvk.num_extensions = 0; + + free(qvk.layers); + qvk.layers = NULL; + qvk.num_layers = 0; + + return 0; +} + +static int +get_model_flags(const char *name) +{ + const char *light_sources[] = { + "models/objects/explode/tris.md2", + "models/objects/flash/tris.md2", + "models/objects/r_explode/tris.md2", + "models/objects/laser/tris.md2", + "models/objects/rocket/tris.md2", + //"models/weapons/v_blast/tris.md2", /* make blaster a flash light */ + }; + + for(int i = 0; i < LENGTH(light_sources); i++) { + if(!strcmp(name, light_sources[i])) + return BSP_FLAG_LIGHT; + } + return 0; +} + +inline int +is_light(int mat) +{ + return (mat & (BSP_FLAG_LIGHT | BSP_FLAG_WATER)) == BSP_FLAG_LIGHT; +} + +static void +upload_entity_transforms(uint32_t *num_instances, uint32_t *num_vertices) +{ + static int entity_frame_num = 0; + static int model_entity_ids[2][MAX_ENTITIES]; + static int world_entity_ids[2][MAX_ENTITIES]; + static int model_entity_id_count[2]; + static int world_entity_id_count[2]; + + + entity_frame_num = !entity_frame_num; + + QVKUniformBuffer_t *ubo = &vkpt_refdef.uniform_buffer; + ubo->num_lights = 0; + ubo->num_instances_model_bsp = 0; + memcpy(ubo->bsp_mesh_instances_prev, + ubo->bsp_mesh_instances, + sizeof(ubo->bsp_mesh_instances_prev)); + 
memcpy(ubo->model_instances_prev, + ubo->model_instances, + sizeof(ubo->model_instances_prev)); + int model_instance_idx = 0; + int bsp_mesh_idx = 0; + int num_instanced_vert = 0; /* need to track this here to find lights */ + + static uvec4_t bsp_cluster_id_prev[SHADER_MAX_BSP_ENTITIES / 4]; + static uvec4_t model_cluster_id_prev[SHADER_MAX_ENTITIES / 4]; + + memcpy(bsp_cluster_id_prev, ubo->bsp_cluster_id, sizeof(ubo->bsp_cluster_id)); + memcpy(model_cluster_id_prev, ubo->model_cluster_id, sizeof(ubo->model_cluster_id)); + + int instance_idx = 0; + for(int i = 0; i < vkpt_refdef.fd->num_entities; i++) { + entity_t *e = vkpt_refdef.fd->entities + i; + + float M[16]; + create_entity_matrix(M, e); + + /* embedded in bsp */ + if (e->model & 0x80000000) { + assert(bsp_mesh_idx < SHADER_MAX_BSP_ENTITIES); + + /* update cluster index */ + float pos_center_orig[4]; + float pos_center_trans[4]; + memcpy(pos_center_orig, vkpt_refdef.bsp_mesh_world.model_centers[~e->model], sizeof(float) * 3); + pos_center_orig[3] = 1.0; + mult_matrix_vector(pos_center_trans, M, pos_center_orig); + uint32_t cluster_id = BSP_PointLeaf(bsp_world_model->nodes, pos_center_trans)->cluster; + + memcpy(&ubo->bsp_mesh_instances[bsp_mesh_idx].M, M, sizeof(M)); + int idx = ~e->model; + int mesh_vert_cnt = vkpt_refdef.bsp_mesh_world.models_idx_count[idx]; + ubo->bsp_prim_offset[bsp_mesh_idx / 4][bsp_mesh_idx % 4] /* insanity due to alignment :( */ + = vkpt_refdef.bsp_mesh_world.models_idx_offset[~e->model] / 3; + ubo->bsp_cluster_id[bsp_mesh_idx / 4][bsp_mesh_idx % 4] = cluster_id; + + world_entity_ids[entity_frame_num][bsp_mesh_idx] = e->id; + + bsp_mesh_idx++; + + ubo->instance_buf_offset[instance_idx / 4][instance_idx % 4] = num_instanced_vert / 3; + num_instanced_vert += mesh_vert_cnt; + + ubo->num_instances_model_bsp += 1 << 0; + instance_idx++; + } + } + for(int i = 0; i < vkpt_refdef.fd->num_entities; i++) { + entity_t *e = vkpt_refdef.fd->entities + i; + + float M[16]; + create_entity_matrix(M, e); + + model_t *model = NULL; + if(!(e->model & 0x80000000) && (model = MOD_ForHandle(e->model))) { + maliasmesh_t *mesh = &model->meshes[0]; + if(!model->meshes) { + continue; + } + image_t *img = mesh->skins[0]; + for(int s = 0; s < mesh->numskins; s++) { + if((img = mesh->skins[s])) + break; + } + + model_entity_ids[entity_frame_num][model_instance_idx] = e->id; + + ModelInstance_t *mi = &vkpt_refdef.uniform_buffer.model_instances[model_instance_idx]; + memcpy(mi->M, M, sizeof(float) * 16); + mi->offset_curr = mesh->vertex_offset + e->frame * mesh->numverts; + mi->offset_prev = mesh->vertex_offset + e->oldframe * mesh->numverts; + mi->backlerp = e->backlerp; + mi->material = img ? 
(int)(img - r_images) : ~0; + mi->material |= get_model_flags(model->name); + + /* insanity due to alignment :( */ + uint32_t cluster_id = BSP_PointLeaf(bsp_world_model->nodes, e->origin)->cluster; + ubo->model_idx_offset[model_instance_idx / 4][model_instance_idx % 4] = mesh->idx_offset; + ubo->model_cluster_id[model_instance_idx / 4][model_instance_idx % 4] = cluster_id; + + uint32_t mat_flags = get_model_flags(model->name); + if(is_light(mat_flags)) { + ubo->light_offset_cnt[ubo->num_lights / 2][0 + (ubo->num_lights % 2) * 2] = num_instanced_vert / 3; + ubo->light_offset_cnt[ubo->num_lights / 2][1 + (ubo->num_lights % 2) * 2] = mesh->numtris; + ubo->num_lights++; + } + else if((e->flags & RF_SHELL_MASK)) { /* quad damage */ + ubo->light_offset_cnt[ubo->num_lights / 2][0 + (ubo->num_lights % 2) * 2] = (1 << 31) | (num_instanced_vert / 3); + ubo->light_offset_cnt[ubo->num_lights / 2][1 + (ubo->num_lights % 2) * 2] = mesh->numtris; + ubo->num_lights++; + } + + ubo->instance_buf_offset[instance_idx / 4][instance_idx % 4] = num_instanced_vert / 3; + + ubo->num_instances_model_bsp += 1 << 16; + instance_idx++; + model_instance_idx++; + num_instanced_vert += mesh->numtris * 3; + } + } + + /* anchor for last element */ + ubo->instance_buf_offset[instance_idx / 4][instance_idx % 4] = num_instanced_vert / 3; + + memset(ubo->world_current_to_prev, ~0u, sizeof(ubo->world_current_to_prev)); + memset(ubo->world_prev_to_current, ~0u, sizeof(ubo->world_prev_to_current)); + memset(ubo->model_current_to_prev, ~0u, sizeof(ubo->model_current_to_prev)); + memset(ubo->model_prev_to_current, ~0u, sizeof(ubo->model_prev_to_current)); + + world_entity_id_count[entity_frame_num] = bsp_mesh_idx; + uint32_t *world_current_to_prev = &ubo->world_current_to_prev[0][0]; + uint32_t *world_prev_to_current = &ubo->world_prev_to_current[0][0]; + for(int i = 0; i < world_entity_id_count[entity_frame_num]; i++) { + for(int j = 0; j < world_entity_id_count[!entity_frame_num]; j++) { + if(world_entity_ids[entity_frame_num][i] == world_entity_ids[!entity_frame_num][j]) { + world_current_to_prev[i] = j; + world_prev_to_current[j] = i; + } + } + } + + model_entity_id_count[entity_frame_num] = model_instance_idx; + uint32_t *model_current_to_prev = &ubo->model_current_to_prev[0][0]; + uint32_t *model_prev_to_current = &ubo->model_prev_to_current[0][0]; + for(int i = 0; i < model_entity_id_count[entity_frame_num]; i++) { + for(int j = 0; j < model_entity_id_count[!entity_frame_num]; j++) { + if(model_entity_ids[entity_frame_num][i] == model_entity_ids[!entity_frame_num][j]) { + model_current_to_prev[i] = j; + model_prev_to_current[j] = i; + } + } + } + + *num_instances = instance_idx; + *num_vertices = num_instanced_vert; + + for(int i = 0; i < world_entity_id_count[entity_frame_num]; i++) { + if(ubo->bsp_cluster_id[i / 4][i % 4] == ~0u) { + uint32_t id_prev = world_current_to_prev[i]; + if(id_prev == ~0u) + continue; + ubo->bsp_cluster_id[i / 4][i % 4] = bsp_cluster_id_prev[id_prev / 4][id_prev % 4]; + } + } + + for(int i = 0; i < model_entity_id_count[entity_frame_num]; i++) { + if(ubo->model_cluster_id[i / 4][i % 4] == ~0u) { + uint32_t id_prev = model_current_to_prev[i]; + if(id_prev == ~0u) + continue; + ubo->model_cluster_id[i / 4][i % 4] = model_cluster_id_prev[id_prev / 4][id_prev % 4]; + } + } +} + +#if 0 +/* code for updating the light hierarchy, potentially buggy */ +void +update_lights() +{ + vkpt_refdef.num_dynamic_lights = 0; + + bsp_mesh_t *bsp = &vkpt_refdef.bsp_mesh_world; + + float *light_pos = 
vkpt_refdef.light_positions + + vkpt_refdef.num_static_lights * 9; + uint32_t *light_col = vkpt_refdef.light_colors + + vkpt_refdef.num_static_lights; + + int num_lights = 0; + + for(int i = 0; i < vkpt_refdef.fd->num_entities; i++) { + model_t *model = NULL; + entity_t *e = vkpt_refdef.fd->entities + i; + float M[16]; + create_entity_matrix(M, e); + + /* embedded in bsp */ + if(e->model & 0x80000000) { + int idx_off = bsp->models_idx_offset[~e->model]; + int ent_is_light = 0; + for(int j = 0; j < bsp->models_idx_count[~e->model] / 3; j++) { // per prim + if(is_light(bsp->materials[idx_off / 3 + j])) { + ent_is_light |= 1; + for(int k = 0; k < 3; k++) { + float tmp[4]; + memcpy(tmp, bsp->positions + (idx_off + j * 3 + k) * 3, 3 * sizeof(float)); + tmp[3] = 1.0; + mult_matrix_vector(light_pos, M, tmp); + light_pos += 3; + } + vkpt_refdef.num_dynamic_lights++; + *light_col++ = ~0u; // fixme: actually add proper color + } + } + if(ent_is_light) + num_lights++; + } + else if((model = MOD_ForHandle(e->model))) { + maliasmesh_t *mesh = &model->meshes[0]; + if(!model->meshes) { + continue; + } + image_t *img = mesh->skins[0]; + for(int s = 0; s < mesh->numskins; s++) { + if((img = mesh->skins[s])) + break; + } + uint32_t mat_flags = get_model_flags(model->name); + if(!is_light(mat_flags)) + continue; + + num_lights++; + //Com_Printf("num light tri %d\n", mesh->numtris); + + int vert_off_curr = e->frame * mesh->numverts; + int vert_off_prev = e->oldframe * mesh->numverts; + int idx_cnt = mesh->numtris * 3; + float backlerp = e->backlerp; + + for(int j = 0; j < idx_cnt; j++) { + int idx = mesh->indices[j]; + + float pos[4] = { 0.0f, 0.0f, 0.0f, 1.0f }; + pos[0] += mesh->positions[idx + vert_off_curr][0] * (1.0f - backlerp); + pos[1] += mesh->positions[idx + vert_off_curr][1] * (1.0f - backlerp); + pos[2] += mesh->positions[idx + vert_off_curr][2] * (1.0f - backlerp); + + pos[0] += mesh->positions[idx + vert_off_prev][0] * (backlerp); + pos[1] += mesh->positions[idx + vert_off_prev][1] * (backlerp); + pos[2] += mesh->positions[idx + vert_off_prev][2] * (backlerp); + + mult_matrix_vector(light_pos, M, pos); + light_pos += 3; + } + for(int j = 0; j < idx_cnt / 3; j++) { + *light_col++ = 0x000045FF; // fixme: actually add proper color + } + vkpt_refdef.num_dynamic_lights += idx_cnt / 3; + } + } + + //Com_Printf("num lights: %d\n", num_lights); + +#if 0 + static int _cnt = 0; + char buf[1024]; + snprintf(buf, sizeof buf, "/tmp/light%04d.obj", _cnt++); + FILE *f = fopen(buf, "wb+"); + for(int i = 0; i < vkpt_refdef.num_static_lights + vkpt_refdef.num_dynamic_lights; + i++) { + + fprintf(f, "v %f %f %f\n", + vkpt_refdef.light_positions[i * 9 + 0 + 0], + vkpt_refdef.light_positions[i * 9 + 0 + 1], + vkpt_refdef.light_positions[i * 9 + 0 + 2]); + fprintf(f, "v %f %f %f\n", + vkpt_refdef.light_positions[i * 9 + 3 + 0], + vkpt_refdef.light_positions[i * 9 + 3 + 1], + vkpt_refdef.light_positions[i * 9 + 3 + 2]); + fprintf(f, "v %f %f %f\n", + vkpt_refdef.light_positions[i * 9 + 6 + 0], + vkpt_refdef.light_positions[i * 9 + 6 + 1], + vkpt_refdef.light_positions[i * 9 + 6 + 2]); + fprintf(f, "f -1 -2 -3\n"); + } + fclose(f); +#endif + + vkpt_lh_update( + vkpt_refdef.light_positions, + vkpt_refdef.light_colors, + vkpt_refdef.num_static_lights, + //vkpt_refdef.num_static_lights + vkpt_refdef.num_dynamic_lights, + qvk.cmd_buf_current); + +} +#endif + +static int +get_output_img() +{ + if(!strcmp(cvar_rtx->string, "on")) { + return VKPT_IMG_PT_COLOR_A + (qvk.frame_counter & 1); + } + + 
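+ /* vkpt_reconstruction selects which image gets blitted to the swap chain:
+  * 0 shows the raw path-tracer output (alternating between its two color
+  * images each frame), 1 the temporally anti-aliased A-SVGF result,
+  * 2 a debug image and 3 the A-SVGF gradient buffer. */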
switch(vkpt_reconstruction->integer) { + default: + case 0: return VKPT_IMG_PT_COLOR_A + (qvk.frame_counter & 1); break; + case 1: return VKPT_IMG_ASVGF_TAA_A + (qvk.frame_counter & 1); break; + case 2: return VKPT_IMG_DEBUG; break; + case 3: return VKPT_IMG_ASVGF_GRAD_B; break; + } +} + + +/* renders the map ingame */ +void +R_RenderFrame(refdef_t *fd) +{ + vkpt_refdef.fd = fd; + LOG_FUNC(); + if(!vkpt_refdef.bsp_mesh_world_loaded) + return; + + //update_lights(); /* updates the light hierarchy, not present in this version */ + + uint32_t num_vert_instanced; + uint32_t num_instances; + upload_entity_transforms(&num_instances, &num_vert_instanced); + + float P[16]; + float V[16]; + float VP[16]; + float inv_VP[16]; + + QVKUniformBuffer_t *ubo = &vkpt_refdef.uniform_buffer; + memcpy(ubo->VP_prev, ubo->VP, sizeof(float) * 16); + create_projection_matrix(P, vkpt_refdef.z_near, vkpt_refdef.z_far, fd->fov_x, fd->fov_y); + create_view_matrix(V, fd); + mult_matrix_matrix(VP, P, V); + memcpy(ubo->V, V, sizeof(float) * 16); + memcpy(ubo->VP, VP, sizeof(float) * 16); + inverse(VP, inv_VP); + memcpy(ubo->invVP, inv_VP, sizeof(float) * 16); + ubo->current_frame_idx = qvk.frame_counter; + ubo->width = qvk.extent.width; + ubo->height = qvk.extent.height; + ubo->under_water = !!(fd->rdflags & RDF_UNDERWATER); + ubo->time = fd->time; + memcpy(ubo->cam_pos, fd->vieworg, sizeof(float) * 3); + + _VK(vkpt_uniform_buffer_update()); + + _VK(vkpt_profiler_query(PROFILER_INSTANCE_GEOMETRY, PROFILER_START)); + vkpt_vertex_buffer_create_instance(num_instances); + _VK(vkpt_profiler_query(PROFILER_INSTANCE_GEOMETRY, PROFILER_STOP)); + + _VK(vkpt_profiler_query(PROFILER_BVH_UPDATE, PROFILER_START)); + vkpt_pt_destroy_dynamic(qvk.current_image_index); + + assert(num_vert_instanced % 3 == 0); + vkpt_pt_create_dynamic(qvk.current_image_index, qvk.buf_vertex.buffer, + offsetof(VertexBuffer, positions_instanced), num_vert_instanced); + + vkpt_pt_create_toplevel(qvk.current_image_index); + vkpt_pt_update_descripter_set_bindings(qvk.current_image_index); + _VK(vkpt_profiler_query(PROFILER_BVH_UPDATE, PROFILER_STOP)); + + _VK(vkpt_profiler_query(PROFILER_ASVGF_GRADIENT_SAMPLES, PROFILER_START)); + vkpt_asvgf_create_gradient_samples(qvk.cmd_buf_current, qvk.frame_counter); + _VK(vkpt_profiler_query(PROFILER_ASVGF_GRADIENT_SAMPLES, PROFILER_STOP)); + + _VK(vkpt_profiler_query(PROFILER_PATH_TRACER, PROFILER_START)); + vkpt_pt_record_cmd_buffer(qvk.cmd_buf_current, qvk.frame_counter); + _VK(vkpt_profiler_query(PROFILER_PATH_TRACER, PROFILER_STOP)); + + _VK(vkpt_profiler_query(PROFILER_ASVGF_FULL, PROFILER_START)); + vkpt_asvgf_record_cmd_buffer(qvk.cmd_buf_current); + _VK(vkpt_profiler_query(PROFILER_ASVGF_FULL, PROFILER_STOP)); + + VkImageSubresourceRange subresource_range = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + }; + + IMAGE_BARRIER(qvk.cmd_buf_current, + .image = qvk.swap_chain_images[qvk.current_image_index], + .subresourceRange = subresource_range, + .srcAccessMask = 0, + .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL + ); + + int output_img = get_output_img(); + + IMAGE_BARRIER(qvk.cmd_buf_current, + .image = qvk.images[output_img], + .subresourceRange = subresource_range, + .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, + .oldLayout = VK_IMAGE_LAYOUT_GENERAL, + .newLayout = 
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL + ); + + VkOffset3D blit_size = { + .x = qvk.extent.width, .y = qvk.extent.height, .z = 1 + }; + VkImageBlit img_blit = { + .srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }, + .dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }, + .srcOffsets = { [1] = blit_size }, + .dstOffsets = { [1] = blit_size }, + }; + vkCmdBlitImage(qvk.cmd_buf_current, + qvk.images[output_img], VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + qvk.swap_chain_images[qvk.current_image_index], VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + 1, &img_blit, VK_FILTER_NEAREST); + + IMAGE_BARRIER(qvk.cmd_buf_current, + .image = qvk.swap_chain_images[qvk.current_image_index], + .subresourceRange = subresource_range, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = 0, + .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR + ); +} + +static void +recreate_swapchain() +{ + vkDeviceWaitIdle(qvk.device); + vkpt_destroy_all(VKPT_INIT_SWAPCHAIN_RECREATE); + destroy_swapchain(); + SDL_GetWindowSize(qvk.window, &qvk.win_width, &qvk.win_height); + create_swapchain(); + vkpt_initialize_all(VKPT_INIT_SWAPCHAIN_RECREATE); +} + +void +R_BeginFrame() +{ + LOG_FUNC(); +retry:; + int sem_idx = qvk.frame_counter % MAX_FRAMES_IN_FLIGHT; + + vkWaitForFences(qvk.device, 1, qvk.fences_frame_sync + sem_idx, VK_TRUE, ~((uint64_t) 0)); + VkResult res_swapchain = vkAcquireNextImageKHR(qvk.device, qvk.swap_chain, ~((uint64_t) 0), + qvk.semaphores[SEM_IMG_AVAILABLE + sem_idx], VK_NULL_HANDLE, &qvk.current_image_index); + if(res_swapchain == VK_ERROR_OUT_OF_DATE_KHR || res_swapchain == VK_SUBOPTIMAL_KHR) { + recreate_swapchain(); + goto retry; + } + else if(res_swapchain != VK_SUCCESS) { + _VK(res_swapchain); + } + vkResetFences(qvk.device, 1, qvk.fences_frame_sync + sem_idx); + + _VK(vkpt_profiler_next_frame(qvk.current_image_index)); + + /* cannot be called in R_EndRegistration as it would miss the initially textures (charset etc) */ + if(register_model_dirty) { + _VK(vkpt_vertex_buffer_upload_models_to_staging()); + _VK(vkpt_vertex_buffer_upload_staging()); + register_model_dirty = 0; + } + vkpt_textures_end_registration(); + vkpt_draw_clear_stretch_pics(); + + qvk.cmd_buf_current = qvk.command_buffers[qvk.current_image_index]; + + VkCommandBufferBeginInfo begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, + .pInheritanceInfo = NULL, + }; + + _VK(vkResetCommandBuffer(qvk.cmd_buf_current, 0)); + _VK(vkBeginCommandBuffer(qvk.cmd_buf_current, &begin_info)); + _VK(vkpt_profiler_query(PROFILER_FRAME_TIME, PROFILER_START)); +} + +void +R_EndFrame() +{ + LOG_FUNC(); + + if(vkpt_profiler->integer) + draw_profiler(); + vkpt_draw_submit_stretch_pics(&qvk.cmd_buf_current); + _VK(vkpt_profiler_query(PROFILER_FRAME_TIME, PROFILER_STOP)); + _VK(vkEndCommandBuffer(qvk.cmd_buf_current)); + + int sem_idx = qvk.frame_counter % MAX_FRAMES_IN_FLIGHT; + + VkSemaphore wait_semaphores[] = { qvk.semaphores[SEM_IMG_AVAILABLE + sem_idx] }; + VkPipelineStageFlags wait_stages[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT }; + VkSemaphore signal_semaphores[] = { qvk.semaphores[SEM_RENDER_FINISHED + sem_idx] }; + + VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .waitSemaphoreCount = LENGTH(wait_semaphores), + .pWaitSemaphores = wait_semaphores, + .signalSemaphoreCount = LENGTH(signal_semaphores), + .pSignalSemaphores = signal_semaphores, + .pWaitDstStageMask = wait_stages, + 
.commandBufferCount = 1, + .pCommandBuffers = &qvk.cmd_buf_current, + }; + + _VK(vkQueueSubmit(qvk.queue_graphics, 1, &submit_info, qvk.fences_frame_sync[sem_idx])); + + VkPresentInfoKHR present_info = { + .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + .waitSemaphoreCount = LENGTH(signal_semaphores), + .pWaitSemaphores = signal_semaphores, + .swapchainCount = 1, + .pSwapchains = &qvk.swap_chain, + .pImageIndices = &qvk.current_image_index, + .pResults = NULL, + }; + + VkResult res_present = vkQueuePresentKHR(qvk.queue_graphics, &present_info); + if(res_present == VK_ERROR_OUT_OF_DATE_KHR || res_present == VK_SUBOPTIMAL_KHR) { + recreate_swapchain(); + } + qvk.frame_counter++; +} + +void +R_ModeChanged(int width, int height, int flags, int rowbytes, void *pixels) +{ + Com_Printf("mode changed %d %d\n", width, height); + + r_config.width = width; + r_config.height = height; + r_config.flags = flags; +} + +float +R_ClampScale(cvar_t *var) +{ + if(!var) + return 1.0f; + + if(var->value) + return 1.0f / Cvar_ClampValue(var, 1.0f, 10.0f); + + if(r_config.width * r_config.height >= 2560 * 1440) + return 0.25f; + + if(r_config.width * r_config.height >= 1280 * 720) + return 0.5f; + + return 1.0f; +} + +/* called when the library is loaded */ +qboolean +R_Init(qboolean total) +{ + registration_sequence = 1; + qvk.window = SDL_CreateWindow("vkq2", 20, 50, r_config.width, r_config.height, SDL_WINDOW_VULKAN | SDL_WINDOW_RESIZABLE); + if(!qvk.window) { + Com_Error(ERR_FATAL, "[vkq2] could not create window `%s'\n", SDL_GetError()); + return qfalse; + } + extern SDL_Window *sdl_window; + sdl_window = qvk.window; + + if (!VID_Init()) { + Com_Error(ERR_FATAL, "[vkq2] VID_Init failed\n"); + return qfalse; + } + + vkpt_profiler = Cvar_Get("vkpt_profiler", "0", 0); + vkpt_reconstruction = Cvar_Get("vkpt_reconstruction", "1", 0); + cvar_rtx = Cvar_Get("rtx", "off", 0); + + qvk.win_width = r_config.width; + qvk.win_height = r_config.height; + + if(!SDL_Vulkan_GetInstanceExtensions(qvk.window, &qvk.num_sdl2_extensions, NULL)) { + Com_Error(ERR_FATAL, "[vkq2] couldn't get extension count\n"); + return qfalse; + } + qvk.sdl2_extensions = malloc(sizeof(char*) * qvk.num_sdl2_extensions); + if(!SDL_Vulkan_GetInstanceExtensions(qvk.window, &qvk.num_sdl2_extensions, qvk.sdl2_extensions)) { + Com_Error(ERR_FATAL, "[vkq2] couldn't get extensions\n"); + return qfalse; + } + + Com_Printf("[vkq2] vk extension required by SDL2: \n"); + for(int i = 0; i < qvk.num_sdl2_extensions; i++) { + Com_Printf(" %s\n", qvk.sdl2_extensions[i]); + } + + IMG_Init(); + IMG_GetPalette(); + MOD_Init(); + + vkpt_refdef.light_positions = calloc(MAX_LIGHTS * 3 * 3, sizeof(float)); + vkpt_refdef.light_colors = calloc(MAX_LIGHTS, sizeof(uint32_t)); + + if(init_vulkan()) { + Com_Error(ERR_FATAL, "[vkq2] init vulkan failed\n"); + return qfalse; + } + _VK(create_swapchain()); + _VK(create_command_pool_and_fences()); + + _VK(vkpt_initialize_all(VKPT_INIT_DEFAULT)); + + Cmd_AddCommand("reload_shader", (xcommand_t)&vkpt_reload_shader); + + return qtrue; +} + +/* called before the library is unloaded */ +void +R_Shutdown(qboolean total) +{ + _VK(vkpt_destroy_all(VKPT_INIT_DEFAULT)); + + if(destroy_vulkan()) { + Com_EPrintf("[vkpt] destroy vulkan failed\n"); + } + + IMG_Shutdown(); + MOD_Shutdown(); // todo: currently leaks memory, need to clear submeshes + VID_Shutdown(); +} + +// for screenshots +byte * +IMG_ReadPixels(int *width, int *height, int *rowbytes) +{ + return 0; // sorry guys +} + +void +R_SetSky(const char *name, float rotate, vec3_t axis) 
+{ + int i; + char pathname[MAX_QPATH]; + // 3dstudio environment map names + const char *suf[6] = { "ft", "bk", "up", "dn", "rt", "lf" }; + + byte *data = NULL; + + int w_prev, h_prev; + for (i = 0; i < 6; i++) { + Q_concat(pathname, sizeof(pathname), "env/", name, suf[i], ".tga", NULL); + FS_NormalizePath(pathname, pathname); + image_t img; + qerror_t ret = load_img(pathname, &img); + if(ret != Q_ERR_SUCCESS) { + if(data) { + Z_Free(data); + } + data = Z_Malloc(6 * sizeof(uint32_t)); + for(int j = 0; j < 6; j++) + ((uint32_t *)data)[j] = 0xff00ffffu; + w_prev = h_prev = 1; + break; + } + + size_t s = img.upload_width * img.upload_height * 4; + if(!data) { + data = Z_Malloc(s * 6); + w_prev = img.upload_width; + h_prev = img.upload_height; + } + + memcpy(data + s * i, img.pix_data, s); + + assert(w_prev == img.upload_width); + assert(h_prev == img.upload_height); + } + + vkpt_textures_upload_envmap(w_prev, h_prev, data); + Z_Free(data); +} + +void R_AddDecal(decal_t *d) +{ } + +void +R_BeginRegistration(const char *name) +{ + registration_sequence++; + LOG_FUNC(); + Com_Printf("loading %s\n", name); + vkDeviceWaitIdle(qvk.device); + + if(vkpt_refdef.bsp_mesh_world_loaded) { + bsp_mesh_destroy(&vkpt_refdef.bsp_mesh_world); + vkpt_refdef.bsp_mesh_world_loaded = 0; + } + + if(bsp_world_model) { + BSP_Free(bsp_world_model); + bsp_world_model = NULL; + } + + char bsp_path[MAX_QPATH]; + Q_concat(bsp_path, sizeof(bsp_path), "maps/", name, ".bsp", NULL); + bsp_t *bsp; + qerror_t ret = BSP_Load(bsp_path, &bsp); + if(!bsp) { + Com_Error(ERR_DROP, "%s: couldn't load %s: %s", __func__, bsp_path, Q_ErrorString(ret)); + } + bsp_world_model = bsp; + bsp_mesh_register_textures(bsp); + bsp_mesh_create_from_bsp(&vkpt_refdef.bsp_mesh_world, bsp); + _VK(vkpt_vertex_buffer_upload_bsp_mesh_to_staging(&vkpt_refdef.bsp_mesh_world)); + _VK(vkpt_vertex_buffer_upload_staging()); + vkpt_refdef.bsp_mesh_world_loaded = 1; + bsp = NULL; + + _VK(vkpt_pt_destroy_static()); + const bsp_mesh_t *m = &vkpt_refdef.bsp_mesh_world; + _VK(vkpt_pt_create_static(qvk.buf_vertex.buffer, offsetof(VertexBuffer, positions_bsp), m->world_idx_count)); + + { + int num_prims = 0; + for(int i = 0; i < m->world_idx_count / 3; i++) { + num_prims += !!is_light(m->materials[i]); + } + + vkpt_refdef.num_static_lights = num_prims; + + for(int i = 0, lh_idx = 0; i < m->world_idx_count / 3; i++) { + if(!is_light(m->materials[i])) + continue; + + image_t *img = &r_images[m->materials[i] & BSP_TEXTURE_MASK]; + vkpt_refdef.light_colors[lh_idx] = img->light_color; + + for(int j = 0; j < 3; j++) { + int bsp_idx = m->indices[i * 3 + j]; + assert(bsp_idx >= 0 && bsp_idx < m->num_vertices); + float *p_in = m->positions + bsp_idx * 3; + float *p_out = vkpt_refdef.light_positions + (lh_idx * 3 + j) * 3; + + p_out[0] = p_in[0]; + p_out[1] = p_in[1]; + p_out[2] = p_in[2]; + } + lh_idx++; + } + } + +} + +void +R_EndRegistration(void) +{ + LOG_FUNC(); + IMG_FreeUnused(); + MOD_FreeUnused(); +} + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/matrix.c b/src/refresh/vkpt/matrix.c new file mode 100644 index 0000000..91608fe --- /dev/null +++ b/src/refresh/vkpt/matrix.c @@ -0,0 +1,310 @@ +/* +Copyright (C) 2018 Christoph Schied +Copyright (C) 2003-2006 Andrey Nazarov + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#include "vkpt.h" + +void +create_entity_matrix(float matrix[16], entity_t *e) +{ + vec3_t axis[3]; + vec3_t origin; + origin[0] = (1.f-e->backlerp) * e->origin[0] + e->backlerp * e->oldorigin[0]; + origin[1] = (1.f-e->backlerp) * e->origin[1] + e->backlerp * e->oldorigin[1]; + origin[2] = (1.f-e->backlerp) * e->origin[2] + e->backlerp * e->oldorigin[2]; + + AnglesToAxis(e->angles, axis); + + matrix[0] = axis[0][0]; + matrix[4] = axis[1][0]; + matrix[8] = axis[2][0]; + matrix[12] = origin[0]; + + matrix[1] = axis[0][1]; + matrix[5] = axis[1][1]; + matrix[9] = axis[2][1]; + matrix[13] = origin[1]; + + matrix[2] = axis[0][2]; + matrix[6] = axis[1][2]; + matrix[10] = axis[2][2]; + matrix[14] = origin[2]; + + matrix[3] = 0.0f; + matrix[7] = 0.0f; + matrix[11] = 0.0f; + matrix[15] = 1.0f; +} + +void +create_projection_matrix(float matrix[16], float znear, float zfar, float fov_x, float fov_y) +{ + float xmin, xmax, ymin, ymax; + float width, height, depth; + + //znear = gl_znear->value; + + + //if (glr.fd.rdflags & RDF_NOWORLDMODEL) + // zfar = 2048; + //else + // zfar = gl_static.world.size * 2; + + //znear = 1.0f; + //zfar = 4096; + + ymax = znear * tan(fov_y * M_PI / 360.0); + ymin = -ymax; + + xmax = znear * tan(fov_x * M_PI / 360.0); + xmin = -xmax; + + width = xmax - xmin; + height = ymax - ymin; + depth = zfar - znear; + + matrix[0] = 2 * znear / width; + matrix[4] = 0; + matrix[8] = (xmax + xmin) / width; + matrix[12] = 0; + + matrix[1] = 0; + // negative because vulkan has inverted clipspace over opengl + matrix[5] = -2 * znear / height; + matrix[9] = (ymax + ymin) / height; + matrix[13] = 0; + + matrix[2] = 0; + matrix[6] = 0; + matrix[10] = -(zfar + znear) / depth; + matrix[14] = -2 * zfar * znear / depth; + + matrix[3] = 0; + matrix[7] = 0; + matrix[11] = -1; + matrix[15] = 0; +} + +void +create_orthographic_matrix(float matrix[16], float xmin, float xmax, + float ymin, float ymax, float znear, float zfar) +{ + float width, height, depth; + + width = xmax - xmin; + height = ymax - ymin; + depth = zfar - znear; + + matrix[0] = 2 / width; + matrix[4] = 0; + matrix[8] = 0; + matrix[12] = -(xmax + xmin) / width; + + matrix[1] = 0; + matrix[5] = 2 / height; + matrix[9] = 0; + matrix[13] = -(ymax + ymin) / height; + + matrix[2] = 0; + matrix[6] = 0; + matrix[10] = -2 / depth; + matrix[14] = -(zfar + znear) / depth; + + matrix[3] = 0; + matrix[7] = 0; + matrix[11] = 0; + matrix[15] = 1; +} + +void +create_view_matrix(float matrix[16], refdef_t *fd) +{ + vec3_t viewaxis[3]; + AnglesToAxis(fd->viewangles, viewaxis); + + matrix[0] = -viewaxis[1][0]; + matrix[4] = -viewaxis[1][1]; + matrix[8] = -viewaxis[1][2]; + matrix[12] = DotProduct(viewaxis[1], fd->vieworg); + + matrix[1] = viewaxis[2][0]; + matrix[5] = viewaxis[2][1]; + matrix[9] = viewaxis[2][2]; + matrix[13] = -DotProduct(viewaxis[2], fd->vieworg); + + matrix[2] = -viewaxis[0][0]; + matrix[6] = -viewaxis[0][1]; + matrix[10] = -viewaxis[0][2]; + matrix[14] = DotProduct(viewaxis[0], fd->vieworg); + + matrix[3] = 0; + matrix[7] = 0; + matrix[11] = 0; + matrix[15] = 1; +} + +void 
+inverse(const float *m, float *inv) +{ + inv[0] = m[5] * m[10] * m[15] - + m[5] * m[11] * m[14] - + m[9] * m[6] * m[15] + + m[9] * m[7] * m[14] + + m[13] * m[6] * m[11] - + m[13] * m[7] * m[10]; + + inv[1] = -m[1] * m[10] * m[15] + + m[1] * m[11] * m[14] + + m[9] * m[2] * m[15] - + m[9] * m[3] * m[14] - + m[13] * m[2] * m[11] + + m[13] * m[3] * m[10]; + + inv[2] = m[1] * m[6] * m[15] - + m[1] * m[7] * m[14] - + m[5] * m[2] * m[15] + + m[5] * m[3] * m[14] + + m[13] * m[2] * m[7] - + m[13] * m[3] * m[6]; + + inv[3] = -m[1] * m[6] * m[11] + + m[1] * m[7] * m[10] + + m[5] * m[2] * m[11] - + m[5] * m[3] * m[10] - + m[9] * m[2] * m[7] + + m[9] * m[3] * m[6]; + + inv[4] = -m[4] * m[10] * m[15] + + m[4] * m[11] * m[14] + + m[8] * m[6] * m[15] - + m[8] * m[7] * m[14] - + m[12] * m[6] * m[11] + + m[12] * m[7] * m[10]; + + inv[5] = m[0] * m[10] * m[15] - + m[0] * m[11] * m[14] - + m[8] * m[2] * m[15] + + m[8] * m[3] * m[14] + + m[12] * m[2] * m[11] - + m[12] * m[3] * m[10]; + + inv[6] = -m[0] * m[6] * m[15] + + m[0] * m[7] * m[14] + + m[4] * m[2] * m[15] - + m[4] * m[3] * m[14] - + m[12] * m[2] * m[7] + + m[12] * m[3] * m[6]; + + inv[7] = m[0] * m[6] * m[11] - + m[0] * m[7] * m[10] - + m[4] * m[2] * m[11] + + m[4] * m[3] * m[10] + + m[8] * m[2] * m[7] - + m[8] * m[3] * m[6]; + + inv[8] = m[4] * m[9] * m[15] - + m[4] * m[11] * m[13] - + m[8] * m[5] * m[15] + + m[8] * m[7] * m[13] + + m[12] * m[5] * m[11] - + m[12] * m[7] * m[9]; + + inv[9] = -m[0] * m[9] * m[15] + + m[0] * m[11] * m[13] + + m[8] * m[1] * m[15] - + m[8] * m[3] * m[13] - + m[12] * m[1] * m[11] + + m[12] * m[3] * m[9]; + + inv[10] = m[0] * m[5] * m[15] - + m[0] * m[7] * m[13] - + m[4] * m[1] * m[15] + + m[4] * m[3] * m[13] + + m[12] * m[1] * m[7] - + m[12] * m[3] * m[5]; + + inv[11] = -m[0] * m[5] * m[11] + + m[0] * m[7] * m[9] + + m[4] * m[1] * m[11] - + m[4] * m[3] * m[9] - + m[8] * m[1] * m[7] + + m[8] * m[3] * m[5]; + + inv[12] = -m[4] * m[9] * m[14] + + m[4] * m[10] * m[13] + + m[8] * m[5] * m[14] - + m[8] * m[6] * m[13] - + m[12] * m[5] * m[10] + + m[12] * m[6] * m[9]; + + inv[13] = m[0] * m[9] * m[14] - + m[0] * m[10] * m[13] - + m[8] * m[1] * m[14] + + m[8] * m[2] * m[13] + + m[12] * m[1] * m[10] - + m[12] * m[2] * m[9]; + + inv[14] = -m[0] * m[5] * m[14] + + m[0] * m[6] * m[13] + + m[4] * m[1] * m[14] - + m[4] * m[2] * m[13] - + m[12] * m[1] * m[6] + + m[12] * m[2] * m[5]; + + inv[15] = m[0] * m[5] * m[10] - + m[0] * m[6] * m[9] - + m[4] * m[1] * m[10] + + m[4] * m[2] * m[9] + + m[8] * m[1] * m[6] - + m[8] * m[2] * m[5]; + + float det = m[0] * inv[0] + m[1] * inv[4] + m[2] * inv[8] + m[3] * inv[12]; + + det = 1.0f / det; + + for(int i = 0; i < 16; i++) + inv[i] = inv[i] * det; +} + +void +mult_matrix_matrix(float *p, const float *a, const float *b) +{ + for(int i = 0; i < 4; i++) { + for(int j = 0; j < 4; j++) { + p[i * 4 + j] = + a[0 * 4 + j] * b[i * 4 + 0] + + a[1 * 4 + j] * b[i * 4 + 1] + + a[2 * 4 + j] * b[i * 4 + 2] + + a[3 * 4 + j] * b[i * 4 + 3]; + } + } +} + +void +mult_matrix_vector(float *p, const float *a, const float *b) +{ + int j; + for (j = 0; j < 4; j++) { + p[j] = + a[0 * 4 + j] * b[0] + + a[1 * 4 + j] * b[1] + + a[2 * 4 + j] * b[2] + + a[3 * 4 + j] * b[3]; + } +} + diff --git a/src/refresh/vkpt/models.c b/src/refresh/vkpt/models.c new file mode 100644 index 0000000..18e715d --- /dev/null +++ b/src/refresh/vkpt/models.c @@ -0,0 +1,477 @@ +/* +Copyright (C) 2018 Christoph Schied +Copyright (C) 2018 Florian Simon +Copyright (C) 2003-2006 Andrey Nazarov + +This program is free software; you can redistribute it 
and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +/* +Copyright (C) 2003-2006 Andrey Nazarov + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#include "vkpt.h" + +#include "format/md2.h" +#include "format/md3.h" +#include "format/sp2.h" +#include + +#if MAX_ALIAS_VERTS > TESS_MAX_VERTICES +#error TESS_MAX_VERTICES +#endif + +#if MD2_MAX_TRIANGLES > TESS_MAX_INDICES / 3 +#error TESS_MAX_INDICES +#endif + +qerror_t MOD_LoadMD2(model_t *model, const void *rawdata, size_t length) +{ + dmd2header_t header; + dmd2frame_t *src_frame; + dmd2trivertx_t *src_vert; + dmd2triangle_t *src_tri; + dmd2stvert_t *src_tc; + char *src_skin; + maliasframe_t *dst_frame; + maliasmesh_t *dst_mesh; + int val; + uint16_t remap[TESS_MAX_INDICES]; + uint16_t vertIndices[TESS_MAX_INDICES]; + uint16_t tcIndices[TESS_MAX_INDICES]; + uint16_t finalIndices[TESS_MAX_INDICES]; + int numverts, numindices; + char skinname[MAX_QPATH]; + vec_t scale_s, scale_t; + vec3_t mins, maxs; + qerror_t ret; + + if (length < sizeof(header)) { + return Q_ERR_FILE_TOO_SMALL; + } + + // byte swap the header + header = *(dmd2header_t *)rawdata; + for (int i = 0; i < sizeof(header) / 4; i++) { + ((uint32_t *)&header)[i] = LittleLong(((uint32_t *)&header)[i]); + } + + // validate the header + ret = MOD_ValidateMD2(&header, length); + if (ret) { + if (ret == Q_ERR_TOO_FEW) { + // empty models draw nothing + model->type = MOD_EMPTY; + return Q_ERR_SUCCESS; + } + return ret; + } + + // load all triangle indices + numindices = 0; + src_tri = (dmd2triangle_t *)((byte *)rawdata + header.ofs_tris); + for (int i = 0; i < header.num_tris; i++) { + int good = 1; + for (int j = 0; j < 3; j++) { + uint16_t idx_xyz = LittleShort(src_tri->index_xyz[j]); + uint16_t idx_st = LittleShort(src_tri->index_st[j]); + + // some broken models have 0xFFFF indices + if (idx_xyz >= header.num_xyz || idx_st >= header.num_st) { + good = 0; + break; + } + + vertIndices[numindices + j] = idx_xyz; + tcIndices[numindices + j] = idx_st; + } + if (good) { + // only count good triangles + numindices += 3; + } + src_tri++; + } + + if (numindices < 3) { + return Q_ERR_TOO_FEW; + } + + for (int i = 0; i < numindices; i++) { + remap[i] = 0xFFFF; + } + + // remap all triangle indices + numverts = 0; + src_tc = (dmd2stvert_t *)((byte *)rawdata + header.ofs_st); + 
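+ /* MD2 indexes positions and texcoords separately per triangle corner; the
+  * vertex buffers used here need a single index stream, so corners sharing
+  * both the same position index and the same st coordinates are merged into
+  * one output vertex, with finalIndices[] holding the flattened index. */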
for (int i = 0; i < numindices; i++) { + if (remap[i] != 0xFFFF) { + continue; // already remapped + } + + for (int j = i + 1; j < numindices; j++) { + if (vertIndices[i] == vertIndices[j] && + (src_tc[tcIndices[i]].s == src_tc[tcIndices[j]].s && + src_tc[tcIndices[i]].t == src_tc[tcIndices[j]].t)) { + // duplicate vertex + remap[j] = i; + finalIndices[j] = numverts; + } + } + + // new vertex + remap[i] = i; + finalIndices[i] = numverts++; + } + + Hunk_Begin(&model->hunk, 50u<<20); + model->type = MOD_ALIAS; + model->nummeshes = 1; + model->numframes = header.num_frames; + model->meshes = MOD_Malloc(sizeof(maliasmesh_t)); + model->frames = MOD_Malloc(header.num_frames * sizeof(maliasframe_t)); + + dst_mesh = model->meshes; + dst_mesh->numtris = numindices / 3; + dst_mesh->numindices = numindices; + dst_mesh->numverts = numverts; + dst_mesh->numskins = header.num_skins; + dst_mesh->positions = MOD_Malloc(numverts * header.num_frames * sizeof(vec3_t)); + dst_mesh->normals = MOD_Malloc(numverts * header.num_frames * sizeof(vec3_t)); + dst_mesh->tex_coords = MOD_Malloc(numverts * header.num_frames * sizeof(vec2_t)); + dst_mesh->indices = MOD_Malloc(numindices * sizeof(int)); + + if (dst_mesh->numtris != header.num_tris) { + Com_DPrintf("%s has %d bad triangles\n", model->name, header.num_tris - dst_mesh->numtris); + } + + // store final triangle indices + for (int i = 0; i < numindices; i++) { + dst_mesh->indices[i] = finalIndices[i]; + } + + // load all skins + src_skin = (char *)rawdata + header.ofs_skins; + for (int i = 0; i < header.num_skins; i++) { + if (!Q_memccpy(skinname, src_skin, 0, sizeof(skinname))) { + ret = Q_ERR_STRING_TRUNCATED; + goto fail; + } + FS_NormalizePath(skinname, skinname); + dst_mesh->skins[i] = IMG_Find(skinname, IT_SKIN, IF_NONE); + src_skin += MD2_MAX_SKINNAME; + } + + // load all tcoords + src_tc = (dmd2stvert_t *)((byte *)rawdata + header.ofs_st); + scale_s = 1.0f / header.skinwidth; + scale_t = 1.0f / header.skinheight; + + // load all frames + src_frame = (dmd2frame_t *)((byte *)rawdata + header.ofs_frames); + dst_frame = model->frames; + for (int j = 0; j < header.num_frames; j++) { + LittleVector(src_frame->scale, dst_frame->scale); + LittleVector(src_frame->translate, dst_frame->translate); + + // load frame vertices + ClearBounds(mins, maxs); + for (int i = 0; i < numindices; i++) { + if (remap[i] != i) { + continue; + } + src_vert = &src_frame->verts[vertIndices[i]]; + vec3_t *dst_pos = &dst_mesh->positions [j * numverts + finalIndices[i]]; + vec3_t *dst_nrm = &dst_mesh->normals [j * numverts + finalIndices[i]]; + vec2_t *dst_tc = &dst_mesh->tex_coords[j * numverts + finalIndices[i]]; + + (*dst_tc)[0] = scale_s * src_tc[tcIndices[i]].s; + (*dst_tc)[1] = scale_t * src_tc[tcIndices[i]].t; + + (*dst_pos)[0] = src_vert->v[0] * dst_frame->scale[0] + dst_frame->translate[0]; + (*dst_pos)[1] = src_vert->v[1] * dst_frame->scale[1] + dst_frame->translate[1]; + (*dst_pos)[2] = src_vert->v[2] * dst_frame->scale[2] + dst_frame->translate[2]; + + (*dst_nrm)[0] = 0.0f; + (*dst_nrm)[1] = 0.0f; + (*dst_nrm)[2] = 0.0f; + + val = src_vert->lightnormalindex; + if (val < NUMVERTEXNORMALS) { + (*dst_nrm)[0] = bytedirs[val][0]; + (*dst_nrm)[1] = bytedirs[val][1]; + (*dst_nrm)[2] = bytedirs[val][2]; + } + + for (int k = 0; k < 3; k++) { + val = (*dst_pos)[k]; + if (val < mins[k]) + mins[k] = val; + if (val > maxs[k]) + maxs[k] = val; + } + } + + VectorVectorScale(mins, dst_frame->scale, mins); + VectorVectorScale(maxs, dst_frame->scale, maxs); + + dst_frame->radius = 
RadiusFromBounds(mins, maxs); + + VectorAdd(mins, dst_frame->translate, dst_frame->bounds[0]); + VectorAdd(maxs, dst_frame->translate, dst_frame->bounds[1]); + + src_frame = (dmd2frame_t *)((byte *)src_frame + header.framesize); + dst_frame++; + } + + // fix winding order + for (int i = 0; i < dst_mesh->numindices; i += 3) { + int tmp = dst_mesh->indices[i + 1]; + dst_mesh->indices[i + 1] = dst_mesh->indices[i + 2]; + dst_mesh->indices[i + 2] = tmp; + } + + Hunk_End(&model->hunk); + return Q_ERR_SUCCESS; + +fail: + Hunk_Free(&model->hunk); + return ret; +} + +#if USE_MD3 +static qerror_t MOD_LoadMD3Mesh(model_t *model, maliasmesh_t *mesh, + const byte *rawdata, size_t length, size_t *offset_p) +{ + dmd3mesh_t header; + size_t end; + dmd3vertex_t *src_vert; + dmd3coord_t *src_tc; + dmd3skin_t *src_skin; + uint32_t *src_idx; + maliasvert_t *dst_vert; + maliastc_t *dst_tc; + int *dst_idx; + uint32_t index; + char skinname[MAX_QPATH]; + int i; + + if (length < sizeof(header)) + return Q_ERR_BAD_EXTENT; + + // byte swap the header + header = *(dmd3mesh_t *)rawdata; + for (i = 0; i < sizeof(header) / 4; i++) + ((uint32_t *)&header)[i] = LittleLong(((uint32_t *)&header)[i]); + + if (header.meshsize < sizeof(header) || header.meshsize > length) + return Q_ERR_BAD_EXTENT; + if (header.num_verts < 3) + return Q_ERR_TOO_FEW; + if (header.num_verts > TESS_MAX_VERTICES) + return Q_ERR_TOO_MANY; + if (header.num_tris < 1) + return Q_ERR_TOO_FEW; + if (header.num_tris > TESS_MAX_INDICES / 3) + return Q_ERR_TOO_MANY; + if (header.num_skins > MAX_ALIAS_SKINS) + return Q_ERR_TOO_MANY; + end = header.ofs_skins + header.num_skins * sizeof(dmd3skin_t); + if (end < header.ofs_skins || end > length) + return Q_ERR_BAD_EXTENT; + end = header.ofs_verts + header.num_verts * model->numframes * sizeof(dmd3vertex_t); + if (end < header.ofs_verts || end > length) + return Q_ERR_BAD_EXTENT; + end = header.ofs_tcs + header.num_verts * sizeof(dmd3coord_t); + if (end < header.ofs_tcs || end > length) + return Q_ERR_BAD_EXTENT; + end = header.ofs_indexes + header.num_tris * 3 * sizeof(uint32_t); + if (end < header.ofs_indexes || end > length) + return Q_ERR_BAD_EXTENT; + + mesh->numtris = header.num_tris; + mesh->numindices = header.num_tris * 3; + mesh->numverts = header.num_verts; + mesh->numskins = header.num_skins; + mesh->verts = MOD_Malloc(sizeof(maliasvert_t) * header.num_verts * model->numframes); + mesh->tcoords = MOD_Malloc(sizeof(maliastc_t) * header.num_verts); + mesh->indices = MOD_Malloc(sizeof(int) * header.num_tris * 3); + + // load all skins + src_skin = (dmd3skin_t *)(rawdata + header.ofs_skins); + for (i = 0; i < header.num_skins; i++) { + if (!Q_memccpy(skinname, src_skin->name, 0, sizeof(skinname))) + return Q_ERR_STRING_TRUNCATED; + FS_NormalizePath(skinname, skinname); + mesh->skins[i] = IMG_Find(skinname, IT_SKIN, IF_NONE); + } + + // load all vertices + src_vert = (dmd3vertex_t *)(rawdata + header.ofs_verts); + dst_vert = mesh->verts; + for (i = 0; i < header.num_verts * model->numframes; i++) { + dst_vert->pos[0] = (int16_t)LittleShort(src_vert->point[0]); + dst_vert->pos[1] = (int16_t)LittleShort(src_vert->point[1]); + dst_vert->pos[2] = (int16_t)LittleShort(src_vert->point[2]); + + dst_vert->norm[0] = src_vert->norm[0]; + dst_vert->norm[1] = src_vert->norm[1]; + assert(!"need to convert from latlong to cartesian"); + + src_vert++; dst_vert++; + } + + // load all texture coords + src_tc = (dmd3coord_t *)(rawdata + header.ofs_tcs); + dst_tc = mesh->tcoords; + for (i = 0; i < header.num_verts; i++) 
{ + dst_tc->st[0] = LittleFloat(src_tc->st[0]); + dst_tc->st[1] = LittleFloat(src_tc->st[1]); + src_tc++; dst_tc++; + } + + // load all triangle indices + src_idx = (uint32_t *)(rawdata + header.ofs_indexes); + dst_idx = mesh->indices; + for (i = 0; i < header.num_tris * 3; i++) { + index = LittleLong(*src_idx++); + if (index >= header.num_verts) + return Q_ERR_BAD_INDEX; + *dst_idx++ = index; + } + + *offset_p = header.meshsize; + return Q_ERR_SUCCESS; +} + +qerror_t MOD_LoadMD3(model_t *model, const void *rawdata, size_t length) +{ + dmd3header_t header; + size_t end, offset, remaining; + dmd3frame_t *src_frame; + maliasframe_t *dst_frame; + const byte *src_mesh; + int i; + qerror_t ret; + + if (length < sizeof(header)) + return Q_ERR_FILE_TOO_SMALL; + + // byte swap the header + header = *(dmd3header_t *)rawdata; + for (i = 0; i < sizeof(header) / 4; i++) + ((uint32_t *)&header)[i] = LittleLong(((uint32_t *)&header)[i]); + + if (header.ident != MD3_IDENT) + return Q_ERR_UNKNOWN_FORMAT; + if (header.version != MD3_VERSION) + return Q_ERR_UNKNOWN_FORMAT; + if (header.num_frames < 1) + return Q_ERR_TOO_FEW; + if (header.num_frames > MD3_MAX_FRAMES) + return Q_ERR_TOO_MANY; + end = header.ofs_frames + sizeof(dmd3frame_t) * header.num_frames; + if (end < header.ofs_frames || end > length) + return Q_ERR_BAD_EXTENT; + if (header.num_meshes < 1) + return Q_ERR_TOO_FEW; + if (header.num_meshes > MD3_MAX_MESHES) + return Q_ERR_TOO_MANY; + if (header.ofs_meshes > length) + return Q_ERR_BAD_EXTENT; + + Hunk_Begin(&model->hunk, 0x400000); + model->type = MOD_ALIAS; + model->numframes = header.num_frames; + model->nummeshes = header.num_meshes; + model->meshes = MOD_Malloc(sizeof(maliasmesh_t) * header.num_meshes); + model->frames = MOD_Malloc(sizeof(maliasframe_t) * header.num_frames); + + // load all frames + src_frame = (dmd3frame_t *)((byte *)rawdata + header.ofs_frames); + dst_frame = model->frames; + for (i = 0; i < header.num_frames; i++) { + LittleVector(src_frame->translate, dst_frame->translate); + VectorSet(dst_frame->scale, MD3_XYZ_SCALE, MD3_XYZ_SCALE, MD3_XYZ_SCALE); + + LittleVector(src_frame->mins, dst_frame->bounds[0]); + LittleVector(src_frame->maxs, dst_frame->bounds[1]); + dst_frame->radius = LittleFloat(src_frame->radius); + + src_frame++; dst_frame++; + } + + // load all meshes + src_mesh = (const byte *)rawdata + header.ofs_meshes; + remaining = length - header.ofs_meshes; + for (i = 0; i < header.num_meshes; i++) { + ret = MOD_LoadMD3Mesh(model, &model->meshes[i], src_mesh, remaining, &offset); + if (ret) + goto fail; + src_mesh += offset; + remaining -= offset; + } + + Hunk_End(&model->hunk); + return Q_ERR_SUCCESS; + +fail: + Hunk_Free(&model->hunk); + return ret; +} +#endif + +void MOD_Reference(model_t *model) +{ + int i, j; + + // register any images used by the models + switch (model->type) { + case MOD_ALIAS: + for (i = 0; i < model->nummeshes; i++) { + maliasmesh_t *mesh = &model->meshes[i]; + for (j = 0; j < mesh->numskins; j++) { + mesh->skins[j]->registration_sequence = registration_sequence; + } + } + break; + case MOD_SPRITE: + for (i = 0; i < model->numframes; i++) { + model->spriteframes[i].image->registration_sequence = registration_sequence; + } + break; + case MOD_EMPTY: + break; + default: + Com_Error(ERR_FATAL, "%s: bad model type", __func__); + } + + model->registration_sequence = registration_sequence; +} + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/path_tracer.c b/src/refresh/vkpt/path_tracer.c new file mode 100644 
index 0000000..bfeac66 --- /dev/null +++ b/src/refresh/vkpt/path_tracer.c @@ -0,0 +1,729 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#include "shared/shared.h" +#include "vkpt.h" +#include "vk_util.h" + +#include + +#define RAY_GEN_ACCEL_STRUCTURE_BINDING_IDX 0 + +#define SIZE_SCRATCH_BUFFER (1 << 24) +/* not really a changeable parameter! */ +#define NUM_INSTANCES 2 + +static VkPhysicalDeviceRayTracingPropertiesNV rt_properties = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV, + .pNext = NULL, + .maxRecursionDepth = 0, /* updated during init */ + .shaderGroupHandleSize = 0 +}; + +static BufferResource_t buf_accel_scratch; +static BufferResource_t buf_instances [MAX_SWAPCHAIN_IMAGES]; +static VkAccelerationStructureNV accel_static; +static VkAccelerationStructureNV accel_dynamic [MAX_SWAPCHAIN_IMAGES]; +static VkAccelerationStructureNV accel_top [MAX_SWAPCHAIN_IMAGES]; +static VkDeviceMemory mem_accel_static; +static VkDeviceMemory mem_accel_top [MAX_SWAPCHAIN_IMAGES]; +static VkDeviceMemory mem_accel_dynamic[MAX_SWAPCHAIN_IMAGES]; + +static BufferResource_t buf_shader_binding_table, buf_shader_binding_table_rtx; + +static VkDescriptorPool rt_descriptor_pool; +static VkDescriptorSet rt_descriptor_set[MAX_SWAPCHAIN_IMAGES]; +static VkDescriptorSetLayout rt_descriptor_set_layout; +static VkPipelineLayout rt_pipeline_layout; +static VkPipeline rt_pipeline, rt_pipeline_rtx; + +typedef struct QvkGeometryInstance_s { + float transform[12]; + uint32_t instance_id : 24; + uint32_t mask : 8; + uint32_t instance_offset : 24; + uint32_t flags : 8; + uint64_t acceleration_structure_handle; +} QvkGeometryInstance_t; + + +#define MEM_BARRIER_BUILD_ACCEL(cmd_buf, ...) 
\ + do { \ + VkMemoryBarrier mem_barrier = { \ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, \ + .srcAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV \ + | VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV, \ + .dstAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV \ + | VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV, \ + __VA_ARGS__ \ + }; \ + \ + vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, \ + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, 0, 1, \ + &mem_barrier, 0, 0, 0, 0); \ + } while(0) + +VkResult +vkpt_pt_init() +{ + VkPhysicalDeviceProperties2 dev_props2 = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + .pNext = &rt_properties, + }; + + vkGetPhysicalDeviceProperties2(qvk.physical_device, &dev_props2); + + Com_Printf("Maximum recursion depth: %d\n", rt_properties.maxRecursionDepth); + Com_Printf("Shader group handle size: %d\n", rt_properties.shaderGroupHandleSize); + + buffer_create(&buf_accel_scratch, SIZE_SCRATCH_BUFFER, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + buffer_create(buf_instances + i, NUM_INSTANCES * sizeof(QvkGeometryInstance_t), VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + } + + /* create descriptor set layout */ + VkDescriptorSetLayoutBinding bindings[] = { + { + .binding = RAY_GEN_ACCEL_STRUCTURE_BINDING_IDX, + .descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV, + .descriptorCount = 1, + .stageFlags = VK_SHADER_STAGE_RAYGEN_BIT_NV, + }, + }; + + VkDescriptorSetLayoutCreateInfo layout_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = LENGTH(bindings), + .pBindings = bindings + }; + _VK(vkCreateDescriptorSetLayout(qvk.device, &layout_info, NULL, &rt_descriptor_set_layout)); + ATTACH_LABEL_VARIABLE(rt_descriptor_set_layout, DESCRIPTOR_SET_LAYOUT); + + + VkDescriptorSetLayout desc_set_layouts[] = { + rt_descriptor_set_layout, + qvk.desc_set_layout_ubo, + qvk.desc_set_layout_textures, + qvk.desc_set_layout_vertex_buffer, + qvk.desc_set_layout_light_hierarchy + }; + + /* create pipeline */ + VkPipelineLayoutCreateInfo pipeline_layout_create_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, + .setLayoutCount = LENGTH(desc_set_layouts), + .pSetLayouts = desc_set_layouts, + }; + + _VK(vkCreatePipelineLayout(qvk.device, &pipeline_layout_create_info, NULL, &rt_pipeline_layout)); + ATTACH_LABEL_VARIABLE(rt_pipeline_layout, PIPELINE_LAYOUT); + + VkDescriptorPoolSize pool_sizes[] = { + { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, MAX_SWAPCHAIN_IMAGES }, + { VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV, MAX_SWAPCHAIN_IMAGES } + }; + + VkDescriptorPoolCreateInfo pool_create_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .maxSets = MAX_SWAPCHAIN_IMAGES, + .poolSizeCount = LENGTH(pool_sizes), + .pPoolSizes = pool_sizes + }; + + _VK(vkCreateDescriptorPool(qvk.device, &pool_create_info, NULL, &rt_descriptor_pool)); + ATTACH_LABEL_VARIABLE(rt_descriptor_pool, DESCRIPTOR_POOL); + + VkDescriptorSetAllocateInfo descriptor_set_alloc_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + .descriptorPool = rt_descriptor_pool, + .descriptorSetCount = 1, + .pSetLayouts = &rt_descriptor_set_layout, + }; + + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + _VK(vkAllocateDescriptorSets(qvk.device, &descriptor_set_alloc_info, rt_descriptor_set + i)); + 
ATTACH_LABEL_VARIABLE(rt_descriptor_set[i], DESCRIPTOR_SET); + } + + return VK_SUCCESS; +} + +VkResult +vkpt_pt_update_descripter_set_bindings(int idx) +{ + /* update descriptor set bindings */ + VkWriteDescriptorSetAccelerationStructureNV desc_accel_struct_info = { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV, + .accelerationStructureCount = 1, + .pAccelerationStructures = accel_top + idx + }; + + VkWriteDescriptorSet write_desc_accel = { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .pNext = &desc_accel_struct_info, + .dstSet = rt_descriptor_set[idx], + .dstBinding = RAY_GEN_ACCEL_STRUCTURE_BINDING_IDX, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV + }; + + vkUpdateDescriptorSets(qvk.device, 1, &write_desc_accel, 0, NULL); + + return VK_SUCCESS; +} + + +static size_t +get_scratch_buffer_size(VkAccelerationStructureNV ac) +{ + VkAccelerationStructureMemoryRequirementsInfoNV mem_req_info = { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV, + .accelerationStructure = ac, + .type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV + }; + + VkMemoryRequirements2 mem_req; + qvkGetAccelerationStructureMemoryRequirementsNV(qvk.device, &mem_req_info, &mem_req); + + return mem_req.memoryRequirements.size; +} + +static inline VkGeometryNV +get_geometry(VkBuffer buffer, size_t offset, uint32_t num_vertices) +{ + size_t size_per_vertex = sizeof(float) * 3; + VkGeometryNV geometry = { + .sType = VK_STRUCTURE_TYPE_GEOMETRY_NV, + .geometry = { + .triangles = { + .sType = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV, + .vertexData = buffer, + .vertexOffset = offset, + .vertexCount = num_vertices, + .vertexStride = size_per_vertex, + .vertexFormat = VK_FORMAT_R32G32B32_SFLOAT, + .indexType = VK_INDEX_TYPE_NONE_NV, + .indexCount = num_vertices, /* idiotic, doesn't work without */ + }, + .aabbs = { .sType = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV } + } + }; + return geometry; +} + +VkResult +vkpt_pt_destroy_static() +{ + if(mem_accel_static) { + vkFreeMemory(qvk.device, mem_accel_static, NULL); + mem_accel_static = VK_NULL_HANDLE; + } + if(accel_static) { + qvkDestroyAccelerationStructureNV(qvk.device, accel_static, NULL); + accel_static = VK_NULL_HANDLE; + } + return VK_SUCCESS; +} + +VkResult +vkpt_pt_destroy_dynamic(int idx) +{ + if(mem_accel_dynamic[idx]) { + vkFreeMemory(qvk.device, mem_accel_dynamic[idx], NULL); + mem_accel_dynamic[idx] = VK_NULL_HANDLE; + } + if(accel_dynamic[idx]) { + qvkDestroyAccelerationStructureNV(qvk.device, accel_dynamic[idx], NULL); + accel_dynamic[idx] = VK_NULL_HANDLE; + } + return VK_SUCCESS; +} + +static VkResult +vkpt_pt_create_accel_bottom( + VkBuffer vertex_buffer, + size_t buffer_offset, + int num_vertices, + VkAccelerationStructureNV *accel, + VkDeviceMemory *mem_accel, + VkCommandBuffer cmd_buf + ) +{ + assert(accel); + assert(!*accel); + assert(mem_accel); + assert(!*mem_accel); + + VkGeometryNV geometry = get_geometry(vertex_buffer, buffer_offset, num_vertices); + + VkAccelerationStructureCreateInfoNV accel_create_info = { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV, + .info = { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV, + .instanceCount = 0, + .geometryCount = 1, + .pGeometries = &geometry, + .type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV + } + }; + + qvkCreateAccelerationStructureNV(qvk.device, &accel_create_info, NULL, accel); + + 
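+ /* the freshly created bottom-level acceleration structure has no backing
+  * storage yet: query its memory requirements, allocate device-local memory,
+  * bind it, then record the build below using the shared scratch buffer. */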
VkAccelerationStructureMemoryRequirementsInfoNV mem_req_info = { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV, + .accelerationStructure = *accel, + .type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV + }; + VkMemoryRequirements2 mem_req; + qvkGetAccelerationStructureMemoryRequirementsNV(qvk.device, &mem_req_info, &mem_req); + + VkMemoryAllocateInfo mem_alloc_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = mem_req.memoryRequirements.size, + .memoryTypeIndex = get_memory_type(mem_req.memoryRequirements.memoryTypeBits, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + }; + + _VK(vkAllocateMemory(qvk.device, &mem_alloc_info, NULL, mem_accel)); + + VkBindAccelerationStructureMemoryInfoNV bind_info = { + .sType = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV, + .accelerationStructure = *accel, + .memory = *mem_accel, + }; + + _VK(qvkBindAccelerationStructureMemoryNV(qvk.device, 1, &bind_info)); + + assert(get_scratch_buffer_size(*accel) < SIZE_SCRATCH_BUFFER); + + VkAccelerationStructureInfoNV as_info = { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV, + .type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV, + .geometryCount = 1, + .pGeometries = &geometry, + /* setting .flags here somehow screws up the build */ + }; + + qvkCmdBuildAccelerationStructureNV(cmd_buf, &as_info, + VK_NULL_HANDLE, /* instance buffer */ + 0 /* instance offset */, + VK_FALSE, /* update */ + *accel, + VK_NULL_HANDLE, /* source acceleration structure ?? */ + buf_accel_scratch.buffer, + 0 /* scratch offset */); + + MEM_BARRIER_BUILD_ACCEL(cmd_buf); + + return VK_SUCCESS; +} + +VkResult +vkpt_pt_create_static( + VkBuffer vertex_buffer, + size_t buffer_offset, + int num_vertices + ) +{ + VkCommandBufferAllocateInfo cmd_buf_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .commandPool = qvk.command_pool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandBufferCount = 1, + }; + + VkCommandBuffer cmd_buf; + _VK(vkAllocateCommandBuffers(qvk.device, &cmd_buf_info, &cmd_buf)); + + VkCommandBufferBeginInfo cmd_begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + + vkBeginCommandBuffer(cmd_buf, &cmd_begin_info); + + VkResult ret = vkpt_pt_create_accel_bottom( + vertex_buffer, + buffer_offset, + num_vertices, + &accel_static, + &mem_accel_static, + cmd_buf); + + vkEndCommandBuffer(cmd_buf); + + VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = &cmd_buf, + }; + + vkQueueSubmit(qvk.queue_graphics, 1, &submit_info, VK_NULL_HANDLE); + vkQueueWaitIdle(qvk.queue_graphics); + + vkFreeCommandBuffers(qvk.device, qvk.command_pool, 1, &cmd_buf); + + return ret; +} + +VkResult +vkpt_pt_create_dynamic( + int idx, + VkBuffer vertex_buffer, + size_t buffer_offset, + int num_vertices + ) +{ + return vkpt_pt_create_accel_bottom( + vertex_buffer, + buffer_offset, + num_vertices, + accel_dynamic + idx, + mem_accel_dynamic + idx, + qvk.cmd_buf_current); +} + +void +vkpt_pt_destroy_toplevel(int idx) +{ + if(accel_top[idx]) { + qvkDestroyAccelerationStructureNV(qvk.device, accel_top[idx], NULL); + accel_top[idx] = VK_NULL_HANDLE; + } + if(mem_accel_top[idx]) { + vkFreeMemory(qvk.device, mem_accel_top[idx], NULL); + mem_accel_top[idx] = VK_NULL_HANDLE; + } +} + +VkResult +vkpt_pt_create_toplevel(int idx) +{ + vkpt_pt_destroy_toplevel(idx); + + QvkGeometryInstance_t 
instances[NUM_INSTANCES] = { + { + .transform = { + 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + }, + .instance_id = 0, + .mask = 0xff, /* ??? */ + .instance_offset = 0, /* ??? */ + //.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV, + .acceleration_structure_handle = 1337, // will be overwritten + }, + { + .transform = { + 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + }, + .instance_id = 0, + .mask = 0xff, /* ??? */ + .instance_offset = 0, /* ??? */ + //.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV, + .acceleration_structure_handle = 1337, // will be overwritten + }, + }; + + assert(NUM_INSTANCES == 2); + + _VK(qvkGetAccelerationStructureHandleNV(qvk.device, accel_static, sizeof(uint64_t), + &instances[0].acceleration_structure_handle)); + + _VK(qvkGetAccelerationStructureHandleNV(qvk.device, accel_dynamic[idx], sizeof(uint64_t), + &instances[1].acceleration_structure_handle)); + + void *instance_data = buffer_map(buf_instances + idx); + memcpy(instance_data, &instances, sizeof(QvkGeometryInstance_t) * NUM_INSTANCES); + + buffer_unmap(buf_instances + idx); + instance_data = NULL; + + VkAccelerationStructureCreateInfoNV accel_create_info = { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV, + .info = { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV, + .instanceCount = 1, + .geometryCount = 0, + .pGeometries = NULL, + .type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV + } + }; + + qvkCreateAccelerationStructureNV(qvk.device, &accel_create_info, NULL, accel_top + idx); + + /* XXX: do allocation only once with safety margin */ + VkAccelerationStructureMemoryRequirementsInfoNV mem_req_info = { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV, + .accelerationStructure = accel_top[idx], + .type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV + }; + VkMemoryRequirements2 mem_req; + qvkGetAccelerationStructureMemoryRequirementsNV(qvk.device, &mem_req_info, &mem_req); + + VkMemoryAllocateInfo mem_alloc_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = mem_req.memoryRequirements.size, + .memoryTypeIndex = get_memory_type(mem_req.memoryRequirements.memoryTypeBits, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + }; + + _VK(vkAllocateMemory(qvk.device, &mem_alloc_info, NULL, mem_accel_top + idx)); + + VkBindAccelerationStructureMemoryInfoNV bind_info = { + .sType = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV, + .accelerationStructure = accel_top[idx], + .memory = mem_accel_top[idx], + }; + + _VK(qvkBindAccelerationStructureMemoryNV(qvk.device, 1, &bind_info)); + + assert(get_scratch_buffer_size(accel_top[idx]) < SIZE_SCRATCH_BUFFER); + + VkAccelerationStructureInfoNV as_info = { + .sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV, + .type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV, + .geometryCount = 0, + .pGeometries = NULL, + .instanceCount = NUM_INSTANCES, + }; + + qvkCmdBuildAccelerationStructureNV( + qvk.cmd_buf_current, + &as_info, + buf_instances[idx].buffer, /* instance buffer */ + 0 /* instance offset */, + VK_FALSE, /* update */ + accel_top[idx], + VK_NULL_HANDLE, /* source acceleration structure ?? 
*/ + buf_accel_scratch.buffer, + 0 /* scratch offset */); + + MEM_BARRIER_BUILD_ACCEL(qvk.cmd_buf_current); /* probably not needed here but doesn't matter */ + + return VK_SUCCESS; +} + +VkResult +vkpt_pt_record_cmd_buffer(VkCommandBuffer cmd_buf, uint32_t frame_num) +{ + VkImageSubresourceRange subresource_range = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + }; + + IMAGE_BARRIER(cmd_buf, + .image = qvk.images[VKPT_IMG_PT_COLOR_A], + .subresourceRange = subresource_range, + .srcAccessMask = 0, + .dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT, + .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .newLayout = VK_IMAGE_LAYOUT_GENERAL + ); + + vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, + rt_pipeline_layout, 0, 1, rt_descriptor_set + qvk.current_image_index, 0, 0); + + vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, + rt_pipeline_layout, 1, 1, qvk.desc_set_ubo + qvk.current_image_index, 0, 0); + + vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, + rt_pipeline_layout, 2, 1, &qvk.desc_set_textures, 0, 0); + + vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, + rt_pipeline_layout, 3, 1, &qvk.desc_set_vertex_buffer, 0, 0); + + vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, + rt_pipeline_layout, 4, 1, qvk.desc_set_light_hierarchy + qvk.current_image_index, 0, 0); + + if(!strcmp(cvar_rtx->string, "on")) { + vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, rt_pipeline_rtx); + qvkCmdTraceRaysNV(cmd_buf, + buf_shader_binding_table_rtx.buffer, 0, + buf_shader_binding_table_rtx.buffer, 2 * rt_properties.shaderGroupHandleSize, rt_properties.shaderGroupHandleSize, + buf_shader_binding_table_rtx.buffer, 1 * rt_properties.shaderGroupHandleSize, rt_properties.shaderGroupHandleSize, + VK_NULL_HANDLE, 0, 0, + qvk.extent.width, qvk.extent.height, 1); + } + else { + vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, rt_pipeline); + qvkCmdTraceRaysNV(cmd_buf, + buf_shader_binding_table.buffer, 0, + buf_shader_binding_table.buffer, 2 * rt_properties.shaderGroupHandleSize, rt_properties.shaderGroupHandleSize, + buf_shader_binding_table.buffer, 1 * rt_properties.shaderGroupHandleSize, rt_properties.shaderGroupHandleSize, + VK_NULL_HANDLE, 0, 0, + qvk.extent.width, qvk.extent.height, 1); + } + + +#define BARRIER_COMPUTE(img) \ + do { \ + VkImageSubresourceRange subresource_range = { \ + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, \ + .baseMipLevel = 0, \ + .levelCount = 1, \ + .baseArrayLayer = 0, \ + .layerCount = 1 \ + }; \ + IMAGE_BARRIER(cmd_buf, \ + .image = img, \ + .subresourceRange = subresource_range, \ + .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, \ + .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, \ + .oldLayout = VK_IMAGE_LAYOUT_GENERAL, \ + .newLayout = VK_IMAGE_LAYOUT_GENERAL, \ + ); \ + } while(0) + + BARRIER_COMPUTE(qvk.images[VKPT_IMG_PT_COLOR_A]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_PT_ALBEDO]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_PT_MOTION]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_PT_DEPTH_NORMAL_A]); + BARRIER_COMPUTE(qvk.images[VKPT_IMG_PT_DEPTH_NORMAL_B]); + + return VK_SUCCESS; +} + +VkResult +vkpt_pt_destroy() +{ + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + vkpt_pt_destroy_toplevel(i); + buffer_destroy(buf_instances + i); + vkpt_pt_destroy_dynamic(i); + } + vkpt_pt_destroy_static(); + buffer_destroy(&buf_accel_scratch); + vkDestroyDescriptorSetLayout(qvk.device, 
rt_descriptor_set_layout, NULL); + vkDestroyPipelineLayout(qvk.device, rt_pipeline_layout, NULL); + vkDestroyDescriptorPool(qvk.device, rt_descriptor_pool, NULL); + return VK_SUCCESS; +} + +VkResult +vkpt_pt_create_pipelines() +{ + VkPipelineShaderStageCreateInfo shader_stages[] = { + SHADER_STAGE(QVK_MOD_PATH_TRACER_RGEN, VK_SHADER_STAGE_RAYGEN_BIT_NV ), + SHADER_STAGE(QVK_MOD_PATH_TRACER_RCHIT, VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV), + SHADER_STAGE(QVK_MOD_PATH_TRACER_RMISS, VK_SHADER_STAGE_MISS_BIT_NV ), + }; + + VkPipelineShaderStageCreateInfo shader_stages_rtx[] = { + SHADER_STAGE(QVK_MOD_PATH_TRACER_RTX_RGEN, VK_SHADER_STAGE_RAYGEN_BIT_NV ), + SHADER_STAGE(QVK_MOD_PATH_TRACER_RTX_RCHIT, VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV), + SHADER_STAGE(QVK_MOD_PATH_TRACER_RTX_RMISS, VK_SHADER_STAGE_MISS_BIT_NV ), + }; + + VkRayTracingShaderGroupCreateInfoNV rt_shader_group_info[] = { + { + .sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV, + .type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV, + .generalShader = 0, + .closestHitShader = VK_SHADER_UNUSED_NV, + .anyHitShader = VK_SHADER_UNUSED_NV, + .intersectionShader = VK_SHADER_UNUSED_NV + }, + { + .sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV, + .type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV, + .generalShader = VK_SHADER_UNUSED_NV, + .closestHitShader = 1, + .anyHitShader = VK_SHADER_UNUSED_NV, + .intersectionShader = VK_SHADER_UNUSED_NV + }, + { + .sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV, + .type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV, + .generalShader = 2, + .closestHitShader = VK_SHADER_UNUSED_NV, + .anyHitShader = VK_SHADER_UNUSED_NV, + .intersectionShader = VK_SHADER_UNUSED_NV + }, + }; + + VkRayTracingPipelineCreateInfoNV rt_pipeline_info = { + .sType = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV, + .stageCount = LENGTH(shader_stages), + .pStages = shader_stages, + .groupCount = LENGTH(rt_shader_group_info), + .pGroups = rt_shader_group_info, + .layout = rt_pipeline_layout, + .maxRecursionDepth = 1, + }; + + VkRayTracingPipelineCreateInfoNV rt_pipeline_info_rtx = { + .sType = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV, + .stageCount = LENGTH(shader_stages_rtx), + .pStages = shader_stages_rtx, + .groupCount = LENGTH(rt_shader_group_info), + .pGroups = rt_shader_group_info, + .layout = rt_pipeline_layout, + .maxRecursionDepth = 1, + }; + + _VK(qvkCreateRayTracingPipelinesNV(qvk.device, NULL, 1, &rt_pipeline_info, NULL, &rt_pipeline )); + _VK(qvkCreateRayTracingPipelinesNV(qvk.device, NULL, 1, &rt_pipeline_info_rtx, NULL, &rt_pipeline_rtx)); + + uint32_t num_groups = LENGTH(rt_shader_group_info); + uint32_t shader_binding_table_size = rt_properties.shaderGroupHandleSize * num_groups; + + /* pt */ + _VK(buffer_create(&buf_shader_binding_table, shader_binding_table_size, + VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)); + + void *shader_binding_table = buffer_map(&buf_shader_binding_table); + _VK(qvkGetRayTracingShaderGroupHandlesNV(qvk.device, rt_pipeline, 0, num_groups, + shader_binding_table_size, shader_binding_table)); + buffer_unmap(&buf_shader_binding_table); + shader_binding_table = NULL; + + /* rtx on */ + _VK(buffer_create(&buf_shader_binding_table_rtx, shader_binding_table_size, + VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)); + + shader_binding_table = buffer_map(&buf_shader_binding_table_rtx); + _VK(qvkGetRayTracingShaderGroupHandlesNV(qvk.device, rt_pipeline_rtx, 0, num_groups, 
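+	/* Note on the shader binding table layout built here: the group handles are
+	   written tightly packed in shader-group order (0 = raygen, 1 = triangle hit
+	   group, 2 = miss), which is why vkpt_pt_record_cmd_buffer() above passes an
+	   offset of 0 for the raygen entry, 2 * shaderGroupHandleSize for the miss
+	   entry and 1 * shaderGroupHandleSize for the hit group, with a stride of
+	   one handle for the miss and hit tables. */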
+ shader_binding_table_size, shader_binding_table)); + buffer_unmap(&buf_shader_binding_table_rtx); + shader_binding_table = NULL; + + return VK_SUCCESS; +} + +VkResult +vkpt_pt_destroy_pipelines() +{ + buffer_destroy(&buf_shader_binding_table); + vkDestroyPipeline(qvk.device, rt_pipeline, NULL); + + buffer_destroy(&buf_shader_binding_table_rtx); + vkDestroyPipeline(qvk.device, rt_pipeline_rtx, NULL); + + return VK_SUCCESS; +} diff --git a/src/refresh/vkpt/profiler.c b/src/refresh/vkpt/profiler.c new file mode 100644 index 0000000..e7b135e --- /dev/null +++ b/src/refresh/vkpt/profiler.c @@ -0,0 +1,109 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#include "vkpt.h" + +static VkQueryPool query_pool; +static uint64_t query_pool_results[NUM_PROFILER_QUERIES_PER_FRAME]; + +VkResult +vkpt_profiler_initialize() +{ + VkQueryPoolCreateInfo query_pool_info = { + .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, + .queryType = VK_QUERY_TYPE_TIMESTAMP, + .queryCount = MAX_SWAPCHAIN_IMAGES * NUM_PROFILER_ENTRIES * 2, + }; + vkCreateQueryPool(qvk.device, &query_pool_info, NULL, &query_pool); + return VK_SUCCESS; +} + +VkResult +vkpt_profiler_destroy() +{ + vkDestroyQueryPool(qvk.device, query_pool, NULL); + return VK_SUCCESS; +} + +VkResult +vkpt_profiler_query(int idx, VKPTProfilerAction action) +{ + idx = idx * 2 + action + qvk.current_image_index * NUM_PROFILER_QUERIES_PER_FRAME; + vkCmdWriteTimestamp(qvk.cmd_buf_current, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + query_pool, idx); + + return VK_SUCCESS; +} + +VkResult +vkpt_profiler_next_frame(int frame_num) +{ + _VK(vkGetQueryPoolResults(qvk.device, query_pool, + NUM_PROFILER_QUERIES_PER_FRAME * frame_num, + NUM_PROFILER_QUERIES_PER_FRAME, + sizeof(query_pool_results), + query_pool_results, + sizeof(query_pool_results[0]), + VK_QUERY_RESULT_64_BIT)); + + /* + // crashes, need to add!!! queries are undefined if not reset before + vkCmdResetQueryPool(qvk.cmd_buf_current, query_pool, + NUM_PROFILER_QUERIES_PER_FRAME * frame_num, + NUM_PROFILER_QUERIES_PER_FRAME); + */ + + //Com_Printf("%ld %ld\n", query_pool_results[0], query_pool_results[1]); + + //double ms = (double) (query_pool_results[1] - query_pool_results[0]) * 1e-6; + //Com_Printf("%f\n", ms); + + return VK_SUCCESS; +} + +static void +draw_query(int x, int y, qhandle_t font, const char *enum_name, int idx) +{ + char buf[256]; + int i; + for(i = 0; i < LENGTH(buf) - 1 && enum_name[i]; i++) + buf[i] = enum_name[i] == '_' ? 
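+	/* Each profiler entry owns a begin/end timestamp pair, replicated per
+	   swapchain image; that is what the idx * 2 + action +
+	   current_image_index * NUM_PROFILER_QUERIES_PER_FRAME indexing in
+	   vkpt_profiler_query() above encodes. The millisecond conversion below
+	   multiplies the raw tick delta by 1e-6, which is exact only if the
+	   device's timestampPeriod is 1 ns; on other hardware the delta would
+	   presumably need an additional scale by
+	   VkPhysicalDeviceLimits::timestampPeriod. */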
' ' : tolower(enum_name[i]); + buf[i] = 0; + + R_DrawString(x, y, 0, 128, buf, font); + double ms = (double) (query_pool_results[idx * 2 + 1] - query_pool_results[idx * 2 + 0]) * 1e-6; + snprintf(buf, sizeof buf, "%8.2f ms", ms); + R_DrawString(x + 256, y, 0, 128, buf, font); +} + +void +draw_profiler() +{ + int x = 500; + int y = 100; + + qhandle_t font; + font = R_RegisterFont("conchars"); + if(!font) + return; + +#define PROFILER_DO(name, indent) \ + draw_query(x, y, font, #name + 9, name); y += 10; +PROFILER_LIST +#undef PROFILER_DO +} diff --git a/src/refresh/vkpt/shader/asvgf.glsl b/src/refresh/vkpt/shader/asvgf.glsl new file mode 100644 index 0000000..c7faa91 --- /dev/null +++ b/src/refresh/vkpt/shader/asvgf.glsl @@ -0,0 +1,41 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#ifndef _ASVGF_GLSL_ +#define _ASVGF_GLSL_ + +#include "utils.glsl" + +#define STRATUM_OFFSET_SHIFT 3 +#define STRATUM_OFFSET_MASK ((1 << STRATUM_OFFSET_SHIFT) - 1) + +void +read_normal_depth(sampler2D tex, ivec2 p, out float z, out vec3 n) +{ + vec2 v = texelFetch(tex, p, 0).rg; + z = v.x; + n = decode_normal(floatBitsToUint(v.y)); +} + +const float gaussian_kernel[2][2] = { + { 1.0 / 4.0, 1.0 / 8.0 }, + { 1.0 / 8.0, 1.0 / 16.0 } +}; + + +#endif /*_ASVGF_GLSL_*/ diff --git a/src/refresh/vkpt/shader/asvgf_atrous.comp b/src/refresh/vkpt/shader/asvgf_atrous.comp new file mode 100644 index 0000000..018b13f --- /dev/null +++ b/src/refresh/vkpt/shader/asvgf_atrous.comp @@ -0,0 +1,167 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
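+(Note on asvgf.glsl above: gaussian_kernel[][] stores one quadrant of the
+normalized 3x3 binomial kernel, center 1/4, edge 1/8, corner 1/16, and is
+indexed with abs(dx), abs(dy), so a full 3x3 loop over it sums to exactly 1.
+read_normal_depth() unpacks the rg32f depth/normal render targets, where the
+normal is stored as a packed uint reinterpreted from the .y channel.)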
+*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable +#extension GL_EXT_nonuniform_qualifier : enable + +layout(local_size_x = 32, local_size_y = 32, local_size_z = 1) in; + +layout(push_constant, std140) uniform IterationInfo { + uint iteration; +} push; + +#define GLOBAL_UBO_DESC_SET_IDX 0 +#include "global_ubo.h" + +#define GLOBAL_TEXTURES_DESC_SET_IDX 1 +#include "global_textures.h" + +#include "utils.glsl" +#include "asvgf.glsl" + + +float +compute_sigma_luminance(sampler2D img, ivec2 ipos, float center) +{ + const int r = 1; + + //float sum = 0.0; + float sum = center * gaussian_kernel[0][0]; + + for(int yy = -r; yy <= r; yy++) { + for(int xx = -r; xx <= r; xx++) { + if(xx != 0 || yy != 0) + { + ivec2 p = ipos + ivec2(xx, yy); + float variance = texelFetch(img, p, 0).a; + float w = gaussian_kernel[abs(xx)][abs(yy)]; + //float w = 1.0 / float((2 * r + 1) * (2 * r + 1)); + sum += variance * w; + } + } + } + + return max(sum, 1e-8); +} + +vec4 +filter_image(sampler2D img, sampler2D tex_depth_normal, const uint iteration) +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + + float sum_w = 1.0; + + vec3 normal_center; + float depth_center; + read_normal_depth(tex_depth_normal, ipos, depth_center, normal_center); + float fwidth_depth = texelFetch(TEX_PT_MOTION, ipos, 0).w; + vec4 color_center = texelFetch(img, ipos, 0); + float luminance_center = luminance(color_center.rgb); + + //float sigma_l = 2.0 * compute_sigma_luminance(img, ipos); + float sigma_l = 1.0 / (2.0 * compute_sigma_luminance(img, ipos, luminance_center)); + + const int step_size = int(1u << iteration); + + vec3 sum_color = color_center.rgb; + float sum_variance = color_center.a; + + //const int r = iteration > 0 ? 1 : 2; + const int r = 1; + for(int yy = -r; yy <= r; yy++) { + for(int xx = -r; xx <= r; xx++) { + ivec2 p = ipos + ivec2(xx, yy) * step_size; + + if(xx == 0 && yy == 0) + continue; + + float w = float(all(greaterThanEqual(p, ivec2(0))) + && all(lessThan(p, ivec2(global_ubo.width, global_ubo.height)))); + + vec4 c = texelFetch(img, p, 0); + float l = luminance(c.rgb); + float dist_l = abs(luminance_center - l); + + vec3 normal; + float depth; + read_normal_depth(tex_depth_normal, p, depth, normal); + + float dist_z = abs(depth_center - depth) * fwidth_depth; + + w *= pow(max(0.0, dot(normal_center, normal)), 128.0); + w *= exp(-dist_z / float(step_size) - dist_l * dist_l * sigma_l); + + sum_color += c.rgb * w; + sum_variance += c.a * w * w; + sum_w += w; + } + } + sum_color /= sum_w; + sum_variance /= sum_w * sum_w; + + return vec4(sum_color, sum_variance); +} + +vec4 +_filter_image(const sampler2D img, const uint iteration, const uint frame_idx) +{ + if(frame_idx == 0) return filter_image(img, TEX_PT_DEPTH_NORMAL_A, iteration); + if(frame_idx == 1) return filter_image(img, TEX_PT_DEPTH_NORMAL_B, iteration); + return vec4(0); +} + +void +main() +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + if(any(greaterThanEqual(ipos, ivec2(global_ubo.width, global_ubo.height)))) + return; + +#define tex_depth_normal ((global_ubo.current_frame_idx % 1) > 0 ? 
IMG_PT_DEPTH_NORMAL_A : IMG_PT_DEPTH_NORMAL_B) + + const int frame_idx = global_ubo.current_frame_idx % 1; + + vec4 filtered = vec4(0); + switch(push.iteration) { + case 0: filtered = _filter_image(TEX_ASVGF_ATROUS_PING, 0, frame_idx); break; + case 1: filtered = _filter_image(TEX_ASVGF_HIST_COLOR, 1, frame_idx); break; + case 2: filtered = _filter_image(TEX_ASVGF_ATROUS_PING, 2, frame_idx); break; + case 3: filtered = _filter_image(TEX_ASVGF_ATROUS_PONG, 3, frame_idx); break; + case 4: filtered = _filter_image(TEX_ASVGF_ATROUS_PING, 4, frame_idx); break; + } + if(push.iteration == 4) { + filtered.rgb *= texelFetch(TEX_PT_ALBEDO, ipos, 0).rgb; + filtered.rgb *= 4.0; + filtered.rgb -= 0.5; + filtered.rgb *= 1.010; + filtered.rgb += 0.5; + filtered.rgb = clamp(filtered.rgb, vec3(0), vec3(1)); + } + //if(push.iteration == 4) + // filtered = filtered.aaaa; + + switch(push.iteration) { + case 0: imageStore(IMG_ASVGF_HIST_COLOR, ipos, filtered); + case 1: imageStore(IMG_ASVGF_ATROUS_PING, ipos, filtered); + case 2: imageStore(IMG_ASVGF_ATROUS_PONG, ipos, filtered); + case 3: imageStore(IMG_ASVGF_ATROUS_PING, ipos, filtered); + case 4: imageStore(IMG_ASVGF_COLOR, ipos, filtered); + } +} + diff --git a/src/refresh/vkpt/shader/asvgf_fwd_project.comp b/src/refresh/vkpt/shader/asvgf_fwd_project.comp new file mode 100644 index 0000000..6fe5a14 --- /dev/null +++ b/src/refresh/vkpt/shader/asvgf_fwd_project.comp @@ -0,0 +1,133 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
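+(Note on asvgf_atrous.comp above: the five push-constant iterations ping-pong
+between the ASVGF_ATROUS_PING and PONG images, with iteration 0 also writing
+its result to ASVGF_HIST_COLOR so that the once-filtered color doubles as the
+history input of the next frame's temporal pass. The final iteration
+re-modulates the filtered irradiance with PT_ALBEDO, applies the small
+exposure/contrast tweak and writes the result to ASVGF_COLOR, which the TAA
+pass then consumes.)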
+*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable +#extension GL_EXT_nonuniform_qualifier : enable + +layout(local_size_x = 32, local_size_y = 32, local_size_z = 1) in; + +#define GLOBAL_UBO_DESC_SET_IDX 0 +#include "global_ubo.h" + +#define GLOBAL_TEXTURES_DESC_SET_IDX 1 +#include "global_textures.h" + +#define VERTEX_BUFFER_DESC_SET_IDX 2 +#include "vertex_buffer.h" + +#include "asvgf.glsl" +#include "read_visbuf.glsl" + +void +encrypt_tea(inout uvec2 arg) +{ + const uint key[] = { + 0xa341316c, 0xc8013ea4, 0xad90777d, 0x7e95761e + }; + uint v0 = arg[0], v1 = arg[1]; + uint sum = 0; + uint delta = 0x9e3779b9; + + for(int i = 0; i < 16; i++) { // XXX rounds reduced, carefully check if good + //for(int i = 0; i < 32; i++) { + sum += delta; + v0 += ((v1 << 4) + key[0]) ^ (v1 + sum) ^ ((v1 >> 5) + key[1]); + v1 += ((v0 << 4) + key[2]) ^ (v0 + sum) ^ ((v0 >> 5) + key[3]); + } + arg[0] = v0; + arg[1] = v1; +} + +void +main() +{ + ivec2 ipos; + { + uvec2 arg = uvec2(gl_GlobalInvocationID.x + gl_GlobalInvocationID.y * global_ubo.width, + global_ubo.current_frame_idx); + encrypt_tea(arg); + arg %= GRAD_DWN; + + ipos = ivec2(gl_GlobalInvocationID.xy * GRAD_DWN + arg); + } + //ipos = ivec2(gl_GlobalInvocationID.xy * GRAD_DWN + 1); + + if(any(greaterThanEqual(ipos, ivec2(global_ubo.width, global_ubo.height)))) + return; + + vec4 vis_buf = texelFetch(TEX_PT_VISBUF, ipos, 0); + if(any(lessThan(vis_buf.xy, vec2(0)))) + return; + + Triangle triangle; + if(!visbuf_get_triangle_fwdprj(triangle, vis_buf)) + return; + + if((triangle.material_id & BSP_FLAG_LIGHT) > 0) + return; + + vec3 bary; + bary.yz = vis_buf.xy; + bary.x = 1.0 - vis_buf.x - vis_buf.y; + vec3 pos_ws = triangle.positions * bary; + + vec4 pos_cs_curr = global_ubo.VP * vec4(pos_ws, 1.0); + + /* check if reprojection lies outside of current view frustum */ + if(any(lessThan(pos_cs_curr.xyz, -pos_cs_curr.www)) + || any(greaterThan(pos_cs_curr.xyz, pos_cs_curr.www))) + { + return; + } + pos_cs_curr.xyz /= pos_cs_curr.w; + + /* pixel coordinate of forward projected sample */ + ivec2 ipos_curr = ivec2((pos_cs_curr.xy * 0.5 + 0.5) * vec2(global_ubo.width, global_ubo.height)); + + ivec2 pos_grad = ipos_curr / GRAD_DWN; + ivec2 pos_stratum = ipos_curr % GRAD_DWN; + + uint idx_prev = ipos.x + ipos.y * global_ubo.width; + uint gradient_idx = + (1 << 31) /* mark sample as busy */ + | (pos_stratum.x << (STRATUM_OFFSET_SHIFT * 0)) /* encode pos in */ + | (pos_stratum.y << (STRATUM_OFFSET_SHIFT * 1)) /* current frame */ + | (idx_prev << (STRATUM_OFFSET_SHIFT * 2));/* pos in prev frame */ + + /* check if this sample is allowed to become a gradient sample */ + if(imageAtomicCompSwap(IMG_ASVGF_GRAD_SMPL_POS, pos_grad, 0u, gradient_idx) != 0) { + return; + } + + /* forward-project the rng seed */ + if((global_ubo.current_frame_idx & 1) == 0) { + uint rng_prev = texelFetch(TEX_ASVGF_RNG_SEED_B, ipos, 0).x; + imageStore(IMG_ASVGF_RNG_SEED_A, ipos_curr, rng_prev.xxxx); + } + else { + uint rng_prev = texelFetch(TEX_ASVGF_RNG_SEED_A, ipos, 0).x; + imageStore(IMG_ASVGF_RNG_SEED_B, ipos_curr, rng_prev.xxxx); + } + + /* forward-project the clip-space position for handling sub-pixel offsets */ + imageStore(IMG_ASVGF_POS_WS_FWD, pos_grad, vec4(pos_ws, 0.0)); + + vis_buf.z = uintBitsToFloat(map_instance_fwd(floatBitsToUint(vis_buf.z))); + imageStore(IMG_ASVGF_VISBUF_FWD, pos_grad, vis_buf); +} diff --git a/src/refresh/vkpt/shader/asvgf_gradient_atrous.comp b/src/refresh/vkpt/shader/asvgf_gradient_atrous.comp new file mode 100644 index 0000000..f82d9c0 
--- /dev/null +++ b/src/refresh/vkpt/shader/asvgf_gradient_atrous.comp @@ -0,0 +1,147 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable +#extension GL_EXT_nonuniform_qualifier : enable + +layout(local_size_x = 32, local_size_y = 32, local_size_z = 1) in; + +layout(push_constant, std140) uniform IterationInfo { + uint iteration; +} push; + +#define GLOBAL_UBO_DESC_SET_IDX 0 +#include "global_ubo.h" + +#define GLOBAL_TEXTURES_DESC_SET_IDX 1 +#include "global_textures.h" + +#include "utils.glsl" +#include "asvgf.glsl" + + +float +compute_sigma_luminance(sampler2D img, ivec2 ipos) +{ + const int r = 1; + + float sum = 0.0; + //float sum = center * gaussian_kernel[0][0]; + + for(int yy = -r; yy <= r; yy++) { + for(int xx = -r; xx <= r; xx++) { + //if(xx != 0 || yy != 0) + { + ivec2 p = ipos + ivec2(xx, yy); + float variance = texelFetch(img, p, 0).a; + float w = gaussian_kernel[abs(xx)][abs(yy)]; + //float w = 1.0 / float((2 * r + 1) * (2 * r + 1)); + sum += variance * w; + } + } + } + + return max(sum, 1e-8); +} + +vec4 +filter_image(sampler2D img, const uint iteration) +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + + float sum_w = 1.0; + + vec4 color_center = texelFetch(img, ipos, 0); + float luminance_center = min(color_center.b, 8.0); + + //float sigma_l = 2.0 * compute_sigma_luminance(img, ipos); + float sigma_l = 4.0 * compute_sigma_luminance(img, ipos); + + const int step_size = int(1u << iteration); + + vec3 sum_color = color_center.rgb; + float sum_variance = color_center.a; + + + const int r = 1; + for(int yy = -r; yy <= r; yy++) { + for(int xx = -r; xx <= r; xx++) { + ivec2 p = ipos + ivec2(xx, yy) * step_size; + + if(xx == 0 && yy == 0) + continue; + + float w = float(all(greaterThanEqual(p, ivec2(0))) + && all(lessThan(p, ivec2(global_ubo.width, global_ubo.height)))); + + vec4 c = texelFetch(img, p, 0); + float l = c.b; + + float dist_l = abs(luminance_center - l); + + w *= exp(- dist_l * dist_l / sigma_l); + + sum_color += c.rgb * w; + sum_variance += c.a * w * w; + sum_w += w; + } + } + sum_color /= sum_w; + sum_variance /= sum_w * sum_w; + + return vec4(sum_color, sum_variance); +} + +void +main() +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + if(any(greaterThanEqual(ipos, ivec2(global_ubo.width, global_ubo.height)))) + return; + + vec4 filtered = vec4(0); + switch(push.iteration) { + case 0: filtered = filter_image(TEX_ASVGF_GRAD_A, 0); break; + case 1: filtered = filter_image(TEX_ASVGF_GRAD_B, 1); break; + case 2: filtered = filter_image(TEX_ASVGF_GRAD_A, 2); break; + case 3: filtered = filter_image(TEX_ASVGF_GRAD_B, 3); break; + case 4: filtered = filter_image(TEX_ASVGF_GRAD_A, 4); break; + } + + if(push.iteration == 4) { + /* this mirrors the exposure in the actual reconstruction to reduce perceptual lag */ + filtered.rg *= 
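+		/* The steps below mirror the exposure/contrast mapping of the color
+		   reconstruction (see the final iteration of asvgf_atrous.comp), per the
+		   comment above, and then gamma-compress, so that the luminance difference
+		   in .r and the max luminance in .g are compared in roughly the displayed
+		   space. Their ratio, computed a few lines further down, is the normalized
+		   temporal gradient that asvgf_temporal.comp clamps to [0, 1] and uses as
+		   its anti-lag factor. */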
4.0; + filtered.rg -= 0.5; + filtered.rg *= 1.010; + filtered.rg += 0.5; + filtered.rg = pow(abs(filtered.rg), vec2(1.0 / 2.2)); + float a = max(0.0, filtered.r - 1e-1) / (filtered.g + 1e-3); + //float a = filtered.g > 1e-2 ? abs(filtered.r) / filtered.g : 0.0; + filtered.r = a; + } + + switch(push.iteration) { + case 0: imageStore(IMG_ASVGF_GRAD_B, ipos, filtered); + case 1: imageStore(IMG_ASVGF_GRAD_A, ipos, filtered); + case 2: imageStore(IMG_ASVGF_GRAD_B, ipos, filtered); + case 3: imageStore(IMG_ASVGF_GRAD_A, ipos, filtered); + case 4: imageStore(IMG_ASVGF_GRAD_B, ipos, filtered); + } +} + diff --git a/src/refresh/vkpt/shader/asvgf_gradient_img.comp b/src/refresh/vkpt/shader/asvgf_gradient_img.comp new file mode 100644 index 0000000..ce668ec --- /dev/null +++ b/src/refresh/vkpt/shader/asvgf_gradient_img.comp @@ -0,0 +1,114 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable +#extension GL_EXT_nonuniform_qualifier : enable + +layout(local_size_x = 32, local_size_y = 32, local_size_z = 1) in; + +#define GLOBAL_UBO_DESC_SET_IDX 0 +#include "global_ubo.h" + +#define GLOBAL_TEXTURES_DESC_SET_IDX 1 +#include "global_textures.h" + +#include "utils.glsl" +#include "asvgf.glsl" + +vec4 +compute_gradient_image( + sampler2D tex_pt_curr, + sampler2D tex_pt_prev, + usampler2D tex_grad_sample_pos) +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + + vec2 mom = vec2(0); + + vec4 ret = vec4(0); + + for(int yy = 0; yy < GRAD_DWN; yy++) { + for(int xx = 0; xx < GRAD_DWN; xx++) { + ivec2 p = ipos * GRAD_DWN + ivec2(xx, yy); + + vec3 c = texelFetch(tex_pt_curr, p, 0).rgb; + + float l = luminance(c); + mom += vec2(l, l * l); + } + } + + mom /= float(GRAD_DWN * GRAD_DWN); + + ret.zw = vec2(mom.x, sqrt(max(0.0, mom.y - mom.x * mom.x))); + + uint u = texelFetch(tex_grad_sample_pos, ipos, 0).r; + + if(u == 0u) + return ret; + + /* position of sample inside of stratum in the current frame */ + ivec2 grad_strata_pos = ivec2( + u >> (STRATUM_OFFSET_SHIFT * 0), + u >> (STRATUM_OFFSET_SHIFT * 1)) & STRATUM_OFFSET_MASK; + + /* full position in current frame for gradient sample */ + ivec2 grad_sample_pos_curr = ipos * GRAD_DWN + grad_strata_pos; + uint idx_prev = (u & (~(1 << 31))) >> (2 * STRATUM_OFFSET_SHIFT); + + /* full position in the previous frame */ + int w = global_ubo.width; + ivec2 grad_sample_pos_prev = ivec2(idx_prev % w, idx_prev / w); + + vec3 c_curr = texelFetch(tex_pt_curr, grad_sample_pos_curr, 0).rgb; + vec3 c_prev = texelFetch(tex_pt_prev, grad_sample_pos_prev, 0).rgb; + + float l_curr = luminance(c_curr); + float l_prev = luminance(c_prev); + + ret.x = l_curr - l_prev; + ret.y = max(l_curr, l_prev); + + return ret; +} + +void +main() +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + if(any(greaterThanEqual(ipos, ivec2(global_ubo.width, global_ubo.height) / 
GRAD_DWN))) + return; + + vec4 v; + if((global_ubo.current_frame_idx & 1) == 0) { + v = compute_gradient_image(TEX_PT_COLOR_A, TEX_PT_COLOR_B, TEX_ASVGF_GRAD_SMPL_POS); + } + else { + v = compute_gradient_image(TEX_PT_COLOR_B, TEX_PT_COLOR_A, TEX_ASVGF_GRAD_SMPL_POS); + } + imageStore(IMG_ASVGF_GRAD_A, ipos, v); + +#if 0 + imageStore(IMG_DEBUG, ipos * GRAD_DWN + ivec2(0, 0), vec4(v.x, -v.x, 0, 0)); + imageStore(IMG_DEBUG, ipos * GRAD_DWN + ivec2(1, 0), vec4(v.x, -v.x, 0, 0)); + imageStore(IMG_DEBUG, ipos * GRAD_DWN + ivec2(0, 1), vec4(v.x, -v.x, 0, 0)); + imageStore(IMG_DEBUG, ipos * GRAD_DWN + ivec2(1, 1), vec4(v.x, -v.x, 0, 0)); +#endif + //imageStore(IMG_DEBUG, ipos + ivec2(0, global_ubo.height / GRAD_DWN), vec4(0, v.y, 0, 0)); +} diff --git a/src/refresh/vkpt/shader/asvgf_seed_rng.comp b/src/refresh/vkpt/shader/asvgf_seed_rng.comp new file mode 100644 index 0000000..6264a90 --- /dev/null +++ b/src/refresh/vkpt/shader/asvgf_seed_rng.comp @@ -0,0 +1,47 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable +#extension GL_EXT_nonuniform_qualifier : enable + +layout(local_size_x = 32, local_size_y = 32, local_size_z = 1) in; + +#define GLOBAL_UBO_DESC_SET_IDX 0 +#include "global_ubo.h" + +#define GLOBAL_TEXTURES_DESC_SET_IDX 1 +#include "global_textures.h" + +void +main() +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + int frame_num = global_ubo.current_frame_idx; + + uint rng_seed = 0; + + rng_seed |= (uint(ipos.x) % BLUE_NOISE_RES) << 0u; + rng_seed |= (uint(ipos.y) % BLUE_NOISE_RES) << 10u; + rng_seed |= uint(frame_num) << 20; + + if((global_ubo.current_frame_idx & 1) == 0) + imageStore(IMG_ASVGF_RNG_SEED_A, ipos, uvec4(rng_seed)); + else + imageStore(IMG_ASVGF_RNG_SEED_B, ipos, uvec4(rng_seed)); +} diff --git a/src/refresh/vkpt/shader/asvgf_taa.comp b/src/refresh/vkpt/shader/asvgf_taa.comp new file mode 100644 index 0000000..1f07d9e --- /dev/null +++ b/src/refresh/vkpt/shader/asvgf_taa.comp @@ -0,0 +1,132 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
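+(Note on asvgf_gradient_img.comp above: the packed value it reads from
+ASVGF_GRAD_SMPL_POS is the gradient_idx that asvgf_fwd_project.comp wrote,
+i.e. a busy bit, the sample's stratum offset in the current frame and its
+linear pixel index in the previous frame. The pass does not re-shade anything;
+it fetches the path-traced luminance at both locations and stores their
+difference (.x) and maximum (.y), together with the per-stratum luminance mean
+and standard deviation (.zw), into ASVGF_GRAD_A for the gradient a-trous
+passes that follow.)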
+*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable +#extension GL_EXT_nonuniform_qualifier : enable + +layout(local_size_x = 32, local_size_y = 32, local_size_z = 1) in; + +#define GLOBAL_UBO_DESC_SET_IDX 0 +#include "global_ubo.h" + +#define GLOBAL_TEXTURES_DESC_SET_IDX 1 +#include "global_textures.h" + +#include "utils.glsl" +#include "asvgf.glsl" + +vec4 +temporal_filter( + sampler2D tex_color_curr, + sampler2D tex_color_prev, + sampler2D tex_motion) +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + + vec3 mom1 = vec3(0.0); + vec3 mom2 = vec3(0.0); + const int r = 1; + vec4 color_center = vec4(0); + for(int yy = -r; yy <= r; yy++) { + for(int xx = -r; xx <= r; xx++) { + ivec2 p = ipos + ivec2(xx, yy); + vec4 c = texelFetch(tex_color_curr, p, 0); + + if(xx == 0 && yy == 0) { + color_center = c; + } + else { + mom1 += c.rgb; + mom2 += c.rgb * c.rgb; + } + } + } + int num_pix = (2 * r + 1) * (2 * r + 1); + + { + vec3 mom1_c = mom1 / (num_pix - 1); + vec3 mom2_c = mom2 / (num_pix - 1); + + vec3 sigma = sqrt(max(vec3(0), mom2_c - mom1_c * mom1_c)); + vec3 mi = mom1_c - sigma * 3.0; + vec3 ma = mom1_c + sigma * 3.0; + if(any(lessThan(color_center.rgb, mi)) + || any(greaterThan(color_center.rgb, ma))) { + color_center.rgb = mom1_c; + } + } + + mom1 = (mom1 + color_center.rgb) / float(num_pix); + mom2 = (mom2 + color_center.rgb * color_center.rgb) / float(num_pix); + + vec2 motion; + { + float len = -1; + const int r = 1; + for(int yy = -r; yy <= r; yy++) { + for(int xx = -r; xx <= r; xx++) { + ivec2 p = ipos + ivec2(xx, yy); + vec2 m = texelFetch(tex_motion, p, 0).xy; + float l = dot(m, m); + if(l > len) { + len = l; + motion = m; + } + + } + } + } + + motion *= vec2(global_ubo.width, global_ubo.height) * 0.5; + vec2 pos_prev = vec2(ipos) + vec2(0.5) + motion.xy; + if(any(lessThan(ivec2(pos_prev), ivec2(1))) + || any(greaterThanEqual(ivec2(pos_prev), ivec2(global_ubo.width, global_ubo.height) - 1))) { + return color_center; + } + + //vec3 color_prev = textureLod(tex_color_prev, pos_prev / vec2(global_ubo.width, global_ubo.height), 0).rgb; + vec3 color_prev = sample_texture_catmull_rom(tex_color_prev, pos_prev).rgb; + + + vec3 sigma = sqrt(max(vec3(0), mom2 - mom1 * mom1)); + vec3 mi = mom1 - sigma; + vec3 ma = mom1 + sigma; + + if(any(isnan(color_prev))) + color_prev = vec3(0); + + color_prev = clamp(color_prev, mi, ma); + + return vec4(mix(color_prev, color_center.rgb, 0.1), color_center.a); + +} + +void +main() +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + if((global_ubo.current_frame_idx & 1) == 0) { + vec4 v = temporal_filter(TEX_ASVGF_COLOR, TEX_ASVGF_TAA_B, TEX_PT_MOTION); + imageStore(IMG_ASVGF_TAA_A, ipos, v); + } + else { + vec4 v = temporal_filter(TEX_ASVGF_COLOR, TEX_ASVGF_TAA_A, TEX_PT_MOTION); + imageStore(IMG_ASVGF_TAA_B, ipos, v); + } +} diff --git a/src/refresh/vkpt/shader/asvgf_temporal.comp b/src/refresh/vkpt/shader/asvgf_temporal.comp new file mode 100644 index 0000000..886e6db --- /dev/null +++ b/src/refresh/vkpt/shader/asvgf_temporal.comp @@ -0,0 +1,247 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
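+(Note on asvgf_taa.comp above: the center sample is first clipped against the
+mean +/- 3 sigma of its 3x3 neighborhood to reject outliers, the motion vector
+is dilated by taking the longest one in that window, the history is resampled
+with a Catmull-Rom filter and clamped to mean +/- 1 sigma of the neighborhood,
+and current and history are blended with a fixed weight of 0.1 for the current
+frame.)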
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable +#extension GL_EXT_nonuniform_qualifier : enable + +layout(local_size_x = 32, local_size_y = 32, local_size_z = 1) in; + +layout(push_constant, std140) uniform IterationInfo { + uint iteration; +} push; + +#define GLOBAL_UBO_DESC_SET_IDX 0 +#include "global_ubo.h" + +#define GLOBAL_TEXTURES_DESC_SET_IDX 1 +#include "global_textures.h" + +#include "utils.glsl" +#include "asvgf.glsl" + +float +get_antilag_alpha(sampler2D tex_gradient, ivec2 ipos) +{ + const int r = 1; + + float antilag_alpha = 0.0; + + for(int yy = -r; yy <= r; yy++) { + for(int xx = -r; xx <= r; xx++) { + ivec2 p = ipos / GRAD_DWN + ivec2(xx, yy); + + if(any(lessThan(p, vec2(0))) + || any(greaterThanEqual(p, ivec2(global_ubo.width, global_ubo.height) / GRAD_DWN))) + continue; + + float a = texelFetch(tex_gradient, p, 0).r; + //vec2 gradient = texelFetch(tex_gradient, p, 0).rg; + + //float a = gradient.g > 1e-2 ? abs(gradient.r) / gradient.g : 0.0; + antilag_alpha = max(antilag_alpha, a); + } + } + + return clamp(antilag_alpha, 0.0, 1.0); +} + +void +temporal_filter( + sampler2D tex_color, // needs to be switched at some point as well + sampler2D tex_color_history, /* this not color prev, filtered output of svgf */ + sampler2D tex_motion, + sampler2D tex_depth_normal_curr, + sampler2D tex_depth_normal_prev, + sampler2D tex_moments_histlen_prev, + image2D img_moments_histlen_curr, + image2D img_filtered_output) +{ + ivec2 ipos = ivec2(gl_GlobalInvocationID); + vec4 motion = texelFetch(tex_motion, ipos, 0); + vec4 color_curr = texelFetch(tex_color, ipos, 0); + float lum_curr = luminance(color_curr.rgb); + + /* env map */ + if(motion.a < 0.0 || color_curr.a < 1.0) { + imageStore(img_moments_histlen_curr, ipos, vec4(0)); + imageStore(img_filtered_output, ipos, color_curr); + return; + } + + motion.xy *= vec2(global_ubo.width, global_ubo.height) * 0.5; + + /* need a bit of jitter, variance estimate is changed by the subpixel + * error introduced from the reprojection. 
With the rasterizer this + * was automatically happening due to the global jitter */ + vec2 jitter; + jitter.x = fract(texelFetch(TEX_BLUE_NOISE, ivec3(ipos, global_ubo.current_frame_idx + 0) % ivec3(BLUE_NOISE_RES, BLUE_NOISE_RES, NUM_BLUE_NOISE_TEX), 0).r); + jitter.y = fract(texelFetch(TEX_BLUE_NOISE, ivec3(ipos, global_ubo.current_frame_idx + 1) % ivec3(BLUE_NOISE_RES, BLUE_NOISE_RES, NUM_BLUE_NOISE_TEX), 0).r); + jitter -= vec2(0.5); + + vec2 pos_prev = vec2(ipos) + vec2(0.5) + motion.xy + jitter * 0.1; + + float depth_curr; + vec3 normal_curr; + read_normal_depth(tex_depth_normal_curr, ipos, depth_curr, normal_curr); + + vec4 color_prev = vec4(0); + vec3 moments_histlen_prev = vec3(0); + float sum_w = 0.0; + { + vec2 pos_ld = floor(pos_prev - vec2(0.5)); + vec2 subpix = fract(pos_prev - vec2(0.5) - pos_ld); + + const ivec2 off[4] = { { 0, 0 }, { 1, 0 }, { 0, 1 }, { 1, 1 } }; + float w[4] = { + (1.0 - subpix.x) * (1.0 - subpix.y), + (subpix.x ) * (1.0 - subpix.y), + (1.0 - subpix.x) * (subpix.y ), + (subpix.x ) * (subpix.y ) + }; + for(int i = 0; i < 4; i++) { + ivec2 p = ivec2(pos_ld) + off[i]; + + float depth_prev; + vec3 normal_prev; + read_normal_depth(tex_depth_normal_prev, p, depth_prev, normal_prev); + + float dist_depth = abs(depth_curr - depth_prev + motion.z) * motion.a; + if(dist_depth < 2.0 && depth_prev > 0.0 && depth_curr > 0.0) { + color_prev += texelFetch(tex_color_history, p, 0) * w[i]; + moments_histlen_prev += texelFetch(tex_moments_histlen_prev, p, 0).rgb * w[i]; + sum_w += w[i]; + } + } + } + + if(sum_w > 0.001 && color_curr.a > 0.0 + && !any(isnan(color_prev)) && !any(isnan(moments_histlen_prev))) + { + + float antilag_alpha = get_antilag_alpha(TEX_ASVGF_GRAD_B, ipos); + + + color_prev /= sum_w; + moments_histlen_prev /= sum_w; + + float hist_len = min(moments_histlen_prev.z + 1.0, 256.0); + float alpha_color = max(0.075, 1.0 / hist_len); + float alpha_moments = max(0.1, 1.0 / hist_len); + + alpha_color = mix(alpha_color, 1.0, antilag_alpha); + alpha_moments = mix(alpha_moments, 1.0, antilag_alpha); + + vec2 mom_curr = vec2(lum_curr, lum_curr * lum_curr); + vec2 mom = mix(moments_histlen_prev.xy, mom_curr, alpha_moments); + imageStore(img_moments_histlen_curr, ipos, vec4(mom, hist_len, 0)); + + vec4 color_var; + color_var.rgb = mix(color_prev.rgb, color_curr.rgb, alpha_color); + color_var.a = sqrt(max(mom.y - mom.x * mom.x, 0.0)); + color_var.a *= max(1.0, 8.0 / hist_len); + imageStore(img_filtered_output, ipos, color_var); + + imageStore(IMG_DEBUG, ipos, vec4(viridis_quintic(alpha_color), 0)); + } + else { + const int r = 2; /* spatially compute variance in 5x5 window */ + const int num_pixels = (2 * r + 1) * (2 * r + 1); + + vec3 sum_color = color_curr.rgb; + vec2 sum_moments = vec2(lum_curr, lum_curr * lum_curr); + + float sum_w = 1.0; + float max_lum = luminance(color_curr.rgb); + for(int yy = -r; yy <= r; yy++) { + for(int xx = -r; xx <= r; xx++) { + if(xx == 0 && yy == 0) + continue; + + ivec2 p = ipos + ivec2(xx, yy); + + float depth; + vec3 normal; + read_normal_depth(tex_depth_normal_curr, p, depth, normal); + float dist_z = abs(depth_curr - depth) * motion.a; + if(dist_z < 2.0) { + vec4 col_p = texelFetch(tex_color, p, 0); + float lum_p = luminance(col_p.rgb); + + float w = pow(max(0.0, dot(normal, normal_curr)), 128.0); + w *= col_p.a; + + max_lum = max(max_lum, lum_p); + sum_color += col_p.rgb * w; + sum_moments += vec2(lum_p * w, lum_p * lum_p * w * w); + sum_w += w; + } + } + } + + sum_color *= 1.0 / sum_w; + sum_moments *= vec2(1.0 / sum_w, 1.0 / 
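+		/* This branch runs when no usable history could be reprojected: the first
+		   and second luminance moments are estimated spatially from the depth- and
+		   normal-weighted 5x5 neighborhood gathered above, and the variance written
+		   below is deliberately inflated (max_lum * 8) so that the following
+		   a-trous iterations filter these unreliable pixels more aggressively. */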
(sum_w * sum_w)); + + vec4 color_var; + color_var.rgb = clamp_color(sum_color.rgb, 128.0); + /* give variance some boost to make filter more aggressive */ + /* in high variance situations the spatial estimate becomes really bad and + unreliable. max seems to work more stable */ + color_var.a = max_lum * 8; + //color_var.a = sqrt(max(sum_moments.y - sum_moments.x * sum_moments.x, 0.0)) * 16.0; + + imageStore(img_moments_histlen_curr, ipos, vec4(sum_moments, 1, 0)); + imageStore(img_filtered_output, ipos, color_var); + + imageStore(IMG_DEBUG, ipos, vec4(viridis_quintic(1.0), 0)); + } + + + //imageStore(IMG_ASVGF_ATROUS_PING, ipos, filtered); + + //return vec4(0); // moments and histlen +} + +void +main() +{ + if((global_ubo.current_frame_idx & 1) == 0) { + temporal_filter( + TEX_PT_COLOR_A, + TEX_ASVGF_HIST_COLOR, + TEX_PT_MOTION, + TEX_PT_DEPTH_NORMAL_A, + TEX_PT_DEPTH_NORMAL_B, + TEX_ASVGF_HIST_MOMENTS_B, + IMG_ASVGF_HIST_MOMENTS_A, + IMG_ASVGF_ATROUS_PING + ); + } + else { + temporal_filter( + TEX_PT_COLOR_B, + TEX_ASVGF_HIST_COLOR, + TEX_PT_MOTION, + TEX_PT_DEPTH_NORMAL_B, + TEX_PT_DEPTH_NORMAL_A, + TEX_ASVGF_HIST_MOMENTS_A, + IMG_ASVGF_HIST_MOMENTS_B, + IMG_ASVGF_ATROUS_PING + ); + } +} diff --git a/src/refresh/vkpt/shader/brdf.glsl b/src/refresh/vkpt/shader/brdf.glsl new file mode 100644 index 0000000..b7b28db --- /dev/null +++ b/src/refresh/vkpt/shader/brdf.glsl @@ -0,0 +1,27 @@ +/* +Copyright (C) 2018 Christoph Schied +Copyright (C) 2018 Tobias Zirr + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +float +blinn_phong_based_brdf(vec3 V, vec3 L, vec3 N, float phong_exp) +{ + vec3 H = normalize(V + L); + float F = pow(1.0 - max(0.0, dot(H, V)), 5.0); + return mix(0.15, 0.05 + 10.25 * pow(max(0.0, dot(H, N)), phong_exp), F) / M_PI; +} + diff --git a/src/refresh/vkpt/shader/global_textures.h b/src/refresh/vkpt/shader/global_textures.h new file mode 100644 index 0000000..b4ca156 --- /dev/null +++ b/src/refresh/vkpt/shader/global_textures.h @@ -0,0 +1,185 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
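+(Note on brdf.glsl above: blinn_phong_based_brdf() uses the Schlick-style term
+(1 - dot(H, V))^5 as a Fresnel weight to blend from a flat base reflectance of
+0.15 towards a Blinn-Phong lobe, 0.05 + 10.25 * pow(dot(N, H), phong_exp), and
+scales the result by 1 / pi.)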
+*/ + +#ifndef _TEXTURES_H_ +#define _TEXTURES_H_ + +#define GRAD_DWN (3) + +#define IMG_WIDTH (qvk.extent.width) +#define IMG_HEIGHT (qvk.extent.height) + +#define IMG_WIDTH_GRAD (qvk.extent.width / GRAD_DWN) +#define IMG_HEIGHT_GRAD (qvk.extent.height / GRAD_DWN) + +/* These are images that are to be used as render targets and buffers, but not textures. */ +#define LIST_IMAGES \ + IMG_DO(PT_VISBUF, 0, R32G32B32A32_SFLOAT, rgba32f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(PT_COLOR_A, 1, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(PT_COLOR_B, 2, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(PT_ALBEDO, 3, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(PT_MOTION, 4, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(PT_DEPTH_NORMAL_A, 5, R32G32_SFLOAT, rg32f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(PT_DEPTH_NORMAL_B, 6, R32G32_SFLOAT, rg32f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(PT_VIS_BUF, 7, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_HIST_COLOR, 8, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_HIST_MOMENTS_A, 9, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_HIST_MOMENTS_B, 10, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_ATROUS_PING, 11, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_ATROUS_PONG, 12, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_COLOR, 13, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_TAA_A, 14, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_TAA_B, 15, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_GRAD_A, 16, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH_GRAD, IMG_HEIGHT_GRAD) \ + IMG_DO(ASVGF_GRAD_B, 17, R16G16B16A16_SFLOAT, rgba16f, IMG_WIDTH_GRAD, IMG_HEIGHT_GRAD) \ + IMG_DO(ASVGF_GRAD_SMPL_POS, 18, R32_UINT, r32ui, IMG_WIDTH_GRAD, IMG_HEIGHT_GRAD) \ + IMG_DO(ASVGF_RNG_SEED_A, 19, R32_UINT, r32ui, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_RNG_SEED_B, 20, R32_UINT, r32ui, IMG_WIDTH, IMG_HEIGHT ) \ + IMG_DO(ASVGF_POS_WS_FWD, 21, R32G32B32A32_SFLOAT, rgba32f, IMG_WIDTH_GRAD, IMG_HEIGHT_GRAD) \ + IMG_DO(ASVGF_VISBUF_FWD, 22, R32G32B32A32_SFLOAT, rgba32f, IMG_WIDTH_GRAD, IMG_HEIGHT_GRAD) \ + IMG_DO(DEBUG, 23, R32G32B32A32_SFLOAT, rgba32f, IMG_WIDTH, IMG_HEIGHT ) \ + +#define NUM_IMAGES 24 /* this really sucks but I don't know how to fix it + counting with enum does not work in GLSL */ + +// todo: make naming consistent! +#define GLOBAL_TEXTURES_TEX_ARR_BINDING_IDX 0 +#define BINDING_OFFSET_IMAGES (1 + GLOBAL_TEXTURES_TEX_ARR_BINDING_IDX) +#define BINDING_OFFSET_TEXTURES (BINDING_OFFSET_IMAGES + NUM_IMAGES) +#define BINDING_OFFSET_BLUE_NOISE (BINDING_OFFSET_TEXTURES + NUM_IMAGES) +#define BINDING_OFFSET_ENVMAP (BINDING_OFFSET_BLUE_NOISE + 1) + + +#define NUM_GLOBAL_TEXTUES 1024 + +#define NUM_BLUE_NOISE_TEX (128 * 4) +#define BLUE_NOISE_RES (256) + +#define BSP_FLAG_LIGHT (1 << 31) +#define BSP_FLAG_WATER (1 << 30) +#define BSP_FLAG_TRANSPARENT (1 << 29) +#define BSP_TEXTURE_MASK (BSP_FLAG_TRANSPARENT - 1) + + +#ifndef VKPT_SHADER +/***************************************************************************/ +/* HOST CODE */ +/***************************************************************************/ + +#if MAX_RIMAGES != NUM_GLOBAL_TEXTUES +#error need to fix the constant here as well +#endif + + +enum QVK_IMAGES { +#define IMG_DO(_name, ...) 
\ + VKPT_IMG_##_name, + LIST_IMAGES +#undef IMG_DO + NUM_VKPT_IMAGES +}; + +typedef char compile_time_check_num_images[(NUM_IMAGES == NUM_VKPT_IMAGES)*2-1]; + +#else +/***************************************************************************/ +/* SHADER CODE */ +/***************************************************************************/ + +/* general texture array for world, etc */ +layout( + set = GLOBAL_TEXTURES_DESC_SET_IDX, + binding = GLOBAL_TEXTURES_TEX_ARR_BINDING_IDX +) uniform sampler2D global_texture_descriptors[]; + +#define SAMPLER_r32ui usampler2D +#define SAMPLER_rg32f sampler2D +#define SAMPLER_rgba32f sampler2D +#define SAMPLER_rgba16f sampler2D + +#define IMAGE_r32ui uimage2D +#define IMAGE_rg32f image2D +#define IMAGE_rgba32f image2D +#define IMAGE_rgba16f image2D + +/* framebuffer images */ +#define IMG_DO(_name, _binding, _vkformat, _glslformat, _w, _h) \ + layout(set = GLOBAL_TEXTURES_DESC_SET_IDX, binding = BINDING_OFFSET_IMAGES + _binding, _glslformat) \ + uniform IMAGE_##_glslformat IMG_##_name; +LIST_IMAGES +#undef IMG_DO + +/* framebuffer textures */ +#define IMG_DO(_name, _binding, _vkformat, _glslformat, _w, _h) \ + layout(set = GLOBAL_TEXTURES_DESC_SET_IDX, binding = BINDING_OFFSET_TEXTURES + _binding) \ + uniform SAMPLER_##_glslformat TEX_##_name; +LIST_IMAGES +#undef IMG_DO + +layout( + set = GLOBAL_TEXTURES_DESC_SET_IDX, + binding = BINDING_OFFSET_BLUE_NOISE +) uniform sampler2DArray TEX_BLUE_NOISE; + +layout( + set = GLOBAL_TEXTURES_DESC_SET_IDX, + binding = BINDING_OFFSET_ENVMAP +) uniform samplerCube TEX_ENVMAP; + +vec4 +global_texture(uint idx, vec2 tex_coord) +{ + idx &= BSP_TEXTURE_MASK; + if(idx >= NUM_GLOBAL_TEXTUES) + return vec4(1, 0, 1, 0); + return texture(global_texture_descriptors[nonuniformEXT(idx)], tex_coord); +} + +vec4 +global_textureLod(uint idx, vec2 tex_coord, uint lod) +{ + idx &= BSP_TEXTURE_MASK; + if(idx >= NUM_GLOBAL_TEXTUES) + return vec4(1, 1, 0, 0); + return textureLod(global_texture_descriptors[nonuniformEXT(idx)], tex_coord, lod); +} + +vec4 +global_textureGrad(uint idx, vec2 tex_coord, vec2 d_x, vec2 d_y) +{ + idx &= BSP_TEXTURE_MASK; + if(idx >= NUM_GLOBAL_TEXTUES) + return vec4(1, 1, 0, 0); + return textureGrad(global_texture_descriptors[nonuniformEXT(idx)], tex_coord, d_x, d_y); +} + +ivec2 +global_textureSize(uint idx, int level) +{ + idx &= BSP_TEXTURE_MASK; + if(idx >= NUM_GLOBAL_TEXTUES) + return ivec2(0); + return textureSize(global_texture_descriptors[nonuniformEXT(idx)], level); +} + + +#endif + + +#endif /*_TEXTURES_H_*/ +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/shader/global_ubo.h b/src/refresh/vkpt/shader/global_ubo.h new file mode 100644 index 0000000..87f1055 --- /dev/null +++ b/src/refresh/vkpt/shader/global_ubo.h @@ -0,0 +1,111 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
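+(Note on global_textures.h above: LIST_IMAGES is an X-macro. On the host side,
+IMG_DO expands each entry into a VKPT_IMG_* enum value plus a compile-time
+size check against NUM_IMAGES; on the shader side the list is expanded twice,
+once into storage-image declarations (IMG_*) at BINDING_OFFSET_IMAGES and once
+into sampler declarations (TEX_*) at BINDING_OFFSET_TEXTURES. For example,
+IMG_DO(PT_ALBEDO, 3, R16G16B16A16_SFLOAT, rgba16f, ...) expands in GLSL to
+roughly: layout(set = GLOBAL_TEXTURES_DESC_SET_IDX, binding =
+BINDING_OFFSET_IMAGES + 3, rgba16f) uniform image2D IMG_PT_ALBEDO; and
+NUM_IMAGES has to be kept in sync by hand because the GLSL side cannot count
+the list.)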
+*/ + +#ifndef _GLOBAL_UBO_DESCRIPTOR_SET_LAYOUT_H_ +#define _GLOBAL_UBO_DESCRIPTOR_SET_LAYOUT_H_ + +#define SHADER_MAX_ENTITIES 256 +#define SHADER_MAX_BSP_ENTITIES 32 +#define MAX_LIGHT_SOURCES 32 + +#define GLOBAL_UBO_BINDING_IDX 0 + +/* glsl alignment rules make me very very sad :'( */ +#define GLOBAL_UBO_VAR_LIST \ + GLOBAL_UBO_VAR_LIST_DO(int, current_frame_idx) \ + GLOBAL_UBO_VAR_LIST_DO(int, width) \ + GLOBAL_UBO_VAR_LIST_DO(int, height)\ + GLOBAL_UBO_VAR_LIST_DO(int, num_lights) \ + \ + GLOBAL_UBO_VAR_LIST_DO(int, under_water) \ + GLOBAL_UBO_VAR_LIST_DO(float, time) \ + GLOBAL_UBO_VAR_LIST_DO(int, num_instances_model_bsp) /* 16 bit each */ \ + GLOBAL_UBO_VAR_LIST_DO(int, padding2) \ + \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, model_current_to_prev [SHADER_MAX_ENTITIES / 4]) \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, model_prev_to_current [SHADER_MAX_ENTITIES / 4]) \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, world_current_to_prev [SHADER_MAX_BSP_ENTITIES / 4]) \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, world_prev_to_current [SHADER_MAX_BSP_ENTITIES / 4]) \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, bsp_prim_offset [SHADER_MAX_BSP_ENTITIES / 4]) \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, model_idx_offset [SHADER_MAX_ENTITIES / 4]) \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, light_offset_cnt [MAX_LIGHT_SOURCES]) \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, model_cluster_id [SHADER_MAX_ENTITIES / 4]) \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, bsp_cluster_id [SHADER_MAX_BSP_ENTITIES / 4]) \ + GLOBAL_UBO_VAR_LIST_DO(vec4, cam_pos) \ + GLOBAL_UBO_VAR_LIST_DO(mat4, invVP) \ + GLOBAL_UBO_VAR_LIST_DO(mat4, VP) \ + GLOBAL_UBO_VAR_LIST_DO(mat4, VP_prev) \ + GLOBAL_UBO_VAR_LIST_DO(mat4, V) \ + GLOBAL_UBO_VAR_LIST_DO(ModelInstance, model_instances [SHADER_MAX_ENTITIES]) \ + GLOBAL_UBO_VAR_LIST_DO(ModelInstance, model_instances_prev [SHADER_MAX_ENTITIES]) \ + GLOBAL_UBO_VAR_LIST_DO(BspMeshInstance, bsp_mesh_instances [SHADER_MAX_BSP_ENTITIES]) \ + GLOBAL_UBO_VAR_LIST_DO(BspMeshInstance, bsp_mesh_instances_prev [SHADER_MAX_BSP_ENTITIES]) \ + /* stores the offset into the instance buffer in numberof primitives */ \ + GLOBAL_UBO_VAR_LIST_DO(uvec4, instance_buf_offset [(SHADER_MAX_ENTITIES + SHADER_MAX_BSP_ENTITIES) / 4]) \ + +#ifndef VKPT_SHADER + +#if SHADER_MAX_ENTITIES != MAX_ENTITIES +#error need to update constant here +#endif + +typedef uint32_t uvec4_t[4]; + +typedef struct ModelInstance_s { + float M[16]; // 16 + uint32_t material; int offset_curr, offset_prev; float backlerp; // 4 +} ModelInstance_t; + +typedef struct BspMeshInstance_s { + float M[16]; +} BspMeshInstance_t; + +#define int_t int32_t +typedef struct QVKUniformBuffer_s { +#define GLOBAL_UBO_VAR_LIST_DO(type, name) type##_t name; + GLOBAL_UBO_VAR_LIST +#undef GLOBAL_UBO_VAR_LIST_DO +} QVKUniformBuffer_t; +#undef int_t + +#else + +struct ModelInstance { + mat4 M; + uvec4 mat_offset_backlerp; +}; + +struct BspMeshInstance { + mat4 M; +}; + +struct GlobalUniformBuffer { +#define GLOBAL_UBO_VAR_LIST_DO(type, name) type name; + GLOBAL_UBO_VAR_LIST +#undef GLOBAL_UBO_VAR_LIST_DO +}; + +layout(set = GLOBAL_UBO_DESC_SET_IDX, binding = GLOBAL_UBO_BINDING_IDX, std140) uniform UBO { + GlobalUniformBuffer global_ubo; +}; + +#endif + + + +#endif /*_GLOBAL_UBO_DESCRIPTOR_SET_LAYOUT_H_*/ diff --git a/src/refresh/vkpt/shader/instance_geometry.comp b/src/refresh/vkpt/shader/instance_geometry.comp new file mode 100644 index 0000000..29fb0ba --- /dev/null +++ b/src/refresh/vkpt/shader/instance_geometry.comp @@ -0,0 +1,128 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or 
modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable + +#define GLOBAL_UBO_DESC_SET_IDX 0 +#include "global_ubo.h" + +#define VERTEX_BUFFER_DESC_SET_IDX 1 +#include "vertex_buffer.h" + +#include "read_visbuf.glsl" + +#define LOCAL_SIZE_X 512 + +layout(local_size_x = LOCAL_SIZE_X, local_size_y = 1, local_size_z = 1) in; + +void +main() +{ + uint instance_id = gl_WorkGroupID.x; + + bool is_world = instance_id < (global_ubo.num_instances_model_bsp & 0xffffu); + + uint buf_offset = global_ubo.instance_buf_offset[instance_id / 4][instance_id % 4]; + uint num_triangles = global_ubo.instance_buf_offset[(instance_id + 1) / 4][(instance_id + 1) % 4]; + num_triangles -= buf_offset; + + if(!is_world) + instance_id -= (global_ubo.num_instances_model_bsp & 0xffffu); + + for(uint idx = gl_LocalInvocationID.x; idx < num_triangles; idx += LOCAL_SIZE_X) + { + InstancedTriangle t_i; + mat4 M_curr = mat4(1.0); + mat4 M_prev = mat4(1.0); + + if(is_world) { + uint id = instance_id; + Triangle t = get_bsp_triangle(idx + global_ubo.bsp_prim_offset[id / 4][id % 4]); + M_curr = global_ubo.bsp_mesh_instances[id].M; + uint id_prev = global_ubo.world_current_to_prev[id / 4][id % 4]; + M_prev = global_ubo.bsp_mesh_instances_prev[id_prev].M; + + t_i.positions = t.positions; + t_i.positions_prev = t.positions; /* no vertex anim for bsp meshes */ + t_i.normals = t.normals; + t_i.material_id = t.material_id; + t_i.tex_coords = t.tex_coords; + } + else { /* model */ + /* idx_offset should stay the same across frames */ + uint idx_offset = global_ubo.model_idx_offset[instance_id/4][instance_id%4]; + + { + /* read and interpolate triangles for model for _current_ frame */ + ModelInstance mi_curr = global_ubo.model_instances[instance_id]; + uint vertex_off_curr = mi_curr.mat_offset_backlerp.y; + uint vertex_off_prev = mi_curr.mat_offset_backlerp.z; // referes to animation frame + Triangle t = get_model_triangle(idx, idx_offset, vertex_off_curr); + Triangle t_prev = get_model_triangle(idx, idx_offset, vertex_off_prev); + M_curr = mi_curr.M; + + float backlerp = uintBitsToFloat(mi_curr.mat_offset_backlerp.w); + + t_i.positions[0] = mix(t.positions[0], t_prev.positions[0], backlerp); + t_i.positions[1] = mix(t.positions[1], t_prev.positions[1], backlerp); + t_i.positions[2] = mix(t.positions[2], t_prev.positions[2], backlerp); + + t_i.normals[0] = mix(t.normals[0], t_prev.normals[0], backlerp); + t_i.normals[1] = mix(t.normals[1], t_prev.normals[1], backlerp); + t_i.normals[2] = mix(t.normals[2], t_prev.normals[2], backlerp); + + t_i.tex_coords = t.tex_coords; + + t_i.material_id = mi_curr.mat_offset_backlerp.x; + } + { + uint id_prev = global_ubo.model_current_to_prev[instance_id / 4][instance_id % 4]; + ModelInstance mi_prev = global_ubo.model_instances_prev[id_prev]; + /* read and interpolate triangles for model for _previous_ frame */ + uint vertex_off_curr = mi_prev.mat_offset_backlerp.y; + 
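+				/* Here the interpolation is repeated with the previous frame's
+				   instance data (matrix, animation frame offsets and backlerp), so
+				   that positions_prev holds where this triangle was last frame;
+				   the difference between the current and previous positions is
+				   what the motion/reprojection data consumed by the A-SVGF passes
+				   is derived from. */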
uint vertex_off_prev = mi_prev.mat_offset_backlerp.z; // referes to animation frame + Triangle t = get_model_triangle(idx, idx_offset, vertex_off_curr); + Triangle t_prev = get_model_triangle(idx, idx_offset, vertex_off_prev); + M_prev = mi_prev.M; + + float backlerp = uintBitsToFloat(mi_prev.mat_offset_backlerp.w); + + t_i.positions_prev[0] = mix(t.positions[0], t_prev.positions[0], backlerp); + t_i.positions_prev[1] = mix(t.positions[1], t_prev.positions[1], backlerp); + t_i.positions_prev[2] = mix(t.positions[2], t_prev.positions[2], backlerp); + } + } + + t_i.positions[0] = vec3(M_curr * vec4(t_i.positions[0], 1.0)); + t_i.positions[1] = vec3(M_curr * vec4(t_i.positions[1], 1.0)); + t_i.positions[2] = vec3(M_curr * vec4(t_i.positions[2], 1.0)); + + t_i.positions_prev[0] = vec3(M_prev * vec4(t_i.positions_prev[0], 1.0)); + t_i.positions_prev[1] = vec3(M_prev * vec4(t_i.positions_prev[1], 1.0)); + t_i.positions_prev[2] = vec3(M_prev * vec4(t_i.positions_prev[2], 1.0)); + + t_i.normals[0] = vec3(M_curr * vec4(t_i.normals[0], 0.0)); + t_i.normals[1] = vec3(M_curr * vec4(t_i.normals[1], 0.0)); + t_i.normals[2] = vec3(M_curr * vec4(t_i.normals[2], 0.0)); + + uint instance_triangle_id = pack_instance_id_triangle_idx(instance_id | (is_world ? (1 << 31) : 0), idx); + store_instanced_triangle(t_i, instance_triangle_id, idx + buf_offset); + } +} + diff --git a/src/refresh/vkpt/shader/light_hierarchy.h b/src/refresh/vkpt/shader/light_hierarchy.h new file mode 100644 index 0000000..7a6f2f4 --- /dev/null +++ b/src/refresh/vkpt/shader/light_hierarchy.h @@ -0,0 +1,357 @@ +/* +Copyright (C) 2018 Addis Dittebrandt +Copyright (C) 2018 Christoph Schied +Copyright (C) 2018 Florian Simon + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
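+(The header below defines the light hierarchy: a binary BVH over the emissive
+triangles whose children each store an AABB and an orientation cone (mean
+emission axis plus the angular bounds th_o and th_e). On the GPU the compact
+nodes are traversed top-down and at every step one child is chosen
+stochastically in proportion to lh_importance(), which is essentially
+|cos(th_ih)| * cos(th_h) / d^2: the receiver-facing and emitter-facing cosines,
+each widened by the angular uncertainty th_u of the box as seen from the
+shading point, divided by the squared distance to the box center. This is in
+the spirit of the lighting-BVH importance heuristics of Conty and Kulla.)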
+*/ + +#ifndef _LIGHT_HIERARCHY_ +#define _LIGHT_HIERARCHY_ + +#define LH_STATIC_BINDING_IDX 0 +#define MAX_LIGHT_HIERARCHY_NODES (1 << 20) + +#define LH_COMPACT_NODES + +#ifndef VKPT_SHADER + +#include "vkpt.h" + +typedef struct lh_cone_s +{ + vec3_t axis; // mean direction of lights in node + float th_o; // max normal deviaton + float th_e; // max emission angle +} +lh_cone_t; + +typedef struct lh_child_s +{ + int i; // child index; in leaf: prim offset if c[0], num prims in c[1] + float aabb[6]; + lh_cone_t cone; + float energy; +} +lh_child_t; + +typedef struct lh_node_s +{ + lh_child_t c[2]; +} +lh_node_t; + +/* this is the data structure actually uploaded to the gpu */ +/* the struct is aliased with triangle data which is copied + * on top of the struct beginning at c[0].aabb[0] */ +typedef struct compact_lh_node_s { + struct { + float aabb[6]; + uint32_t axis; + float th_o; + //float energy; + } c[2]; + int idx[2]; +} compact_lh_node_t; + +typedef struct light_hierarchy_s +{ + lh_node_t* nodes; + int num_nodes; + int max_num_nodes; + + int num_lights; + int num_light_vertices; + int num_light_indices; +} +light_hierarchy_t; + + +int lh_build_binned(void *dst, const float *positions, const uint32_t *colors, int num_prims, int num_bins); +void lh_dump(light_hierarchy_t *lh, const char *path); +float lh_importance( const float p[3], const float n[3], lh_node_t* node, int child); + +#else + +#include "utils.glsl" + +#define M_PI 3.1415926535897932384626433832795 + +struct LHCompactChild { + float aabb[6]; + uint axis; + float th_o; + //float energy; +}; + +struct LHCompactNode { + LHCompactChild c[2]; + int idx[2]; +}; + +layout( + set = LIGHT_HIERARCHY_SET_IDX, + binding = LH_STATIC_BINDING_IDX, + std430) +buffer BufferLightHierarchyNodes { + LHCompactNode lh_nodes[]; +}; + +vec3 +sample_triangle(vec2 xi) +{ + float sqrt_xi = sqrt(xi.x); + return vec3( + 1.0 - sqrt_xi, + sqrt_xi * (1.0 - xi.y), + sqrt_xi * xi.y); +} + +vec3 +array_to_vec(float arr[3]) +{ + return vec3(arr[0], arr[1], arr[2]); +} + +void +aabb_to_vec(float aabb[6], out vec3 amin, out vec3 amax) +{ + amin = vec3(aabb[0], aabb[1], aabb[2]); + amax = vec3(aabb[3], aabb[4], aabb[5]); +} + +float +_acos(float x) +{ + //return acos(clamp(x, -1.0, 1.0)); + + float a = -0.939115566365855; + float b = 0.9217841528914573; + float c = -1.2845906244690837; + float d = 0.295624144969963174; + return 3.1415926535 * 0.5 + (a * x + b * x * x * x) / (1 + c * x * x + d * x * x * x * x); + + //return 1.57079f - 1.57079f * x; /* not good enough */ +} + +#if 0 +float +lh_importance(vec3 p, vec3 n, LHCompactChild child) +{ + vec3 amin, amax; + aabb_to_vec(child.aabb, amin, amax); + vec3 axis = decode_normal(child.axis); + bool inside = all(greaterThan(p, amin)) + && all(lessThan(p, amax)); + + vec3 c = 0.5f * (amin + amax); + vec3 pc = c - p; + float d_sq = dot(pc, pc); + //float len_pc = sqrt(d_sq); + vec3 pc_n = pc / sqrt(d_sq); + + float th = _acos(-dot(axis, pc_n)); + float th_i = _acos(dot(n, pc_n)); + float cos_th_u = 1.0; + +#define EDGE_DIST(x0, x1, x2) \ + do { \ + vec3 e = vec3( \ + x0 == 0 ? amin[0] : amax[0], \ + x1 == 0 ? amin[1] : amax[1], \ + x2 == 0 ? 
amin[2] : amax[2]); \ + vec3 pe = normalize(e - p); \ + cos_th_u = min(cos_th_u, dot(pc_n, pe)); \ + } while(false) + + EDGE_DIST(0, 0, 0); + EDGE_DIST(0, 0, 1); + EDGE_DIST(0, 1, 0); + EDGE_DIST(0, 1, 1); + EDGE_DIST(1, 0, 0); + EDGE_DIST(1, 0, 1); + EDGE_DIST(1, 1, 0); + EDGE_DIST(1, 1, 1); + + float th_u = _acos(cos_th_u); + + + float th_h = max(th - child.th_o - th_u, 0.0f); + float th_ih = max(th_i - th_u, 0.0f); + + float cos_th_h = th_h < 1.570796 ? cos(th_h) : 0.10f; + return (abs(cos(th_ih)) * cos_th_h) / d_sq; +} +#endif + +vec3 +aabb_vertex(vec3 aabb[2], int i) +{ + float x = aabb[(i>>0) & 1].x; + float y = aabb[(i>>1) & 1].y; + float z = aabb[(i>>2) & 1].z; + return vec3(x, y, z); +} + +float +lh_importance(vec3 p, vec3 n, LHCompactChild child) +{ + vec3 aabb[2];// = child.aabb; + aabb[0][0] = child.aabb[0]; + aabb[0][1] = child.aabb[1]; + aabb[0][2] = child.aabb[2]; + aabb[1][0] = child.aabb[3]; + aabb[1][1] = child.aabb[4]; + aabb[1][2] = child.aabb[5]; + //LHCone cone = child.cone; + + vec3 axis = decode_normal(child.axis); + + vec3 c = 0.5f * (aabb[0] + aabb[1]); + vec3 pc = c - p; + vec3 pc_n = normalize(pc); + float d_sq = dot(pc, pc); + + float th_s = child.th_o + M_PI / 2.0; + float th = _acos(dot(axis, -pc_n)); + float th_i = _acos(dot(n, pc_n)); + float _max = -1.0f/0.0f; + float m_d = 1.0/0.0; + vec3 m_cu; + float cos_th_u = 1.0f; +#if 0 + // @compiler: please unroll + for (int i = 0; i < 8; i++) + { + vec3 e = aabb_vertex(aabb, i); + vec3 pe_n = normalize(e - p); + + _max = max(_max, dot(n, pe_n)); + + float d = dot(e, axis); + m_d = min(m_d, d); + m_cu = m_d == d ? e : m_cu; + + cos_th_u = min(cos_th_u, dot(pc_n, pe_n)); + } +#endif +#define EDGE_DIST(x0, x1, x2) \ + do { \ + vec3 e = vec3( \ + x0 == 0 ? aabb[0][0] : aabb[1][0], \ + x1 == 0 ? aabb[0][1] : aabb[1][1], \ + x2 == 0 ? aabb[0][2] : aabb[1][2]); \ + vec3 pe_n = normalize(e - p); \ + _max = max(_max, dot(n, pe_n)); \ + float d = dot(e, axis); \ + m_d = min(m_d, d); \ + m_cu = m_d == d ? e : m_cu; \ + cos_th_u = min(cos_th_u, dot(pc_n, pe_n)); \ + } while(false) + + EDGE_DIST(0, 0, 0); + EDGE_DIST(0, 0, 1); + EDGE_DIST(0, 1, 0); + EDGE_DIST(0, 1, 1); + EDGE_DIST(1, 0, 0); + EDGE_DIST(1, 0, 1); + EDGE_DIST(1, 1, 0); + EDGE_DIST(1, 1, 1); + vec3 cup_n = normalize(p - m_cu); + + if (_max <= 0 || (M_PI/2 <= th_s && th_s < _acos(dot(cup_n, axis)))) return 0.0f; + + //if (_max <= 0 || (M_PI >= th_s && cos(th_s) >= (dot(cup_n, cone.axis)))) return 0.0f; + float th_u = _acos(cos_th_u); + + float th_h = max(th - child.th_o - th_u, 0.0f); + float th_ih = max(th_i - th_u, 0.0f); + + float cos_th_h = th_h < (M_PI / 2.0) ? cos(th_h) : 0.00f; + return (abs(cos(th_ih)) * cos_th_h) / d_sq; + //return (abs(cos(th_ih)) * cos_th_h) / max(1e-2, d_sq); + //return child.energy * (abs(cos(th_ih)) * cos_th_h) / max(1e-2, d_sq); +} + + +void +sample_light_hierarchy(vec3 p, vec3 n, + out vec3 position_light, + out vec3 normal_light, + out vec3 light_color, + out float pdf, + vec3 rng) +{ + pdf = 1.0; + + int current_idx = 0; + + #pragma unroll + for(int i = 0; i < 32; i++) { + LHCompactNode curr = lh_nodes[current_idx]; + + float I0 = lh_importance(p, n, curr.c[0]); + float I1 = lh_importance(p, n, curr.c[1]); + float thresh = I0 / (I0 + I1); + //float thresh = (I0 + I1) > 1e-3 ? 
(I0 / (I0 + I1)) : 0.5; + + if(rng.x < thresh) { + pdf *= thresh; + rng.x /= thresh; + current_idx = curr.idx[0]; + } + else { + pdf *= 1.0 - thresh; + rng.x -= thresh; + rng.x /= 1.0 - thresh; + current_idx = curr.idx[1]; + } + + if(current_idx < 0) + break; + } + + //pdf = max(0.01, pdf); + + LHCompactNode light_source = lh_nodes[~current_idx]; + mat3 positions = mat3( + light_source.c[0].aabb[0], + light_source.c[0].aabb[1], + light_source.c[0].aabb[2], + light_source.c[0].aabb[3], + light_source.c[0].aabb[4], + light_source.c[0].aabb[5], + uintBitsToFloat(light_source.c[0].axis), // I hope the compiler doesn't care + light_source.c[0].th_o, + light_source.c[1].aabb[0]); + //light_source.c[0].energy); + + light_color = unpackUnorm4x8(floatBitsToUint(light_source.c[1].aabb[1])).rgb; + //light_color = unpackUnorm4x8(floatBitsToUint(light_source.c[1].aabb[0])).rgb; + + vec3 bary = sample_triangle(rng.yz); + + position_light = positions * bary; + normal_light = cross(positions[1] - positions[0], positions[2] - positions[0]); + float len = length(normal_light); + pdf *= 2.0 / len; + normal_light /= len; +} + +#endif + +#endif /*_LIGHT_HIERARCHY_*/ + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/shader/light_lists.h b/src/refresh/vkpt/shader/light_lists.h new file mode 100644 index 0000000..61e2fe8 --- /dev/null +++ b/src/refresh/vkpt/shader/light_lists.h @@ -0,0 +1,328 @@ +/* +Copyright (C) 2018 Tobias Zirr + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#ifndef _LIGHT_LISTS_ +#define _LIGHT_LISTS_ + +#define MAX_BRUTEFORCE_SAMPLING 8 +#define MAX_BRUTEFORCE_SAMPLING_DYNAMIC 8 +#define SOLID_ANGLE_SAMPLING 1 + +#ifndef M_PI +#define M_PI 3.1415926535897932384626433832795 +#endif + +#define PHONG_IS_EXP 20.0 + +float +projected_tri_area(mat3 positions, vec3 p, vec3 n, vec3 v) +{ + positions[0] = positions[0] - p; + positions[1] = positions[1] - p; + positions[2] = positions[2] - p; + + vec3 g = cross(positions[1] - positions[0], positions[2] - positions[0]); + if ( dot(n, positions[0]) <= 0 && dot(n, positions[1]) <= 0 && dot(n, positions[2]) <= 0 ) + return 0; + if ( dot(g, positions[0]) >= 0 && dot(g, positions[1]) >= 0 && dot(g, positions[2]) >= 0 ) + return 0; + + vec3 L = normalize(positions * vec3(1.0 / 3.0)); + float brdf = blinn_phong_based_brdf(v, L, n, PHONG_IS_EXP); + + positions[0] = normalize(positions[0]); + positions[1] = normalize(positions[1]); + positions[2] = normalize(positions[2]); + + vec3 a = cross(positions[1] - positions[0], positions[2] - positions[0]); + float pa = max(-dot(n, a), 0.); + return pa * brdf; +} + +vec3 +sample_projected_triangle(vec3 p, mat3 positions, vec2 rnd, out vec3 n, inout float pdf) +{ + n = cross(positions[1] - positions[0], positions[2] - positions[0]); + n = normalize(n); + + positions[0] = positions[0] - p; + positions[1] = positions[1] - p; + positions[2] = positions[2] - p; + + float o = dot(n, positions[0]); + + positions[0] = normalize(positions[0]); + positions[1] = normalize(positions[1]); + positions[2] = normalize(positions[2]); + + vec3 n2 = cross(positions[1] - positions[0], positions[2] - positions[0]); + + vec3 d = positions * sample_triangle(rnd); + float dl = length(d); + // n (p + d * t - p[i]) == 0 + // -n (p - pi) / n d == o / n d == t + vec3 lo = d * (o / dot(n, d)); + float lol = length(lo); + + pdf *= 2.0 / dot(n2, d) * dot(n, lo); + pdf *= (dl * dl * dl) / (lol * lol * lol); + return p + lo; +} + +void +sample_light_list( + uint list_idx, + vec3 V, + vec3 p, + vec3 n, + out vec3 position_light, + out vec3 normal_light, + out vec3 light_color, + out float pdf, + vec3 rng) +{ + if(list_idx == ~0u) + return; + pdf = 1.0; + + uint list_start = get_light_list_offsets(list_idx); + uint list_end = get_light_list_offsets(list_idx + 1); + +//#define NO_IS +#ifdef NO_IS + int current_idx = -1; + { + uint n_idx = min(list_start + int(rng.x * float(list_end - list_start)), list_end - 1); + current_idx = get_light_list_lights(n_idx); + } +#else + + int stride = 1; +//#define UNSTRIDED +#ifdef UNSTRIDED + { + float partitions = ceil(float(list_end - list_start) / float(MAX_BRUTEFORCE_SAMPLING)); + //int partitions = (list_end - list_start + (MAX_BRUTEFORCE_SAMPLING-1)) / MAX_BRUTEFORCE_SAMPLING; + rng.x *= partitions; + float fpart = min(floor(rng.x), partitions-1); + rng.x -= fpart; + int part_idx = int(fpart); + int ceq = (list_end - list_start + int(partitions)-1) / int(partitions); + list_start += ceq * part_idx; + list_end = min(list_start + ceq, list_end); + pdf = partitions; + } +#else + { + float partitions = ceil(float(list_end - list_start) / float(MAX_BRUTEFORCE_SAMPLING)); + rng.x *= partitions; + float fpart = min(floor(rng.x), partitions-1); + rng.x -= fpart; + list_start += int(fpart); + stride = int(partitions); + pdf = partitions; + } +#endif + + float mass = 0.; + + float light_masses[MAX_BRUTEFORCE_SAMPLING]; + + #pragma unroll + for(uint i = 0, n_idx = list_start; i < MAX_BRUTEFORCE_SAMPLING; i++, n_idx += stride) { + if (n_idx >= list_end) + break; + + 
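+		/* first pass: accumulate the projected solid-angle weight of each candidate light into
+		 * mass and remember the per-light weights; the second loop below re-walks the same
+		 * strided list to pick one light proportionally to its weight */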
uint current_idx = get_light_list_lights(n_idx); + + mat3 positions = mat3x3( + get_positions_bsp(current_idx * 3 + 0), + get_positions_bsp(current_idx * 3 + 1), + get_positions_bsp(current_idx * 3 + 2)); + + float m = projected_tri_area(positions, p, n, V); + mass += m; + light_masses[i] = m; + } + + if (!(mass > 0)) { + pdf = 0; + return; + } + + rng.x *= mass; + int current_idx = -1; + mass *= pdf; // partitions + pdf = 0; + + #pragma unroll + for(uint i = 0, n_idx = list_start; i < MAX_BRUTEFORCE_SAMPLING; i++, n_idx += stride) { + if (n_idx >= list_end) + break; +#if 0 + + current_idx = int(get_light_list_lights(n_idx)); + mat3 positions = mat3x3( + get_positions_bsp(current_idx * 3 + 0), + get_positions_bsp(current_idx * 3 + 1), + get_positions_bsp(current_idx * 3 + 2)); + pdf = projected_tri_area(positions, p, n, V); +#else + pdf = light_masses[i]; + current_idx = int(n_idx); +#endif + rng.x -= pdf; + + if (!(rng.x > 0)) + break; + } + + pdf /= mass; + +#endif + + // assert: current_idx >= 0? + if (current_idx >= 0) { + current_idx = int(get_light_list_lights(current_idx)); + mat3 positions = mat3x3( + get_positions_bsp(current_idx * 3 + 0), + get_positions_bsp(current_idx * 3 + 1), + get_positions_bsp(current_idx * 3 + 2)); +#if SOLID_ANGLE_SAMPLING + position_light = sample_projected_triangle(p, positions, rng.yz, normal_light, pdf); +#else + vec3 bary = sample_triangle(rng.yz); + position_light = positions * bary; + vec3 a = cross(positions[1] - positions[0], positions[2] - positions[0]); + normal_light = a / length(a); + float area = length(a); + pdf *= 2.0 / area; +#endif + uint light_material = get_materials_bsp(current_idx); + light_color = global_textureLod(light_material, vec2(0.5), 2).rgb; + } +} + +void +sample_light_list_dynamic( + uint light_idx, + vec3 V, + vec3 p, + vec3 n, + out vec3 position_light, + out vec3 normal_light, + out vec3 light_color, + out float pdf, + vec3 rng) +{ + pdf = 1.0; + + uint list_start = global_ubo.light_offset_cnt[light_idx / 2][(light_idx % 2) * 2]; + light_color = (list_start & (1 << 31)) > 0 ? 
vec3(-5.0, -5.0, 1.0) : vec3(1, 0.2, 0.0); + + list_start &= ~(1 << 31); + uint list_end = global_ubo.light_offset_cnt[light_idx / 2][(light_idx % 2) * 2 + 1]; + list_end += list_start; + + int stride = 1; + { + float partitions = ceil(float(list_end - list_start) / float(MAX_BRUTEFORCE_SAMPLING_DYNAMIC)); + rng.x *= partitions; + float fpart = min(floor(rng.x), partitions-1); + rng.x -= fpart; + list_start += int(fpart); + stride = int(partitions); + pdf = partitions; + } + + float mass = 0.; + float light_masses[MAX_BRUTEFORCE_SAMPLING_DYNAMIC]; + + #pragma unroll + for(uint i = 0, n_idx = list_start; i < MAX_BRUTEFORCE_SAMPLING_DYNAMIC; i++, n_idx += stride) { + if (n_idx >= list_end) + break; + + uint current_idx = n_idx; + + mat3 positions = mat3( + get_positions_instanced(current_idx * 3 + 0), + get_positions_instanced(current_idx * 3 + 1), + get_positions_instanced(current_idx * 3 + 2)); + + float m = projected_tri_area(positions, p, n, V); + mass += m; + light_masses[i] = m; + } + + if (!(mass > 0)) { + pdf = 0; + return; + } + + rng.x *= mass; + int current_idx = -1; + mass *= pdf; // partitions + pdf = 0; + + #pragma unroll + for(uint i = 0, n_idx = list_start; i < MAX_BRUTEFORCE_SAMPLING_DYNAMIC; i++, n_idx += stride) { + if (n_idx >= list_end) + break; + + current_idx = int(n_idx); +#if 0 + mat3 positions = mat3x3( + get_positions_instanced(current_idx * 3 + 0), + get_positions_instanced(current_idx * 3 + 1), + get_positions_instanced(current_idx * 3 + 2)); + pdf = projected_tri_area(positions, p, n, V); +#else + pdf = light_masses[i]; + rng.x -= pdf; +#endif + + if (!(rng.x > 0)) + break; + } + + pdf /= mass; + + // assert: current_idx >= 0? + if (current_idx >= 0) { + mat3 positions = mat3x3( + get_positions_instanced(current_idx * 3 + 0), + get_positions_instanced(current_idx * 3 + 1), + get_positions_instanced(current_idx * 3 + 2)); + +#if SOLID_ANGLE_SAMPLING + position_light = sample_projected_triangle(p, positions, rng.yz, normal_light, pdf); +#else + vec3 bary = sample_triangle(rng.yz); + position_light = positions * bary; + vec3 a = cross(positions[1] - positions[0], positions[2] - positions[0]); + normal_light = a / length(a); + float area = length(a); + pdf *= 2.0 / area; +#endif + } +} + +#endif /*_LIGHT_LISTS_*/ + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/shader/path_tracer.h b/src/refresh/vkpt/shader/path_tracer.h new file mode 100644 index 0000000..738babf --- /dev/null +++ b/src/refresh/vkpt/shader/path_tracer.h @@ -0,0 +1,836 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#extension GL_NV_ray_tracing : require +#extension GL_ARB_separate_shader_objects : enable +#extension GL_EXT_nonuniform_qualifier : enable +#pragma optionNV(unroll all) + +#define RAY_GEN_DESCRIPTOR_SET_IDX 0 +layout(set = RAY_GEN_DESCRIPTOR_SET_IDX, binding = 0) +uniform accelerationStructureNV topLevelAS; + +#define INSTANCE_DYNAMIC_FLAG (1u << 31) +#define PRIM_ID_MASK (~INSTANCE_DYNAMIC_FLAG) + +#define GLOBAL_UBO_DESC_SET_IDX 1 +#include "global_ubo.h" + +#define GLOBAL_TEXTURES_DESC_SET_IDX 2 +#include "global_textures.h" + +#define VERTEX_BUFFER_DESC_SET_IDX 3 +#include "vertex_buffer.h" + +#define LIGHT_HIERARCHY_SET_IDX 4 +#include "light_hierarchy.h" + +#include "brdf.glsl" +#include "tiny_encryption_algorithm.h" +#include "utils.glsl" +#include "asvgf.glsl" +#include "read_visbuf.glsl" +#include "light_lists.h" +#include "water.glsl" + +#define ALBEDO_MULT 1.3 + +#define NUM_BOUNCES 2 + +#define RNG_PRIMARY_OFF_X 0 +#define RNG_PRIMARY_OFF_Y 1 + +#define RNG_NEE_LH(bounce) (2 + 0 + 7 * bounce) +#define RNG_NEE_TRI_X(bounce) (2 + 1 + 7 * bounce) +#define RNG_NEE_TRI_Y(bounce) (2 + 2 + 7 * bounce) +#define RNG_NEE_STATIC_DYNAMIC(bounce) (2 + 3 + 7 * bounce) +#define RNG_BRDF_X(bounce) (2 + 4 + 7 * bounce) +#define RNG_BRDF_Y(bounce) (2 + 5 + 7 * bounce) +#define RNG_BRDF_FRESNEL(bounce) (2 + 6 + 7 * bounce) + +/* no BRDF sampling in last bounce */ +#define NUM_RNG_PER_FRAME (RNG_NEE_STATIC_DYNAMIC(NUM_BOUNCES - 1) + 1) + +struct RayPayload { + vec2 barycentric; + uint instance_prim; +}; + +bool +found_intersection(RayPayload rp) +{ + return rp.instance_prim != ~0u; +} + +bool +is_dynamic_instance(RayPayload pay_load) +{ + return (pay_load.instance_prim & INSTANCE_DYNAMIC_FLAG) > 0; +} + +uint +get_primitive(RayPayload pay_load) +{ + return pay_load.instance_prim & PRIM_ID_MASK; +} + +Triangle +get_hit_triangle(RayPayload rp) +{ + uint prim = get_primitive(rp); + + return is_dynamic_instance(rp) + ? 
get_instanced_triangle(prim) + : get_bsp_triangle(prim); +} + +vec3 +get_hit_barycentric(RayPayload rp) +{ + vec3 bary; + bary.yz = rp.barycentric; + bary.x = 1.0 - bary.y - bary.z; + return bary; +} + +#if defined(SHADER_STAGE_RGEN) +# define RAY_PAYLOAD_SPEC rayPayloadNV +#elif defined(SHADER_STAGE_RMISS) || defined(SHADER_STAGE_RCHIT) +# define RAY_PAYLOAD_SPEC rayPayloadInNV +#else +# error no shader type specified +#endif + +#if defined(SHADER_STAGE_RGEN) +#define RT_PAYLOAD_NEE 0 +#define RT_PAYLOAD_BRDF 1 +layout(location = RT_PAYLOAD_NEE) RAY_PAYLOAD_SPEC RayPayload ray_payload_nee; +layout(location = RT_PAYLOAD_BRDF) RAY_PAYLOAD_SPEC RayPayload ray_payload_brdf; +#endif +#if defined(SHADER_STAGE_RCHIT) || defined(SHADER_STAGE_RMISS) +rayPayloadInNV RayPayload ray_payload; +#endif + + +#ifdef SHADER_STAGE_RCHIT + +hitAttributeNV vec3 hit_attribs; + + +void +main() +{ + ray_payload.barycentric = hit_attribs.xy; + ray_payload.instance_prim = int(gl_InstanceID != 0) << 31; /* we only have one instance */ + ray_payload.instance_prim |= gl_PrimitiveID; +} + +#endif + +#ifdef SHADER_STAGE_RMISS + +void +main() +{ + ray_payload.instance_prim = ~0u; +} + +#endif + +#ifdef SHADER_STAGE_RGEN + +struct Ray { + vec3 origin, direction; + float t_min, t_max; +}; + +uint rng_seed; + +void +trace_ray(Ray ray) +{ + const uint rayFlags = gl_RayFlagsOpaqueNV; + //const uint rayFlags = gl_RayFlagsOpaqueNV | gl_RayFlagsCullBackFacingTrianglesNV; + const uint cullMask = 0xff; + + traceNV( topLevelAS, rayFlags, cullMask, + 0 /*sbtRecordOffset*/, 0 /*sbtRecordStride*/, 0 /*missIndex*/, + ray.origin, ray.t_min, ray.direction, ray.t_max, RT_PAYLOAD_BRDF); +} + +bool +trace_shadow_ray(vec3 p1, vec3 p2) +{ + //const uint rayFlags = gl_RayFlagsOpaqueNV; + /* measured no performance impact for early hit termination */ + const uint rayFlags = gl_RayFlagsOpaqueNV | gl_RayFlagsTerminateOnFirstHitNV; + //const uint rayFlags = gl_RayFlagsOpaqueNV | gl_RayFlagsCullBackFacingTrianglesNV; + const uint cullMask = 0xff; + const float tmin = 0.01; + + vec3 l = p2 - p1; + float dist = length(l); + l /= dist; + + traceNV( topLevelAS, rayFlags, cullMask, + 0 /*sbtRecordOffset*/, 0 /*sbtRecordStride*/, 0 /*missIndex*/, + p2, tmin, -l, dist - tmin, RT_PAYLOAD_NEE); +#if 0 + /* hack that kills intersections if they hit a light source */ + if(found_intersection(ray_payload_nee)) { + if(!is_dynamic_instance(ray_payload_nee)) /* hit the static geometry */ + return false; + uint material_id = get_materials_instanced(get_primitive(ray_payload_nee)); + + return (material_id & (BSP_FLAG_LIGHT | BSP_FLAG_WATER)) > 0; + } + return true; +#else + return !found_intersection(ray_payload_nee); +#endif +} + +mat3 +construct_ONB_frisvad(vec3 normal) +{ + precise mat3 ret; + ret[1] = normal; + if(normal.z < -0.999805696f) { + ret[0] = vec3(0.0f, -1.0f, 0.0f); + ret[2] = vec3(-1.0f, 0.0f, 0.0f); + } + else { + precise float a = 1.0f / (1.0f + normal.z); + precise float b = -normal.x * normal.y * a; + ret[0] = vec3(1.0f - normal.x * normal.x * a, b, -normal.x); + ret[2] = vec3(b, 1.0f - normal.y * normal.y * a, -normal.y); + } + return ret; +} + +vec2 +sample_disk(vec2 uv) +{ + float theta = 2.0 * 3.141592653589 * uv.x; + float r = sqrt(uv.y); + + return vec2(cos(theta), sin(theta)) * r; +} + +vec3 +sample_cos_hemisphere(vec2 uv) +{ + vec2 disk = sample_disk(uv); + + return vec3(disk.x, sqrt(max(0.0, 1.0 - dot(disk, disk))), disk.y); +} + +vec3 +sample_sphere(vec2 uv) +{ + float y = 2.0 * uv.x - 1; + float theta = 2.0 * 3.141592653589 * uv.y; + 
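+	/* y is uniform in [-1, 1]; r = sqrt(1 - y*y) is the radius of the circle at that height,
+	 * so the returned direction is uniformly distributed over the unit sphere */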
float r = sqrt(1.0 - y * y); + return vec3(cos(theta) * r, y, sin(theta) * r); +} + +#if 0 +uvec2 +get_rng_uint(uint idx) +{ + uint pixel_idx = gl_LaunchIDNV.x + gl_LaunchSizeNV.x * gl_LaunchIDNV.y; + uint sample_idx = idx * 100 + uint(global_ubo.current_frame_idx); + //uint sample_idx = idx * 100 + uint(global_ubo.current_frame_idx[0]); + uvec2 _rng = encrypt_tea(uvec2(pixel_idx, sample_idx)); + return _rng; +} +#endif + +float +get_rng(uint idx) +{ + uvec3 p = uvec3(rng_seed, rng_seed >> 10, rng_seed >> 20); + p.z = (p.z * NUM_RNG_PER_FRAME + idx); + p &= uvec3(BLUE_NOISE_RES - 1, BLUE_NOISE_RES - 1, NUM_BLUE_NOISE_TEX - 1); + + return min(texelFetch(TEX_BLUE_NOISE, ivec3(p), 0).r, 0.9999999999999); + //return fract(vec2(get_rng_uint(idx)) / vec2(0xffffffffu)); +} + +vec3 +env_map(vec3 direction) +{ + return textureLod(TEX_ENVMAP, direction.xzy, 0).rgb; +} + +Ray +get_primary_ray(vec2 pos_cs) +{ + if(global_ubo.under_water > 0) + pos_cs.x += 20.0 / float(global_ubo.width) * sin(pos_cs.y * 10.0 + 5.0 * global_ubo.time); + vec4 v_near = global_ubo.invVP * vec4(pos_cs, -1.0, 1.0); + vec4 v_far = global_ubo.invVP * vec4(pos_cs, 0.0, 1.0); + v_near /= v_near.w; + v_far /= v_far.w; + + Ray ray; + ray.origin = v_near.xyz; + ray.direction = normalize(v_far.xyz - v_near.xyz); + ray.t_min = 0.01; + ray.t_max = 10000.0; + return ray; +} + +float +intersect_ray_plane(Ray r, vec3 p, vec3 n) +{ + return dot(p - r.origin, n) / dot(r.direction, n); +} + +void +store_first_hit( + vec3 normal, + float depth, + float fwidth_depth, + vec3 motion, + vec3 albedo, + vec4 vis_buf) +{ + ivec2 ipos = ivec2(gl_LaunchIDNV); + uint n_encoded = encode_normal(normal); + vec4 depth_normal = vec4(depth, uintBitsToFloat(n_encoded), 0, 0); + + vec4 m = vec4(motion, fwidth_depth); + + if((global_ubo.current_frame_idx & 1) == 0) { + imageStore(IMG_PT_DEPTH_NORMAL_A, ipos, depth_normal); + } + else { + imageStore(IMG_PT_DEPTH_NORMAL_B, ipos, depth_normal); + } + imageStore(IMG_PT_ALBEDO, ipos, vec4(albedo, 1.0)); + imageStore(IMG_PT_VISBUF, ipos, vis_buf); + imageStore(IMG_PT_MOTION, ipos, m); +} + +void +store_no_hit(vec3 albedo, vec3 motion) +{ + store_first_hit(vec3(0, 0, 1), -999, 9999.0, motion, albedo, vec4(-1, -1, uintBitsToFloat(uvec2(~0u)))); +} + +vec3 +compute_direct_illumination_static(vec3 V, vec3 position, vec3 normal, out vec3 L, int bounce, uint cluster, out vec3 pos_on_light) +{ + float pdf; + vec3 normal_light; + vec3 light_color; + + sample_light_list( + cluster, + V, + position, + normal, + pos_on_light, + normal_light, + light_color, + pdf, + vec3( + get_rng(RNG_NEE_LH(bounce)), + get_rng(RNG_NEE_TRI_X(bounce)), + get_rng(RNG_NEE_TRI_Y(bounce))) + ); + + if(pdf == 0) + return vec3(0); + + L = pos_on_light - position; + + float dist_light = length(L); + + L /= dist_light; + /* + float visibility = float(trace_shadow_ray(position, position_light, dist_light)); + if(visibility == 0) { + L = vec3(0); + return vec3(0); + } + L /= dist_light; + */ + + vec3 light_energy = light_color * 500.0f; + + float cos_l = max(0.0, -dot(normal_light, L)); + cos_l *= cos_l; + //cos_l *= cos_l; + + float geom = (max(0.0, dot(normal, L)) * cos_l) + / max(0.01, dist_light * dist_light); + return (light_energy * geom) / pdf; +} + +vec3 +compute_direct_illumination_dynamic(vec3 V, vec3 position, vec3 normal, out vec3 L, uint bounce, out vec3 pos_on_light) +{ + if(global_ubo.num_lights == 0) + return vec3(0); + + uint light_idx = min(global_ubo.num_lights - 1, uint(get_rng(RNG_NEE_LH(bounce)) * global_ubo.num_lights)); + float 
rng_triangle = fract(get_rng(RNG_NEE_LH(bounce)) * global_ubo.num_lights); + + float pdf = 1.0; + vec3 normal_light; + vec3 light_color; + sample_light_list_dynamic( + light_idx, + V, + position, + normal, + pos_on_light, + normal_light, + light_color, + pdf, + vec3(rng_triangle, get_rng(RNG_NEE_TRI_X(bounce)), get_rng(RNG_NEE_TRI_Y(bounce)))); + + if(pdf == 0) + return vec3(0); + + L = pos_on_light - position; + float r2 = dot(L, L); + float r = sqrt(r2); + L /= r; + + //float geom = 1.0; + /* sad panda would like to have MIS to avoid heavy clamping */ + float geom = max(-dot(normal_light, L), 0.0) * max(dot(normal, L), 0) / max(r2, 1000.0); + + //pdf = max(0.001, pdf); + //geom = min(geom, 500.0); + + return light_color * float(global_ubo.num_lights) * geom * 3000.0 / pdf; +} + +bool +is_water(uint material) +{ + return (material & (BSP_FLAG_WATER | BSP_FLAG_LIGHT)) == BSP_FLAG_WATER; +} + +bool +is_lava(uint material) +{ + uint flag = BSP_FLAG_WATER | BSP_FLAG_LIGHT; + return (material & flag) == flag; +} + +vec4 +path_tracer() +{ + ivec2 ipos = ivec2(gl_LaunchIDNV); + rng_seed = (global_ubo.current_frame_idx & 1) == 0 + ? texelFetch(TEX_ASVGF_RNG_SEED_A, ipos, 0).r + : texelFetch(TEX_ASVGF_RNG_SEED_B, ipos, 0).r; + vec3 position; + vec3 normal; + vec3 albedo = vec3(1); + vec3 direction; + uint material_id; + vec3 primary_albedo = vec3(1); + + vec3 contrib = vec3(0); + vec3 throughput = vec3(1); + + uint cluster_idx = ~0u; + + bool temporal_accum = true; + + { + /* box muller transform */ + vec2 pixel_offset = vec2(get_rng(RNG_PRIMARY_OFF_X), get_rng(RNG_PRIMARY_OFF_Y)); + pixel_offset.x = sqrt(-2.0 * log(pixel_offset.x)); + pixel_offset = clamp(pixel_offset, vec2(-1), vec2(1)) * 0.5; // temporal filter breaks otherwise + + const vec2 pixel_center = vec2(gl_LaunchIDNV.xy) + vec2(0.5); + const vec2 inUV = (pixel_center + pixel_offset) / vec2(gl_LaunchSizeNV.xy); + + vec2 screen_coord_cs = inUV * 2.0 - vec2(1.0); + + bool is_gradient; + { + uint u = texelFetch(TEX_ASVGF_GRAD_SMPL_POS, ipos / GRAD_DWN, 0).r; + + ivec2 grad_strata_pos = ivec2( + u >> (STRATUM_OFFSET_SHIFT * 0), + u >> (STRATUM_OFFSET_SHIFT * 1)) & STRATUM_OFFSET_MASK; + + is_gradient = (u > 0 && all(equal(grad_strata_pos, ipos % GRAD_DWN))); + } + + Ray ray = get_primary_ray(screen_coord_cs); + + if(is_gradient) { + /* gradient samples only need to verify visibility but need to + * maintain the precise location */ + vec3 pos_ws = texelFetch(TEX_ASVGF_POS_WS_FWD, ipos / GRAD_DWN, 0).xyz; + ray.origin = global_ubo.cam_pos.xyz; + ray.direction = pos_ws - ray.origin; + float len = length(ray.direction); + ray.direction /= len; + ray.t_max = len - 0.1; + } + + trace_ray(ray); + + direction = ray.direction; + + if(!found_intersection(ray_payload_brdf) && !is_gradient) { + vec3 env = env_map(ray.direction); + store_no_hit(env_map(ray.direction), vec3(0)); + #ifndef RTX + return vec4(1, 1, 1, false); + #else + return vec4(env, false); + #endif + } + + vec4 vis_buf; + precise vec3 bary; + Triangle triangle; + /* reprojection was valid for the gradient sample */ + if(is_gradient && !found_intersection(ray_payload_brdf)) { + vis_buf = texelFetch(TEX_ASVGF_VISBUF_FWD, ipos / GRAD_DWN, 0); + bary.yz = vis_buf.xy; + bary.x = 1.0 - bary.y - bary.z; + } + else { + uvec2 v; + if(is_dynamic_instance(ray_payload_brdf)) { + unpack_instance_id_triangle_idx( + get_instance_id_instanced(get_primitive(ray_payload_brdf)), + v.x, v.y); + } + else { + v.x = ~0u; + v.y = get_primitive(ray_payload_brdf); + } + + bary = 
get_hit_barycentric(ray_payload_brdf); + vis_buf = vec4(bary.yz, uintBitsToFloat(v)); + + if(is_gradient) { /* gradient sample became occluded, mask out */ + imageStore(IMG_ASVGF_GRAD_SMPL_POS, ipos / GRAD_DWN, uvec4(0)); + } + is_gradient = false; + } + visbuf_get_triangle(triangle, vis_buf); + + /* world-space */ + position = triangle.positions * bary; + normal = normalize(triangle.normals * bary); + material_id = triangle.material_id; + vec2 tex_coord = triangle.tex_coords * bary; + + /* compute view-space derivatives of depth and clip-space motion vectors */ + /* cannot use ray-t as svgf expects closest distance to plane */ + Ray ray_x = get_primary_ray(screen_coord_cs + vec2(2.0 / float(global_ubo.width), 0)); + Ray ray_y = get_primary_ray(screen_coord_cs - vec2(0, 2.0 / float(global_ubo.height))); + + vec3 bary_x = compute_barycentric(triangle.positions, ray_x.origin, ray_x.direction); + vec3 bary_y = compute_barycentric(triangle.positions, ray_y.origin, ray_y.direction); + + vec3 pos_ws_x= triangle.positions * bary_x; + vec3 pos_ws_y= triangle.positions * bary_y; + + vec2 tex_coord_x = triangle.tex_coords * bary_x; + vec2 tex_coord_y = triangle.tex_coords * bary_y; + tex_coord_x -= tex_coord; + tex_coord_y -= tex_coord; + + tex_coord_x *= 0.5; + tex_coord_y *= 0.5; + + primary_albedo = global_textureGrad(triangle.material_id, tex_coord, tex_coord_x, tex_coord_y).rgb * ALBEDO_MULT; + + vec3 pos_ws_curr = position, pos_ws_prev; + { + vec3 bary; + bary.yz = vis_buf.xy; + bary.x = 1.0 - bary.y - bary.z; + Triangle t_prev; + visbuf_get_triangle_backprj(t_prev, vis_buf); + pos_ws_prev = t_prev.positions * bary; + } + + vec3 pos_curr_cs = (global_ubo.VP * vec4(pos_ws_curr, 1.0)).xyw; + vec3 pos_prev_cs = (global_ubo.VP_prev * vec4(pos_ws_prev, 1.0)).xyw; + float depth_vs_x = (global_ubo.VP * vec4(pos_ws_x, 1.0)).w; + float depth_vs_y = (global_ubo.VP * vec4(pos_ws_y, 1.0)).w; + + pos_curr_cs.xy /= pos_curr_cs.z; + pos_prev_cs.xy /= pos_prev_cs.z; + + float fwidth_depth = 1.0 / max(1e-4, (abs(depth_vs_x - pos_curr_cs.z) + abs(depth_vs_y - pos_curr_cs.z))); + + vec3 motion = vec3(pos_prev_cs - pos_curr_cs); + + if(is_water(material_id)) { + vec3 water_normal = waterd(position.xy * 0.1).xzy; + float F = pow(1.0 - max(0.0, -dot(direction, water_normal)), 5.0); + direction = reflect(direction, water_normal); + trace_ray(Ray(position, direction, 0.01, 10000.0)); + throughput *= mix(vec3(0.1, 0.1, 0.15), vec3(1.0), F); + contrib += (1.0 - F) * vec3(0.2, 0.4, 0.4) * 0.5; + + if(!found_intersection(ray_payload_brdf)) + { + primary_albedo = env_map(direction); // * throughput; + store_no_hit(primary_albedo, motion); + return vec4(contrib, false); + } + + Triangle triangle = get_hit_triangle(ray_payload_brdf); + vec3 bary = get_hit_barycentric(ray_payload_brdf); + vec2 tex_coord = triangle.tex_coords * bary; + + /* world-space */ + position = triangle.positions * bary; + normal = normalize(triangle.normals * bary); + material_id = triangle.material_id; + albedo = global_textureLod(triangle.material_id, tex_coord, 2).rgb * ALBEDO_MULT; + cluster_idx = triangle.cluster; + primary_albedo = albedo; + albedo = vec3(1); + + temporal_accum = true; + + if(dot(direction, normal) > 0) + normal = -normal; + } + + if((material_id & BSP_FLAG_TRANSPARENT) > 0) { + temporal_accum = false; + contrib += vec3(0.05); // XXX hack! 
makes windows appear a bit milky + } + + if((material_id & BSP_FLAG_LIGHT) > 0) { + if(is_lava(material_id)) { + primary_albedo = lava(position.xy * 0.03); + //primary_albedo *= 5.0; + //primary_albedo += vec3(1) * abs(sin(position.x * 0.01 + 1.0 * global_ubo.time)) * max(0.0, (1.0 - luminance(primary_albedo))); + } + store_no_hit(primary_albedo, motion); + #ifndef RTX + return vec4(1, 1, 1, false); + #endif + } + else { + store_first_hit(normal, pos_curr_cs.z, fwidth_depth, motion, primary_albedo, vis_buf); + } + + cluster_idx = triangle.cluster; + } + +#ifdef RTX + + throughput = vec3(1); + contrib = primary_albedo * throughput; + + const int num_bounces = 10; + for(int i = 0; i < num_bounces; i++) { + + if(dot(direction, normal) > 0) + normal = -normal; + + direction = reflect(direction, normal); + + trace_ray(Ray(position, direction, 0.01, 10000.0)); + + if(!found_intersection(ray_payload_brdf)) { + vec3 env = env_map(direction); + contrib += env * throughput; + break; + } + + { + Triangle triangle = get_hit_triangle(ray_payload_brdf); + vec3 bary = get_hit_barycentric(ray_payload_brdf); + vec2 tex_coord = triangle.tex_coords * bary; + + /* world-space */ + position = triangle.positions * bary; + normal = normalize(triangle.normals * bary); + material_id = triangle.material_id; + albedo = global_textureLod(triangle.material_id, tex_coord, i / 2).rgb; + cluster_idx = triangle.cluster; + + contrib += albedo * throughput; + throughput *= 0.8; + + } + } + + return vec4(contrib * 2.0, false); + +#else + int bounce = 0; + while(bounce < NUM_BOUNCES) { + //for(int bounce = 0; bounce < NUM_BOUNCES; bounce++) { + + if((material_id & BSP_FLAG_TRANSPARENT) > 0) { + } + else if((material_id & BSP_FLAG_LIGHT) > 0) { + break; + } + else { + vec3 pos_on_light_static; + vec3 pos_on_light_dynamic; + + vec3 contrib_static; + vec3 contrib_dynamic; + + { /* static illumination */ + vec3 L_nee; + vec3 nee = compute_direct_illumination_static(-direction, position, normal, L_nee, bounce, cluster_idx, pos_on_light_static); + vec3 brdf = bounce == 0 || true + ? albedo * blinn_phong_based_brdf(-direction, L_nee, normal, 20.0) + : albedo / M_PI; + contrib_static = nee * throughput * brdf; + } + { /* dynamic illumination */ + vec3 L_nee; + vec3 nee = compute_direct_illumination_dynamic(-direction, position, normal, L_nee, bounce, pos_on_light_dynamic); + vec3 brdf = bounce == 0 + ? albedo * blinn_phong_based_brdf(-direction, L_nee, normal, 20.0) + : albedo / M_PI; + contrib_dynamic = nee * throughput * brdf; + } + + float l_static = luminance(abs(contrib_static)); + float l_dynamic = luminance(abs(contrib_dynamic)); + float w = l_static / (l_static + l_dynamic); + if(!isnan(w)) { + float rng = get_rng(RNG_NEE_STATIC_DYNAMIC(bounce)); + float vis = float(trace_shadow_ray(position, rng < w ? 
pos_on_light_static : pos_on_light_dynamic)); + if(rng < w) { + contrib += vis * contrib_static / w; + } + else { + contrib += vis * contrib_dynamic / (1.0 - w); + } + } + } + + if(bounce == NUM_BOUNCES - 1) + break; + + if((material_id & BSP_FLAG_TRANSPARENT) > 0) { + } + else { + vec2 rng3 = vec2(get_rng(RNG_BRDF_X(bounce)), get_rng(RNG_BRDF_Y(bounce))); +#if 0 + mat3 ONB = construct_ONB_frisvad(normal); + + vec3 diffuse_dir = sample_cos_hemisphere(rng3); + direction = normalize(ONB * diffuse_dir); +#else + /* perfect importance sampling for the artistically driven KIT BRDF */ + + float rng_frensel = get_rng(RNG_BRDF_FRESNEL(bounce)); + + float F = pow(1.0 - max(0.0, -dot(direction, normal)), 5.0); + F = mix(0.5, 1.0, F); + + + vec3 dir_sphere = sample_sphere(rng3); + + if(rng_frensel < F) { + vec3 dir = reflect(direction, normal); + direction = normalize(dir_sphere + dir * 2.0); + if(dot(direction, normal) < 0.0) + direction = reflect(direction, normal); + } + else { + direction = normalize(dir_sphere + normal); + } + +#endif + + + throughput *= albedo; + } + + trace_ray(Ray(position, direction, 0.01, 10000.0)); + + if(!found_intersection(ray_payload_brdf)) { + vec3 env = env_map(direction); + env *= env; + contrib += throughput * env * 20.0; + break; + } + + { + Triangle triangle = get_hit_triangle(ray_payload_brdf); + vec3 bary = get_hit_barycentric(ray_payload_brdf); + vec2 tex_coord = triangle.tex_coords * bary; + + /* world-space */ + position = triangle.positions * bary; + normal = normalize(triangle.normals * bary); + material_id = triangle.material_id; + albedo = global_textureLod(triangle.material_id, tex_coord, 5).rgb + vec3(0.01); + cluster_idx = triangle.cluster; + + if(dot(direction, normal) > 0) + normal = -normal; + } + if(is_water(material_id)) { + albedo /= 0.0001 + max(albedo.r, max(albedo.g, albedo.b)); + if(direction.z > 0) + contrib += albedo * throughput * 1.0; + else + contrib += albedo * throughput * 0.5; + } + + if((material_id & BSP_FLAG_TRANSPARENT) == 0) + bounce++; + } + + return vec4(contrib, temporal_accum); // / (primary_albedo + 0.00001); +#endif +} + +void +main() +{ + vec4 color = path_tracer(); + color.rgb = max(vec3(0), color.rgb); + if(any(isnan(color.rgb))) + color.rgb = vec3(0); + + color.rgb = clamp_color(color.rgb, 128.0); + + if((global_ubo.current_frame_idx & 1) == 0) { + imageStore(IMG_PT_COLOR_A, ivec2(gl_LaunchIDNV.xy), color); + } + else { + imageStore(IMG_PT_COLOR_B, ivec2(gl_LaunchIDNV.xy), color); + } +} + +#endif +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/shader/path_tracer.rchit b/src/refresh/vkpt/shader/path_tracer.rchit new file mode 100644 index 0000000..25364e4 --- /dev/null +++ b/src/refresh/vkpt/shader/path_tracer.rchit @@ -0,0 +1,22 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable + +#include "path_tracer.h" diff --git a/src/refresh/vkpt/shader/path_tracer.rgen b/src/refresh/vkpt/shader/path_tracer.rgen new file mode 100644 index 0000000..25364e4 --- /dev/null +++ b/src/refresh/vkpt/shader/path_tracer.rgen @@ -0,0 +1,22 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable + +#include "path_tracer.h" diff --git a/src/refresh/vkpt/shader/path_tracer.rmiss b/src/refresh/vkpt/shader/path_tracer.rmiss new file mode 100644 index 0000000..25364e4 --- /dev/null +++ b/src/refresh/vkpt/shader/path_tracer.rmiss @@ -0,0 +1,22 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable + +#include "path_tracer.h" diff --git a/src/refresh/vkpt/shader/path_tracer_rtx.rchit b/src/refresh/vkpt/shader/path_tracer_rtx.rchit new file mode 100644 index 0000000..17e8e70 --- /dev/null +++ b/src/refresh/vkpt/shader/path_tracer_rtx.rchit @@ -0,0 +1,23 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable + +#define RTX +#include "path_tracer.h" diff --git a/src/refresh/vkpt/shader/path_tracer_rtx.rgen b/src/refresh/vkpt/shader/path_tracer_rtx.rgen new file mode 100644 index 0000000..17e8e70 --- /dev/null +++ b/src/refresh/vkpt/shader/path_tracer_rtx.rgen @@ -0,0 +1,23 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable + +#define RTX +#include "path_tracer.h" diff --git a/src/refresh/vkpt/shader/path_tracer_rtx.rmiss b/src/refresh/vkpt/shader/path_tracer_rtx.rmiss new file mode 100644 index 0000000..17e8e70 --- /dev/null +++ b/src/refresh/vkpt/shader/path_tracer_rtx.rmiss @@ -0,0 +1,23 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 460 +#extension GL_GOOGLE_include_directive : enable + +#define RTX +#include "path_tracer.h" diff --git a/src/refresh/vkpt/shader/read_visbuf.glsl b/src/refresh/vkpt/shader/read_visbuf.glsl new file mode 100644 index 0000000..b32b1aa --- /dev/null +++ b/src/refresh/vkpt/shader/read_visbuf.glsl @@ -0,0 +1,239 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +uint +pack_instance_id_triangle_idx(uint instance_id, uint triangle_idx) +{ + return instance_id | (triangle_idx << 10); +} + +void +unpack_instance_id_triangle_idx(uint u, out uint instance_id, out uint triangle_idx) +{ + instance_id = (u & (1 << 31)) | (u & ((1 << 10) - 1)); + triangle_idx = (u & ~(1 << 31)) >> 10; +} + +bool is_world_instance(uint u) { return (u & (1 << 31)) > 0; } +bool is_static_world_model(uint u) { return u == ~0u; } + +uint +map_instance_fwd(uint id) +{ + if(is_world_instance(id)) { + if(!is_static_world_model(id)) { + id &= ~(1 << 31); + id = global_ubo.world_prev_to_current[id / 4][id % 4]; + id |= 1 << 31; + } + } + else { + id = global_ubo.model_prev_to_current[id / 4][id % 4]; + } + return id; +} + +void +visbuf_get_triangle(out Triangle t, uint instance, uint primitive) +{ + if(is_world_instance(instance)) { + if(!is_static_world_model(instance)) { + instance &= ~(1 << 31); + primitive += global_ubo.bsp_prim_offset[instance / 4][instance % 4]; + } + + t = get_bsp_triangle(primitive); + + if(!is_static_world_model(instance)) { + mat4 M = global_ubo.bsp_mesh_instances[instance].M; + + t.positions[0] = vec3(M * vec4(t.positions[0], 1.0)); + t.positions[1] = vec3(M * vec4(t.positions[1], 1.0)); + t.positions[2] = vec3(M * vec4(t.positions[2], 1.0)); + + t.normals[0] = vec3(M * vec4(t.normals[0], 0.0)); + t.normals[1] = vec3(M * vec4(t.normals[1], 0.0)); + t.normals[2] = vec3(M * vec4(t.normals[2], 0.0)); + + t.cluster = global_ubo.bsp_cluster_id[instance / 4][instance % 4]; + } + } + else { + uint idx_offset = global_ubo.model_idx_offset[instance / 4][instance % 4]; + uint idx = primitive; + + ModelInstance mi_curr = global_ubo.model_instances[instance]; + uint vertex_off_curr = mi_curr.mat_offset_backlerp.y; + uint vertex_off_prev = mi_curr.mat_offset_backlerp.z; // referes to animation frame + + Triangle t_curr = get_model_triangle(idx, idx_offset, vertex_off_curr); + Triangle t_prev = get_model_triangle(idx, idx_offset, vertex_off_prev); + + mat4 M = mi_curr.M; + float backlerp = uintBitsToFloat(mi_curr.mat_offset_backlerp.w); + + t.positions[0] = (M * vec4(mix(t_curr.positions[0], t_prev.positions[0], backlerp), 1.0)).xyz; + t.positions[1] = (M * vec4(mix(t_curr.positions[1], t_prev.positions[1], backlerp), 1.0)).xyz; + t.positions[2] = (M * vec4(mix(t_curr.positions[2], t_prev.positions[2], backlerp), 1.0)).xyz; + + t.normals[0] = (M * vec4(mix(t_curr.normals[0], t_prev.normals[0], backlerp), 0.0)).xyz; + t.normals[1] = (M * vec4(mix(t_curr.normals[1], t_prev.normals[1], backlerp), 0.0)).xyz; + t.normals[2] = (M * vec4(mix(t_curr.normals[2], t_prev.normals[2], backlerp), 0.0)).xyz; + + t.tex_coords = t_curr.tex_coords; + t.material_id = mi_curr.mat_offset_backlerp.x; + + t.cluster = global_ubo.model_cluster_id[instance / 4][instance % 4]; + } +} + +void +visbuf_get_triangle(out Triangle t, vec4 vis) +{ + uvec2 v = floatBitsToUint(vis.zw); + visbuf_get_triangle(t, v.x, v.y); +} + +void +visbuf_get_triangle_backprj(out Triangle t, uint instance, uint primitive) +{ + if(is_world_instance(instance)) { + if(!is_static_world_model(instance)) { + instance &= ~(1 << 31); + primitive += global_ubo.bsp_prim_offset[instance / 4][instance % 4]; + } + + t = get_bsp_triangle(primitive); + + if(!is_static_world_model(instance)) { + instance = global_ubo.world_current_to_prev[instance / 4][instance % 4]; + mat4 M = global_ubo.bsp_mesh_instances_prev[instance].M; + + t.positions[0] = vec3(M * vec4(t.positions[0], 1.0)); + t.positions[1] = vec3(M * vec4(t.positions[1], 
1.0)); + t.positions[2] = vec3(M * vec4(t.positions[2], 1.0)); + + t.normals[0] = vec3(M * vec4(t.normals[0], 0.0)); + t.normals[1] = vec3(M * vec4(t.normals[1], 0.0)); + t.normals[2] = vec3(M * vec4(t.normals[2], 0.0)); + + t.cluster = global_ubo.bsp_cluster_id[instance / 4][instance % 4]; + } + } + else { + uint idx_offset = global_ubo.model_idx_offset[instance / 4][instance % 4]; + uint idx = primitive; + + instance = global_ubo.model_current_to_prev[instance / 4][instance % 4]; + + ModelInstance mi_curr = global_ubo.model_instances_prev[instance]; + uint vertex_off_curr = mi_curr.mat_offset_backlerp.y; + uint vertex_off_prev = mi_curr.mat_offset_backlerp.z; // referes to animation frame + + Triangle t_curr = get_model_triangle(idx, idx_offset, vertex_off_curr); + Triangle t_prev = get_model_triangle(idx, idx_offset, vertex_off_prev); + + mat4 M = mi_curr.M; + float backlerp = uintBitsToFloat(mi_curr.mat_offset_backlerp.w); + + t.positions[0] = (M * vec4(mix(t_curr.positions[0], t_prev.positions[0], backlerp), 1.0)).xyz; + t.positions[1] = (M * vec4(mix(t_curr.positions[1], t_prev.positions[1], backlerp), 1.0)).xyz; + t.positions[2] = (M * vec4(mix(t_curr.positions[2], t_prev.positions[2], backlerp), 1.0)).xyz; + + t.normals[0] = (M * vec4(mix(t_curr.normals[0], t_prev.normals[0], backlerp), 0.0)).xyz; + t.normals[1] = (M * vec4(mix(t_curr.normals[1], t_prev.normals[1], backlerp), 0.0)).xyz; + t.normals[2] = (M * vec4(mix(t_curr.normals[2], t_prev.normals[2], backlerp), 0.0)).xyz; + + t.tex_coords = t_curr.tex_coords; + t.material_id = mi_curr.mat_offset_backlerp.x; + } +} + +void +visbuf_get_triangle_backprj(out Triangle t, vec4 vis) +{ + uvec2 v = floatBitsToUint(vis.zw); + visbuf_get_triangle_backprj(t, v.x, v.y); +} + +bool +visbuf_get_triangle_fwdprj(out Triangle t, uint instance, uint primitive) +{ + if(is_world_instance(instance)) { + if(!is_static_world_model(instance)) { + instance &= ~(1 << 31); + primitive += global_ubo.bsp_prim_offset[instance / 4][instance % 4]; + } + + t = get_bsp_triangle(primitive); + + if(!is_static_world_model(instance)) { + instance = global_ubo.world_prev_to_current[instance / 4][instance % 4]; + if(instance == ~0u) + return false; + mat4 M = global_ubo.bsp_mesh_instances[instance].M; + + t.positions[0] = vec3(M * vec4(t.positions[0], 1.0)); + t.positions[1] = vec3(M * vec4(t.positions[1], 1.0)); + t.positions[2] = vec3(M * vec4(t.positions[2], 1.0)); + + t.normals[0] = vec3(M * vec4(t.normals[0], 0.0)); + t.normals[1] = vec3(M * vec4(t.normals[1], 0.0)); + t.normals[2] = vec3(M * vec4(t.normals[2], 0.0)); + + t.cluster = global_ubo.bsp_cluster_id[instance / 4][instance % 4]; + } + } + else { + instance = global_ubo.model_prev_to_current[instance / 4][instance % 4]; + if(instance == ~0u) + return false; + + uint idx_offset = global_ubo.model_idx_offset[instance / 4][instance % 4]; + uint idx = primitive; + + ModelInstance mi_curr = global_ubo.model_instances[instance]; + uint vertex_off_curr = mi_curr.mat_offset_backlerp.y; + uint vertex_off_prev = mi_curr.mat_offset_backlerp.z; // referes to animation frame + + Triangle t_curr = get_model_triangle(idx, idx_offset, vertex_off_curr); + Triangle t_prev = get_model_triangle(idx, idx_offset, vertex_off_prev); + + mat4 M = mi_curr.M; + float backlerp = uintBitsToFloat(mi_curr.mat_offset_backlerp.w); + + t.positions[0] = (M * vec4(mix(t_curr.positions[0], t_prev.positions[0], backlerp), 1.0)).xyz; + t.positions[1] = (M * vec4(mix(t_curr.positions[1], t_prev.positions[1], backlerp), 1.0)).xyz; + 
t.positions[2] = (M * vec4(mix(t_curr.positions[2], t_prev.positions[2], backlerp), 1.0)).xyz; + + t.normals[0] = (M * vec4(mix(t_curr.normals[0], t_prev.normals[0], backlerp), 0.0)).xyz; + t.normals[1] = (M * vec4(mix(t_curr.normals[1], t_prev.normals[1], backlerp), 0.0)).xyz; + t.normals[2] = (M * vec4(mix(t_curr.normals[2], t_prev.normals[2], backlerp), 0.0)).xyz; + + t.tex_coords = t_curr.tex_coords; + t.material_id = mi_curr.mat_offset_backlerp.x; + } + return true; +} + +bool +visbuf_get_triangle_fwdprj(out Triangle t, vec4 vis) +{ + uvec2 v = floatBitsToUint(vis.zw); + return visbuf_get_triangle_fwdprj(t, v.x, v.y); +} diff --git a/src/refresh/vkpt/shader/stretch_pic.frag b/src/refresh/vkpt/shader/stretch_pic.frag new file mode 100644 index 0000000..4cd1966 --- /dev/null +++ b/src/refresh/vkpt/shader/stretch_pic.frag @@ -0,0 +1,42 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#version 450 +#extension GL_GOOGLE_include_directive : enable +#extension GL_ARB_separate_shader_objects : enable +#extension GL_EXT_nonuniform_qualifier : enable + +#define GLOBAL_TEXTURES_DESC_SET_IDX 1 +#include "global_textures.h" + +layout(location = 0) in vec4 color; +layout(location = 1) in flat uint tex_id; +layout(location = 2) in vec2 tex_coord; + +layout(location = 0) out vec4 outColor; + +void +main() +{ + vec4 c = color; + if(tex_id != ~0u) { + vec2 tc = tex_coord; + c *= global_textureLod(tex_id, tc, 0); + } + outColor = c; +} diff --git a/src/refresh/vkpt/shader/stretch_pic.vert b/src/refresh/vkpt/shader/stretch_pic.vert new file mode 100644 index 0000000..2251805 --- /dev/null +++ b/src/refresh/vkpt/shader/stretch_pic.vert @@ -0,0 +1,58 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#version 450 +#extension GL_ARB_separate_shader_objects : enable + +out gl_PerVertex { + vec4 gl_Position; +}; + +layout(location = 0) out vec4 color; +layout(location = 1) out flat uint tex_id; +layout(location = 2) out vec2 tex_coord; + +struct StretchPic { + float x, y, w, h; + float s, t, w_s, h_t; + uint color, tex_handle; +}; + +layout(set = 0, binding = 0) buffer SBO { + StretchPic stretch_pics[]; +}; + +vec2 positions[4] = vec2[]( + vec2(0.0, 1.0), + vec2(0.0, 0.0), + vec2(1.0, 1.0), + vec2(1.0, 0.0) +); + +void +main() +{ + StretchPic sp = stretch_pics[gl_InstanceIndex]; + vec2 pos = positions[gl_VertexIndex] * vec2(sp.w, sp.h) + vec2(sp.x, sp.y); + color = unpackUnorm4x8(sp.color); + tex_coord = vec2(sp.s, sp.t) + positions[gl_VertexIndex] * vec2(sp.w_s, sp.h_t); + tex_id = sp.tex_handle; + + gl_Position = vec4(pos, 0.0, 1.0); +} + diff --git a/src/refresh/vkpt/shader/tiny_encryption_algorithm.h b/src/refresh/vkpt/shader/tiny_encryption_algorithm.h new file mode 100644 index 0000000..dad5937 --- /dev/null +++ b/src/refresh/vkpt/shader/tiny_encryption_algorithm.h @@ -0,0 +1,37 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +uvec2 +encrypt_tea(uvec2 arg) +{ + const uint key[] = { + 0xa341316c, 0xc8013ea4, 0xad90777d, 0x7e95761e + }; + uint v0 = arg[0], v1 = arg[1]; + uint sum = 0; + uint delta = 0x9e3779b9; + + #pragma unroll + for(int i = 0; i < 16; i++) { + //for(int i = 0; i < 32; i++) { + sum += delta; + v0 += ((v1 << 4) + key[0]) ^ (v1 + sum) ^ ((v1 >> 5) + key[1]); + v1 += ((v0 << 4) + key[2]) ^ (v0 + sum) ^ ((v0 >> 5) + key[3]); + } + return uvec2(v0, v1); +} diff --git a/src/refresh/vkpt/shader/utils.glsl b/src/refresh/vkpt/shader/utils.glsl new file mode 100644 index 0000000..1ef7068 --- /dev/null +++ b/src/refresh/vkpt/shader/utils.glsl @@ -0,0 +1,220 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#ifndef _GLSL_UTILS_GLSL +#define _GLSL_UTILS_GLSL +float +linear_to_srgb(float linear) +{ + return (linear <= 0.0031308f) + ? 
linear * 12.92f + : pow(linear, (1.0f / 2.4f)) * (1.055f) - 0.055f; +} + +vec3 +linear_to_srgb(vec3 l) +{ + return vec3( + linear_to_srgb(l.x), + linear_to_srgb(l.y), + linear_to_srgb(l.z)); +} + +float +luminance(in vec3 color) +{ + return dot(color, vec3(0.299, 0.587, 0.114)); +} + +float +clamped_luminance(vec3 c) +{ + float l = luminance(c); + return min(l, 150.0); +} + +vec3 +clamp_color(vec3 color, float max_val) +{ + float m = max(color.r, max(color.g, color.b)); + if(m < max_val) + return color; + + return color * (max_val / m); +} + +vec3 +decode_normal(uint enc) +{ + uint projected0 = enc & 0xffffu; + uint projected1 = enc >> 16; + // copy sign bit and paste rest to upper mantissa to get float encoded in [1,2). + // the employed bits will only go to [1,1.5] for valid encodings. + uint vec0 = 0x3f800000u | ((projected0 & 0x7fffu)<<8); + uint vec1 = 0x3f800000u | ((projected1 & 0x7fffu)<<8); + // transform to [-1,1] to be able to precisely encode the important academic special cases {-1,0,1} + vec3 n; + n.x = uintBitsToFloat(floatBitsToUint(2.0f*uintBitsToFloat(vec0) - 2.0f) | ((projected0 & 0x8000u)<<16)); + n.y = uintBitsToFloat(floatBitsToUint(2.0f*uintBitsToFloat(vec1) - 2.0f) | ((projected1 & 0x8000u)<<16)); + n.z = 1.0f - (abs(n.x) + abs(n.y)); + + if (n.z < 0.0f) { + float oldX = n.x; + n.x = (1.0f - abs(n.y)) * ((oldX < 0.0f) ? -1.0f : 1.0f); + n.y = (1.0f - abs(oldX)) * ((n.y < 0.0f) ? -1.0f : 1.0f); + } + return normalize(n); +} + +uint +encode_normal(vec3 normal) +{ + uint projected0, projected1; + const float invL1Norm = 1.0f / dot(abs(normal), vec3(1)); + + // first find floating point values of octahedral map in [-1,1]: + float enc0, enc1; + if (normal[2] < 0.0f) { + enc0 = (1.0f - abs(normal[1] * invL1Norm)) * ((normal[0] < 0.0f) ? -1.0f : 1.0f); + enc1 = (1.0f - abs(normal[0] * invL1Norm)) * ((normal[1] < 0.0f) ? -1.0f : 1.0f); + } + else { + enc0 = normal[0] * invL1Norm; + enc1 = normal[1] * invL1Norm; + } + // then encode: + uint enci0 = floatBitsToUint((abs(enc0) + 2.0f)/2.0f); + uint enci1 = floatBitsToUint((abs(enc1) + 2.0f)/2.0f); + // copy over sign bit and truncated mantissa. could use rounding for increased precision here. 
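+	// resulting 16-bit layout per component (descriptive note): bit 15 holds the sign of the
+	// octahedral coordinate, bits 0..14 hold the top 15 mantissa bits of (|enc| + 2) / 2.
+	// that value lies in [1, 1.5], so its float exponent is constant and only the mantissa matters.
+	// e.g. enc = 0.25 -> (0.25 + 2) / 2 = 1.125 -> mantissa 0x100000 -> stored value 0x1000.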
+ projected0 = ((floatBitsToUint(enc0) & 0x80000000u)>>16) | ((enci0 & 0x7fffffu)>>8); + projected1 = ((floatBitsToUint(enc1) & 0x80000000u)>>16) | ((enci1 & 0x7fffffu)>>8); + // avoid -0 cases: + if((projected0 & 0x7fffu) == 0) projected0 = 0; + if((projected1 & 0x7fffu) == 0) projected1 = 0; + return (projected1 << 16) | projected0; +} + +float +reconstruct_view_depth(float depth, float near, float far) +{ + float z_ndc = 2.0 * depth - 1.0; + return 2.0 * near * far / (near + far - z_ndc * (far - near)); +} + +// Catmull-Rom filtering code from http://vec3.ca/bicubic-filtering-in-fewer-taps/ +void +BicubicCatmullRom(vec2 UV, vec2 texSize, out vec2 Sample[3], out vec2 Weight[3]) +{ + const vec2 invTexSize = 1.0 / texSize; + + vec2 tc = floor( UV - 0.5 ) + 0.5; + vec2 f = UV - tc; + vec2 f2 = f * f; + vec2 f3 = f2 * f; + + vec2 w0 = f2 - 0.5 * (f3 + f); + vec2 w1 = 1.5 * f3 - 2.5 * f2 + 1; + vec2 w3 = 0.5 * (f3 - f2); + vec2 w2 = 1 - w0 - w1 - w3; + + Weight[0] = w0; + Weight[1] = w1 + w2; + Weight[2] = w3; + + Sample[0] = tc - 1; + Sample[1] = tc + w2 / Weight[1]; + Sample[2] = tc + 2; + + Sample[0] *= invTexSize; + Sample[1] *= invTexSize; + Sample[2] *= invTexSize; +} + +/* uv is in pixel coordinates */ +vec4 +sample_texture_catmull_rom(sampler2D tex, vec2 uv) +{ + vec4 sum = vec4(0); + vec2 sampleLoc[3], sampleWeight[3]; + BicubicCatmullRom(uv, vec2(textureSize(tex, 0)), sampleLoc, sampleWeight); + for(int i = 0; i < 3; i++) { + for(int j = 0; j < 3; j++) { + vec2 uv = vec2(sampleLoc[j].x, sampleLoc[i].y); + vec4 c = textureLod(tex, uv, 0); + sum += c * vec4(sampleWeight[j].x * sampleWeight[i].y); + } + } + return sum; +} + +float +srgb_to_linear(float srgb) +{ + if(srgb <= 0.04045f) { + return srgb * (1.0f / 12.92f); + } + else { + return pow((srgb + 0.055f) * (1.0f / 1.055f), 2.4f); + } +} + +vec3 +srgb_to_linear(vec3 srgb) +{ + return vec3( + srgb_to_linear(srgb.x), + srgb_to_linear(srgb.y), + srgb_to_linear(srgb.z)); +} + +vec3 +viridis_quintic(float x) +{ + x = clamp(x, 0.0, 1.0); + vec4 x1 = vec4(1.0, x, x * x, x * x * x); // 1 x x2 x3 + vec4 x2 = x1 * x1.w * x; // x4 x5 x6 x7 + return srgb_to_linear(vec3( + dot(x1.xyzw, vec4(+0.280268003, -0.143510503, +2.225793877, -14.815088879)) + dot(x2.xy, vec2(+25.212752309, -11.772589584)), + dot(x1.xyzw, vec4(-0.002117546, +1.617109353, -1.909305070, +2.701152864 )) + dot(x2.xy, vec2(-1.685288385, +0.178738871 )), + dot(x1.xyzw, vec4(+0.300805501, +2.614650302, -12.019139090, +28.933559110)) + dot(x2.xy, vec2(-33.491294770, +13.762053843)))); +} + + +vec3 +compute_barycentric(mat3 v, vec3 ray_origin, vec3 ray_direction) +{ + vec3 edge1 = v[1] - v[0]; + vec3 edge2 = v[2] - v[0]; + + vec3 pvec = cross(ray_direction, edge2); + + float det = dot(edge1, pvec); + float inv_det = 1.0f / det; + + vec3 tvec = ray_origin - v[0]; + + float alpha = dot(tvec, pvec) * inv_det; + + vec3 qvec = cross(tvec, edge1); + + float beta = dot(ray_direction, qvec) * inv_det; + + return vec3(1.f - alpha - beta, alpha, beta); +} + +#endif /*_GLSL_UTILS_GLSL*/ diff --git a/src/refresh/vkpt/shader/vertex_buffer.h b/src/refresh/vkpt/shader/vertex_buffer.h new file mode 100644 index 0000000..3a2f52e --- /dev/null +++ b/src/refresh/vkpt/shader/vertex_buffer.h @@ -0,0 +1,267 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + + +#define MAX_VERT_BSP (1 << 21) + +#define MAX_VERT_MODEL (1 << 21) +#define MAX_IDX_MODEL (1 << 21) + +#define MAX_VERT_INSTANCED (1 << 21) +#define MAX_IDX_INSTANCED (MAX_VERT_INSTANCED / 3) + +#define MAX_LIGHT_LISTS (1 << 14) +#define MAX_LIGHT_LIST_NODES (1 << 20) + +#define ALIGN_SIZE_4(x, n) ((x * n + 3) & (~3)) + +#define VERTEX_BUFFER_BINDING_IDX 0 + +#ifdef VKPT_SHADER +#define uint32_t uint +#endif + +#define VERTEX_BUFFER_LIST \ + VERTEX_BUFFER_LIST_DO(float, 3, positions_bsp, (MAX_VERT_BSP )) \ + VERTEX_BUFFER_LIST_DO(float, 2, tex_coords_bsp, (MAX_VERT_BSP )) \ + VERTEX_BUFFER_LIST_DO(uint32_t, 1, materials_bsp, (MAX_VERT_BSP / 3 )) \ + VERTEX_BUFFER_LIST_DO(uint32_t, 1, clusters_bsp, (MAX_VERT_BSP / 3 )) \ + \ + VERTEX_BUFFER_LIST_DO(float, 3, positions_model, (MAX_VERT_MODEL )) \ + VERTEX_BUFFER_LIST_DO(float, 3, normals_model, (MAX_VERT_MODEL )) \ + VERTEX_BUFFER_LIST_DO(float, 2, tex_coords_model, (MAX_VERT_MODEL )) \ + VERTEX_BUFFER_LIST_DO(uint32_t, 3, idx_model, (MAX_IDX_MODEL )) \ + \ + VERTEX_BUFFER_LIST_DO(float, 3, positions_instanced, (MAX_VERT_MODEL )) \ + VERTEX_BUFFER_LIST_DO(float, 3, normals_instanced, (MAX_VERT_MODEL )) \ + VERTEX_BUFFER_LIST_DO(float, 2, tex_coords_instanced, (MAX_VERT_MODEL )) \ + VERTEX_BUFFER_LIST_DO(uint32_t, 1, clusters_instanced, (MAX_IDX_MODEL )) \ + VERTEX_BUFFER_LIST_DO(uint32_t, 1, materials_instanced, (MAX_IDX_MODEL )) \ + VERTEX_BUFFER_LIST_DO(uint32_t, 1, instance_id_instanced, (MAX_IDX_MODEL )) \ + \ + VERTEX_BUFFER_LIST_DO(uint32_t, 1, light_list_offsets, (MAX_LIGHT_LISTS )) \ + VERTEX_BUFFER_LIST_DO(uint32_t, 1, light_list_lights, (MAX_LIGHT_LIST_NODES)) \ + + +struct VertexBuffer +{ +#define VERTEX_BUFFER_LIST_DO(type, dim, name, size) \ + type name[ALIGN_SIZE_4(size, dim)]; + + VERTEX_BUFFER_LIST + +#undef VERTEX_BUFFER_LIST_DO +}; + +#ifndef VKPT_SHADER +typedef struct VertexBuffer VertexBuffer; +#endif + +#ifdef VKPT_SHADER + +layout(set = VERTEX_BUFFER_DESC_SET_IDX, binding = VERTEX_BUFFER_BINDING_IDX) buffer VERTEX_BUFFER { + VertexBuffer vbo; +}; + +#define GET_float_2(name) \ +vec2 \ +get_##name(uint idx) \ +{ \ + return vec2(vbo.name[idx * 2 + 0], vbo.name[idx * 2 + 1]); \ +} + +#define GET_float_3(name) \ +vec3 \ +get_##name(uint idx) \ +{ \ + return vec3(vbo.name[idx * 3 + 0], vbo.name[idx * 3 + 1], vbo.name[idx * 3 + 2]); \ +} + +#define GET_uint32_t_1(name) \ +uint \ +get_##name(uint idx) \ +{ \ + return vbo.name[idx]; \ +} + +#define GET_uint32_t_3(name) \ +uvec3 \ +get_##name(uint idx) \ +{ \ + return uvec3(vbo.name[idx * 3 + 0], vbo.name[idx * 3 + 1], vbo.name[idx * 3 + 2]); \ +} + +#define SET_float_2(name) \ +void \ +set_##name(uint idx, vec2 v) \ +{ \ + vbo.name[idx * 2 + 0] = v[0]; \ + vbo.name[idx * 2 + 1] = v[1]; \ +} + +#define SET_float_3(name) \ +void \ +set_##name(uint idx, vec3 v) \ +{ \ + vbo.name[idx * 3 + 0] = v[0]; \ + vbo.name[idx * 3 + 1] = v[1]; \ + vbo.name[idx * 3 + 2] = v[2]; \ +} + +#define SET_uint32_t_1(name) \ +void \ +set_##name(uint idx, uint u) \ +{ \ + vbo.name[idx] = u; \ +} + +#define SET_uint32_t_3(name) \ +void \ +set_##name(uint 
idx, uvec3 v) \ +{ \ + vbo.name[idx * 3 + 0] = v[0]; \ + vbo.name[idx * 3 + 1] = v[1]; \ + vbo.name[idx * 3 + 2] = v[2]; \ +} + +#define VERTEX_BUFFER_LIST_DO(type, dim, name, size) \ + GET_##type##_##dim(name) \ + SET_##type##_##dim(name) +VERTEX_BUFFER_LIST +#undef VERTEX_BUFFER_LIST_DO + +struct Triangle +{ + mat3x3 positions; + mat3x3 normals; + mat3x2 tex_coords; + uint material_id; + uint cluster; +}; + +struct InstancedTriangle +{ + mat3x3 positions; + mat3x3 normals; + mat3x2 tex_coords; + mat3x3 positions_prev; + uint material_id; +}; + +Triangle +get_bsp_triangle(uint prim_id) +{ + Triangle t; + t.positions[0] = get_positions_bsp(prim_id * 3 + 0); + t.positions[1] = get_positions_bsp(prim_id * 3 + 1); + t.positions[2] = get_positions_bsp(prim_id * 3 + 2); + + vec3 normal = normalize(cross( + t.positions[1] - t.positions[0], + t.positions[2] - t.positions[0])); + + t.normals[0] = normal; + t.normals[1] = normal; + t.normals[2] = normal; + + t.tex_coords[0] = get_tex_coords_bsp(prim_id * 3 + 0); + t.tex_coords[1] = get_tex_coords_bsp(prim_id * 3 + 1); + t.tex_coords[2] = get_tex_coords_bsp(prim_id * 3 + 2); + + t.material_id = get_materials_bsp(prim_id); + + t.cluster = get_clusters_bsp(prim_id); + + return t; +} + +Triangle +get_model_triangle(uint prim_id, uint idx_offset, uint vert_offset) +{ + uvec3 idx = get_idx_model(prim_id + idx_offset / 3); + idx += vert_offset; + + Triangle t; + t.positions[0] = get_positions_model(idx[0]); + t.positions[1] = get_positions_model(idx[1]); + t.positions[2] = get_positions_model(idx[2]); + + vec3 normal = normalize(cross( + t.positions[1] - t.positions[0], + t.positions[2] - t.positions[0])); + + t.normals[0] = get_normals_model(idx[0]); + t.normals[1] = get_normals_model(idx[1]); + t.normals[2] = get_normals_model(idx[2]); + + t.tex_coords[0] = get_tex_coords_model(idx[0]); + t.tex_coords[1] = get_tex_coords_model(idx[1]); + t.tex_coords[2] = get_tex_coords_model(idx[2]); + + t.material_id = 0; // needs to come from uniform buffer + return t; +} + +Triangle +get_instanced_triangle(uint prim_id) +{ + Triangle t; + t.positions[0] = get_positions_instanced(prim_id * 3 + 0); + t.positions[1] = get_positions_instanced(prim_id * 3 + 1); + t.positions[2] = get_positions_instanced(prim_id * 3 + 2); + + vec3 normal = normalize(cross( + t.positions[1] - t.positions[0], + t.positions[2] - t.positions[0])); + + t.normals[0] = get_normals_instanced(prim_id * 3 + 0); + t.normals[1] = get_normals_instanced(prim_id * 3 + 1); + t.normals[2] = get_normals_instanced(prim_id * 3 + 2); + + t.tex_coords[0] = get_tex_coords_instanced(prim_id * 3 + 0); + t.tex_coords[1] = get_tex_coords_instanced(prim_id * 3 + 1); + t.tex_coords[2] = get_tex_coords_instanced(prim_id * 3 + 2); + + t.material_id = get_materials_instanced(prim_id); + + t.cluster = ~0u; + + return t; +} + +void +store_instanced_triangle(InstancedTriangle t, uint instance_id, uint prim_id) +{ + set_positions_instanced(prim_id * 3 + 0, t.positions[0]); + set_positions_instanced(prim_id * 3 + 1, t.positions[1]); + set_positions_instanced(prim_id * 3 + 2, t.positions[2]); + + set_normals_instanced(prim_id * 3 + 0, t.normals[0]); + set_normals_instanced(prim_id * 3 + 1, t.normals[1]); + set_normals_instanced(prim_id * 3 + 2, t.normals[2]); + + set_tex_coords_instanced(prim_id * 3 + 0, t.tex_coords[0]); + set_tex_coords_instanced(prim_id * 3 + 1, t.tex_coords[1]); + set_tex_coords_instanced(prim_id * 3 + 2, t.tex_coords[2]); + + set_materials_instanced(prim_id, t.material_id); + + 
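+	// also record which instance this primitive came from; presumably this lets later
+	// passes (e.g. the forward-projection lookup in the visibility-buffer code above)
+	// map a hit triangle back to its source entity and its previous-frame pose.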
set_instance_id_instanced(prim_id, instance_id); +} + +#endif diff --git a/src/refresh/vkpt/shader/water.glsl b/src/refresh/vkpt/shader/water.glsl new file mode 100644 index 0000000..c4492e2 --- /dev/null +++ b/src/refresh/vkpt/shader/water.glsl @@ -0,0 +1,137 @@ +/* +Copyright (C) 2018 Christoph Schied +Copyright (C) 2018 Johannes Hanika + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + + +float hash1( vec2 p ) +{ + p = 50.0*fract( p*0.3183099 ); + return fract( p.x*p.y*(p.x+p.y) ); +} +float noise( vec2 x ) +{ + vec2 p = floor(x); + vec2 w = fract(x); + vec2 u = w*w*w*(w*(w*6.0-15.0)+10.0); + + float a = hash1(p+vec2(0,0)); + float b = hash1(p+vec2(1,0)); + float c = hash1(p+vec2(0,1)); + float d = hash1(p+vec2(1,1)); + + return -1.0+2.0*( a + (b-a)*u.x + (c-a)*u.y + (a - b - c + d)*u.x*u.y ); +} + +const int ITER_FRAGMENT = 4; +const float SEA_HEIGHT = 5.0; +const float SEA_CHOPPY = 3.0; +const float SEA_SPEED = 0.15; +const float SEA_FREQ = 0.02;//0.16; +#define SEA_TIME (1.0f + 4.0 * float(global_ubo.time)) +//#define SEA_TIME (1.0f + time * SEA_SPEED) +// #define SEA_TIME 1.0 // static sea +float sea_octave(vec2 uv, float choppy) +{ + uv += noise(uv); + vec2 wv = 1.0-abs(sin(uv)); + vec2 swv = abs(cos(uv)); + wv = mix(wv,swv,wv); + return pow(1.0-pow(wv.x * wv.y,0.65),choppy); +} + +const mat2 octave_m = mat2(1.6,1.2,-1.2,1.6); +float water(vec2 p) +{ + float freq = SEA_FREQ; + float amp = SEA_HEIGHT; + float choppy = SEA_CHOPPY; + vec2 uv = p.xy; uv.x *= 0.75; + + float d, h = 0.0; + for(int i = 0; i < ITER_FRAGMENT; i++) + { + d = sea_octave((uv+SEA_TIME)*freq,choppy); + // d += sea_octave((uv-SEA_TIME)*freq,choppy); + h += d * amp; + uv *= octave_m; freq *= 1.9; amp *= 0.22; + choppy = mix(choppy,1.0,0.2); + } + return h; +} +vec3 waterd( in vec2 p ) +{ + float eps = 0.01; + vec3 n; + n.y = water(p); + n.x = water(vec2(p.x+eps,p.y)) - n.y; + n.z = water(vec2(p.x,p.y+eps)) - n.y; + n.y = eps * 10.0; + n = normalize(n); + return n; +} + +mat2 makem2(in float theta){float c = cos(theta);float s = sin(theta);return mat2(c,-s,s,c);} + +/* https://www.shadertoy.com/view/lslXRS */ +vec2 gradn(vec2 p) +{ + float ep = .09; + float gradx = noise(vec2(p.x+ep,p.y))-noise(vec2(p.x-ep,p.y)); + float grady = noise(vec2(p.x,p.y+ep))-noise(vec2(p.x,p.y-ep)); + return vec2(gradx,grady); +} + +vec3 +lava (in vec2 p) +{ + float z=2.; + float rz = 0.; + vec2 bp = p; + float time = global_ubo.time * 0.06; + for (float i= 1.;i < 7.;i++ ) + { + //primary flow speed + p += time * .6; + + //secondary flow speed (speed of the perceived flow) + bp += time * 1.9; + + //displacement field (try changing time multiplier) + vec2 gr = gradn(i*p*.34+ time); + + //rotation of the displacement field + gr*=makem2(time * 6.-(0.05*p.x+0.03*p.y)*40.); + + //displace the system + p += gr*.5; + + //add noise octave + rz+= (sin(noise(p)*7.)*0.5+0.5)/z; + + //blend factor (blending 
displaced system with base system) + //you could call this advection factor (.5 being low, .95 being high) + p = mix(bp,p,.77); + + //intensity scaling + z *= 1.4; + //octave scaling + p *= 2.; + bp *= 1.9; + } + return pow(vec3(.2,0.07,0.01) / rz, vec3(1.4 * 2.4)) * 5.0; +} diff --git a/src/refresh/vkpt/stb.c b/src/refresh/vkpt/stb.c new file mode 100644 index 0000000..c554d08 --- /dev/null +++ b/src/refresh/vkpt/stb.c @@ -0,0 +1,24 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#define STB_IMAGE_IMPLEMENTATION +#include "include/stb_image.h" +#define STB_IMAGE_RESIZE_IMPLEMENTATION +#include "include/stb_image_resize.h" +#define STB_IMAGE_WRITE_IMPLEMENTATION +#include "include/stb_image_write.h" diff --git a/src/refresh/vkpt/textures.c b/src/refresh/vkpt/textures.c new file mode 100644 index 0000000..8aa7825 --- /dev/null +++ b/src/refresh/vkpt/textures.c @@ -0,0 +1,1166 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#include "vkpt.h" +#include "vk_util.h" + +#include + +#include "include/stb_image.h" +#include "include/stb_image_resize.h" +#include "include/stb_image_write.h" + +static VkSampler tex_sampler, tex_sampler_nearest; // todo: rename to make consistent + +static VkImage tex_images [MAX_RIMAGES]; // todo: rename to make consistent +static VkImageView tex_image_views[MAX_RIMAGES]; // todo: rename to make consistent +static VkDeviceMemory img_memory, mem_blue_noise, mem_envmap; // todo: rename to make consistent +static VkImage img_blue_noise; +static VkImageView imv_blue_noise; +static VkImage img_envmap; +static VkImageView imv_envmap; +static VkDescriptorPool desc_pool_textures; + +static VkDeviceMemory mem_images; + + +static int image_loading_dirty_flag = 0; + +static void +load_material(int material_idx, const image_t *image) +{ +} + +VkResult +vkpt_textures_upload_envmap(int w, int h, byte *data) +{ + vkDeviceWaitIdle(qvk.device); + if(imv_envmap != VK_NULL_HANDLE) { + vkDestroyImageView(qvk.device, imv_envmap, NULL); + imv_envmap = NULL; + } + if(img_envmap != VK_NULL_HANDLE) { + vkDestroyImage(qvk.device, img_envmap, NULL); + img_envmap = NULL; + } + + const int num_images = 6; + size_t img_size = w * h * 4; + + BufferResource_t buf_img_upload; + buffer_create(&buf_img_upload, img_size * num_images, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + + byte *envmap = (byte *) buffer_map(&buf_img_upload); + memcpy(envmap, data, img_size * num_images); + buffer_unmap(&buf_img_upload); + envmap = NULL; + + VkImageCreateInfo img_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + .extent = { + .width = w, + .height = h, + .depth = 1, + }, + .imageType = VK_IMAGE_TYPE_2D, + .format = VK_FORMAT_R8G8B8A8_UNORM, + .mipLevels = 1, + .arrayLayers = num_images, + .samples = VK_SAMPLE_COUNT_1_BIT, + .tiling = VK_IMAGE_TILING_OPTIMAL, + .usage = VK_IMAGE_USAGE_STORAGE_BIT + | VK_IMAGE_USAGE_TRANSFER_DST_BIT + | VK_IMAGE_USAGE_SAMPLED_BIT, + .flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = qvk.queue_idx_graphics, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + }; + + _VK(vkCreateImage(qvk.device, &img_info, NULL, &img_envmap)); + ATTACH_LABEL_VARIABLE(img_envmap, IMAGE); + + VkMemoryRequirements mem_req; + vkGetImageMemoryRequirements(qvk.device, img_envmap, &mem_req); + assert(mem_req.size >= buf_img_upload.size); + + VkMemoryAllocateInfo mem_alloc_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = mem_req.size, + .memoryTypeIndex = get_memory_type(mem_req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + }; + _VK(vkAllocateMemory(qvk.device, &mem_alloc_info, NULL, &mem_envmap)); + + _VK(vkBindImageMemory(qvk.device, img_envmap, mem_envmap, 0)); + + VkImageViewCreateInfo img_view_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .viewType = VK_IMAGE_VIEW_TYPE_CUBE, + .format = VK_FORMAT_R8G8B8A8_UNORM, + .image = img_envmap, + .subresourceRange = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = num_images, + }, + .components = { + VK_COMPONENT_SWIZZLE_R, + VK_COMPONENT_SWIZZLE_G, + VK_COMPONENT_SWIZZLE_B, + VK_COMPONENT_SWIZZLE_A, + }, + }; + _VK(vkCreateImageView(qvk.device, &img_view_info, NULL, &imv_envmap)); + ATTACH_LABEL_VARIABLE(imv_envmap, IMAGE_VIEW); + + VkCommandBufferAllocateInfo cmd_alloc = { + .sType = 
VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandPool = qvk.command_pool, + .commandBufferCount = 1, + }; + VkCommandBuffer cmd_buf; + vkAllocateCommandBuffers(qvk.device, &cmd_alloc, &cmd_buf); + VkCommandBufferBeginInfo cmd_begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + vkBeginCommandBuffer(cmd_buf, &cmd_begin_info); + + for(int layer = 0; layer < num_images; layer++) { + + VkImageSubresourceRange subresource_range = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = layer, + .layerCount = 1, + }; + + IMAGE_BARRIER(cmd_buf, + .image = img_envmap, + .subresourceRange = subresource_range, + .srcAccessMask = 0, + .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL + ); + + VkBufferImageCopy cpy_info = { + .bufferOffset = img_size * layer, + .imageSubresource = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = 0, + .baseArrayLayer = layer, + .layerCount = 1, + }, + .imageOffset = { 0, 0, 0 }, + .imageExtent = { w, h, 1 } + }; + vkCmdCopyBufferToImage(cmd_buf, buf_img_upload.buffer, img_envmap, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cpy_info); + + + IMAGE_BARRIER(cmd_buf, + .image = img_envmap, + .subresourceRange = subresource_range, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, + .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + ); + } + + vkEndCommandBuffer(cmd_buf); + VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = &cmd_buf, + }; + vkQueueSubmit(qvk.queue_graphics, 1, &submit_info, VK_NULL_HANDLE); + + + VkDescriptorImageInfo desc_img_info = { + .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + .imageView = imv_envmap, + .sampler = tex_sampler, + }; + + VkWriteDescriptorSet s = { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = qvk.desc_set_textures, + .dstBinding = BINDING_OFFSET_ENVMAP, + .dstArrayElement = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = 1, + .pImageInfo = &desc_img_info, + }; + + vkUpdateDescriptorSets(qvk.device, 1, &s, 0, NULL); + + vkQueueWaitIdle(qvk.queue_graphics); + + buffer_destroy(&buf_img_upload); + + return VK_SUCCESS; +} + +static VkResult +load_blue_noise() +{ + const int num_images = NUM_BLUE_NOISE_TEX / 4; + const int res = BLUE_NOISE_RES; + size_t img_size = res * res; + size_t total_size = img_size * sizeof(uint16_t); + + BufferResource_t buf_img_upload; + buffer_create(&buf_img_upload, total_size * NUM_BLUE_NOISE_TEX, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + + uint16_t *bn_tex = (uint16_t *) buffer_map(&buf_img_upload); + + for(int i = 0; i < num_images; i++) { + int w, h, n; + char buf[1024]; + + + snprintf(buf, sizeof buf, "blue_noise_textures/%d_%d/HDR_RGBA_%04d.png", res, res, i); + uint16_t *data = stbi_load_16(buf, &w, &h, &n, 4); + if(!data) { + Com_EPrintf("error loading blue noise tex %s\n", buf); + buffer_unmap(&buf_img_upload); + buffer_destroy(&buf_img_upload); + return VK_ERROR_INITIALIZATION_FAILED; + } + + /* loaded images are RGBA, want to upload as texture array though */ + for(int k = 0; k < 4; k++) { + for(int j = 0; j < img_size; 
j++) + bn_tex[(i * 4 + k) * img_size + j] = data[j * 4 + k]; + } + + stbi_image_free(data); + } + buffer_unmap(&buf_img_upload); + bn_tex = NULL; + + VkImageCreateInfo img_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + .extent = { + .width = BLUE_NOISE_RES, + .height = BLUE_NOISE_RES, + .depth = 1, + }, + .imageType = VK_IMAGE_TYPE_2D, + .format = VK_FORMAT_R16_UNORM, + .mipLevels = 1, + .arrayLayers = NUM_BLUE_NOISE_TEX, + .samples = VK_SAMPLE_COUNT_1_BIT, + .tiling = VK_IMAGE_TILING_OPTIMAL, + .usage = VK_IMAGE_USAGE_STORAGE_BIT + | VK_IMAGE_USAGE_TRANSFER_DST_BIT + | VK_IMAGE_USAGE_SAMPLED_BIT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = qvk.queue_idx_graphics, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + }; + + _VK(vkCreateImage(qvk.device, &img_info, NULL, &img_blue_noise)); + ATTACH_LABEL_VARIABLE(img_blue_noise, IMAGE); + + VkMemoryRequirements mem_req; + vkGetImageMemoryRequirements(qvk.device, img_blue_noise, &mem_req); + assert(mem_req.size >= buf_img_upload.size); + + VkMemoryAllocateInfo mem_alloc_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = mem_req.size, + .memoryTypeIndex = get_memory_type(mem_req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + }; + _VK(vkAllocateMemory(qvk.device, &mem_alloc_info, NULL, &mem_blue_noise)); + + _VK(vkBindImageMemory(qvk.device, img_blue_noise, mem_blue_noise, 0)); + + VkImageViewCreateInfo img_view_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY, + .format = VK_FORMAT_R16_UNORM, + .image = img_blue_noise, + .subresourceRange = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = NUM_BLUE_NOISE_TEX, + }, + .components = { + VK_COMPONENT_SWIZZLE_R, + VK_COMPONENT_SWIZZLE_R, + VK_COMPONENT_SWIZZLE_R, + VK_COMPONENT_SWIZZLE_R, + }, + }; + _VK(vkCreateImageView(qvk.device, &img_view_info, NULL, &imv_blue_noise)); + ATTACH_LABEL_VARIABLE(imv_blue_noise, IMAGE_VIEW); + + VkCommandBufferAllocateInfo cmd_alloc = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandPool = qvk.command_pool, + .commandBufferCount = 1, + }; + VkCommandBuffer cmd_buf; + vkAllocateCommandBuffers(qvk.device, &cmd_alloc, &cmd_buf); + VkCommandBufferBeginInfo cmd_begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + vkBeginCommandBuffer(cmd_buf, &cmd_begin_info); + + for(int layer = 0; layer < NUM_BLUE_NOISE_TEX; layer++) { + + VkImageSubresourceRange subresource_range = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = layer, + .layerCount = 1, + }; + + IMAGE_BARRIER(cmd_buf, + .image = img_blue_noise, + .subresourceRange = subresource_range, + .srcAccessMask = 0, + .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL + ); + + VkBufferImageCopy cpy_info = { + .bufferOffset = total_size * layer, + .imageSubresource = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = 0, + .baseArrayLayer = layer, + .layerCount = 1, + }, + .imageOffset = { 0, 0, 0 }, + .imageExtent = { BLUE_NOISE_RES, BLUE_NOISE_RES, 1 } + }; + vkCmdCopyBufferToImage(cmd_buf, buf_img_upload.buffer, img_blue_noise, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cpy_info); + + + IMAGE_BARRIER(cmd_buf, + .image = 
img_blue_noise, + .subresourceRange = subresource_range, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, + .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + ); + } + + vkEndCommandBuffer(cmd_buf); + VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = &cmd_buf, + }; + vkQueueSubmit(qvk.queue_graphics, 1, &submit_info, VK_NULL_HANDLE); + + + VkDescriptorImageInfo desc_img_info = { + .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + .imageView = imv_blue_noise, + .sampler = tex_sampler_nearest, + }; + + VkWriteDescriptorSet s = { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = qvk.desc_set_textures, + .dstBinding = BINDING_OFFSET_BLUE_NOISE, + .dstArrayElement = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = 1, + .pImageInfo = &desc_img_info, + }; + + vkUpdateDescriptorSets(qvk.device, 1, &s, 0, NULL); + + vkQueueWaitIdle(qvk.queue_graphics); + + buffer_destroy(&buf_img_upload); + + return VK_SUCCESS; +} + +static int +get_num_miplevels(int w, int h) +{ + return 1 + log2(MAX(w, h)); +} + + +/* +================ +IMG_Load +================ +*/ + +/* we match on radiance in the bsp, unfortunately this + does not catch all lights :( we therefore have this + explicit lists of light textures here */ +static const char *light_texture_names[] = { + "textures/e1u1/baselt_3.tga", + "textures/e1u1/baslt3_1.tga", + "textures/e1u1/baselt_2.tga", + "textures/e1u1/baselt_1.tga", + "textures/e1u1/baselt_b.tga", + "textures/e1u1/baselt_c.tga", + "textures/e1u1/baselt_a.tga", + "textures/e1u1/ceil1_1.tga", + "textures/e1u1/ceil1_2.tga", + "textures/e1u1/ceil1_3.tga", + "textures/e1u1/ceil1_4.tga", + "textures/e1u1/ceil1_5.tga", + "textures/e1u1/ceil1_6.tga", + "textures/e1u1/ceil1_7.tga", + "textures/e1u1/ceil1_8.tga", + "textures/e1u1/jaildr2_3.tga", + "textures/e1u1/plite1_1.tga", + "textures/e1u2/ceil1_1.tga", + "textures/e1u2/ceil1_3.tga", + "textures/e1u2/ceil1_8.tga", + "textures/e1u2/fuse1_1.tga", + "textures/e1u2/fuse1_2.tga", + "textures/e1u2/support1_8.tga", + "textures/e1u2/wslt1_5.tga", + "textures/e2u3/ceil1_13.tga", + "textures/e2u3/ceil1_14.tga", + "textures/e2u3/ceil1_4.tga", + "textures/e2u3/wstlt1_5.tga", + "textures/e2u3/wstlt1_8.tga", + "textures/e1u2/brlava.tga", + "textures/e3u1/brlava.tga", + "textures/e1u1/brlava.tga", + "textures/e1u2/brlava.tga", + "textures/e1u3/lava.tga", + "textures/e2u1/brlava.tga", + "textures/e2u1/lava.tga", + "textures/e2u2/brlava.tga", + "textures/e2u2/lava.tga", + "textures/e2u3/brlava.tga", + "textures/e2u3/tlava1_3.tga", + "textures/e3u1/brlava.tga", +}; + +void +IMG_Load(image_t *image, byte *pic) +{ + //Com_Printf("%s %s %s\n", __func__, image->flags & IF_PERMANENT ? 
"(permanent) " : "", image->name); + int w = image->upload_width; + int h = image->upload_height; + + image->is_light = 0; + for(int i = 0; i < LENGTH(light_texture_names); i++) { + if(!strncmp(image->name, light_texture_names[i], strlen(light_texture_names[i]) - 4)) { + image->is_light = 1; + break; + } + } + + //int num_mip_levels = log2(MAX(w, h)); + image->pix_data = Z_Malloc(w * h * 4 * 2); + int w_mip = w, h_mip = h; + byte *mip_off = image->pix_data; + memcpy(mip_off, pic, w_mip * h_mip * 4); + int level = 0; + while(w_mip > 1 || h_mip > 1) { +#if 0 + char buf[1024]; + snprintf(buf, sizeof buf, "/tmp/%s_%02d.png", image->name, level++); + for(int i = 5; buf[i]; i++) { + if(buf[i] == '/') + buf[i] = '_'; + } + + stbi_write_png(buf, w_mip, h_mip, 4, mip_off, w_mip * 4); +#endif + + int w_mip_next = w_mip >> (w_mip > 1); + int h_mip_next = h_mip >> (h_mip > 1); + stbir_resize_uint8_generic( + //image->pix_data, w, h, w * 4, + mip_off, w_mip, h_mip, w_mip * 4, + mip_off + w_mip * h_mip * 4, w_mip_next, h_mip_next, w_mip_next * 4, + 4, 3, 0, STBIR_EDGE_WRAP, STBIR_FILTER_TRIANGLE, STBIR_COLORSPACE_SRGB, NULL); + //4, 3, 0, STBIR_EDGE_WRAP, STBIR_FILTER_MITCHELL, STBIR_COLORSPACE_SRGB, NULL); + + mip_off += w_mip * h_mip * 4; + w_mip = w_mip_next; + h_mip = h_mip_next; + } + + + //image->pix_data = pic; + + float r = (float) pic[((h / 2) * w + w/ 2) * 4 + 0]; + float g = (float) pic[((h / 2) * w + w/ 2) * 4 + 1]; + float b = (float) pic[((h / 2) * w + w/ 2) * 4 + 2]; + + r = powf(r / 255.0f, 2.2f); + g = powf(g / 255.0f, 2.2f); + b = powf(b / 255.0f, 2.2f); + + float m = r > g ? r : g; + m = g > b ? g : b; + + m = 1.0; + + r = (r / m) * 255.0f; + g = (g / m) * 255.0f; + b = (b / m) * 255.0f; + + r = r > 255.0 ? 255.0 : r; + g = g > 255.0 ? 255.0 : g; + b = b > 255.0 ? 
255.0 : b; + + image->light_color = 0; + image->light_color |= ((uint32_t) r) << 0; + image->light_color |= ((uint32_t) g) << 8; + image->light_color |= ((uint32_t) b) << 16; + + image->material_idx = (int) (image - r_images); + if(image->material_idx >= 0) { + load_material(image->material_idx, image); + } + + image_loading_dirty_flag = 1; + Z_Free(pic); +} + +void +IMG_Unload(image_t *image) +{ + if(image->pix_data) + Z_Free(image->pix_data); + image->pix_data = NULL; +} + +VkResult +vkpt_textures_initialize() +{ + VkSamplerCreateInfo sampler_info = { + .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, + .magFilter = VK_FILTER_LINEAR, + .minFilter = VK_FILTER_LINEAR, + .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .anisotropyEnable = VK_FALSE, + .maxAnisotropy = 16, + .borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK, + .unnormalizedCoordinates = VK_FALSE, + .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR, + .minLod = 0.0f, + .maxLod = 128.0f, + }; + _VK(vkCreateSampler(qvk.device, &sampler_info, NULL, &tex_sampler)); + ATTACH_LABEL_VARIABLE(tex_sampler, SAMPLER); + VkSamplerCreateInfo sampler_nearest_info = { + .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, + .magFilter = VK_FILTER_NEAREST, + .minFilter = VK_FILTER_NEAREST, + .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .anisotropyEnable = VK_FALSE, + .maxAnisotropy = 16, + .borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK, + .unnormalizedCoordinates = VK_FALSE, + .mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST, + }; + _VK(vkCreateSampler(qvk.device, &sampler_nearest_info, NULL, &tex_sampler_nearest)); + ATTACH_LABEL_VARIABLE(tex_sampler_nearest, SAMPLER); + + VkDescriptorSetLayoutBinding layout_bindings[] = { + { + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = MAX_RIMAGES, + .binding = 0, + .stageFlags = VK_SHADER_STAGE_ALL, + }, +#define IMG_DO(_name, _binding, ...) \ + { \ + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, \ + .descriptorCount = 1, \ + .binding = BINDING_OFFSET_IMAGES + _binding, \ + .stageFlags = VK_SHADER_STAGE_ALL, \ + }, + LIST_IMAGES +#undef IMG_DO +#define IMG_DO(_name, _binding, ...) 
\ + { \ + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, \ + .descriptorCount = 1, \ + .binding = BINDING_OFFSET_TEXTURES + _binding, \ + .stageFlags = VK_SHADER_STAGE_ALL, \ + }, + LIST_IMAGES +#undef IMG_DO + { + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = 1, + .binding = BINDING_OFFSET_BLUE_NOISE, + .stageFlags = VK_SHADER_STAGE_ALL, + }, + { + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = 1, + .binding = BINDING_OFFSET_ENVMAP, + .stageFlags = VK_SHADER_STAGE_ALL, + }, + }; + + VkDescriptorSetLayoutCreateInfo layout_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = LENGTH(layout_bindings), + .pBindings = layout_bindings, + }; + + _VK(vkCreateDescriptorSetLayout(qvk.device, &layout_info, NULL, &qvk.desc_set_layout_textures)); + ATTACH_LABEL_VARIABLE(qvk.desc_set_layout_textures, DESCRIPTOR_SET_LAYOUT); + VkDescriptorPoolSize pool_size = { + .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = MAX_RIMAGES + 2 * NUM_VKPT_IMAGES + 128, + }; + + VkDescriptorPoolCreateInfo pool_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .poolSizeCount = 1, + .pPoolSizes = &pool_size, + .maxSets = 1, + }; + + _VK(vkCreateDescriptorPool(qvk.device, &pool_info, NULL, &desc_pool_textures)); + ATTACH_LABEL_VARIABLE(desc_pool_textures, DESCRIPTOR_POOL); + + VkDescriptorSetAllocateInfo descriptor_set_alloc_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + .descriptorPool = desc_pool_textures, + .descriptorSetCount = 1, + .pSetLayouts = &qvk.desc_set_layout_textures, + }; + + _VK(vkAllocateDescriptorSets(qvk.device, &descriptor_set_alloc_info, &qvk.desc_set_textures)); + + if(load_blue_noise() != VK_SUCCESS) + return VK_ERROR_INITIALIZATION_FAILED; + + LOG_FUNC(); + return VK_SUCCESS; +} + +static void +destroy_tex_images() +{ + for(int i = 0; i < MAX_RIMAGES; i++) { + if(tex_image_views[i]) { + vkDestroyImageView(qvk.device, tex_image_views[i], NULL); + tex_image_views[i] = VK_NULL_HANDLE; + } + if(tex_images[i]) { + vkDestroyImage(qvk.device, tex_images[i], NULL); + tex_images[i] = VK_NULL_HANDLE; + } + } + if(img_memory) { + vkFreeMemory(qvk.device, img_memory, NULL); + img_memory = VK_NULL_HANDLE; + } +} + +VkResult +vkpt_textures_destroy() +{ + destroy_tex_images(); + vkDestroyDescriptorSetLayout(qvk.device, qvk.desc_set_layout_textures, NULL); + vkDestroyDescriptorPool(qvk.device, desc_pool_textures, NULL); + + vkFreeMemory (qvk.device, mem_blue_noise, NULL); + vkDestroyImage (qvk.device, img_blue_noise, NULL); + vkDestroyImageView(qvk.device, imv_blue_noise, NULL); + vkDestroySampler (qvk.device, tex_sampler, NULL); + vkDestroySampler (qvk.device, tex_sampler_nearest, NULL); + + if(imv_envmap != VK_NULL_HANDLE) { + vkDestroyImageView(qvk.device, imv_envmap, NULL); + imv_envmap = NULL; + } + if(img_envmap != VK_NULL_HANDLE) { + vkDestroyImage(qvk.device, img_envmap, NULL); + img_envmap = NULL; + } + LOG_FUNC(); + return VK_SUCCESS; +} + +VkResult +vkpt_textures_end_registration() +{ + if(!image_loading_dirty_flag) + return VK_SUCCESS; + image_loading_dirty_flag = 0; + vkDeviceWaitIdle(qvk.device); + destroy_tex_images(); + + VkImageCreateInfo img_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + .extent = { + .width = 1337, + .height = 1337, + .depth = 1 + }, + .imageType = VK_IMAGE_TYPE_2D, + .format = VK_FORMAT_R8G8B8A8_SRGB, + .mipLevels = 1, + .arrayLayers = 1, + .samples = VK_SAMPLE_COUNT_1_BIT, + .tiling = 
VK_IMAGE_TILING_OPTIMAL, + .usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT + | VK_IMAGE_USAGE_SAMPLED_BIT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = qvk.queue_idx_graphics, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + }; + + VkImageViewCreateInfo img_view_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = VK_FORMAT_R8G8B8A8_SRGB, + .subresourceRange = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + }, + .components = { + VK_COMPONENT_SWIZZLE_R, + VK_COMPONENT_SWIZZLE_G, + VK_COMPONENT_SWIZZLE_B, + VK_COMPONENT_SWIZZLE_A + }, + }; + + uint32_t pix_invalid = 0xffff00ff; + image_t q_img_invalid = { + .width = 1, + .height = 1, + .upload_width = 1, + .upload_height = 1, + .pix_data = (byte *) &pix_invalid + }; + + size_t total_size = 0; + uint32_t memory_type_bits = ~0; + for(int i = 0; i < MAX_RIMAGES; i++) { + image_t *q_img = r_images + i; + if(q_img->registration_sequence) { + img_info.extent.width = q_img->upload_width; + img_info.extent.height = q_img->upload_height; + } + else { + q_img = &q_img_invalid; + } + img_info.mipLevels = get_num_miplevels(q_img->upload_width, q_img->upload_height); + + _VK(vkCreateImage(qvk.device, &img_info, NULL, tex_images + i)); + ATTACH_LABEL_VARIABLE(tex_images[i], IMAGE); + + VkMemoryRequirements mem_req; + vkGetImageMemoryRequirements(qvk.device, tex_images[i], &mem_req); + + assert(!(mem_req.alignment & (mem_req.alignment - 1))); + total_size += mem_req.alignment - 1; + total_size &= ~(mem_req.alignment - 1); + total_size += mem_req.size; + + if(memory_type_bits != ~0 && mem_req.memoryTypeBits != memory_type_bits) { + Com_EPrintf("memory type bits don't match!\n"); + } + memory_type_bits = mem_req.memoryTypeBits; + } + + Com_Printf("allocating %.02f MB of image memory for textures\n", (double) total_size / (1024.0 * 1024.0)); + VkMemoryAllocateInfo mem_alloc_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = total_size, + .memoryTypeIndex = get_memory_type(memory_type_bits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + }; + _VK(vkAllocateMemory(qvk.device, &mem_alloc_info, NULL, &img_memory)); + + BufferResource_t buf_img_upload; + buffer_create(&buf_img_upload, total_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + + VkCommandBufferAllocateInfo cmd_alloc = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandPool = qvk.command_pool, + .commandBufferCount = 1, + }; + + VkCommandBuffer cmd_buf; + vkAllocateCommandBuffers(qvk.device, &cmd_alloc, &cmd_buf); + VkCommandBufferBeginInfo cmd_begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + vkBeginCommandBuffer(cmd_buf, &cmd_begin_info); + + char *staging_buffer = buffer_map(&buf_img_upload); + + size_t offset = 0; + for(int i = 0; i < MAX_RIMAGES; i++) { + image_t *q_img = r_images + i; + if(!q_img->registration_sequence) + q_img = &q_img_invalid; + + int num_mip_levels = get_num_miplevels(q_img->upload_width, q_img->upload_height); + + VkMemoryRequirements mem_req; + vkGetImageMemoryRequirements(qvk.device, tex_images[i], &mem_req); + + assert(!(mem_req.alignment & (mem_req.alignment - 1))); + offset += mem_req.alignment - 1; + offset &= ~(mem_req.alignment - 1); + + uint32_t wd = q_img->upload_width; + 
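+		// wd and ht track the dimensions of the current mip level; the copy loop below
+		// halves them per level, matching the mip chain that IMG_Load packs into
+		// q_img->pix_data.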
uint32_t ht = q_img->upload_height; + + + _VK(vkBindImageMemory(qvk.device, tex_images[i], img_memory, offset)); + + VkImageSubresourceRange subresource_range = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = num_mip_levels, + .baseArrayLayer = 0, + .layerCount = 1 + }; + + IMAGE_BARRIER(cmd_buf, + .image = tex_images[i], + .subresourceRange = subresource_range, + .srcAccessMask = 0, + .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL + ); + + size_t mip_offset = 0; + for(int mip = 0; mip < num_mip_levels; mip++) { + memcpy(staging_buffer + offset + mip_offset, q_img->pix_data + mip_offset, wd * ht * 4); + + VkBufferImageCopy cpy_info = { + .bufferOffset = offset + mip_offset, + .imageSubresource = { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = mip, + .baseArrayLayer = 0, + .layerCount = 1, + }, + .imageOffset = { 0, 0, 0 }, + .imageExtent = { wd, ht, 1 } + }; + + vkCmdCopyBufferToImage(cmd_buf, buf_img_upload.buffer, tex_images[i], + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cpy_info); + + mip_offset += wd * ht * 4; + wd >>= (wd > 1); + ht >>= (ht > 1); + } + + IMAGE_BARRIER(cmd_buf, + .image = tex_images[i], + .subresourceRange = subresource_range, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, + .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + ); + + img_view_info.image = tex_images[i]; + img_view_info.subresourceRange.levelCount = num_mip_levels; + _VK(vkCreateImageView(qvk.device, &img_view_info, NULL, tex_image_views + i)); + ATTACH_LABEL_VARIABLE(tex_image_views[i], IMAGE_VIEW); + + offset += mem_req.size; + } + + buffer_unmap(&buf_img_upload); + staging_buffer = NULL; + + vkEndCommandBuffer(cmd_buf); + VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = &cmd_buf, + }; + + vkQueueSubmit(qvk.queue_graphics, 1, &submit_info, VK_NULL_HANDLE); + + for(int i = 0; i < MAX_RIMAGES; i++) { + image_t *q_img = r_images + i; + if(!q_img->registration_sequence) + q_img = &q_img_invalid; + + VkDescriptorImageInfo img_info = { + .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + .imageView = tex_image_views[i], + .sampler = (!strcmp(q_img->name, "pics/conchars.pcx") || !strcmp(q_img->name, "pics/ch1.pcx")) + ? 
tex_sampler_nearest : tex_sampler, + }; + + VkWriteDescriptorSet s = { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = qvk.desc_set_textures, + .dstBinding = GLOBAL_TEXTURES_TEX_ARR_BINDING_IDX, + .dstArrayElement = i, + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = 1, + .pImageInfo = &img_info, + }; + + vkUpdateDescriptorSets(qvk.device, 1, &s, 0, NULL); + } + + vkQueueWaitIdle(qvk.queue_graphics); + buffer_destroy(&buf_img_upload); + + return VK_SUCCESS; +} + +VkResult +vkpt_create_images() +{ + VkImageCreateInfo images_create_info[NUM_VKPT_IMAGES] = { +#define IMG_DO(_name, _binding, _vkformat, _glslformat, _w, _h) \ + [VKPT_IMG_##_name] = { \ + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, \ + .imageType = VK_IMAGE_TYPE_2D, \ + .format = VK_FORMAT_##_vkformat, \ + .extent = { \ + .width = _w, \ + .height = _h, \ + .depth = 1 \ + }, \ + .mipLevels = 1, \ + .arrayLayers = 1, \ + .samples = VK_SAMPLE_COUNT_1_BIT, \ + .tiling = VK_IMAGE_TILING_OPTIMAL, \ + .usage = VK_IMAGE_USAGE_STORAGE_BIT \ + | VK_IMAGE_USAGE_TRANSFER_SRC_BIT \ + | VK_IMAGE_USAGE_TRANSFER_DST_BIT \ + | VK_IMAGE_USAGE_SAMPLED_BIT \ + | VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT, \ + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, \ + .queueFamilyIndexCount = qvk.queue_idx_graphics, \ + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, \ + }, +LIST_IMAGES +#undef IMG_DO + }; + + size_t total_size = 0; + + + /* create images and compute required memory to make a single allocation */ + uint32_t memory_type_bits; + for(int i = 0; i < NUM_VKPT_IMAGES; i++) { + _VK(vkCreateImage(qvk.device, images_create_info + i, NULL, qvk.images + i)); + ATTACH_LABEL_VARIABLE(qvk.images[i], IMAGE); + + VkMemoryRequirements mem_req; + vkGetImageMemoryRequirements(qvk.device, qvk.images[i], &mem_req); + + assert(!(mem_req.alignment & (mem_req.alignment - 1))); + total_size += mem_req.alignment - 1; + total_size &= ~(mem_req.alignment - 1); + total_size += mem_req.size; + + if(i && mem_req.memoryTypeBits != memory_type_bits) { + Com_EPrintf("memory type bits don't match!\n"); + } + memory_type_bits = mem_req.memoryTypeBits; + } + + /* attach labels to images */ +#define IMG_DO(_name, _binding, ...) \ + ATTACH_LABEL_VARIABLE_NAME(qvk.images[VKPT_IMG_##_name], IMAGE, #_name); + LIST_IMAGES +#undef IMG_DO + /* attach labels to images */ +#define IMG_DO(_name, _binding, ...) 
\ + ATTACH_LABEL_VARIABLE_NAME(qvk.images[VKPT_IMG_##_name], IMAGE, #_name); + LIST_IMAGES +#undef IMG_DO + + Com_Printf("allocating %.02f MB of image memory\n", (double) total_size / (1024.0 * 1024.0)); + + VkMemoryAllocateInfo mem_alloc_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = total_size, + .memoryTypeIndex = get_memory_type(memory_type_bits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + }; + + _VK(vkAllocateMemory(qvk.device, &mem_alloc_info, NULL, &mem_images)); + ATTACH_LABEL_VARIABLE(mem_images, DEVICE_MEMORY); + + /* bind memory to images */ + size_t offset = 0; + for(int i = 0; i < NUM_VKPT_IMAGES; i++) { + VkMemoryRequirements mem_req; + vkGetImageMemoryRequirements(qvk.device, qvk.images[i], &mem_req); + + assert(!(mem_req.alignment & (mem_req.alignment - 1))); + offset += mem_req.alignment - 1; + offset &= ~(mem_req.alignment - 1); + + _VK(vkBindImageMemory(qvk.device, qvk.images[i], mem_images, offset)); + + offset += mem_req.size; + } + +#define IMG_DO(_name, _binding, _vkformat, _glslformat, _w, _h) \ + [VKPT_IMG_##_name] = { \ + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, \ + .viewType = VK_IMAGE_VIEW_TYPE_2D, \ + .format = VK_FORMAT_##_vkformat, \ + .image = qvk.images[VKPT_IMG_##_name], \ + .subresourceRange = { \ + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, \ + .baseMipLevel = 0, \ + .levelCount = 1, \ + .baseArrayLayer = 0, \ + .layerCount = 1 \ + }, \ + .components = { \ + VK_COMPONENT_SWIZZLE_R, \ + VK_COMPONENT_SWIZZLE_G, \ + VK_COMPONENT_SWIZZLE_B, \ + VK_COMPONENT_SWIZZLE_A \ + }, \ + }, + + VkImageViewCreateInfo images_view_create_info[NUM_VKPT_IMAGES] = { + LIST_IMAGES + }; +#undef IMG_DO + + + for(int i = 0; i < NUM_VKPT_IMAGES; i++) { + _VK(vkCreateImageView(qvk.device, images_view_create_info + i, NULL, qvk.images_views + i)); + } + + /* attach labels to image views */ +#define IMG_DO(_name, ...) \ + ATTACH_LABEL_VARIABLE_NAME(qvk.images_views[VKPT_IMG_##_name], IMAGE_VIEW, #_name); + LIST_IMAGES +#undef IMG_DO + +#define IMG_DO(_name, ...) \ + [VKPT_IMG_##_name] = { \ + .sampler = VK_NULL_HANDLE, \ + .imageView = qvk.images_views[VKPT_IMG_##_name], \ + .imageLayout = VK_IMAGE_LAYOUT_GENERAL \ + }, + VkDescriptorImageInfo desc_output_img_info[] = { + LIST_IMAGES + }; +#undef IMG_DO + + VkDescriptorImageInfo img_info[] = { +#define IMG_DO(_name, ...) \ + [VKPT_IMG_##_name] = { \ + .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, \ + .imageView = qvk.images_views[VKPT_IMG_##_name], \ + .sampler = tex_sampler, \ + }, + + LIST_IMAGES + }; +#undef IMG_DO + + /* create information to update descriptor sets */ + VkWriteDescriptorSet output_img_write[] = { +#define IMG_DO(_name, _binding, ...) \ + [VKPT_IMG_##_name] = { \ + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, \ + .dstSet = qvk.desc_set_textures, \ + .dstBinding = BINDING_OFFSET_IMAGES + _binding, \ + .dstArrayElement = 0, \ + .descriptorCount = 1, \ + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, \ + .pImageInfo = desc_output_img_info + VKPT_IMG_##_name, \ + }, + LIST_IMAGES +#undef IMG_DO +#define IMG_DO(_name, _binding, ...) 
\ + [VKPT_IMG_##_name + NUM_VKPT_IMAGES] = { \ + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, \ + .dstSet = qvk.desc_set_textures, \ + .dstBinding = BINDING_OFFSET_TEXTURES + _binding, \ + .dstArrayElement = 0, \ + .descriptorCount = 1, \ + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, \ + .pImageInfo = img_info + VKPT_IMG_##_name, \ + }, + LIST_IMAGES +#undef IMG_DO + }; + + vkUpdateDescriptorSets(qvk.device, LENGTH(output_img_write), output_img_write, 0, NULL); + + return VK_SUCCESS; +} + +VkResult +vkpt_destroy_images() +{ + for(int i = 0; i < NUM_VKPT_IMAGES; i++) { + vkDestroyImageView(qvk.device, qvk.images_views[i], NULL); + vkDestroyImage (qvk.device, qvk.images[i], NULL); + + qvk.images_views[i] = VK_NULL_HANDLE; + qvk.images[i] = VK_NULL_HANDLE; + } + vkFreeMemory(qvk.device, mem_images, NULL); + mem_images = VK_NULL_HANDLE; + + return VK_SUCCESS; +} + + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/uniform_buffer.c b/src/refresh/vkpt/uniform_buffer.c new file mode 100644 index 0000000..a59e5c1 --- /dev/null +++ b/src/refresh/vkpt/uniform_buffer.c @@ -0,0 +1,131 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#include "vkpt.h" + +#include + +static BufferResource_t uniform_buffers[MAX_SWAPCHAIN_IMAGES]; +static VkDescriptorPool desc_pool_ubo; + +VkResult +vkpt_uniform_buffer_create() +{ + VkDescriptorSetLayoutBinding ubo_layout_binding = { + .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = 1, + .binding = GLOBAL_UBO_BINDING_IDX, + .stageFlags = VK_SHADER_STAGE_ALL, + }; + + VkDescriptorSetLayoutCreateInfo layout_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = 1, + .pBindings = &ubo_layout_binding, + }; + + _VK(vkCreateDescriptorSetLayout(qvk.device, &layout_info, NULL, &qvk.desc_set_layout_ubo)); + + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + buffer_create(uniform_buffers + i, sizeof(QVKUniformBuffer_t), + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + } + + VkDescriptorPoolSize pool_size = { + .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = qvk.num_swap_chain_images, + }; + + VkDescriptorPoolCreateInfo pool_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .poolSizeCount = 1, + .pPoolSizes = &pool_size, + .maxSets = qvk.num_swap_chain_images, + }; + + _VK(vkCreateDescriptorPool(qvk.device, &pool_info, NULL, &desc_pool_ubo)); + + VkDescriptorSetAllocateInfo descriptor_set_alloc_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + .descriptorPool = desc_pool_ubo, + .descriptorSetCount = 1, + .pSetLayouts = &qvk.desc_set_layout_ubo, + }; + + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + _VK(vkAllocateDescriptorSets(qvk.device, &descriptor_set_alloc_info, qvk.desc_set_ubo + i)); + BufferResource_t *ubo = uniform_buffers + i; + + VkDescriptorBufferInfo buf_info = { + .buffer = ubo->buffer, + .offset = 0, + .range = sizeof(QVKUniformBuffer_t), + }; + + VkWriteDescriptorSet output_buf_write = { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = qvk.desc_set_ubo[i], + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = 1, + .pBufferInfo = &buf_info, + }; + + vkUpdateDescriptorSets(qvk.device, 1, &output_buf_write, 0, NULL); + } + + return VK_SUCCESS; +} + +VkResult +vkpt_uniform_buffer_destroy() +{ + vkDestroyDescriptorPool(qvk.device, desc_pool_ubo, NULL); + vkDestroyDescriptorSetLayout(qvk.device, qvk.desc_set_layout_ubo, NULL); + desc_pool_ubo = VK_NULL_HANDLE; + qvk.desc_set_layout_ubo = VK_NULL_HANDLE; + + for(int i = 0; i < qvk.num_swap_chain_images; i++) { + buffer_destroy(uniform_buffers + i); + } + + return VK_SUCCESS; +} + +VkResult +vkpt_uniform_buffer_update() +{ + BufferResource_t *ubo = uniform_buffers + qvk.current_image_index; + assert(ubo); + assert(ubo->memory != VK_NULL_HANDLE); + assert(ubo->buffer != VK_NULL_HANDLE); + assert(qvk.current_image_index < qvk.num_swap_chain_images); + + QVKUniformBuffer_t *mapped_ubo = buffer_map(ubo); + assert(mapped_ubo); + memcpy(mapped_ubo, &vkpt_refdef.uniform_buffer, sizeof(QVKUniformBuffer_t)); + buffer_unmap(ubo); + mapped_ubo = NULL; + + return VK_SUCCESS; +} + + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/vertex_buffer.c b/src/refresh/vkpt/vertex_buffer.c new file mode 100644 index 0000000..227813e --- /dev/null +++ b/src/refresh/vkpt/vertex_buffer.c @@ -0,0 +1,336 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU 
General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+#include "vkpt.h"
+
+#include "shader/vertex_buffer.h"
+
+#include
+#include
+
+static VkDescriptorPool desc_pool_vertex_buffer;
+static VkPipeline pipeline_instance_geometry;
+static VkPipelineLayout pipeline_layout_instance_geometry;
+
+VkResult
+vkpt_vertex_buffer_upload_staging()
+{
+	vkDeviceWaitIdle(qvk.device);
+	assert(!qvk.buf_vertex_staging.is_mapped);
+	VkCommandBufferAllocateInfo cmd_alloc = {
+		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+		.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+		.commandPool = qvk.command_pool,
+		.commandBufferCount = 1,
+	};
+
+	VkCommandBuffer cmd_buf;
+	vkAllocateCommandBuffers(qvk.device, &cmd_alloc, &cmd_buf);
+	VkCommandBufferBeginInfo cmd_begin_info = {
+		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+		.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+	};
+	vkBeginCommandBuffer(cmd_buf, &cmd_begin_info);
+
+	VkBufferCopy copyRegion = {
+		.size = sizeof(VertexBuffer),
+	};
+	vkCmdCopyBuffer(cmd_buf, qvk.buf_vertex_staging.buffer, qvk.buf_vertex.buffer, 1, &copyRegion);
+
+	vkEndCommandBuffer(cmd_buf);
+	VkSubmitInfo submit_info = {
+		.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+		.commandBufferCount = 1,
+		.pCommandBuffers = &cmd_buf,
+	};
+
+	vkQueueSubmit(qvk.queue_graphics, 1, &submit_info, VK_NULL_HANDLE);
+
+	vkQueueWaitIdle(qvk.queue_graphics);
+
+	return VK_SUCCESS;
+}
+
+VkResult
+vkpt_vertex_buffer_upload_bsp_mesh_to_staging(bsp_mesh_t *bsp_mesh)
+{
+	assert(bsp_mesh);
+	VertexBuffer *vbo = (VertexBuffer *) buffer_map(&qvk.buf_vertex_staging);
+	assert(vbo);
+
+	assert(bsp_mesh->num_vertices < MAX_VERT_BSP);
+
+	memcpy(vbo->positions_bsp, bsp_mesh->positions, bsp_mesh->num_vertices * sizeof(float) * 3 );
+	memcpy(vbo->tex_coords_bsp, bsp_mesh->tex_coords,bsp_mesh->num_vertices * sizeof(float) * 2 );
+	memcpy(vbo->materials_bsp, bsp_mesh->materials, bsp_mesh->num_vertices * sizeof(uint32_t) / 3);
+	memcpy(vbo->clusters_bsp, bsp_mesh->clusters, bsp_mesh->num_vertices * sizeof(uint32_t) / 3);
+
+	assert(bsp_mesh->num_clusters + 1 < MAX_LIGHT_LISTS);
+	assert(bsp_mesh->num_cluster_lights < MAX_LIGHT_LIST_NODES);
+
+	memcpy(vbo->light_list_offsets, bsp_mesh->cluster_light_offsets, (bsp_mesh->num_clusters + 1) * sizeof(uint32_t));
+	memcpy(vbo->light_list_lights, bsp_mesh->cluster_lights, bsp_mesh->num_cluster_lights * sizeof(uint32_t));
+
+	buffer_unmap(&qvk.buf_vertex_staging);
+	vbo = NULL;
+
+	return VK_SUCCESS;
+}
+
+VkResult
+vkpt_vertex_buffer_upload_models_to_staging()
+{
+	VertexBuffer *vbo = (VertexBuffer *) buffer_map(&qvk.buf_vertex_staging);
+	assert(vbo);
+
+	int idx_offset = 0;
+	int vertex_offset = 0;
+	for(int i = 0; i < MAX_MODELS; i++) {
+		if(!r_models[i].meshes) {
+			continue;
+		}
+		maliasmesh_t *m = r_models[i].meshes;
+
+		m->idx_offset = idx_offset;
+		m->vertex_offset = vertex_offset;
+
+		assert(r_models[i].numframes > 0);
+
+		int num_verts = r_models[i].numframes * m->numverts;
+		assert(num_verts > 0);
+#if 0
+		for(int j = 
0; j < num_verts; j++) + Com_Printf("%f %f %f\n", + m->positions[j][0], + m->positions[j][1], + m->positions[j][2]); + + for(int j = 0; j < m->numtris; j++) + Com_Printf("%d %d %d\n", + m->indices[j * 3 + 0], + m->indices[j * 3 + 1], + m->indices[j * 3 + 2]); +#endif + +#if 0 + char buf[1024]; + snprintf(buf, sizeof buf, "model_%04d.obj", i); + FILE *f = fopen(buf, "wb+"); + assert(f); + for(int j = 0; j < m->numverts; j++) { + fprintf(f, "v %f %f %f\n", + m->positions[j][0], + m->positions[j][1], + m->positions[j][2]); + } + for(int j = 0; j < m->numindices / 3; j++) { + fprintf(f, "f %d %d %d\n", + m->indices[j * 3 + 0] + 1, + m->indices[j * 3 + 1] + 1, + m->indices[j * 3 + 2] + 1); + } + fclose(f); +#endif + + memcpy(vbo->positions_model + vertex_offset * 3, m->positions, sizeof(float) * 3 * num_verts); + memcpy(vbo->normals_model + vertex_offset * 3, m->normals, sizeof(float) * 3 * num_verts); + memcpy(vbo->tex_coords_model + vertex_offset * 2, m->tex_coords, sizeof(float) * 2 * num_verts); + memcpy(vbo->idx_model + idx_offset, m->indices, sizeof(uint32_t) * m->numindices); + + vertex_offset += num_verts; + idx_offset += m->numtris * 3; + + assert(vertex_offset < MAX_VERT_MODEL); + assert(idx_offset < MAX_IDX_MODEL); + } + + buffer_unmap(&qvk.buf_vertex_staging); + vbo = NULL; + + Com_Printf("uploaded %d vert, %d idx\n", vertex_offset, idx_offset); + + return VK_SUCCESS; +} + +VkResult +vkpt_vertex_buffer_create() +{ + VkDescriptorSetLayoutBinding vbo_layout_binding = { + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + .binding = VERTEX_BUFFER_BINDING_IDX, + .stageFlags = VK_SHADER_STAGE_ALL, + }; + + VkDescriptorSetLayoutCreateInfo layout_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = 1, + .pBindings = &vbo_layout_binding, + }; + + _VK(vkCreateDescriptorSetLayout(qvk.device, &layout_info, NULL, &qvk.desc_set_layout_vertex_buffer)); + + Com_Printf("allocating %.02f MB of memory for vertex buffer\n", (double) sizeof(VertexBuffer) / (1024.0 * 1024.0)); + buffer_create(&qvk.buf_vertex, sizeof(VertexBuffer), + VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + + Com_Printf("allocating %.02f MB of memory for staging vertex buffer\n", (double) sizeof(VertexBuffer) / (1024.0 * 1024.0)); + buffer_create(&qvk.buf_vertex_staging, sizeof(VertexBuffer), + VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + + VkDescriptorPoolSize pool_size = { + .type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + }; + + VkDescriptorPoolCreateInfo pool_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .poolSizeCount = 1, + .pPoolSizes = &pool_size, + .maxSets = 1, + }; + + _VK(vkCreateDescriptorPool(qvk.device, &pool_info, NULL, &desc_pool_vertex_buffer)); + + VkDescriptorSetAllocateInfo descriptor_set_alloc_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + .descriptorPool = desc_pool_vertex_buffer, + .descriptorSetCount = 1, + .pSetLayouts = &qvk.desc_set_layout_vertex_buffer, + }; + + _VK(vkAllocateDescriptorSets(qvk.device, &descriptor_set_alloc_info, &qvk.desc_set_vertex_buffer)); + + VkDescriptorBufferInfo buf_info = { + .buffer = qvk.buf_vertex.buffer, + .offset = 0, + .range = sizeof(VertexBuffer), + }; + + VkWriteDescriptorSet output_buf_write = { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = 
qvk.desc_set_vertex_buffer, + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + .pBufferInfo = &buf_info, + }; + + vkUpdateDescriptorSets(qvk.device, 1, &output_buf_write, 0, NULL); + + return VK_SUCCESS; +} + +VkResult +vkpt_vertex_buffer_destroy() +{ + vkDestroyDescriptorPool(qvk.device, desc_pool_vertex_buffer, NULL); + vkDestroyDescriptorSetLayout(qvk.device, qvk.desc_set_layout_vertex_buffer, NULL); + desc_pool_vertex_buffer = VK_NULL_HANDLE; + qvk.desc_set_layout_vertex_buffer = VK_NULL_HANDLE; + + buffer_destroy(&qvk.buf_vertex); + buffer_destroy(&qvk.buf_vertex_staging); + + return VK_SUCCESS; +} + +VkResult +vkpt_vertex_buffer_create_pipelines() +{ + assert(!pipeline_instance_geometry); + assert(!pipeline_layout_instance_geometry); + + assert(qvk.desc_set_layout_ubo); + assert(qvk.desc_set_layout_vertex_buffer); + + VkDescriptorSetLayout desc_set_layouts[] = { + qvk.desc_set_layout_ubo, qvk.desc_set_layout_vertex_buffer + }; + + CREATE_PIPELINE_LAYOUT(qvk.device, &pipeline_layout_instance_geometry, + .setLayoutCount = LENGTH(desc_set_layouts), + .pSetLayouts = desc_set_layouts, + ); + + VkComputePipelineCreateInfo compute_pipeline_info = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = SHADER_STAGE(QVK_MOD_INSTANCE_GEOMETRY_COMP, VK_SHADER_STAGE_COMPUTE_BIT), + .layout = pipeline_layout_instance_geometry + }; + + assert(compute_pipeline_info.stage.module); + + _VK(vkCreateComputePipelines(qvk.device, 0, 1, &compute_pipeline_info, 0, &pipeline_instance_geometry)); + + return VK_SUCCESS; +} + +VkResult +vkpt_vertex_buffer_destroy_pipelines() +{ + assert(pipeline_instance_geometry); + assert(pipeline_layout_instance_geometry); + + vkDestroyPipeline(qvk.device, pipeline_instance_geometry, NULL); + vkDestroyPipelineLayout(qvk.device, pipeline_layout_instance_geometry, NULL); + + pipeline_instance_geometry = VK_NULL_HANDLE; + pipeline_layout_instance_geometry = VK_NULL_HANDLE; + + return VK_SUCCESS; +} + +VkResult +vkpt_vertex_buffer_create_instance(uint32_t num_instances) +{ + VkDescriptorSet desc_sets[] = { + qvk.desc_set_ubo[qvk.current_image_index], + qvk.desc_set_vertex_buffer + }; + vkCmdBindPipeline(qvk.cmd_buf_current, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_instance_geometry); + vkCmdBindDescriptorSets(qvk.cmd_buf_current, VK_PIPELINE_BIND_POINT_COMPUTE, + pipeline_layout_instance_geometry, 0, LENGTH(desc_sets), desc_sets, 0, 0); + + vkCmdDispatch(qvk.cmd_buf_current, num_instances, 1, 1); + + VkBufferMemoryBarrier barrier = { + .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, + .buffer = qvk.buf_vertex.buffer, + .size = qvk.buf_vertex.size, + .srcQueueFamilyIndex = qvk.queue_idx_graphics, + .dstQueueFamilyIndex = qvk.queue_idx_graphics + }; + + vkCmdPipelineBarrier( + qvk.cmd_buf_current, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + 0, + 0, NULL, + 1, &barrier, + 0, NULL); + + return VK_SUCCESS; +} + +// vim: shiftwidth=4 noexpandtab tabstop=4 cindent diff --git a/src/refresh/vkpt/vk_util.c b/src/refresh/vkpt/vk_util.c new file mode 100644 index 0000000..8551d73 --- /dev/null +++ b/src/refresh/vkpt/vk_util.c @@ -0,0 +1,369 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later 
version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +#include "shared/shared.h" +#include "vk_util.h" +#include "vkpt.h" + +#include + +uint32_t +get_memory_type(uint32_t mem_req_type_bits, VkMemoryPropertyFlags mem_prop) +{ + for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; i++) { + if(mem_req_type_bits & (1 << i)) { + if((qvk.mem_properties.memoryTypes[i].propertyFlags & mem_prop) == mem_prop) + return i; + } + } + return 0; +} + +VkResult +buffer_create( + BufferResource_t *buf, + VkDeviceSize size, + VkBufferUsageFlags usage, + VkMemoryPropertyFlags mem_properties) +{ + assert(size > 0); + assert(buf); + VkResult result = VK_SUCCESS; + + VkBufferCreateInfo buf_create_info = { + .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + .size = size, + .usage = usage, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = 0, + .pQueueFamilyIndices = NULL + }; + + buf->size = size; + buf->is_mapped = 0; + + result = vkCreateBuffer(qvk.device, &buf_create_info, NULL, &buf->buffer); + if(result != VK_SUCCESS) { + goto fail_buffer; + } + assert(buf->buffer != VK_NULL_HANDLE); + + VkMemoryRequirements mem_reqs; + vkGetBufferMemoryRequirements(qvk.device, buf->buffer, &mem_reqs); + + VkMemoryAllocateInfo mem_alloc_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = mem_reqs.size, + .memoryTypeIndex = get_memory_type(mem_reqs.memoryTypeBits, mem_properties) + }; + + result = vkAllocateMemory(qvk.device, &mem_alloc_info, NULL, &buf->memory); + if(result != VK_SUCCESS) { + goto fail_mem_alloc; + } + + assert(buf->memory != VK_NULL_HANDLE); + + result = vkBindBufferMemory(qvk.device, buf->buffer, buf->memory, 0); + if(result != VK_SUCCESS) { + goto fail_bind_buf_memory; + } + + return VK_SUCCESS; + +fail_bind_buf_memory: + vkFreeMemory(qvk.device, buf->memory, NULL); +fail_mem_alloc: + vkDestroyBuffer(qvk.device, buf->buffer, NULL); +fail_buffer: + buf->buffer = VK_NULL_HANDLE; + buf->memory = VK_NULL_HANDLE; + buf->size = 0; + return result; +} + +VkResult +buffer_destroy(BufferResource_t *buf) +{ + assert(!buf->is_mapped); + if(buf->memory != VK_NULL_HANDLE) + vkFreeMemory(qvk.device, buf->memory, NULL); + if(buf->buffer != VK_NULL_HANDLE) + vkDestroyBuffer(qvk.device, buf->buffer, NULL); + buf->buffer = VK_NULL_HANDLE; + buf->memory = VK_NULL_HANDLE; + buf->size = 0; + + return VK_SUCCESS; +} + +void * +buffer_map(BufferResource_t *buf) +{ + assert(!buf->is_mapped); + buf->is_mapped = 1; + void *ret = NULL; + assert(buf->memory != VK_NULL_HANDLE); + assert(buf->size > 0); + _VK(vkMapMemory(qvk.device, buf->memory, 0 /*offset*/, buf->size, 0 /*flags*/, &ret)); + return ret; +} + +void +buffer_unmap(BufferResource_t *buf) +{ + assert(buf->is_mapped); + buf->is_mapped = 0; + vkUnmapMemory(qvk.device, buf->memory); +} + +const char * +vk_format_to_string(VkFormat format) +{ + switch(format) { + case 0: return "UNDEFINED"; + case 1: return "R4G4_UNORM_PACK8"; + case 2: return "R4G4B4A4_UNORM_PACK16"; + case 3: return "B4G4R4A4_UNORM_PACK16"; + case 4: return "R5G6B5_UNORM_PACK16"; + case 5: return "B5G6R5_UNORM_PACK16"; + case 6: return "R5G5B5A1_UNORM_PACK16"; + 
case 7: return "B5G5R5A1_UNORM_PACK16"; + case 8: return "A1R5G5B5_UNORM_PACK16"; + case 9: return "R8_UNORM"; + case 10: return "R8_SNORM"; + case 11: return "R8_USCALED"; + case 12: return "R8_SSCALED"; + case 13: return "R8_UINT"; + case 14: return "R8_SINT"; + case 15: return "R8_SRGB"; + case 16: return "R8G8_UNORM"; + case 17: return "R8G8_SNORM"; + case 18: return "R8G8_USCALED"; + case 19: return "R8G8_SSCALED"; + case 20: return "R8G8_UINT"; + case 21: return "R8G8_SINT"; + case 22: return "R8G8_SRGB"; + case 23: return "R8G8B8_UNORM"; + case 24: return "R8G8B8_SNORM"; + case 25: return "R8G8B8_USCALED"; + case 26: return "R8G8B8_SSCALED"; + case 27: return "R8G8B8_UINT"; + case 28: return "R8G8B8_SINT"; + case 29: return "R8G8B8_SRGB"; + case 30: return "B8G8R8_UNORM"; + case 31: return "B8G8R8_SNORM"; + case 32: return "B8G8R8_USCALED"; + case 33: return "B8G8R8_SSCALED"; + case 34: return "B8G8R8_UINT"; + case 35: return "B8G8R8_SINT"; + case 36: return "B8G8R8_SRGB"; + case 37: return "R8G8B8A8_UNORM"; + case 38: return "R8G8B8A8_SNORM"; + case 39: return "R8G8B8A8_USCALED"; + case 40: return "R8G8B8A8_SSCALED"; + case 41: return "R8G8B8A8_UINT"; + case 42: return "R8G8B8A8_SINT"; + case 43: return "R8G8B8A8_SRGB"; + case 44: return "B8G8R8A8_UNORM"; + case 45: return "B8G8R8A8_SNORM"; + case 46: return "B8G8R8A8_USCALED"; + case 47: return "B8G8R8A8_SSCALED"; + case 48: return "B8G8R8A8_UINT"; + case 49: return "B8G8R8A8_SINT"; + case 50: return "B8G8R8A8_SRGB"; + case 51: return "A8B8G8R8_UNORM_PACK32"; + case 52: return "A8B8G8R8_SNORM_PACK32"; + case 53: return "A8B8G8R8_USCALED_PACK32"; + case 54: return "A8B8G8R8_SSCALED_PACK32"; + case 55: return "A8B8G8R8_UINT_PACK32"; + case 56: return "A8B8G8R8_SINT_PACK32"; + case 57: return "A8B8G8R8_SRGB_PACK32"; + case 58: return "A2R10G10B10_UNORM_PACK32"; + case 59: return "A2R10G10B10_SNORM_PACK32"; + case 60: return "A2R10G10B10_USCALED_PACK32"; + case 61: return "A2R10G10B10_SSCALED_PACK32"; + case 62: return "A2R10G10B10_UINT_PACK32"; + case 63: return "A2R10G10B10_SINT_PACK32"; + case 64: return "A2B10G10R10_UNORM_PACK32"; + case 65: return "A2B10G10R10_SNORM_PACK32"; + case 66: return "A2B10G10R10_USCALED_PACK32"; + case 67: return "A2B10G10R10_SSCALED_PACK32"; + case 68: return "A2B10G10R10_UINT_PACK32"; + case 69: return "A2B10G10R10_SINT_PACK32"; + case 70: return "R16_UNORM"; + case 71: return "R16_SNORM"; + case 72: return "R16_USCALED"; + case 73: return "R16_SSCALED"; + case 74: return "R16_UINT"; + case 75: return "R16_SINT"; + case 76: return "R16_SFLOAT"; + case 77: return "R16G16_UNORM"; + case 78: return "R16G16_SNORM"; + case 79: return "R16G16_USCALED"; + case 80: return "R16G16_SSCALED"; + case 81: return "R16G16_UINT"; + case 82: return "R16G16_SINT"; + case 83: return "R16G16_SFLOAT"; + case 84: return "R16G16B16_UNORM"; + case 85: return "R16G16B16_SNORM"; + case 86: return "R16G16B16_USCALED"; + case 87: return "R16G16B16_SSCALED"; + case 88: return "R16G16B16_UINT"; + case 89: return "R16G16B16_SINT"; + case 90: return "R16G16B16_SFLOAT"; + case 91: return "R16G16B16A16_UNORM"; + case 92: return "R16G16B16A16_SNORM"; + case 93: return "R16G16B16A16_USCALED"; + case 94: return "R16G16B16A16_SSCALED"; + case 95: return "R16G16B16A16_UINT"; + case 96: return "R16G16B16A16_SINT"; + case 97: return "R16G16B16A16_SFLOAT"; + case 98: return "R32_UINT"; + case 99: return "R32_SINT"; + case 100: return "R32_SFLOAT"; + case 101: return "R32G32_UINT"; + case 102: return "R32G32_SINT"; + case 103: return 
"R32G32_SFLOAT"; + case 104: return "R32G32B32_UINT"; + case 105: return "R32G32B32_SINT"; + case 106: return "R32G32B32_SFLOAT"; + case 107: return "R32G32B32A32_UINT"; + case 108: return "R32G32B32A32_SINT"; + case 109: return "R32G32B32A32_SFLOAT"; + case 110: return "R64_UINT"; + case 111: return "R64_SINT"; + case 112: return "R64_SFLOAT"; + case 113: return "R64G64_UINT"; + case 114: return "R64G64_SINT"; + case 115: return "R64G64_SFLOAT"; + case 116: return "R64G64B64_UINT"; + case 117: return "R64G64B64_SINT"; + case 118: return "R64G64B64_SFLOAT"; + case 119: return "R64G64B64A64_UINT"; + case 120: return "R64G64B64A64_SINT"; + case 121: return "R64G64B64A64_SFLOAT"; + case 122: return "B10G11R11_UFLOAT_PACK32"; + case 123: return "E5B9G9R9_UFLOAT_PACK32"; + case 124: return "D16_UNORM"; + case 125: return "X8_D24_UNORM_PACK32"; + case 126: return "D32_SFLOAT"; + case 127: return "S8_UINT"; + case 128: return "D16_UNORM_S8_UINT"; + case 129: return "D24_UNORM_S8_UINT"; + case 130: return "D32_SFLOAT_S8_UINT"; + case 131: return "BC1_RGB_UNORM_BLOCK"; + case 132: return "BC1_RGB_SRGB_BLOCK"; + case 133: return "BC1_RGBA_UNORM_BLOCK"; + case 134: return "BC1_RGBA_SRGB_BLOCK"; + case 135: return "BC2_UNORM_BLOCK"; + case 136: return "BC2_SRGB_BLOCK"; + case 137: return "BC3_UNORM_BLOCK"; + case 138: return "BC3_SRGB_BLOCK"; + case 139: return "BC4_UNORM_BLOCK"; + case 140: return "BC4_SNORM_BLOCK"; + case 141: return "BC5_UNORM_BLOCK"; + case 142: return "BC5_SNORM_BLOCK"; + case 143: return "BC6H_UFLOAT_BLOCK"; + case 144: return "BC6H_SFLOAT_BLOCK"; + case 145: return "BC7_UNORM_BLOCK"; + case 146: return "BC7_SRGB_BLOCK"; + case 147: return "ETC2_R8G8B8_UNORM_BLOCK"; + case 148: return "ETC2_R8G8B8_SRGB_BLOCK"; + case 149: return "ETC2_R8G8B8A1_UNORM_BLOCK"; + case 150: return "ETC2_R8G8B8A1_SRGB_BLOCK"; + case 151: return "ETC2_R8G8B8A8_UNORM_BLOCK"; + case 152: return "ETC2_R8G8B8A8_SRGB_BLOCK"; + case 153: return "EAC_R11_UNORM_BLOCK"; + case 154: return "EAC_R11_SNORM_BLOCK"; + case 155: return "EAC_R11G11_UNORM_BLOCK"; + case 156: return "EAC_R11G11_SNORM_BLOCK"; + case 157: return "ASTC_4x4_UNORM_BLOCK"; + case 158: return "ASTC_4x4_SRGB_BLOCK"; + case 159: return "ASTC_5x4_UNORM_BLOCK"; + case 160: return "ASTC_5x4_SRGB_BLOCK"; + case 161: return "ASTC_5x5_UNORM_BLOCK"; + case 162: return "ASTC_5x5_SRGB_BLOCK"; + case 163: return "ASTC_6x5_UNORM_BLOCK"; + case 164: return "ASTC_6x5_SRGB_BLOCK"; + case 165: return "ASTC_6x6_UNORM_BLOCK"; + case 166: return "ASTC_6x6_SRGB_BLOCK"; + case 167: return "ASTC_8x5_UNORM_BLOCK"; + case 168: return "ASTC_8x5_SRGB_BLOCK"; + case 169: return "ASTC_8x6_UNORM_BLOCK"; + case 170: return "ASTC_8x6_SRGB_BLOCK"; + case 171: return "ASTC_8x8_UNORM_BLOCK"; + case 172: return "ASTC_8x8_SRGB_BLOCK"; + case 173: return "ASTC_10x5_UNORM_BLOCK"; + case 174: return "ASTC_10x5_SRGB_BLOCK"; + case 175: return "ASTC_10x6_UNORM_BLOCK"; + case 176: return "ASTC_10x6_SRGB_BLOCK"; + case 177: return "ASTC_10x8_UNORM_BLOCK"; + case 178: return "ASTC_10x8_SRGB_BLOCK"; + case 179: return "ASTC_10x10_UNORM_BLOCK"; + case 180: return "ASTC_10x10_SRGB_BLOCK"; + case 181: return "ASTC_12x10_UNORM_BLOCK"; + case 182: return "ASTC_12x10_SRGB_BLOCK"; + case 183: return "ASTC_12x12_UNORM_BLOCK"; + case 184: return "ASTC_12x12_SRGB_BLOCK"; + case 1000156000: return "G8B8G8R8_422_UNORM"; + case 1000156001: return "B8G8R8G8_422_UNORM"; + case 1000156002: return "G8_B8_R8_3PLANE_420_UNORM"; + case 1000156003: return "G8_B8R8_2PLANE_420_UNORM"; + case 1000156004: return 
"G8_B8_R8_3PLANE_422_UNORM"; + case 1000156005: return "G8_B8R8_2PLANE_422_UNORM"; + case 1000156006: return "G8_B8_R8_3PLANE_444_UNORM"; + case 1000156007: return "R10X6_UNORM_PACK16"; + case 1000156008: return "R10X6G10X6_UNORM_2PACK16"; + case 1000156009: return "R10X6G10X6B10X6A10X6_UNORM_4PACK16"; + case 1000156010: return "G10X6B10X6G10X6R10X6_422_UNORM_4PACK16"; + case 1000156011: return "B10X6G10X6R10X6G10X6_422_UNORM_4PACK16"; + case 1000156012: return "G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16"; + case 1000156013: return "G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16"; + case 1000156014: return "G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16"; + case 1000156015: return "G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16"; + case 1000156016: return "G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16"; + case 1000156017: return "R12X4_UNORM_PACK16"; + case 1000156018: return "R12X4G12X4_UNORM_2PACK16"; + case 1000156019: return "R12X4G12X4B12X4A12X4_UNORM_4PACK16"; + case 1000156020: return "G12X4B12X4G12X4R12X4_422_UNORM_4PACK16"; + case 1000156021: return "B12X4G12X4R12X4G12X4_422_UNORM_4PACK16"; + case 1000156022: return "G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16"; + case 1000156023: return "G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16"; + case 1000156024: return "G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16"; + case 1000156025: return "G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16"; + case 1000156026: return "G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16"; + case 1000156027: return "G16B16G16R16_422_UNORM"; + case 1000156028: return "B16G16R16G16_422_UNORM"; + case 1000156029: return "G16_B16_R16_3PLANE_420_UNORM"; + case 1000156030: return "G16_B16R16_2PLANE_420_UNORM"; + case 1000156031: return "G16_B16_R16_3PLANE_422_UNORM"; + case 1000156032: return "G16_B16R16_2PLANE_422_UNORM"; + case 1000156033: return "G16_B16_R16_3PLANE_444_UNORM"; + case 1000054000: return "PVRTC1_2BPP_UNORM_BLOCK_IMG"; + case 1000054001: return "PVRTC1_4BPP_UNORM_BLOCK_IMG"; + case 1000054002: return "PVRTC2_2BPP_UNORM_BLOCK_IMG"; + case 1000054003: return "PVRTC2_4BPP_UNORM_BLOCK_IMG"; + case 1000054004: return "PVRTC1_2BPP_SRGB_BLOCK_IMG"; + case 1000054005: return "PVRTC1_4BPP_SRGB_BLOCK_IMG"; + case 1000054006: return "PVRTC2_2BPP_SRGB_BLOCK_IMG"; + case 1000054007: return "PVRTC2_4BPP_SRGB_BLOCK_IMG"; + default: break; + } + return ""; +} diff --git a/src/refresh/vkpt/vk_util.h b/src/refresh/vkpt/vk_util.h new file mode 100644 index 0000000..c31ef1f --- /dev/null +++ b/src/refresh/vkpt/vk_util.h @@ -0,0 +1,98 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#ifndef __VK_UTIL_H__ +#define __VK_UTIL_H__ + +#include + +typedef struct BufferResource_s { + VkBuffer buffer; + VkDeviceMemory memory; + size_t size; + int is_mapped; +} BufferResource_t; + +VkResult +buffer_create( + BufferResource_t *buf, + VkDeviceSize size, + VkBufferUsageFlags usage, + VkMemoryPropertyFlags mem_properties); + +VkResult buffer_destroy(BufferResource_t *buf); +void buffer_unmap(BufferResource_t *buf); +void *buffer_map(BufferResource_t *buf); +void buffer_unmap(BufferResource_t *buf); + +uint32_t get_memory_type(uint32_t mem_req_type_bits, VkMemoryPropertyFlags mem_prop); + +#define IMAGE_BARRIER(cmd_buf, ...) \ + do { \ + VkImageMemoryBarrier img_mem_barrier = { \ + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, \ + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, \ + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, \ + __VA_ARGS__ \ + }; \ + vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, \ + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, \ + 1, &img_mem_barrier); \ + } while(0) + +#define CREATE_PIPELINE_LAYOUT(dev, layout, ...) \ + do { \ + VkPipelineLayoutCreateInfo pipeline_layout_info = { \ + .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, \ + __VA_ARGS__ \ + }; \ + _VK(vkCreatePipelineLayout(dev, &pipeline_layout_info, NULL, layout)); \ + } while(0) \ + +const char * vk_format_to_string(VkFormat format); + +#ifdef VKPT_ENABLE_VALIDATION +#define ATTACH_LABEL_VARIABLE(a, type) \ + do { \ + /*Com_Printf("attaching object label 0x%08lx %s\n", (uint64_t) a, #a);*/ \ + VkDebugMarkerObjectNameInfoEXT name_info = { \ + .sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT, \ + .object = (uint64_t) a, \ + .objectType = VK_DEBUG_REPORT_OBJECT_TYPE_##type##_EXT, \ + .pObjectName = #a \ + }; \ + qvkDebugMarkerSetObjectNameEXT(qvk.device, &name_info); \ + } while(0) + +#define ATTACH_LABEL_VARIABLE_NAME(a, type, name) \ + do { \ + /*Com_Printf("attaching object label 0x%08lx %s\n", (uint64_t) a, name);*/ \ + VkDebugMarkerObjectNameInfoEXT name_info = { \ + .sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT, \ + .object = (uint64_t) a, \ + .objectType = VK_DEBUG_REPORT_OBJECT_TYPE_##type##_EXT, \ + .pObjectName = name, \ + }; \ + qvkDebugMarkerSetObjectNameEXT(qvk.device, &name_info); \ + } while(0) +#else +#define ATTACH_LABEL_VARIABLE(a, type) do{}while(0) +#define ATTACH_LABEL_VARIABLE_NAME(a, type, name) do{}while(0) +#endif + +#endif /*__VK_UTIL_H__*/ diff --git a/src/refresh/vkpt/vkpt.h b/src/refresh/vkpt/vkpt.h new file mode 100644 index 0000000..45b702b --- /dev/null +++ b/src/refresh/vkpt/vkpt.h @@ -0,0 +1,397 @@ +/* +Copyright (C) 2018 Christoph Schied + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +#ifndef __VKPT_H__ +#define __VKPT_H__ + +#include +#include +#include + +#include "vk_util.h" + +#include "shared/shared.h" +#include "common/bsp.h" +#include "common/cmd.h" +#include "common/common.h" +#include "common/cvar.h" +#include "common/files.h" +#include "common/math.h" +#include "client/video.h" +#include "client/client.h" +#include "refresh/refresh.h" +#include "refresh/images.h" +#include "refresh/models.h" +#include "system/hunk.h" + +#include "shader/global_ubo.h" +#include "shader/global_textures.h" + +#define LENGTH(a) ((sizeof (a)) / (sizeof(*(a)))) +#define MIN(a,b) ((a) < (b) ? (a) : (b)) +#define MAX(a,b) ((a) > (b) ? (a) : (b)) + +#define LOG_FUNC_(f) do {} while(0) +//#define LOG_FUNC_(f) Com_Printf("%s\n", f) +#define LOG_FUNC() LOG_FUNC_(__func__) + +#define FUNC_UNIMPLEMENTED_(f) Com_EPrintf("Calling unimplemented function %s\n", f) +#define FUNC_UNIMPLEMENTED() FUNC_UNIMPLEMENTED_(__func__) + +#define MAX_LIGHTS 4096 + +#define _VK(...) \ + do { \ + VkResult _res = __VA_ARGS__; \ + if(_res != VK_SUCCESS) { \ + Com_EPrintf("error %d executing %s!\n", _res, # __VA_ARGS__); \ + } \ + } while(0) + +/* see main.c to override default file path. By default it will strip away + * QVK_MOD_, fix the file ending, and convert to lower case */ +#define LIST_SHADER_MODULES \ + SHADER_MODULE_DO(QVK_MOD_STRETCH_PIC_VERT) \ + SHADER_MODULE_DO(QVK_MOD_STRETCH_PIC_FRAG) \ + SHADER_MODULE_DO(QVK_MOD_PATH_TRACER_RGEN) \ + SHADER_MODULE_DO(QVK_MOD_PATH_TRACER_RCHIT) \ + SHADER_MODULE_DO(QVK_MOD_PATH_TRACER_RMISS) \ + SHADER_MODULE_DO(QVK_MOD_PATH_TRACER_RTX_RGEN) \ + SHADER_MODULE_DO(QVK_MOD_PATH_TRACER_RTX_RCHIT) \ + SHADER_MODULE_DO(QVK_MOD_PATH_TRACER_RTX_RMISS) \ + SHADER_MODULE_DO(QVK_MOD_INSTANCE_GEOMETRY_COMP) \ + SHADER_MODULE_DO(QVK_MOD_ASVGF_SEED_RNG_COMP) \ + SHADER_MODULE_DO(QVK_MOD_ASVGF_FWD_PROJECT_COMP) \ + SHADER_MODULE_DO(QVK_MOD_ASVGF_GRADIENT_IMG_COMP) \ + SHADER_MODULE_DO(QVK_MOD_ASVGF_GRADIENT_ATROUS_COMP) \ + SHADER_MODULE_DO(QVK_MOD_ASVGF_ATROUS_COMP) \ + SHADER_MODULE_DO(QVK_MOD_ASVGF_TEMPORAL_COMP) \ + SHADER_MODULE_DO(QVK_MOD_ASVGF_TAA_COMP) \ + +#ifndef VKPT_SHADER_DIR +#define VKPT_SHADER_DIR "shader_vkpt" +#endif + +#define SHADER_PATH_TEMPLATE VKPT_SHADER_DIR "/%s.spv" + +#define SHADER_STAGE(_module, _stage) \ + { \ + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, \ + .stage = _stage, \ + .module = qvk.shader_modules[_module], \ + .pName = "main" \ + } + +enum QVK_SHADER_MODULES { +#define SHADER_MODULE_DO(a) a, + LIST_SHADER_MODULES +#undef SHADER_MODULE_DO + NUM_QVK_SHADER_MODULES +}; + +#define MAX_FRAMES_IN_FLIGHT 2 + +enum { + SEM_IMG_AVAILABLE = 0, + SEM_RENDER_FINISHED = SEM_IMG_AVAILABLE + MAX_FRAMES_IN_FLIGHT, + NUM_SEMAPHORES = SEM_RENDER_FINISHED + MAX_FRAMES_IN_FLIGHT, +}; + +#define MAX_SWAPCHAIN_IMAGES 4 + +typedef struct QVK_s { + VkInstance instance; + VkPhysicalDevice physical_device; + VkPhysicalDeviceMemoryProperties mem_properties; + VkDevice device; + VkQueue queue_graphics; + VkQueue queue_compute; + VkQueue queue_transfer; + int32_t queue_idx_graphics; + int32_t queue_idx_compute; + int32_t queue_idx_transfer; + VkSurfaceKHR surface; + VkSwapchainKHR swap_chain; + VkSurfaceFormatKHR surf_format; + VkPresentModeKHR present_mode; + VkExtent2D extent; + VkCommandPool command_pool; + uint32_t num_swap_chain_images; + VkImage swap_chain_images[MAX_SWAPCHAIN_IMAGES]; + VkImageView swap_chain_image_views[MAX_SWAPCHAIN_IMAGES]; + + uint32_t num_command_buffers; + VkCommandBuffer *command_buffers; + VkCommandBuffer 
cmd_buf_current; + + uint32_t num_extensions; + VkExtensionProperties *extensions; + + uint32_t num_layers; + VkLayerProperties *layers; + + VkDebugUtilsMessengerEXT dbg_messenger; + + VkSemaphore semaphores[NUM_SEMAPHORES]; + VkFence fences_frame_sync[MAX_FRAMES_IN_FLIGHT]; + + + int win_width; + int win_height; + uint64_t frame_counter; + + SDL_Window *window; + uint32_t num_sdl2_extensions; + const char **sdl2_extensions; + + uint32_t current_image_index; + + VkShaderModule shader_modules[NUM_QVK_SHADER_MODULES]; + + VkDescriptorSetLayout desc_set_layout_ubo; + VkDescriptorSet desc_set_ubo[MAX_SWAPCHAIN_IMAGES]; + + VkDescriptorSetLayout desc_set_layout_textures; + VkDescriptorSet desc_set_textures; + VkImage images [NUM_VKPT_IMAGES]; // todo: rename to make consistent + VkImageView images_views[NUM_VKPT_IMAGES]; // todo: rename to make consistent + + VkDescriptorSetLayout desc_set_layout_vertex_buffer; + VkDescriptorSet desc_set_vertex_buffer; + + VkDescriptorSetLayout desc_set_layout_light_hierarchy; + VkDescriptorSet desc_set_light_hierarchy[MAX_SWAPCHAIN_IMAGES]; + + BufferResource_t buf_vertex; + BufferResource_t buf_vertex_staging; +} QVK_t; + +extern QVK_t qvk; + + + +#define _VK_EXTENSION_LIST \ + _VK_EXTENSION_DO(vkCreateAccelerationStructureNV) \ + _VK_EXTENSION_DO(vkCreateAccelerationStructureNV) \ + _VK_EXTENSION_DO(vkDestroyAccelerationStructureNV) \ + _VK_EXTENSION_DO(vkGetAccelerationStructureMemoryRequirementsNV) \ + _VK_EXTENSION_DO(vkBindAccelerationStructureMemoryNV) \ + _VK_EXTENSION_DO(vkCmdBuildAccelerationStructureNV) \ + _VK_EXTENSION_DO(vkCmdCopyAccelerationStructureNV) \ + _VK_EXTENSION_DO(vkCmdTraceRaysNV) \ + _VK_EXTENSION_DO(vkCreateRayTracingPipelinesNV) \ + _VK_EXTENSION_DO(vkGetRayTracingShaderGroupHandlesNV) \ + _VK_EXTENSION_DO(vkGetAccelerationStructureHandleNV) \ + _VK_EXTENSION_DO(vkCmdWriteAccelerationStructuresPropertiesNV) \ + _VK_EXTENSION_DO(vkCompileDeferredNV) \ + _VK_EXTENSION_DO(vkDebugMarkerSetObjectNameEXT) + + +#define _VK_EXTENSION_DO(a) extern PFN_##a q##a; +_VK_EXTENSION_LIST +#undef _VK_EXTENSION_DO + +#define WM_MAX_VERTICES (1<<24) +typedef struct bsp_mesh_s { + uint32_t world_idx_count; + uint32_t *models_idx_offset; + uint32_t *models_idx_count; + vec3_t *model_centers; + int num_models; + + uint32_t world_fluid_offset; + uint32_t world_fluid_count; + + uint32_t world_light_offset; + uint32_t world_light_count; + + float *positions, *tex_coords; + int *indices; + uint32_t *materials; + int num_indices; + int num_vertices; + + int num_clusters; + int *clusters; + + int num_cluster_lights; + int *cluster_light_offsets; + int *cluster_lights; +} bsp_mesh_t; + +void bsp_mesh_create_from_bsp(bsp_mesh_t *wm, bsp_t *bsp); +void bsp_mesh_destroy(bsp_mesh_t *wm); +void bsp_mesh_register_textures(bsp_t *bsp); + +typedef struct vkpt_refdef_s { + QVKUniformBuffer_t uniform_buffer; + refdef_t *fd; + float view_matrix[16]; + float projection_matrix[16]; + float view_projection_matrix[16]; + + float view_matrix_prev[16]; + float projection_matrix_prev[16]; + float view_projection_matrix_prev[16]; + float z_near, z_far; + + bsp_mesh_t bsp_mesh_world; + int bsp_mesh_world_loaded; + + uint32_t *light_colors; + float *light_positions; + int num_static_lights; + int num_dynamic_lights; +} vkpt_refdef_t; + +extern vkpt_refdef_t vkpt_refdef; + +void mult_matrix_matrix(float *p, const float *a, const float *b); +void mult_matrix_vector(float *p, const float *a, const float *b); +void create_entity_matrix(float matrix[16], entity_t *e); +void 
create_projection_matrix(float matrix[16], float znear, float zfar, float fov_x, float fov_y); +void create_view_matrix(float matrix[16], refdef_t *fd); +void inverse(const float *m, float *inv); +void create_orthographic_matrix(float matrix[16], float xmin, float xmax, + float ymin, float ymax, float znear, float zfar); + +#define PROFILER_LIST \ + PROFILER_DO(PROFILER_FRAME_TIME, 0) \ + PROFILER_DO(PROFILER_INSTANCE_GEOMETRY, 1) \ + PROFILER_DO(PROFILER_BVH_UPDATE, 1) \ + PROFILER_DO(PROFILER_ASVGF_GRADIENT_SAMPLES, 1) \ + PROFILER_DO(PROFILER_PATH_TRACER, 1) \ + PROFILER_DO(PROFILER_ASVGF_FULL, 1) \ + PROFILER_DO(PROFILER_ASVGF_RECONSTRUCT_GRADIENT, 2) \ + PROFILER_DO(PROFILER_ASVGF_TEMPORAL, 2) \ + PROFILER_DO(PROFILER_ASVGF_ATROUS, 2) \ + PROFILER_DO(PROFILER_ASVGF_TAA, 2) + +enum { +#define PROFILER_DO(a, ...) a, + PROFILER_LIST +#undef PROFILER_DO + NUM_PROFILER_ENTRIES, +}; + +#define NUM_PROFILER_QUERIES_PER_FRAME (NUM_PROFILER_ENTRIES * 2) + +typedef enum { + PROFILER_START, + PROFILER_STOP, +} VKPTProfilerAction; + +VkResult vkpt_profiler_initialize(); +VkResult vkpt_profiler_destroy(); +VkResult vkpt_profiler_query(int idx, VKPTProfilerAction action); +VkResult vkpt_profiler_next_frame(int frame_num); +void draw_profiler(); + +VkResult vkpt_textures_initialize(); +VkResult vkpt_textures_destroy(); +VkResult vkpt_textures_end_registration(); +VkResult vkpt_textures_upload_envmap(int w, int h, byte *data); + +VkResult vkpt_draw_initialize(); +VkResult vkpt_draw_destroy(); +VkResult vkpt_draw_destroy_pipelines(); +VkResult vkpt_draw_create_pipelines(); +VkResult vkpt_draw_submit_stretch_pics(VkCommandBuffer *cmd_buf); +VkResult vkpt_draw_clear_stretch_pics(); + +VkResult vkpt_uniform_buffer_create(); +VkResult vkpt_uniform_buffer_destroy(); +VkResult vkpt_uniform_buffer_update(); + +VkResult vkpt_vertex_buffer_create(); +VkResult vkpt_vertex_buffer_destroy(); +VkResult vkpt_vertex_buffer_upload_bsp_mesh_to_staging(bsp_mesh_t *bsp_mesh); +VkResult vkpt_vertex_buffer_create_pipelines(); +VkResult vkpt_vertex_buffer_destroy_pipelines(); +VkResult vkpt_vertex_buffer_create_instance(uint32_t num_instances); +VkResult vkpt_vertex_buffer_upload_models_to_staging(); +VkResult vkpt_vertex_buffer_upload_staging(); + +VkResult vkpt_lh_upload_staging(); +VkResult vkpt_lh_update(const float *positions, const uint32_t *light_colors, int num_primitives, VkCommandBuffer cmd_buf); +VkResult vkpt_lh_initialize(); +VkResult vkpt_lh_destroy(); + +VkResult vkpt_load_shader_modules(); +VkResult vkpt_destroy_shader_modules(); +VkResult vkpt_create_images(); +VkResult vkpt_destroy_images(); + +VkResult vkpt_pt_init(); +VkResult vkpt_pt_destroy(); +VkResult vkpt_pt_create_pipelines(); +VkResult vkpt_pt_destroy_pipelines(); + +VkResult vkpt_pt_create_toplevel(int idx); +VkResult vkpt_pt_create_static(VkBuffer vertex_buffer, size_t buffer_offset, int num_vertices); +VkResult vkpt_pt_destroy_static(); +VkResult vkpt_pt_record_cmd_buffer(VkCommandBuffer cmd_buf, uint32_t frame_num); +VkResult vkpt_pt_update_descripter_set_bindings(int idx); +VkResult vkpt_pt_create_dynamic(int idx, VkBuffer vertex_buffer, size_t buffer_offset, int num_vertices); +VkResult vkpt_pt_destroy_dynamic(int idx); + +VkResult vkpt_asvgf_initialize(); +VkResult vkpt_asvgf_destroy(); +VkResult vkpt_asvgf_create_pipelines(); +VkResult vkpt_asvgf_destroy_pipelines(); +VkResult vkpt_asvgf_record_cmd_buffer(VkCommandBuffer cmd_buf); +VkResult vkpt_asvgf_create_gradient_samples(VkCommandBuffer cmd_buf, uint32_t frame_num); + +qerror_t 
load_img(const char *name, image_t *image); + +typedef struct maliasframe_s { + vec3_t scale; + vec3_t translate; + vec3_t bounds[2]; + vec_t radius; +} maliasframe_t; + +typedef struct maliasmesh_s { + int numverts; + int numtris; + int numindices; + int idx_offset; /* offset in vertex buffer on device */ + int vertex_offset; /* offset in vertex buffer on device */ + int *indices; + vec3_t *positions; + vec3_t *normals; + vec2_t *tex_coords; + image_t *skins[MAX_ALIAS_SKINS]; + int numskins; +} maliasmesh_t; + +// needed for model.c +#define TESS_MAX_VERTICES 4096 +#define TESS_MAX_INDICES (3 * TESS_MAX_VERTICES) + +#define QGL_INDEX_TYPE GLuint + +typedef struct { + color_t colors[2]; // 0 - actual color, 1 - transparency (for text drawing) + float scale; +} drawStatic_t; + +extern drawStatic_t draw; +extern cvar_t *cvar_rtx; + +#endif /*__VKPT_H__*/ diff --git a/src/server/entities.c b/src/server/entities.c index b04b148..03f192a 100644 --- a/src/server/entities.c +++ b/src/server/entities.c @@ -392,6 +392,7 @@ void SV_BuildClientFrame(client_t *client) mleaf_t *leaf; byte clientphs[VIS_MAX_BYTES]; byte clientpvs[VIS_MAX_BYTES]; + int cull_nonvisible_entities = Cvar_Get("sv_cull_nonvisible_entities", "1", CVAR_CHEAT)->integer; clent = client->edict; if (!clent->client) @@ -480,7 +481,7 @@ void SV_BuildClientFrame(client_t *client) if (!Q_IsBitSet(clientphs, l)) continue; } else { - if (!SV_EdictIsVisible(client->cm, ent, clientpvs)) { + if (cull_nonvisible_entities && !SV_EdictIsVisible(client->cm, ent, clientpvs)) { continue; } diff --git a/src/shared/shared.c b/src/shared/shared.c index bcbe398..3ca243a 100644 --- a/src/shared/shared.c +++ b/src/shared/shared.c @@ -57,7 +57,7 @@ vec_t VectorNormalize(vec3_t v) float length, ilength; length = v[0] * v[0] + v[1] * v[1] + v[2] * v[2]; - length = sqrt(length); // FIXME + length = sqrtf(length); // FIXME if (length) { ilength = 1 / length; @@ -75,7 +75,7 @@ vec_t VectorNormalize2(vec3_t v, vec3_t out) float length, ilength; length = v[0] * v[0] + v[1] * v[1] + v[2] * v[2]; - length = sqrt(length); // FIXME + length = sqrtf(length); // FIXME if (length) { ilength = 1 / length; @@ -141,7 +141,8 @@ vec_t RadiusFromBounds(const vec3_t mins, const vec3_t maxs) //==================================================================================== -static const char hexchars[] = "0123456789ABCDEF"; +// unused: +// static const char hexchars[] = "0123456789ABCDEF"; /* ============ diff --git a/src/unix/sdl2/sound.c b/src/unix/sdl2/sound.c index a38cb29..48c0c7c 100644 --- a/src/unix/sdl2/sound.c +++ b/src/unix/sdl2/sound.c @@ -23,7 +23,7 @@ with this program; if not, write to the Free Software Foundation, Inc., #include "shared/shared.h" #include "common/zone.h" #include "client/sound/dma.h" -#include +#include static void Filler(void *userdata, Uint8 *stream, int len) { @@ -156,4 +156,3 @@ void WAVE_FillAPI(snddmaAPI_t *api) api->Submit = Submit; api->Activate = Activate; } - diff --git a/src/unix/sdl2/video.c b/src/unix/sdl2/video.c index 949b71a..5668309 100644 --- a/src/unix/sdl2/video.c +++ b/src/unix/sdl2/video.c @@ -33,9 +33,13 @@ with this program; if not, write to the Free Software Foundation, Inc., #include "refresh/refresh.h" #include "system/system.h" #include "../res/q2pro.xbm" -#include +#include -static SDL_Window *sdl_window; +#if USE_REF == REF_GLPT +#include +#endif + +SDL_Window *sdl_window; static vidFlags_t sdl_flags; /* @@ -46,7 +50,7 @@ OPENGL STUFF 
=============================================================================== */ -#if USE_REF == REF_GL +#if USE_REF == REF_GL || USE_REF == REF_GLPT static SDL_GLContext *sdl_context; @@ -101,6 +105,16 @@ static qboolean VID_SDL_GL_LoadLibrary(void) static void VID_SDL_GL_SetAttributes(void) { +#if USE_REF == REF_GLPT + // Set attributes + SDL_GL_SetAttribute(SDL_GL_ACCELERATED_VISUAL, 1); + SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4); + SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 5); + + SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1); + SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24); + SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, SDL_GL_CONTEXT_DEBUG_FLAG); +#else int colorbits = Cvar_ClampInteger( Cvar_Get("gl_colorbits", "0", CVAR_REFRESH), 0, 32); int depthbits = Cvar_ClampInteger( @@ -137,6 +151,7 @@ static void VID_SDL_GL_SetAttributes(void) SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, 1); SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, multisamples); } +#endif } #if !USE_FIXED_LIBGL @@ -255,8 +270,10 @@ void VID_BeginFrame(void) void VID_EndFrame(void) { -#if USE_REF == REF_GL +#if USE_REF == REF_GL || USE_REF == REF_GLPT SDL_GL_SwapWindow(sdl_window); +#elif USE_REF == REF_VKPT + /* subsystem does it itself */ #else SDL_UpdateWindowSurface(sdl_window); #endif @@ -366,7 +383,7 @@ qboolean VID_Init(void) return qfalse; } -#if USE_REF == REF_GL +#if USE_REF == REF_GL || USE_REF == REF_GLPT if (!VID_SDL_GL_LoadLibrary()) { goto fail; } @@ -382,6 +399,7 @@ qboolean VID_Init(void) rc.y = SDL_WINDOWPOS_UNDEFINED; } +#if !(USE_REF == REF_VKPT) sdl_window = SDL_CreateWindow(PRODUCT, rc.x, rc.y, rc.width, rc.height, flags); if (!sdl_window) { Com_EPrintf("Couldn't create SDL window: %s\n", SDL_GetError()); @@ -405,7 +423,10 @@ qboolean VID_Init(void) VID_SDL_SetMode(); -#if USE_REF == REF_GL +#endif + + +#if USE_REF == REF_GL || USE_REF == REF_GLPT sdl_context = SDL_GL_CreateContext(sdl_window); if (!sdl_context) { Com_EPrintf("Couldn't create OpenGL context: %s\n", SDL_GetError()); @@ -416,6 +437,15 @@ qboolean VID_Init(void) gl_swapinterval->changed = gl_swapinterval_changed; gl_swapinterval_changed(gl_swapinterval); #endif +#if USE_REF == REF_GLPT + if(!gladLoadGLLoader(SDL_GL_GetProcAddress)) { + goto fail; + } + Com_Printf("Vendor: %s\n", glGetString(GL_VENDOR)); + Com_Printf("Renderer: %s\n", glGetString(GL_RENDERER)); + Com_Printf("Version OpenGL: %s\n", glGetString(GL_VERSION)); + Com_Printf("Version GLSL: %s\n", glGetString(GL_SHADING_LANGUAGE_VERSION)); +#endif cvar_t *vid_hwgamma = Cvar_Get("vid_hwgamma", "0", CVAR_REFRESH); if (vid_hwgamma->integer) { @@ -441,7 +471,7 @@ qboolean VID_Init(void) void VID_Shutdown(void) { -#if USE_REF == REF_GL +#if USE_REF == REF_GL || USE_REF == REF_GLPT if (sdl_context) { SDL_GL_DeleteContext(sdl_context); sdl_context = NULL; @@ -536,12 +566,13 @@ static void key_event(SDL_KeyboardEvent *event) { byte result; - if (event->keysym.scancode < 120) - result = scantokey[event->keysym.scancode]; + if(event->keysym.sym < 127) + result = event->keysym.sym; // direct mapping of ascii else if (event->keysym.scancode >= 224 && event->keysym.scancode < 224 + 8) - result = scantokey[event->keysym.scancode - 104]; - else - result = 0; + result = scantokey[event->keysym.scancode - 104]; + else if (event->keysym.scancode > 0x30) + result = scantokey[event->keysym.scancode]; + else result = 0; if (result == 0) { Com_DPrintf("%s: unknown scancode %d\n", __func__, event->keysym.scancode); diff --git a/src/windows/debug.c b/src/windows/debug.c index 
6c7492c..1c67c31 100644 --- a/src/windows/debug.c +++ b/src/windows/debug.c @@ -215,7 +215,8 @@ LONG WINAPI Sys_ExceptionFilter(LPEXCEPTION_POINTERS exceptionInfo) } #if USE_CLIENT - Win_Shutdown(); + //Win_Shutdown(); + VID_Shutdown(); #endif ret = MessageBox(NULL, diff --git a/src/windows/glimp.c b/src/windows/glimp.c index 2dead68..85cff47 100644 --- a/src/windows/glimp.c +++ b/src/windows/glimp.c @@ -28,7 +28,9 @@ GLimp_Init GLimp_Shutdown GLimp_SwitchFullscreen */ - +#if USE_REF == REF_GLPT +#include "glad/glad.h" +#endif #include "client.h" #include "glimp.h" #include "wgl.h" @@ -459,6 +461,10 @@ qboolean VID_Init(void) if (ret) return qfalse; +#if USE_REF == REF_GLPT + gladLoadGL(); +#endif + // initialize WGL extensions WGL_InitExtensions(QWGL_ARB_extensions_string); diff --git a/src/windows/system.c b/src/windows/system.c index 8dae728..6ec6c4e 100644 --- a/src/windows/system.c +++ b/src/windows/system.c @@ -578,7 +578,7 @@ void Sys_Error(const char *error, ...) errorEntered = qtrue; #if USE_CLIENT - Win_Shutdown(); + VID_Shutdown(); #endif #if USE_SYSCON diff --git a/src/windows/threads/threads.c b/src/windows/threads/threads.c new file mode 100644 index 0000000..42953d5 --- /dev/null +++ b/src/windows/threads/threads.c @@ -0,0 +1,4 @@ +#include "threads.h" + +// store the thread id per thread in thread local storage +uint32_t threads_id = 0; diff --git a/src/windows/threads/threads.h b/src/windows/threads/threads.h new file mode 100644 index 0000000..9ecd2f9 --- /dev/null +++ b/src/windows/threads/threads.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include +#include + +// storage will be in the corresponding threads.c file: +extern uint32_t threads_id; + +typedef struct { void* x; } pthread_mutex_t; +#define threads_mutex_lock(m) ((void*)m) +#define threads_mutex_unlock(m) ((void*)m) +#define threads_mutex_destroy(m) ((void*)m) +#define threads_mutex_init(m, p) ((void*)m) + +// hack :/ +#define __sync_fetch_and_add(p, a) ((*(p) += (a)), *(p) - (a)) + +#ifndef aligned_alloc +#define aligned_alloc(a, s) _aligned_malloc(s, a) +#endif +#ifndef aligned_free +#define aligned_free(p) _aligned_free(p) +#endif + +#define sysconf(x) 1 +#ifndef usleep +#define usleep(x) +#endif +#ifndef sched_yield +#define sched_yield() +#endif + +typedef struct threads_t +{ + uint32_t num_threads; + uint32_t task[1]; + void *pool; +} +threads_t; + +static inline void +pthread_pool_task_init(uint32_t *task, void *pool, void* (*f)(void *), void *param) +{ // single threaded execution in line + f(param); +} + +static inline void pthread_pool_wait(void *pool) { } + +static inline threads_t *threads_init(uint32_t num_threads) +{ + threads_t *t = malloc(sizeof(*t)); + t->num_threads = 1; + t->task[0] = 0; + t->pool = 0; + threads_id = 0; + return t; +} + +static inline void threads_cleanup(threads_t *t) +{ + free(t); +} + diff --git a/vulkan-1.lib b/vulkan-1.lib new file mode 100644 index 0000000..89f2cd3 Binary files /dev/null and b/vulkan-1.lib differ
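Editor's note: the following is an illustrative sketch only and is not part of the patch. It shows how the BufferResource_t helpers added in src/refresh/vkpt/vk_util.c (buffer_create, buffer_map, buffer_unmap, buffer_destroy) compose into the staging-upload pattern that src/refresh/vkpt/vertex_buffer.c uses. It assumes qvk.device, qvk.command_pool, and qvk.queue_graphics have already been initialized by the renderer startup path; upload_example and payload are hypothetical names introduced here for the example.

/* Sketch: copy arbitrary data into a device-local buffer via a staging buffer. */
#include <string.h>
#include "vk_util.h"
#include "vkpt.h"

static VkResult
upload_example(const void *payload, VkDeviceSize size)
{
	BufferResource_t staging, device_local;

	/* host-visible source buffer the CPU can map and fill */
	_VK(buffer_create(&staging, size,
		VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
		VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT));

	/* device-local destination buffer read by the shaders */
	_VK(buffer_create(&device_local, size,
		VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
		VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT));

	/* fill the staging buffer through a temporary mapping */
	void *dst = buffer_map(&staging);
	memcpy(dst, payload, size);
	buffer_unmap(&staging);

	/* record and submit a one-shot copy, as vkpt_vertex_buffer_upload_staging() does */
	VkCommandBufferAllocateInfo cmd_alloc = {
		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
		.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
		.commandPool = qvk.command_pool,
		.commandBufferCount = 1,
	};
	VkCommandBuffer cmd_buf;
	vkAllocateCommandBuffers(qvk.device, &cmd_alloc, &cmd_buf);

	VkCommandBufferBeginInfo begin_info = {
		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
		.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
	};
	vkBeginCommandBuffer(cmd_buf, &begin_info);

	VkBufferCopy region = { .size = size };
	vkCmdCopyBuffer(cmd_buf, staging.buffer, device_local.buffer, 1, &region);
	vkEndCommandBuffer(cmd_buf);

	VkSubmitInfo submit_info = {
		.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
		.commandBufferCount = 1,
		.pCommandBuffers = &cmd_buf,
	};
	vkQueueSubmit(qvk.queue_graphics, 1, &submit_info, VK_NULL_HANDLE);
	vkQueueWaitIdle(qvk.queue_graphics);

	vkFreeCommandBuffers(qvk.device, qvk.command_pool, 1, &cmd_buf);
	buffer_destroy(&staging);
	/* device_local stays alive for the descriptor sets that reference it */
	return VK_SUCCESS;
}

The split into a host-visible staging buffer and a device-local storage buffer is what lets the large VertexBuffer remain readable from all shader stages (VK_SHADER_STAGE_ALL in vkpt_vertex_buffer_create) while still being refilled by the CPU each time BSP or model geometry changes.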