Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
82 commits
Select commit Hold shift + click to select a range
38f889e
Add L0 support for gpu
jkasprza Jun 30, 2025
ce58599
Fix L0 DPAS check
jkasprza Jul 10, 2025
d8283b1
Merge remote-tracking branch 'upstream/master' into gpu_l0
jkasprza Jul 10, 2025
f38ae58
Use copy offload only when supported
jkasprza Jul 10, 2025
8aaec53
Remove dlopen for L0
jkasprza Jul 11, 2025
51e581b
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
jkasprza Aug 4, 2025
f8eb99a
Add dep_events for l0 mem fill
jkasprza Aug 4, 2025
44d0a79
Add OneDNN with L0 support
jkasprza Aug 5, 2025
c3b1a67
Adjust to L0 OneDNN
jkasprza Aug 11, 2025
7f79b07
Disable OneDNN pooling
jkasprza Aug 12, 2025
2bbe030
Merge remote-tracking branch 'upstream/master' into gpu_l0
jkasprza Aug 13, 2025
0e70375
Update L0 OneDNN submodule
jkasprza Aug 13, 2025
35e492d
Adjust to new OneDNN
jkasprza Aug 13, 2025
6b586d3
Fix include
jkasprza Aug 18, 2025
afe606b
Add new OneDNN for L0
jkasprza Aug 29, 2025
66a9c6c
Update L0 OneDNN submodule
jkasprza Sep 4, 2025
758c022
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
jkasprza Sep 4, 2025
02d4a15
Fix ze_stream impl
jkasprza Sep 5, 2025
40d7e4b
Update onednn l0 submodule
jkasprza Sep 10, 2025
b7c56f5
Add dedicated copy queue
jkasprza Sep 11, 2025
ab762bf
Fix windows compilation
jkasprza Sep 12, 2025
8f48750
Fix onednn_gpu submodule
jkasprza Sep 16, 2025
862df7e
Handle local memory size argument for L0
jkasprza Sep 19, 2025
96e70c7
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
jkasprza Sep 24, 2025
fedec68
Add separate onednn submodule for L0 and OCL
jkasprza Sep 24, 2025
8725b28
Fix onednn include paths
jkasprza Sep 25, 2025
4fa5c19
Prevent redundant opencl linking
jkasprza Sep 25, 2025
933e262
Add new compute runtime l0 headers
jkasprza Sep 26, 2025
3993c80
Fix includes
jkasprza Sep 26, 2025
078d3a3
Add initial counter based event implementation
jkasprza Sep 26, 2025
08c1555
workaround for cb event host signal
jkasprza Oct 3, 2025
31311c7
Rework level zero event implementation
jkasprza Oct 9, 2025
e8dce15
Enable cb events for in-order queue type
jkasprza Oct 9, 2025
ae16177
Restore pooling impl and convolution ref impl
jkasprza Oct 9, 2025
8a7b566
Fix OneDNN include paths
jkasprza Oct 10, 2025
0e7c363
Move compute runtime headers to separate target
jkasprza Oct 10, 2025
7b5829f
Remove redundant DNNL macro
jkasprza Oct 13, 2025
debf47f
Add copy offload
jkasprza Oct 15, 2025
33a1736
warn if copy offload is not supported
jkasprza Oct 15, 2025
37cfa39
Copy level zero headers to build dir
jkasprza Oct 15, 2025
8e7c1ae
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
jkasprza Oct 16, 2025
55cb68e
Update l0 onednn submodule
jkasprza Oct 17, 2025
4893a28
Change L0 macro check names
jkasprza Oct 17, 2025
3007f7b
Add default supported simd sizes for L0
jkasprza Oct 24, 2025
ddbe71b
Add L0 supported simd sizes query
jkasprza Oct 28, 2025
7c01937
Add L0 interface when building kernel selector
jkasprza Oct 29, 2025
707a238
Update L0 OneDNN submodule
jkasprza Oct 30, 2025
7b2d8ae
L0 retrieve global cache size
jkasprza Nov 3, 2025
cba0d29
Add L0 build kernel API wip
jkasprza Nov 4, 2025
2f855da
Add L0 kernel build logic
jkasprza Nov 6, 2025
9ba3c9c
Fix style
jkasprza Nov 7, 2025
d0f157c
Remove OpenCL dependency when running L0
jkasprza Nov 7, 2025
4ec431e
Fix OCL program build
jkasprza Nov 12, 2025
3e6b368
Add gfx_ver parsing for L0
jkasprza Nov 17, 2025
cfdbb02
Skip L0 symbol table kernel
jkasprza Nov 18, 2025
04b086a
Avoid cl_mem usage with L0 engine
jkasprza Nov 20, 2025
6c6664d
Fix windows build
jkasprza Nov 24, 2025
a3eac10
Remove unnecessary vector during L0 mem fill
jkasprza Nov 27, 2025
e6480ac
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
jkasprza Nov 28, 2025
c76cdb2
Fix memory
jkasprza Nov 28, 2025
6473aa0
Set runtime interface for kernel_selector
jkasprza Nov 28, 2025
93bb04b
Remove onednn ocl include
jkasprza Dec 1, 2025
c621131
Fix backend compilation and remote tensor
jkasprza Dec 2, 2025
fadca80
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
jkasprza Jan 7, 2026
8c9e855
Rename counter based events
jkasprza Jan 7, 2026
d377c45
Adjust event set_profiling_duration
jkasprza Jan 7, 2026
ca7f6cf
Remove unnecessary engine parameter
jkasprza Jan 7, 2026
674556c
Update L0 OneDNN submodule
jkasprza Jan 8, 2026
863ea10
Update onednn L0 submodule and fix profiling
jkasprza Jan 8, 2026
0cdb6f5
Restore cpu checks in primitive_base
jkasprza Jan 8, 2026
86c81b4
Reuse check_allocatable for ocl and l0
jkasprza Jan 8, 2026
8621d68
Use asserts instead of throw
jkasprza Jan 8, 2026
8d0b5bf
Fix get_user_context
jkasprza Jan 9, 2026
d64e6b6
Move common code to engine class
jkasprza Jan 9, 2026
5ccd52c
Rename L0 timeout
jkasprza Jan 9, 2026
786fbb6
Fix test build errors
jkasprza Jan 12, 2026
4690578
Update L0 OneDNN submodule
jkasprza Jan 13, 2026
206a580
Add mutex to l0 event factory
jkasprza Jan 13, 2026
d71ae12
Adjust ze_events naming
jkasprza Jan 13, 2026
0a48846
Remove unnecessary comment
jkasprza Jan 14, 2026
ae5afd8
Improve engine and runtime type selection
jkasprza Jan 14, 2026
d964b51
Fix typo in assert
jkasprza Jan 16, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/linux_riscv.yml
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,7 @@ jobs:
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/telemetry
git submodule update --init -- ${OPENVINO_REPO}/src/plugins/intel_cpu
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/flatbuffers/flatbuffers
git submodule update --init -- ${OPENVINO_REPO}/thirdparty/level_zero
popd

#
Expand Down
3 changes: 3 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -93,3 +93,6 @@
[submodule "src/plugins/intel_cpu/thirdparty/xbyak_riscv"]
path = src/plugins/intel_cpu/thirdparty/xbyak_riscv
url = https://github.com/herumi/xbyak_riscv.git
[submodule "src/plugins/intel_gpu/thirdparty/l0_onednn_gpu"]
path = src/plugins/intel_gpu/thirdparty/l0_onednn_gpu
url = https://github.com/jkasprza/oneDNN.git
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For the record: I guess we will have a single copy of onednn_gpu. Please update that before it is merged.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Need to wait for uxlfoundation/oneDNN#4499 to be merged first.

6 changes: 6 additions & 0 deletions cmake/features.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,12 @@ else()
set(ENABLE_ONEDNN_FOR_GPU_DEFAULT ON)
endif()

# Set default GPU runtime to OCL
set(OV_GPU_DEFAULT_RT "OCL")
if (ENABLE_INTEL_GPU)
ov_option_enum (GPU_RT_TYPE "Type of GPU runtime. Supported value: OCL and L0" ${OV_GPU_DEFAULT_RT} ALLOWED_VALUES L0 OCL)
endif()

ov_dependent_option (ENABLE_ONEDNN_FOR_GPU "Enable oneDNN with GPU support" ${ENABLE_ONEDNN_FOR_GPU_DEFAULT} "ENABLE_INTEL_GPU" OFF)

ov_dependent_option (ENABLE_INTEL_NPU "NPU plugin for OpenVINO runtime" ON "X86_64;WIN32 OR LINUX" OFF)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ using gpu_handle_param = void*;
enum class ContextType {
OCL = 0, //!< Pure OpenCL context
VA_SHARED = 1, //!< Context shared with a video decoding device
ZE = 2, //!< Pure Level0 context
};

/** @cond INTERNAL */
Expand All @@ -33,6 +34,8 @@ inline std::ostream& operator<<(std::ostream& os, const ContextType& context_typ
return os << "OCL";
case ContextType::VA_SHARED:
return os << "VA_SHARED";
case ContextType::ZE:
return os << "ZE";
default:
OPENVINO_THROW("Unsupported context type");
}
Expand All @@ -43,6 +46,8 @@ inline std::istream& operator>>(std::istream& is, ContextType& context_type) {
is >> str;
if (str == "OCL") {
context_type = ContextType::OCL;
} else if (str == "ZE") {
context_type = ContextType::ZE;
} else if (str == "VA_SHARED") {
context_type = ContextType::VA_SHARED;
} else {
Expand Down
7 changes: 7 additions & 0 deletions src/plugins/intel_gpu/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ endif()

set (TARGET_NAME "openvino_intel_gpu_plugin")

include(${CMAKE_CURRENT_LIST_DIR}/cmake/utils.cmake)

if(OV_COMPILER_IS_INTEL_LLVM)
# For windows we need to disable warning as error option to make FindSYCL.cmake work
if (WIN32)
Expand Down Expand Up @@ -36,6 +38,10 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zc:__cplusplus")
endif()

if(WIN32)
add_definitions(-DNOMINMAX)
endif()

if(ENABLE_GPU_DEBUG_CAPS)
add_definitions(-DGPU_DEBUG_CONFIG=1)
add_definitions(-DENABLE_DEBUG_CAPS=1)
Expand Down Expand Up @@ -77,6 +83,7 @@ target_include_directories(${TARGET_NAME} PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/include/)

ov_set_threading_interface_for(${TARGET_NAME})
ov_gpu_set_runtime_interface_for(${TARGET_NAME})

set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})

Expand Down
15 changes: 15 additions & 0 deletions src/plugins/intel_gpu/cmake/utils.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

# Wires the configured GPU runtime (GPU_RT_TYPE cache option) into the given target:
# defines the corresponding OV_GPU_WITH_*_RT compile macro and links the runtime library.
function(ov_gpu_set_runtime_interface_for TARGET_NAME)
    if(GPU_RT_TYPE STREQUAL "OCL")
        target_compile_definitions(${TARGET_NAME} PRIVATE OV_GPU_WITH_OCL_RT=1)
        target_link_libraries(${TARGET_NAME} PRIVATE OpenCL::OpenCL)
    elseif(GPU_RT_TYPE STREQUAL "L0")
        target_compile_definitions(${TARGET_NAME} PRIVATE OV_GPU_WITH_ZE_RT=1)
        target_link_libraries(${TARGET_NAME} PRIVATE LevelZero::LevelZero)
    else()
        message(FATAL_ERROR "Invalid GPU runtime type: `${GPU_RT_TYPE}` Only `L0` and `OCL` are supported")
    endif()
endfunction()
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
# define NOMINMAX
#endif

#include "intel_gpu/runtime/engine_configuration.hpp"
#include "openvino/runtime/intel_gpu/remote_properties.hpp"
#include "openvino/runtime/iremote_context.hpp"

Expand Down Expand Up @@ -93,7 +94,11 @@ class RemoteContextImpl : public ov::IRemoteContext {
ov::intel_gpu::gpu_handle_param m_va_display = nullptr;
ov::intel_gpu::gpu_handle_param m_external_queue = nullptr;

#ifdef OV_GPU_WITH_ZE_RT
ContextType m_type = ContextType::ZE;
#else
ContextType m_type = ContextType::OCL;
#endif
std::string m_device_name = "";
static const size_t cache_capacity = 100;
cldnn::LruCache<size_t, cldnn::memory::ptr> m_memory_cache = cldnn::LruCache<size_t, cldnn::memory::ptr>(cache_capacity);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,15 @@
# define NOMINMAX
#endif


// Do not include DirectX / VA wrappers when running with L0 runtime as they depend on OCL
#ifndef OV_GPU_WITH_ZE_RT
#ifdef _WIN32
# include <openvino/runtime/intel_gpu/ocl/dx.hpp>
#else
# include <openvino/runtime/intel_gpu/ocl/va.hpp>
#endif
#endif
#include "openvino/runtime/iremote_tensor.hpp"

#include "intel_gpu/runtime/memory_caps.hpp"
Expand Down
24 changes: 24 additions & 0 deletions src/plugins/intel_gpu/include/intel_gpu/runtime/device.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,4 +33,28 @@ struct device {
virtual ~device() = default;
};

// The priority returned by this function impacts the order of devices reported by GPU plugin and devices enumeration
// Lower priority value means lower device ID
// Current behavior is: Intel iGPU < Intel dGPU < any other GPU
// Order of Intel dGPUs is undefined and depends on the OCL impl
// Order of other vendor GPUs is undefined and depends on the OCL impl
inline size_t get_device_priority(const cldnn::device_info& info) {
    if (info.vendor_id != cldnn::INTEL_VENDOR_ID) {
        // Any non-Intel GPU goes to the end of the enumeration order
        return std::numeric_limits<size_t>::max();
    }
    // Intel devices: integrated GPUs (priority 0) come before discrete ones (priority 1)
    return info.dev_type == cldnn::device_type::integrated_gpu ? 0 : 1;
}

/// Returns a copy of @p devices_list ordered by get_device_priority().
/// The sort is stable: devices with equal priority keep their relative order
/// (which is why the ordering among Intel dGPUs / other vendors stays runtime-defined).
inline std::vector<device::ptr> sort_devices(const std::vector<device::ptr>& devices_list) {
    std::vector<device::ptr> sorted_list = devices_list;
    // Take the shared_ptrs by const reference in the comparator: copying a device::ptr
    // per comparison would incur an atomic refcount increment/decrement each time.
    std::stable_sort(sorted_list.begin(), sorted_list.end(), [](const device::ptr& d1, const device::ptr& d2) {
        return get_device_priority(d1->get_info()) < get_device_priority(d2->get_info());
    });

    return sorted_list;
}

} // namespace cldnn
50 changes: 46 additions & 4 deletions src/plugins/intel_gpu/include/intel_gpu/runtime/device_info.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,13 +56,13 @@ struct gfx_version {
<= std::tie(r.major, r.minor, r.revision); // same order
}

bool operator==(const gfx_version& other) {
bool operator==(const gfx_version& other) const {
return major == other.major &&
minor == other.minor &&
revision == other.revision;
}

bool operator!=(const gfx_version& other) {
bool operator!=(const gfx_version& other) const {
return !(*this == other);
}
};
Expand All @@ -73,14 +73,14 @@ struct pci_bus_info {
uint32_t pci_device = 0;
uint32_t pci_function = 0;

bool operator==(const pci_bus_info& other) {
bool operator==(const pci_bus_info& other) const {
return pci_domain == other.pci_domain &&
pci_bus == other.pci_bus &&
pci_device == other.pci_device &&
pci_function == other.pci_function;
}

bool operator!=(const pci_bus_info& other) {
bool operator!=(const pci_bus_info& other) const {
return !(*this == other);
}
};
Expand Down Expand Up @@ -116,9 +116,14 @@ struct device_info {
bool supports_imad; ///< Does engine support int8 mad.
bool supports_immad; ///< Does engine support int8 multi mad.

bool supports_mutable_command_list; ///< [L0] Does the target runtime/device support mutable command list feature

bool supports_usm; ///< Does engine support unified shared memory.
bool has_separate_cache; ///< Does the target hardware has separate cache for usm_device and usm_host

bool supports_cp_offload; ///< [L0] Does the command queue support copy offload
bool supports_counter_based_events; ///< [L0] Does the target runtime support counter based events

std::vector<size_t> supported_simd_sizes; ///< List of SIMD sizes supported by current device and compiler

uint32_t vendor_id; ///< Vendor ID
Expand All @@ -140,8 +145,45 @@ struct device_info {

pci_bus_info pci_info; ///< PCI bus information for the device

uint64_t timer_resolution; ///< [L0] Resolution of device timer used for profiling in cycles/sec
uint32_t kernel_timestamp_valid_bits; ///< [L0] Number of valid bits in the kernel timestamp values
uint32_t compute_queue_group_ordinal; ///< [L0] Ordinal of the command queue group to use for compute
uint32_t device_memory_ordinal; ///< [L0] Ordinal of the selected global device memory

ov::device::UUID uuid; ///< UUID of the gpu device
ov::device::LUID luid; ///< LUID of the gpu device

/// Returns true when @p other describes the same physical device as this descriptor.
inline bool is_same_device(const device_info &other) const {
    // Relying solely on the UUID is not reliable in all the cases (particularly on legacy platforms),
    // where the UUID may be missing or incorrectly generated
    // Therefore, we also validate other attributes
    return uuid.uuid == other.uuid.uuid &&
           pci_info == other.pci_info &&
           sub_device_idx == other.sub_device_idx &&
           vendor_id == other.vendor_id &&
           dev_name == other.dev_name &&
           driver_version == other.driver_version &&
           dev_type == other.dev_type &&
           gfx_ver == other.gfx_ver &&
           arch == other.arch &&
           ip_version == other.ip_version &&
           device_id == other.device_id &&
           execution_units_count == other.execution_units_count &&
           max_global_mem_size == other.max_global_mem_size;
}
};

/// @}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,12 @@ namespace cldnn {
struct device_query {
public:
static int device_id;
/// @brief Get default engine type
static engine_types get_default_engine_type();

/// @brief Get default runtime type
static runtime_types get_default_runtime_type();

explicit device_query(engine_types engine_type,
runtime_types runtime_type,
void* user_context = nullptr,
Expand All @@ -25,6 +31,13 @@ struct device_query {
int target_tile_id = -1,
bool initialize_devices = false);

/// @brief Create device query with default values for engine type and runtime type
explicit device_query(void* user_context = nullptr,
void* user_device = nullptr,
int ctx_device_id = 0,
int target_tile_id = -1,
bool initialize_devices = false);

std::map<std::string, device::ptr> get_available_devices() const {
return _available_devices;
}
Expand Down
19 changes: 12 additions & 7 deletions src/plugins/intel_gpu/include/intel_gpu/runtime/engine.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include "layout.hpp"
#include "execution_config.hpp"
#include "engine_configuration.hpp"
#include "kernel_builder.hpp"

#include <memory>
#include <set>
Expand Down Expand Up @@ -83,7 +84,7 @@ class engine {
/// Checks whether two memory objects represents the same physical memory
virtual bool is_the_same_buffer(const memory& mem1, const memory& mem2) = 0;

virtual bool check_allocatable(const layout& layout, allocation_type type) = 0;
virtual bool check_allocatable(const layout& layout, allocation_type type);

/// Returns basic allocation type which will be used as a fallback when allocation type is not specified or device doesn't support some features.
virtual allocation_type get_default_allocation_type() const = 0;
Expand Down Expand Up @@ -141,7 +142,9 @@ class engine {
virtual stream_ptr create_stream(const ExecutionConfig& config, void *handle) const = 0;

/// Returns service stream which can be used during program build and optimizations
virtual stream& get_service_stream() const = 0;
virtual stream& get_service_stream() const;

virtual std::shared_ptr<kernel_builder> create_kernel_builder() const = 0;

virtual allocation_type detect_usm_allocation_type(const void* memory) const = 0;

Expand All @@ -154,13 +157,9 @@ class engine {
virtual void create_onednn_engine(const ExecutionConfig& config) = 0;

/// Returns onednn engine object which shares device and context with current engine
virtual dnnl::engine& get_onednn_engine() const = 0;
virtual dnnl::engine& get_onednn_engine() const;
#endif

/// This method is intended to create kernel handle for current engine from handle from arbitrary engine
/// For instance, source kernel can be compiled using ocl engine, and then we can build L0 kernel object based on that
virtual kernel::ptr prepare_kernel(const kernel::ptr kernel) const = 0;

/// Factory method which creates engine object with impl configured by @p engine_type
/// @param engine_type requested engine type
/// @param runtime_type requested execution runtime for the engine. @note some runtime/engine types configurations might be unsupported
Expand All @@ -178,6 +177,12 @@ class engine {
engine(const device::ptr device);
const device::ptr _device;
bool enable_large_allocations = false;
std::unique_ptr<stream> _service_stream;

#ifdef ENABLE_ONEDNN_FOR_GPU
std::mutex onednn_mutex;
std::shared_ptr<dnnl::engine> _onednn_engine;
#endif

std::array<std::atomic<uint64_t>, static_cast<size_t>(allocation_type::max_value)> _memory_usage_data{};
std::array<std::atomic<uint64_t>, static_cast<size_t>(allocation_type::max_value)> _peak_memory_usage_data{};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,20 +4,22 @@

#pragma once

#include <string>
#include <ostream>

namespace cldnn {

/// @brief Defines available engine types
enum class engine_types : int32_t {
ocl,
sycl
sycl,
ze
};

inline std::ostream& operator<<(std::ostream& os, const engine_types& type) {
switch (type) {
case engine_types::ocl: os << "ocl"; break;
case engine_types::sycl: os << "sycl"; break;
case engine_types::ze: os << "ze"; break;
default: os << "unknown"; break;
}

Expand All @@ -27,11 +29,13 @@ inline std::ostream& operator<<(std::ostream& os, const engine_types& type) {
/// @brief Defines available runtime types
enum class runtime_types : int32_t {
ocl,
ze,
};

inline std::ostream& operator<<(std::ostream& os, const runtime_types& type) {
switch (type) {
case runtime_types::ocl: os << "ocl"; break;
case runtime_types::ze: os << "ze"; break;
default: os << "unknown"; break;
}

Expand Down
10 changes: 10 additions & 0 deletions src/plugins/intel_gpu/include/intel_gpu/runtime/event.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,16 @@ struct event {
_profiling_captured = false;
_profiling_info.clear();
}
/// @brief Set event profiling data instead of retrieving it from event object
/// @param duration_nsec duration in nanoseconds
void set_profiling_duration(uint64_t duration_nsec) {
auto stage = instrumentation::profiling_stage::executing;
auto duration = std::chrono::nanoseconds(duration_nsec);
auto period = std::make_shared<instrumentation::profiling_period_basic>(duration);

_profiling_info.push_back({ stage, period });
_profiling_captured = true;
}

// returns true if handler has been successfully added
bool add_event_handler(event_handler handler, void* data);
Expand Down
Loading