diff --git a/samples/cpp/beam_search_causal_lm/CMakeLists.txt b/samples/cpp/beam_search_causal_lm/CMakeLists.txt
index 9ea4730528..9bf1a8aac8 100644
--- a/samples/cpp/beam_search_causal_lm/CMakeLists.txt
+++ b/samples/cpp/beam_search_causal_lm/CMakeLists.txt
@@ -1,9 +1,11 @@
 # Copyright (C) 2023-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-find_package(OpenVINOGenAI REQUIRED PATHS
-    "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
-    ${OpenVINO_DIR}  # GenAI may be installed alogside OpenVINO.
+find_package(OpenVINOGenAI REQUIRED
+    PATHS
+        "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
+        ${OpenVINO_DIR}  # GenAI may be installed alongside OpenVINO.
+    NO_CMAKE_FIND_ROOT_PATH
 )
 
 add_executable(beam_search_causal_lm beam_search_causal_lm.cpp)
diff --git a/samples/cpp/benchmark_genai/CMakeLists.txt b/samples/cpp/benchmark_genai/CMakeLists.txt
index 3a05c37d62..902a05eee6 100644
--- a/samples/cpp/benchmark_genai/CMakeLists.txt
+++ b/samples/cpp/benchmark_genai/CMakeLists.txt
@@ -1,9 +1,11 @@
 # Copyright (C) 2023-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-find_package(OpenVINOGenAI REQUIRED PATHS
-    "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
-    ${OpenVINO_DIR}  # GenAI may be installed alogside OpenVINO.
+find_package(OpenVINOGenAI REQUIRED
+    PATHS
+        "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
+        ${OpenVINO_DIR}  # GenAI may be installed alongside OpenVINO.
+    NO_CMAKE_FIND_ROOT_PATH
 )
 
 include(FetchContent)
diff --git a/samples/cpp/chat_sample/CMakeLists.txt b/samples/cpp/chat_sample/CMakeLists.txt
index 901f003d4c..69578dc86c 100644
--- a/samples/cpp/chat_sample/CMakeLists.txt
+++ b/samples/cpp/chat_sample/CMakeLists.txt
@@ -1,9 +1,11 @@
 # Copyright (C) 2023-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-find_package(OpenVINOGenAI REQUIRED PATHS
-    "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
-    ${OpenVINO_DIR}  # GenAI may be installed alogside OpenVINO.
+find_package(OpenVINOGenAI REQUIRED
+    PATHS
+        "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
+        ${OpenVINO_DIR}  # GenAI may be installed alongside OpenVINO.
+    NO_CMAKE_FIND_ROOT_PATH
 )
 
 add_executable(chat_sample chat_sample.cpp)
diff --git a/samples/cpp/greedy_causal_lm/CMakeLists.txt b/samples/cpp/greedy_causal_lm/CMakeLists.txt
index 409733bbc6..ff5151676f 100644
--- a/samples/cpp/greedy_causal_lm/CMakeLists.txt
+++ b/samples/cpp/greedy_causal_lm/CMakeLists.txt
@@ -1,9 +1,11 @@
 # Copyright (C) 2023-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-find_package(OpenVINOGenAI REQUIRED PATHS
-    "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
-    ${OpenVINO_DIR}  # GenAI may be installed alogside OpenVINO.
+find_package(OpenVINOGenAI REQUIRED
+    PATHS
+        "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
+        ${OpenVINO_DIR}  # GenAI may be installed alongside OpenVINO.
+    NO_CMAKE_FIND_ROOT_PATH
 )
 
 add_executable(greedy_causal_lm greedy_causal_lm.cpp)
diff --git a/samples/cpp/multinomial_causal_lm/CMakeLists.txt b/samples/cpp/multinomial_causal_lm/CMakeLists.txt
index 01b3bb3bb4..83b2335431 100644
--- a/samples/cpp/multinomial_causal_lm/CMakeLists.txt
+++ b/samples/cpp/multinomial_causal_lm/CMakeLists.txt
@@ -1,9 +1,11 @@
 # Copyright (C) 2023-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-find_package(OpenVINOGenAI REQUIRED PATHS
-    "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
-    ${OpenVINO_DIR}  # GenAI may be installed alogside OpenVINO.
+find_package(OpenVINOGenAI REQUIRED
+    PATHS
+        "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
+        ${OpenVINO_DIR}  # GenAI may be installed alongside OpenVINO.
+    NO_CMAKE_FIND_ROOT_PATH
 )
 
 add_executable(multinomial_causal_lm multinomial_causal_lm.cpp)
diff --git a/samples/cpp/prompt_lookup_decoding_lm/CMakeLists.txt b/samples/cpp/prompt_lookup_decoding_lm/CMakeLists.txt
index 9b7a15131d..c899c6e47b 100644
--- a/samples/cpp/prompt_lookup_decoding_lm/CMakeLists.txt
+++ b/samples/cpp/prompt_lookup_decoding_lm/CMakeLists.txt
@@ -3,9 +3,11 @@
 
 find_package(OpenVINO REQUIRED COMPONENTS Runtime Threading)
 
-find_package(OpenVINOGenAI REQUIRED PATHS
-    "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
-    ${OpenVINO_DIR}  # GenAI may be installed alogside OpenVINO.
+find_package(OpenVINOGenAI REQUIRED
+    PATHS
+        "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
+        ${OpenVINO_DIR}  # GenAI may be installed alongside OpenVINO.
+    NO_CMAKE_FIND_ROOT_PATH
 )
 
 add_executable(prompt_lookup_decoding_lm prompt_lookup_decoding_lm.cpp)
diff --git a/samples/cpp/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.cpp b/samples/cpp/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.cpp
index 3419f3221a..5e372a3f09 100644
--- a/samples/cpp/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.cpp
+++ b/samples/cpp/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.cpp
@@ -238,7 +238,7 @@ int main(int argc, char* argv[]) try {
     ov::Tensor position_ids = model.get_tensor("position_ids");
     position_ids.set_shape(input_ids.get_shape());
     std::iota(position_ids.data<int64_t>(), position_ids.data<int64_t>() + position_ids.get_size(), 0);
-    uint64_t seq_len = input_ids.get_shape()[1];
+    size_t seq_len = input_ids.get_shape()[1];
 
     // set beam_idx for stateful model: no beam search is used and BATCH_SIZE = 1
     model.get_tensor("beam_idx").set_shape({BATCH_SIZE});
diff --git a/samples/cpp/speculative_decoding_lm/CMakeLists.txt b/samples/cpp/speculative_decoding_lm/CMakeLists.txt
index 1a9b02f1b2..078ac8bb52 100644
--- a/samples/cpp/speculative_decoding_lm/CMakeLists.txt
+++ b/samples/cpp/speculative_decoding_lm/CMakeLists.txt
@@ -3,9 +3,11 @@
 
 find_package(OpenVINO REQUIRED COMPONENTS Runtime Threading)
 
-find_package(OpenVINOGenAI REQUIRED PATHS
-    "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
-    ${OpenVINO_DIR}  # GenAI may be installed alogside OpenVINO.
+find_package(OpenVINOGenAI REQUIRED
+    PATHS
+        "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
+        ${OpenVINO_DIR}  # GenAI may be installed alongside OpenVINO.
+    NO_CMAKE_FIND_ROOT_PATH
 )
 
 add_executable(speculative_decoding_lm speculative_decoding_lm.cpp)
diff --git a/samples/cpp/speculative_decoding_lm/speculative_decoding_lm.cpp b/samples/cpp/speculative_decoding_lm/speculative_decoding_lm.cpp
index de2d2f8837..f26cb6c7c4 100644
--- a/samples/cpp/speculative_decoding_lm/speculative_decoding_lm.cpp
+++ b/samples/cpp/speculative_decoding_lm/speculative_decoding_lm.cpp
@@ -272,7 +272,7 @@ int main(int argc, char* argv[]) try {
     ov::InferRequest draft_model = core.compile_model(ov_draft_model, "CPU").create_infer_request();
 
-    uint64_t seq_len = input_ids.get_shape()[1];
+    size_t seq_len = input_ids.get_shape()[1];
 
     // main model (which is bigger, more accurate but slower)
     std::shared_ptr<ov::Model> ov_main_model = core.read_model(std::string{argv[2]} + "/openvino_model.xml");
diff --git a/src/cpp/CMakeLists.txt b/src/cpp/CMakeLists.txt
index 56e19fbd9f..626c4a7903 100644
--- a/src/cpp/CMakeLists.txt
+++ b/src/cpp/CMakeLists.txt
@@ -31,6 +31,9 @@ function(ov_genai_build_jinja2cpp)
     set(JINJA2CPP_STRICT_WARNINGS OFF CACHE BOOL "")
     set(JINJA2CPP_PIC ON CACHE BOOL "")
 
+    # TMP WA:
+    set(RapidJSON_DIR "${CMAKE_BINARY_DIR}/_deps/rapidjson-build")
+
    # options for Jinja2Cpp dependencies
    option(RAPIDJSON_BUILD_DOC "Build rapidjson documentation." OFF)
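A note on the recurring CMake change above (not part of the upstream diff): when a cross-compiling toolchain file sets `CMAKE_FIND_ROOT_PATH_MODE_PACKAGE`, `find_package()` re-roots every `PATHS` entry under `CMAKE_FIND_ROOT_PATH`, so a config package sitting in the host-side build tree (`${CMAKE_BINARY_DIR}`) is never found. `NO_CMAKE_FIND_ROOT_PATH` tells this one call to search the listed paths as-is. A minimal consumer sketch of the same pattern; the `app` target, `main.cpp`, and the version floor are hypothetical, and `openvino::genai` is the imported target the samples appear to link against:

```cmake
cmake_minimum_required(VERSION 3.15)
project(genai_consumer CXX)

find_package(OpenVINOGenAI REQUIRED
    PATHS
        "${CMAKE_BINARY_DIR}"  # reuse a package exported from this build tree
    NO_CMAKE_FIND_ROOT_PATH    # search PATHS literally, even under a cross toolchain
)

add_executable(app main.cpp)
target_link_libraries(app PRIVATE openvino::genai)
```

As for the two C++ hunks: `get_shape()` returns an `ov::Shape`, which derives from `std::vector<size_t>`, so declaring `seq_len` as `size_t` matches the element type directly instead of converting through `uint64_t`. The `RapidJSON_DIR` line is, per its `# TMP WA` comment, a temporary workaround that points Jinja2Cpp's RapidJSON package lookup at the copy FetchContent places under `_deps/rapidjson-build`.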