diff --git a/.github/workflows/check_urls.yml b/.github/workflows/check_urls.yml index 1add8ee698..2ba0d8279a 100644 --- a/.github/workflows/check_urls.yml +++ b/.github/workflows/check_urls.yml @@ -46,6 +46,7 @@ jobs: --verbose --no-progress --root-dir=${{ github.workspace }} + --exclude='${{ github.workspace }}/doc/website/overrides/' --remap='(file://.*)/holoscan-sdk/(.*) https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/$2' ${{ steps.changed-files.outputs.changed_files_list }} fail: true @@ -63,6 +64,7 @@ jobs: --verbose --no-progress --root-dir=${{ github.workspace }} + --exclude='${{ github.workspace }}/doc/website/overrides/' --remap='(file://.*)/holoscan-sdk/(.*) https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/$2' '${{ github.workspace }}/**/*.md' '${{ github.workspace }}/**/*.html' diff --git a/README.md b/README.md index fd79600559..177c1c9e5a 100644 --- a/README.md +++ b/README.md @@ -110,6 +110,8 @@ Run the following command to build the development container for a given project ./holohub build-container [project_name] ``` +For Holoscan SDK v3.6.1 and later, you can specify the CUDA version using the `--cuda` option (e.g., `--cuda 12` or `--cuda 13`), which automatically selects the appropriate container tag and passes the CUDA version as a build argument. + Check to verify that the image is created: ```sh diff --git a/applications/CMakeLists.txt b/applications/CMakeLists.txt index 2d0469b388..a1f5bbeab2 100644 --- a/applications/CMakeLists.txt +++ b/applications/CMakeLists.txt @@ -38,6 +38,10 @@ add_holohub_application(deltacast_transmitter DEPENDS OPERATORS deltacast_videomaster ) +add_holohub_application(deltacast_receiver DEPENDS + OPERATORS deltacast_videomaster + ) + add_holohub_application(depth_anything_v2) add_subdirectory(distributed) diff --git a/applications/aja_video_capture/cpp/metadata.json b/applications/aja_video_capture/cpp/metadata.json index ab4a271e9d..c212d9cb1f 100644 --- a/applications/aja_video_capture/cpp/metadata.json +++ b/applications/aja_video_capture/cpp/metadata.json @@ -25,7 +25,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["Computer Vision and Perception", "Video", "format conversion", "Streaming", "AJA"], + "tags": ["Streaming", "Video", "format conversion", "AJA"], "ranking": 0, "requirements": { }, diff --git a/applications/aja_video_capture/python/metadata.json b/applications/aja_video_capture/python/metadata.json index 8174498a4b..05230eaab5 100644 --- a/applications/aja_video_capture/python/metadata.json +++ b/applications/aja_video_capture/python/metadata.json @@ -24,7 +24,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["Computer Vision and Perception", "Video", "format conversion", "Streaming", "AJA"], + "tags": ["Streaming", "Video", "format conversion", "AJA"], "ranking": 0, "requirements": { }, diff --git a/applications/async_buffer_deadline/metadata.json b/applications/async_buffer_deadline/metadata.json index e2bebe5fdc..5e84b1ca82 100644 --- a/applications/async_buffer_deadline/metadata.json +++ b/applications/async_buffer_deadline/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "An Example of Async Lock-free Buffer with SCHED_DEADLINE", + "name": "Async Lock-free Buffer", "description": "A simple application demonstrating the impact of async lock-freebuffer communication between operators with earliest deadline first (SCHED_DEADLINE) scheduling policy of Linux", "authors": [ { @@ -24,7 +24,7 @@ "aarch64" ], "tags": [ - "ping", + "Scheduler", "async_buffer", "deadline_scheduling", "event_based_scheduler" diff --git a/applications/cuda_quantum/metadata.json b/applications/cuda_quantum/metadata.json index c8d07c3803..84896dcfd9 100644 --- a/applications/cuda_quantum/metadata.json +++ b/applications/cuda_quantum/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Tools And Other Specialized Applications", "Quantum Computing"], + "tags": ["Quantum Computing"], "ranking": 4, "requirements": { "cuda_quantum": "^0.4.0" diff --git a/applications/dds/dds_video/metadata.json b/applications/dds/dds_video/metadata.json index f10464a8d7..ba5b0965a1 100644 --- a/applications/dds/dds_video/metadata.json +++ b/applications/dds/dds_video/metadata.json @@ -1,6 +1,7 @@ { "application": { - "name": "DDS Video: Real-time Video Streaming with RTI Connext", + "name": "Real-time Video Streaming with DDS", + "description": "Real-time video streaming with RTI Connext", "authors": [ { "name": "Ian Stewart", diff --git a/applications/deltacast_receiver/CMakeLists.txt b/applications/deltacast_receiver/CMakeLists.txt new file mode 100644 index 0000000000..043280b800 --- /dev/null +++ b/applications/deltacast_receiver/CMakeLists.txt @@ -0,0 +1,19 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 DELTACAST.TV. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.20) +project(deltacast_receiver_apps LANGUAGES NONE) + +add_subdirectory(cpp) diff --git a/applications/deltacast_receiver/README.md b/applications/deltacast_receiver/README.md new file mode 100644 index 0000000000..f403f64928 --- /dev/null +++ b/applications/deltacast_receiver/README.md @@ -0,0 +1,25 @@ +# Deltacast Videomaster Receiver + +This application demonstrates the use of videomaster_source to receive and display video streams from a Deltacast capture card using Holoviz for visualization. + +## Requirements + +This application uses the DELTACAST.TV capture card for input stream. Contact [DELTACAST.TV](https://www.deltacast.tv/) for more details on how to access the SDK and setup your environment. + +## Build Instructions + +See instructions from the top level README on how to build this application. +Note that this application requires to provide the VideoMaster_SDK_DIR if it is not located in a default location on the system. +This can be done with the following command, from the top level Holohub source directory: + +```bash +./holohub build --local deltacast_receiver --configure-args="-DVideoMaster_SDK_DIR=" +``` + +## Run Instructions + +From the build directory, run the command: + +```bash +./applications/deltacast_receiver/cpp/deltacast_receiver +``` diff --git a/applications/deltacast_receiver/cpp/CMakeLists.txt b/applications/deltacast_receiver/cpp/CMakeLists.txt new file mode 100644 index 0000000000..d5e4b7650e --- /dev/null +++ b/applications/deltacast_receiver/cpp/CMakeLists.txt @@ -0,0 +1,40 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 DELTACAST.TV. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +cmake_minimum_required(VERSION 3.20) +project(deltacast_receiver CXX) + +find_package(holoscan 1.0 REQUIRED CONFIG + PATHS "/opt/nvidia/holoscan" "/workspace/holoscan-sdk/install") + +# CPP Application +add_executable(deltacast_receiver + main.cpp +) + +target_link_libraries(deltacast_receiver + PRIVATE + holoscan::core + holoscan::ops::format_converter + holoscan::ops::holoviz + holoscan::videomaster +) + +# Copy config file +add_custom_target(deltacast_receiver_yaml + COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/deltacast_receiver.yaml" ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS "deltacast_receiver.yaml" + BYPRODUCTS "deltacast_receiver.yaml" +) +add_dependencies(deltacast_receiver deltacast_receiver_yaml) diff --git a/applications/deltacast_receiver/cpp/deltacast_receiver.yaml b/applications/deltacast_receiver/cpp/deltacast_receiver.yaml new file mode 100644 index 0000000000..0cc7efa7d8 --- /dev/null +++ b/applications/deltacast_receiver/cpp/deltacast_receiver.yaml @@ -0,0 +1,48 @@ +%YAML 1.2 +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 DELTACAST.TV. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +extensions: + - lib/gxf_extensions/libgxf_videomaster.so + +deltacast: + width: 1920 + height: 1080 + progressive: true + framerate: 30 + board: 0 + input: 0 + rdma: false + +format_converter: + in_dtype: "rgb888" + alpha_value: 255 + out_dtype: "rgba8888" + out_channel_order: [2,1,0,3] + +drop_alpha_channel_converter: + in_dtype: "rgba8888" + out_dtype: "rgb888" + resize_height: 270 + resize_width: 480 + +holoviz: + tensors: + - name: "" + type: color + opacity: 1.0 + priority: 0 + width: 480 + height: 270 \ No newline at end of file diff --git a/applications/deltacast_receiver/cpp/main.cpp b/applications/deltacast_receiver/cpp/main.cpp new file mode 100644 index 0000000000..d9247d7dc1 --- /dev/null +++ b/applications/deltacast_receiver/cpp/main.cpp @@ -0,0 +1,130 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022-2025 DELTACAST.TV. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include + +class App : public holoscan::Application { + public: + /** Compose function */ + void compose() override { + using namespace holoscan; + + uint32_t width = from_config("deltacast.width").as(); + uint32_t height = from_config("deltacast.height").as(); + bool use_rdma = from_config("deltacast.rdma").as(); + uint64_t source_block_size = width * height * 4 * 4; + uint64_t source_num_blocks = use_rdma ? 3 : 4; + + // Create the VideoMaster source operator (receiver) with explicit arguments + auto source = make_operator( + "deltacast_source", + Arg("rdma") = use_rdma, + Arg("board") = from_config("deltacast.board").as(), + Arg("input") = from_config("deltacast.input").as(), + Arg("width") = width, + Arg("height") = height, + Arg("progressive") = from_config("deltacast.progressive").as(), + Arg("framerate") = from_config("deltacast.framerate").as(), + Arg("pool") = make_resource("pool")); + + // Format converter to prepare for visualization + auto format_converter = + make_operator("format_converter", + from_config("format_converter"), + Arg("pool") = make_resource( + "converter_pool", 1, source_block_size, + source_num_blocks)); + + auto drop_alpha_channel_converter = make_operator( + "drop_alpha_channel_converter", + from_config("drop_alpha_channel_converter"), + Arg("pool") = + make_resource("pool", 1, source_block_size, source_num_blocks)); + auto visualizer = make_operator( + "holoviz", + from_config("holoviz"), + Arg("allocator") = make_resource("holoviz_allocator")); + + // Connect the pipeline: source -> format_converter -> holoviz + add_flow(source, drop_alpha_channel_converter); + add_flow(drop_alpha_channel_converter, format_converter); + add_flow(format_converter, visualizer, {{"", "receivers"}}); + } +}; + +/** Helper function to parse the command line arguments */ +bool parse_arguments(int argc, char** argv, std::string& config_name) { + static struct option long_options[] = { + {"config", required_argument, 0, 'c' }, + {"help", no_argument, 0, 'h' }, + {0, 0, 0, 0 } + }; + + while (int c = getopt_long(argc, argv, "c:h", + long_options, NULL)) { + if (c == -1 || c == '?') break; + + switch (c) { + case 'c': + config_name = optarg; + break; + case 'h': + std::cout << "Usage: " << argv[0] << " [options] [config_file]\n"; + std::cout << "Options:\n"; + std::cout << " -c, --config Configuration file path\n"; + std::cout << " -h, --help Show this help message\n"; + std::cout << "\nExample:\n"; + std::cout << " " << argv[0] << " deltacast_receiver.yaml\n"; + return false; + default: + std::cout << "Unknown arguments returned: " << c << std::endl; + return false; + } + } + + if (optind < argc) { + config_name = argv[optind++]; + } + return true; +} + +int main(int argc, char** argv) { + auto app = holoscan::make_application(); + + // Parse the arguments + std::string config_name = ""; + if (!parse_arguments(argc, argv, config_name)) { + return 1; + } + + if (config_name != "") { + app->config(config_name); + } else { + auto config_path = std::filesystem::canonical(argv[0]).parent_path(); + config_path += "/deltacast_receiver.yaml"; + app->config(config_path); + } + + app->run(); + + return 0; +} diff --git a/applications/deltacast_receiver/cpp/metadata.json b/applications/deltacast_receiver/cpp/metadata.json new file mode 100644 index 0000000000..58c04af04f --- /dev/null +++ b/applications/deltacast_receiver/cpp/metadata.json @@ -0,0 +1,43 @@ +{ + "application": { + "name": "Deltacast Videomaster Receiver", + "authors": [ + { + "name": "Laurent Radoux", + "affiliation": "DELTACAST" + }, + { + "name": "Pierre PERICK", + "affiliation": "DELTACAST" + } + ], + "language": "C++", + "version": "1.0", + "changelog": { + "1.0": "Initial Release - Simplified receiver application for video display without AI processing" + }, + "holoscan_sdk": { + "minimum_required_version": "0.5.0", + "tested_versions": [ + "3.6.0" + ] + }, + "videomaster_sdk": { + "minimum_required_version": "6.26.0", + "tested_versions": [ + "6.32.0" + ] + }, + "platforms": [ + "x86_64", + "aarch64" + ], + "tags": ["Healthcare AI", "Video", "Deltacast", "Receiver", "Display", "Holoviz", "RDMA", "GPUDirect"], + "ranking": 2, + "requirements": {}, + "run": { + "command": "/deltacast_receiver", + "workdir": "holohub_bin" + } + } +} diff --git a/applications/deltacast_receiver/python/CMakeLists.txt b/applications/deltacast_receiver/python/CMakeLists.txt new file mode 100644 index 0000000000..759082f9be --- /dev/null +++ b/applications/deltacast_receiver/python/CMakeLists.txt @@ -0,0 +1,44 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 DELTACAST.TV. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Add testing +if(BUILD_TESTING) + # To get the environment path + find_package(holoscan 1.0 REQUIRED CONFIG PATHS "/opt/nvidia/holoscan" "/workspace/holoscan-sdk/install") + + # Add test + add_test(NAME deltacast_receiver_python_test + COMMAND python3 ${CMAKE_CURRENT_SOURCE_DIR}/deltacast_receiver.py + --config ${CMAKE_CURRENT_SOURCE_DIR}/deltacast_receiver.yaml + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + + set_property(TEST deltacast_receiver_python_test PROPERTY ENVIRONMENT + "PYTHONPATH=${GXF_LIB_DIR}/../python/lib:${CMAKE_BINARY_DIR}/python/lib") + + set_tests_properties(deltacast_receiver_python_test PROPERTIES + PASS_REGULAR_EXPRESSION "Running the graph;" + FAIL_REGULAR_EXPRESSION "[^a-z]Error;ERROR;Failed") +endif() + +# Install application and dependencies into the install/ directory for packaging +install( + FILES deltacast_receiver.py + DESTINATION bin/deltacast_receiver/python +) + +install( + FILES deltacast_receiver.yaml + DESTINATION bin/deltacast_receiver/python +) \ No newline at end of file diff --git a/applications/deltacast_receiver/python/deltacast_receiver.py b/applications/deltacast_receiver/python/deltacast_receiver.py new file mode 100644 index 0000000000..be533b9c3f --- /dev/null +++ b/applications/deltacast_receiver/python/deltacast_receiver.py @@ -0,0 +1,139 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 DELTACAST.TV. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from argparse import ArgumentParser + +from holoscan.core import Application +from holoscan.operators import FormatConverterOp, HolovizOp +from holoscan.resources import BlockMemoryPool, MemoryStorageType, UnboundedAllocator + +from holohub.videomaster import VideoMasterSourceOp + + +class DeltacastReceiverApp(Application): + def __init__(self): + """Initialize the deltacast receiver application.""" + super().__init__() + self.name = "Deltacast Receiver" + + def compose(self): + """ + Compose the application by setting up the operators and their connections. + """ + # Retrieve VideoMaster parameters + deltacast_kwargs = self.kwargs("deltacast") + width = deltacast_kwargs.get("width", 1920) + height = deltacast_kwargs.get("height", 1080) + use_rdma = deltacast_kwargs.get("rdma", False) + + # Calculate source block size and count and define the source pool parameters + source_block_size = width * height * 4 * 4 + source_block_count = 3 if use_rdma else 4 + + source_pool_kwargs = dict( + storage_type=MemoryStorageType.DEVICE, + block_size=source_block_size, + num_blocks=source_block_count, + ) + + # Initialize operators + source = VideoMasterSourceOp( + self, + name="deltacast_source", + pool=UnboundedAllocator(self, name="source_pool"), + rdma=use_rdma, + board=deltacast_kwargs.get("board", 0), + input=deltacast_kwargs.get("input", 0), + width=width, + height=height, + progressive=deltacast_kwargs.get("progressive", True), + framerate=deltacast_kwargs.get("framerate", 30), + ) + + # Format converter to prepare for visualization + format_converter = FormatConverterOp( + self, + name="format_converter", + pool=BlockMemoryPool(self, name="converter_pool", **source_pool_kwargs), + **self.kwargs("format_converter"), + ) + + # Drop alpha channel converter (matches C++ implementation exactly) + drop_alpha_channel_converter = FormatConverterOp( + self, + name="drop_alpha_channel_converter", + pool=BlockMemoryPool(self, name="drop_alpha_pool", **source_pool_kwargs), + **self.kwargs("drop_alpha_channel_converter"), + ) + + # Holoviz for visualization + visualizer = HolovizOp( + self, + name="holoviz", + allocator=UnboundedAllocator(self, name="holoviz_allocator"), + **self.kwargs("holoviz"), + ) + + # Connect the pipeline: source -> drop_alpha_channel_converter -> format_converter -> holoviz + # This matches the exact flow from the C++ implementation + self.add_flow(source, drop_alpha_channel_converter) + self.add_flow(drop_alpha_channel_converter, format_converter) + self.add_flow(format_converter, visualizer, {("", "receivers")}) + + +def parse_config(): + """ + Parse command line arguments and validate paths. + + Returns + ------- + args : argparse.Namespace + Parsed command-line arguments. + """ + # Parse command line arguments + parser = ArgumentParser(description="DeltaCast Receiver demo application.") + parser.add_argument( + "-c", + "--config", + type=str, + default=os.path.join(os.path.dirname(__file__), "deltacast_receiver.yaml"), + help="Path to the configuration file to override the default config file location. If not provided, the deltacast_receiver.yaml in root directory will be used. (default location: %(default)s)", + ) + + args = parser.parse_args() + + # Ensure the configuration file exists + if not os.path.exists(args.config): + raise FileNotFoundError( + f"Configuration file {args.config} does not exist at expected location. Use --config to specify the correct path." + ) + + return args + + +def main(): + try: + args = parse_config() + app = DeltacastReceiverApp() + app.config(args.config) + app.run() + except Exception as e: + print(f"Error: {e}") + exit(1) + + +if __name__ == "__main__": + main() diff --git a/applications/deltacast_receiver/python/deltacast_receiver.yaml b/applications/deltacast_receiver/python/deltacast_receiver.yaml new file mode 100644 index 0000000000..0cc7efa7d8 --- /dev/null +++ b/applications/deltacast_receiver/python/deltacast_receiver.yaml @@ -0,0 +1,48 @@ +%YAML 1.2 +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 DELTACAST.TV. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +extensions: + - lib/gxf_extensions/libgxf_videomaster.so + +deltacast: + width: 1920 + height: 1080 + progressive: true + framerate: 30 + board: 0 + input: 0 + rdma: false + +format_converter: + in_dtype: "rgb888" + alpha_value: 255 + out_dtype: "rgba8888" + out_channel_order: [2,1,0,3] + +drop_alpha_channel_converter: + in_dtype: "rgba8888" + out_dtype: "rgb888" + resize_height: 270 + resize_width: 480 + +holoviz: + tensors: + - name: "" + type: color + opacity: 1.0 + priority: 0 + width: 480 + height: 270 \ No newline at end of file diff --git a/applications/deltacast_receiver/python/metadata.json b/applications/deltacast_receiver/python/metadata.json new file mode 100644 index 0000000000..3fadab5b86 --- /dev/null +++ b/applications/deltacast_receiver/python/metadata.json @@ -0,0 +1,39 @@ +{ + "application": { + "name": "Deltacast Videomaster Receiver", + "authors": [ + { + "name": "Pierre PERICK", + "affiliation": "DELTACAST" + } + ], + "language": "Python", + "version": "1.0", + "changelog": { + "1.0": "Initial Release" + }, + "holoscan_sdk": { + "minimum_required_version": "0.5.0", + "tested_versions": [ + "3.6.0" + ] + }, + "videomaster_sdk": { + "minimum_required_version": "6.26.0", + "tested_versions": [ + "6.32.0" + ] + }, + "platforms": [ + "x86_64", + "aarch64" + ], + "tags": ["Healthcare AI", "Video", "Deltacast", "Endoscopy", "RDMA", "GPUDirect", "Receiver"], + "ranking": 2, + "requirements": {}, + "run": { + "command": "python3 /deltacast_receiver.py", + "workdir": "holohub_bin" + } + } +} \ No newline at end of file diff --git a/applications/deltacast_transmitter/cpp/deltacast_transmitter.yaml b/applications/deltacast_transmitter/cpp/deltacast_transmitter.yaml index 5d3b36c639..d1a1844ab3 100644 --- a/applications/deltacast_transmitter/cpp/deltacast_transmitter.yaml +++ b/applications/deltacast_transmitter/cpp/deltacast_transmitter.yaml @@ -36,7 +36,7 @@ deltacast: width: 1920 height: 1080 progressive: true - framerate: 25 + framerate: 30 board: 0 output: 0 rdma: false diff --git a/applications/deltacast_transmitter/cpp/metadata.json b/applications/deltacast_transmitter/cpp/metadata.json index 6dd348d628..834d495c00 100644 --- a/applications/deltacast_transmitter/cpp/metadata.json +++ b/applications/deltacast_transmitter/cpp/metadata.json @@ -22,21 +22,23 @@ "tested_versions": [ "0.5.0", "2.9.0", - "3.0.0" + "3.0.0", + "3.6.0" ] }, "videomaster_sdk": { "minimum_required_version": "6.26.0", "tested_versions": [ "6.29.0", - "6.30.0" + "6.30.0", + "6.32.0" ] }, "platforms": [ "x86_64", "aarch64" ], - "tags": ["Healthcare AI", "Video", "Deltacast", "Endoscopy", "RDMA", "GPUDirect"], + "tags": ["Streaming", "Healthcare AI", "Video", "Deltacast", "Endoscopy", "RDMA", "GPUDirect"], "ranking": 2, "requirements": {}, "run": { diff --git a/applications/deltacast_transmitter/python/deltacast_transmitter.yaml b/applications/deltacast_transmitter/python/deltacast_transmitter.yaml index cdb43249f4..6b62553e93 100644 --- a/applications/deltacast_transmitter/python/deltacast_transmitter.yaml +++ b/applications/deltacast_transmitter/python/deltacast_transmitter.yaml @@ -36,7 +36,7 @@ videomaster: width: 1920 height: 1080 progressive: true - framerate: 60 + framerate: 30 board: 0 output: 0 rdma: false diff --git a/applications/deltacast_transmitter/python/metadata.json b/applications/deltacast_transmitter/python/metadata.json index 2f942622b0..d1609fc93a 100644 --- a/applications/deltacast_transmitter/python/metadata.json +++ b/applications/deltacast_transmitter/python/metadata.json @@ -17,21 +17,23 @@ "tested_versions": [ "0.5.0", "2.9.0", - "3.0.0" + "3.0.0", + "3.6.0" ] }, "videomaster_sdk": { "minimum_required_version": "6.26.0", "tested_versions": [ "6.29.0", - "6.30.0" + "6.30.0", + "6.32.0" ] }, "platforms": [ "x86_64", "aarch64" ], - "tags": ["Healthcare AI", "Video", "Deltacast", "Endoscopy", "RDMA", "GPUDirect"], + "tags": ["Streaming", "Healthcare AI", "Video", "Deltacast", "Endoscopy", "RDMA", "GPUDirect"], "ranking": 2, "requirements": {}, "run": { diff --git a/applications/distributed/grpc/grpc_endoscopy_tool_tracking/cpp/metadata.json b/applications/distributed/grpc/grpc_endoscopy_tool_tracking/cpp/metadata.json index 43cfbb0d0b..d92e206d05 100644 --- a/applications/distributed/grpc/grpc_endoscopy_tool_tracking/cpp/metadata.json +++ b/applications/distributed/grpc/grpc_endoscopy_tool_tracking/cpp/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Distributed Endoscopy Tool Tracking with gRPC Streaming", + "name": "Distributed Endoscopy Tool Tracking with gRPC", "authors": [ { "name": "Holoscan Team", @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Healthcare AI", "Distributed", "LSTM", "Asynchronous Queues", "gRPC", "Video", "Computer Vision and Perception", "Visualization"], + "tags": ["Streaming", "Distributed", "LSTM", "Asynchronous Queues", "gRPC", "Video", "Computer Vision and Perception", "Visualization"], "ranking": 0, "requirements": { "data": [ diff --git a/applications/distributed/grpc/grpc_endoscopy_tool_tracking/python/metadata.json b/applications/distributed/grpc/grpc_endoscopy_tool_tracking/python/metadata.json index cb8ea8ed79..ef8a4c6d3a 100644 --- a/applications/distributed/grpc/grpc_endoscopy_tool_tracking/python/metadata.json +++ b/applications/distributed/grpc/grpc_endoscopy_tool_tracking/python/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Distributed Endoscopy Tool Tracking with gRPC Streaming", + "name": "Distributed Endoscopy Tool Tracking with gRPC", "authors": [ { "name": "Holoscan Team", @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["Healthcare AI", "Distributed", "LSTM", "Asynchronous Queues", "gRPC", "Video", "Computer Vision and Perception", "Visualization"], + "tags": ["Streaming", "Distributed", "LSTM", "Asynchronous Queues", "gRPC", "Video", "Computer Vision and Perception", "Visualization"], "ranking": 0, "requirements": { "data": [ diff --git a/applications/distributed/grpc/grpc_h264_endoscopy_tool_tracking/cpp/metadata.json b/applications/distributed/grpc/grpc_h264_endoscopy_tool_tracking/cpp/metadata.json index 70fd43c127..8f4a0356af 100644 --- a/applications/distributed/grpc/grpc_h264_endoscopy_tool_tracking/cpp/metadata.json +++ b/applications/distributed/grpc/grpc_h264_endoscopy_tool_tracking/cpp/metadata.json @@ -1,6 +1,7 @@ { "application": { - "name": "Distributed H.264 Endoscopy Tool Tracking with gRPC Streaming", + "name": "Distributed H.264 gRPC Streaming", + "description": "Distributed H.264 Endoscopy Tool Tracking with gRPC Streaming", "authors": [ { "name": "Holoscan Team", diff --git a/applications/ehr_query_llm/fhir/metadata.json b/applications/ehr_query_llm/fhir/metadata.json index bab8b8eaec..49e418a224 100644 --- a/applications/ehr_query_llm/fhir/metadata.json +++ b/applications/ehr_query_llm/fhir/metadata.json @@ -1,6 +1,7 @@ { "application": { - "name": "FHIR Client for Retrieving and Posting FHIR Resources", + "name": "FHIR Client", + "description": "This application demonstrates how to retrieve and post FHIR resources using the FHIR client library.", "authors": [ { "name": "Holoscan Team", diff --git a/applications/endoscopy_depth_estimation/metadata.json b/applications/endoscopy_depth_estimation/metadata.json index 91f10d72c6..c3f84689ef 100644 --- a/applications/endoscopy_depth_estimation/metadata.json +++ b/applications/endoscopy_depth_estimation/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Healthcare AI", "Networking and Distributed Computing", "Endoscopy", "Monocular Depth Estimation", "CV CUDA", "Video", "Rendering"], + "tags": ["Healthcare AI", "Endoscopy", "Monocular Depth Estimation", "CV CUDA", "Video", "Rendering"], "ranking": 2, "requirements": { "model": "https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/resources/holoscan_endoscopy_depth_estimation_sample_data", diff --git a/applications/florence-2-vision/metadata.json b/applications/florence-2-vision/metadata.json index e3e676c8e6..fd40a0691c 100644 --- a/applications/florence-2-vision/metadata.json +++ b/applications/florence-2-vision/metadata.json @@ -1,6 +1,7 @@ { "application": { - "name": "Florence-2: Advancing a Unified Representation for a Variety of Vision Tasks", + "name": "Florence-2", + "description": "Florence-2 is a unified vision model that can perform a variety of vision tasks, including detection, segmentation, and tracking.", "authors": [ { "name": "Holoscan Team", diff --git a/applications/holoscan_ros2/pubsub/cpp/metadata.json b/applications/holoscan_ros2/pubsub/cpp/metadata.json index 0f8702a8ad..de13fdfbe5 100644 --- a/applications/holoscan_ros2/pubsub/cpp/metadata.json +++ b/applications/holoscan_ros2/pubsub/cpp/metadata.json @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["ROS2", "Publisher", "Subscriber"], + "tags": ["Robotics", "ROS2", "Publisher", "Subscriber"], "ranking": 1, "requirements": {}, "dependencies": { diff --git a/applications/holoscan_ros2/pubsub/python/metadata.json b/applications/holoscan_ros2/pubsub/python/metadata.json index d19102678f..be78bc433a 100644 --- a/applications/holoscan_ros2/pubsub/python/metadata.json +++ b/applications/holoscan_ros2/pubsub/python/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Holoscan ROS2 Publisher/Subscriber Examples", + "name": "ROS2 Publisher/Subscriber", "authors": [ { "name": "Holoscan SDK Team", @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["ROS2", "Publisher", "Subscriber", "Python"], + "tags": ["Robotics", "ROS2", "Publisher", "Subscriber", "Python"], "ranking": 1, "requirements": {}, "default_mode": "publisher", diff --git a/applications/holoscan_ros2/vb1940/cpp/metadata.json b/applications/holoscan_ros2/vb1940/cpp/metadata.json index 0bc0ac3d04..a6e7ccef10 100644 --- a/applications/holoscan_ros2/vb1940/cpp/metadata.json +++ b/applications/holoscan_ros2/vb1940/cpp/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Holoscan ROS2 VB1940 (Eagle) Camera", + "name": "ROS2 VB1940 (Eagle) Camera", "authors": [ { "name": "Holoscan SDK Team", @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["ROS2", "VB1940", "Publisher", "Subscriber"], + "tags": ["Robotics", "ROS2", "VB1940", "Publisher", "Subscriber"], "ranking": 1, "requirements": {}, "dependencies": { diff --git a/applications/holoviz/holoviz_ui/metadata.json b/applications/holoviz/holoviz_ui/metadata.json index 8d36b23049..fc9c3daf1b 100644 --- a/applications/holoviz/holoviz_ui/metadata.json +++ b/applications/holoviz/holoviz_ui/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Computer Vision and Perception", "Visualization", "Holoviz", "Rendering"], + "tags": ["Rendering", "Visualization", "Holoviz"], "ranking": 1, "requirements": {}, "run": { diff --git a/applications/iio/cpp/metadata.json b/applications/iio/cpp/metadata.json index 3b2aac33a7..0f4bb9d03f 100644 --- a/applications/iio/cpp/metadata.json +++ b/applications/iio/cpp/metadata.json @@ -19,7 +19,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["iio", "libiio"], + "tags": ["Signal Processing", "iio", "libiio"], "ranking": 3, "requirements": { "operators": [ diff --git a/applications/iio/python/metadata.json b/applications/iio/python/metadata.json index ef746ac42a..adca54e46a 100644 --- a/applications/iio/python/metadata.json +++ b/applications/iio/python/metadata.json @@ -19,7 +19,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["iio", "libiio"], + "tags": ["Signal Processing", "iio", "libiio"], "ranking": 3, "requirements": { "operators": [ diff --git a/applications/isaac_sim_holoscan_bridge/metadata.json b/applications/isaac_sim_holoscan_bridge/metadata.json index 3a630d8a63..5dcfcf0d88 100644 --- a/applications/isaac_sim_holoscan_bridge/metadata.json +++ b/applications/isaac_sim_holoscan_bridge/metadata.json @@ -22,11 +22,10 @@ "x86_64" ], "tags": [ - "Sensor", + "Robotics", "Processing", "Isaac Sim", - "Bridge", - "Robotics" + "Bridge" ], "ranking": 3, "requirements": { diff --git a/applications/matlab_gpu_coder/matlab_beamform/metadata.json b/applications/matlab_gpu_coder/matlab_beamform/metadata.json index 8127fcfe6c..bd9bc7d8f5 100644 --- a/applications/matlab_gpu_coder/matlab_beamform/metadata.json +++ b/applications/matlab_gpu_coder/matlab_beamform/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Ultrasound Beamforming with MATLAB GPU Coder", + "name": "Ultrasound Beamforming with MATLAB", "authors": [ { "name": "Holoscan Team", diff --git a/applications/monai_endoscopic_tool_seg/metadata.json b/applications/monai_endoscopic_tool_seg/metadata.json index 35403f483f..e9948f5913 100644 --- a/applications/monai_endoscopic_tool_seg/metadata.json +++ b/applications/monai_endoscopic_tool_seg/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Endoscopy Tool Segmentation from MONAI Model Zoo", + "name": "Endoscopy Tool Segmentation using MONAI", "authors": [ { "name": "Jin Li", diff --git a/applications/multiai_endoscopy/cpp/metadata.json b/applications/multiai_endoscopy/cpp/metadata.json index 6c0592dbcf..5d68f234ad 100644 --- a/applications/multiai_endoscopy/cpp/metadata.json +++ b/applications/multiai_endoscopy/cpp/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Multi AI SSD Detection and MONAI Endoscopic Tool Segmentation", + "name": "Multi AI Detection and Tool Segmentation", "authors": [ { "name": "Holoscan Team", diff --git a/applications/multiai_endoscopy/python/metadata.json b/applications/multiai_endoscopy/python/metadata.json index b9b2373824..b83ee98302 100644 --- a/applications/multiai_endoscopy/python/metadata.json +++ b/applications/multiai_endoscopy/python/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Multi AI SSD Detection and MONAI Endoscopic Tool Segmentation", + "name": "Multi AI Detection and Tool Segmentation", "authors": [ { "name": "Holoscan Team", diff --git a/applications/nvidia_nim/nvidia_nim_imaging/metadata.json b/applications/nvidia_nim/nvidia_nim_imaging/metadata.json index f8ad171b6a..5db8f3186e 100644 --- a/applications/nvidia_nim/nvidia_nim_imaging/metadata.json +++ b/applications/nvidia_nim/nvidia_nim_imaging/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Medical Imaging Segmentation with NVIDIA Vista-3D NIM", + "name": "Medical Imaging Segmentation with Vista-3D", "authors": [ { "name": "Holoscan Team", diff --git a/applications/nvidia_video_codec/CMakeLists.txt b/applications/nvidia_video_codec/CMakeLists.txt index 2ccd68bbf4..4186651157 100644 --- a/applications/nvidia_video_codec/CMakeLists.txt +++ b/applications/nvidia_video_codec/CMakeLists.txt @@ -13,24 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Download the endoscopy sample data -if(OP_nv_video_encoder OR OP_nv_video_decoder) - if(HOLOHUB_DOWNLOAD_DATASETS) - include(holoscan_download_data) - holoscan_download_data(endoscopy - URL nvidia/clara-holoscan/holoscan_endoscopy_sample_data:20230222 - DOWNLOAD_NAME holoscan_endoscopy_sample_data_20230222.zip - DOWNLOAD_DIR ${HOLOHUB_DATA_DIR} - GENERATE_GXF_ENTITIES - GXF_ENTITIES_HEIGHT 480 - GXF_ENTITIES_WIDTH 854 - GXF_ENTITIES_CHANNELS 3 - GXF_ENTITIES_FRAMERATE 30 - ALL - ) - endif() -endif() - add_holohub_application(nvc_decode DEPENDS OPERATORS nv_video_encoder nv_video_decoder @@ -41,3 +23,21 @@ add_holohub_application(nvc_encode_decode DEPENDS OPERATORS add_holohub_application(nvc_encode_writer DEPENDS OPERATORS nv_video_encoder tensor_to_file) + +# Download the endoscopy sample data +if(OP_nv_video_encoder OR OP_nv_video_decoder) + if(HOLOHUB_DOWNLOAD_DATASETS) + include(holoscan_download_data) + holoscan_download_data(endoscopy + URL nvidia/clara-holoscan/holoscan_endoscopy_sample_data:20230222 + DOWNLOAD_NAME holoscan_endoscopy_sample_data_20230222.zip + DOWNLOAD_DIR ${HOLOHUB_DATA_DIR} + GENERATE_GXF_ENTITIES + GXF_ENTITIES_HEIGHT 480 + GXF_ENTITIES_WIDTH 854 + GXF_ENTITIES_CHANNELS 3 + GXF_ENTITIES_FRAMERATE 30 + ALL + ) + endif() +endif() diff --git a/applications/nvidia_video_codec/nvc_decode/cpp/metadata.json b/applications/nvidia_video_codec/nvc_decode/cpp/metadata.json index 05b9752f45..a7107156e0 100644 --- a/applications/nvidia_video_codec/nvc_decode/cpp/metadata.json +++ b/applications/nvidia_video_codec/nvc_decode/cpp/metadata.json @@ -25,6 +25,7 @@ "aarch64" ], "tags": [ + "Streaming", "NVIDIA Video Codec", "H.264", "H.265", diff --git a/applications/nvidia_video_codec/nvc_decode/python/metadata.json b/applications/nvidia_video_codec/nvc_decode/python/metadata.json index dc8677c4ff..fd1c2928c4 100644 --- a/applications/nvidia_video_codec/nvc_decode/python/metadata.json +++ b/applications/nvidia_video_codec/nvc_decode/python/metadata.json @@ -25,6 +25,7 @@ "aarch64" ], "tags": [ + "Streaming", "NVIDIA Video Codec", "H.264", "H.265", diff --git a/applications/nvidia_video_codec/nvc_encode_decode/cpp/metadata.json b/applications/nvidia_video_codec/nvc_encode_decode/cpp/metadata.json index e78987c5ac..c24413f0a7 100644 --- a/applications/nvidia_video_codec/nvc_encode_decode/cpp/metadata.json +++ b/applications/nvidia_video_codec/nvc_encode_decode/cpp/metadata.json @@ -25,6 +25,7 @@ "aarch64" ], "tags": [ + "Streaming", "NVIDIA Video Codec", "H.264", "H.265", diff --git a/applications/nvidia_video_codec/nvc_encode_decode/python/metadata.json b/applications/nvidia_video_codec/nvc_encode_decode/python/metadata.json index 02ee7acd86..f47d73cbc6 100644 --- a/applications/nvidia_video_codec/nvc_encode_decode/python/metadata.json +++ b/applications/nvidia_video_codec/nvc_encode_decode/python/metadata.json @@ -25,6 +25,7 @@ "aarch64" ], "tags": [ + "Streaming", "NVIDIA Video Codec", "H.264", "H.265", diff --git a/applications/nvidia_video_codec/nvc_encode_writer/cpp/metadata.json b/applications/nvidia_video_codec/nvc_encode_writer/cpp/metadata.json index 8f46b2edde..4dbef8c724 100644 --- a/applications/nvidia_video_codec/nvc_encode_writer/cpp/metadata.json +++ b/applications/nvidia_video_codec/nvc_encode_writer/cpp/metadata.json @@ -25,6 +25,7 @@ "aarch64" ], "tags": [ + "Streaming", "NVIDIA Video Codec", "H.264", "H.265", diff --git a/applications/nvidia_video_codec/nvc_encode_writer/python/metadata.json b/applications/nvidia_video_codec/nvc_encode_writer/python/metadata.json index 5f9655f554..34a0f61572 100644 --- a/applications/nvidia_video_codec/nvc_encode_writer/python/metadata.json +++ b/applications/nvidia_video_codec/nvc_encode_writer/python/metadata.json @@ -25,6 +25,7 @@ "aarch64" ], "tags": [ + "Streaming", "NVIDIA Video Codec", "H.264", "H.265", diff --git a/applications/object_detection_torch/CMakeLists.txt b/applications/object_detection_torch/CMakeLists.txt index 8cc3cb25ce..98c2a0d0d0 100644 --- a/applications/object_detection_torch/CMakeLists.txt +++ b/applications/object_detection_torch/CMakeLists.txt @@ -78,6 +78,14 @@ if(holoscan_VERSION GREATER 3.5) ) endif() +if("${holoscan_VERSION}" VERSION_GREATER_EQUAL "3.7") + # holoscan::holoinfer_utils is directly available in Holoscan SDK 3.7 and later + target_link_libraries(object_detection_torch PRIVATE holoscan::holoinfer_utils) +else() + # "holoscan_utils.hpp" is exposed through holoscan::infer in Holoscan SDK 3.6 and earlier + target_link_libraries(object_detection_torch PRIVATE holoscan::infer) +endif() + # Download the cars sample data option(HOLOHUB_DOWNLOAD_DATASETS "Download datasets" ON) if(HOLOHUB_DOWNLOAD_DATASETS) diff --git a/applications/object_detection_torch/main.cpp b/applications/object_detection_torch/main.cpp index 684d86f1d9..d6a056ea24 100644 --- a/applications/object_detection_torch/main.cpp +++ b/applications/object_detection_torch/main.cpp @@ -25,6 +25,7 @@ #include #include +#include #include #include #include diff --git a/applications/openigtlink_3dslicer/cpp/metadata.json b/applications/openigtlink_3dslicer/cpp/metadata.json index f3873fd858..edc522581f 100644 --- a/applications/openigtlink_3dslicer/cpp/metadata.json +++ b/applications/openigtlink_3dslicer/cpp/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "OpenIGTLink 3D Slicer: Bidirectional Video Streaming with AI Segmentation", + "name": "OpenIGTLink 3D Slicer", "authors": [ { "name": "Holoscan Team", diff --git a/applications/openigtlink_3dslicer/python/metadata.json b/applications/openigtlink_3dslicer/python/metadata.json index 58a8a919fb..a896f059a0 100644 --- a/applications/openigtlink_3dslicer/python/metadata.json +++ b/applications/openigtlink_3dslicer/python/metadata.json @@ -1,6 +1,7 @@ { "application": { - "name": "OpenIGTLink 3D Slicer: Bidirectional Video Streaming with AI Segmentation", + "name": "OpenIGTLink 3D Slicer", + "description": "Bidirectional video streaming with AI segmentation", "authors": [ { "name": "Holoscan Team", diff --git a/applications/orsi/orsi_in_out_body/README.md b/applications/orsi/orsi_in_out_body/README.md index 2334c0ec08..d6d1b954c4 100644 --- a/applications/orsi/orsi_in_out_body/README.md +++ b/applications/orsi/orsi_in_out_body/README.md @@ -1,4 +1,4 @@ -# Orsi Academy In-Out Body Detection and Surgical Video Anonymization +# In-Out Body Detection and Surgical Video Anonymization
Fig. 1: Example of anonymized result after inference

diff --git a/applications/orsi/orsi_in_out_body/cpp/metadata.json b/applications/orsi/orsi_in_out_body/cpp/metadata.json index 71de27eb59..49a90abf81 100644 --- a/applications/orsi/orsi_in_out_body/cpp/metadata.json +++ b/applications/orsi/orsi_in_out_body/cpp/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Orsi Academy In-Out Body Detection and Surgical Video Anonymization", + "name": "In-Out Body Detection", "authors": [ { "name": "Jasper Hofman", diff --git a/applications/orsi/orsi_in_out_body/python/metadata.json b/applications/orsi/orsi_in_out_body/python/metadata.json index f3ddf509d7..7740e3e25a 100644 --- a/applications/orsi/orsi_in_out_body/python/metadata.json +++ b/applications/orsi/orsi_in_out_body/python/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Orsi Academy In-Out Body Detection and Surgical Video Anonymization", + "name": "In-Out Body Detection", "authors": [ { "name": "Jasper Hofman", diff --git a/applications/orsi/orsi_multi_ai_ar/README.md b/applications/orsi/orsi_multi_ai_ar/README.md index 5877265286..b4a50c88ce 100644 --- a/applications/orsi/orsi_multi_ai_ar/README.md +++ b/applications/orsi/orsi_multi_ai_ar/README.md @@ -1,4 +1,4 @@ -# Orsi Academy Multi AI and AR Visualization +# Multi AI and AR Visualization
Fig. 1: Application screenshots

diff --git a/applications/orsi/orsi_multi_ai_ar/cpp/metadata.json b/applications/orsi/orsi_multi_ai_ar/cpp/metadata.json index 377e18612d..db00e1e25d 100644 --- a/applications/orsi/orsi_multi_ai_ar/cpp/metadata.json +++ b/applications/orsi/orsi_multi_ai_ar/cpp/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Orsi Academy Multi AI and AR Visualization", + "name": "Multi AI and AR Visualization", "authors": [ { "name": "Jasper Hofman", diff --git a/applications/orsi/orsi_multi_ai_ar/python/metadata.json b/applications/orsi/orsi_multi_ai_ar/python/metadata.json index bef088ee06..b4def4d530 100644 --- a/applications/orsi/orsi_multi_ai_ar/python/metadata.json +++ b/applications/orsi/orsi_multi_ai_ar/python/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Orsi Academy Multi AI and AR Visualization", + "name": "Multi AI and AR Visualization", "authors": [ { "name": "Jasper Hofman", diff --git a/applications/orsi/orsi_segmentation_ar/README.md b/applications/orsi/orsi_segmentation_ar/README.md index 42cdb45a8e..c3e3410e78 100644 --- a/applications/orsi/orsi_segmentation_ar/README.md +++ b/applications/orsi/orsi_segmentation_ar/README.md @@ -1,4 +1,4 @@ -# Orsi Academy Surgical Tool Segmentation and AR Overlay +# Surgical Tool Segmentation and AR Overlay
diff --git a/applications/orsi/orsi_segmentation_ar/cpp/metadata.json b/applications/orsi/orsi_segmentation_ar/cpp/metadata.json index 99d946bfb3..bd72059c1d 100644 --- a/applications/orsi/orsi_segmentation_ar/cpp/metadata.json +++ b/applications/orsi/orsi_segmentation_ar/cpp/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Orsi Academy Surgical Tool Segmentation and AR Overlay", + "name": "Surgical Tool Segmentation and AR Overlay", "authors": [ { "name": "Jasper Hofman", diff --git a/applications/orsi/orsi_segmentation_ar/python/metadata.json b/applications/orsi/orsi_segmentation_ar/python/metadata.json index 3b2a912d74..71c47e2a4f 100644 --- a/applications/orsi/orsi_segmentation_ar/python/metadata.json +++ b/applications/orsi/orsi_segmentation_ar/python/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Orsi Academy Surgical Tool Segmentation and AR Overlay", + "name": "Surgical Tool Segmentation and AR Overlay", "authors": [ { "name": "Jasper Hofman", diff --git a/applications/orthorectification_with_optix/python/metadata.json b/applications/orthorectification_with_optix/python/metadata.json index ec7c767cec..a9d434ea19 100644 --- a/applications/orthorectification_with_optix/python/metadata.json +++ b/applications/orthorectification_with_optix/python/metadata.json @@ -1,6 +1,7 @@ { "application": { - "name": "GPU-Accelerated Orthorectification with NVIDIA OptiX", + "name": "Orthorectification with NVIDIA OptiX", + "description": "GPU-Accelerated Orthorectification with NVIDIA OptiX", "authors": [ { "name": "Brent Bartlett", diff --git a/applications/polyp_detection/metadata.json b/applications/polyp_detection/metadata.json index 5de04414c2..fa00f9856b 100644 --- a/applications/polyp_detection/metadata.json +++ b/applications/polyp_detection/metadata.json @@ -23,6 +23,7 @@ "aarch64" ], "tags": [ + "Healthcare AI", "Colonoscopy", "Detection", "RT-DETR", diff --git a/applications/psd_pipeline/metadata.json b/applications/psd_pipeline/metadata.json index 4bb12b661c..0d7c9062a4 100644 --- a/applications/psd_pipeline/metadata.json +++ b/applications/psd_pipeline/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "VITA 49 Power Spectral Density (PSD)", + "name": "VITA 49 Power Spectral Density", "authors": [ { "name": "John Moon ", diff --git a/applications/slang/slang_gamma_correction/cpp/metadata.json b/applications/slang/slang_gamma_correction/cpp/metadata.json index 80ca8704ce..6c08fd6b9a 100644 --- a/applications/slang/slang_gamma_correction/cpp/metadata.json +++ b/applications/slang/slang_gamma_correction/cpp/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Slang Gamma Correction Example", + "name": "Slang Gamma Correction", "description": "Example for using the Slang shading language operator for gamma correction", "authors": [ { @@ -24,9 +24,9 @@ "aarch64" ], "tags": [ + "Rendering", "Slang", "shading", - "rendering", "shader", "compute", "cuda" diff --git a/applications/slang/slang_gamma_correction/python/metadata.json b/applications/slang/slang_gamma_correction/python/metadata.json index 8896911bfb..2a42e4476f 100644 --- a/applications/slang/slang_gamma_correction/python/metadata.json +++ b/applications/slang/slang_gamma_correction/python/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Slang Gamma Correction Example", + "name": "Slang Gamma Correction", "description": "Example for using the Slang shading language operator for gamma correction", "authors": [ { @@ -24,9 +24,9 @@ "aarch64" ], "tags": [ + "Rendering", "Slang", "shading", - "rendering", "shader", "compute", "cuda", diff --git a/applications/slang/slang_simple/cpp/metadata.json b/applications/slang/slang_simple/cpp/metadata.json index f239433e2a..b70144a89c 100644 --- a/applications/slang/slang_simple/cpp/metadata.json +++ b/applications/slang/slang_simple/cpp/metadata.json @@ -1,6 +1,6 @@ { "application": { - "name": "Slang Simple Compute Kernel Example", + "name": "Slang Simple Compute Kernel", "description": "Example for using the Slang shading language operator for simple compute kernels", "authors": [ { @@ -24,9 +24,9 @@ "aarch64" ], "tags": [ + "Rendering", "Slang", "shading", - "rendering", "shader", "shadertoy", "compute", diff --git a/applications/slang/slang_simple/python/metadata.json b/applications/slang/slang_simple/python/metadata.json index 666037c8ff..5a22c3d078 100644 --- a/applications/slang/slang_simple/python/metadata.json +++ b/applications/slang/slang_simple/python/metadata.json @@ -25,9 +25,9 @@ "aarch64" ], "tags": [ + "Rendering", "Slang", "shading", - "rendering", "shader", "shadertoy", "compute", diff --git a/applications/tao_peoplenet/metadata.json b/applications/tao_peoplenet/metadata.json index d07143dc3b..8249529341 100644 --- a/applications/tao_peoplenet/metadata.json +++ b/applications/tao_peoplenet/metadata.json @@ -1,6 +1,7 @@ { "application": { - "name": "TAO PeopleNet Detection Model on V4L2 Video Stream", + "name": "TAO PeopleNet Detection Model", + "description": "TAO PeopleNet Detection Model on V4L2 Video Stream", "authors": [ { "name": "Holoscan Team", diff --git a/applications/volume_rendering_xr/operators/XrFrameOp/convert_depth/metadata.json b/applications/volume_rendering_xr/operators/XrFrameOp/convert_depth/metadata.json index b84a95f405..8854991715 100644 --- a/applications/volume_rendering_xr/operators/XrFrameOp/convert_depth/metadata.json +++ b/applications/volume_rendering_xr/operators/XrFrameOp/convert_depth/metadata.json @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["Convert", "Depth"], + "tags": ["Rendering", "Convert", "Depth"], "ranking": 2, "requirements": {} } diff --git a/applications/xr_gsplat/metadata.json b/applications/xr_gsplat/metadata.json index 94895958eb..e0f90d8e07 100644 --- a/applications/xr_gsplat/metadata.json +++ b/applications/xr_gsplat/metadata.json @@ -26,7 +26,7 @@ "x86_64" ], "tags": [ - "XR", + "Extended Reality", "OpenXR", "Gaussian Splatting", "3D Reconstruction" diff --git a/benchmarks/exclusive_display/metadata.json b/benchmarks/exclusive_display/metadata.json index c5b99604ea..cdb6b9acc1 100644 --- a/benchmarks/exclusive_display/metadata.json +++ b/benchmarks/exclusive_display/metadata.json @@ -21,7 +21,7 @@ "platforms": [ "aarch64" ], - "tags": ["Benchmarking", "Visualization"], + "tags": ["Visualization", "Benchmarking"], "ranking": 1, "requirements": { "holoscan_sdk": "^2.1.0" diff --git a/benchmarks/holoscan_flow_benchmarking/benchmark.py b/benchmarks/holoscan_flow_benchmarking/benchmark.py index b309072864..da832f1ac1 100644 --- a/benchmarks/holoscan_flow_benchmarking/benchmark.py +++ b/benchmarks/holoscan_flow_benchmarking/benchmark.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,7 +37,7 @@ sys.path.insert(0, holohub_root) from utilities.cli.holohub import HoloHubCLI # noqa: E402 -from utilities.cli.util import build_holohub_path_mapping # noqa: E402 +from utilities.cli.util import build_holohub_path_mapping, resolve_path_prefix # noqa: E402 logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG) @@ -120,16 +120,20 @@ def find_python_files_to_patch(project_metadata, holohub_root_path): return directories_to_patch build_dir = Path(holohub_root_path) / "build" / project_name + prefix = resolve_path_prefix(None) path_mapping = build_holohub_path_mapping( - holohub_root=Path(holohub_root_path), project_data=project_metadata, build_dir=build_dir + holohub_root=Path(holohub_root_path), + project_data=project_metadata, + build_dir=build_dir, + prefix=prefix, ) command = run_config.get("command", "") - if "" in command and source_folder and os.path.isdir(source_folder): + if f"<{prefix}app_source>" in command and source_folder and os.path.isdir(source_folder): directories_to_patch.append(source_folder) logger.info(f"Will patch source directory: {source_folder}") - if "" in command: + if f"<{prefix}app_bin>" in command: if source_folder: - app_build_dir = path_mapping.get("holohub_app_bin", "") + app_build_dir = path_mapping.get(f"{prefix}app_bin", "") if app_build_dir and os.path.isdir(app_build_dir): directories_to_patch.append(app_build_dir) logger.info(f"Will patch build directory: {app_build_dir}") @@ -142,7 +146,7 @@ def find_python_files_to_patch(project_metadata, holohub_root_path): # If no placeholders found but it's a Python command, patch source directory if not directories_to_patch and "python" in command.lower() and source_folder: workdir = run_config.get("workdir", "") - if workdir in ["holohub_app_source", "holohub_app_bin"]: + if workdir in [f"{prefix}app_source", f"{prefix}app_bin"]: source_folder = path_mapping.get(workdir, source_folder) if os.path.isdir(source_folder): directories_to_patch.append(source_folder) diff --git a/benchmarks/realtime_threads_benchmarking/metadata.json b/benchmarks/realtime_threads_benchmarking/metadata.json index 6d4bf5ec44..1f22ed64b6 100644 --- a/benchmarks/realtime_threads_benchmarking/metadata.json +++ b/benchmarks/realtime_threads_benchmarking/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Benchmarking", "Real-time", "Threading", "Performance"], + "tags": ["Threading", "Benchmarking", "Real-time", "Performance"], "ranking": 1, "requirements": {}, "run": { diff --git a/cmake/FetchHolohubOperator.cmake b/cmake/FetchHolohubOperator.cmake index 2e21d7e0ec..06f988faea 100644 --- a/cmake/FetchHolohubOperator.cmake +++ b/cmake/FetchHolohubOperator.cmake @@ -66,6 +66,7 @@ function(fetch_holohub_operator OPERATOR_NAME) && git sparse-checkout set --no-cone operators/${ARGS_PATH} cmake/pybind11_add_holohub_module.cmake + cmake/nvidia_video_codec.cmake cmake/pybind11/ cmake/pydoc/ operators/operator_util.hpp diff --git a/doc/website/Dockerfile b/doc/website/Dockerfile index 6a89a93237..d81f56f3f9 100644 --- a/doc/website/Dockerfile +++ b/doc/website/Dockerfile @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM squidfunk/mkdocs-material:latest +FROM squidfunk/mkdocs-material:9.6.19 # Install plugins. ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh diff --git a/doc/website/docs/applications/index.md b/doc/website/docs/applications/index.md index a41cbd45f6..fefa4cae72 100644 --- a/doc/website/docs/applications/index.md +++ b/doc/website/docs/applications/index.md @@ -1,3 +1,4 @@ -# [Applications](https://github.com/nvidia-holoscan/holohub/tree/main/applications) - -Holohub features a curated collection of reference applications that demonstrate the platform's capabilities across various domains, from medical imaging to industrial automation. Each application is designed to showcase best practices for integrating Holoscan's optimized libraries and microservices, ensuring high performance and low latency. Whether you are looking to streamline data processing workflows, enhance real-time analytics, or develop cutting-edge AI models, the applications in Holohub provide valuable examples and templates to accelerate your development process. \ No newline at end of file +--- +template: pages/applications.html +title: Applications +--- diff --git a/doc/website/docs/assets/css/custom-material.css b/doc/website/docs/assets/css/custom-material.css index 8c7ee76063..b2932c94b8 100644 --- a/doc/website/docs/assets/css/custom-material.css +++ b/doc/website/docs/assets/css/custom-material.css @@ -182,3 +182,47 @@ grid-template-columns: 1fr; } } + +/* Hide the left sidebar on larger screens, but keep it on mobile for the menu */ +@media screen and (min-width: 76.25em) { + .md-sidebar--primary { + display: none; + } +} + +/* Version Selector Dropdown */ +.version-selector { + appearance: none; + background-color: var(--md-default-bg-color); + border: 1px solid var(--md-default-fg-color--lighter); + border-radius: 4px; + color: var(--md-default-fg-color); + cursor: pointer; + font-size: 0.8rem; + padding: 0.25em 1.5em 0.25em 0.5em; + font-family: var(--md-text-font); + font-weight: 500; + margin-left: 0.25em; + vertical-align: baseline; + transition: border-color 0.2s, background-color 0.2s; + background-image: url("data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='currentColor' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e"); + background-repeat: no-repeat; + background-position: right 0.3em center; + background-size: 0.9em; +} + +.version-selector:hover { + border-color: var(--md-accent-fg-color); + background-color: var(--md-default-fg-color--lightest); +} + +.version-selector:focus { + outline: none; + border-color: var(--md-accent-fg-color); + box-shadow: 0 0 0 2px var(--md-accent-fg-color--transparent); +} + +.version-selector option { + background-color: var(--md-default-bg-color); + color: var(--md-default-fg-color); +} \ No newline at end of file diff --git a/doc/website/docs/assets/images/application_default.png b/doc/website/docs/assets/images/application_default.png new file mode 100644 index 0000000000..564089ccd1 Binary files /dev/null and b/doc/website/docs/assets/images/application_default.png differ diff --git a/doc/website/docs/assets/images/benchmark_default.png b/doc/website/docs/assets/images/benchmark_default.png new file mode 100644 index 0000000000..c992736f48 Binary files /dev/null and b/doc/website/docs/assets/images/benchmark_default.png differ diff --git a/doc/website/docs/assets/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png b/doc/website/docs/assets/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png new file mode 100644 index 0000000000..a442c0a18e Binary files /dev/null and b/doc/website/docs/assets/images/jetson-for-dev-ai-labs-icon-2974266-r3_ai-lab-text-generation-28.png differ diff --git a/doc/website/docs/assets/images/operator_default.png b/doc/website/docs/assets/images/operator_default.png new file mode 100644 index 0000000000..253cab0998 Binary files /dev/null and b/doc/website/docs/assets/images/operator_default.png differ diff --git a/doc/website/docs/assets/images/tutorial_default.png b/doc/website/docs/assets/images/tutorial_default.png new file mode 100644 index 0000000000..d5b1dbf78c Binary files /dev/null and b/doc/website/docs/assets/images/tutorial_default.png differ diff --git a/doc/website/docs/assets/images/workflow_default.png b/doc/website/docs/assets/images/workflow_default.png new file mode 100644 index 0000000000..0083b879bb Binary files /dev/null and b/doc/website/docs/assets/images/workflow_default.png differ diff --git a/doc/website/docs/benchmarks/index.md b/doc/website/docs/benchmarks/index.md index 3b8021f2ab..499362bd8d 100644 --- a/doc/website/docs/benchmarks/index.md +++ b/doc/website/docs/benchmarks/index.md @@ -1,5 +1,4 @@ -# [Benchmarks](https://github.com/nvidia-holoscan/holohub/tree/main/benchmarks) - -The HoloHub benchmark resources are a critical resource for developers aiming to optimize and validate the performance of their AI sensor processing applications built with the Holoscan SDK. - -Holohub provides a collection of benchmarking tools and reference implementations designed to measure and compare the efficiency, speed, and scalability of various Holoscan workflows. By offering detailed performance metrics and best practices, these benchmarks help developers identify bottlenecks and optimize their applications for high performance and low latency. Whether you are focusing on real-time data processing, model inference, or end-to-end workflow performance, the benchmarks on this page provide valuable insights and guidelines to ensure your applications meet the highest standards. \ No newline at end of file +--- +template: pages/benchmarks.html +title: Benchmarks +--- diff --git a/doc/website/docs/index.md b/doc/website/docs/index.md index 3764c7cd40..09ab3b51cc 100644 --- a/doc/website/docs/index.md +++ b/doc/website/docs/index.md @@ -1,84 +1,4 @@ --- -title: Holoscan Reference Applications # Used for the header when scrolling down -hide: - - navigation - - footer +template: pages/home.html +title: Home --- - - - - - -# Holoscan Reference Applications - -**Holoscan Reference Applications** is a central repository for the [NVIDIA Holoscan](https://www.nvidia.com/en-us/clara/holoscan/) AI sensor processing community -to share reference applications, operators, tutorials and benchmarks. -The repository hosts a variety of applications that demonstrate how to use Holoscan for streaming, imaging, and other AI-driven tasks across embedded, edge, and cloud environments. These applications serve as reference implementations, providing developers with examples of best practices and efficient coding techniques to build high-performance, low-latency AI applications. The repository is open to contributions from the community, encouraging developers to share their own applications and extensions to enhance the Holoscan ecosystem. - -
- -- :material-merge:{ .lg } __Workflows__ (#workflows) - - --- - - Reference workflows demonstrate how capabilities from applications and operators can be combined - to achieve complex tasks. - - [Browse Workflows](workflows/index.md){ .md-button .md-button } - - -- :material-apps:{ .lg } __Applications__ (#applications) - - --- - - Reference applications demonstrate a specific capability of Holoscan or how a specific operator - can be used to perform an optimize task. - - [Browse Applications](applications/index.md){ .md-button .md-button } - -- :material-code-greater-than:{ .lg } __Operators__ (#operators) - - --- - - Operators perform a specific task. - - [Browse Operators](operators/index.md){ .md-button .md-button } - -- :material-book-open-variant-outline:{ .lg } __Tutorials__ (#tutorials) - - --- - - Tutorials provide hands-on experience. - - [Browse Tutorials](tutorials/index.md){ .md-button .md-button } - - -- :material-chart-box-outline:{ .lg } __Benchmarks__ (#benchmarks) - - --- - - Benchmarks provide tools for assessing performance of Holoscan pipelines as well as reference benchmarks for specific releases - - [Browse Benchmarks](benchmarks/index.md){ .md-button .md-button } - - - - -
diff --git a/doc/website/docs/operators/index.md b/doc/website/docs/operators/index.md index 12d1f3c112..28bcbd9b4c 100644 --- a/doc/website/docs/operators/index.md +++ b/doc/website/docs/operators/index.md @@ -1,5 +1,4 @@ -# [Operators](https://github.com/nvidia-holoscan/holohub/tree/main/operators) - -Operators are fundamental components that extend the functionality of the Holoscan SDK, enabling developers to build custom AI sensor processing workflows. -These operators are designed to handle specific tasks such as data ingestion, preprocessing, model inference, and postprocessing, and can be seamlessly integrated into Holoscan applications. -The repository provides a collection of pre-built operators that serve as reference implementations, demonstrating best practices for efficient and scalable AI processing. \ No newline at end of file +--- +template: pages/operators.html +title: Operators +--- diff --git a/doc/website/docs/tutorials/index.md b/doc/website/docs/tutorials/index.md index a10a40b2a6..cc5677f147 100644 --- a/doc/website/docs/tutorials/index.md +++ b/doc/website/docs/tutorials/index.md @@ -1,6 +1,4 @@ -# [Tutorials](https://github.com/nvidia-holoscan/holohub/tree/main/tutorials) - -The HoloHub tutorials are an invaluable resource for developers looking to master the Holoscan SDK and build advanced AI sensor processing applications. -This page offers a collection of comprehensive tutorials that guide users through various aspects of the Holoscan platform, from basic setup to advanced workflow optimization. - -Each tutorial is designed to provide step-by-step instructions, code examples, and best practices, making it easier for developers to integrate Holoscan's powerful features into their projects. Whether you are new to Holoscan or an experienced developer seeking to enhance your skills, these tutorials offer practical insights and hands-on exercises to help you leverage the full potential of the Holoscan platform. \ No newline at end of file +--- +template: pages/tutorials.html +title: Tutorials +--- diff --git a/doc/website/docs/workflows/index.md b/doc/website/docs/workflows/index.md index e0a7eb8094..a010d01622 100644 --- a/doc/website/docs/workflows/index.md +++ b/doc/website/docs/workflows/index.md @@ -1,3 +1,4 @@ -# [Workflows](https://github.com/nvidia-holoscan/holohub/tree/main/workflows) - -Workflows are pre-configured sequences of operations that help users accomplish specific tasks efficiently. These workflows combine multiple Holoscan components and settings into ready-to-use solutions, making it easier to get started with common use cases. +--- +template: pages/workflows.html +title: Workflows +--- diff --git a/doc/website/mkdocs.yml b/doc/website/mkdocs.yml index 57d3ad11cc..1d32540b11 100644 --- a/doc/website/mkdocs.yml +++ b/doc/website/mkdocs.yml @@ -1,4 +1,4 @@ -site_name: Holoscan Reference Applications +site_name: NVIDIA Holoscan Reference Applications site_url: https://nvidia-holoscan.github.io/holohub repo_url: https://github.com/nvidia-holoscan/holohub repo_name: nvidia-holoscan/holohub @@ -10,9 +10,9 @@ theme: - content.code.annotate - navigation.tabs - navigation.indexes - - navigation.instant + # - navigation.instant # Disabled due to JavaScript timing issues with filterByTag - navigation.path - - navigation.instant.prefetch + # - navigation.instant.prefetch # Disabled to fix JavaScript initialization issues - navigation.top - navigation.footer - search.suggest @@ -53,6 +53,7 @@ plugins: # fallback_to_build_date: true - gen-files: scripts: + - scripts/generate_featured_apps.py - scripts/generate_pages.py - macros - tags @@ -87,7 +88,8 @@ watch: - ../../tutorials - ../../benchmarks - ../../workflows - - overrides + - overrides/assets + - overrides/pages - scripts markdown_extensions: diff --git a/doc/website/overrides/_pages/.gitignore b/doc/website/overrides/_pages/.gitignore new file mode 100644 index 0000000000..6fc977b812 --- /dev/null +++ b/doc/website/overrides/_pages/.gitignore @@ -0,0 +1,2 @@ +*.html +tmp_* \ No newline at end of file diff --git a/doc/website/overrides/assets/shared-cards.css b/doc/website/overrides/assets/shared-cards.css new file mode 100644 index 0000000000..7a11ed1fbe --- /dev/null +++ b/doc/website/overrides/assets/shared-cards.css @@ -0,0 +1,521 @@ +/* Shared styles for Applications and Benchmarks pages */ + +.md-main { + flex-grow: 0; +} + +.md-main__inner { + display: flex; + height: 100%; +} + + +.tx-container { + padding-top: .0rem; + background: var(--md-default-fg-color--lightest); +} + +.tx-hero { + margin: 32px 2.8rem; + color: var(--md-default-fg-color); + justify-content: center; +} + +.tx-hero h1 { + line-height: 1.25; + margin-bottom: 1rem; + color: currentColor; + font-weight: 700 +} + +@media (min-width: 576px){ .tx-hero h1 { font-size: 28px; } } +@media (min-width: 720px){ .tx-hero h1 { font-size: 36px; } } +@media (min-width: 992px){ .tx-hero h1 { font-size: 48px; } } + +.tx-hero p { + line-height: 1.75; + font-weight: 400 +} + +@media (min-width: 576px){ .tx-hero p { font-size: 18px; } } +@media (min-width: 720px){ .tx-hero p { font-size: 20px; } } +@media (min-width: 992px){ .tx-hero p { font-size: 22px; } } + +.tx-hero__content { + padding-bottom: 1rem; + margin: 0 auto; +} + +.tx-hero__image{ + width: 17rem !important; + height: 17rem !important; + order: 1 !important; + padding-right: 2.5rem !important; +} + +.tx-hero__image img { + border: 0px !important; +} + +.tx-hero__ytvideo{ + position: relative; + width: 100%; + height: 100%; +} + +.video { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; +} + +.tx-hero .md-button { + margin-top: .5rem; + margin-right: .5rem; + padding-left: 0.75rem; + padding-right: 0.75rem; + color: var(--md-default-fg-color); + border-color: var(--md-accent-fg-color); + text-decoration: none; + line-height: 1.25; + font-weight: 700; +} + +@media (min-width: 576px){ .tx-hero .md-button { font-size: 14px; } } +@media (min-width: 720px){ .tx-hero .md-button { font-size: 16px; } } +@media (min-width: 992px){ .tx-hero .md-button { font-size: 18px; } } + +.tx-hero .md-button:focus, +.tx-hero .md-button:hover { + background-color: var(--md-default-bg-color--light) !important; + color: var(--md-default-fg-color--light); + border-color: var(--md-default-fg-color); + text-decoration: none; +} + +.tx-hero .md-button--primary { + background-color: var(--md-accent-fg-color); + border-color: var(--md-accent-fg-color); + border-width: 0; +} + +.tx-hero .md-button--primary:focus, +.tx-hero .md-button--primary:hover { + background-color: var(--md-accent-fg-color--light); +} + +.feature-item h2 svg { + height: 30px; + float: left; + margin-right: 10px; + transform: translateY(10%); +} + +.top-hr { + margin: 14px 0px 30px; +} +@media (min-width: 576px){ .top-hr { margin: 14px 0px 30px ; } } +@media (min-width: 720px){ .top-hr { margin: 21px 0px 45px; } } +@media (min-width: 992px){ .top-hr { margin: 21px 0px 45px; } } + +.row { + display:-ms-flexbox; + display:flex; + -ms-flex-wrap:wrap; + flex-wrap:wrap; +} + +.justify-content-center { + -ms-flex-pack:center!important;justify-content:center!important +} + +.col-12 { + position: relative; + width: 100%; + min-height: 1px; + padding-right: 15px; + padding-left: 15px; +} + +.col-12 { + -ms-flex:0 0 100%; + flex:0 0 100%; + max-width:100% +} + +.text-center { + text-align: center!important; +} + +.section-title { + margin-bottom: 32px; + line-height: 1.25; + font-weight: 700; +} +@media (min-width: 576px){ .section-title { font-size: 24px; } } +@media (min-width: 720px){ .section-title { font-size: 28px; } } +@media (min-width: 992px){ .section-title { font-size: 36px; } } + +[data-md-color-scheme="dark"] .section-title, +[data-md-color-scheme="dark"] .section-title a { + color: white; +} + +[data-md-color-scheme="dark"] .section-gray .section-title, +[data-md-color-scheme="dark"] .section-gray .section-title a { + color: white; +} + +.section-gray { + background: #EFEFEF; +} + +[data-md-color-scheme="dark"] .section-gray { + background: #606060; +} + +.col, .col-1, .col-10, .col-11, .col-12, .col-2, .col-3, .col-4, .col-5, .col-6, .col-7, .col-8, .col-9, .col-auto, .col-lg, .col-lg-1, .col-lg-10, .col-lg-11, .col-lg-12, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-auto, .col-md, .col-md-1, .col-md-10, .col-md-11, .col-md-12, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-auto, .col-sm, .col-sm-1, .col-sm-10, .col-sm-11, .col-sm-12, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-auto, .col-xl, .col-xl-1, .col-xl-10, .col-xl-11, .col-xl-12, .col-xl-2, .col-xl-3, .col-xl-4, .col-xl-5, .col-xl-6, .col-xl-7, .col-xl-8, .col-xl-9, .col-xl-auto { + position: relative; + width: 100%; + min-height: 1px; + padding: 15px; +} + +@media (min-width: 576px) { + .col-sm-6 { + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } +} +@media (min-width: 720px) { + .col-lg-3 { + -ms-flex: 0 0 33.333333%; + flex: 0 0 33.333333%; + max-width: 33.333333%; + } +} +@media (min-width: 768px) { + .col-md-6 { + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } +} +@media (min-width: 992px) { + .col-lg-4 { + -ms-flex: 0 0 33.333333%; + flex: 0 0 33.333333%; + max-width: 33.333333%; + } + .col-lg-6 { + -ms-flex: 0 0 50%; + flex: 0 0 50%; + max-width: 50%; + } + .col-lg-3 { + -ms-flex: 0 0 25%; + flex: 0 0 25%; + max-width: 25%; + } +} +@media (min-width: 1200px) { + .col-xl-4 { + -ms-flex: 0 0 33.333333%; + flex: 0 0 33.333333%; + max-width: 33.333333%; + } +} + +.bg-white{background-color:#fff!important} +.px-4{padding-left:1.5rem!important} +.py-5{padding-top:3rem!important} +.no-padding{padding:0rem!important} + +.shadow { + box-shadow: 0 5px 15px rgba(0, 0, 0, .1) !important; + transition: .3s ease; + display: block !important; +} + +.shadow:hover { + box-shadow: 0 5px 15px rgba(0, 0, 0, .3) !important; + transition: .3s ease; + display: block !important; +} + +.padding-feature-box-item{ + padding-left: 0.8rem!important; + padding-right: 0.8rem!important; + padding-top: 0.8rem!important; + padding-bottom: 0.8rem!important; +} + +.padding-graph{ + padding-left: 0.5rem!important; + padding-right: 0.5rem!important; + padding-top: 0.5rem!important; + padding-bottom: 0.5rem!important; +} + +.icon { + font-size: 40px; +} + +.d-block { + display: block!important; +} + +.mb-0, .my-0 { + margin-bottom: 0!important; +} + +.feature-item { + font-family: 'Lato', sans-serif; + font-weight: 300; + box-sizing: border-box; + padding: 0 15px; + word-break: break-word +} + +h2 { + color: #333; + font-weight: 700; + font-size: 1.2rem; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + line-height: normal; + font-family: inherit; +} + +.feature-box { + text-align: left; + display:flex; + justify-content: center!important; + flex-shrink: 0; +} + +.feature-box a{ + height: 320px; + width: 100%; + flex-shrink: 0; +} + +[data-md-color-scheme="dark"] .feature-box a.bg-white { + background-color: #cbcbcb !important; +} + +.feature-box a i { + color: #76b900!important; + font-size: 0.6rem; + -webkit-transition: .4s ease-out; + transition: all ease-out .4s !important +} + +.feature-box a:hover i { + color: #000!important; + transform: translate(0.5em); +} + +.feature-box a p.feature-card-desc{ + min-height: 80px; + font-size: 15px; + line-height: 1.667; + font-weight: 400; +} + +.feature-box a .nv-teaser-text-link { + margin-block-start: 0em; + margin-block-end: 0em; + font-weight: 700; + padding-bottom: 15px !important; + padding-top: 0px !important; +} +@media (min-width: 576px){ .nv-teaser-text-link { font-size: 12px; } } +@media (min-width: 720px){ .nv-teaser-text-link { font-size: 14px; } } +@media (min-width: 992px){ .nv-teaser-text-link { font-size: 16px; } } + +.feature-box h3 { + color: #333; + line-height: 1.25; + font-weight: 700; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + margin-top: 0.5rem; + margin-bottom: 0.5rem; + font-family: inherit; + text-align: left; + font-size: 16px; +} +@media (min-width: 576px){ .feature-box h3 { font-size: 16px; } } +@media (min-width: 720px){ .feature-box h3 { font-size: 18px; } } +@media (min-width: 992px){ .feature-box h3 { font-size: 20px; } } + +.feature-box p { + font-size: 0.6rem; + font-weight: 300; + line-height: 1.8em; + text-rendering: optimizeLegibility; + -webkit-font-smoothing: antialiased; + color: #111; + margin: 0 0 10px; + display: block; + text-align: left; +} + +@media screen and (max-width:30em) { + .tx-hero h1 { + font-size: 1.4rem + } +} + +@media screen and (min-width:60em) { + .md-sidebar--secondary { + display: none + } + + .tx-hero { + display: flex; + align-items: center; + justify-content: center; + } + + .tx-hero__content { + max-width: 30rem; + margin-top: 3.5rem; + margin-bottom: 3.5rem; + margin-left: 1.0rem; + margin-right: 4.0rem; + align-items: center; + } +} + +@media screen and (min-width:76.25em) { + .md-sidebar--primary { + display: none + } + + .top-hr { + width: 100%; + display: flex; + max-width: 61rem; + margin-right: auto; + margin-left: auto; + padding: 0 .2rem; + } + + .bottom-hr { + margin-top: 10px; + width: 100%; + display: flex; + max-width: 61rem; + margin-right: auto; + margin-left: auto; + padding: 0 .2rem; + } + + .feature-item { + flex: 1; + min-width: 0; + } + + .feature-item:hover { + background-color: #526cfe47; + border-radius: 3px; + } +} + +.hr { + border-bottom: 1px solid #eee; + width: 100%; + margin: 20px 0; +} + +.logos { + display: flex; + align-items: center; + justify-content: center; + flex-flow: row wrap; + margin: 0 auto; +} + +.logos img { + flex: 1 1 auto; + padding: 25px; + max-height: 130px; + vertical-align: middle; +} + +.hr-logos { + margin-top: 0; + margin-bottom: 30px; +} + +.md-footer__inner { + display: none !important; +} + +.md-footer-meta__inner { + display: flex; + flex-wrap: wrap; + justify-content: space-between; + margin-top: 1.0rem; +} + +.md-footer-social { + padding-top: 20px; +} + +.twemoji { + display: inline-flex; + height: 0.6rem; + vertical-align: text-top; +} + +/* Sidebar navigation styles */ + +/* Sidebar navigation hover effect only */ +.sidebar-nav a:not([data-active]):hover { + background-color: #76b900 !important; + color: white !important; +} + +/* Highlight the first link (All) by default only when no other link is active */ +.sidebar-nav:not(:has(a[data-active])) a:first-child { + background-color: #76b900 !important; + color: white !important; +} + +/* Card backgrounds - light mode */ +.app-card { + background-color: #fff; + border-radius: 0.5rem; + border: 1px solid transparent; +} + +/* Card backgrounds - dark mode */ +[data-md-color-scheme="dark"] .app-card { + background-color: var(--md-default-bg-color); + border: 1px solid #969696; +} + +/* Card title in dark mode */ +[data-md-color-scheme="dark"] .app-card h3 { + color: #fff !important; +} + +/* Card description in dark mode */ +[data-md-color-scheme="dark"] .app-card .feature-card-desc { + color: #d3d3d3 !important; +} + +/* Image backgrounds in dark mode for better visibility of default images only */ +[data-md-color-scheme="dark"] .app-card img[src*="_default.png"] { + background-color: #e8e8e8; + padding: 0.5rem; +} + diff --git a/doc/website/overrides/assets/shared-filter.js b/doc/website/overrides/assets/shared-filter.js new file mode 100644 index 0000000000..3e60981846 --- /dev/null +++ b/doc/website/overrides/assets/shared-filter.js @@ -0,0 +1,249 @@ +// Shared JavaScript for filtering cards on Applications and Benchmarks pages + +// Store the current page's filter function and nav selector globally +// These are declared at the top so they're available immediately +var currentFilterFn = null; +var currentNavSelector = null; +var globalHandlersInitialized = false; + +// Global filterByTag function that delegates to the current page's filter +window.filterByTag = function(tag) { + if (currentFilterFn) { + currentFilterFn(tag); + } + return false; +}; + +// Auto-initialize based on page type data attribute +function autoInitializeFilters() { + var sidebarNav = document.querySelector('.sidebar-nav[data-page-type]'); + if (!sidebarNav) { + return; // Not a filterable page + } + + var pageType = sidebarNav.getAttribute('data-page-type'); + if (!pageType) { + return; + } + + // Map page types to their titles + var pageTitles = { + 'applications': 'All Applications', + 'operators': 'All Operators', + 'benchmarks': 'All Benchmarks', + 'tutorials': 'All Tutorials', + 'workflows': 'All Workflows' + }; + + var allTitle = pageTitles[pageType] || 'All'; + + // Create and setup the filter function + var pageFilterFn = createFilterFunction({ + normalizeSpaces: true, + updateTitle: true, + allTitle: allTitle, + navSelector: '.sidebar-nav a', + tagMatchStrategy: 'text' + }); + + setupFilterHandlers(pageFilterFn, '.sidebar-nav a'); +} + +function nat_at_top() { + var navtab = document.getElementsByClassName('md-tabs'); + if (navtab && navtab[0]) { + navtab[0].classList.add("topped"); + } +} + +// Initialize filters and navigation on page load +window.addEventListener('load', function() { + nat_at_top(); + autoInitializeFilters(); +}); + +// Also initialize on DOMContentLoaded for faster initial setup +document.addEventListener('DOMContentLoaded', autoInitializeFilters); + +// Handle navigation tab scrolling +window.addEventListener('scroll', function() { + var scroll = document.body.scrollTop || document.documentElement.scrollTop; + var navtab = document.getElementsByClassName('md-tabs'); + if (navtab && navtab[0]) { + navtab[0].classList.toggle("topped", scroll < navtab[0].offsetHeight); + } +}); + +// Generic filter function that can be customized per page +function createFilterFunction(options) { + options = options || {}; + var normalizeSpaces = options.normalizeSpaces !== undefined ? options.normalizeSpaces : false; + var updateTitle = options.updateTitle !== undefined ? options.updateTitle : false; + var allTitle = options.allTitle || 'All'; + var navSelector = options.navSelector || 'nav a'; + var tagMatchStrategy = options.tagMatchStrategy || 'href'; // 'href' or 'text' + + return function filterByTag(tag) { + var cards = document.querySelectorAll('.feature-box'); + var activeCategory = tag.toLowerCase(); + + if (normalizeSpaces) { + activeCategory = activeCategory.replace(/ /g, '-'); + } + + // Update the category title if enabled + if (updateTitle) { + var categoryTitle = document.getElementById('category-title'); + if (categoryTitle) { + if (tag.toLowerCase() === 'all') { + categoryTitle.textContent = allTitle; + } else { + // Always convert to title case for consistent display + // This handles both "Extended Reality" and "extended-reality" inputs + var normalizedTag = tag.replace(/-/g, ' ').toLowerCase(); + var displayTitle = normalizedTag.split(' ') + .map(function(word) { + // Keep acronyms in uppercase + if (word === 'ai') { + return 'AI'; + } + return word.charAt(0).toUpperCase() + word.slice(1); + }) + .join(' '); + categoryTitle.textContent = displayTitle; + } + } + } + + // If "all" is selected, show all cards + if (activeCategory === 'all') { + cards.forEach(function(card) { + card.style.display = ''; + }); + } else { + var shownCount = 0; + // Filter cards by tag + cards.forEach(function(card) { + // Get all tags in the card + var cardTags = card.querySelectorAll('.md-tag'); + var hasTag = false; + + // Check if any of the card's tags match the selected category + cardTags.forEach(function(tagElement) { + if (tagMatchStrategy === 'href') { + // Match by href attribute (benchmarks style) + var tagHref = tagElement.getAttribute('href'); + if (tagHref && tagHref.includes('tag:' + activeCategory)) { + hasTag = true; + } + } else if (tagMatchStrategy === 'text') { + // Match by text content (applications style) + var tagText = tagElement.textContent || tagElement.innerText; + var tagTextNormalized = tagText.toLowerCase().trim(); + + if (normalizeSpaces) { + tagTextNormalized = tagTextNormalized.replace(/ /g, '-'); + } + + if (tagTextNormalized === activeCategory) { + hasTag = true; + } + } + }); + + // Show or hide the card based on whether it has the tag + if (hasTag) { + card.style.display = ''; + shownCount++; + } else { + card.style.display = 'none'; + } + }); + } + + // Update active state on navigation links + var navLinks = document.querySelectorAll(navSelector); + navLinks.forEach(function(link) { + var isActive = false; + + // Check if this link matches the active category + if (tagMatchStrategy === 'text') { + // For applications: extract tag from onclick attribute + var linkOnclick = link.getAttribute('onclick'); + var match = linkOnclick && linkOnclick.match(/filterByTag\('([^']+)'\)/); + var linkTag = match ? match[1].toLowerCase() : ''; + + if (normalizeSpaces) { + linkTag = linkTag.replace(/ /g, '-'); + } + + isActive = linkTag === activeCategory || (activeCategory === 'all' && link.getAttribute('href') === '#all'); + } else { + // For benchmarks: match by href + isActive = link.getAttribute('href') === '#' + activeCategory; + } + + if (isActive) { + link.style.backgroundColor = '#76b900'; + link.style.color = 'white'; + link.setAttribute('data-active', 'true'); + } else { + link.style.backgroundColor = ''; + link.style.color = 'var(--md-default-fg-color)'; + link.removeAttribute('data-active'); + } + }); + }; +} + +// Show all cards +function showAllCards(navSelector) { + navSelector = navSelector || 'nav a'; + + var cards = document.querySelectorAll('.feature-box'); + cards.forEach(function(card) { + card.style.display = ''; + }); + + // Remove active state from all navigation links + var navLinks = document.querySelectorAll(navSelector); + navLinks.forEach(function(link) { + link.style.backgroundColor = ''; + link.style.color = 'var(--md-default-fg-color)'; + link.removeAttribute('data-active'); + }); +} + +// Apply hash-based filter from URL +function applyCurrentHashFilter() { + if (!currentFilterFn || !currentNavSelector) { + return; + } + + var hash = window.location.hash.substring(1); + if (hash) { + currentFilterFn(hash); + } else { + showAllCards(currentNavSelector); + } +} + +// Setup hash change and page load handlers +function setupFilterHandlers(filterFn, navSelector) { + currentFilterFn = filterFn; + currentNavSelector = navSelector; + + // Only set up global event listeners once + if (!globalHandlersInitialized) { + window.addEventListener('hashchange', applyCurrentHashFilter); + window.addEventListener('load', applyCurrentHashFilter); + globalHandlersInitialized = true; + } + + // Apply filter immediately if DOM is ready + if (document.readyState !== 'loading') { + applyCurrentHashFilter(); + } else { + document.addEventListener('DOMContentLoaded', applyCurrentHashFilter); + } +} diff --git a/doc/website/overrides/assets/style.css b/doc/website/overrides/assets/style.css new file mode 100644 index 0000000000..969fae3b2c --- /dev/null +++ b/doc/website/overrides/assets/style.css @@ -0,0 +1,884 @@ +:root { + --primary-color: #02007e; + --body-color: #f9f9f9; + --text-color: #636363; + --text-color-dark: #242738; + --white-color: #ffffff; + --light-color: #f8f9fa; + --font-family: Lato; +} + +body { + line-height: 1.5; + font-family: var(--font-family), sans-serif; + -webkit-font-smoothing: antialiased; + font-size: 17px; + color: var(--text-color); + background-color: var(--body-color) +} + +/* Hide the left sidebar on every page */ +.md-sidebar--primary{ + display: none; + } + +p { + color: var(--text-color); + font-size: 15px +} + +h1, +h2, +h3, +h4, +h5, +h6 { + color: var(--text-color-dark); + font-family: var(--font-family), sans-serif; + font-weight: 700; + line-height: 1.2 +} + +h1, +.h1 { + font-size: 45px +} + +h2, +.h2 { + font-size: 32px +} + +h3, +.h3 { + font-size: 26px +} + +h4, +.h4 { + font-size: 20px +} + +h5, +.h5 { + font-size: 18px +} + +h6, +.h6 { + font-size: 14px +} + +.btn { + font-size: 14px; + font-family: var(--font-family), sans-serif; + text-transform: uppercase; + padding: 16px 44px; + border-radius: 0; + font-weight: 600; + border: 0; + position: relative; + z-index: 1; + transition: .2s ease +} + +.btn:focus { + outline: 0; + box-shadow: none !important +} + +.btn:active { + box-shadow: none +} + +.btn-primary { + background: var(--primary-color); + color: var(--white-color) +} + +.btn-primary:active { + background: var(--primary-color) +} + +.btn-primary:hover { + background: var(--primary-color) +} + +.btn-primary:not(:disabled):not(.disabled).active, +.btn-primary:not(:disabled):not(.disabled):active, +.show>.btn-primary.dropdown-toggle { + color: var(--white-color); + background-color: var(--primary-color); + border-color: var(--primary-color) +} + +.inline-button { + line-height: .8rem !important; + padding: 5px 8px !important; + pointer-events: none; + margin-top: -5px +} + +.overflow-hidden { + overflow: hidden !important +} + +::-moz-selection { + background: var(--primary-color); + color: var(--white-color) +} + +::selection { + background: var(--primary-color); + color: var(--white-color) +} + +.preloader { + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: var(--white-color); + z-index: 999; + display: flex; + align-items: center; + justify-content: center +} + +ul { + list-style-type: none; + margin: 0; + padding-left: 0; + font-size: 15px +} + +ol { + padding-left: 20px; + font-size: 15px +} + +img { + vertical-align: middle; + border: 0 +} + +a, +a:hover, +a:focus { + text-decoration: none; + color: var(--primary-color) +} + +a:hover, +a:focus { + color: var(--primary-color) +} + +a, +button, +select { + cursor: pointer; + transition: .2s ease +} + +a:focus, +button:focus, +select:focus { + outline: 0 +} + +.slick-slide { + outline: 0 +} + +.section { + padding-top: 80px; + padding-bottom: 80px +} + +@media(max-width:768px) { + .section { + padding-top: 60px + } +} + +.section-sm { + padding-top: 60px; + padding-bottom: 60px +} + +@media(max-width:768px) { + .section-sm { + padding-top: 40px + } +} + +.section-title { + margin-bottom: 40px +} + +.bg-cover { + background-size: cover; + background-position: 50%; + background-repeat: no-repeat +} + +.border-primary { + border-color: #f2f2f2 !important +} + +pre { + padding: 20px +} + +.overlay { + position: relative +} + +.overlay::before { + position: absolute; + content: ''; + height: 100%; + width: 100%; + top: 0; + left: 0; + background: var(--primary-color); + opacity: .8 +} + +.outline-0 { + outline: 0 !important +} + +.d-unset { + display: unset !important +} + +.bg-primary { + background: var(--primary-color) !important +} + +.bg-white { + background-color: var(--white-color) !important +} + +.bg-light { + background-color: var(--light-color) !important +} + +.text-primary { + color: var(--primary-color) !important +} + +.text-color { + color: var(--text-color) !important +} + +.text-dark { + color: var(--text-color-dark) !important +} + +.text-white { + color: var(--white-color) !important +} + +.top-50 { + top: 50px +} + +.navbar { + padding: 0 +} + +@media(max-width:768px) { + .navbar { + padding: 10px 0 + } +} + +.navbar-brand img { + max-width: 100px; + margin-bottom: 0 +} + +.navbar .nav-item .nav-link { + text-transform: uppercase; + padding: 10px 15px !important; + font-size: 15px +} + +.navbar .dropdown:hover .dropdown-menu { + visibility: visible; + opacity: 1; + -webkit-transform: scaleX(1); + transform: scaleX(1) +} + +.navbar .dropdown-menu { + box-shadow: 0 3px 9px 0 rgba(0, 0, 0, .12); + padding: 15px 0; + border: 0; + top: 40px; + left: -35px; + border-radius: 0; + display: block; + visibility: hidden; + transition: .3s ease; + opacity: 0; + -webkit-transform: translateY(20px); + transform: translateY(20px); + background: var(--white-color) +} + +@media(max-width:768px) { + .navbar .dropdown-menu { + display: none; + opacity: 1; + visibility: visible; + -webkit-transform: translateY(0); + transform: translateY(0); + -webkit-transform-origin: unset; + transform-origin: unset + } +} + +.navbar .dropdown-menu.view { + visibility: visible !important; + opacity: 1; + -webkit-transform: translateY(0); + transform: translateY(0); + -webkit-transform-origin: top; + transform-origin: top +} + +@media(max-width:768px) { + .navbar .dropdown-menu.view { + display: block + } +} + +.navbar .dropdown-menu.show { + visibility: hidden +} + +@media(max-width:768px) { + .navbar .dropdown-menu.show { + visibility: visible; + display: block + } +} + +.navbar .dropdown-item { + position: relative; + color: var(--text-color-dark); + transition: .2s ease; + font-family: var(--font-family), sans-serif +} + +@media(max-width:768px) { + .navbar .dropdown-item { + text-align: center + } +} + +.navbar .dropdown-item:hover { + color: var(--primary-color); + background: 0 0 +} + +.lang-list { + background: var(--primary-color); + color: var(--white-color) +} + +.lang-list.dark { + color: var(--text-color-dark); + background: var(--white-color) +} + +.banner { + overflow: hidden +} + +.banner p { + font-size: 20px; + opacity: .8 +} + +.banner .nav-link.text-dark { + color: var(--white-color) !important +} + +.banner .nav-link.text-dark:hover { + color: var(--white-color) !important +} + +.banner .navbar-brand { + color: var(--white-color) !important +} + +#project-icon { + float: left; + height: 32px; + width: 32px +} + +#project-description { + margin: 0; + padding: 0 +} + +.ui-helper-hidden-accessible { + display: none +} + +.ui-menu { + background: var(--white-color); + padding: 5px 20px 20px; + right: 0 !important; + max-height: 200px; + overflow: hidden; + border-radius: 0 0 25px 25px; + z-index: 9999; + box-shadow: 0 13px 20px 0 rgba(0, 0, 0, .07) +} + +@media(max-width:575px) { + .ui-menu { + width: calc(100% - 30px) !important + } +} + +@media(min-width:576px) { + .ui-menu { + max-width: 510px !important + } +} + +@media(min-width:768px) { + .ui-menu { + max-width: 690px !important + } +} + +@media(min-width:992px) { + .ui-menu { + max-width: 610px !important + } +} + +@media(min-width:1200px) { + .ui-menu { + max-width: 730px !important + } +} + +.ui-menu-item a { + color: var(--text-color); + padding: 8px 0; + font-size: 15px +} + +.ui-menu-item a:hover { + color: var(--primary-color) +} + +.ui-menu-item:not(:last-child) { + border-bottom: 1px solid #e8e8e8 +} + +.ui-menu-item * { + display: none +} + +.ui-menu-item .ui-corner-all { + display: block +} + +.form-control { + height: 50px; + border-radius: 25px; + border: 0; + padding: 0 20px +} + +.form-control:focus { + border: 0; + box-shadow: none !important +} + +textarea.form-control { + height: 150px; + padding: 20px +} + +.icon { + font-size: 40px +} + +.shadow { + box-shadow: 0 5px 15px rgba(0, 0, 0, .07) !important; + transition: .3s ease +} + +.shadow-bottom { + box-shadow: 0 1px 0 rgba(12, 13, 14, .1), 0 1px 6px rgba(59, 64, 69, .1) +} + +.shadow:hover, +.shadow:focus { + box-shadow: 0 14px 25px rgba(0, 0, 0, .1) !important +} + +.content * { + margin-bottom: 20px +} + +.content img { + max-width: 100%; + height: auto; + margin: 0 auto 15px; + display: block; + text-align: center +} + +.content ul { + padding-left: 0; + margin-bottom: 20px +} + +.content ul li { + padding-left: 20px; + position: relative +} + +.content ul li::before { + position: absolute; + content: ''; + height: 8px; + width: 8px; + border-radius: 50%; + background: var(--primary-color); + opacity: .3; + left: 0; + top: 8px +} + +.list-styled li { + padding-left: 20px; + position: relative +} + +.list-styled li::before { + position: absolute; + content: ''; + height: 8px; + width: 8px; + border-radius: 50%; + background: var(--primary-color); + opacity: .3; + left: 0; + top: 17px +} + +.post-meta { + color: var(--text-color); + font-style: italic; + font-size: 14px +} + +blockquote { + font-size: 20px !important; + color: var(--text-color-dark); + padding: 20px 40px; + border-left: 2px solid var(--primary-color); + margin: 40px 0; + font-weight: 700; + background: var(--light-color) +} + +blockquote p { + margin-bottom: 0 !important +} + +.pagination { + justify-content: space-between +} + +.pagination a { + color: var(--primary-color) +} + +.pagination i { + font-size: 15px; + line-height: 1.8 +} + +#accordion i { + font-size: 14px; + line-height: 2 +} + +table { + text-align: left; + width: 100%; + max-width: 100%; + margin-bottom: 1rem; + border: 1px solid #dee2e6 +} + +table td, +table th { + padding: .75rem; + vertical-align: top; + border: 1px solid #dee2e6; + margin-bottom: 0 +} + +thead { + background: #ececec; + margin-bottom: 0 +} + +tbody { + background: #f8f8f8; + margin-bottom: 0 +} + +.notices { + margin: 2rem 0; + position: relative +} + +.notices p { + padding: 10px +} + +.notices p::before { + position: absolute; + top: 2px; + color: #fff; + font-family: themify; + font-weight: 900; + content: "\e717"; + left: 10px +} + +.notices.note p { + border-top: 30px solid #6ab0de; + background: #e7f2fa +} + +.notices.note p::after { + content: 'Note'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.notices.tip p { + border-top: 30px solid #78c578; + background: #e6f9e6 +} + +.notices.tip p::after { + content: 'Tip'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.notices.info p { + border-top: 30px solid #f0b37e; + background: #fff2db +} + +.notices.info p::after { + content: 'Info'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.notices.warning p { + border-top: 30px solid #e06f6c; + background: #fae2e2 +} + +.notices.warning p::after { + content: 'Warning'; + position: absolute; + top: 2px; + color: #fff; + left: 2rem +} + +.sidebar { + background-color: var(--white-color); + position: sticky; + top: 50px; + margin-bottom: 30px; + padding: 40px 10px 20px +} + +.sidelist { + display: block +} + +li.sidelist>a { + margin-left: 20px; + margin-bottom: 10px; + display: block; + font-size: 20px +} + +li.sidelist li a { + margin: 0 +} + +.sidelist li.sidelist { + display: block +} + +.sidelist li.sidelist.active a { + color: var(--primary-color) +} + +.sidelist li.sidelist.active::before { + opacity: 1 +} + +.page-list li a { + display: none +} + +.page-list li ul a { + display: block +} + +.sidelist li a { + color: var(--text-color-dark); + display: block; + font-size: 15px; + font-weight: 500; + padding: 10px 0; + line-height: 1.4 +} + +p:empty, +p a:empty { + display: none !important +} + +pre { + display: block; + padding: 9.5px; + margin: 10px 0 +} + +code { + margin-bottom: 0 !important; + font-size: 100% +} + +.back-btn { + position: relative +} + +.back-btn::before { + position: absolute; + font-family: themify; + content: "\e6bc"; + font-size: 25px; + height: 30px; + width: 40px; + background-color: var(--white-color); + color: inherit; + text-align: right; + z-index: 1; + left: -5px; + top: -5px +} + +.ui-autocomplete-input { + border-bottom: 1px solid #d4d4d4 !important +} + +.ui-autocomplete-input.active { + border-bottom-left-radius: 0; + border-bottom-right-radius: 0 +} + +.search-icon { + position: absolute; + right: 20px; + top: 18px; + font-size: 14px +} + +i { + transition: .2s ease +} + +a:hover i { + color: var(--primary-color) +} + +pre code::-webkit-scrollbar { + height: 5px +} + +pre code::-webkit-scrollbar-track { + background: #000 +} + +pre code::-webkit-scrollbar-thumb { + background: #888 +} + +.code-tabs { + border: 1px solid #dee2e6; + overflow: hidden; + margin: 20px 0 +} + +.code-tabs .tab-content { + padding: 20px 15px; + margin-bottom: 0 +} + +.code-tabs .tab-content .tab-pane { + margin-bottom: 0 +} + +.code-tabs .nav-tabs { + margin-bottom: 0 +} + +.code-tabs .nav-tabs .nav-item { + padding-left: 0; + border-right: 1px solid #dee2e6 +} + +.code-tabs .nav-tabs .nav-item .nav-link { + text-decoration: none; + font-weight: 500; + border: 0; + margin-bottom: 0 +} + +.code-tabs .nav-tabs .nav-item::before { + display: none +} + +.code-tabs .nav-tabs .nav-item.active { + background: var(--primary-color) +} + +.code-tabs .nav-tabs .nav-item.active .nav-link { + color: var(--white-color) +} \ No newline at end of file diff --git a/doc/website/overrides/pages/applications.html b/doc/website/overrides/pages/applications.html new file mode 100644 index 0000000000..979d228406 --- /dev/null +++ b/doc/website/overrides/pages/applications.html @@ -0,0 +1,63 @@ + +{% extends "main.html" %} +{% block extrahead %} + + + + + {{ super() }} +{% endblock %} + + +{# block announce #} + + + +{# endblock #} + +{% block tabs %} +{{ super() }} + + +
+
+
+ +
+
+

Applications

+ +
+
+ + +
+
+
+

All Applications

+

+ Explore a diverse collection of reference applications showcasing NVIDIA Holoscan capabilities across multiple domains. + These production-ready examples demonstrate real-time AI workflows in healthcare, computer vision, robotics, streaming, + and more. Browse by category to find applications relevant to your use case, or use them as starting points for your own + sensor processing pipelines and AI-powered applications. +

+
+ {% include '_pages/applications.html' %} +
+
+
+
+
+ + + +{% endblock %} + +{% block content %} +{% endblock %} diff --git a/doc/website/overrides/pages/benchmarks.html b/doc/website/overrides/pages/benchmarks.html new file mode 100644 index 0000000000..82b777bbe6 --- /dev/null +++ b/doc/website/overrides/pages/benchmarks.html @@ -0,0 +1,63 @@ + +{% extends "main.html" %} +{% block extrahead %} + + + + + {{ super() }} +{% endblock %} + + +{# block announce #} + + + +{# endblock #} + +{% block tabs %} +{{ super() }} + + +
+
+
+ +
+
+

Benchmarks

+ +
+
+ + +
+
+
+

All Benchmarks

+

+ Benchmarks provide performance measurements and comparative analysis of NVIDIA Holoscan capabilities. + These examples demonstrate real-world performance characteristics including threading models, visualization techniques, + and system optimization strategies. Use these benchmarks to understand performance implications and optimize your applications + for specific hardware configurations and use cases. +

+
+ {% include '_pages/benchmarks.html' %} +
+
+
+
+
+ + + +{% endblock %} + +{% block content %} +{% endblock %} diff --git a/doc/website/overrides/pages/home.html b/doc/website/overrides/pages/home.html new file mode 100644 index 0000000000..88270ee6d4 --- /dev/null +++ b/doc/website/overrides/pages/home.html @@ -0,0 +1,618 @@ + +{% extends "main.html" %} +{% block extrahead %} + + + {{ super() }} +{% endblock %} + + +{# block announce #} + + + +{# endblock #} + +{% block tabs %} +{{ super() }} + + + +
+
+
+
+

Accelerate Sensor Processing

+

Bring Real-time AI to the world with NVIDIA Holoscan

+ + Explore Applications + + +  Learn more about Holoscan + +
+
+ +
+
+
+
+ +
+
+
+ + {% include '_pages/featured-applications.html' %} +
+
+
+ + +
+
+
+ + {% include '_pages/featured-tutorials.html' %} +
+
+
+ + + + +
+
+
+ + {% include '_pages/featured-benchmarks.html' %} +
+
+
+ + + + +{% endblock %} + +{% block content %} +{% endblock %} + diff --git a/doc/website/overrides/pages/operators.html b/doc/website/overrides/pages/operators.html new file mode 100644 index 0000000000..8f372563fd --- /dev/null +++ b/doc/website/overrides/pages/operators.html @@ -0,0 +1,64 @@ + +{% extends "main.html" %} +{% block extrahead %} + + + + + {{ super() }} +{% endblock %} + + +{# block announce #} + + + +{# endblock #} + +{% block tabs %} +{{ super() }} + + +
+
+
+ +
+
+

Operators

+ +
+
+ + +
+
+
+

All Operators

+

+ Operators are the fundamental building blocks of Holoscan applications, representing individual processing units + that can be composed into complex data processing pipelines. Browse this collection of custom operators that extend + Holoscan's capabilities for specialized data ingestion, transformation, visualization, AI inference, and output operations. + Each operator can be integrated into your workflows to accelerate development of sensor processing applications. +

+
+ {% include '_pages/operators.html' %} +
+
+
+
+
+ + + +{% endblock %} + +{% block content %} +{% endblock %} + diff --git a/doc/website/overrides/pages/tutorials.html b/doc/website/overrides/pages/tutorials.html new file mode 100644 index 0000000000..b4e71a3450 --- /dev/null +++ b/doc/website/overrides/pages/tutorials.html @@ -0,0 +1,64 @@ + +{% extends "main.html" %} +{% block extrahead %} + + + + + {{ super() }} +{% endblock %} + + +{# block announce #} + + + +{# endblock #} + +{% block tabs %} +{{ super() }} + + +
+
+
+ +
+
+

Tutorials

+ +
+
+ + +
+
+
+

All Tutorials

+

+ Learn NVIDIA Holoscan through hands-on tutorials covering fundamental concepts, advanced techniques, and best practices. + These step-by-step guides help you understand how to build sensor processing pipelines, integrate AI models, optimize performance, + and deploy applications. Whether you're new to Holoscan or looking to expand your expertise, these tutorials provide practical + knowledge for developing real-time streaming applications. +

+
+ {% include '_pages/tutorials.html' %} +
+
+
+
+
+ + + +{% endblock %} + +{% block content %} +{% endblock %} + diff --git a/doc/website/overrides/pages/workflows.html b/doc/website/overrides/pages/workflows.html new file mode 100644 index 0000000000..044c9f37b3 --- /dev/null +++ b/doc/website/overrides/pages/workflows.html @@ -0,0 +1,64 @@ + +{% extends "main.html" %} +{% block extrahead %} + + + + + {{ super() }} +{% endblock %} + + +{# block announce #} + + + +{# endblock #} + +{% block tabs %} +{{ super() }} + + +
+
+
+ +
+
+

Workflows

+ +
+
+ + +
+
+
+

All Workflows

+

+ Workflows demonstrate complete end-to-end processing pipelines built by composing multiple operators together. + These examples showcase how to orchestrate complex data flows for real-time sensor processing, from data ingestion + through AI inference to visualization and output. Explore workflows across different domains to understand pipeline + architecture patterns, data flow management, and integration strategies for building production-ready streaming applications. +

+
+ {% include '_pages/workflows.html' %} +
+
+
+
+
+ + + +{% endblock %} + +{% block content %} +{% endblock %} + diff --git a/doc/website/scripts/common_utils.py b/doc/website/scripts/common_utils.py new file mode 100644 index 0000000000..fae65dc03d --- /dev/null +++ b/doc/website/scripts/common_utils.py @@ -0,0 +1,398 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Common utilities shared across website scripts.""" + +import json +import logging +import os +import re +import subprocess +from datetime import datetime +from pathlib import Path +from urllib.parse import urljoin + +import markdown +import requests +from bs4 import BeautifulSoup + +# Set up logging +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") +logger = logging.getLogger(__name__) + +# Constants +COMPONENT_TYPES = ["workflows", "applications", "operators", "tutorials", "benchmarks"] +HOLOHUB_REPO_URL = "https://github.com/nvidia-holoscan/holohub" + + +def get_current_git_ref() -> str: + """Get the current git branch, tag, or commit hash being built. + + Returns: + A string representing the current git reference (branch, tag, or commit hash). + Falls back to 'main' if detection fails. + """ + # Check common CI environment variables + if "GITHUB_REF_NAME" in os.environ: + return os.environ["GITHUB_REF_NAME"] + if "CI_COMMIT_REF_NAME" in os.environ: # GitLab + return os.environ["CI_COMMIT_REF_NAME"] + try: + # Get current branch, tag, or commit hash + for cmd in [ + ["git", "rev-parse", "--abbrev-ref", "HEAD"], + ["git", "describe", "--tags", "--exact-match"], + ["git", "rev-parse", "--short", "HEAD"], + ]: + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode == 0 and result.stdout.strip() and result.stdout.strip() != "HEAD": + return result.stdout.strip() + except Exception as e: + logger.warning(f"Failed to determine git reference: {e}") + return "main" # Default fallback + + +# Use a function to get the raw base URL dynamically +def get_raw_base_url(name="") -> str: + """Get the base URL for raw GitHub content based on current git reference.""" + if not name: + name = get_current_git_ref() + return f"https://raw.githubusercontent.com/nvidia-holoscan/holohub/{name}/" + + +# Ranking levels for documentation +RANKING_LEVELS = { + 0: "Level 0 - Core Stable", + 1: "Level 1 - Highly Reliable", + 2: "Level 2 - Trusted", + 3: "Level 3 - Developmental", + 4: "Level 4 - Experimental", + 5: "Level 5 - Obsolete", +} + + +def get_git_root() -> Path: + """Get the absolute path to the Git repository root.""" + try: + result = subprocess.run( + ["git", "rev-parse", "--show-toplevel"], capture_output=True, text=True, check=True + ) + return Path(result.stdout.strip()) + except Exception as e: + logger.error(f"Error getting Git root: {e}") + return Path(".") + + +def parse_metadata_file(metadata_path: Path) -> dict: + """Parse metadata.json file and extract relevant information.""" + with metadata_path.open("r") as f: + data = json.load(f) + component_type = list(data.keys())[0] + metadata = data[component_type] + return metadata, component_type + + +def get_metadata_file_commit_date(metadata_path: Path, git_repo_path: Path) -> datetime: + """Get the creation date of a metadata.json file from git history. + + This function determines when an application/component was first created by finding + the first commit that introduced its metadata.json file. + + Uses: git log --follow --format=%at --reverse + - --follow: Tracks file through renames + - --format=%at: Returns Unix timestamp + - --reverse: Oldest commits first (so first entry is the creation date) + + Args: + metadata_path: Path to the metadata.json file + git_repo_path: Path to the Git repository root + + Returns: + datetime: The date when the metadata.json was first committed (application creation date) + """ + rel_file_path = str(metadata_path.relative_to(git_repo_path)) + cmd = [ + "git", + "-C", + str(git_repo_path), + "log", + "--follow", + "--format=%at", + "--reverse", + rel_file_path, + ] + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + timestamps = result.stdout.strip().split("\n") + if timestamps and timestamps[0]: + return datetime.fromtimestamp(int(timestamps[0])) + except (subprocess.CalledProcessError, ValueError) as e: + logger.error(f"Error getting creation date for {metadata_path}: {e}") + # Fallback to file modification time if git fails + return datetime.fromtimestamp(metadata_path.stat().st_mtime) + + +def get_recent_source_code_update_date(metadata_path: Path, git_repo_path: Path): + """Get the most recent update date for source code files in a component directory. + + This function checks for recent modifications to source code files (.py, .cpp, .h, .hpp, .cu, .cuh) + in the component directory to determine if the component has been recently updated. + + Args: + metadata_path: Path to the metadata.json file + git_repo_path: Path to the Git repository root + + Returns: + datetime: The date of the most recent source code update, or None if no updates found + """ + # Get the component directory (parent of metadata.json, or parent's parent if in cpp/python subdirs) + component_dir = metadata_path.parent + if component_dir.name in ["cpp", "python"]: + component_dir = component_dir.parent + + # Source code file extensions to check + source_extensions = ["*.py", "*.cpp", "*.h", "*.hpp", "*.cu", "*.cuh", "*.c", "*.cc", "*.cxx"] + + rel_component_dir = str(component_dir.relative_to(git_repo_path)) + + # Build git command to find the most recent commit affecting source files + # Format: git log -1 --format=%at -- ... + patterns = [f"{rel_component_dir}/**/{ext}" for ext in source_extensions] + + # Use git log to find the most recent commit that modified source files + cmd = ["git", "-C", str(git_repo_path), "log", "-1", "--format=%at", "--"] + cmd.extend(patterns) + + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + timestamp_str = result.stdout.strip() + if timestamp_str: + return datetime.fromtimestamp(int(timestamp_str)) + except (subprocess.CalledProcessError, ValueError) as e: + logger.debug(f"No recent source code updates found for {component_dir.name}: {e}") + + return None + + +def format_date(date_str: str) -> str: + """Format a date string in YYYY-MM-DD format to Month DD, YYYY format.""" + try: + year, month, day = date_str.split("-") + months = [ + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + return f"{months[int(month)-1]} {int(day)}, {year}" + except (ValueError, IndexError): + # Return the original string if we can't parse it + return date_str + + +def get_last_modified_date(file_path: Path, git_repo_path: Path) -> str: + """Get the last modified date of a file or directory using git or stat.""" + # Try using git to get the last modified date + rel_file_path = str(file_path.relative_to(git_repo_path)) + cmd = f"git -C {git_repo_path} log -1 --format=%ad --date=short {rel_file_path}".split() + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + git_date = result.stdout.strip() + if git_date: # If we got a valid date from git + return format_date(git_date) + except (subprocess.CalledProcessError, ValueError): + pass + + # Second try: Filesystem stat date + cmd = ["stat", "-c", "%y", str(file_path)] + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + stat_date = result.stdout.split()[0].strip() # Get just the date portion + if stat_date: # If we got a valid date from stat + return format_date(stat_date) + except (subprocess.CalledProcessError, ValueError, IndexError): + logger.error(f"Failed to get modification date for {file_path}") + # Fallback if both methods fail + return "Unknown" + + +def get_file_from_git(file_path: Path, git_ref: str, git_repo_path: Path) -> str: + """Get file content from a specific git revision.""" + try: + rel_file_path = file_path.relative_to(git_repo_path) + cmd = ["git", "-C", str(git_repo_path), "show", f"{git_ref}:{rel_file_path}"] + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + return result.stdout + except (subprocess.CalledProcessError, ValueError) as e: + if isinstance(e, subprocess.CalledProcessError): + logger.error(f"Git error: {e.stderr}") + else: + logger.error(f"Path {file_path} is not within the Git repository") + raise e + + +def extract_image_from_readme(readme_content): + """Extracts the first image from a README file.""" + if not readme_content: + return None + + # Try HTML image tags + html_pattern = r']*src=["\'](.*?)["\'][^>]*>' + html_match = re.search(html_pattern, readme_content, re.IGNORECASE) + if html_match: + return html_match.group(1).strip() + + # Try Markdown image syntax + md_pattern = r"!\[[^\]]*\]\(([^)]+)\)" + md_match = re.search(md_pattern, readme_content, re.IGNORECASE) + if md_match: + return md_match.group(1).strip() + + return None + + +def get_readme_content(component_type, component_name, href=None): + """Reads the README.md content from the local filesystem, handling various path variations. + + Args: + component_type: Type of the component (applications, operators, etc.) + component_name: Name of the component + href: Optional full href path from the app card which may provide a more precise location + + Returns: + Tuple of (readme_content, readme_path) or (None, None) if not found + """ + try: + # Get the Git repository root + git_repo_path = get_git_root() + + # Initialize the list of possible README paths + readme_paths = [] + + # If href is provided, use it as the primary path + if href and "/" in href: + href_path = href.strip("/") + readme_paths.append(git_repo_path / href_path / "README.md") + href_parent = Path(href_path).parent + if str(href_parent) != ".": # Only if parent is not root + readme_paths.append(git_repo_path / href_parent / "README.md") + + readme_paths.extend( + [ + git_repo_path / component_type / component_name / "README.md", + git_repo_path / component_type / component_name / "python" / "README.md", + git_repo_path / component_type / component_name / "cpp" / "README.md", + ] + ) + + # Try each possible path + for path in readme_paths: + if path.exists(): + logger.info(f"Found README at {path}") + with open(path, "r", encoding="utf-8") as f: + return f.read(), path + + # No README found + logger.warning(f"No README found for {component_type}/{component_name} (href: {href})") + return None, None + + except Exception as e: + logger.warning(f"Error reading README for {component_type}/{component_name}: {e}") + return None, None + + +def get_full_image_url(relative_path, readme_path=None): + """Converts a relative image path to a full GitHub URL using the README path for context. + + Args: + relative_path: The relative path to the image from the README + readme_path: Path object pointing to the README file that referenced the image + """ + if relative_path.startswith("./"): + relative_path = relative_path[2:] + if relative_path.startswith(("http://", "https://")): + return relative_path + if readme_path: + readme_dir = readme_path.parent + image_path = (readme_dir / relative_path).resolve() + git_root = get_git_root() + rel_image_path = image_path.relative_to(git_root) + + url = urljoin(get_raw_base_url(), str(rel_image_path)) + try: + response = requests.head(url, timeout=5) + if response.status_code == 200: + return str(url) + else: + logger.warning(f"URL {url} returned status code {response.status_code}") + except requests.RequestException as e: + logger.warning(f"Error checking URL {url}: {e}") + return str(urljoin(get_raw_base_url("main"), str(rel_image_path))) + + logger.warning(f"Using direct URL without context: {relative_path}") + return relative_path + + +def extract_first_sentences(readme_text, num_sentences=3, max_chars=160): + """Extract the first few meaningful sentences from README markdown content. + + Args: + readme_text: The raw markdown content of the README + num_sentences: Number of sentences to extract + max_chars: Maximum character length before truncation + + Returns: + A string with the first few sentences, truncated if necessary + """ + html = markdown.markdown(readme_text, extensions=["markdown.extensions.fenced_code"]) + soup = BeautifulSoup(html, "html.parser") + # Remove code blocks + for code_block in soup.find_all(["pre", "code"]): + code_block.decompose() + # Get all paragraphs + pghs = soup.find_all("p") + # Skip first paragraph if it's very short (likely badges) + start_idx = 1 if pghs and len(pghs) > 1 and len(pghs[0].get_text().strip()) < 40 else 0 + text_content = "" + for p in pghs[start_idx:]: + p_text = p.get_text().strip() + if len(p_text) > 15 and not p_text.startswith(("$", "http")): + text_content += p_text + " " + if len(text_content) > max_chars * 2: + break + # Use nltk if available for better sentence tokenization + try: + import nltk.tokenize + + nltk.download("punkt_tab") + sentences = nltk.tokenize.sent_tokenize(text_content) + except (ImportError, AttributeError): + # Fallback to regex-based approach + sentences = re.split(r"(?<=[.!?])\s+(?=[A-Z])", text_content) + result = " ".join(sentences[:num_sentences]).strip() + + result = re.sub(r"\s+", " ", result) + if len(result) > max_chars: + result = result[: max_chars - 3].rstrip() + "..." + + return result diff --git a/doc/website/scripts/generate_featured_apps.py b/doc/website/scripts/generate_featured_apps.py new file mode 100644 index 0000000000..9409f5e2c9 --- /dev/null +++ b/doc/website/scripts/generate_featured_apps.py @@ -0,0 +1,516 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-Apache2 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Generate featured content HTML based on the most recent metadata.json files for any component type.""" + +import os +import sys +from pathlib import Path + +script_dir = Path(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, str(script_dir)) if str(script_dir) not in sys.path else None + +from common_utils import ( # noqa: E402 + COMPONENT_TYPES, + extract_first_sentences, + extract_image_from_readme, + get_full_image_url, + get_git_root, + get_metadata_file_commit_date, + get_recent_source_code_update_date, + logger, + parse_metadata_file, +) + + +def find_most_recent_metadata_files( + git_repo_path: Path, component_type: str, count: int = 3 +) -> list: + """Find the most recently created/updated metadata.json files. + + Args: + git_repo_path: Path to the Git repository root + component_type: Component type to search for (e.g., 'applications', 'tutorials') + count: Number of recent metadata files to retrieve + + Returns: + List of tuples (metadata_path, datetime) for the most recent unique components + """ + unique_components = {} + component_dir = git_repo_path / component_type + + if not component_dir.exists(): + return [] + + for metadata_path in component_dir.rglob("metadata.json"): + # Skip certain non-regular components based on component type + if component_type == "applications" and any( + t in str(metadata_path) for t in ["datawriter", "operators", "xr_hello_holoscan"] + ): + continue # non regular apps skip. + + component_dir_path = metadata_path.parent + if component_dir_path.name in ["cpp", "python"]: + component_dir_path = component_dir_path.parent + + component_name = component_dir_path.name + # Get the creation date of the metadata.json file from git history + # This uses 'git log --follow --format=%at --reverse' to find the first commit + commit_date = get_metadata_file_commit_date(metadata_path, git_repo_path) + logger.debug( + f"Component '{component_name}' was created on {commit_date.strftime('%Y-%m-%d')}" + ) + + if ( + component_name not in unique_components + or commit_date > unique_components[component_name][1] + ): + unique_components[component_name] = (metadata_path, commit_date) + + result = sorted(unique_components.values(), key=lambda x: x[1], reverse=True) + return result[:count] + + +def find_readme_path(metadata_dir: Path, git_repo_path: Path) -> Path: + """Find the README.md file associated with the component.""" + # First check in the same directory as the metadata file + readme_path = metadata_dir / "README.md" + if readme_path.exists(): + return readme_path + # If not found, search up the directory tree until we reach a component type directory + parent_dir = metadata_dir + while parent_dir.name not in COMPONENT_TYPES and parent_dir != git_repo_path.parent: + readme_path = parent_dir / "README.md" + if readme_path.exists(): + return readme_path + parent_dir = parent_dir.parent + return None + + +def get_component_path(metadata_path: Path, git_repo_path: Path) -> str: + """Generate the relative path for the component documentation link.""" + rel_path = metadata_path.parent.relative_to(git_repo_path) + if rel_path.name in ["cpp", "python"]: + rel_path = rel_path.parent + return f"{rel_path}" + + +def generate_featured_component_card( + metadata_path: Path, git_repo_path: Path, commit_date=None +) -> str: + """Generate HTML for a featured component card.""" + metadata, component_type = parse_metadata_file(metadata_path) + name = metadata.get("name", metadata_path.parent.name) + description = metadata.get("description", "") + tags = metadata.get("tags", []) + logger.info(f"Generating featured {component_type} card for {name}") + + readme_path = find_readme_path(metadata_path.parent, git_repo_path) + readme_content = "" + image_url = None + + # If no description in metadata, look for first paragraph in README + if readme_path and readme_path.exists(): + with readme_path.open("r") as f: + readme_text = f.read() + readme_content = readme_text + if not description: + description = extract_first_sentences(readme_text, 1, max_chars=120) + if readme_content: + image_path = extract_image_from_readme(readme_content) + if image_path: + image_url = get_full_image_url(image_path, readme_path) + logger.info(f"Found image in README for {name}: {image_url}") + component_path = get_component_path(metadata_path, git_repo_path) + + # Use the found image URL or fall back to default + if not image_url: + logger.info(f"No image found in README for {name}, using default") + image_url = f"/holohub/assets/images/{component_type}_default.png" + + # Check if this is a recent contribution (within 45 days) + # commit_date is the date when metadata.json was first committed to git + from datetime import datetime + + is_recent_attr = "" + badge_html = "" + is_new = False + + if commit_date: + days_old = (datetime.now() - commit_date).days + if days_old <= 45: + is_recent_attr = ' data-recent="true"' + badge_html = 'New' + is_new = True + logger.info( + f"✓ Marking '{name}' as NEW - created {days_old} days ago ({commit_date.strftime('%Y-%m-%d')})" + ) + else: + logger.debug( + f" '{name}' is {days_old} days old (created {commit_date.strftime('%Y-%m-%d')})" + ) + + # Check for recent source code updates (within 30 days) - only if not already marked as "New" + if not is_new: + update_date = get_recent_source_code_update_date(metadata_path, git_repo_path) + if update_date: + days_since_update = (datetime.now() - update_date).days + if days_since_update <= 30: + is_recent_attr = ' data-updated="true"' + badge_html = 'Updated' + logger.info( + f"✓ Marking '{name}' as UPDATED - source code modified {days_since_update} days ago ({update_date.strftime('%Y-%m-%d')})" + ) + else: + logger.debug( + f" '{name}' last updated {days_since_update} days ago ({update_date.strftime('%Y-%m-%d')})" + ) + + # Generate tags HTML (hide first tag visually but keep it for filtering) + tags_html = "" + if tags: + # First tag (category) - hidden but kept for filtering, all lowercase with spaces replaced by hyphens + tag_items = [] + first_tag_display = tags[0].lower() + first_tag_href = tags[0].lower().replace(" ", "-") + tag_items.append( + f'' + ) + + # Remaining tags - visible, all lowercase with spaces replaced by hyphens + for tag in tags[1:]: + tag_display = tag.lower() + tag_href = tag.lower().replace(" ", "-") + tag_items.append( + f'{tag_display}' + ) + + tags_html = f'' + + # Generate card HTML with the found image URL + card_html = f""" +
+
+ {badge_html} + {name} +

{name}

+

{description}

+ {tags_html} + +
+
""" + + return card_html + + +def generate_featured_content_html(component_type: str, output_path: str, count: int = 3): + """Generate the featured content HTML for specified component_type. + + Args: + component_type: Component type to feature (e.g., 'applications', 'tutorials') + output_path: Path to the output HTML file + count: Number of components to feature + """ + git_repo_path = get_git_root() + output_file = Path(output_path) / f"featured-{component_type}.html" + recent_metadata_files = find_most_recent_metadata_files(git_repo_path, component_type, count) + if not recent_metadata_files: + logger.warning( + f"No metadata files found to feature for {component_type} in {git_repo_path}" + ) + return + cards = [] + for metadata_path, commit_date in recent_metadata_files: + # Skip if '/template/' is in the directory path + if "/template/" in str(metadata_path).replace("\\", "/") + "/": + continue + card_html = generate_featured_component_card(metadata_path, git_repo_path, commit_date) + cards.append(card_html) + cards_html = "".join(cards) + + # Generate browse links for each component type + browse_links = [] + browse_links.append( + f""" + + Browse all {component_type} (#{component_type}) + """ + ) + + browse_links_html = "".join(browse_links) + featured_content_html = f""" + {cards_html} +
+ {browse_links_html} +
+""" + with open(output_file, "w") as f: + f.write(featured_content_html) + logger.info( + f"Generated featured content HTML with {len(recent_metadata_files)} components for {component_type}" + ) + + +def get_unique_first_tags(git_repo_path: Path, component_type: str) -> dict: + """Get unique first tags from all metadata.json files for a component type with counts. + + This function counts how many applications have each tag (as first tag for categories, + but counts all occurrences of each tag to match the filtering behavior). + Applications with both cpp and python implementations are counted only once. + + Args: + git_repo_path: Path to the Git repository root + component_type: Component type to search (e.g., 'applications', 'tutorials') + + Returns: + Dictionary mapping first tag names to their counts (counting all apps with that tag anywhere) + """ + # First, collect all first tags (categories) + first_tags = set() + component_dir = git_repo_path / component_type + + if not component_dir.exists(): + return {} + + # Map plural component_type to singular key in metadata + component_key_map = { + "applications": "application", + "operators": "operator", + "tutorials": "tutorial", + "benchmarks": "benchmark", + "workflows": "workflow", + } + expected_key = component_key_map.get(component_type, component_type.rstrip("s")) + + # First pass: collect all first tags (these are the categories) + for metadata_path in component_dir.rglob("metadata.json"): + # Skip template files + if "template" in str(metadata_path): + continue + + try: + metadata, parsed_type = parse_metadata_file(metadata_path) + + # Skip if this metadata doesn't match the expected component type + if parsed_type != expected_key: + continue + + tags = metadata.get("tags", []) + if tags and len(tags) > 0: + first_tags.add(tags[0]) + except Exception as e: + logger.warning(f"Error reading {metadata_path}: {e}") + + # Helper function to normalize app names for deduplication + def normalize_app_name(name: str) -> str: + """Normalize app name by removing common variations.""" + # Convert to lowercase, remove extra spaces, and remove common words that might differ + normalized = name.lower().strip() + # Remove " and " to handle cases like "Tool and AR" vs "Tool AR" + normalized = normalized.replace(" and ", " ") + # Remove multiple spaces + normalized = " ".join(normalized.split()) + return normalized + + # Second pass: for each category (first tag), count unique apps (by normalized name) that have that tag anywhere + tag_counts = {} + for category in first_tags: + # Use a set to track unique app names for this category + unique_app_names = set() + + for metadata_path in component_dir.rglob("metadata.json"): + # Skip template files + if "template" in str(metadata_path): + continue + + try: + metadata, parsed_type = parse_metadata_file(metadata_path) + + # Skip if this metadata doesn't match the expected component type + if parsed_type != expected_key: + continue + + tags = metadata.get("tags", []) + # Check if the category appears anywhere in the tags + if category in tags: + app_name = metadata.get("name", "") + if app_name: + # Use normalized name for deduplication + normalized_name = normalize_app_name(app_name) + unique_app_names.add(normalized_name) + except Exception: + pass + + tag_counts[category] = len(unique_app_names) + + # Return sorted by tag name + return dict(sorted(tag_counts.items())) + + +def generate_component_html(component_type: str, output_path: str): + """Generate the featured content HTML for specified component types. + + Args: + component_type: Component type to feature (e.g., 'applications', 'tutorials') + output_path: Path to the output HTML file + count: Number of components to feature + """ + git_repo_path = get_git_root() + output_file = Path(output_path) / f"{component_type}.html" + recent_metadata_files = find_most_recent_metadata_files(git_repo_path, component_type, 500) + if not recent_metadata_files: + logger.warning( + f"No metadata files found to feature for {component_type} in {git_repo_path}" + ) + return + + # Map plural component_type to singular key in metadata + component_key_map = { + "applications": "application", + "operators": "operator", + "tutorials": "tutorial", + "benchmarks": "benchmark", + "workflows": "workflow", + } + expected_key = component_key_map.get(component_type, component_type.rstrip("s")) + + # Helper function to normalize app names for deduplication + def normalize_app_name(name: str) -> str: + """Normalize app name by removing common variations.""" + # Convert to lowercase, remove extra spaces, and remove common words that might differ + normalized = name.lower().strip() + # Remove " and " to handle cases like "Tool and AR" vs "Tool AR" + normalized = normalized.replace(" and ", " ") + # Remove multiple spaces + normalized = " ".join(normalized.split()) + return normalized + + cards = [] + unique_app_names = set() # Track unique app names for total count + + for metadata_path, commit_date in recent_metadata_files: + # Skip if '/template/' is in the directory path + if "/template/" in str(metadata_path).replace("\\", "/") + "/": + continue + + # Verify this metadata matches the expected component type + try: + metadata, parsed_type = parse_metadata_file(metadata_path) + if parsed_type != expected_key: + logger.debug( + f"Skipping {metadata_path}: expected {expected_key}, got {parsed_type}" + ) + continue + + # Track unique app name for counting (normalized to avoid counting cpp/python separately) + app_name = metadata.get("name", "") + if app_name: + normalized_name = normalize_app_name(app_name) + unique_app_names.add(normalized_name) + except Exception as e: + logger.warning(f"Error parsing {metadata_path}: {e}") + continue + + card_html = generate_featured_component_card(metadata_path, git_repo_path, commit_date) + cards.append(card_html) + cards_html = "".join(cards) + + # Generate browse links for each component type + content_html = f""" + {cards_html} +""" + with open(output_file, "w") as f: + f.write(content_html) + logger.info( + f"Generated featured content HTML with {len(cards)} components for {component_type}" + ) + + # Generate navigation HTML based on unique first tags + tag_counts = get_unique_first_tags(git_repo_path, component_type) + if tag_counts: + nav_output_file = Path(output_path) / f"{component_type}_nav.html" + # Total count is the number of unique application names (not counting cpp/python separately) + total_count = len(unique_app_names) + generate_navigation_html(tag_counts, component_type, nav_output_file, total_count) + + +def generate_navigation_html( + tag_counts: dict, component_type: str, output_file: Path, total_count: int +): + """Generate navigation HTML for component categories. + + Args: + tag_counts: Dictionary mapping tag names to their counts + component_type: Component type (e.g., 'applications') + output_file: Path to output navigation HTML file + total_count: Total number of components + """ + nav_items = [ + f'All ({total_count})' + ] + + # Map long tag names to shorter display names + tag_display_map = { + "Computer Vision and Perception": "Computer Vision", + "Natural Language and Conversational AI": "NLP & Conversational AI", + "Networking and Distributed Computing": "Networking", + "Tools And Other Specialized Applications": "Tools & Specialized", + } + + for tag, count in tag_counts.items(): + tag_lower = tag.lower() + tag_href = tag_lower.replace(" ", "-") + display_name = tag_display_map.get(tag, tag) + + nav_item = f'{display_name} ({count})' + nav_items.append(nav_item) + + nav_html = "\n ".join(nav_items) + + with open(output_file, "w") as f: + f.write(nav_html) + + logger.info(f"Generated navigation HTML with {len(tag_counts)} categories for {component_type}") + + +def main(): + """Main function that generates featured content for operators, applications, benchmarks, and tutorials.""" + # Define the component types and their corresponding output files + component_configs = [ + "operators", + "applications", + "benchmarks", + "tutorials", + "workflows", + ] + + # Validate output file path + output_path = "overrides/_pages" + if not Path(output_path).parent.exists(): + logger.error(f"Output directory does not exist: {output_path.parent}") + return + + # Generate featured content for each component type + for component_type in component_configs: + logger.info(f"Generating featured {component_type} HTML...") + + generate_featured_content_html(component_type, output_path, 3) + generate_component_html(component_type, output_path) + + logger.info("Finished generating all featured content HTML files") + + +if __name__ in {"__main__", ""}: + main() diff --git a/doc/website/scripts/generate_pages.py b/doc/website/scripts/generate_pages.py index 7f3cc825da..4ab5fce4ea 100644 --- a/doc/website/scripts/generate_pages.py +++ b/doc/website/scripts/generate_pages.py @@ -140,7 +140,9 @@ def create_frontmatter(metadata: dict, archive_version: str = None) -> str: """ -def create_metadata_header(metadata: dict, last_modified: str, archive_version: str = None) -> str: +def create_metadata_header( + metadata: dict, last_modified: str, archive_version: str = None, version_selector_html: str = "" +) -> str: """Create the metadata header for the documentation page. This function generates a formatted metadata header with icons and labels for display @@ -151,6 +153,7 @@ def create_metadata_header(metadata: dict, last_modified: str, archive_version: metadata (dict): Dictionary containing the application metadata last_modified (str): String representing the last modification date archive_version (str, optional): Version string for archived documentation. Default: None. + version_selector_html (str, optional): HTML for version selector dropdown. Default: "". Returns: str: Formatted HTML-like string containing the metadata header with icons and labels @@ -186,7 +189,11 @@ def create_metadata_header(metadata: dict, last_modified: str, archive_version: line_str_inputs.append(("clock", "Last modified", last_modified)) - if archive_version: + # Add version line - either with dropdown selector or plain text + if version_selector_html: + # Use the version selector instead of plain text + line_str_inputs.append(("tag", "Version", version_selector_html)) + elif archive_version: line_str_inputs.append(("history", "Archive version", archive_version)) else: line_str_inputs.append(("tag", "Latest version", version)) @@ -377,6 +384,91 @@ def extract_markdown_header(md_txt: str) -> tuple[str, str, str] | None: return None +def create_version_selector_html( + current_version: str, archives: dict, dest_dir: Path, latest_version: str = None +) -> tuple[str, str]: + """Create HTML and JavaScript for version selector dropdown. + + Args: + current_version: The current version being displayed ("latest" or version number) + archives: Dictionary mapping version names to git references + dest_dir: Destination directory for the current page (to calculate relative paths) + latest_version: The version string from metadata.json for the latest version + + Returns: + Tuple of (dropdown_html, script_html) - dropdown for inline use, script for page footer + """ + # Build the version options + options = [] + + # Add "latest" option with version number + is_latest = current_version == "latest" + selected_latest = " selected" if is_latest else "" + latest_label = f"latest ({latest_version})" if latest_version else "latest" + options.append(f'') + + # Add archived versions (sorted in reverse order) + for version in sorted(archives.keys(), reverse=True): + is_selected = current_version == version + selected_attr = " selected" if is_selected else "" + options.append(f'') + + options_html = "\n".join(options) + + dropdown_html = f'' + + script_html = """ + +""" + + return dropdown_html, script_html + + def patch_header(readme_text: str, url: str, metadata_header: str) -> str: """Finds the main header in the readme_text, replaces it with a linked version, and inserts the metadata_header. @@ -424,6 +516,7 @@ def create_page( last_modified: str, git_repo_path: Path, archive: dict = {"version": None, "git_ref": "main"}, + archives: dict = None, ): """Create a documentation page, handling both versioned and non-versioned cases. @@ -435,6 +528,7 @@ def create_page( git_repo_path: Path to the Git repository root archive: Dictionary of version label and git reference strings - if provided, links are versioned accordingly + archives: Dictionary of all archives (for version selector dropdown) Returns: Generated page content as string """ @@ -454,8 +548,20 @@ def create_page( base_url, ) - # Patch the header (finds header, links it, inserts metadata) - metadata_header = create_metadata_header(metadata, last_modified, archive_version) + # Generate version selector HTML if archives are present + version_selector_html = "" + version_script_html = "" + if archives: + current_version = archive_version if archive_version else "latest" + latest_version = metadata.get("version") + version_selector_html, version_script_html = create_version_selector_html( + current_version, archives, relative_dir, latest_version + ) + + # Patch the header (finds header, links it, inserts metadata with version selector) + metadata_header = create_metadata_header( + metadata, last_modified, archive_version, version_selector_html + ) encoded_rel_dir = _encode_path_for_url(relative_dir) url = f"{base_url}/{encoded_rel_dir}" readme_text = patch_header(readme_text, url, metadata_header) @@ -463,22 +569,144 @@ def create_page( # Append the text to the output output_text += readme_text + # Append the version selector script at the end + if version_script_html: + output_text += "\n" + version_script_html + # Write the mkdocs page with mkdocs_gen_files.open(dest_path, "w") as dest_file: dest_file.write(output_text) -def parse_metadata_path(metadata_path: Path, components, git_repo_path: Path) -> None: +def create_title_from_readme_title(readme_title: str, suffix: str = "") -> str: + """Create a cleaned title from the README title by removing common component type suffixes. + + Args: + readme_title: The title extracted from the README + suffix: Optional suffix to append (e.g., language identifier) + + Returns: + Cleaned title with suffix appended + """ + title = re.sub( + r"(Operator|Operators|Op|Application|App|Workflow)\b", + "", + readme_title, + flags=re.IGNORECASE, + ).strip() + return title + suffix + + +def process_archived_versions( + archives: dict, + metadata: dict, + metadata_path: Path, + readme_path: Path, + dest_dir: Path, + project_type: str, + git_repo_path: Path, +) -> str: + """Process archived versions of documentation and return nav content. + + Args: + archives: Dictionary mapping version names to git references + metadata: Current metadata dictionary + metadata_path: Path to the metadata file + readme_path: Path to the README file (may be language-specific or parent) + dest_dir: Destination directory for generated pages + project_type: Type of project (operator, application, etc.) + git_repo_path: Path to the Git repository root + + Returns: + Navigation content string for archived versions + """ + logger.info(f"Processing versioned documentation for {str(dest_dir)}") + + nav_content = """ +nav: + - README.md +""" + + for version in sorted(archives.keys(), reverse=True): + git_ref = archives[version] + + # Get metadata and README from the specified git reference + archived_metadata_content = get_file_from_git(metadata_path, git_ref, git_repo_path) + archived_readme_content = get_file_from_git(readme_path, git_ref, git_repo_path) + if not archived_metadata_content or not archived_readme_content: + logger.error(f"Failed to retrieve archived content for {dest_dir} at {git_ref}") + continue + + # Parse the archived metadata + try: + archived_metadata = json.loads(archived_metadata_content) + except json.JSONDecodeError: + logger.error(f"Failed to parse archived metadata for {dest_dir.name} at {git_ref}") + continue + archived_metadata = archived_metadata[project_type] + + # Get commit date as last modified + repo_str = str(git_repo_path) + cmd = [ + "git", + "-C", + repo_str, + "show", + "-s", + "--format=%ad", + "--date=short", + git_ref, + ] + archive_last_modified = subprocess.run( + cmd, capture_output=True, text=True, check=True + ).stdout.strip() + archive_last_modified = format_date(archive_last_modified) + + # Create the archived version content + archive_dest_path = dest_dir / f"{version}.md" + create_page( + archived_metadata, + archived_readme_content, + archive_dest_path, + archive_last_modified, + git_repo_path, + archive={"version": version, "git_ref": git_ref}, + archives=archives, + ) + + # Add archives to nav file + nav_content += f' - "{version}": {version}.md\n' + + return nav_content + + +def write_nav_file(nav_path: Path, nav_content: str) -> None: + """Write navigation file content to the specified path. + + Args: + nav_path: Path where the navigation file should be written + nav_content: Content to write to the navigation file + """ + with mkdocs_gen_files.open(nav_path, "w") as nav_file: + nav_file.write(nav_content) + + +def parse_metadata_path( + metadata_path: Path, components, git_repo_path: Path, processed_parent_readmes=None +) -> None: """Copy README file from a sub-package to the user guide's developer guide directory. Args: metadata_path: Path to the metadata file components: Dictionary tracking unique components git_repo_path: Path to the Git repository root + processed_parent_readmes: Set tracking which parent READMEs have been processed Returns: None """ + if processed_parent_readmes is None: + processed_parent_readmes = set() # Disable application with {{ in the name if "{{" in str(metadata_path): return @@ -548,22 +776,14 @@ def parse_metadata_path(metadata_path: Path, components, git_repo_path: Path) -> suffix = f" ({suffix})" logger.debug(f"suffix: {suffix}") - # Create the title - # strip the common name from the end of the title ("Operator", "Operators", "Op", "Application", "App", "Workflow", "Benchmark") - title = ( - re.sub( - r"(Operator|Operators|Op|Application|App|Workflow)\b", - "", - readme_title, - flags=re.IGNORECASE, - ).strip() - + suffix - ) + title = create_title_from_readme_title(readme_title, suffix) # Generate page dest_path = dest_dir / "README.md" last_modified = get_last_modified_date(metadata_path, git_repo_path) - create_page(metadata, readme_text, dest_path, last_modified, git_repo_path) + # Check for archives in metadata for version selector + archives = metadata["archives"] if "archives" in metadata else None + create_page(metadata, readme_text, dest_path, last_modified, git_repo_path, archives=archives) # Initialize nav file content to set title nav_path = dest_dir / ".nav.yml" @@ -574,57 +794,75 @@ def parse_metadata_path(metadata_path: Path, components, git_repo_path: Path) -> # Check for archives in metadata archives = metadata["archives"] if "archives" in metadata else None if archives: - logger.info(f"Processing versioned documentation for {str(dest_dir)}") - - # List the current version first - nav_content += """ -nav: - - README.md -""" - - for version in sorted(archives.keys(), reverse=True): - git_ref = archives[version] - - # Get metadata and README from the specified git reference - archived_metadata_content = get_file_from_git(metadata_path, git_ref, git_repo_path) - archived_readme_content = get_file_from_git(readme_path, git_ref, git_repo_path) - if not archived_metadata_content or not archived_readme_content: - logger.error(f"Failed to retrieve archived content for {dest_dir} at {git_ref}") - continue + nav_content += process_archived_versions( + archives, metadata, metadata_path, readme_path, dest_dir, project_type, git_repo_path + ) - # Parse the archived metadata - try: - archived_metadata = json.loads(archived_metadata_content) - except json.JSONDecodeError: - logger.error(f"Failed to parse archived metadata for {dest_dir.name} at {git_ref}") - return - archived_metadata = archived_metadata[project_type] - - # Get commit date as last modified - repo_str = str(git_repo_path) - cmd = ["git", "-C", repo_str, "show", "-s", "--format=%ad", "--date=short", git_ref] - archive_last_modified = subprocess.run( - cmd, capture_output=True, text=True, check=True - ).stdout.strip() - archive_last_modified = format_date(archive_last_modified) - - # Create the archived version content - archive_dest_path = dest_dir / f"{version}.md" + # Write nav file + write_nav_file(nav_path, nav_content) + + # If we're in a language-specific directory (cpp/python) and processed a language-specific README, + # also check if there's a parent README to process as a language-agnostic page + if ( + metadata_dir.name in ["cpp", "python"] + and readme_dir == metadata_dir + and nbr_language_dirs > 1 + ): + + parent_readme_path = language_agnostic_dir / "README.md" + parent_readme_key = str(parent_readme_path.relative_to(git_repo_path)) + + # Only process if parent README exists and hasn't been processed yet + if parent_readme_path.exists() and parent_readme_key not in processed_parent_readmes: + processed_parent_readmes.add(parent_readme_key) + logger.info(f"Also processing parent README: {parent_readme_key}") + + # Read parent README + with parent_readme_path.open("r") as parent_readme_file: + parent_readme_text = parent_readme_file.read() + + # Extract title from parent README + parent_readme_title = metadata["name"] + parent_header_info = extract_markdown_header(parent_readme_text) + if parent_header_info: + parent_readme_title = parent_header_info[1] + + # Create title without language suffix (language-agnostic) + parent_title = create_title_from_readme_title(parent_readme_title) + + # Generate parent page + parent_dest_dir = language_agnostic_dir.relative_to(git_repo_path) + parent_dest_path = parent_dest_dir / "README.md" + parent_last_modified = get_last_modified_date(metadata_path, git_repo_path) create_page( - archived_metadata, - archived_readme_content, - archive_dest_path, - archive_last_modified, + metadata, + parent_readme_text, + parent_dest_path, + parent_last_modified, git_repo_path, - archive={"version": version, "git_ref": git_ref}, + archives=archives, ) - # Add archives to nav file - nav_content += f' - "{version}": {version}.md\n' + # Write parent nav file + parent_nav_path = parent_dest_dir / ".nav.yml" + parent_nav_content = f""" +title: "{parent_title}" +""" - # Write nav file - with mkdocs_gen_files.open(nav_path, "w") as nav_file: - nav_file.write(nav_content) + # Check for archives in metadata (parent page uses same metadata) + if archives: + parent_nav_content += process_archived_versions( + archives, + metadata, + metadata_path, + parent_readme_path, + parent_dest_dir, + project_type, + git_repo_path, + ) + + # Write parent nav file + write_nav_file(parent_nav_path, parent_nav_content) def generate_pages() -> None: @@ -652,6 +890,9 @@ def generate_pages() -> None: # Initialize map of projects/component per type components = {key: set() for key in COMPONENT_TYPES} + # Track processed parent READMEs to avoid duplicates + processed_parent_readmes = set() + for component_type in COMPONENT_TYPES: component_dir = src_dir / component_type if not component_dir.exists(): @@ -661,7 +902,9 @@ def generate_pages() -> None: # Parse the metadata.json files for metadata_path in component_dir.rglob("metadata.json"): try: - parse_metadata_path(metadata_path, components, git_repo_path) + parse_metadata_path( + metadata_path, components, git_repo_path, processed_parent_readmes + ) except Exception: logger.error(f"Failed to process {metadata_path}:\n{traceback.format_exc()}") @@ -676,18 +919,32 @@ def generate_pages() -> None: logger.debug(f"Components: {components}") - # Write the home page - homefile_path = website_src_dir / "docs" / "index.md" - with homefile_path.open("r") as home_file: - home_text = home_file.read() - - # Replace the number of components in the home page - for component_type in COMPONENT_TYPES: - nbr_components = len(components[component_type]) - home_text = home_text.replace(f"#{component_type}", str(nbr_components)) + # Update all featured component pages with component counts + featured_pages = [ + "featured-operators.html", + "featured-applications.html", + "featured-benchmarks.html", + "featured-tutorials.html", + ] - with mkdocs_gen_files.open("index.md", "w") as index_file: - index_file.write(home_text) + for featured_page in featured_pages: + homefile_path = website_src_dir / "overrides" / "_pages" / featured_page + if homefile_path.exists(): + with homefile_path.open("r+") as home_file: + home_text = home_file.read() + + # Replace the number of components in the featured page + for component_type in COMPONENT_TYPES: + nbr_components = len(components[component_type]) + home_text = home_text.replace(f"#{component_type}", str(nbr_components)) + + # Write the updated content back to the file + home_file.seek(0) # Go to beginning + home_file.truncate() # Clear all content + home_file.write(home_text) + logger.info(f"Updated component counts in {featured_page}") + else: + logger.warning(f"Featured page not found: {featured_page}") # Write explicit navigation order for the root nav_content = """ diff --git a/operators/apriltag_detector/metadata.json b/operators/apriltag_detector/metadata.json index 862783699d..d9c974f37e 100644 --- a/operators/apriltag_detector/metadata.json +++ b/operators/apriltag_detector/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Camera"], + "tags": ["Image Processing", "Camera"], "ranking": 1, "requirements": {} } diff --git a/operators/cvcuda_holoscan_interop/metadata.json b/operators/cvcuda_holoscan_interop/metadata.json index e3ed3f8e74..7ade8e8ab4 100644 --- a/operators/cvcuda_holoscan_interop/metadata.json +++ b/operators/cvcuda_holoscan_interop/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["CV CUDA", "Computer Vision and Perception"], + "tags": ["Image Processing", "CV CUDA", "Computer Vision and Perception"], "ranking": 1, "requirements": { "data": [ diff --git a/operators/dds/base/metadata.json b/operators/dds/base/metadata.json index 15de69a011..709993abd1 100644 --- a/operators/dds/base/metadata.json +++ b/operators/dds/base/metadata.json @@ -20,7 +20,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["DDS", "RTI Connext"], + "tags": ["Networking and Distributed Computing", "DDS", "RTI Connext"], "ranking": 2, "requirements": { "packages": [ diff --git a/operators/dds/dds_shapes_subscriber/metadata.json b/operators/dds/dds_shapes_subscriber/metadata.json index d6381876ca..b67d82199d 100644 --- a/operators/dds/dds_shapes_subscriber/metadata.json +++ b/operators/dds/dds_shapes_subscriber/metadata.json @@ -20,7 +20,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["DDS", "RTI Connext"], + "tags": ["Networking and Distributed Computing", "DDS", "RTI Connext"], "ranking": 2, "requirements": { "packages": [ diff --git a/operators/dds/metadata.json b/operators/dds/metadata.json index 4c01f6a7df..e669669817 100644 --- a/operators/dds/metadata.json +++ b/operators/dds/metadata.json @@ -1,7 +1,7 @@ { "operator": { "name": "DDS Operators", - "description": "DDS operators.", + "description": "Data Distribution Service (DDS) operators for RTI Connext, enabling real-time, scalable, and reliable data exchange in distributed systems using the DDS middleware standard.", "authors": [ { "name": "Ian Stewart", @@ -20,7 +20,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["DDS", "RTI Connext"], + "tags": ["Networking and Distributed Computing", "DDS", "RTI Connext"], "ranking": 2, "requirements": {} } diff --git a/operators/dds/video/dds_video_publisher/metadata.json b/operators/dds/video/dds_video_publisher/metadata.json index 789ad41e44..06855f3310 100644 --- a/operators/dds/video/dds_video_publisher/metadata.json +++ b/operators/dds/video/dds_video_publisher/metadata.json @@ -1,7 +1,7 @@ { "operator": { "name": "DDSVideoPublisherOp", - "description": "Publishes video frames as a DDS topic", + "description": "Publishes video frames as a Data Distribution Service (DDS) topic", "authors": [ { "name": "Ian Stewart", @@ -20,7 +20,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["DDS", "RTI Connext", "Video"], + "tags": ["Networking and Distributed Computing", "DDS", "RTI Connext", "Video"], "ranking": 2, "requirements": { "packages": [ diff --git a/operators/dds/video/dds_video_subscriber/metadata.json b/operators/dds/video/dds_video_subscriber/metadata.json index 4e9658dfeb..086ed96f73 100644 --- a/operators/dds/video/dds_video_subscriber/metadata.json +++ b/operators/dds/video/dds_video_subscriber/metadata.json @@ -20,7 +20,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["DDS", "RTI Connext", "Video"], + "tags": ["Networking and Distributed Computing", "DDS", "RTI Connext", "Video"], "ranking": 2, "requirements": { "packages": [ diff --git a/operators/deidentification/pixelator/metadata.json b/operators/deidentification/pixelator/metadata.json index 444002379d..2eec777292 100644 --- a/operators/deidentification/pixelator/metadata.json +++ b/operators/deidentification/pixelator/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Deidentification", "Image Processing", "Anonymization"], + "tags": ["Image Processing", "Deidentification", "Anonymization"], "ranking": 1, "requirements": { "python-packages": { diff --git a/operators/ehr_query_llm/fhir_client_op/metadata.json b/operators/ehr_query_llm/fhir_client_op/metadata.json index 82a2b4c458..072e4ca9c6 100644 --- a/operators/ehr_query_llm/fhir_client_op/metadata.json +++ b/operators/ehr_query_llm/fhir_client_op/metadata.json @@ -27,6 +27,7 @@ "aarch64" ], "tags": [ + "Networking and Distributed Computing", "LLM", "Healthcare Interop" ], diff --git a/operators/ehr_query_llm/fhir_resource_sanitizer_op/metadata.json b/operators/ehr_query_llm/fhir_resource_sanitizer_op/metadata.json index e8b44d78fe..9f5e491326 100644 --- a/operators/ehr_query_llm/fhir_resource_sanitizer_op/metadata.json +++ b/operators/ehr_query_llm/fhir_resource_sanitizer_op/metadata.json @@ -27,6 +27,7 @@ "aarch64" ], "tags": [ + "Networking and Distributed Computing", "LLM", "Healthcare Interop" ], diff --git a/operators/ehr_query_llm/metadata.json b/operators/ehr_query_llm/metadata.json index 5b6ae16baf..0b8b783255 100644 --- a/operators/ehr_query_llm/metadata.json +++ b/operators/ehr_query_llm/metadata.json @@ -24,7 +24,7 @@ "x86_64", "aarch64" ], - "tags": ["LLM", "Healthcare Interop"], + "tags": ["Networking and Distributed Computing", "LLM", "Healthcare Interop"], "ranking": 2, "requirements": { "python-packages": { diff --git a/operators/ehr_query_llm/zero_mq_publisher_op/metadata.json b/operators/ehr_query_llm/zero_mq_publisher_op/metadata.json index f079f0cae0..198e6bd451 100644 --- a/operators/ehr_query_llm/zero_mq_publisher_op/metadata.json +++ b/operators/ehr_query_llm/zero_mq_publisher_op/metadata.json @@ -27,6 +27,7 @@ "aarch64" ], "tags": [ + "Networking and Distributed Computing", "LLM", "Healthcare Interop" ], diff --git a/operators/ehr_query_llm/zero_mq_subscriber_op/metadata.json b/operators/ehr_query_llm/zero_mq_subscriber_op/metadata.json index ae47799dba..e623658f01 100644 --- a/operators/ehr_query_llm/zero_mq_subscriber_op/metadata.json +++ b/operators/ehr_query_llm/zero_mq_subscriber_op/metadata.json @@ -27,6 +27,7 @@ "aarch64" ], "tags": [ + "Networking and Distributed Computing", "LLM", "Healthcare Interop" ], diff --git a/operators/gamma_correction/metadata.json b/operators/gamma_correction/metadata.json index 6b90788737..6b41febe29 100644 --- a/operators/gamma_correction/metadata.json +++ b/operators/gamma_correction/metadata.json @@ -27,11 +27,9 @@ "aarch64" ], "tags": [ + "Image Processing", "gamma", - "correction", - "image", - "processing", - "gamma" + "correction" ], "ranking": 1, "requirements": {} diff --git a/operators/grpc_operators/metadata.json b/operators/grpc_operators/metadata.json index 127b47bbd1..e5cb1a0b9f 100644 --- a/operators/grpc_operators/metadata.json +++ b/operators/grpc_operators/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["gRPC", "Visualization"], + "tags": ["Networking and Distributed Computing", "gRPC", "Visualization"], "ranking": 1, "requirements": {} } diff --git a/operators/holoscan_ros2/metadata.json b/operators/holoscan_ros2/metadata.json index 2cde444e0e..70f0e99746 100644 --- a/operators/holoscan_ros2/metadata.json +++ b/operators/holoscan_ros2/metadata.json @@ -13,7 +13,7 @@ }, "language": ["C++", "Python"], "platforms": ["x86_64", "aarch64"], - "tags": ["ROS2", "Bridge", "Interface"], + "tags": ["Robotics", "ROS2", "Bridge", "Interface"], "holoscan_sdk": { "minimum_required_version": "3.0.0", "tested_versions": [ diff --git a/operators/iio_controller/metadata.json b/operators/iio_controller/metadata.json index 113c1606c1..b9574752bb 100644 --- a/operators/iio_controller/metadata.json +++ b/operators/iio_controller/metadata.json @@ -19,7 +19,7 @@ ] }, "platforms": ["x86_64"], - "tags": ["iio", "libiio"], + "tags": ["Signal Processing", "iio", "libiio"], "ranking": 4, "requirements": { "libraries": [{ diff --git a/operators/lstm_tensor_rt_inference/metadata.json b/operators/lstm_tensor_rt_inference/metadata.json index 339ea33ebe..629730f719 100644 --- a/operators/lstm_tensor_rt_inference/metadata.json +++ b/operators/lstm_tensor_rt_inference/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["LSTM", "TensorRT"], + "tags": ["Inference", "LSTM", "TensorRT"], "ranking": 1, "requirements": {} } diff --git a/operators/medical_imaging/dicom_series_to_volume_operator/metadata.json b/operators/medical_imaging/dicom_series_to_volume_operator/metadata.json index ea2b5c61e9..d14ac4d369 100644 --- a/operators/medical_imaging/dicom_series_to_volume_operator/metadata.json +++ b/operators/medical_imaging/dicom_series_to_volume_operator/metadata.json @@ -18,7 +18,7 @@ "tested_versions": ["2.2.0", "3.2.0"] }, "platforms": ["x86_64", "aarch64"], - "tags": ["Medical Imaging", "DICOM", "Volume"], + "tags": ["Converter", "Medical Imaging", "DICOM", "Volume"], "ranking": 2, "requirements": {} } diff --git a/operators/medical_imaging/stl_conversion_operator/metadata.json b/operators/medical_imaging/stl_conversion_operator/metadata.json index e6e350a476..c787e464b3 100644 --- a/operators/medical_imaging/stl_conversion_operator/metadata.json +++ b/operators/medical_imaging/stl_conversion_operator/metadata.json @@ -18,7 +18,7 @@ "tested_versions": ["2.2.0", "3.2.0"] }, "platforms": ["x86_64", "aarch64"], - "tags": ["Medical Imaging", "STL", "Conversion"], + "tags": ["Converter", "Medical Imaging", "STL"], "ranking": 2, "requirements": {} } diff --git a/operators/mesh_to_usd/metadata.json b/operators/mesh_to_usd/metadata.json index 5bd5129a96..6677d476bd 100644 --- a/operators/mesh_to_usd/metadata.json +++ b/operators/mesh_to_usd/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["OpenUSD", "STL"], + "tags": ["Converter", "OpenUSD", "STL"], "ranking": 2, "requirements": {} } diff --git a/operators/npp_filter/metadata.json b/operators/npp_filter/metadata.json index c1b9dcc6ae..abc5b3dada 100644 --- a/operators/npp_filter/metadata.json +++ b/operators/npp_filter/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["NPP"], + "tags": ["Signal Processing", "NPP"], "ranking": 1, "requirements": { } } diff --git a/operators/nvidia_video_codec/nv_video_decoder/metadata.json b/operators/nvidia_video_codec/nv_video_decoder/metadata.json index fc3c397ec1..43c6bedfc0 100644 --- a/operators/nvidia_video_codec/nv_video_decoder/metadata.json +++ b/operators/nvidia_video_codec/nv_video_decoder/metadata.json @@ -24,6 +24,7 @@ "aarch64" ], "tags": [ + "Streaming", "NVIDIA Video Codec SDK", "H.264", "H.265", diff --git a/operators/nvidia_video_codec/nv_video_encoder/metadata.json b/operators/nvidia_video_codec/nv_video_encoder/metadata.json index 9cd6980317..63eac9655c 100644 --- a/operators/nvidia_video_codec/nv_video_encoder/metadata.json +++ b/operators/nvidia_video_codec/nv_video_encoder/metadata.json @@ -25,6 +25,7 @@ "aarch64" ], "tags": [ + "Streaming", "NVIDIA Video Codec SDK", "H.264", "H.265", diff --git a/operators/nvidia_video_codec/nv_video_reader/metadata.json b/operators/nvidia_video_codec/nv_video_reader/metadata.json index 8cd17c2541..7efe1bc626 100644 --- a/operators/nvidia_video_codec/nv_video_reader/metadata.json +++ b/operators/nvidia_video_codec/nv_video_reader/metadata.json @@ -25,6 +25,7 @@ "aarch64" ], "tags": [ + "Streaming", "NVIDIA Video Codec SDK", "H.264", "H.265", diff --git a/operators/orsi/orsi_format_converter/metadata.json b/operators/orsi/orsi_format_converter/metadata.json index 5b251b29dd..290bdc8fac 100644 --- a/operators/orsi/orsi_format_converter/metadata.json +++ b/operators/orsi/orsi_format_converter/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["converter"], + "tags": ["Converter", "Format"], "ranking": 3, "requirements": { } diff --git a/operators/qt_video/metadata.json b/operators/qt_video/metadata.json index 0853a87c73..2befc604ec 100644 --- a/operators/qt_video/metadata.json +++ b/operators/qt_video/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Qt", "Video", "UI"], + "tags": ["Visualization", "Qt", "Video", "UI"], "ranking": 1, "requirements": { } } diff --git a/operators/slang_shader/metadata.json b/operators/slang_shader/metadata.json index 5760852249..db92310e3f 100644 --- a/operators/slang_shader/metadata.json +++ b/operators/slang_shader/metadata.json @@ -28,6 +28,7 @@ "aarch64" ], "tags": [ + "Visualization", "Slang", "shading", "rendering", diff --git a/operators/tensor_to_file/metadata.json b/operators/tensor_to_file/metadata.json index 7516de8a1c..61c66797be 100644 --- a/operators/tensor_to_file/metadata.json +++ b/operators/tensor_to_file/metadata.json @@ -25,6 +25,7 @@ "aarch64" ], "tags": [ + "Converter", "Tensor", "File", "Writer" diff --git a/operators/tensor_to_video_buffer/metadata.json b/operators/tensor_to_video_buffer/metadata.json index 298b5a2339..4ec9686e0f 100644 --- a/operators/tensor_to_video_buffer/metadata.json +++ b/operators/tensor_to_video_buffer/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Tensor", "Video"], + "tags": ["Converter", "Tensor", "Video"], "ranking": 1, "requirements": {} } diff --git a/operators/velodyne_lidar/cpp/metadata.json b/operators/velodyne_lidar/cpp/metadata.json index 93dd85aef2..08956684dc 100644 --- a/operators/velodyne_lidar/cpp/metadata.json +++ b/operators/velodyne_lidar/cpp/metadata.json @@ -43,7 +43,7 @@ ] }, "platforms": ["x86_64", "aarch64"], - "tags": ["Lidar", "Point Cloud"], + "tags": ["Robotics", "Lidar", "Point Cloud"], "ranking": 4, "requirements": {} } diff --git a/operators/volume_loader/metadata.json b/operators/volume_loader/metadata.json index a73d8c5607..79e3eca08e 100644 --- a/operators/volume_loader/metadata.json +++ b/operators/volume_loader/metadata.json @@ -25,7 +25,7 @@ "x86_64", "aarch64" ], - "tags": ["Volume"], + "tags": ["Visualization", "Volume"], "ranking": 1, "requirements": { } } diff --git a/operators/volume_renderer/metadata.json b/operators/volume_renderer/metadata.json index 692c130aa5..f868da0a89 100644 --- a/operators/volume_renderer/metadata.json +++ b/operators/volume_renderer/metadata.json @@ -26,7 +26,7 @@ "x86_64", "aarch64" ], - "tags": ["Volume", "Rendering"], + "tags": ["Visualization", "Volume", "Rendering"], "ranking": 1, "requirements": {} } diff --git a/operators/webrtc_client/metadata.json b/operators/webrtc_client/metadata.json index 3b874b3042..3afba873c8 100644 --- a/operators/webrtc_client/metadata.json +++ b/operators/webrtc_client/metadata.json @@ -13,7 +13,7 @@ }, "language": ["Python"], "platforms": ["x86_64", "aarch64"], - "tags": ["WebRTC", "Video"], + "tags": ["Networking and Distributed Computing", "WebRTC", "Video"], "holoscan_sdk": { "minimum_required_version": "0.6.0", "tested_versions": [ diff --git a/operators/webrtc_server/metadata.json b/operators/webrtc_server/metadata.json index 34db4aa89d..d6d8a16616 100644 --- a/operators/webrtc_server/metadata.json +++ b/operators/webrtc_server/metadata.json @@ -13,7 +13,7 @@ }, "language": ["Python"], "platforms": ["x86_64", "aarch64"], - "tags": ["WebRTC", "Video"], + "tags": ["Networking and Distributed Computing", "WebRTC", "Video"], "holoscan_sdk": { "minimum_required_version": "0.6.0", "tested_versions": [ diff --git a/tutorials/async_buffer_deadline/metadata.json b/tutorials/async_buffer_deadline/metadata.json index 07f1461d10..594cda1d58 100644 --- a/tutorials/async_buffer_deadline/metadata.json +++ b/tutorials/async_buffer_deadline/metadata.json @@ -1,7 +1,7 @@ { "tutorial": { - "name": "A Study using Asynchronous Lock-free Buffer with SCHED_DEADLINE", - "description": "This tutorial demonstrates the impact of using an asynchronous lock-free buffer with SCHED_DEADLINE scheduling policy in Linux on the message latency in a Holoscan SDK application and compares it with the default buffer.", + "name": "Asynchronous Lock-free Buffer", + "description": "This tutorial demonstrates the impact of using an asynchronous lock-free buffer with SCHED_DEADLINE scheduling policy in Linux on the message latency.", "authors": [ { "name": "Holoscan Team", @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Real-Time", "Performance", "SCHED_DEADLINE", "Buffer", "Latency", "Linux"], + "tags": ["Optimization", "Real-Time", "Performance", "SCHED_DEADLINE", "Buffer", "Latency", "Linux"], "ranking": 3, "requirements": { "libraries": [ diff --git a/tutorials/cloudxr_runtime_for_xr_applications/metadata.json b/tutorials/cloudxr_runtime_for_xr_applications/metadata.json index e80efc8b12..0f82fb4a48 100644 --- a/tutorials/cloudxr_runtime_for_xr_applications/metadata.json +++ b/tutorials/cloudxr_runtime_for_xr_applications/metadata.json @@ -1,6 +1,6 @@ { "tutorial": { - "name": "Setting up NVIDIA CloudXR Runtime for XR Applications", + "name": "NVIDIA CloudXR Runtime for XR Applications", "description": "In this tutorial, we will walk through the process of setting up the NVIDIA CloudXR OpenXR Runtime for XR Applications.", "authors": [ { diff --git a/tutorials/creating-multi-node-applications/metadata.json b/tutorials/creating-multi-node-applications/metadata.json index 3af0aaeff1..7838c33fe1 100644 --- a/tutorials/creating-multi-node-applications/metadata.json +++ b/tutorials/creating-multi-node-applications/metadata.json @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["Distributed"], + "tags": ["Networking and Distributed Computing", "Distributed"], "ranking": 2, "requirements": {} } diff --git a/tutorials/cuda_mps/metadata.json b/tutorials/cuda_mps/metadata.json index 8c109d6e6e..f3ae9af25e 100644 --- a/tutorials/cuda_mps/metadata.json +++ b/tutorials/cuda_mps/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Acceleration", "Benchmarking", "CUDA", "MPS"], + "tags": ["Optimization", "CUDA", "MPS"], "ranking": 1, "requirements": { "libraries": [ diff --git a/tutorials/debugging/cli_debugging/metadata.json b/tutorials/debugging/cli_debugging/metadata.json index 7876247c24..d62008b496 100644 --- a/tutorials/debugging/cli_debugging/metadata.json +++ b/tutorials/debugging/cli_debugging/metadata.json @@ -24,7 +24,7 @@ "x86_64", "aarch64" ], - "tags": ["Container"], + "tags": ["Development", "Debugging"], "ranking": 1, "requirements": { "applications": [ diff --git a/tutorials/debugging/holoscan_container_vscode/metadata.json b/tutorials/debugging/holoscan_container_vscode/metadata.json index f050aec7ca..28fad12627 100644 --- a/tutorials/debugging/holoscan_container_vscode/metadata.json +++ b/tutorials/debugging/holoscan_container_vscode/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["VS Code"], + "tags": ["Development", "VS Code", "Debugging"], "ranking": 1, "requirements": {} } diff --git a/tutorials/dicom_to_usd_with_monai_and_holoscan/metadata.json b/tutorials/dicom_to_usd_with_monai_and_holoscan/metadata.json index 8ee27b7096..821119bc80 100755 --- a/tutorials/dicom_to_usd_with_monai_and_holoscan/metadata.json +++ b/tutorials/dicom_to_usd_with_monai_and_holoscan/metadata.json @@ -1,6 +1,7 @@ { "tutorial": { - "name": "DICOM to OpenUSD mesh segmentation with MONAI Deploy and Holoscan", + "name": "DICOM to OpenUSD mesh segmentation", + "description": "In this tutorial we demonstrate a method leveraging a combined MONAI Deploy and Holoscan pipeline to process DICOM input data and write a resulting mesh to disk in the OpenUSD file format.", "authors": [ { "name": "Rahul Choudhury", @@ -51,7 +52,7 @@ "x86_64", "aarch64" ], - "tags": ["Healthcare Interop", "MONAI", "OpenUSD", "STL"], + "tags": ["Interoperability", "Medical Imaging", "MONAI", "OpenUSD", "STL"], "ranking": 2, "requirements": { "libraries": [ diff --git a/tutorials/gpu_direct_storage_on_holoscan/metadata.json b/tutorials/gpu_direct_storage_on_holoscan/metadata.json index e9a83118c6..947e2fbc4f 100644 --- a/tutorials/gpu_direct_storage_on_holoscan/metadata.json +++ b/tutorials/gpu_direct_storage_on_holoscan/metadata.json @@ -21,7 +21,7 @@ "platforms": [ "aarch64" ], - "tags": ["GPUDirect"], + "tags": ["Interoperability", "Optimization", "GPUDirect"], "ranking": 4, "requirements": {} } diff --git a/tutorials/gui_for_python_applications/metadata.json b/tutorials/gui_for_python_applications/metadata.json index 9e24083c21..41d8fd3313 100644 --- a/tutorials/gui_for_python_applications/metadata.json +++ b/tutorials/gui_for_python_applications/metadata.json @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["UI"], + "tags": ["Development", "UI", "Python"], "ranking": 1, "requirements": {} } diff --git a/tutorials/high_performance_networking/metadata.json b/tutorials/high_performance_networking/metadata.json index aac6948b0b..f580af2b88 100644 --- a/tutorials/high_performance_networking/metadata.json +++ b/tutorials/high_performance_networking/metadata.json @@ -1,7 +1,7 @@ { "tutorial": { - "name": "Achieving High Performance Networking with Holoscan", - "description": "TODO", + "name": "High Performance Networking with Holoscan", + "description": "How to use the Advanced Network library (ANO) for low latency and high throughput communication through NVIDIA SmartNICs.", "authors": [ { "name": "Alexis Girault", @@ -28,7 +28,7 @@ "x86_64", "aarch64" ], - "tags": ["DPDK", "RDMA", "GPUNetIO", "GPUDirect", "Networking and Distributed Computing", "HPC"], + "tags": ["Networking and Distributed Computing", "DPDK", "RDMA", "GPUNetIO", "GPUDirect", "HPC"], "ranking": 1, "requirements": { "applications": [ diff --git a/tutorials/holohub_operators_external_applications/metadata.json b/tutorials/holohub_operators_external_applications/metadata.json index f1f332842b..a0f28f8950 100644 --- a/tutorials/holohub_operators_external_applications/metadata.json +++ b/tutorials/holohub_operators_external_applications/metadata.json @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["Interoperability"], + "tags": ["Development","Interoperability"], "ranking": 1, "requirements": {} } diff --git a/tutorials/holoscan-bootcamp/metadata.json b/tutorials/holoscan-bootcamp/metadata.json index 0a3d9e5a5d..430a5314f2 100644 --- a/tutorials/holoscan-bootcamp/metadata.json +++ b/tutorials/holoscan-bootcamp/metadata.json @@ -31,7 +31,7 @@ "x86_64", "aarch64" ], - "tags": [], + "tags": ["Development"], "ranking": 1, "requirements": {} } diff --git a/tutorials/holoscan-playground-on-aws/metadata.json b/tutorials/holoscan-playground-on-aws/metadata.json index 0ac34d7e47..cd9560f575 100644 --- a/tutorials/holoscan-playground-on-aws/metadata.json +++ b/tutorials/holoscan-playground-on-aws/metadata.json @@ -22,7 +22,7 @@ "x86_64", "aarch64" ], - "tags": ["Cloud"], + "tags": ["Deployment", "Cloud", "AWS"], "ranking": 1, "requirements": { "services": [ diff --git a/tutorials/holoscan_response_time_analysis/metadata.json b/tutorials/holoscan_response_time_analysis/metadata.json index 4b296154f8..1ba5136b00 100644 --- a/tutorials/holoscan_response_time_analysis/metadata.json +++ b/tutorials/holoscan_response_time_analysis/metadata.json @@ -30,7 +30,7 @@ "x86_64", "aarch64" ], - "tags": ["Real-Time", "Performance", "Optimization"], + "tags": ["Optimization", "Performance"], "ranking": 2, "requirements": {} } diff --git a/tutorials/integrate_external_libs_into_pipeline/metadata.json b/tutorials/integrate_external_libs_into_pipeline/metadata.json index 1bd51731ac..581faf3a84 100644 --- a/tutorials/integrate_external_libs_into_pipeline/metadata.json +++ b/tutorials/integrate_external_libs_into_pipeline/metadata.json @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["CV CUDA", "OpenCV", "Holoscan"], + "tags": ["Interoperability", "CV CUDA", "OpenCV"], "ranking": 1, "requirements": { "libraries": [ diff --git a/tutorials/local-llama/metadata.json b/tutorials/local-llama/metadata.json index 31ee18b037..4853883390 100644 --- a/tutorials/local-llama/metadata.json +++ b/tutorials/local-llama/metadata.json @@ -1,6 +1,6 @@ { "tutorial": { - "name": "Deploying Llama-2 70b model on the edge with IGX Orin", + "name": "Deploying Llama-2 70b model on the edge", "description": "This tutorial will walk you through how to run a quantized version of Meta's Llama-2 70b model as the backend LLM for a Gradio chatbot app, all running on an NVIDIA IGX Orin.", "authors": [ { @@ -22,7 +22,7 @@ "platforms": [ "aarch64" ], - "tags": ["Natural Language and Conversational AI", "CUDA", "Huggingface", "LLM"], + "tags": ["Deployment","NLP", "CUDA", "Huggingface", "LLM"], "ranking": 1, "requirements": { "data": [ diff --git a/tutorials/pretrained_foundational_models/self_supervised_training/README.md b/tutorials/pretrained_foundational_models/self_supervised_training/README.md index 0bf8c2f1ff..e403bd243d 100644 --- a/tutorials/pretrained_foundational_models/self_supervised_training/README.md +++ b/tutorials/pretrained_foundational_models/self_supervised_training/README.md @@ -1,9 +1,9 @@ # Self-Supervised Contrastive Learning for Surgical videos -The focus of this repo is to walkthrough the process of doing Self-Supervised Learning using Contrastive Pre-training on Surgical Video data. +The focus of this tutorial is to walkthrough the process of doing Self-Supervised Learning using Contrastive Pre-training on Surgical Video data. As part of the walk-through we will guide through the steps needed to pre-process and extract the frames from the public *Cholec80 Dataset*. This will be required to run the tutorial. -The repo is organized as follows - +The repository is organized as follows - * `Contrastive_learning_Notebook.ipynb` walks through the process of SSL in a tutorial style * `train_simclr_multiGPU.py` enables running of "pre-training" on surgical data across multiple GPUs through the CLI * `downstream_task_tool_segmentation.py` shows the process of "fine-tuning" for a downstream task starting from a pretrained checkpoint using [MONAI](https://github.com/Project-MONAI/MONAI) diff --git a/tutorials/pretrained_foundational_models/self_supervised_training/metadata.json b/tutorials/pretrained_foundational_models/self_supervised_training/metadata.json index 4e3d06c667..0d6c57f528 100644 --- a/tutorials/pretrained_foundational_models/self_supervised_training/metadata.json +++ b/tutorials/pretrained_foundational_models/self_supervised_training/metadata.json @@ -1,6 +1,6 @@ { "tutorial": { - "name": "Self-Supervised Contrastive Learning for Surgical videos", + "name": "Self-Supervised Learning for Surgical videos", "description": "The focus of this repo is to walkthrough the process of doing Self-Supervised Learning using Contrastive Pre-training on Surgical Video data.", "authors": [ { @@ -23,7 +23,7 @@ "x86_64", "aarch64" ], - "tags": ["Computer Vision and Perception", "Learning", "Healthcare AI", "Surgical AI", "Video"], + "tags": ["Interoperability", "MONAI", "Learning", "Healthcare AI", "Surgical AI", "Video"], "ranking": 1, "requirements": { "data": [ diff --git a/tutorials/windows_vm/metadata.json b/tutorials/windows_vm/metadata.json index 29805dd3b1..ea509bfab8 100644 --- a/tutorials/windows_vm/metadata.json +++ b/tutorials/windows_vm/metadata.json @@ -1,6 +1,6 @@ { "tutorial": { - "name": "Interoperability between Holoscan and a Windows Application on a Single Machine", + "name": "Holoscan & Windows Application", "description": "Demonstrates interoperability between a Holoscan on Linux and a Windows VM application on a single machine", "authors": [ { @@ -22,7 +22,7 @@ "platforms": [ "x86_64" ], - "tags": [], + "tags": ["Interoperability","VM","Deployment"], "ranking": 1, "requirements": {} } diff --git a/utilities/cli/container.py b/utilities/cli/container.py index ac6e15c07f..b5c33ca53c 100644 --- a/utilities/cli/container.py +++ b/utilities/cli/container.py @@ -23,15 +23,18 @@ import subprocess import sys from pathlib import Path -from typing import List, Optional +from typing import List, Optional, Union from .util import ( + DEFAULT_BASE_SDK_VERSION, build_holohub_path_mapping, check_nvidia_ctk, docker_args_to_devcontainer_format, fatal, find_hsdk_build_rel_dir, get_compute_capacity, + get_cuda_tag, + get_default_cuda_version, get_group_id, get_holohub_root, get_host_gpu, @@ -64,7 +67,7 @@ class HoloHubContainer: # SDK and path configuration SDK_PATH = os.environ.get("HOLOHUB_SDK_PATH", "/opt/nvidia/holoscan") - BASE_SDK_VERSION = os.environ.get("HOLOHUB_BASE_SDK_VERSION", "3.6.0") + BASE_SDK_VERSION = os.environ.get("HOLOHUB_BASE_SDK_VERSION", DEFAULT_BASE_SDK_VERSION) BENCHMARKING_SUBDIR = os.environ.get( "HOLOHUB_BENCHMARKING_SUBDIR", "benchmarks/holoscan_flow_benchmarking" ) @@ -73,26 +76,30 @@ class HoloHubContainer: # Image naming format templates BASE_IMAGE_NAME = os.environ.get("HOLOHUB_BASE_IMAGE", "nvcr.io/nvidia/clara-holoscan/holoscan") BASE_IMAGE_FORMAT = os.environ.get( - "HOLOHUB_BASE_IMAGE_FORMAT", "{base_image}:v{sdk_version}-{gpu_type}" + "HOLOHUB_BASE_IMAGE_FORMAT", "{base_image}:v{sdk_version}-{cuda_tag}" ) DEFAULT_IMAGE_FORMAT = os.environ.get( - "HOLOHUB_DEFAULT_IMAGE_FORMAT", "{container_prefix}:ngc-v{sdk_version}-{gpu_type}" + "HOLOHUB_DEFAULT_IMAGE_FORMAT", "{container_prefix}:ngc-v{sdk_version}-{cuda_tag}" ) + # Additional Default build arguments for docker build command (e.g., --build-context flags) + DEFAULT_DOCKER_BUILD_ARGS = os.environ.get("HOLOHUB_DEFAULT_DOCKER_BUILD_ARGS", "") + # Additional Default run arguments for docker run command + DEFAULT_DOCKER_RUN_ARGS = os.environ.get("HOLOHUB_DEFAULT_DOCKER_RUN_ARGS", "") @classmethod - def default_base_image(cls) -> str: + def default_base_image(cls, cuda_version: Optional[Union[str, int]] = None) -> str: return cls.BASE_IMAGE_FORMAT.format( base_image=cls.BASE_IMAGE_NAME, sdk_version=cls.BASE_SDK_VERSION, - gpu_type=get_host_gpu(), + cuda_tag=get_cuda_tag(cuda_version, cls.BASE_SDK_VERSION), ) @classmethod - def default_image(cls) -> str: + def default_image(cls, cuda_version: Optional[Union[str, int]] = None) -> str: return cls.DEFAULT_IMAGE_FORMAT.format( container_prefix=cls.CONTAINER_PREFIX, sdk_version=cls.BASE_SDK_VERSION, - gpu_type=get_host_gpu(), + cuda_tag=get_cuda_tag(cuda_version, cls.BASE_SDK_VERSION), ) @classmethod @@ -113,6 +120,11 @@ def get_build_argparse() -> argparse.ArgumentParser: action="store_true", help="(Build container) Do not use cache when building the image", ) + parser.add_argument( + "--cuda", + type=str, + help="(Build container) CUDA version (e.g., 12, 13). Default: 12", + ) parser.add_argument( "--build-args", help="(Build container) Extra arguments to docker build command, " @@ -205,6 +217,10 @@ def get_device_mounts() -> List[str]: if os.path.exists(delta_sdi): options.extend(["--device", f"{delta_sdi}:{delta_sdi}"]) + delta_sdi = f"/dev/delta-x370{i}" + if os.path.exists(delta_sdi): + options.extend(["--device", f"{delta_sdi}:{delta_sdi}"]) + # Deltacast HDMI capture board delta_hdmi = f"/dev/delta-x350{i}" if os.path.exists(delta_hdmi): @@ -303,7 +319,7 @@ def get_conditional_options( def image_name(self) -> str: if self.dockerfile_path != HoloHubContainer.default_dockerfile(): return f"{self.CONTAINER_PREFIX}:{self.project_metadata.get('project_name', '')}" - return HoloHubContainer.default_image() + return HoloHubContainer.default_image(self.cuda_version) @property def dockerfile_path(self) -> Path: @@ -358,6 +374,7 @@ def __init__(self, project_metadata: Optional[dict[str, any]], language: Optiona language = self.project_metadata.get("metadata", {}).get("language", "") self.language = list_normalized_languages(language)[0] + self.cuda_version = None # None means use default from get_cuda_tag self.dryrun = False self.verbose = False @@ -368,16 +385,24 @@ def build( img: Optional[str] = None, no_cache: bool = False, build_args: Optional[str] = None, + cuda_version: Optional[Union[str, int]] = None, ) -> None: """Build the container image""" + if cuda_version is not None: + self.cuda_version = cuda_version + # Get Dockerfile path docker_file_path = docker_file or self.dockerfile_path - base_img = base_img or self.default_base_image() + base_img = base_img or self.default_base_image(self.cuda_version) img = img or self.image_name gpu_type = get_host_gpu() compute_capacity = get_compute_capacity() + cuda_major = ( + self.cuda_version if self.cuda_version is not None else get_default_cuda_version() + ) + # Check if buildx exists if not self.dryrun: try: @@ -404,14 +429,19 @@ def build( f"BASE_SDK_VERSION={self.BASE_SDK_VERSION}", "--build-arg", f"COMPUTE_CAPACITY={compute_capacity}", + "--build-arg", + f"CUDA_MAJOR={cuda_major}", "--network=host", ] if no_cache: cmd.append("--no-cache") - if build_args: - cmd.extend(shlex.split(build_args)) + full_build_args = " ".join( + filter(None, [HoloHubContainer.DEFAULT_DOCKER_BUILD_ARGS, build_args]) + ) + if full_build_args: + cmd.extend(shlex.split(full_build_args)) cmd.extend(["-f", str(docker_file_path), "-t", img, str(HoloHubContainer.HOLOHUB_ROOT)]) @@ -461,6 +491,10 @@ def run( if local_sdk_root or os.environ.get("HOLOSCAN_SDK_ROOT"): cmd.extend(self.get_local_sdk_options(local_sdk_root)) + # Add default docker run arguments + if HoloHubContainer.DEFAULT_DOCKER_RUN_ARGS: + cmd.extend(shlex.split(HoloHubContainer.DEFAULT_DOCKER_RUN_ARGS)) + if docker_opts: cmd.extend(shlex.split(docker_opts)) @@ -556,7 +590,7 @@ def get_gpu_runtime_args(self) -> List[str]: def get_environment_args(self) -> List[str]: """Environment variable arguments""" - return [ + args = [ "-e", "NVIDIA_DRIVER_CAPABILITIES=graphics,video,compute,utility,display", "-e", @@ -566,6 +600,15 @@ def get_environment_args(self) -> List[str]: "-e", "HOLOHUB_BUILD_LOCAL=1", ] + # Pass CMAKE_BUILD_PARALLEL_LEVEL to container if set on host + cmake_parallel_level = os.environ.get("CMAKE_BUILD_PARALLEL_LEVEL") + if cmake_parallel_level: + args.extend(["-e", f"CMAKE_BUILD_PARALLEL_LEVEL={cmake_parallel_level}"]) + # Pass HOLOHUB_PATH_PREFIX to container if set on host + holohub_path_prefix = os.environ.get("HOLOHUB_PATH_PREFIX") + if holohub_path_prefix: + args.extend(["-e", f"HOLOHUB_PATH_PREFIX={holohub_path_prefix}"]) + return args def enable_x11_access(self) -> None: if ( @@ -656,6 +699,8 @@ def get_devcontainer_args(self, docker_opts: str = "") -> str: docker_args.extend(self.ucx_args()) docker_args.extend(self.get_device_cgroup_args()) docker_args.extend(self.get_nvidia_runtime_args()) + if HoloHubContainer.DEFAULT_DOCKER_RUN_ARGS: + docker_args.extend(shlex.split(HoloHubContainer.DEFAULT_DOCKER_RUN_ARGS)) if docker_opts: docker_args.extend(shlex.split(docker_opts)) project_name = self.project_metadata.get("project_name") if self.project_metadata else None diff --git a/utilities/cli/holohub.py b/utilities/cli/holohub.py index 2c27d9ea51..b48f41b711 100755 --- a/utilities/cli/holohub.py +++ b/utilities/cli/holohub.py @@ -181,8 +181,8 @@ def _create_parser(self) -> argparse.ArgumentParser: build.add_argument("--verbose", action="store_true", help="Print extra output") build.add_argument( "--build-type", - choices=["debug", "release", "rel-debug"], - help="Build type (debug, release, rel-debug)", + help="Build type (debug, release, rel-debug). " + "If not specified, uses CMAKE_BUILD_TYPE environment variable or defaults to 'release'", ) build.add_argument( "--build-with", @@ -236,8 +236,8 @@ def _create_parser(self) -> argparse.ArgumentParser: ) run.add_argument( "--build-type", - choices=["debug", "release", "rel-debug"], - help="Build type (debug, release, rel-debug)", + help="Build type (debug, release, rel-debug). " + "If not specified, uses CMAKE_BUILD_TYPE environment variable or defaults to 'release'", ) run.add_argument( "--run-args", @@ -337,8 +337,8 @@ def _create_parser(self) -> argparse.ArgumentParser: ) install.add_argument( "--build-type", - choices=["debug", "release", "rel-debug"], - help="Build type (debug, release, rel-debug)", + help="Build type (debug, release, rel-debug). " + "If not specified, uses CMAKE_BUILD_TYPE environment variable or defaults to 'release'", ) install.add_argument( "--language", choices=["cpp", "python"], help="Specify language implementation" @@ -779,6 +779,7 @@ def handle_build_container(self, args: argparse.Namespace) -> None: img=args.img, no_cache=args.no_cache, build_args=args.build_args, + cuda_version=getattr(args, "cuda", None), ) def handle_run_container(self, args: argparse.Namespace) -> None: @@ -796,6 +797,7 @@ def handle_run_container(self, args: argparse.Namespace) -> None: img=args.img, no_cache=args.no_cache, build_args=args.build_args, + cuda_version=getattr(args, "cuda", None), ) trailing_args = getattr(args, "_trailing_args", []) @@ -844,11 +846,12 @@ def handle_test(self, args: argparse.Namespace) -> None: img=args.img, no_cache=args.no_cache, build_args=args.build_args, + cuda_version=getattr(args, "cuda", None), ) xvfb = "" if args.no_xvfb else "xvfb-run -a" - # TAG is used in utilities/testing/holohub.container.ctest by default + # TAG is used in CTest scripts by default if getattr(args, "build_name_suffix", None): tag = args.build_name_suffix else: @@ -857,7 +860,11 @@ def handle_test(self, args: argparse.Namespace) -> None: else: image_name = args.base_img or container.default_base_image() tag = image_name.split(":")[-1] - ctest_cmd = f"{xvfb} ctest -DAPP={args.project} -DTAG={tag} " + + ctest_cmd = f"{xvfb} ctest " + if args.project: + ctest_cmd += f"-DAPP={args.project} " + ctest_cmd += f"-DTAG={tag} " if args.cmake_options: cmake_opts = ";".join(args.cmake_options) @@ -962,10 +969,12 @@ def build_project_locally( # Build the project with optional parallel jobs build_cmd = ["cmake", "--build", str(build_dir), "--config", build_type] - if parallel: - build_cmd.extend(["-j", parallel]) + # Determine the number of parallel jobs (user input > env var > CPU count): + if parallel is not None: + build_njobs = str(parallel) else: - build_cmd.append("-j") # Use default number of jobs + build_njobs = os.environ.get("CMAKE_BUILD_PARALLEL_LEVEL", str(os.cpu_count())) + build_cmd.extend(["-j", build_njobs]) holohub_cli_util.run_command(build_cmd, dry_run=dryrun) @@ -1040,6 +1049,7 @@ def handle_build(self, args: argparse.Namespace) -> None: img=args.img, no_cache=args.no_cache, build_args=build_args.get("build_args"), + cuda_version=getattr(args, "cuda", None), ) # Build command with all necessary arguments @@ -1131,7 +1141,7 @@ def handle_run(self, args: argparse.Namespace) -> None: build_dir, project_data = self.build_project_locally( project_name=args.project, language=args.language if hasattr(args, "language") else None, - build_type=args.build_type or "Release", # Default to Release for run + build_type=args.build_type, with_operators=build_args.get("with_operators"), dryrun=args.dryrun, pkg_generator=getattr(args, "pkg_generator", "DEB"), @@ -1152,11 +1162,13 @@ def handle_run(self, args: argparse.Namespace) -> None: f"Project '{args.project}' does not have a run configuration" ) + prefix = holohub_cli_util.resolve_path_prefix(None) path_mapping = holohub_cli_util.build_holohub_path_mapping( holohub_root=HoloHubCLI.HOLOHUB_ROOT, project_data=project_data, build_dir=build_dir, data_dir=HoloHubCLI.DEFAULT_DATA_DIR, + prefix=prefix, ) if path_mapping: mapping_info = ";\n".join( @@ -1185,10 +1197,9 @@ def handle_run(self, args: argparse.Namespace) -> None: f"Did you forget to '{self.script_name} build {args.project}'?" ) - # Handle workdir using the path mapping - workdir_spec = run_config.get("workdir", "holohub_app_bin") + workdir_spec = run_config.get("workdir", f"{prefix}app_bin") if not workdir_spec: - target_dir = Path(path_mapping.get("holohub_root", ".")) + target_dir = Path(path_mapping.get(f"{prefix}root", ".")) elif workdir_spec in path_mapping: target_dir = Path(path_mapping[workdir_spec]) else: @@ -1271,6 +1282,7 @@ def handle_run(self, args: argparse.Namespace) -> None: img=args.img, no_cache=args.no_cache, build_args=build_args.get("build_args"), + cuda_version=getattr(args, "cuda", None), ) language = holohub_cli_util.normalize_language( container.project_metadata.get("metadata", {}).get("language", None) @@ -1757,6 +1769,7 @@ def handle_install(self, args: argparse.Namespace) -> None: img=args.img, no_cache=args.no_cache, build_args=build_args.get("build_args"), + cuda_version=getattr(args, "cuda", None), ) # Install command with all necessary arguments @@ -1865,6 +1878,7 @@ def handle_vscode(self, args: argparse.Namespace) -> None: img=dev_container_tag, no_cache=args.no_cache, build_args=args.build_args, + cuda_version=getattr(args, "cuda", None), ) else: print(f"Skipping build, using existing Dev Container {dev_container_tag}...") diff --git a/utilities/cli/tests/CMakeLists.txt b/utilities/cli/tests/CMakeLists.txt index 5b54ea6dc1..e681424ff7 100644 --- a/utilities/cli/tests/CMakeLists.txt +++ b/utilities/cli/tests/CMakeLists.txt @@ -100,6 +100,30 @@ set_property(TEST test_holohub_build_container_build_args PROPERTY PASS_REGULAR_EXPRESSION "--build-arg TEST=value" ) +# default docker build args via environment variable +add_test( + NAME test_holohub_default_docker_build_args_env + COMMAND ${CMAKE_SOURCE_DIR}/holohub build-container --dryrun + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) +set_property(TEST test_holohub_default_docker_build_args_env PROPERTY + ENVIRONMENT "HOLOHUB_DEFAULT_DOCKER_BUILD_ARGS=--build-arg DEFAULT_FLAG=abc" +) +set_property(TEST test_holohub_default_docker_build_args_env PROPERTY + PASS_REGULAR_EXPRESSION "docker build" + PASS_REGULAR_EXPRESSION "DEFAULT_FLAG=abc" +) + +add_test( + NAME test_holohub_build_container_cuda_version + COMMAND ${CMAKE_SOURCE_DIR}/holohub build-container --dryrun --cuda 13 + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) +set_property(TEST test_holohub_build_container_cuda_version PROPERTY + PASS_REGULAR_EXPRESSION "docker build" + PASS_REGULAR_EXPRESSION "--build-arg CUDA_MAJOR=13" +) + add_test( NAME test_holohub_run_container_docker_opts COMMAND ${CMAKE_SOURCE_DIR}/holohub run-container --dryrun --docker-opts "--memory 4g --entrypoint=bash" @@ -120,6 +144,20 @@ set_property(TEST test_holohub_run_container_add_volume PROPERTY PASS_REGULAR_EXPRESSION "-v /tmp:/tmp" ) +# default docker run args via environment variable +add_test( + NAME test_holohub_default_docker_run_args_env + COMMAND ${CMAKE_SOURCE_DIR}/holohub run-container --dryrun + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) +set_property(TEST test_holohub_default_docker_run_args_env PROPERTY + ENVIRONMENT "HOLOHUB_DEFAULT_DOCKER_RUN_ARGS=-e TEST_ENV=123" +) +set_property(TEST test_holohub_default_docker_run_args_env PROPERTY + PASS_REGULAR_EXPRESSION "docker run" + PASS_REGULAR_EXPRESSION "TEST_ENV=123" +) + add_test( NAME test_holohub_build_dryrun COMMAND ${CMAKE_SOURCE_DIR}/holohub build endoscopy_tool_tracking --dryrun @@ -371,3 +409,30 @@ add_test( set_property(TEST test_holohub_test_ctest_options PROPERTY PASS_REGULAR_EXPRESSION "DMAKE2=2.*DTEST=2" ) + +# Test that HOLOHUB_PATH_PREFIX is passed from host to container +add_test( + NAME test_holohub_path_prefix_env + COMMAND ${CMAKE_SOURCE_DIR}/holohub run-container --dryrun + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) +set_property(TEST test_holohub_path_prefix_env PROPERTY + ENVIRONMENT "HOLOHUB_PATH_PREFIX=custom_prefix_" +) +set_property(TEST test_holohub_path_prefix_env PROPERTY + PASS_REGULAR_EXPRESSION "docker run" + PASS_REGULAR_EXPRESSION "-e HOLOHUB_PATH_PREFIX=custom_prefix_" +) + +# Test that HOLOHUB_PATH_PREFIX is passed when running an application locally +add_test( + NAME test_holohub_path_prefix_app_local + COMMAND ${CMAKE_SOURCE_DIR}/holohub run endoscopy_tool_tracking --local --dryrun + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) +set_property(TEST test_holohub_path_prefix_app_local PROPERTY + ENVIRONMENT "HOLOHUB_PATH_PREFIX=custom_prefix_" +) +set_property(TEST test_holohub_path_prefix_app_local PROPERTY + PASS_REGULAR_EXPRESSION "custom_prefix_root" +) diff --git a/utilities/cli/tests/test_cli.py b/utilities/cli/tests/test_cli.py index 0e4a5fadff..074438c602 100644 --- a/utilities/cli/tests/test_cli.py +++ b/utilities/cli/tests/test_cli.py @@ -128,7 +128,12 @@ def test_container_commands( ) build_args.func(build_args) mock_container.build.assert_called_with( - docker_file=None, base_img="test_image", img=None, no_cache=True, build_args=None + docker_file=None, + base_img="test_image", + img=None, + no_cache=True, + build_args=None, + cuda_version=None, ) # Test run command run_args = self.cli.parser.parse_args("run test_project --local".split()) diff --git a/utilities/cli/tests/test_container.py b/utilities/cli/tests/test_container.py index 9b32baf6a1..d68775b978 100644 --- a/utilities/cli/tests/test_container.py +++ b/utilities/cli/tests/test_container.py @@ -24,6 +24,7 @@ sys.path.append(str(Path(os.getcwd()) / "utilities")) from utilities.cli.container import HoloHubContainer +from utilities.cli.util import get_cuda_tag, get_default_cuda_version class TestHoloHubContainer(unittest.TestCase): @@ -127,6 +128,46 @@ def test_get_pythonpath_options_with_image_env(self, mock_get_image_pythonpath): self.assertEqual(result, ["-e", f"PYTHONPATH={expected_pythonpath}"]) self.container.dryrun = False + @patch("utilities.cli.util.get_host_gpu") + def test_get_cuda_tag_sdk(self, mock_get_host_gpu): + """Test CUDA tag with different SDK versions""" + mock_get_host_gpu.return_value = "dgpu" + + # Test SDK 3.6.0 (old format) - returns gpu_type only + self.assertEqual(get_cuda_tag("12", "3.6.0"), "dgpu") + self.assertEqual(get_cuda_tag("13", "3.6.0"), "dgpu") + + # Test SDK > 3.6.1 (new format) - returns cuda{version}-{gpu_type} + self.assertEqual(get_cuda_tag("12", "3.7.0"), "cuda12-dgpu") + self.assertEqual(get_cuda_tag("13", "3.7.0"), "cuda13") + self.assertEqual(get_cuda_tag("11", "3.7.0"), "cuda11-dgpu") + + # Test with iGPU + mock_get_host_gpu.return_value = "igpu" + self.assertEqual(get_cuda_tag("12", "3.7.0"), "cuda12-igpu") + self.assertEqual(get_cuda_tag("12", "3.6.0"), "igpu") + + @patch("utilities.cli.util.run_info_command") + @patch("utilities.cli.util.shutil.which") + def test_get_default_cuda_version(self, mock_which, mock_run_info_command): + """Test default CUDA version detection based on NVIDIA driver version""" + # nvidia-smi not available -> default to 13 + mock_which.return_value = None + self.assertEqual(get_default_cuda_version(), "13") + # nvidia-smi available but driver version unknown -> default to 13 + mock_which.return_value = "/usr/bin/nvidia-smi" + mock_run_info_command.return_value = None + self.assertEqual(get_default_cuda_version(), "13") + # Driver version < 580 -> CUDA 12 + mock_run_info_command.return_value = "550.54.14" + self.assertEqual(get_default_cuda_version(), "12") + # Driver version >= 580 -> CUDA 13 + mock_run_info_command.return_value = "580.1" + self.assertEqual(get_default_cuda_version(), "13") + # Unparsable driver version -> default to 13 + mock_run_info_command.return_value = "not.a.version" + self.assertEqual(get_default_cuda_version(), "13") + if __name__ == "__main__": unittest.main() diff --git a/utilities/cli/util.py b/utilities/cli/util.py index fdba9663cc..5dc2c36d62 100644 --- a/utilities/cli/util.py +++ b/utilities/cli/util.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import functools import grp import json import os @@ -28,6 +29,8 @@ from pathlib import Path from typing import List, Optional, Tuple, Union +DEFAULT_BASE_SDK_VERSION = "3.7.0" + PROJECT_PREFIXES = { "application": "APP", "benchmark": "APP", @@ -314,25 +317,107 @@ def check_nvidia_ctk(min_version: str = "1.12.0", recommended_version: str = "1. fatal(f"Could not determine nvidia-ctk version. Version {min_version}+ required.") +def get_gpu_name() -> Optional[str]: + """ + Helper function to get GPU name from nvidia-smi. Returns None if nvidia-smi is not available. + """ + if not shutil.which("nvidia-smi"): + return None + try: + output = subprocess.check_output( + ["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"], + text=True, + stderr=subprocess.DEVNULL, + ) + return output.strip() if output else None + except (subprocess.CalledProcessError, FileNotFoundError): + return None + + def get_host_gpu() -> str: """Determine if running on dGPU or iGPU""" - if not shutil.which("nvidia-smi"): + gpu_name = get_gpu_name() + if gpu_name is None: print( "Could not find any GPU drivers on host. Defaulting build to target dGPU/CPU stack.", file=sys.stderr, ) return "dgpu" + # Check for iGPU (Orin integrated GPU) + if "Orin (nvgpu)" in gpu_name: + return "igpu" + return "dgpu" + + +def get_default_cuda_version() -> str: + """ + Get default CUDA version based on NVIDIA driver version. + + Returns: + - "13" if driver version >= 580 or if nvidia-smi is not available + - "12" if driver version < 580 + """ + # Default to CUDA 13 if nvidia-smi is not available + if not shutil.which("nvidia-smi"): + warn("nvidia-smi not found, default CUDA version is 13") + return "13" + + # Check the driver version using nvidia-smi + driver_version = run_info_command( + ["nvidia-smi", "--query-gpu=driver_version", "--format=csv,noheader"] + ) + + if not driver_version: + warn("Unable to detect NVIDIA driver version, default CUDA version is 13") + return "13" + try: - output = subprocess.check_output( - ["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"] - ) - if not output or "Orin (nvgpu)" in output.decode(): - return "igpu" - except subprocess.CalledProcessError: - return "dgpu" + driver_major_version = int(driver_version.split(".")[0]) + if driver_major_version >= 580: + return "13" + else: + return "12" + except (ValueError, IndexError): + warn(f"Unable to parse driver version '{driver_version}', default CUDA version is 13") + return "13" - return "dgpu" + +def get_cuda_tag(cuda_version: Optional[Union[str, int]] = None, sdk_version: str = "3.6.1") -> str: + """ + Determine the CUDA container tag based on CUDA version and GPU type. + + SDK version support: + - SDK < 3.6.1: Old format (dgpu/igpu) + - SDK == 3.6.1: only cuda13-dgpu available + - SDK >= 3.7.0: Full CUDA support + - cuda13: CUDA 13 (x86_64, Jetson Thor) + - cuda12-dgpu: CUDA 12 dGPU (x86_64, IGX Orin dGPU, Clara AGX dGPU, GH200) + - cuda12-igpu: CUDA 12 iGPU (Jetson Orin, IGX Orin iGPU, Clara AGX iGPU) + + Args: + cuda_version: CUDA major version (e.g., 12, 13). If None, uses platform default. + sdk_version: SDK version string (e.g., "3.6.0", "3.6.1", "3.7.0"). + + Returns: + The appropriate container tag string + """ + try: + sdk_ver = parse_semantic_version(sdk_version) + except (ValueError, IndexError): + sdk_ver = parse_semantic_version(DEFAULT_BASE_SDK_VERSION) + if sdk_ver < (3, 6, 1): + return get_host_gpu() + if sdk_ver == (3, 6, 1): + return "cuda13-dgpu" + if cuda_version is None: + cuda_version = get_default_cuda_version() + cuda_str = str(cuda_version) + if cuda_str == "13": + return "cuda13" + if cuda_str == "12": + return f"cuda12-{get_host_gpu()}" + return f"cuda{cuda_str}-{get_host_gpu()}" def get_host_arch() -> str: @@ -759,46 +844,71 @@ def list_cmake_dir_options(script_dir: Path, cmake_function: str) -> List[str]: return sorted(results) +@functools.lru_cache(maxsize=32) +def resolve_path_prefix(prefix: Optional[str] = None) -> str: + """Resolve the path prefix for HoloHub placeholders""" + if prefix is None: + prefix = os.environ.get("HOLOHUB_PATH_PREFIX", "holohub_") + if not prefix.endswith("_"): + prefix = prefix + "_" + return prefix + + def build_holohub_path_mapping( holohub_root: Path, project_data: Optional[dict] = None, build_dir: Optional[Path] = None, data_dir: Optional[Path] = None, + prefix: Optional[str] = None, ) -> dict[str, str]: - """Build a mapping of HoloHub placeholders to their resolved paths""" + """Build a mapping of HoloHub placeholders to their resolved paths + + Args: + holohub_root: Root directory of HoloHub + project_data: Optional project metadata dictionary + build_dir: Optional build directory path + data_dir: Optional data directory path + prefix: Prefix for placeholder keys. If None, reads from HOLOHUB_PATH_PREFIX + environment variable (default: "holohub_") + + Returns: + Dictionary mapping placeholder names to their resolved paths + """ + prefix = resolve_path_prefix(prefix) + if data_dir is None: data_dir = holohub_root / "data" path_mapping = { - "holohub_root": str(holohub_root), - "holohub_data_dir": str(data_dir), + f"{prefix}root": str(holohub_root), + f"{prefix}data_dir": str(data_dir), } if not project_data: return path_mapping # Add project-specific mappings if project_data is provided app_source_path = project_data.get("source_folder", "") if app_source_path: - path_mapping["holohub_app_source"] = str(app_source_path) + path_mapping[f"{prefix}app_source"] = str(app_source_path) if build_dir: - path_mapping["holohub_bin"] = str(build_dir) + path_mapping[f"{prefix}bin"] = str(build_dir) if app_source_path: try: app_build_dir = build_dir / Path(app_source_path).relative_to(holohub_root) - path_mapping["holohub_app_bin"] = str(app_build_dir) + path_mapping[f"{prefix}app_bin"] = str(app_build_dir) except ValueError: # Handle case where app_source_path is not relative to holohub_root - path_mapping["holohub_app_bin"] = str(build_dir) + path_mapping[f"{prefix}app_bin"] = str(build_dir) elif project_data.get("project_name"): # If no build_dir provided but we have project name, try to infer it project_name = project_data["project_name"] inferred_build_dir = holohub_root / "build" / project_name - path_mapping["holohub_bin"] = str(inferred_build_dir) + path_mapping[f"{prefix}bin"] = str(inferred_build_dir) if app_source_path: try: app_build_dir = inferred_build_dir / Path(app_source_path).relative_to(holohub_root) - path_mapping["holohub_app_bin"] = str(app_build_dir) + path_mapping[f"{prefix}app_bin"] = str(app_build_dir) except ValueError: - path_mapping["holohub_app_bin"] = str(inferred_build_dir) + path_mapping[f"{prefix}app_bin"] = str(inferred_build_dir) return path_mapping @@ -1128,12 +1238,15 @@ def collect_environment_variables() -> None: "HOLOHUB_DEFAULT_DOCKERFILE", "HOLOHUB_BASE_IMAGE_FORMAT", "HOLOHUB_DEFAULT_IMAGE_FORMAT", + "HOLOHUB_DEFAULT_DOCKER_BUILD_ARGS", + "HOLOHUB_DEFAULT_DOCKER_RUN_ARGS", "HOLOHUB_DOCS_URL", "HOLOHUB_CLI_DOCS_URL", "HOLOHUB_DATA_PATH", # Legacy variables "HOLOHUB_APP_NAME", "HOLOHUB_CONTAINER_BASE_NAME", + "HOLOHUB_PATH_PREFIX", ] for var in sorted(holohub_env_vars): print(f" {var}: {os.environ.get(var) or '(not set)'}") diff --git a/utilities/metadata/gather_metadata.py b/utilities/metadata/gather_metadata.py index 9fe0e81a3d..b147b65510 100644 --- a/utilities/metadata/gather_metadata.py +++ b/utilities/metadata/gather_metadata.py @@ -69,7 +69,7 @@ def extract_project_name(metadata_filepath: str) -> str: """ parts = metadata_filepath.split(os.sep) - if parts[-2] in ["cpp", "python"]: + if parts[-2] in ["cpp", "python", "py"]: return parts[-3] return parts[-2] diff --git a/utilities/metadata/metadata_validator.py b/utilities/metadata/metadata_validator.py index 30e698e0be..d9d68d8beb 100644 --- a/utilities/metadata/metadata_validator.py +++ b/utilities/metadata/metadata_validator.py @@ -190,10 +190,10 @@ def validate_json_directory(directory, ignore_patterns=[], metadata_is_required: print(name + ": valid") # Check if name matches README title - name_matches, name_msg = check_name_matches_readme(name, jsonData) - if not name_matches: - print("ERROR:" + name + ": " + name_msg) - exit_code = 1 + # name_matches, name_msg = check_name_matches_readme(name, jsonData) + # if not name_matches: + # print("ERROR:" + name + ": " + name_msg) + # exit_code = 1 else: print("ERROR:" + name + ": invalid") print(msg) diff --git a/workflows/ai_surgical_video/python/metadata.json b/workflows/ai_surgical_video/python/metadata.json index 3e5dc3c11c..ff26492a65 100644 --- a/workflows/ai_surgical_video/python/metadata.json +++ b/workflows/ai_surgical_video/python/metadata.json @@ -28,7 +28,7 @@ "x86_64", "aarch64" ], - "tags": ["SSD", "Detection", "MONAI", "Segmentation"], + "tags": ["Healthcare AI", "SSD", "Detection", "MONAI", "Segmentation"], "ranking": 1, "requirements": { "data": [