#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Strict mode: -e exit on error, -x trace commands, -u error on unset vars,
# pipefail so a failure in any pipeline stage fails the whole pipeline.
set -exuo pipefail

# CUDA toolkit version to test against; first positional argument, default 12.6.
CUDA_VERSION=${1:-"12.6"}

echo "=== Testing ExecutorTorch CUDA ${CUDA_VERSION} Build ==="
| 13 | + |
| 14 | +# Function to build and test ExecutorTorch with CUDA support |
#######################################
# Build ExecutorTorch with CUDA enabled and verify the resulting install.
# Globals:
#   CMAKE_ARGS (written) - exported so the ExecutorTorch build enables CUDA.
# Arguments:
#   $1 - CUDA toolkit version string (used for log messages only).
# Outputs:
#   Progress and diagnostic messages to stdout/stderr.
# Returns:
#   0 on success; exits the whole script on build or verification failure.
#######################################
test_executorch_cuda_build() {
  local cuda_version=$1

  echo "Building ExecutorTorch with CUDA ${cuda_version} support..."
  echo "ExecutorTorch will automatically detect CUDA and install appropriate PyTorch wheel"

  # Check available resources before starting
  echo "=== System Information ==="
  echo "Available memory: $(free -h | awk '/Mem/ {print $2}')"
  echo "Available disk space: $(df -h . | awk 'END {print $4}')"
  echo "CPU cores: $(nproc)"
  echo "CUDA version check:"
  # Best-effort diagnostics: absence of the tools is reported, not fatal.
  nvcc --version || echo "nvcc not found"
  nvidia-smi || echo "nvidia-smi not found"

  # Enable the CUDA backend; the ExecutorTorch installer handles the matching
  # PyTorch wheel automatically.
  export CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON"

  echo "=== Starting ExecutorTorch Installation ==="
  # Cap the install at 5400 s (90 min); GNU timeout returns 124 on expiry.
  timeout 5400 ./install_executorch.sh || {
    local exit_code=$?
    echo "ERROR: install_executorch.sh failed with exit code: $exit_code"
    if [[ "$exit_code" -eq 124 ]]; then
      echo "ERROR: Installation timed out after 90 minutes"
    fi
    exit "$exit_code"
  }

  echo "SUCCESS: ExecutorTorch CUDA build completed"

  # Verify the installation
  echo "=== Verifying ExecutorTorch CUDA Installation ==="

  # Import check: a broken package fails the script via set -e.
  python -c "
import executorch
print('SUCCESS: ExecutorTorch imported successfully')
"

  # CUDA smoke test: report device details and run a small matmul on the GPU.
  # sys.exit is used instead of the interactive-only exit() builtin so the
  # non-zero status is reliable under 'python -c'.
  python -c "
import sys
try:
    import torch
    print('INFO: PyTorch version:', torch.__version__)
    print('INFO: CUDA available:', torch.cuda.is_available())

    if torch.cuda.is_available():
        print('SUCCESS: CUDA is available for ExecutorTorch')
        print('INFO: CUDA version:', torch.version.cuda)
        print('INFO: GPU device count:', torch.cuda.device_count())
        print('INFO: Current GPU device:', torch.cuda.current_device())
        print('INFO: GPU device name:', torch.cuda.get_device_name())

        # Test basic CUDA tensor operation
        device = torch.device('cuda')
        x = torch.randn(10, 10).to(device)
        y = torch.randn(10, 10).to(device)
        z = torch.mm(x, y)
        print('SUCCESS: CUDA tensor operation completed on device:', z.device)
        print('INFO: Result tensor shape:', z.shape)

        print('SUCCESS: ExecutorTorch CUDA integration verified')
    else:
        print('WARNING: CUDA not detected, but ExecutorTorch built successfully')
        sys.exit(1)
except Exception as e:
    print('ERROR: ExecutorTorch CUDA test failed:', e)
    sys.exit(1)
"

  echo "SUCCESS: ExecutorTorch CUDA ${cuda_version} build and verification completed successfully"
}
| 88 | + |
# --- Main execution ---------------------------------------------------------
printf 'Current working directory: %s\n' "$(pwd)"
echo "Directory contents:"
ls -la

# Run the CUDA build test
test_executorch_cuda_build "${CUDA_VERSION}"